Mirror of https://github.com/QuantumNous/new-api.git, synced 2026-03-30 02:25:00 +00:00
fix(gemini): fetch model list via native v1beta/models endpoint
Use the native Gemini Models API (/v1beta/models) instead of the OpenAI-compatible path when listing models for Gemini channels, improving compatibility with third-party Gemini-format providers that don't implement OpenAI routes.

- Add paginated model listing with timeout and optional proxy support
- Select an enabled key for multi-key Gemini channels
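For context, the native ListModels endpoint returns pages of model descriptors whose names carry a "models/" prefix, plus a nextPageToken when more pages remain. Below is a minimal, self-contained sketch of how such a page decodes and why the prefix is trimmed; the sample body and model names are representative only, not taken from this commit, and the local modelsPage type simply mirrors the GeminiModelsResponse type added in the diff that follows.

package main

import (
	"encoding/json"
	"fmt"
	"strings"
)

// modelsPage mirrors the GeminiModelsResponse/GeminiModelInfo shape added in the diff below.
type modelsPage struct {
	Models []struct {
		Name string `json:"name"`
	} `json:"models"`
	NextPageToken string `json:"nextPageToken"`
}

func main() {
	// Representative ListModels page; the model names are illustrative.
	raw := `{
		"models": [
			{"name": "models/gemini-2.0-flash"},
			{"name": "models/gemini-1.5-pro"}
		],
		"nextPageToken": "token-for-next-page"
	}`

	var page modelsPage
	if err := json.Unmarshal([]byte(raw), &page); err != nil {
		panic(err)
	}

	for _, m := range page.Models {
		// Channel model lists store bare model IDs, so the "models/" prefix is trimmed.
		fmt.Println(strings.TrimPrefix(m.Name, "models/"))
	}

	// A non-empty nextPageToken means the caller must request the next page
	// with ?pageToken=<token>, which is what the loop in FetchGeminiModels does.
	fmt.Println("next page token:", page.NextPageToken)
}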
@@ -1,6 +1,7 @@
 package gemini
 
 import (
+	"context"
 	"encoding/json"
 	"errors"
 	"fmt"
@@ -8,6 +9,7 @@ import (
 	"net/http"
 	"strconv"
 	"strings"
+	"time"
 	"unicode/utf8"
 
 	"github.com/QuantumNous/new-api/common"
@@ -1363,3 +1365,82 @@ func GeminiImageHandler(c *gin.Context, info *relaycommon.RelayInfo, resp *http.
 
 	return usage, nil
 }
+
+type GeminiModelInfo struct {
+	Name                       string   `json:"name"`
+	Version                    string   `json:"version"`
+	DisplayName                string   `json:"displayName"`
+	Description                string   `json:"description"`
+	InputTokenLimit            int      `json:"inputTokenLimit"`
+	OutputTokenLimit           int      `json:"outputTokenLimit"`
+	SupportedGenerationMethods []string `json:"supportedGenerationMethods"`
+}
+
+type GeminiModelsResponse struct {
+	Models        []GeminiModelInfo `json:"models"`
+	NextPageToken string            `json:"nextPageToken"`
+}
+
+func FetchGeminiModels(baseURL, apiKey, proxyURL string) ([]string, error) {
+	client, err := service.GetHttpClientWithProxy(proxyURL)
+	if err != nil {
+		return nil, fmt.Errorf("failed to create HTTP client: %v", err)
+	}
+
+	allModels := make([]string, 0)
+	nextPageToken := ""
+	maxPages := 100 // Safety limit to prevent infinite loops
+
+	for page := 0; page < maxPages; page++ {
+		url := fmt.Sprintf("%s/v1beta/models", baseURL)
+		if nextPageToken != "" {
+			url = fmt.Sprintf("%s?pageToken=%s", url, nextPageToken)
+		}
+
+		ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+		request, err := http.NewRequestWithContext(ctx, "GET", url, nil)
+		if err != nil {
+			cancel()
+			return nil, fmt.Errorf("failed to create request: %v", err)
+		}
+
+		request.Header.Set("x-goog-api-key", apiKey)
+
+		response, err := client.Do(request)
+		if err != nil {
+			cancel()
+			return nil, fmt.Errorf("request failed: %v", err)
+		}
+
+		if response.StatusCode != http.StatusOK {
+			body, _ := io.ReadAll(response.Body)
+			response.Body.Close()
+			cancel()
+			return nil, fmt.Errorf("server returned error %d: %s", response.StatusCode, string(body))
+		}
+
+		body, err := io.ReadAll(response.Body)
+		response.Body.Close()
+		cancel()
+		if err != nil {
+			return nil, fmt.Errorf("failed to read response: %v", err)
+		}
+
+		var modelsResponse GeminiModelsResponse
+		if err = common.Unmarshal(body, &modelsResponse); err != nil {
+			return nil, fmt.Errorf("failed to parse response: %v", err)
+		}
+
+		for _, model := range modelsResponse.Models {
+			modelName := strings.TrimPrefix(model.Name, "models/")
+			allModels = append(allModels, modelName)
+		}
+
+		nextPageToken = modelsResponse.NextPageToken
+		if nextPageToken == "" {
+			break
+		}
+	}
+
+	return allModels, nil
+}
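The key-selection part of the change ("Select an enabled key for multi-key Gemini channels") is not visible in this hunk. A rough caller-side sketch follows, assuming it sits in the same gemini package; the names fetchChannelGeminiModels and getEnabledKey are illustrative placeholders, not the actual new-api helpers.

// Hypothetical caller-side sketch: the handler name, the keys parameter, and
// getEnabledKey are illustrative and not part of this commit.
func fetchChannelGeminiModels(baseURL, proxyURL string, keys []string) ([]string, error) {
	apiKey, err := getEnabledKey(keys) // multi-key channels: pick one enabled key
	if err != nil {
		return nil, err
	}
	return FetchGeminiModels(baseURL, apiKey, proxyURL)
}

// getEnabledKey returns the first non-empty key. A real implementation would
// also honor the channel's per-key enabled/disabled state.
func getEnabledKey(keys []string) (string, error) {
	for _, key := range keys {
		if k := strings.TrimSpace(key); k != "" {
			return k, nil
		}
	}
	return "", errors.New("no enabled key configured for this channel")
}

In the actual handler the key list would come from the channel record, so a disabled key is never used for the listing request.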