Mirror of https://github.com/QuantumNous/new-api.git (synced 2026-04-16 13:17:26 +00:00)
Compare commits (4 commits):

- fd6838e690
- b64480b750
- da6423de33
- fe37718259
@@ -16,6 +16,7 @@ var GeminiVisionMaxImageNum int
 var NotifyLimitCount int
 var NotificationLimitDurationMinute int
 var GenerateDefaultToken bool
+var ErrorLogEnabled bool

 //var GeminiModelMap = map[string]string{
 //	"gemini-1.0-pro": "v1",
@@ -36,6 +37,8 @@ func InitEnv() {
 	NotificationLimitDurationMinute = common.GetEnvOrDefault("NOTIFICATION_LIMIT_DURATION_MINUTE", 10)
 	// GenerateDefaultToken: whether to generate an initial token; disabled by default.
 	GenerateDefaultToken = common.GetEnvOrDefaultBool("GENERATE_DEFAULT_TOKEN", false)
+	// Whether to enable error logging
+	ErrorLogEnabled = common.GetEnvOrDefaultBool("ERROR_LOG_ENABLED", false)

 	//modelVersionMapStr := strings.TrimSpace(os.Getenv("GEMINI_MODEL_MAP"))
 	//if modelVersionMapStr == "" {
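For context, a GetEnvOrDefaultBool-style helper reads a boolean environment variable and falls back to the given default when the variable is unset or unparsable; both new flags above default to false. The project's common package is not reproduced here, so the sketch below is only a minimal, self-contained approximation of that behaviour, not the actual implementation.

```go
package main

import (
	"fmt"
	"os"
	"strconv"
)

// getEnvBool approximates the behaviour the hunk above relies on:
// return def when the variable is unset or cannot be parsed as a bool.
// Illustrative stand-in, not one-api's common.GetEnvOrDefaultBool.
func getEnvBool(key string, def bool) bool {
	v := os.Getenv(key)
	if v == "" {
		return def
	}
	b, err := strconv.ParseBool(v)
	if err != nil {
		return def
	}
	return b
}

func main() {
	os.Setenv("ERROR_LOG_ENABLED", "true")
	fmt.Println(getEnvBool("ERROR_LOG_ENABLED", false))      // true
	fmt.Println(getEnvBool("GENERATE_DEFAULT_TOKEN", false)) // false (unset, default used)
}
```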
@@ -10,6 +10,7 @@ import (
 	"log"
 	"net/http"
 	"one-api/common"
+	constant2 "one-api/constant"
 	"one-api/dto"
 	"one-api/middleware"
 	"one-api/model"
@@ -40,7 +41,7 @@ func relayHandler(c *gin.Context, relayMode int) *dto.OpenAIErrorWithStatusCode
 		err = relay.TextHelper(c)
 	}

-	if err != nil {
+	if constant2.ErrorLogEnabled && err != nil {
 		// save the error log to MySQL
 		userId := c.GetInt("id")
 		tokenName := c.GetString("token_name")
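The change above makes the error-log path conditional on the new flag. As a rough, self-contained illustration of that gating (recordErrorLog and the constants below are hypothetical stand-ins, not the project's helpers):

```go
package main

import (
	"errors"
	"fmt"
)

// errorLogEnabled stands in for constant2.ErrorLogEnabled.
var errorLogEnabled = true

// recordErrorLog is a hypothetical stand-in for persisting the error;
// the real handler writes it to MySQL, here it just prints.
func recordErrorLog(userId int, tokenName string, err error) {
	fmt.Printf("error log: user=%d token=%q err=%v\n", userId, tokenName, err)
}

func main() {
	err := errors.New("upstream returned 429")
	// Same shape as the diff: persist only when the flag is on and an error occurred.
	if errorLogEnabled && err != nil {
		recordErrorLog(42, "demo-token", err)
	}
}
```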
@@ -15,6 +15,7 @@ services:
       - SQL_DSN=root:123456@tcp(mysql:3306)/new-api # Point to the mysql service
       - REDIS_CONN_STRING=redis://redis
       - TZ=Asia/Shanghai
+      - ERROR_LOG_ENABLED=true # Whether to enable error logging
       # - TIKTOKEN_CACHE_DIR=./tiktoken_cache # Uncomment if you need to use tiktoken_cache
       # - SESSION_SECRET=random_string # Set for multi-node deployment; you must change this random string!
       # - NODE_TYPE=slave # Uncomment for slave node in multi-node deployment
@@ -51,6 +51,7 @@ type GeneralOpenAIRequest struct {
 	Dimensions     int `json:"dimensions,omitempty"`
 	Modalities     any `json:"modalities,omitempty"`
 	Audio          any `json:"audio,omitempty"`
+	EnableThinking any `json:"enable_thinking,omitempty"` // ali
 	ExtraBody      any `json:"extra_body,omitempty"`
 }

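Because the new field uses omitempty, enable_thinking is only serialized when a client actually sets it, so existing requests are unaffected. A minimal sketch with a pared-down stand-in for dto.GeneralOpenAIRequest (only the relevant fields are reproduced):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// chatRequest is an illustrative stand-in for dto.GeneralOpenAIRequest,
// reproducing only the fields relevant to the new tag.
type chatRequest struct {
	Model          string `json:"model"`
	EnableThinking any    `json:"enable_thinking,omitempty"` // ali
	ExtraBody      any    `json:"extra_body,omitempty"`
}

func main() {
	withThinking, _ := json.Marshal(chatRequest{Model: "qwen3-235b-a22b", EnableThinking: true})
	withoutThinking, _ := json.Marshal(chatRequest{Model: "qwen-plus"})
	fmt.Println(string(withThinking))    // {"model":"qwen3-235b-a22b","enable_thinking":true}
	fmt.Println(string(withoutThinking)) // {"model":"qwen-plus"}
}
```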
@@ -84,9 +84,11 @@ func CacheGetRandomSatisfiedChannel(group string, model string, retry int) (*Cha
 	if !common.MemoryCacheEnabled {
 		return GetRandomSatisfiedChannel(group, model, retry)
 	}
+
 	channelSyncLock.RLock()
+	defer channelSyncLock.RUnlock()
 	channels := group2model2channels[group][model]
-	channelSyncLock.RUnlock()
+
 	if len(channels) == 0 {
 		return nil, errors.New("channel not found")
 	}
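The rework above switches the lookup to the usual RLock-plus-deferred-RUnlock idiom, so the read lock is released exactly once on every return path and stays held across later use of the cached slice. A minimal, self-contained sketch of that pattern (the map and names below are illustrative, not the project's actual cache):

```go
package main

import (
	"errors"
	"fmt"
	"sync"
)

var (
	cacheLock sync.RWMutex
	// toy stand-in for group2model2channels
	cache = map[string][]string{"default/gpt-4o": {"channel-1", "channel-2"}}
)

// lookup takes the read lock and defers the unlock, so the early error
// return and the normal return both release it exactly once.
func lookup(key string) ([]string, error) {
	cacheLock.RLock()
	defer cacheLock.RUnlock()
	channels := cache[key]
	if len(channels) == 0 {
		return nil, errors.New("channel not found")
	}
	return channels, nil
}

func main() {
	fmt.Println(lookup("default/gpt-4o"))        // [channel-1 channel-2] <nil>
	fmt.Println(lookup("default/unknown-model")) // [] channel not found
}
```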
@@ -1,7 +1,12 @@
 package ali

 var ModelList = []string{
-	"qwen-turbo", "qwen-plus", "qwen-max", "qwen-max-longcontext",
+	"qwen-turbo",
+	"qwen-plus",
+	"qwen-max",
+	"qwen-max-longcontext",
+	"qwq-32b",
+	"qwen3-235b-a22b",
 	"text-embedding-v1",
 }

@@ -670,6 +670,7 @@ func GeminiChatStreamHandler(c *gin.Context, resp *http.Response, info *relaycom
 			usage.PromptTokens = geminiResponse.UsageMetadata.PromptTokenCount
 			usage.CompletionTokens = geminiResponse.UsageMetadata.CandidatesTokenCount
+			usage.CompletionTokenDetails.ReasoningTokens = geminiResponse.UsageMetadata.ThoughtsTokenCount
 			usage.TotalTokens = geminiResponse.UsageMetadata.TotalTokenCount
 		}
 		err = helper.ObjectData(c, response)
 		if err != nil {
@@ -690,9 +691,8 @@ func GeminiChatStreamHandler(c *gin.Context, resp *http.Response, info *relaycom
 		}
 	}

-	usage.TotalTokens = usage.PromptTokens + usage.CompletionTokens
 	usage.PromptTokensDetails.TextTokens = usage.PromptTokens
-	//usage.CompletionTokenDetails.TextTokens = usage.CompletionTokens
+	usage.CompletionTokens = usage.TotalTokens - usage.PromptTokens

 	if info.ShouldIncludeUsage {
 		response = helper.GenerateFinalUsageResponse(id, createAt, info.UpstreamModelName, *usage)
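Taken together with the ReasoningTokens line added earlier in the same handler, the final accounting now trusts Gemini's totalTokenCount and derives completion tokens as total minus prompt, so thought tokens are no longer dropped from the completion count. A worked example with made-up numbers, assuming totalTokenCount already includes the thought tokens (which is what the change relies on):

```go
package main

import "fmt"

func main() {
	// Illustrative numbers, not taken from the diff.
	promptTokenCount := 120     // UsageMetadata.PromptTokenCount
	candidatesTokenCount := 300 // UsageMetadata.CandidatesTokenCount
	thoughtsTokenCount := 80    // UsageMetadata.ThoughtsTokenCount
	totalTokenCount := promptTokenCount + candidatesTokenCount + thoughtsTokenCount // 500

	// Previous derivation: total = prompt + candidates = 420, losing the 80 thought tokens.
	oldTotal := promptTokenCount + candidatesTokenCount
	// New derivation: completion = total - prompt = 380, so thought tokens are counted.
	newCompletion := totalTokenCount - promptTokenCount

	fmt.Println(oldTotal, newCompletion) // 420 380
}
```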
@@ -740,6 +740,7 @@ func GeminiChatHandler(c *gin.Context, resp *http.Response, info *relaycommon.Re
 	}

+	usage.CompletionTokenDetails.ReasoningTokens = geminiResponse.UsageMetadata.ThoughtsTokenCount
 	usage.CompletionTokens = usage.TotalTokens - usage.PromptTokens

 	fullTextResponse.Usage = usage
 	jsonResponse, err := json.Marshal(fullTextResponse)
@@ -517,18 +517,18 @@ func getHardcodedCompletionModelRatio(name string) (float64, bool) {

 func GetAudioRatio(name string) float64 {
 	if strings.Contains(name, "-realtime") {
-		if strings.HasSuffix(name, "gpt-4o-realtime-preview-2024-12-17") {
+		if strings.HasSuffix(name, "gpt-4o-realtime-preview") {
 			return 8
-		} else if strings.Contains(name, "mini") {
+		} else if strings.Contains(name, "gpt-4o-mini-realtime-preview") {
 			return 10 / 0.6
 		} else {
 			return 20
 		}
 	}
 	if strings.Contains(name, "-audio") {
-		if strings.HasSuffix(name, "gpt-4o-audio-preview-2024-12-17") {
-			return 16
-		} else if strings.Contains(name, "mini") {
+		if strings.HasPrefix(name, "gpt-4o-audio-preview") {
+			return 40 / 2.5
+		} else if strings.HasPrefix(name, "gpt-4o-mini-audio-preview") {
 			return 10 / 0.15
 		} else {
 			return 40
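For reference, the literal ratio expressions in the rewritten branches evaluate as follows (a quick arithmetic check, not part of the diff):

```go
package main

import "fmt"

func main() {
	// Values returned by the rewritten GetAudioRatio branches.
	fmt.Println(10 / 0.6)  // ~16.67 for gpt-4o-mini-realtime-preview
	fmt.Println(40 / 2.5)  // 16 for gpt-4o-audio-preview
	fmt.Println(10 / 0.15) // ~66.67 for gpt-4o-mini-audio-preview
}
```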