Compare commits


80 Commits

Author SHA1 Message Date
IcedTangerine
87540b4f7c Merge pull request #1110 from wangr0031/fix_parallel_tool_calls
feat: pass parallel_tool_calls through on the chat/completion route
2025-05-28 14:25:43 +08:00
IcedTangerine
e3d7b31a49 Update openai_request.go 2025-05-28 14:25:24 +08:00
IcedTangerine
bf016543c3 Merge pull request #1113 from tbphp/tbphp_vertex_gemini_global_region
fix: Vertex channel global region format
2025-05-28 14:16:47 +08:00
IcedTangerine
eb94aa13e6 Merge pull request #1111 from feitianbubu/fxm-ali-fetch-models-url
fix: ali FetchUpstreamModels url
2025-05-28 14:11:17 +08:00
tbphp
6e72dcd0ba fix: Vertex channel global region format 2025-05-27 21:50:53 +08:00
skynono
96ab4177ca fix: ali FetchUpstreamModels url 2025-05-27 11:22:40 +08:00
wang.rong
76824a0337 pass parallel_tool_calls through on chat/completion 2025-05-27 09:32:20 +08:00
IcedTangerine
3cd29a4963 Merge pull request #1109 from feitianbubu/fix-qwen-thinking
fix: ali parameter.enable_thinking must be set to false for non-streaming calls
2025-05-26 19:32:34 +08:00
creamlike1024
41120b4d75 Merge branch 'main' of github.com:QuantumNous/new-api 2025-05-26 18:56:14 +08:00
creamlike1024
30d5a11f46 fix: search-preview model web search billing 2025-05-26 18:53:41 +08:00
skynono
368fd75c86 fix: ali parameter.enable_thinking must be set to false for non-streaming calls 2025-05-26 17:41:02 +08:00
IcedTangerine
ee07762611 Merge pull request #1075 from feitianbubu/fix-default-model-not-exist
fix: if the default model does not exist, set the first one as default
2025-05-26 17:21:14 +08:00
IcedTangerine
a215538b4d Merge pull request #1081 from feitianbubu/fixTypoOidcEnabledField
fix: typo in oidc_enabled field (previously oidc)
2025-05-26 17:20:35 +08:00
IcedTangerine
873e3f3dc8 Merge pull request #1099 from feitianbubu/fixTagModeStatusSave
fix: keep BatchDelete and TagMode enabled status
2025-05-26 17:17:34 +08:00
Calcium-Ion
b564cac048 Merge pull request #1100 from daggeryu/patch-4
fix aws claude-sonnet-4-20250514
2025-05-24 15:27:30 +08:00
CaIon
fbdad581b5 fix: improve input validation and error handling in ModelSetting and SettingGeminiModel components 2025-05-24 15:26:55 +08:00
daggeryu
0595636ceb fix aws claude-sonnet-4-20250514 2025-05-24 01:21:14 +08:00
CaIon
d95c2436d7 feat: add support for new regions in Claude Sonnet 4 and Claude Opus 4 models in AWS constants 2025-05-23 21:11:00 +08:00
skynono
2cc2d4f652 fix: keep BatchDelete and TagMode enabled status 2025-05-23 20:17:48 +08:00
CaIon
1644b7b15d feat: add new model entries for Claude Sonnet 4 and Claude Opus 4 across multiple components, including constants and cache settings 2025-05-23 15:20:16 +08:00
CaIon
66a8612d12 feat: add new model ratios for Claude Sonnet 4 and Claude Opus 4; update ratio retrieval logic for improved handling of model names 2025-05-23 02:02:21 +08:00
CaIon
f796c3b216 fix: update Init method to correctly set RequestMode based on upstream model name prefixes 2025-05-23 01:34:53 +08:00
CaIon
c53a48cde5 feat: add panic recovery and retry mechanism for InitChannelCache; improve batch deletion of abilities in FixAbility 2025-05-23 01:26:52 +08:00
CaIon
9a59da16a5 feat: implement search functionality in ChannelsTable for improved channel filtering 2025-05-22 16:54:55 +08:00
CaIon
e18001299b feat: enhance Gemini response handling by adding reasoning content and updating JSON decoding method 2025-05-22 16:11:50 +08:00
CaIon
66bdfe180c feat: add Thought field to GeminiPart and update response handling in streamResponseGeminiChat2OpenAI 2025-05-22 15:52:23 +08:00
skynono
e1190f98e9 fix: typo in oidc_enabled field (previously oidc) 2025-05-21 09:33:57 +08:00
CaIon
1f9fc09989 feat: add OutputFormat field to ImageRequest for enhanced image processing options 2025-05-20 19:40:29 +08:00
CaIon
498d73f67c refactor: update JSON field names in GeminiChatRequest for consistency 2025-05-19 20:26:30 +08:00
skynono
9c12e02cb5 fix: if the default model does not exist, set the first one as default 2025-05-19 14:56:39 +08:00
IcedTangerine
0ca17d3e6d Merge pull request #1071 from feitianbubu/fixMjImageProxy
fix: proxy settings not applied when requesting MJ image url
2025-05-18 14:56:47 +08:00
skynono
9927e5d191 fix: proxy settings not applied when requesting MJ image url 2025-05-16 18:07:56 +08:00
Calcium-Ion
7171a69512 Merge pull request #1067 from QuantumNous/coze
Coze channel
2025-05-16 16:11:02 +08:00
creamlike1024
e379ee8f66 coze stream 2025-05-16 10:27:07 +08:00
creamlike1024
59aabb4311 add frontend display, more models 2025-05-15 20:00:59 +08:00
CaIon
4825404d37 feat: enhance image decoding logic to handle base64 file types and improve error handling 2025-05-15 14:51:33 +08:00
CaIon
ea04e6bcc5 fix: update model selection logic for image edits in distributor middleware 2025-05-14 17:01:50 +08:00
creamlike1024
108b67be6c use channel bot id 2025-05-13 22:23:38 +08:00
creamlike1024
29c95c598e cozeChatHelper 2025-05-13 22:01:12 +08:00
creamlike1024
b2499b0a7e DoRequest 2025-05-13 21:13:34 +08:00
IcedTangerine
12737fb7e5 Merge pull request #1063 from kingfs/fix/ali-completions-api
fix: ALI completions api path error
2025-05-13 17:51:35 +08:00
王永振
f17f38e569 fix: ALI completions api path error 2025-05-13 13:39:44 +08:00
creamlike1024
b2cad22952 add coze request 2025-05-13 12:52:22 +08:00
creamlike1024
e763124b69 Merge branch 'a37836323-add-dalle-fields' 2025-05-11 17:03:56 +08:00
creamlike1024
153012789d Merge branch 'add-dalle-fields' of github.com:a37836323/new-api into a37836323-add-dalle-fields 2025-05-11 17:03:27 +08:00
creamlike1024
d985563516 feat: add support for socks5h 2025-05-11 17:00:33 +08:00
CaIon
58dc7ad770 feat: add moderation and background fields to ImageRequest struct in dalle.go #1052 2025-05-10 15:52:41 +08:00
a37836323
28cdfc0a14 Add Background and Moderation fields to the DALL-E image generation request 2025-05-10 04:33:49 +08:00
CaIon
7b176015b8 feat: enhance OpenAI handler to support forced response formatting and add debug logging for request URLs 2025-05-09 18:57:06 +08:00
Calcium-Ion
cc2d9f539d Merge pull request #1046 from QuantumNous/workerHttpRequest
feat: add option to allow worker HTTP image requests
2025-05-09 18:31:25 +08:00
IcedTangerine
7f86bdf548 Merge pull request #1050 from feitianbubu/fixRatio
fix: correct formatting string in PriceData.ToSetting to handle ImageRatio as float instead of integer
2025-05-09 18:15:39 +08:00
creamlike1024
0d929800cf fix: GetRequestURL remove unnecessary case 2025-05-09 18:13:19 +08:00
creamlike1024
9ebfcaf6aa feat: change azure default api version to 2025-04-01-preview 2025-05-09 18:11:37 +08:00
skynono
40efa73a42 fix: correct formatting string in PriceData.ToSetting to handle ImageRatio as float instead of integer 2025-05-09 17:12:35 +08:00
creamlike1024
4a59b3ccd6 Merge branch '9Ninety-fix/sse_ping' 2025-05-09 13:57:26 +08:00
creamlike1024
ec61534256 feat: send SSE ping before get response 2025-05-09 13:57:00 +08:00
creamlike1024
2a218c1c89 Merge branch 'fix/sse_ping' of github.com:9Ninety/new-api into 9Ninety-fix/sse_ping 2025-05-09 12:28:05 +08:00
IcedTangerine
993cd6b624 Merge pull request #1045 from tbphp/feat_openrouter_balance
feat: update OpenRouter balance
2025-05-09 12:19:20 +08:00
creamlike1024
3d4bd76083 feat: add option to allow worker HTTP image requests 2025-05-09 02:00:42 +08:00
tbphp
7192437863 fix: adjust naming conventions 2025-05-09 00:20:26 +08:00
tbphp
4bbcb00d13 feat: update OpenRouter balance 2025-05-09 00:15:44 +08:00
creamlike1024
9de24668d8 Merge branch 'tbphp-tbphp_model_request_rate_limit_for_group' 2025-05-08 23:20:08 +08:00
CaIon
7aa54a2cd7 feat: add AzureNoRemoveDotTime constant and update channel handling #1044
- Introduced a new constant `AzureNoRemoveDotTime` in `constant/azure.go` to manage model name formatting for channels created after May 10, 2025.
- Updated `distributor.go` to set `channel_create_time` in the context.
- Modified `adaptor.go` to conditionally remove dots from model names based on the channel creation time.
- Enhanced `relay_info.go` to include `ChannelCreateTime` in the `RelayInfo` struct.
- Updated English localization files to reflect changes in model name handling for new channels.
2025-05-08 23:19:40 +08:00
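
A minimal sketch of the gating described above, with assumed helper names (only the constant name AzureNoRemoveDotTime comes from the commit itself):

package constant

import (
	"strings"
	"time"
)

// Channels created on or after the cutoff keep dots in their model names;
// older channels keep the legacy dot-stripping behavior.
var AzureNoRemoveDotTime = time.Date(2025, 5, 10, 0, 0, 0, 0, time.Local).Unix()

// AzureModelName is a hypothetical helper illustrating the conditional removal.
func AzureModelName(name string, channelCreateTime int64) string {
	if channelCreateTime >= AzureNoRemoveDotTime {
		return name
	}
	return strings.ReplaceAll(name, ".", "")
}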
CaIon
a836e97315 fix: update OpenAI request handling to include 'o1-preview' model support #1029 2025-05-08 23:19:38 +08:00
creamlike1024
3373f5e0a0 fix: tool quota calculate 2025-05-08 23:19:37 +08:00
liusanp
d6e601b424 fix: xAi response 2025-05-08 23:19:35 +08:00
liusanp
8c3a559690 fix: xAi requestUrl 2025-05-08 23:19:34 +08:00
liusanp
c008d391df fix: quality, size or style are not supported by xAI API 2025-05-08 23:19:32 +08:00
creamlike1024
7c29844e4a Merge branch 'tbphp_model_request_rate_limit_for_group' of github.com:tbphp/new-api into tbphp-tbphp_model_request_rate_limit_for_group 2025-05-08 23:16:06 +08:00
9
02acc52fdb fix: ensure SSE ping packets are sent before upstream response
These changes ensure SSE ping packets are sent before a response is received from the upstream. The previous implementation did not send ping packets until after the upstream response, rendering the feature ineffective.
2025-05-07 23:29:06 +08:00
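
For context, an SSE keep-alive is typically a comment frame, a line starting with ":", which compliant clients ignore. A hedged sketch of such a ping writer (not the project's actual helper.PingData):

package helper

import (
	"io"
	"net/http"
)

// writePing emits an SSE comment frame and flushes it immediately so
// intermediaries forward it and the connection does not idle out.
func writePing(w http.ResponseWriter) error {
	if _, err := io.WriteString(w, ": ping\n\n"); err != nil {
		return err
	}
	if f, ok := w.(http.Flusher); ok {
		f.Flush()
	}
	return nil
}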
tbphp
3d243c3ee2 fix: style fixes 2025-05-05 23:56:15 +08:00
tbphp
87188cd7d4 fix: revert indentation fix 2025-05-05 23:53:05 +08:00
tbphp
bbab729619 fix: text 2025-05-05 23:48:15 +08:00
tbphp
0be3678c9c fix: completed request count must be at least 1 2025-05-05 23:41:43 +08:00
tbphp
1cb4d750e4 feat: frontend improvements for group rate limiting 2025-05-05 22:06:16 +08:00
tbphp
88ed83f419 feat: ModelRequestRateLimitGroup check 2025-05-05 20:00:06 +08:00
tbphp
1513ed7847 refactor: adjust code to follow existing project conventions 2025-05-05 19:32:22 +08:00
tbphp
1e1d24d1b0 fix: rm debug file 2025-05-05 17:57:02 +08:00
tbphp
7e7d6112ca feat: clean up code, removing redundant comments and changes 2025-05-05 11:34:57 +08:00
tbphp
6c3fb7777e feat: add group rate limiting 2025-05-05 07:31:54 +08:00
55 changed files with 1226 additions and 151 deletions

View File

@@ -107,7 +107,7 @@ For detailed configuration instructions, please refer to [Installation Guide-Env
- `GEMINI_VISION_MAX_IMAGE_NUM`: Maximum number of images for Gemini models, default is `16`
- `MAX_FILE_DOWNLOAD_MB`: Maximum file download size in MB, default is `20`
- `CRYPTO_SECRET`: Encryption key used for encrypting database content
- `AZURE_DEFAULT_API_VERSION`: Azure channel default API version, default is `2024-12-01-preview`
- `AZURE_DEFAULT_API_VERSION`: Azure channel default API version, default is `2025-04-01-preview`
- `NOTIFICATION_LIMIT_DURATION_MINUTE`: Notification limit duration, default is `10` minutes
- `NOTIFY_LIMIT_COUNT`: Maximum number of user notifications within the specified duration, default is `2`

View File

@@ -107,7 +107,7 @@ New API提供了丰富的功能,详细特性请参考[特性说明](https://do
- `GEMINI_VISION_MAX_IMAGE_NUM`:Gemini模型最大图片数量,默认 `16`
- `MAX_FILE_DOWNLOAD_MB`:最大文件下载大小,单位MB,默认 `20`
- `CRYPTO_SECRET`:加密密钥,用于加密数据库内容
- `AZURE_DEFAULT_API_VERSION`:Azure渠道默认API版本,默认 `2024-12-01-preview`
- `AZURE_DEFAULT_API_VERSION`:Azure渠道默认API版本,默认 `2025-04-01-preview`
- `NOTIFICATION_LIMIT_DURATION_MINUTE`:通知限制持续时间,默认 `10`分钟
- `NOTIFY_LIMIT_COUNT`:用户通知在指定持续时间内的最大数量,默认 `2`

View File

@@ -240,6 +240,7 @@ const (
ChannelTypeBaiduV2 = 46
ChannelTypeXinference = 47
ChannelTypeXai = 48
ChannelTypeCoze = 49
ChannelTypeDummy // this one is only for count, do not add any channel after this
)
@@ -294,4 +295,5 @@ var ChannelBaseURLs = []string{
"https://qianfan.baidubce.com", //46
"", //47
"https://api.x.ai", //48
"https://api.coze.cn", //49
}

View File

@@ -31,7 +31,7 @@ func InitEnv() {
GetMediaToken = common.GetEnvOrDefaultBool("GET_MEDIA_TOKEN", true)
GetMediaTokenNotStream = common.GetEnvOrDefaultBool("GET_MEDIA_TOKEN_NOT_STREAM", true)
UpdateTask = common.GetEnvOrDefaultBool("UPDATE_TASK", true)
AzureDefaultAPIVersion = common.GetEnvOrDefaultString("AZURE_DEFAULT_API_VERSION", "2024-12-01-preview")
AzureDefaultAPIVersion = common.GetEnvOrDefaultString("AZURE_DEFAULT_API_VERSION", "2025-04-01-preview")
GeminiVisionMaxImageNum = common.GetEnvOrDefault("GEMINI_VISION_MAX_IMAGE_NUM", 16)
NotifyLimitCount = common.GetEnvOrDefault("NOTIFY_LIMIT_COUNT", 2)
NotificationLimitDurationMinute = common.GetEnvOrDefault("NOTIFICATION_LIMIT_DURATION_MINUTE", 10)

View File

@@ -108,6 +108,13 @@ type DeepSeekUsageResponse struct {
} `json:"balance_infos"`
}
type OpenRouterCreditResponse struct {
Data struct {
TotalCredits float64 `json:"total_credits"`
TotalUsage float64 `json:"total_usage"`
} `json:"data"`
}
// GetAuthHeader get auth header
func GetAuthHeader(token string) http.Header {
h := http.Header{}
@@ -281,6 +288,22 @@ func updateChannelAIGC2DBalance(channel *model.Channel) (float64, error) {
return response.TotalAvailable, nil
}
func updateChannelOpenRouterBalance(channel *model.Channel) (float64, error) {
url := "https://openrouter.ai/api/v1/credits"
body, err := GetResponseBody("GET", url, channel, GetAuthHeader(channel.Key))
if err != nil {
return 0, err
}
response := OpenRouterCreditResponse{}
err = json.Unmarshal(body, &response)
if err != nil {
return 0, err
}
balance := response.Data.TotalCredits - response.Data.TotalUsage
channel.UpdateBalance(balance)
return balance, nil
}
func updateChannelBalance(channel *model.Channel) (float64, error) {
baseURL := common.ChannelBaseURLs[channel.Type]
if channel.GetBaseURL() == "" {
@@ -307,6 +330,8 @@ func updateChannelBalance(channel *model.Channel) (float64, error) {
return updateChannelSiliconFlowBalance(channel)
case common.ChannelTypeDeepSeek:
return updateChannelDeepSeekBalance(channel)
case common.ChannelTypeOpenRouter:
return updateChannelOpenRouterBalance(channel)
default:
return 0, errors.New("尚未实现")
}
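
For reference, the credits endpoint queried above returns a payload shaped like {"data": {"total_credits": 100.0, "total_usage": 42.5}} (values illustrative), from which the remaining balance, 57.5 here, is computed as total_credits minus total_usage.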

View File

@@ -119,8 +119,11 @@ func FetchUpstreamModels(c *gin.Context) {
baseURL = channel.GetBaseURL()
}
url := fmt.Sprintf("%s/v1/models", baseURL)
if channel.Type == common.ChannelTypeGemini {
switch channel.Type {
case common.ChannelTypeGemini:
url = fmt.Sprintf("%s/v1beta/openai/models", baseURL)
case common.ChannelTypeAli:
url = fmt.Sprintf("%s/compatible-mode/v1/models", baseURL)
}
body, err := GetResponseBody("GET", url, channel, GetAuthHeader(channel.Key))
if err != nil {

View File

@@ -110,6 +110,15 @@ func UpdateOption(c *gin.Context) {
})
return
}
case "ModelRequestRateLimitGroup":
err = setting.CheckModelRequestRateLimitGroup(option.Value)
if err != nil {
c.JSON(http.StatusOK, gin.H{
"success": false,
"message": err.Error(),
})
return
}
}
err = model.UpdateOption(option.Key, option.Value)

View File

@@ -12,6 +12,9 @@ type ImageRequest struct {
Style string `json:"style,omitempty"`
User string `json:"user,omitempty"`
ExtraFields json.RawMessage `json:"extra_fields,omitempty"`
Background string `json:"background,omitempty"`
Moderation string `json:"moderation,omitempty"`
OutputFormat string `json:"output_format,omitempty"`
}
type ImageResponse struct {

View File

@@ -43,6 +43,7 @@ type GeneralOpenAIRequest struct {
ResponseFormat *ResponseFormat `json:"response_format,omitempty"`
EncodingFormat any `json:"encoding_format,omitempty"`
Seed float64 `json:"seed,omitempty"`
ParallelToolCalls bool `json:"parallel_tool_calls,omitempty"`
Tools []ToolCallRequest `json:"tools,omitempty"`
ToolChoice any `json:"tool_choice,omitempty"`
User string `json:"user,omitempty"`
@@ -53,6 +54,7 @@ type GeneralOpenAIRequest struct {
Audio any `json:"audio,omitempty"`
EnableThinking any `json:"enable_thinking,omitempty"` // ali
ExtraBody any `json:"extra_body,omitempty"`
WebSearchOptions *WebSearchOptions `json:"web_search_options,omitempty"`
}
type ToolCallRequest struct {
@@ -371,6 +373,11 @@ func (m *Message) ParseContent() []MediaContent {
return contentList
}
type WebSearchOptions struct {
SearchContextSize string `json:"search_context_size,omitempty"`
UserLocation json.RawMessage `json:"user_location,omitempty"`
}
type OpenAIResponsesRequest struct {
Model string `json:"model"`
Input json.RawMessage `json:"input,omitempty"`

main.go
View File

@@ -89,9 +89,22 @@ func main() {
if common.MemoryCacheEnabled {
common.SysLog("memory cache enabled")
common.SysError(fmt.Sprintf("sync frequency: %d seconds", common.SyncFrequency))
model.InitChannelCache()
}
if common.MemoryCacheEnabled {
// Add panic recovery and retry for InitChannelCache
func() {
defer func() {
if r := recover(); r != nil {
common.SysError(fmt.Sprintf("InitChannelCache panic: %v, retrying once", r))
// Retry once
_, fixErr := model.FixAbility()
if fixErr != nil {
common.SysError(fmt.Sprintf("InitChannelCache failed: %s", fixErr.Error()))
}
}
}()
model.InitChannelCache()
}()
go model.SyncOptions(common.SyncFrequency)
go model.SyncChannelCache(common.SyncFrequency)
}

View File

@@ -185,7 +185,7 @@ func getModelRequest(c *gin.Context) (*ModelRequest, bool, error) {
if strings.HasPrefix(c.Request.URL.Path, "/v1/images/generations") {
modelRequest.Model = common.GetStringIfEmpty(modelRequest.Model, "dall-e")
} else if strings.HasPrefix(c.Request.URL.Path, "/v1/images/edits") {
modelRequest.Model = common.GetStringIfEmpty(modelRequest.Model, "gpt-image-1")
modelRequest.Model = common.GetStringIfEmpty(c.PostForm("model"), "gpt-image-1")
}
if strings.HasPrefix(c.Request.URL.Path, "/v1/audio") {
relayMode := relayconstant.RelayModeAudioSpeech
@@ -240,5 +240,7 @@ func SetupContextForSelectedChannel(c *gin.Context, channel *model.Channel, mode
c.Set("api_version", channel.Other)
case common.ChannelTypeMokaAI:
c.Set("api_version", channel.Other)
case common.ChannelTypeCoze:
c.Set("bot_id", channel.Other)
}
}

View File

@@ -6,6 +6,7 @@ import (
"net/http"
"one-api/common"
"one-api/common/limiter"
"one-api/constant"
"one-api/setting"
"strconv"
"time"
@@ -175,6 +176,19 @@ func ModelRequestRateLimit() func(c *gin.Context) {
totalMaxCount := setting.ModelRequestRateLimitCount
successMaxCount := setting.ModelRequestRateLimitSuccessCount
// get the group
group := c.GetString("token_group")
if group == "" {
group = c.GetString(constant.ContextKeyUserGroup)
}
// get the group's rate limit configuration
groupTotalCount, groupSuccessCount, found := setting.GetGroupRateLimit(group)
if found {
totalMaxCount = groupTotalCount
successMaxCount = groupSuccessCount
}
// pick and run the rate limit handler based on the storage type
if common.RedisEnabled {
redisRateLimitHandler(duration, totalMaxCount, successMaxCount)(c)

View File

@@ -50,7 +50,7 @@ func getPriority(group string, model string, retry int) (int, error) {
err := DB.Model(&Ability{}).
Select("DISTINCT(priority)").
Where(groupCol+" = ? and model = ? and enabled = "+trueVal, group, model).
Order("priority DESC"). // 按优先级降序排序
Order("priority DESC"). // 按优先级降序排序
Pluck("priority", &priorities).Error // Pluck用于将查询的结果直接扫描到一个切片中
if err != nil {
@@ -261,12 +261,28 @@ func FixAbility() (int, error) {
common.SysError(fmt.Sprintf("Get channel ids from channel table failed: %s", err.Error()))
return 0, err
}
// Delete abilities of channels that are not in channel table
err = DB.Where("channel_id NOT IN (?)", channelIds).Delete(&Ability{}).Error
if err != nil {
common.SysError(fmt.Sprintf("Delete abilities of channels that are not in channel table failed: %s", err.Error()))
return 0, err
// Delete abilities of channels that are not in channel table - in batches to avoid too many placeholders
if len(channelIds) > 0 {
// Process deletion in chunks to avoid "too many placeholders" error
for _, chunk := range lo.Chunk(channelIds, 100) {
err = DB.Where("channel_id NOT IN (?)", chunk).Delete(&Ability{}).Error
if err != nil {
common.SysError(fmt.Sprintf("Delete abilities of channels (batch) that are not in channel table failed: %s", err.Error()))
return 0, err
}
}
} else {
// If no channels exist, delete all abilities
err = DB.Delete(&Ability{}).Error
if err != nil {
common.SysError(fmt.Sprintf("Delete all abilities failed: %s", err.Error()))
return 0, err
}
common.SysLog("Delete all abilities successfully")
return 0, nil
}
common.SysLog(fmt.Sprintf("Delete abilities of channels that are not in channel table successfully, ids: %v", channelIds))
count += len(channelIds)
@@ -275,17 +291,26 @@ func FixAbility() (int, error) {
err = DB.Table("abilities").Distinct("channel_id").Pluck("channel_id", &abilityChannelIds).Error
if err != nil {
common.SysError(fmt.Sprintf("Get channel ids from abilities table failed: %s", err.Error()))
return 0, err
return count, err
}
var channels []Channel
if len(abilityChannelIds) == 0 {
err = DB.Find(&channels).Error
} else {
err = DB.Where("id NOT IN (?)", abilityChannelIds).Find(&channels).Error
}
if err != nil {
return 0, err
// Process query in chunks to avoid "too many placeholders" error
err = nil
for _, chunk := range lo.Chunk(abilityChannelIds, 100) {
var channelsChunk []Channel
err = DB.Where("id NOT IN (?)", chunk).Find(&channelsChunk).Error
if err != nil {
common.SysError(fmt.Sprintf("Find channels not in abilities table failed: %s", err.Error()))
return count, err
}
channels = append(channels, channelsChunk...)
}
}
for _, channel := range channels {
err := channel.UpdateAbilities(nil)
if err != nil {

View File

@@ -16,6 +16,9 @@ var channelsIDM map[int]*Channel
var channelSyncLock sync.RWMutex
func InitChannelCache() {
if !common.MemoryCacheEnabled {
return
}
newChannelId2channel := make(map[int]*Channel)
var channels []*Channel
DB.Where("status = ?", common.ChannelStatusEnabled).Find(&channels)
@@ -84,11 +87,11 @@ func CacheGetRandomSatisfiedChannel(group string, model string, retry int) (*Cha
if !common.MemoryCacheEnabled {
return GetRandomSatisfiedChannel(group, model, retry)
}
channelSyncLock.RLock()
channels := group2model2channels[group][model]
channelSyncLock.RUnlock()
if len(channels) == 0 {
return nil, errors.New("channel not found")
}

View File

@@ -46,6 +46,17 @@ func (channel *Channel) GetModels() []string {
return strings.Split(strings.Trim(channel.Models, ","), ",")
}
func (channel *Channel) GetGroups() []string {
if channel.Group == "" {
return []string{}
}
groups := strings.Split(strings.Trim(channel.Group, ","), ",")
for i, group := range groups {
groups[i] = strings.TrimSpace(group)
}
return groups
}
func (channel *Channel) GetOtherInfo() map[string]interface{} {
otherInfo := make(map[string]interface{})
if channel.OtherInfo != "" {

View File

@@ -67,6 +67,7 @@ func InitOptionMap() {
common.OptionMap["ServerAddress"] = ""
common.OptionMap["WorkerUrl"] = setting.WorkerUrl
common.OptionMap["WorkerValidKey"] = setting.WorkerValidKey
common.OptionMap["WorkerAllowHttpImageRequestEnabled"] = strconv.FormatBool(setting.WorkerAllowHttpImageRequestEnabled)
common.OptionMap["PayAddress"] = ""
common.OptionMap["CustomCallbackAddress"] = ""
common.OptionMap["EpayId"] = ""
@@ -92,6 +93,7 @@ func InitOptionMap() {
common.OptionMap["ModelRequestRateLimitCount"] = strconv.Itoa(setting.ModelRequestRateLimitCount)
common.OptionMap["ModelRequestRateLimitDurationMinutes"] = strconv.Itoa(setting.ModelRequestRateLimitDurationMinutes)
common.OptionMap["ModelRequestRateLimitSuccessCount"] = strconv.Itoa(setting.ModelRequestRateLimitSuccessCount)
common.OptionMap["ModelRequestRateLimitGroup"] = setting.ModelRequestRateLimitGroup2JSONString()
common.OptionMap["ModelRatio"] = operation_setting.ModelRatio2JSONString()
common.OptionMap["ModelPrice"] = operation_setting.ModelPrice2JSONString()
common.OptionMap["CacheRatio"] = operation_setting.CacheRatio2JSONString()
@@ -256,6 +258,8 @@ func updateOptionMap(key string, value string) (err error) {
setting.StopOnSensitiveEnabled = boolValue
case "SMTPSSLEnabled":
common.SMTPSSLEnabled = boolValue
case "WorkerAllowHttpImageRequestEnabled":
setting.WorkerAllowHttpImageRequestEnabled = boolValue
}
}
switch key {
@@ -338,6 +342,8 @@ func updateOptionMap(key string, value string) (err error) {
setting.ModelRequestRateLimitDurationMinutes, _ = strconv.Atoi(value)
case "ModelRequestRateLimitSuccessCount":
setting.ModelRequestRateLimitSuccessCount, _ = strconv.Atoi(value)
case "ModelRequestRateLimitGroup":
err = setting.UpdateModelRequestRateLimitGroupByJSONString(value)
case "RetryTimes":
common.RetryTimes, _ = strconv.Atoi(value)
case "DataExportInterval":

View File

@@ -33,6 +33,8 @@ func (a *Adaptor) GetRequestURL(info *relaycommon.RelayInfo) (string, error) {
fullRequestURL = fmt.Sprintf("%s/api/v1/services/embeddings/text-embedding/text-embedding", info.BaseUrl)
case constant.RelayModeImagesGenerations:
fullRequestURL = fmt.Sprintf("%s/api/v1/services/aigc/text2image/image-synthesis", info.BaseUrl)
case constant.RelayModeCompletions:
fullRequestURL = fmt.Sprintf("%s/compatible-mode/v1/completions", info.BaseUrl)
default:
fullRequestURL = fmt.Sprintf("%s/compatible-mode/v1/chat/completions", info.BaseUrl)
}
@@ -55,6 +57,12 @@ func (a *Adaptor) ConvertOpenAIRequest(c *gin.Context, info *relaycommon.RelayIn
if request == nil {
return nil, errors.New("request is nil")
}
// fix: ali parameter.enable_thinking must be set to false for non-streaming calls
if !info.IsStream {
request.EnableThinking = false
}
switch info.RelayMode {
default:
aliReq := requestOpenAI2Ali(*request)

View File

@@ -1,16 +1,23 @@
package channel
import (
"context"
"errors"
"fmt"
"github.com/gin-gonic/gin"
"github.com/gorilla/websocket"
"io"
"net/http"
common2 "one-api/common"
"one-api/relay/common"
"one-api/relay/constant"
"one-api/relay/helper"
"one-api/service"
"one-api/setting/operation_setting"
"sync"
"time"
"github.com/bytedance/gopkg/util/gopool"
"github.com/gin-gonic/gin"
"github.com/gorilla/websocket"
)
func SetupApiRequestHeader(info *common.RelayInfo, c *gin.Context, req *http.Header) {
@@ -55,6 +62,9 @@ func DoFormRequest(a Adaptor, c *gin.Context, info *common.RelayInfo, requestBod
if err != nil {
return nil, fmt.Errorf("get request url failed: %w", err)
}
if common2.DebugEnabled {
println("fullRequestURL:", fullRequestURL)
}
req, err := http.NewRequest(c.Request.Method, fullRequestURL, requestBody)
if err != nil {
return nil, fmt.Errorf("new request failed: %w", err)
@@ -105,7 +115,62 @@ func doRequest(c *gin.Context, req *http.Request, info *common.RelayInfo) (*http
} else {
client = service.GetHttpClient()
}
// keep-alive pings for streaming requests
var stopPinger func()
generalSettings := operation_setting.GetGeneralSetting()
pingEnabled := generalSettings.PingIntervalEnabled
var pingerWg sync.WaitGroup
if info.IsStream {
helper.SetEventStreamHeaders(c)
pingInterval := time.Duration(generalSettings.PingIntervalSeconds) * time.Second
var pingerCtx context.Context
pingerCtx, stopPinger = context.WithCancel(c.Request.Context())
if pingEnabled {
pingerWg.Add(1)
gopool.Go(func() {
defer pingerWg.Done()
if pingInterval <= 0 {
pingInterval = helper.DefaultPingInterval
}
ticker := time.NewTicker(pingInterval)
defer ticker.Stop()
var pingMutex sync.Mutex
if common2.DebugEnabled {
println("SSE ping goroutine started")
}
for {
select {
case <-ticker.C:
pingMutex.Lock()
err2 := helper.PingData(c)
pingMutex.Unlock()
if err2 != nil {
common2.LogError(c, "SSE ping error: "+err.Error())
return
}
if common2.DebugEnabled {
println("SSE ping data sent.")
}
case <-pingerCtx.Done():
if common2.DebugEnabled {
println("SSE ping goroutine stopped.")
}
return
}
}
})
}
}
resp, err := client.Do(req)
// stop the pinger once the request returns
if info.IsStream && pingEnabled {
stopPinger()
pingerWg.Wait()
}
if err != nil {
return nil, err
}

View File

@@ -11,6 +11,8 @@ var awsModelIDMap = map[string]string{
"claude-3-5-sonnet-20241022": "anthropic.claude-3-5-sonnet-20241022-v2:0",
"claude-3-5-haiku-20241022": "anthropic.claude-3-5-haiku-20241022-v1:0",
"claude-3-7-sonnet-20250219": "anthropic.claude-3-7-sonnet-20250219-v1:0",
"claude-sonnet-4-20250514": "anthropic.claude-sonnet-4-20250514-v1:0",
"claude-opus-4-20250514": "anthropic.claude-opus-4-20250514-v1:0",
}
var awsModelCanCrossRegionMap = map[string]map[string]bool{
@@ -41,6 +43,16 @@ var awsModelCanCrossRegionMap = map[string]map[string]bool{
},
"anthropic.claude-3-7-sonnet-20250219-v1:0": {
"us": true,
"ap": true,
"eu": true,
},
"anthropic.claude-sonnet-4-20250514-v1:0": {
"us": true,
"ap": true,
"eu": true,
},
"anthropic.claude-opus-4-20250514-v1:0": {
"us": true,
},
}

View File

@@ -38,10 +38,10 @@ func (a *Adaptor) ConvertImageRequest(c *gin.Context, info *relaycommon.RelayInf
}
func (a *Adaptor) Init(info *relaycommon.RelayInfo) {
if strings.HasPrefix(info.UpstreamModelName, "claude-3") {
a.RequestMode = RequestModeMessage
} else {
if strings.HasPrefix(info.UpstreamModelName, "claude-2") || strings.HasPrefix(info.UpstreamModelName, "claude-instant") {
a.RequestMode = RequestModeCompletion
} else {
a.RequestMode = RequestModeMessage
}
}

View File

@@ -13,6 +13,10 @@ var ModelList = []string{
"claude-3-5-sonnet-20241022",
"claude-3-7-sonnet-20250219",
"claude-3-7-sonnet-20250219-thinking",
"claude-sonnet-4-20250514",
"claude-sonnet-4-20250514-thinking",
"claude-opus-4-20250514",
"claude-opus-4-20250514-thinking",
}
var ChannelName = "claude"

View File

@@ -0,0 +1,132 @@
package coze
import (
"encoding/json"
"errors"
"fmt"
"io"
"net/http"
"one-api/dto"
"one-api/relay/channel"
"one-api/relay/common"
"time"
"github.com/gin-gonic/gin"
)
type Adaptor struct {
}
// ConvertAudioRequest implements channel.Adaptor.
func (a *Adaptor) ConvertAudioRequest(c *gin.Context, info *common.RelayInfo, request dto.AudioRequest) (io.Reader, error) {
return nil, errors.New("not implemented")
}
// ConvertClaudeRequest implements channel.Adaptor.
func (a *Adaptor) ConvertClaudeRequest(c *gin.Context, info *common.RelayInfo, request *dto.ClaudeRequest) (any, error) {
return nil, errors.New("not implemented")
}
// ConvertEmbeddingRequest implements channel.Adaptor.
func (a *Adaptor) ConvertEmbeddingRequest(c *gin.Context, info *common.RelayInfo, request dto.EmbeddingRequest) (any, error) {
return nil, errors.New("not implemented")
}
// ConvertImageRequest implements channel.Adaptor.
func (a *Adaptor) ConvertImageRequest(c *gin.Context, info *common.RelayInfo, request dto.ImageRequest) (any, error) {
return nil, errors.New("not implemented")
}
// ConvertOpenAIRequest implements channel.Adaptor.
func (a *Adaptor) ConvertOpenAIRequest(c *gin.Context, info *common.RelayInfo, request *dto.GeneralOpenAIRequest) (any, error) {
if request == nil {
return nil, errors.New("request is nil")
}
return convertCozeChatRequest(c, *request), nil
}
// ConvertOpenAIResponsesRequest implements channel.Adaptor.
func (a *Adaptor) ConvertOpenAIResponsesRequest(c *gin.Context, info *common.RelayInfo, request dto.OpenAIResponsesRequest) (any, error) {
return nil, errors.New("not implemented")
}
// ConvertRerankRequest implements channel.Adaptor.
func (a *Adaptor) ConvertRerankRequest(c *gin.Context, relayMode int, request dto.RerankRequest) (any, error) {
return nil, errors.New("not implemented")
}
// DoRequest implements channel.Adaptor.
func (a *Adaptor) DoRequest(c *gin.Context, info *common.RelayInfo, requestBody io.Reader) (any, error) {
if info.IsStream {
return channel.DoApiRequest(a, c, info, requestBody)
}
// first send the create-chat request; once it succeeds, fetch the messages
// send the create-chat request
resp, err := channel.DoApiRequest(a, c, info, requestBody)
if err != nil {
return nil, err
}
// parse resp
var cozeResponse CozeChatResponse
respBody, err := io.ReadAll(resp.Body)
if err != nil {
return nil, err
}
err = json.Unmarshal(respBody, &cozeResponse)
if err != nil {
return nil, err
}
if cozeResponse.Code != 0 {
return nil, errors.New(cozeResponse.Msg)
}
c.Set("coze_conversation_id", cozeResponse.Data.ConversationId)
c.Set("coze_chat_id", cozeResponse.Data.Id)
// poll until the chat completes
for {
err, isComplete := checkIfChatComplete(a, c, info)
if err != nil {
return nil, err
}
if isComplete {
break
}
time.Sleep(time.Second)
}
// fetch the chat messages
return getChatDetail(a, c, info)
}
// DoResponse implements channel.Adaptor.
func (a *Adaptor) DoResponse(c *gin.Context, resp *http.Response, info *common.RelayInfo) (usage any, err *dto.OpenAIErrorWithStatusCode) {
if info.IsStream {
err, usage = cozeChatStreamHandler(c, resp, info)
} else {
err, usage = cozeChatHandler(c, resp, info)
}
return
}
// GetChannelName implements channel.Adaptor.
func (a *Adaptor) GetChannelName() string {
return ChannelName
}
// GetModelList implements channel.Adaptor.
func (a *Adaptor) GetModelList() []string {
return ModelList
}
// GetRequestURL implements channel.Adaptor.
func (a *Adaptor) GetRequestURL(info *common.RelayInfo) (string, error) {
return fmt.Sprintf("%s/v3/chat", info.BaseUrl), nil
}
// Init implements channel.Adaptor.
func (a *Adaptor) Init(info *common.RelayInfo) {
}
// SetupRequestHeader implements channel.Adaptor.
func (a *Adaptor) SetupRequestHeader(c *gin.Context, req *http.Header, info *common.RelayInfo) error {
channel.SetupApiRequestHeader(info, c, req)
req.Set("Authorization", "Bearer "+info.ApiKey)
return nil
}

View File

@@ -0,0 +1,30 @@
package coze
var ModelList = []string{
"moonshot-v1-8k",
"moonshot-v1-32k",
"moonshot-v1-128k",
"Baichuan4",
"abab6.5s-chat-pro",
"glm-4-0520",
"qwen-max",
"deepseek-r1",
"deepseek-v3",
"deepseek-r1-distill-qwen-32b",
"deepseek-r1-distill-qwen-7b",
"step-1v-8k",
"step-1.5v-mini",
"Doubao-pro-32k",
"Doubao-pro-256k",
"Doubao-lite-128k",
"Doubao-lite-32k",
"Doubao-vision-lite-32k",
"Doubao-vision-pro-32k",
"Doubao-1.5-pro-vision-32k",
"Doubao-1.5-lite-32k",
"Doubao-1.5-pro-32k",
"Doubao-1.5-thinking-pro",
"Doubao-1.5-pro-256k",
}
var ChannelName = "coze"

relay/channel/coze/dto.go (new file)
View File

@@ -0,0 +1,78 @@
package coze
import "encoding/json"
type CozeError struct {
Code int `json:"code"`
Message string `json:"message"`
}
type CozeEnterMessage struct {
Role string `json:"role"`
Type string `json:"type,omitempty"`
Content json.RawMessage `json:"content,omitempty"`
MetaData json.RawMessage `json:"meta_data,omitempty"`
ContentType string `json:"content_type,omitempty"`
}
type CozeChatRequest struct {
BotId string `json:"bot_id"`
UserId string `json:"user_id"`
AdditionalMessages []CozeEnterMessage `json:"additional_messages,omitempty"`
Stream bool `json:"stream,omitempty"`
CustomVariables json.RawMessage `json:"custom_variables,omitempty"`
AutoSaveHistory bool `json:"auto_save_history,omitempty"`
MetaData json.RawMessage `json:"meta_data,omitempty"`
ExtraParams json.RawMessage `json:"extra_params,omitempty"`
ShortcutCommand json.RawMessage `json:"shortcut_command,omitempty"`
Parameters json.RawMessage `json:"parameters,omitempty"`
}
type CozeChatResponse struct {
Code int `json:"code"`
Msg string `json:"msg"`
Data CozeChatResponseData `json:"data"`
}
type CozeChatResponseData struct {
Id string `json:"id"`
ConversationId string `json:"conversation_id"`
BotId string `json:"bot_id"`
CreatedAt int64 `json:"created_at"`
LastError CozeError `json:"last_error"`
Status string `json:"status"`
Usage CozeChatUsage `json:"usage"`
}
type CozeChatUsage struct {
TokenCount int `json:"token_count"`
OutputCount int `json:"output_count"`
InputCount int `json:"input_count"`
}
type CozeChatDetailResponse struct {
Data []CozeChatV3MessageDetail `json:"data"`
Code int `json:"code"`
Msg string `json:"msg"`
Detail CozeResponseDetail `json:"detail"`
}
type CozeChatV3MessageDetail struct {
Id string `json:"id"`
Role string `json:"role"`
Type string `json:"type"`
BotId string `json:"bot_id"`
ChatId string `json:"chat_id"`
Content json.RawMessage `json:"content"`
MetaData json.RawMessage `json:"meta_data"`
CreatedAt int64 `json:"created_at"`
SectionId string `json:"section_id"`
UpdatedAt int64 `json:"updated_at"`
ContentType string `json:"content_type"`
ConversationId string `json:"conversation_id"`
ReasoningContent string `json:"reasoning_content"`
}
type CozeResponseDetail struct {
Logid string `json:"logid"`
}

View File

@@ -0,0 +1,300 @@
package coze
import (
"bufio"
"encoding/json"
"errors"
"fmt"
"io"
"net/http"
"one-api/common"
"one-api/dto"
relaycommon "one-api/relay/common"
"one-api/relay/helper"
"one-api/service"
"strings"
"github.com/gin-gonic/gin"
)
func convertCozeChatRequest(c *gin.Context, request dto.GeneralOpenAIRequest) *CozeChatRequest {
var messages []CozeEnterMessage
// convert the content of user-role messages into CozeEnterMessage entries
for _, message := range request.Messages {
if message.Role == "user" {
messages = append(messages, CozeEnterMessage{
Role: "user",
Content: message.Content,
// TODO: support more content type
ContentType: "text",
})
}
}
user := request.User
if user == "" {
user = helper.GetResponseID(c)
}
cozeRequest := &CozeChatRequest{
BotId: c.GetString("bot_id"),
UserId: user,
AdditionalMessages: messages,
Stream: request.Stream,
}
return cozeRequest
}
func cozeChatHandler(c *gin.Context, resp *http.Response, info *relaycommon.RelayInfo) (*dto.OpenAIErrorWithStatusCode, *dto.Usage) {
responseBody, err := io.ReadAll(resp.Body)
if err != nil {
return service.OpenAIErrorWrapper(err, "read_response_body_failed", http.StatusInternalServerError), nil
}
err = resp.Body.Close()
if err != nil {
return service.OpenAIErrorWrapperLocal(err, "close_response_body_failed", http.StatusInternalServerError), nil
}
// convert coze response to openai response
var response dto.TextResponse
var cozeResponse CozeChatDetailResponse
response.Model = info.UpstreamModelName
err = json.Unmarshal(responseBody, &cozeResponse)
if err != nil {
return service.OpenAIErrorWrapper(err, "unmarshal_response_body_failed", http.StatusInternalServerError), nil
}
if cozeResponse.Code != 0 {
return service.OpenAIErrorWrapper(errors.New(cozeResponse.Msg), fmt.Sprintf("%d", cozeResponse.Code), http.StatusInternalServerError), nil
}
// read usage from the context
var usage dto.Usage
usage.PromptTokens = c.GetInt("coze_input_count")
usage.CompletionTokens = c.GetInt("coze_output_count")
usage.TotalTokens = c.GetInt("coze_token_count")
response.Usage = usage
response.Id = helper.GetResponseID(c)
var responseContent json.RawMessage
for _, data := range cozeResponse.Data {
if data.Type == "answer" {
responseContent = data.Content
response.Created = data.CreatedAt
}
}
// populate response.Choices
response.Choices = []dto.OpenAITextResponseChoice{
{
Index: 0,
Message: dto.Message{Role: "assistant", Content: responseContent},
FinishReason: "stop",
},
}
jsonResponse, err := json.Marshal(response)
if err != nil {
return service.OpenAIErrorWrapper(err, "marshal_response_body_failed", http.StatusInternalServerError), nil
}
c.Writer.Header().Set("Content-Type", "application/json")
c.Writer.WriteHeader(resp.StatusCode)
_, _ = c.Writer.Write(jsonResponse)
return nil, &usage
}
func cozeChatStreamHandler(c *gin.Context, resp *http.Response, info *relaycommon.RelayInfo) (*dto.OpenAIErrorWithStatusCode, *dto.Usage) {
scanner := bufio.NewScanner(resp.Body)
scanner.Split(bufio.ScanLines)
helper.SetEventStreamHeaders(c)
id := helper.GetResponseID(c)
var responseText string
var currentEvent string
var currentData string
var usage dto.Usage
for scanner.Scan() {
line := scanner.Text()
if line == "" {
if currentEvent != "" && currentData != "" {
// blank line terminates an event; dispatch the buffered one
handleCozeEvent(c, currentEvent, currentData, &responseText, &usage, id, info)
currentEvent = ""
currentData = ""
}
continue
}
if strings.HasPrefix(line, "event:") {
currentEvent = strings.TrimSpace(line[6:])
continue
}
if strings.HasPrefix(line, "data:") {
currentData = strings.TrimSpace(line[5:])
continue
}
}
// Last event
if currentEvent != "" && currentData != "" {
handleCozeEvent(c, currentEvent, currentData, &responseText, &usage, id, info)
}
if err := scanner.Err(); err != nil {
return service.OpenAIErrorWrapper(err, "stream_scanner_error", http.StatusInternalServerError), nil
}
helper.Done(c)
if usage.TotalTokens == 0 {
usage.PromptTokens = info.PromptTokens
usage.CompletionTokens, _ = service.CountTextToken(responseText, "gpt-3.5-turbo")
usage.TotalTokens = usage.PromptTokens + usage.CompletionTokens
}
return nil, &usage
}
func handleCozeEvent(c *gin.Context, event string, data string, responseText *string, usage *dto.Usage, id string, info *relaycommon.RelayInfo) {
switch event {
case "conversation.chat.completed":
// parse data as CozeChatResponseData
var chatData CozeChatResponseData
err := json.Unmarshal([]byte(data), &chatData)
if err != nil {
common.SysError("error_unmarshalling_stream_response: " + err.Error())
return
}
usage.PromptTokens = chatData.Usage.InputCount
usage.CompletionTokens = chatData.Usage.OutputCount
usage.TotalTokens = chatData.Usage.TokenCount
finishReason := "stop"
stopResponse := helper.GenerateStopResponse(id, common.GetTimestamp(), info.UpstreamModelName, finishReason)
helper.ObjectData(c, stopResponse)
case "conversation.message.delta":
// parse data as CozeChatV3MessageDetail
var messageData CozeChatV3MessageDetail
err := json.Unmarshal([]byte(data), &messageData)
if err != nil {
common.SysError("error_unmarshalling_stream_response: " + err.Error())
return
}
var content string
err = json.Unmarshal(messageData.Content, &content)
if err != nil {
common.SysError("error_unmarshalling_stream_response: " + err.Error())
return
}
*responseText += content
openaiResponse := dto.ChatCompletionsStreamResponse{
Id: id,
Object: "chat.completion.chunk",
Created: common.GetTimestamp(),
Model: info.UpstreamModelName,
}
choice := dto.ChatCompletionsStreamResponseChoice{
Index: 0,
}
choice.Delta.SetContentString(content)
openaiResponse.Choices = append(openaiResponse.Choices, choice)
helper.ObjectData(c, openaiResponse)
case "error":
var errorData CozeError
err := json.Unmarshal([]byte(data), &errorData)
if err != nil {
common.SysError("error_unmarshalling_stream_response: " + err.Error())
return
}
common.SysError(fmt.Sprintf("stream event error: ", errorData.Code, errorData.Message))
}
}
func checkIfChatComplete(a *Adaptor, c *gin.Context, info *relaycommon.RelayInfo) (error, bool) {
requestURL := fmt.Sprintf("%s/v3/chat/retrieve", info.BaseUrl)
requestURL = requestURL + "?conversation_id=" + c.GetString("coze_conversation_id") + "&chat_id=" + c.GetString("coze_chat_id")
// send a GET request with conversationId and chatId as query parameters
req, err := http.NewRequest("GET", requestURL, nil)
if err != nil {
return err, false
}
err = a.SetupRequestHeader(c, &req.Header, info)
if err != nil {
return err, false
}
resp, err := doRequest(req, info)
if err != nil {
return err, false
}
if resp == nil { // guard against a nil resp from a failed doRequest causing a panic
return fmt.Errorf("resp is nil"), false
}
defer resp.Body.Close() // ensure the response body is closed
// parse resp into CozeChatResponse
var cozeResponse CozeChatResponse
responseBody, err := io.ReadAll(resp.Body)
if err != nil {
return fmt.Errorf("read response body failed: %w", err), false
}
err = json.Unmarshal(responseBody, &cozeResponse)
if err != nil {
return fmt.Errorf("unmarshal response body failed: %w", err), false
}
if cozeResponse.Data.Status == "completed" {
// store usage in the context
c.Set("coze_token_count", cozeResponse.Data.Usage.TokenCount)
c.Set("coze_output_count", cozeResponse.Data.Usage.OutputCount)
c.Set("coze_input_count", cozeResponse.Data.Usage.InputCount)
return nil, true
} else if cozeResponse.Data.Status == "failed" || cozeResponse.Data.Status == "canceled" || cozeResponse.Data.Status == "requires_action" {
return fmt.Errorf("chat status: %s", cozeResponse.Data.Status), false
} else {
return nil, false
}
}
func getChatDetail(a *Adaptor, c *gin.Context, info *relaycommon.RelayInfo) (*http.Response, error) {
requestURL := fmt.Sprintf("%s/v3/chat/message/list", info.BaseUrl)
requestURL = requestURL + "?conversation_id=" + c.GetString("coze_conversation_id") + "&chat_id=" + c.GetString("coze_chat_id")
req, err := http.NewRequest("GET", requestURL, nil)
if err != nil {
return nil, fmt.Errorf("new request failed: %w", err)
}
err = a.SetupRequestHeader(c, &req.Header, info)
if err != nil {
return nil, fmt.Errorf("setup request header failed: %w", err)
}
resp, err := doRequest(req, info)
if err != nil {
return nil, fmt.Errorf("do request failed: %w", err)
}
return resp, nil
}
func doRequest(req *http.Request, info *relaycommon.RelayInfo) (*http.Response, error) {
var client *http.Client
var err error
if proxyURL, ok := info.ChannelSetting["proxy"]; ok {
client, err = service.NewProxyHttpClient(proxyURL.(string))
if err != nil {
return nil, fmt.Errorf("new proxy http client failed: %w", err)
}
} else {
client = service.GetHttpClient()
}
resp, err := client.Do(req)
if err != nil { // check the error returned by client.Do(req)
return nil, fmt.Errorf("client.Do failed: %w", err)
}
// _ = resp.Body.Close()
return resp, nil
}

View File

@@ -2,10 +2,10 @@ package gemini
type GeminiChatRequest struct {
Contents []GeminiChatContent `json:"contents"`
SafetySettings []GeminiChatSafetySettings `json:"safety_settings,omitempty"`
GenerationConfig GeminiChatGenerationConfig `json:"generation_config,omitempty"`
SafetySettings []GeminiChatSafetySettings `json:"safetySettings,omitempty"`
GenerationConfig GeminiChatGenerationConfig `json:"generationConfig,omitempty"`
Tools []GeminiChatTool `json:"tools,omitempty"`
SystemInstructions *GeminiChatContent `json:"system_instruction,omitempty"`
SystemInstructions *GeminiChatContent `json:"systemInstruction,omitempty"`
}
type GeminiThinkingConfig struct {
@@ -54,6 +54,7 @@ type GeminiFileData struct {
type GeminiPart struct {
Text string `json:"text,omitempty"`
Thought bool `json:"thought,omitempty"`
InlineData *GeminiInlineData `json:"inlineData,omitempty"`
FunctionCall *FunctionCall `json:"functionCall,omitempty"`
FunctionResponse *FunctionResponse `json:"functionResponse,omitempty"`

View File

@@ -539,6 +539,8 @@ func responseGeminiChat2OpenAI(response *GeminiChatResponse) *dto.OpenAITextResp
if call := getResponseToolCall(&part); call != nil {
toolCalls = append(toolCalls, *call)
}
} else if part.Thought {
choice.Message.ReasoningContent = part.Text
} else {
if part.ExecutableCode != nil {
texts = append(texts, "```"+part.ExecutableCode.Language+"\n"+part.ExecutableCode.Code+"\n```")
@@ -556,7 +558,6 @@ func responseGeminiChat2OpenAI(response *GeminiChatResponse) *dto.OpenAITextResp
choice.Message.SetToolCalls(toolCalls)
isToolCall = true
}
choice.Message.SetStringContent(strings.Join(texts, "\n"))
}
@@ -596,6 +597,7 @@ func streamResponseGeminiChat2OpenAI(geminiResponse *GeminiChatResponse) (*dto.C
}
var texts []string
isTools := false
isThought := false
if candidate.FinishReason != nil {
// p := GeminiConvertFinishReason(*candidate.FinishReason)
switch *candidate.FinishReason {
@@ -620,6 +622,9 @@ func streamResponseGeminiChat2OpenAI(geminiResponse *GeminiChatResponse) (*dto.C
call.SetIndex(len(choice.Delta.ToolCalls))
choice.Delta.ToolCalls = append(choice.Delta.ToolCalls, *call)
}
} else if part.Thought {
isThought = true
texts = append(texts, part.Text)
} else {
if part.ExecutableCode != nil {
texts = append(texts, "```"+part.ExecutableCode.Language+"\n"+part.ExecutableCode.Code+"\n```\n")
@@ -632,7 +637,11 @@ func streamResponseGeminiChat2OpenAI(geminiResponse *GeminiChatResponse) (*dto.C
}
}
}
choice.Delta.SetContentString(strings.Join(texts, "\n"))
if isThought {
choice.Delta.SetReasoningContent(strings.Join(texts, "\n"))
} else {
choice.Delta.SetContentString(strings.Join(texts, "\n"))
}
if isTools {
choice.FinishReason = &constant.FinishReasonToolCalls
}
@@ -716,8 +725,11 @@ func GeminiChatHandler(c *gin.Context, resp *http.Response, info *relaycommon.Re
if err != nil {
return service.OpenAIErrorWrapper(err, "close_response_body_failed", http.StatusInternalServerError), nil
}
if common.DebugEnabled {
println(string(responseBody))
}
var geminiResponse GeminiChatResponse
err = json.Unmarshal(responseBody, &geminiResponse)
err = common.DecodeJson(responseBody, &geminiResponse)
if err != nil {
return service.OpenAIErrorWrapper(err, "unmarshal_response_body_failed", http.StatusInternalServerError), nil
}

View File

@@ -67,9 +67,6 @@ func (a *Adaptor) GetRequestURL(info *relaycommon.RelayInfo) (string, error) {
if info.RelayFormat == relaycommon.RelayFormatClaude {
return fmt.Sprintf("%s/v1/chat/completions", info.BaseUrl), nil
}
if info.RelayMode == constant.RelayModeResponses {
return fmt.Sprintf("%s/v1/responses", info.BaseUrl), nil
}
if info.RelayMode == constant.RelayModeRealtime {
if strings.HasPrefix(info.BaseUrl, "https://") {
baseUrl := strings.TrimPrefix(info.BaseUrl, "https://")

View File

@@ -215,10 +215,35 @@ func OpenaiHandler(c *gin.Context, resp *http.Response, info *relaycommon.RelayI
StatusCode: resp.StatusCode,
}, nil
}
forceFormat := false
if forceFmt, ok := info.ChannelSetting[constant.ForceFormat].(bool); ok {
forceFormat = forceFmt
}
if simpleResponse.Usage.TotalTokens == 0 || (simpleResponse.Usage.PromptTokens == 0 && simpleResponse.Usage.CompletionTokens == 0) {
completionTokens := 0
for _, choice := range simpleResponse.Choices {
ctkm, _ := service.CountTextToken(choice.Message.StringContent()+choice.Message.ReasoningContent+choice.Message.Reasoning, info.UpstreamModelName)
completionTokens += ctkm
}
simpleResponse.Usage = dto.Usage{
PromptTokens: info.PromptTokens,
CompletionTokens: completionTokens,
TotalTokens: info.PromptTokens + completionTokens,
}
}
switch info.RelayFormat {
case relaycommon.RelayFormatOpenAI:
break
if forceFormat {
responseBody, err = json.Marshal(simpleResponse)
if err != nil {
return service.OpenAIErrorWrapper(err, "marshal_response_body_failed", http.StatusInternalServerError), nil
}
} else {
break
}
case relaycommon.RelayFormatClaude:
claudeResp := service.ResponseOpenAI2Claude(&simpleResponse, info)
claudeRespStr, err := json.Marshal(claudeResp)
@@ -244,18 +269,6 @@ func OpenaiHandler(c *gin.Context, resp *http.Response, info *relaycommon.RelayI
common.SysError("error copying response body: " + err.Error())
}
resp.Body.Close()
if simpleResponse.Usage.TotalTokens == 0 || (simpleResponse.Usage.PromptTokens == 0 && simpleResponse.Usage.CompletionTokens == 0) {
completionTokens := 0
for _, choice := range simpleResponse.Choices {
ctkm, _ := service.CountTextToken(choice.Message.StringContent()+choice.Message.ReasoningContent+choice.Message.Reasoning, info.UpstreamModelName)
completionTokens += ctkm
}
simpleResponse.Usage = dto.Usage{
PromptTokens: info.PromptTokens,
CompletionTokens: completionTokens,
TotalTokens: info.PromptTokens + completionTokens,
}
}
return nil, &simpleResponse.Usage
}

View File

@@ -31,6 +31,8 @@ var claudeModelMap = map[string]string{
"claude-3-5-sonnet-20240620": "claude-3-5-sonnet@20240620",
"claude-3-5-sonnet-20241022": "claude-3-5-sonnet-v2@20241022",
"claude-3-7-sonnet-20250219": "claude-3-7-sonnet@20250219",
"claude-sonnet-4-20250514": "claude-sonnet-4@20250514",
"claude-opus-4-20250514": "claude-opus-4@20250514",
}
const anthropicVersion = "vertex-2023-10-16"
@@ -93,14 +95,23 @@ func (a *Adaptor) GetRequestURL(info *relaycommon.RelayInfo) (string, error) {
} else {
suffix = "generateContent"
}
return fmt.Sprintf(
"https://%s-aiplatform.googleapis.com/v1/projects/%s/locations/%s/publishers/google/models/%s:%s",
region,
adc.ProjectID,
region,
info.UpstreamModelName,
suffix,
), nil
if region == "global" {
return fmt.Sprintf(
"https://aiplatform.googleapis.com/v1/projects/%s/locations/global/publishers/google/models/%s:%s",
adc.ProjectID,
info.UpstreamModelName,
suffix,
), nil
} else {
return fmt.Sprintf(
"https://%s-aiplatform.googleapis.com/v1/projects/%s/locations/%s/publishers/google/models/%s:%s",
region,
adc.ProjectID,
region,
info.UpstreamModelName,
suffix,
), nil
}
} else if a.RequestMode == RequestModeClaude {
if info.IsStream {
suffix = "streamRawPredict?alt=sse"

View File

@@ -33,6 +33,7 @@ const (
APITypeOpenRouter
APITypeXinference
APITypeXai
APITypeCoze
APITypeDummy // this one is only for count, do not add any channel after this
)
@@ -95,6 +96,8 @@ func ChannelType2APIType(channelType int) (int, bool) {
apiType = APITypeXinference
case common.ChannelTypeXai:
apiType = APITypeXai
case common.ChannelTypeCoze:
apiType = APITypeCoze
}
if apiType == -1 {
return APITypeOpenAI, false

View File

@@ -12,11 +12,19 @@ import (
)
func SetEventStreamHeaders(c *gin.Context) {
c.Writer.Header().Set("Content-Type", "text/event-stream")
c.Writer.Header().Set("Cache-Control", "no-cache")
c.Writer.Header().Set("Connection", "keep-alive")
c.Writer.Header().Set("Transfer-Encoding", "chunked")
c.Writer.Header().Set("X-Accel-Buffering", "no")
// skip if the headers have already been set
if _, exists := c.Get("event_stream_headers_set"); exists {
return
}
c.Writer.Header().Set("Content-Type", "text/event-stream")
c.Writer.Header().Set("Cache-Control", "no-cache")
c.Writer.Header().Set("Connection", "keep-alive")
c.Writer.Header().Set("Transfer-Encoding", "chunked")
c.Writer.Header().Set("X-Accel-Buffering", "no")
// mark the headers as set
c.Set("event_stream_headers_set", true)
}
func ClaudeData(c *gin.Context, resp dto.ClaudeResponse) error {

View File

@@ -23,7 +23,7 @@ type PriceData struct {
}
func (p PriceData) ToSetting() string {
return fmt.Sprintf("ModelPrice: %f, ModelRatio: %f, CompletionRatio: %f, CacheRatio: %f, GroupRatio: %f, UsePrice: %t, CacheCreationRatio: %f, ShouldPreConsumedQuota: %d, ImageRatio: %d", p.ModelPrice, p.ModelRatio, p.CompletionRatio, p.CacheRatio, p.GroupRatio, p.UsePrice, p.CacheCreationRatio, p.ShouldPreConsumedQuota, p.ImageRatio)
return fmt.Sprintf("ModelPrice: %f, ModelRatio: %f, CompletionRatio: %f, CacheRatio: %f, GroupRatio: %f, UsePrice: %t, CacheCreationRatio: %f, ShouldPreConsumedQuota: %d, ImageRatio: %f", p.ModelPrice, p.ModelRatio, p.CompletionRatio, p.CacheRatio, p.GroupRatio, p.UsePrice, p.CacheCreationRatio, p.ShouldPreConsumedQuota, p.ImageRatio)
}
func ModelPriceHelper(c *gin.Context, info *relaycommon.RelayInfo, promptTokens int, maxTokens int) (PriceData, error) {

View File

@@ -3,7 +3,6 @@ package helper
import (
"bufio"
"context"
"github.com/bytedance/gopkg/util/gopool"
"io"
"net/http"
"one-api/common"
@@ -14,6 +13,8 @@ import (
"sync"
"time"
"github.com/bytedance/gopkg/util/gopool"
"github.com/gin-gonic/gin"
)

View File

@@ -32,7 +32,23 @@ func RelayMidjourneyImage(c *gin.Context) {
})
return
}
resp, err := http.Get(midjourneyTask.ImageUrl)
var httpClient *http.Client
if channel, err := model.CacheGetChannel(midjourneyTask.ChannelId); err == nil {
if proxy, ok := channel.GetSetting()["proxy"]; ok {
if proxyURL, ok := proxy.(string); ok && proxyURL != "" {
if httpClient, err = service.NewProxyHttpClient(proxyURL); err != nil {
c.JSON(400, gin.H{
"error": "proxy_url_invalid",
})
return
}
}
}
}
if httpClient == nil {
httpClient = service.GetHttpClient()
}
resp, err := httpClient.Get(midjourneyTask.ImageUrl)
if err != nil {
c.JSON(http.StatusInternalServerError, gin.H{
"error": "http_get_image_failed",

View File

@@ -47,6 +47,20 @@ func getAndValidateTextRequest(c *gin.Context, relayInfo *relaycommon.RelayInfo)
if textRequest.Model == "" {
return nil, errors.New("model is required")
}
if textRequest.WebSearchOptions != nil {
if textRequest.WebSearchOptions.SearchContextSize != "" {
validSizes := map[string]bool{
"high": true,
"medium": true,
"low": true,
}
if !validSizes[textRequest.WebSearchOptions.SearchContextSize] {
return nil, errors.New("invalid search_context_size, must be one of: high, medium, low")
}
} else {
textRequest.WebSearchOptions.SearchContextSize = "medium"
}
}
switch relayInfo.RelayMode {
case relayconstant.RelayModeCompletions:
if textRequest.Prompt == "" {
@@ -76,6 +90,10 @@ func TextHelper(c *gin.Context) (openaiErr *dto.OpenAIErrorWithStatusCode) {
// get & validate textRequest 获取并验证文本请求
textRequest, err := getAndValidateTextRequest(c, relayInfo)
if textRequest.WebSearchOptions != nil {
c.Set("chat_completion_web_search_context_size", textRequest.WebSearchOptions.SearchContextSize)
}
if err != nil {
common.LogError(c, fmt.Sprintf("getAndValidateTextRequest failed: %s", err.Error()))
return service.OpenAIErrorWrapperLocal(err, "invalid_text_request", http.StatusBadRequest)
@@ -194,6 +212,7 @@ func TextHelper(c *gin.Context) (openaiErr *dto.OpenAIErrorWithStatusCode) {
var httpResp *http.Response
resp, err := adaptor.DoRequest(c, relayInfo, requestBody)
if err != nil {
return service.OpenAIErrorWrapper(err, "do_request_failed", http.StatusInternalServerError)
}
@@ -369,9 +388,20 @@ func postConsumeQuota(ctx *gin.Context, relayInfo *relaycommon.RelayInfo,
dWebSearchQuota = decimal.NewFromFloat(webSearchPrice).
Mul(decimal.NewFromInt(int64(webSearchTool.CallCount))).
Div(decimal.NewFromInt(1000)).Mul(dGroupRatio).Mul(dQuotaPerUnit)
extraContent += fmt.Sprintf("Web Search 调用 %d 次,上下文大小 %s调用花费 $%s",
extraContent += fmt.Sprintf("Web Search 调用 %d 次,上下文大小 %s调用花费 %s",
webSearchTool.CallCount, webSearchTool.SearchContextSize, dWebSearchQuota.String())
}
} else if strings.HasSuffix(modelName, "search-preview") {
// search-preview models do not support the responses API
searchContextSize := ctx.GetString("chat_completion_web_search_context_size")
if searchContextSize == "" {
searchContextSize = "medium"
}
webSearchPrice = operation_setting.GetWebSearchPricePerThousand(modelName, searchContextSize)
dWebSearchQuota = decimal.NewFromFloat(webSearchPrice).
Div(decimal.NewFromInt(1000)).Mul(dGroupRatio).Mul(dQuotaPerUnit)
extraContent += fmt.Sprintf("Web Search 调用 1 次,上下文大小 %s调用花费 %s",
searchContextSize, dWebSearchQuota.String())
}
// file search tool 计费
var dFileSearchQuota decimal.Decimal
@@ -462,10 +492,16 @@ func postConsumeQuota(ctx *gin.Context, relayInfo *relaycommon.RelayInfo,
other["image_ratio"] = imageRatio
other["image_output"] = imageTokens
}
if !dWebSearchQuota.IsZero() && relayInfo.ResponsesUsageInfo != nil {
if webSearchTool, exists := relayInfo.ResponsesUsageInfo.BuiltInTools[dto.BuildInToolWebSearchPreview]; exists {
if !dWebSearchQuota.IsZero() {
if relayInfo.ResponsesUsageInfo != nil {
if webSearchTool, exists := relayInfo.ResponsesUsageInfo.BuiltInTools[dto.BuildInToolWebSearchPreview]; exists {
other["web_search"] = true
other["web_search_call_count"] = webSearchTool.CallCount
other["web_search_price"] = webSearchPrice
}
} else if strings.HasSuffix(modelName, "search-preview") {
other["web_search"] = true
other["web_search_call_count"] = webSearchTool.CallCount
other["web_search_call_count"] = 1
other["web_search_price"] = webSearchPrice
}
}
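Taken together, these hunks validate `search_context_size`, default it to `medium`, and bill search-preview models at a flat one call per request. A sketch of the arithmetic using the same `shopspring/decimal` dependency the code already relies on; the $35-per-thousand figure, group ratio, and quota-per-unit below are placeholders, not the project's real prices:

```go
package main

import (
	"errors"
	"fmt"

	"github.com/shopspring/decimal"
)

// normalizeContextSize validates search_context_size the same way the
// validation hunk does, defaulting the empty value to "medium".
func normalizeContextSize(size string) (string, error) {
	switch size {
	case "":
		return "medium", nil
	case "high", "medium", "low":
		return size, nil
	default:
		return "", errors.New("invalid search_context_size, must be one of: high, medium, low")
	}
}

// webSearchQuota prices calls at pricePerThousand (USD per 1000 calls),
// then converts to internal quota via groupRatio and quotaPerUnit.
func webSearchQuota(calls int64, pricePerThousand, groupRatio, quotaPerUnit float64) decimal.Decimal {
	return decimal.NewFromFloat(pricePerThousand).
		Mul(decimal.NewFromInt(calls)).
		Div(decimal.NewFromInt(1000)).
		Mul(decimal.NewFromFloat(groupRatio)).
		Mul(decimal.NewFromFloat(quotaPerUnit))
}

func main() {
	size, _ := normalizeContextSize("") // -> "medium"
	// search-preview models are billed as exactly one call per request.
	quota := webSearchQuota(1, 35.0, 1.0, 500000)
	fmt.Printf("context=%s quota=%s\n", size, quota.String())
}
```

Doing the intermediate products in `decimal` avoids the float64 rounding drift that would otherwise creep into quota values.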

View File

@@ -10,6 +10,7 @@ import (
"one-api/relay/channel/claude"
"one-api/relay/channel/cloudflare"
"one-api/relay/channel/cohere"
"one-api/relay/channel/coze"
"one-api/relay/channel/deepseek"
"one-api/relay/channel/dify"
"one-api/relay/channel/gemini"
@@ -88,6 +89,8 @@ func GetAdaptor(apiType int) channel.Adaptor {
return &openai.Adaptor{}
case constant.APITypeXai:
return &xai.Adaptor{}
case constant.APITypeCoze:
return &coze.Adaptor{}
}
return nil
}
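The Coze channel is wired in through the relay's switch-based adaptor factory. A rough illustration of the pattern, with the interface and API-type constants trimmed to stand-ins rather than the real `channel.Adaptor` and `relay/constant` values:

```go
package main

import "fmt"

// Adaptor is a trimmed-down stand-in for the repo's channel.Adaptor interface.
type Adaptor interface {
	Name() string
}

type openaiAdaptor struct{}
type cozeAdaptor struct{}

func (openaiAdaptor) Name() string { return "openai" }
func (cozeAdaptor) Name() string   { return "coze" }

// Hypothetical API-type constants; the real ones live in relay/constant.
const (
	APITypeOpenAI = iota
	APITypeCoze
)

// GetAdaptor maps an API type to its adaptor and returns nil for unknown
// types, so callers must nil-check, as the relay code does.
func GetAdaptor(apiType int) Adaptor {
	switch apiType {
	case APITypeOpenAI:
		return openaiAdaptor{}
	case APITypeCoze:
		return cozeAdaptor{}
	}
	return nil
}

func main() {
	if a := GetAdaptor(APITypeCoze); a != nil {
		fmt.Println(a.Name()) // coze
	}
}
```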

View File

@@ -24,7 +24,7 @@ func DoWorkerRequest(req *WorkerRequest) (*http.Response, error) {
if !setting.EnableWorker() {
return nil, fmt.Errorf("worker not enabled")
}
- if !strings.HasPrefix(req.URL, "https") {
+ if !setting.WorkerAllowHttpImageRequestEnabled && !strings.HasPrefix(req.URL, "https") {
return nil, fmt.Errorf("only support https url")
}
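This guard pairs with the new `WorkerAllowHttpImageRequestEnabled` option added further down. A small sketch of the gate, with package-level variables standing in for the `setting` package:

```go
package main

import (
	"fmt"
	"strings"
)

// Illustrative stand-ins for setting.WorkerUrl and
// setting.WorkerAllowHttpImageRequestEnabled.
var (
	workerURL           = "https://worker.example.com"
	allowHTTPImageFetch = false
)

// checkWorkerURL reproduces the guard in the hunk: plain-http targets are
// rejected unless the new opt-in flag is set.
func checkWorkerURL(target string) error {
	if workerURL == "" {
		return fmt.Errorf("worker not enabled")
	}
	if !allowHTTPImageFetch && !strings.HasPrefix(target, "https") {
		return fmt.Errorf("only support https url")
	}
	return nil
}

func main() {
	fmt.Println(checkWorkerURL("http://img.example.com/a.png")) // only support https url
	allowHTTPImageFetch = true
	fmt.Println(checkWorkerURL("http://img.example.com/a.png")) // <nil>
}
```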

View File

@@ -3,12 +3,13 @@ package service
import (
"context"
"fmt"
"golang.org/x/net/proxy"
"net"
"net/http"
"net/url"
"one-api/common"
"time"
"golang.org/x/net/proxy"
)
var httpClient *http.Client
@@ -55,7 +56,7 @@ func NewProxyHttpClient(proxyURL string) (*http.Client, error) {
},
}, nil
case "socks5":
case "socks5", "socks5h":
// 获取认证信息
var auth *proxy.Auth
if parsedURL.User != nil {
@@ -69,6 +70,7 @@ func NewProxyHttpClient(proxyURL string) (*http.Client, error) {
}
// create the SOCKS5 proxy dialer
// proxy.SOCKS5 uses the "tcp" network, so every TCP connection, DNS lookups included, goes through the proxy; this matches socks5h behavior
dialer, err := proxy.SOCKS5("tcp", parsedURL.Host, auth, proxy.Direct)
if err != nil {
return nil, err

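For the socks5/socks5h branch, a self-contained version of the whole dispatcher might look like the sketch below. The 30-second timeout and error strings are illustrative assumptions; `golang.org/x/net/proxy` is the same dependency the hunk imports, and `socks5` and `socks5h` share one branch precisely because `proxy.SOCKS5` already dials everything through the proxy:

```go
package main

import (
	"fmt"
	"net/http"
	"net/url"
	"time"

	"golang.org/x/net/proxy"
)

// newProxyHTTPClient dispatches on the proxy scheme: http/https proxies go
// through Transport.Proxy, while socks5/socks5h build a SOCKS5 dialer.
func newProxyHTTPClient(proxyURL string) (*http.Client, error) {
	parsed, err := url.Parse(proxyURL)
	if err != nil {
		return nil, err
	}
	switch parsed.Scheme {
	case "http", "https":
		return &http.Client{
			Transport: &http.Transport{Proxy: http.ProxyURL(parsed)},
			Timeout:   30 * time.Second,
		}, nil
	case "socks5", "socks5h":
		var auth *proxy.Auth
		if parsed.User != nil {
			password, _ := parsed.User.Password()
			auth = &proxy.Auth{User: parsed.User.Username(), Password: password}
		}
		dialer, err := proxy.SOCKS5("tcp", parsed.Host, auth, proxy.Direct)
		if err != nil {
			return nil, err
		}
		return &http.Client{
			// Dial routes all TCP connections (DNS included) via the proxy.
			Transport: &http.Transport{Dial: dialer.Dial},
			Timeout:   30 * time.Second,
		}, nil
	default:
		return nil, fmt.Errorf("unsupported proxy scheme: %s", parsed.Scheme)
	}
}

func main() {
	client, err := newProxyHTTPClient("socks5h://user:pass@127.0.0.1:1080")
	fmt.Println(client != nil, err)
}
```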
View File

@@ -120,11 +120,12 @@ func getImageToken(info *relaycommon.RelayInfo, imageUrl *dto.MessageImageUrl, m
var config image.Config
var err error
var format string
var b64str string
if strings.HasPrefix(imageUrl.Url, "http") {
config, format, err = DecodeUrlImageData(imageUrl.Url)
} else {
- common.SysLog(fmt.Sprintf("decoding image"))
- config, format, _, err = DecodeBase64ImageData(imageUrl.Url)
+ config, format, b64str, err = DecodeBase64ImageData(imageUrl.Url)
}
if err != nil {
return 0, err
@@ -132,7 +133,12 @@ func getImageToken(info *relaycommon.RelayInfo, imageUrl *dto.MessageImageUrl, m
imageUrl.MimeType = format
if config.Width == 0 || config.Height == 0 {
return 0, errors.New(fmt.Sprintf("fail to decode image config: %s", imageUrl.Url))
// not an image
if format != "" && b64str != "" {
// file type
return 3 * baseTokens, nil
}
return 0, errors.New(fmt.Sprintf("fail to decode base64 config: %s", imageUrl.Url))
}
shortSide := config.Width

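The hunk distinguishes real images, which get dimension-based token counting, from other base64 payloads, which now get a flat charge instead of a hard error. A hedged sketch of that decode-and-classify step; `decodeDataURL` loosely mirrors what `DecodeBase64ImageData` returns, and the 85-token base cost is an illustrative constant, not the repo's:

```go
package main

import (
	"bytes"
	"encoding/base64"
	"fmt"
	"image"
	_ "image/jpeg" // register decoders so DecodeConfig recognizes the formats
	_ "image/png"
	"strings"
)

const baseTokens = 85 // illustrative base cost, not the repo's exact constant

// decodeDataURL splits a "data:<mime>;base64,<payload>" URL and tries to
// read image dimensions, returning (config, format, raw payload, error).
func decodeDataURL(dataURL string) (image.Config, string, string, error) {
	_, payload, ok := strings.Cut(dataURL, ",")
	if !ok {
		return image.Config{}, "", "", fmt.Errorf("not a data URL")
	}
	raw, err := base64.StdEncoding.DecodeString(payload)
	if err != nil {
		return image.Config{}, "", "", err
	}
	cfg, format, err := image.DecodeConfig(bytes.NewReader(raw))
	if err != nil {
		// Decodable base64 but not an image: the hunk above treats this
		// as a generic file rather than failing the request.
		return image.Config{}, "application/octet-stream", payload, nil
	}
	return cfg, format, payload, nil
}

func main() {
	cfg, format, _, err := decodeDataURL("data:text/plain;base64,aGVsbG8=")
	if err != nil {
		fmt.Println("decode failed:", err)
		return
	}
	if cfg.Width == 0 || cfg.Height == 0 {
		fmt.Printf("non-image %s: flat cost %d tokens\n", format, 3*baseTokens)
		return
	}
	fmt.Printf("%s image %dx%d\n", format, cfg.Width, cfg.Height)
}
```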
View File

@@ -36,6 +36,10 @@ var defaultCacheRatio = map[string]float64{
"claude-3-5-sonnet-20241022": 0.1,
"claude-3-7-sonnet-20250219": 0.1,
"claude-3-7-sonnet-20250219-thinking": 0.1,
"claude-sonnet-4-20250514": 0.1,
"claude-sonnet-4-20250514-thinking": 0.1,
"claude-opus-4-20250514": 0.1,
"claude-opus-4-20250514-thinking": 0.1,
}
var defaultCreateCacheRatio = map[string]float64{
@@ -47,6 +51,10 @@ var defaultCreateCacheRatio = map[string]float64{
"claude-3-5-sonnet-20241022": 1.25,
"claude-3-7-sonnet-20250219": 1.25,
"claude-3-7-sonnet-20250219-thinking": 1.25,
"claude-sonnet-4-20250514": 1.25,
"claude-sonnet-4-20250514-thinking": 1.25,
"claude-opus-4-20250514": 1.25,
"claude-opus-4-20250514-thinking": 1.25,
}
//var defaultCreateCacheRatio = map[string]float64{}
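These ratios discount cached prompt reads to 10% of the input price and surcharge prompt-cache writes at 125%. A sketch of how such ratios typically enter the prompt-cost computation; the weighting formula below shows the general scheme, not necessarily the repo's exact billing path:

```go
package main

import "fmt"

// Illustrative entries matching the maps added above.
var cacheRatio = map[string]float64{"claude-sonnet-4-20250514": 0.1}
var createCacheRatio = map[string]float64{"claude-sonnet-4-20250514": 1.25}

// promptCost re-weights cached and cache-creation tokens before the model
// ratio is applied: cached reads are cheap, cache writes carry a premium.
func promptCost(model string, fresh, cached, cacheWrite, modelRatio float64) float64 {
	weighted := fresh +
		cached*cacheRatio[model] +
		cacheWrite*createCacheRatio[model]
	return weighted * modelRatio
}

func main() {
	// 1000 fresh + 10000 cached + 2000 cache-write tokens at model ratio 1.5:
	fmt.Println(promptCost("claude-sonnet-4-20250514", 1000, 10000, 2000, 1.5))
}
```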

View File

@@ -114,7 +114,9 @@ var defaultModelRatio = map[string]float64{
"claude-3-5-sonnet-20241022": 1.5,
"claude-3-7-sonnet-20250219": 1.5,
"claude-3-7-sonnet-20250219-thinking": 1.5,
"claude-sonnet-4-20250514": 1.5,
"claude-3-opus-20240229": 7.5, // $15 / 1M tokens
"claude-opus-4-20250514": 7.5,
"ERNIE-4.0-8K": 0.120 * RMB,
"ERNIE-3.5-8K": 0.012 * RMB,
"ERNIE-3.5-8K-0205": 0.024 * RMB,
@@ -440,13 +442,15 @@ func getHardcodedCompletionModelRatio(name string) (float64, bool) {
if name == "chatgpt-4o-latest" {
return 3, true
}
if strings.Contains(name, "claude-instant-1") {
return 3, true
} else if strings.Contains(name, "claude-2") {
return 3, true
} else if strings.Contains(name, "claude-3") {
if strings.Contains(name, "claude-3") {
return 5, true
} else if strings.Contains(name, "claude-sonnet-4") || strings.Contains(name, "claude-opus-4") {
return 5, true
} else if strings.Contains(name, "claude-instant-1") || strings.Contains(name, "claude-2") {
return 3, true
}
if strings.HasPrefix(name, "gpt-3.5") {
if name == "gpt-3.5-turbo" || strings.HasSuffix(name, "0125") {
// https://openai.com/blog/new-embedding-models-and-api-updates

View File

@@ -1,6 +1,64 @@
package setting
import (
"encoding/json"
"fmt"
"one-api/common"
"sync"
)
var ModelRequestRateLimitEnabled = false
var ModelRequestRateLimitDurationMinutes = 1
var ModelRequestRateLimitCount = 0
var ModelRequestRateLimitSuccessCount = 1000
var ModelRequestRateLimitGroup = map[string][2]int{}
var ModelRequestRateLimitMutex sync.RWMutex
func ModelRequestRateLimitGroup2JSONString() string {
ModelRequestRateLimitMutex.RLock()
defer ModelRequestRateLimitMutex.RUnlock()
jsonBytes, err := json.Marshal(ModelRequestRateLimitGroup)
if err != nil {
common.SysError("error marshalling model ratio: " + err.Error())
}
return string(jsonBytes)
}
func UpdateModelRequestRateLimitGroupByJSONString(jsonStr string) error {
ModelRequestRateLimitMutex.Lock()
defer ModelRequestRateLimitMutex.Unlock()
ModelRequestRateLimitGroup = make(map[string][2]int)
return json.Unmarshal([]byte(jsonStr), &ModelRequestRateLimitGroup)
}
func GetGroupRateLimit(group string) (totalCount, successCount int, found bool) {
ModelRequestRateLimitMutex.RLock()
defer ModelRequestRateLimitMutex.RUnlock()
if ModelRequestRateLimitGroup == nil {
return 0, 0, false
}
limits, found := ModelRequestRateLimitGroup[group]
if !found {
return 0, 0, false
}
return limits[0], limits[1], true
}
func CheckModelRequestRateLimitGroup(jsonStr string) error {
checkModelRequestRateLimitGroup := make(map[string][2]int)
err := json.Unmarshal([]byte(jsonStr), &checkModelRequestRateLimitGroup)
if err != nil {
return err
}
for group, limits := range checkModelRequestRateLimitGroup {
if limits[0] < 0 || limits[1] < 1 {
return fmt.Errorf("group %s has negative rate limit values: [%d, %d]", group, limits[0], limits[1])
}
}
return nil
}
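A hypothetical consumer of this config: a middleware that reads the per-group `[maxRequests, maxSuccessful]` pair for the current window and admits or rejects a request. The function name, the window counters, and the zero-means-no-cap convention for the first value are assumptions for illustration, inferred from the UI example `{"vip": [0, 1000]}` added later in this changeset:

```go
package main

import "fmt"

// groupLimits mimics ModelRequestRateLimitGroup: group -> [maxRequests, maxSuccessful].
var groupLimits = map[string][2]int{
	"default": {200, 100},
	"vip":     {0, 1000}, // 0 is read as "no total-request cap" in this sketch
}

// allow decides whether to admit a request given counters for the current
// rate-limit window; ungrouped traffic falls through to the global limit.
func allow(group string, requestsSoFar, successesSoFar int) bool {
	limits, ok := groupLimits[group]
	if !ok {
		return true
	}
	if limits[0] > 0 && requestsSoFar >= limits[0] {
		return false
	}
	return successesSoFar < limits[1]
}

func main() {
	fmt.Println(allow("default", 199, 50)) // true
	fmt.Println(allow("default", 200, 50)) // false: total-request cap hit
	fmt.Println(allow("vip", 100000, 999)) // true: no total cap, under success cap
}
```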

View File

@@ -3,6 +3,7 @@ package setting
var ServerAddress = "http://localhost:3000"
var WorkerUrl = ""
var WorkerValidKey = ""
var WorkerAllowHttpImageRequestEnabled = false
func EnableWorker() bool {
return WorkerUrl != ""

View File

@@ -871,7 +871,16 @@ const ChannelsTable = () => {
};
const refresh = async () => {
await loadChannels(activePage - 1, pageSize, idSort, enableTagMode);
if (searchKeyword === '' && searchGroup === '' && searchModel === '') {
await loadChannels(activePage - 1, pageSize, idSort, enableTagMode);
} else {
await searchChannels(
searchKeyword,
searchGroup,
searchModel,
enableTagMode,
);
}
};
useEffect(() => {
@@ -879,9 +888,13 @@ const ChannelsTable = () => {
const localIdSort = localStorage.getItem('id-sort') === 'true';
const localPageSize =
parseInt(localStorage.getItem('page-size')) || ITEMS_PER_PAGE;
+ const localEnableTagMode = localStorage.getItem('enable-tag-mode') === 'true';
+ const localEnableBatchDelete = localStorage.getItem('enable-batch-delete') === 'true';
setIdSort(localIdSort);
setPageSize(localPageSize);
- loadChannels(0, localPageSize, localIdSort, enableTagMode)
+ setEnableTagMode(localEnableTagMode);
+ setEnableBatchDelete(localEnableBatchDelete);
+ loadChannels(0, localPageSize, localIdSort, localEnableTagMode)
.then()
.catch((reason) => {
showError(reason);
@@ -979,8 +992,8 @@ const ChannelsTable = () => {
enableTagMode,
) => {
if (searchKeyword === '' && searchGroup === '' && searchModel === '') {
- await loadChannels(0, pageSize, idSort, enableTagMode);
- setActivePage(1);
+ await loadChannels(activePage - 1, pageSize, idSort, enableTagMode);
+ // setActivePage(1);
return;
}
setSearching(true);
@@ -1477,10 +1490,12 @@ const ChannelsTable = () => {
{t('开启批量操作')}
</Typography.Text>
<Switch
checked={enableBatchDelete}
label={t('开启批量操作')}
uncheckedText={t('关')}
aria-label={t('是否开启批量操作')}
onChange={(v) => {
localStorage.setItem('enable-batch-delete', v + '');
setEnableBatchDelete(v);
}}
/>
@@ -1544,6 +1559,7 @@ const ChannelsTable = () => {
uncheckedText={t('关')}
aria-label={t('是否启用标签聚合')}
onChange={(v) => {
localStorage.setItem('enable-tag-mode', v + '');
setEnableTagMode(v);
loadChannels(0, pageSize, idSort, v);
}}

View File

@@ -39,7 +39,9 @@ const ModelSetting = () => {
item.key === 'claude.default_max_tokens' ||
item.key === 'gemini.supported_imagine_models'
) {
- item.value = JSON.stringify(JSON.parse(item.value), null, 2);
+ if (item.value !== '') {
+ item.value = JSON.stringify(JSON.parse(item.value), null, 2);
+ }
}
if (item.key.endsWith('Enabled') || item.key.endsWith('enabled')) {
newInputs[item.key] = item.value === 'true' ? true : false;
@@ -60,6 +62,7 @@ const ModelSetting = () => {
// showSuccess('刷新成功');
} catch (error) {
showError('刷新失败');
console.error(error);
} finally {
setLoading(false);
}

View File

@@ -13,6 +13,7 @@ const RateLimitSetting = () => {
ModelRequestRateLimitCount: 0,
ModelRequestRateLimitSuccessCount: 1000,
ModelRequestRateLimitDurationMinutes: 1,
ModelRequestRateLimitGroup: '',
});
let [loading, setLoading] = useState(false);
@@ -23,10 +24,14 @@ const RateLimitSetting = () => {
if (success) {
let newInputs = {};
data.forEach((item) => {
- if (item.key.endsWith('Enabled')) {
- newInputs[item.key] = item.value === 'true' ? true : false;
- } else {
- newInputs[item.key] = item.value;
+ if (item.key === 'ModelRequestRateLimitGroup') {
+ item.value = JSON.stringify(JSON.parse(item.value), null, 2);
+ }
+ if (item.key.endsWith('Enabled')) {
+ newInputs[item.key] = item.value === 'true' ? true : false;
+ } else {
+ newInputs[item.key] = item.value;
+ }
});

View File

@@ -19,7 +19,7 @@ import {
verifyJSON,
} from '../helpers/utils';
import { API } from '../helpers/api';
import axios from "axios";
import axios from 'axios';
const SystemSetting = () => {
let [inputs, setInputs] = useState({
@@ -45,6 +45,7 @@ const SystemSetting = () => {
ServerAddress: '',
WorkerUrl: '',
WorkerValidKey: '',
WorkerAllowHttpImageRequestEnabled: '',
EpayId: '',
EpayKey: '',
Price: 7.3,
@@ -111,6 +112,7 @@ const SystemSetting = () => {
case 'SMTPSSLEnabled':
case 'LinuxDOOAuthEnabled':
case 'oidc.enabled':
case 'WorkerAllowHttpImageRequestEnabled':
item.value = item.value === 'true';
break;
case 'Price':
@@ -206,7 +208,11 @@ const SystemSetting = () => {
let WorkerUrl = removeTrailingSlash(inputs.WorkerUrl);
const options = [
{ key: 'WorkerUrl', value: WorkerUrl },
- ]
+ {
+ key: 'WorkerAllowHttpImageRequestEnabled',
+ value: inputs.WorkerAllowHttpImageRequestEnabled ? 'true' : 'false',
+ },
+ ];
if (inputs.WorkerValidKey !== '' || WorkerUrl === '') {
options.push({ key: 'WorkerValidKey', value: inputs.WorkerValidKey });
}
@@ -302,7 +308,8 @@ const SystemSetting = () => {
const domain = emailToAdd.trim();
// validate the domain format
- const domainRegex = /^([a-zA-Z0-9]([a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])?\.)+[a-zA-Z]{2,}$/;
+ const domainRegex =
+ /^([a-zA-Z0-9]([a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])?\.)+[a-zA-Z]{2,}$/;
if (!domainRegex.test(domain)) {
showError('邮箱域名格式不正确,请输入有效的域名,如 gmail.com');
return;
@@ -577,6 +584,12 @@ const SystemSetting = () => {
/>
</Col>
</Row>
<Form.Checkbox
field='WorkerAllowHttpImageRequestEnabled'
noLabel
>
允许 HTTP 协议图片请求,适用于自部署代理
</Form.Checkbox>
<Button onClick={submitWorker}>更新Worker设置</Button>
</Form.Section>
</Card>
@@ -799,7 +812,13 @@ const SystemSetting = () => {
onChange={(value) => setEmailToAdd(value)}
style={{ marginTop: 16 }}
suffix={
<Button theme="solid" type="primary" onClick={handleAddEmail}>添加</Button>
<Button
theme='solid'
type='primary'
onClick={handleAddEmail}
>
添加
</Button>
}
onEnterPress={handleAddEmail}
/>

View File

@@ -118,6 +118,11 @@ export const CHANNEL_OPTIONS = [
{
value: 48,
color: 'blue',
- label: 'xAI'
- }
+ label: 'xAI',
+ },
+ {
+ value: 49,
+ color: 'blue',
+ label: 'Coze',
+ },
];

View File

@@ -1086,7 +1086,7 @@
"没有账户?": "No account? ",
"请输入 AZURE_OPENAI_ENDPOINT例如https://docs-test-001.openai.azure.com": "Please enter AZURE_OPENAI_ENDPOINT, e.g.: https://docs-test-001.openai.azure.com",
"默认 API 版本": "Default API Version",
"请输入默认 API 版本例如2024-12-01-preview": "Please enter default API version, e.g.: 2024-12-01-preview.",
"请输入默认 API 版本例如2025-04-01-preview": "Please enter default API version, e.g.: 2025-04-01-preview.",
"请为渠道命名": "Please name the channel",
"请选择可以使用该渠道的分组": "Please select groups that can use this channel",
"请在系统设置页面编辑分组倍率以添加新的分组:": "Please edit Group ratios in system settings to add new groups:",
@@ -1374,4 +1374,4 @@
"适用于展示系统功能的场景。": "Suitable for scenarios where the system functions are displayed.",
"可在初始化后修改": "Can be modified after initialization",
"初始化系统": "Initialize system"
- }
+ }

View File

@@ -24,7 +24,8 @@ import {
TextArea,
Checkbox,
Banner,
- Modal, ImagePreview
+ Modal,
+ ImagePreview,
} from '@douyinfe/semi-ui';
import { getChannelModels, loadChannelModels } from '../../components/utils.js';
import { IconHelpCircle } from '@douyinfe/semi-icons';
@@ -306,7 +307,7 @@ const EditChannel = (props) => {
fetchModels().then();
fetchGroups().then();
if (isEdit) {
loadChannel().then(() => { });
loadChannel().then(() => {});
} else {
setInputs(originInputs);
let localModels = getChannelModels(inputs.type);
@@ -477,7 +478,9 @@ const EditChannel = (props) => {
type={'warning'}
description={
<>
- {t('2025年5月10日后添加的渠道不需要再在部署的时候移除模型名称中的"."')}
+ {t(
+ '2025年5月10日后添加的渠道不需要再在部署的时候移除模型名称中的"."',
+ )}
{/*<br />*/}
{/*<Typography.Text*/}
{/* style={{*/}
@@ -522,7 +525,7 @@ const EditChannel = (props) => {
<Input
label={t('默认 API 版本')}
name='azure_other'
- placeholder={t('请输入默认 API 版本,例如:2024-12-01-preview')}
+ placeholder={t('请输入默认 API 版本,例如:2025-04-01-preview')}
onChange={(value) => {
handleInputChange('other', value);
}}
@@ -584,25 +587,35 @@ const EditChannel = (props) => {
value={inputs.name}
autoComplete='new-password'
/>
{inputs.type !== 3 && inputs.type !== 8 && inputs.type !== 22 && inputs.type !== 36 && inputs.type !== 45 && (
<>
<div style={{ marginTop: 10 }}>
<Typography.Text strong>{t('API地址')}</Typography.Text>
</div>
<Tooltip content={t('对于官方渠道new-api已经内置地址除非是第三方代理站点或者Azure的特殊接入地址否则不需要填写')}>
<Input
label={t('API地址')}
name="base_url"
placeholder={t('此项可选用于通过自定义API地址来进行 API 调用,末尾不要带/v1和/')}
onChange={(value) => {
handleInputChange('base_url', value);
}}
value={inputs.base_url}
autoComplete="new-password"
/>
</Tooltip>
</>
)}
{inputs.type !== 3 &&
inputs.type !== 8 &&
inputs.type !== 22 &&
inputs.type !== 36 &&
inputs.type !== 45 && (
<>
<div style={{ marginTop: 10 }}>
<Typography.Text strong>{t('API地址')}</Typography.Text>
</div>
<Tooltip
content={t(
'对于官方渠道new-api已经内置地址除非是第三方代理站点或者Azure的特殊接入地址否则不需要填写',
)}
>
<Input
label={t('API地址')}
name='base_url'
placeholder={t(
'此项可选用于通过自定义API地址来进行 API 调用,末尾不要带/v1和/',
)}
onChange={(value) => {
handleInputChange('base_url', value);
}}
value={inputs.base_url}
autoComplete='new-password'
/>
</Tooltip>
</>
)}
<div style={{ marginTop: 10 }}>
<Typography.Text strong>{t('密钥')}</Typography.Text>
</div>
@@ -761,10 +774,10 @@ const EditChannel = (props) => {
name='other'
placeholder={t(
'请输入部署地区,例如:us-central1\n支持使用模型映射格式\n' +
- '{\n' +
- ' "default": "us-central1",\n' +
- ' "claude-3-5-sonnet-20240620": "europe-west1"\n' +
- '}',
+ '{\n' +
+ ' "default": "us-central1",\n' +
+ ' "claude-3-5-sonnet-20240620": "europe-west1"\n' +
+ '}',
)}
autosize={{ minRows: 2 }}
onChange={(value) => {
@@ -825,6 +838,22 @@ const EditChannel = (props) => {
/>
</>
)}
{inputs.type === 49 && (
<>
<div style={{ marginTop: 10 }}>
<Typography.Text strong>智能体ID</Typography.Text>
</div>
<Input
name='other'
placeholder={'请输入智能体ID,例如:7342866812345'}
onChange={(value) => {
handleInputChange('other', value);
}}
value={inputs.other}
autoComplete='new-password'
/>
</>
)}
<div style={{ marginTop: 10 }}>
<Typography.Text strong>{t('模型')}</Typography.Text>
</div>

View File

@@ -158,7 +158,7 @@ const Home = () => {
</p>
<p>
{t('OIDC 身份验证')}
- {statusState?.status?.oidc === true
+ {statusState?.status?.oidc_enabled === true
? t('已启用')
: t('未启用')}
</p>

View File

@@ -64,8 +64,9 @@ const Playground = () => {
},
];
const defaultModel = 'gpt-4o-mini';
const [inputs, setInputs] = useState({
- model: 'gpt-4o-mini',
+ model: defaultModel,
group: '',
max_tokens: 0,
temperature: 0,
@@ -108,6 +109,11 @@ const Playground = () => {
value: model,
}));
setModels(localModelOptions);
// if default model is not in the list, set the first one as default
const hasDefault = localModelOptions.some(option => option.value === defaultModel);
if (!hasDefault && localModelOptions.length > 0) {
setInputs((inputs) => ({ ...inputs, model: localModelOptions[0].value }));
}
} else {
showError(t(message));
}

View File

@@ -27,40 +27,48 @@ export default function SettingGeminiModel(props) {
const [inputs, setInputs] = useState({
'gemini.safety_settings': '',
'gemini.version_settings': '',
- 'gemini.supported_imagine_models': [],
+ 'gemini.supported_imagine_models': '',
'gemini.thinking_adapter_enabled': false,
'gemini.thinking_adapter_budget_tokens_percentage': 0.6,
});
const refForm = useRef();
const [inputsRow, setInputsRow] = useState(inputs);
- function onSubmit() {
- const updateArray = compareObjects(inputs, inputsRow);
- if (!updateArray.length) return showWarning(t('你似乎并没有修改什么'));
- const requestQueue = updateArray.map((item) => {
- let value = String(inputs[item.key]);
- return API.put('/api/option/', {
- key: item.key,
- value,
- });
- });
- setLoading(true);
- Promise.all(requestQueue)
- .then((res) => {
- if (requestQueue.length === 1) {
- if (res.includes(undefined)) return;
- } else if (requestQueue.length > 1) {
- if (res.includes(undefined))
- return showError(t('部分保存失败,请重试'));
- }
- showSuccess(t('保存成功'));
- props.refresh();
+ async function onSubmit() {
+ await refForm.current
+ .validate()
+ .then(() => {
+ const updateArray = compareObjects(inputs, inputsRow);
+ if (!updateArray.length) return showWarning(t('你似乎并没有修改什么'));
+ const requestQueue = updateArray.map((item) => {
+ let value = String(inputs[item.key]);
+ return API.put('/api/option/', {
+ key: item.key,
+ value,
+ });
+ });
+ setLoading(true);
+ Promise.all(requestQueue)
+ .then((res) => {
+ if (requestQueue.length === 1) {
+ if (res.includes(undefined)) return;
+ } else if (requestQueue.length > 1) {
+ if (res.includes(undefined))
+ return showError(t('部分保存失败,请重试'));
+ }
+ showSuccess(t('保存成功'));
+ props.refresh();
+ })
+ .catch(() => {
+ showError(t('保存失败,请重试'));
+ })
+ .finally(() => {
+ setLoading(false);
+ });
+ })
- .catch(() => {
- showError(t('保存失败,请重试'));
- })
- .finally(() => {
- setLoading(false);
+ .catch((error) => {
+ console.error('Validation failed:', error);
+ showError(t('请检查输入'));
+ });
}
@@ -146,6 +154,14 @@ export default function SettingGeminiModel(props) {
label={t('支持的图像模型')}
placeholder={t('例如:') + '\n' + JSON.stringify(['gemini-2.0-flash-exp-image-generation'], null, 2)}
onChange={(value) => setInputs({ ...inputs, 'gemini.supported_imagine_models': value })}
trigger='blur'
stopValidateWithError
rules={[
{
validator: (rule, value) => verifyJSON(value),
message: t('不是合法的 JSON 字符串'),
},
]}
/>
</Col>
</Row>

View File

@@ -6,6 +6,7 @@ import {
showError,
showSuccess,
showWarning,
verifyJSON,
} from '../../../helpers';
import { useTranslation } from 'react-i18next';
@@ -18,6 +19,7 @@ export default function RequestRateLimit(props) {
ModelRequestRateLimitCount: -1,
ModelRequestRateLimitSuccessCount: 1000,
ModelRequestRateLimitDurationMinutes: 1,
ModelRequestRateLimitGroup: '',
});
const refForm = useRef();
const [inputsRow, setInputsRow] = useState(inputs);
@@ -46,6 +48,13 @@ export default function RequestRateLimit(props) {
if (res.includes(undefined))
return showError(t('部分保存失败,请重试'));
}
for (let i = 0; i < res.length; i++) {
if (!res[i].data.success) {
return showError(res[i].data.message);
}
}
showSuccess(t('保存成功'));
props.refresh();
})
@@ -147,6 +156,41 @@ export default function RequestRateLimit(props) {
/>
</Col>
</Row>
<Row>
<Col xs={24} sm={16}>
<Form.TextArea
label={t('分组速率限制')}
placeholder={t(
'{\n "default": [200, 100],\n "vip": [0, 1000]\n}',
)}
field={'ModelRequestRateLimitGroup'}
autosize={{ minRows: 5, maxRows: 15 }}
trigger='blur'
stopValidateWithError
rules={[
{
validator: (rule, value) => verifyJSON(value),
message: t('不是合法的 JSON 字符串'),
},
]}
extraText={
<div>
<p style={{ marginBottom: -15 }}>{t('说明:')}</p>
<ul>
<li>{t('使用 JSON 对象格式,格式为:{"组名": [最多请求次数, 最多请求完成次数]}')}</li>
<li>{t('示例:{"default": [200, 100], "vip": [0, 1000]}。')}</li>
<li>{t('[最多请求次数]必须大于等于0[最多请求完成次数]必须大于等于1。')}</li>
<li>{t('分组速率配置优先级高于全局速率限制。')}</li>
<li>{t('限制周期统一使用上方配置的“限制周期”值。')}</li>
</ul>
</div>
}
onChange={(value) => {
setInputs({ ...inputs, ModelRequestRateLimitGroup: value });
}}
/>
</Col>
</Row>
<Row>
<Button size='default' onClick={onSubmit}>
{t('保存模型速率限制')}