Mirror of https://github.com/QuantumNous/new-api.git, synced 2026-04-05 07:26:18 +00:00

Compare commits
44 Commits
Commit SHA1s:
7c03ad71de, 4f194f4e6a, 81137e0533, b9b66dda54, fd22948ead, 894dce7366, b95142bbac, 7f74a9664e,
a3739f67f7, b841ce006f, e3f9ef1894, 558e625a01, 37a83ecc33, 37bb34b4b0, 8deab221f9, 17e9f1a07d,
792754cee3, 98b27a17a6, 7855f83e2d, cbdf26bf2c, eb46b71a71, a42c3b6227, b00dd8b405, be228ccd2c,
b1be64bcf3, 6ecfb81cbc, 14848ff789, 47d3b515da, 760514c3e1, 254c25c27a, 8731a32e56, 7208a65e5d,
4084b18071, 2ca0d7246d, d042a1bd55, 816e831a2e, a3ceae4a86, eb163d9c94, a592a81bc2, bb300d199e,
7dbb6b017c, ce1854847b, 2f9faba40d, a5085014cc
@@ -68,7 +68,7 @@

## Model Support
This version additionally supports:
-1. Third-party model **gps** (gpt-4-gizmo-*)
+1. Third-party model **gpts** (gpt-4-gizmo-*)
2. [Midjourney-Proxy(Plus)](https://github.com/novicezk/midjourney-proxy) interface, [Integration Guide](Midjourney.md)
3. Custom channels with full API URL support
4. [Suno API](https://github.com/Suno-API/Suno-API) interface, [Integration Guide](Suno.md)
@@ -162,7 +162,7 @@ docker run --rm -v /var/run/docker.sock:/var/run/docker.sock containrrr/watchtow

## Channel Retry
Channel retry is implemented and configurable in `Settings->Operation Settings->General Settings`. **Enabling the cache is recommended**.
-The first retry uses the same priority, the second retry uses the next priority, and so on.
+If retry is enabled, the system will automatically use the next-priority channel for the same request after a failed request.

### Cache Configuration
1. `REDIS_CONN_STRING`: Use Redis as the cache
@@ -77,7 +77,7 @@

## Model Support (from the Chinese README)
This version additionally supports the following models:
-1. Third-party model **gps** (gpt-4-gizmo-*)
+1. Third-party model **gpts** (gpt-4-gizmo-*)
2. [Midjourney-Proxy(Plus)](https://github.com/novicezk/midjourney-proxy) interface, [Integration Guide](Midjourney.md)
3. Custom channels, with support for entering the full upstream URL
4. [Suno API](https://github.com/Suno-API/Suno-API) interface, [Integration Guide](Suno.md)
@@ -177,7 +177,7 @@ docker run --rm -v /var/run/docker.sock:/var/run/docker.sock containrrr/watchtow

## Channel Retry (from the Chinese README)
Channel retry is implemented; the retry count can be set under `设置->运营设置->通用设置` (Settings -> Operation Settings -> General Settings). **Enabling the cache is recommended**.
-If retry is enabled, the first retry uses the same priority, the second retry uses the next priority, and so on.
+If retry is enabled, each retry uses the next priority, and so on.

### Cache Configuration
1. `REDIS_CONN_STRING`: once set, Redis is used as the cache.
   + Example: `REDIS_CONN_STRING=redis://default:redispw@localhost:49153`
@@ -220,8 +220,8 @@ docker run --rm -v /var/run/docker.sock:/var/run/docker.sock containrrr/watchtow

- [neko-api-key-tool](https://github.com/Calcium-Ion/neko-api-key-tool): query usage quota by key

Other projects based on New API:
-- [new-api-horizon](https://github.com/Calcium-Ion/new-api-horizon): high-performance optimized edition of New API, with Claude-format support
-- [VoAPI](https://github.com/VoAPI/VoAPI): closed-source project based on New API
+- [new-api-horizon](https://github.com/Calcium-Ion/new-api-horizon): high-performance optimized edition of New API, focused on high-concurrency optimization, with Claude-format support
+- [VoAPI](https://github.com/VoAPI/VoAPI): restyled front end based on New API, closed source and free

## 🌟 Star History
common/gopool.go (new file, 24 lines)
@@ -0,0 +1,24 @@
package common

import (
	"context"
	"fmt"
	"github.com/bytedance/gopkg/util/gopool"
	"math"
)

var relayGoPool gopool.Pool

func init() {
	relayGoPool = gopool.NewPool("gopool.RelayPool", math.MaxInt32, gopool.NewConfig())
	relayGoPool.SetPanicHandler(func(ctx context.Context, i interface{}) {
		if stopChan, ok := ctx.Value("stop_chan").(chan bool); ok {
			SafeSendBool(stopChan, true)
		}
		SysError(fmt.Sprintf("panic in gopool.RelayPool: %v", i))
	})
}

func RelayCtxGo(ctx context.Context, f func()) {
	relayGoPool.CtxGo(ctx, f)
}
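The pool's panic handler pulls a `stop_chan` value off the context, so a relay goroutine that panics can still unblock whoever is waiting on it. A minimal usage sketch of `RelayCtxGo` under that convention (the work inside the closure is illustrative):

```go
package main

import (
	"context"
	"fmt"

	"one-api/common"
)

func main() {
	stopChan := make(chan bool, 1)
	// The panic handler looks up this exact key and signals the channel.
	ctx := context.WithValue(context.Background(), "stop_chan", stopChan)

	common.RelayCtxGo(ctx, func() {
		// ... relay work; a panic here is recovered by the pool,
		// logged via SysError, and stopChan is signalled instead ...
		common.SafeSendBool(stopChan, true) // normal completion
	})

	<-stopChan // unblocks on completion or on panic
	fmt.Println("relay goroutine finished")
}
```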
@@ -17,6 +17,7 @@ import (
|
||||
"one-api/relay"
|
||||
relaycommon "one-api/relay/common"
|
||||
"one-api/relay/constant"
|
||||
"one-api/relay/helper"
|
||||
"one-api/service"
|
||||
"strconv"
|
||||
"strings"
|
||||
@@ -72,18 +73,6 @@ func testChannel(channel *model.Channel, testModel string) (err error, openAIErr
|
||||
}
|
||||
}
|
||||
|
||||
modelMapping := *channel.ModelMapping
|
||||
if modelMapping != "" && modelMapping != "{}" {
|
||||
modelMap := make(map[string]string)
|
||||
err := json.Unmarshal([]byte(modelMapping), &modelMap)
|
||||
if err != nil {
|
||||
return err, service.OpenAIErrorWrapperLocal(err, "unmarshal_model_mapping_failed", http.StatusInternalServerError)
|
||||
}
|
||||
if modelMap[testModel] != "" {
|
||||
testModel = modelMap[testModel]
|
||||
}
|
||||
}
|
||||
|
||||
cache, err := model.GetUserCache(1)
|
||||
if err != nil {
|
||||
return err, nil
|
||||
@@ -97,7 +86,14 @@ func testChannel(channel *model.Channel, testModel string) (err error, openAIErr
|
||||
|
||||
middleware.SetupContextForSelectedChannel(c, channel, testModel)
|
||||
|
||||
meta := relaycommon.GenRelayInfo(c)
|
||||
info := relaycommon.GenRelayInfo(c)
|
||||
|
||||
err = helper.ModelMappedHelper(c, info)
|
||||
if err != nil {
|
||||
return err, nil
|
||||
}
|
||||
testModel = info.UpstreamModelName
|
||||
|
||||
apiType, _ := constant.ChannelType2APIType(channel.Type)
|
||||
adaptor := relay.GetAdaptor(apiType)
|
||||
if adaptor == nil {
|
||||
@@ -105,12 +101,11 @@ func testChannel(channel *model.Channel, testModel string) (err error, openAIErr
|
||||
}
|
||||
|
||||
request := buildTestRequest(testModel)
|
||||
meta.UpstreamModelName = testModel
|
||||
common.SysLog(fmt.Sprintf("testing channel %d with model %s , meta %v ", channel.Id, testModel, meta))
|
||||
common.SysLog(fmt.Sprintf("testing channel %d with model %s , info %v ", channel.Id, testModel, info))
|
||||
|
||||
adaptor.Init(meta)
|
||||
adaptor.Init(info)
|
||||
|
||||
convertedRequest, err := adaptor.ConvertRequest(c, meta, request)
|
||||
convertedRequest, err := adaptor.ConvertRequest(c, info, request)
|
||||
if err != nil {
|
||||
return err, nil
|
||||
}
|
||||
@@ -120,7 +115,7 @@ func testChannel(channel *model.Channel, testModel string) (err error, openAIErr
|
||||
}
|
||||
requestBody := bytes.NewBuffer(jsonData)
|
||||
c.Request.Body = io.NopCloser(requestBody)
|
||||
resp, err := adaptor.DoRequest(c, meta, requestBody)
|
||||
resp, err := adaptor.DoRequest(c, info, requestBody)
|
||||
if err != nil {
|
||||
return err, nil
|
||||
}
|
||||
@@ -132,7 +127,7 @@ func testChannel(channel *model.Channel, testModel string) (err error, openAIErr
|
||||
return fmt.Errorf("status code %d: %s", httpResp.StatusCode, err.Error.Message), err
|
||||
}
|
||||
}
|
||||
usageA, respErr := adaptor.DoResponse(c, httpResp, meta)
|
||||
usageA, respErr := adaptor.DoResponse(c, httpResp, info)
|
||||
if respErr != nil {
|
||||
return fmt.Errorf("%s", respErr.Error.Message), respErr
|
||||
}
|
||||
@@ -145,29 +140,27 @@ func testChannel(channel *model.Channel, testModel string) (err error, openAIErr
|
||||
if err != nil {
|
||||
return err, nil
|
||||
}
|
||||
modelPrice, usePrice := common.GetModelPrice(testModel, false)
|
||||
modelRatio, success := common.GetModelRatio(testModel)
|
||||
if !success {
|
||||
return fmt.Errorf("模型 %s 倍率未设置", testModel), nil
|
||||
info.PromptTokens = usage.PromptTokens
|
||||
priceData, err := helper.ModelPriceHelper(c, info, usage.PromptTokens, int(request.MaxTokens))
|
||||
if err != nil {
|
||||
return err, nil
|
||||
}
|
||||
completionRatio := common.GetCompletionRatio(testModel)
|
||||
ratio := modelRatio
|
||||
quota := 0
|
||||
if !usePrice {
|
||||
quota = usage.PromptTokens + int(math.Round(float64(usage.CompletionTokens)*completionRatio))
|
||||
quota = int(math.Round(float64(quota) * ratio))
|
||||
if ratio != 0 && quota <= 0 {
|
||||
if !priceData.UsePrice {
|
||||
quota = usage.PromptTokens + int(math.Round(float64(usage.CompletionTokens)*priceData.CompletionRatio))
|
||||
quota = int(math.Round(float64(quota) * priceData.ModelRatio))
|
||||
if priceData.ModelRatio != 0 && quota <= 0 {
|
||||
quota = 1
|
||||
}
|
||||
} else {
|
||||
quota = int(modelPrice * common.QuotaPerUnit)
|
||||
quota = int(priceData.ModelPrice * common.QuotaPerUnit)
|
||||
}
|
||||
tok := time.Now()
|
||||
milliseconds := tok.Sub(tik).Milliseconds()
|
||||
consumedTime := float64(milliseconds) / 1000.0
|
||||
other := service.GenerateTextOtherInfo(c, meta, modelRatio, 1, completionRatio, modelPrice)
|
||||
model.RecordConsumeLog(c, 1, channel.Id, usage.PromptTokens, usage.CompletionTokens, testModel, "模型测试",
|
||||
quota, "模型测试", 0, quota, int(consumedTime), false, "default", other)
|
||||
other := service.GenerateTextOtherInfo(c, info, priceData.ModelRatio, priceData.GroupRatio, priceData.CompletionRatio, 0, 0.0, priceData.ModelPrice)
|
||||
model.RecordConsumeLog(c, 1, channel.Id, usage.PromptTokens, usage.CompletionTokens, info.OriginModelName, "模型测试",
|
||||
quota, "模型测试", 0, quota, int(consumedTime), false, info.Group, other)
|
||||
common.SysLog(fmt.Sprintf("testing channel #%d, response: \n%s", channel.Id, string(respBody)))
|
||||
return nil, nil
|
||||
}
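The quota arithmetic in the channel test above is the same ratio-based formula used for normal billing: token-priced models charge `(promptTokens + completionTokens * completionRatio) * modelRatio`, while fixed-price models charge `modelPrice * QuotaPerUnit`. A standalone sketch with made-up numbers (real ratios come from the price helpers):

```go
package main

import (
	"fmt"
	"math"
)

func main() {
	// Illustrative values only.
	promptTokens, completionTokens := 120, 80
	completionRatio := 3.0 // completion tokens billed at 3x prompt tokens
	modelRatio := 2.5      // model-specific multiplier

	quota := promptTokens + int(math.Round(float64(completionTokens)*completionRatio))
	quota = int(math.Round(float64(quota) * modelRatio))
	if modelRatio != 0 && quota <= 0 {
		quota = 1 // never bill zero for a non-free model
	}
	fmt.Println(quota) // (120 + 240) * 2.5 = 900

	// Fixed-price models bypass token ratios entirely.
	modelPrice, quotaPerUnit := 0.1, 500000.0
	fmt.Println(int(modelPrice * quotaPerUnit)) // 50000
}
```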
|
||||
|
||||
@@ -7,6 +7,7 @@ import (
|
||||
"one-api/common"
|
||||
"one-api/model"
|
||||
"one-api/setting"
|
||||
"one-api/setting/operation_setting"
|
||||
"strings"
|
||||
|
||||
"github.com/gin-gonic/gin"
|
||||
@@ -66,7 +67,8 @@ func GetStatus(c *gin.Context) {
|
||||
"enable_online_topup": setting.PayAddress != "" && setting.EpayId != "" && setting.EpayKey != "",
|
||||
"mj_notify_enabled": setting.MjNotifyEnabled,
|
||||
"chats": setting.Chats,
|
||||
"demo_site_enabled": setting.DemoSiteEnabled,
|
||||
"demo_site_enabled": operation_setting.DemoSiteEnabled,
|
||||
"self_use_mode_enabled": operation_setting.SelfUseModeEnabled,
|
||||
},
|
||||
})
|
||||
return
|
||||
|
||||
@@ -2,9 +2,9 @@ package controller
|
||||
|
||||
import (
|
||||
"github.com/gin-gonic/gin"
|
||||
"one-api/common"
|
||||
"one-api/model"
|
||||
"one-api/setting"
|
||||
"one-api/setting/operation_setting"
|
||||
)
|
||||
|
||||
func GetPricing(c *gin.Context) {
|
||||
@@ -40,7 +40,7 @@ func GetPricing(c *gin.Context) {
|
||||
}
|
||||
|
||||
func ResetModelRatio(c *gin.Context) {
|
||||
defaultStr := common.DefaultModelRatio2JSONString()
|
||||
defaultStr := operation_setting.DefaultModelRatio2JSONString()
|
||||
err := model.UpdateOption("ModelRatio", defaultStr)
|
||||
if err != nil {
|
||||
c.JSON(200, gin.H{
|
||||
@@ -49,7 +49,7 @@ func ResetModelRatio(c *gin.Context) {
|
||||
})
|
||||
return
|
||||
}
|
||||
err = common.UpdateModelRatioByJSONString(defaultStr)
|
||||
err = operation_setting.UpdateModelRatioByJSONString(defaultStr)
|
||||
if err != nil {
|
||||
c.JSON(200, gin.H{
|
||||
"success": false,
|
||||
|
||||
@@ -16,6 +16,7 @@ import (
|
||||
"one-api/relay"
|
||||
"one-api/relay/constant"
|
||||
relayconstant "one-api/relay/constant"
|
||||
"one-api/relay/helper"
|
||||
"one-api/service"
|
||||
"strings"
|
||||
)
|
||||
@@ -41,15 +42,6 @@ func relayHandler(c *gin.Context, relayMode int) *dto.OpenAIErrorWithStatusCode
|
||||
return err
|
||||
}
|
||||
|
||||
func wsHandler(c *gin.Context, ws *websocket.Conn, relayMode int) *dto.OpenAIErrorWithStatusCode {
|
||||
var err *dto.OpenAIErrorWithStatusCode
|
||||
switch relayMode {
|
||||
default:
|
||||
err = relay.TextHelper(c)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func Relay(c *gin.Context) {
|
||||
relayMode := constant.Path2RelayMode(c.Request.URL.Path)
|
||||
requestId := c.GetString(common.RequestIdKey)
|
||||
@@ -110,7 +102,7 @@ func WssRelay(c *gin.Context) {
|
||||
|
||||
if err != nil {
|
||||
openaiErr := service.OpenAIErrorWrapper(err, "get_channel_failed", http.StatusInternalServerError)
|
||||
service.WssError(c, ws, openaiErr.Error)
|
||||
helper.WssError(c, ws, openaiErr.Error)
|
||||
return
|
||||
}
|
||||
|
||||
@@ -152,7 +144,7 @@ func WssRelay(c *gin.Context) {
|
||||
openaiErr.Error.Message = "当前分组上游负载已饱和,请稍后再试"
|
||||
}
|
||||
openaiErr.Error.Message = common.MessageWithRequestId(openaiErr.Error.Message, requestId)
|
||||
service.WssError(c, ws, openaiErr.Error)
|
||||
helper.WssError(c, ws, openaiErr.Error)
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -99,6 +99,7 @@ type Message struct {
 	Name             *string         `json:"name,omitempty"`
 	Prefix           *bool           `json:"prefix,omitempty"`
 	ReasoningContent string          `json:"reasoning_content,omitempty"`
+	Reasoning        string          `json:"reasoning,omitempty"`
 	ToolCalls        json.RawMessage `json:"tool_calls,omitempty"`
 	ToolCallId       string          `json:"tool_call_id,omitempty"`
 	parsedContent    []MediaContent

@@ -64,6 +64,7 @@ type ChatCompletionsStreamResponseChoice struct {
 type ChatCompletionsStreamResponseChoiceDelta struct {
 	Content          *string            `json:"content,omitempty"`
 	ReasoningContent *string            `json:"reasoning_content,omitempty"`
+	Reasoning        *string            `json:"reasoning,omitempty"`
 	Role             string             `json:"role,omitempty"`
 	ToolCalls        []ToolCallResponse `json:"tool_calls,omitempty"`
 }

@@ -80,14 +81,18 @@ func (c *ChatCompletionsStreamResponseChoiceDelta) GetContentString() string {
 }

 func (c *ChatCompletionsStreamResponseChoiceDelta) GetReasoningContent() string {
-	if c.ReasoningContent == nil {
+	if c.ReasoningContent == nil && c.Reasoning == nil {
 		return ""
 	}
-	return *c.ReasoningContent
+	if c.ReasoningContent != nil {
+		return *c.ReasoningContent
+	}
+	return *c.Reasoning
 }

 func (c *ChatCompletionsStreamResponseChoiceDelta) SetReasoningContent(s string) {
 	c.ReasoningContent = &s
+	c.Reasoning = &s
 }

 type ToolCallResponse struct {
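The delta now accepts the reasoning text under either JSON key (`reasoning_content` or `reasoning`), and the getter falls back from one to the other without ever dereferencing a nil pointer. A self-contained sketch of the same fallback pattern (stand-in type, not the dto package):

```go
package main

import (
	"encoding/json"
	"fmt"
)

type delta struct {
	ReasoningContent *string `json:"reasoning_content,omitempty"`
	Reasoning        *string `json:"reasoning,omitempty"`
}

// reasoningText prefers reasoning_content and falls back to reasoning.
func (d *delta) reasoningText() string {
	if d.ReasoningContent == nil && d.Reasoning == nil {
		return ""
	}
	if d.ReasoningContent != nil {
		return *d.ReasoningContent
	}
	return *d.Reasoning
}

func main() {
	var a, b delta
	_ = json.Unmarshal([]byte(`{"reasoning_content":"step 1"}`), &a)
	_ = json.Unmarshal([]byte(`{"reasoning":"step 1"}`), &b)
	fmt.Println(a.reasoningText(), b.reasoningText()) // both print "step 1"
}
```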
@@ -51,7 +51,7 @@ func checkRedisRateLimit(ctx context.Context, rdb *redis.Client, key string, max

 	// If the limit has already been reached within the time window, reject the request
 	subTime := nowTime.Sub(oldTime).Seconds()
 	if int64(subTime) < duration {
-		rdb.Expire(ctx, key, common.RateLimitKeyExpirationDuration)
+		rdb.Expire(ctx, key, time.Duration(setting.ModelRequestRateLimitDurationMinutes)*time.Minute)
 		return false, nil
 	}

@@ -68,7 +68,7 @@ func recordRedisRequest(ctx context.Context, rdb *redis.Client, key string, maxC
 	now := time.Now().Format(timeFormat)
 	rdb.LPush(ctx, key, now)
 	rdb.LTrim(ctx, key, 0, int64(maxCount-1))
-	rdb.Expire(ctx, key, common.RateLimitKeyExpirationDuration)
+	rdb.Expire(ctx, key, time.Duration(setting.ModelRequestRateLimitDurationMinutes)*time.Minute)
 }

 // Redis-based rate limit handler
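The two hunks above maintain a sliding window per user in a Redis list: each request's timestamp is pushed with LPush, the list is trimmed to the newest maxCount entries, and a request is rejected while the oldest retained timestamp is still inside the window. A self-contained sketch of the same idea with go-redis (key naming and timestamp format here are illustrative, not the project's):

```go
package main

import (
	"context"
	"time"

	"github.com/redis/go-redis/v9"
)

// allow reports whether a request keyed by userKey may proceed under a
// sliding window of at most maxCount requests per window.
func allow(ctx context.Context, rdb *redis.Client, userKey string, maxCount int64, window time.Duration) (bool, error) {
	length, err := rdb.LLen(ctx, userKey).Result()
	if err != nil {
		return false, err
	}
	if length >= maxCount {
		// The oldest retained timestamp sits at the tail of the list.
		oldest, err := rdb.LIndex(ctx, userKey, -1).Result()
		if err != nil {
			return false, err
		}
		t, _ := time.Parse(time.RFC3339Nano, oldest)
		if time.Since(t) < window {
			rdb.Expire(ctx, userKey, window)
			return false, nil // still inside the window: reject
		}
	}
	// Record this request and trim the list back to maxCount entries.
	now := time.Now().Format(time.RFC3339Nano)
	rdb.LPush(ctx, userKey, now)
	rdb.LTrim(ctx, userKey, 0, maxCount-1)
	rdb.Expire(ctx, userKey, window)
	return true, nil
}
```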
@@ -118,7 +118,7 @@ func redisRateLimitHandler(duration int64, totalMaxCount, successMaxCount int) g

 // In-memory rate limit handler
 func memoryRateLimitHandler(duration int64, totalMaxCount, successMaxCount int) gin.HandlerFunc {
-	inMemoryRateLimiter.Init(common.RateLimitKeyExpirationDuration)
+	inMemoryRateLimiter.Init(time.Duration(setting.ModelRequestRateLimitDurationMinutes) * time.Minute)

 	return func(c *gin.Context) {
 		userId := strconv.Itoa(c.GetInt("id"))
@@ -153,20 +153,23 @@ func memoryRateLimitHandler(duration int64, totalMaxCount, successMaxCount int)

 // ModelRequestRateLimit is the per-model request rate-limiting middleware
 func ModelRequestRateLimit() func(c *gin.Context) {
-	// If rate limiting is not enabled, pass the request through directly
-	if !setting.ModelRequestRateLimitEnabled {
-		return defNext
-	}
-	// Compute the rate-limit parameters
-	duration := int64(setting.ModelRequestRateLimitDurationMinutes * 60)
-	totalMaxCount := setting.ModelRequestRateLimitCount
-	successMaxCount := setting.ModelRequestRateLimitSuccessCount
-
-	// Choose the rate-limit handler based on the storage type
-	if common.RedisEnabled {
-		return redisRateLimitHandler(duration, totalMaxCount, successMaxCount)
-	} else {
-		return memoryRateLimitHandler(duration, totalMaxCount, successMaxCount)
+	return func(c *gin.Context) {
+		// Check on every request whether rate limiting is enabled
+		if !setting.ModelRequestRateLimitEnabled {
+			c.Next()
+			return
+		}
+
+		// Compute the rate-limit parameters
+		duration := int64(setting.ModelRequestRateLimitDurationMinutes * 60)
+		totalMaxCount := setting.ModelRequestRateLimitCount
+		successMaxCount := setting.ModelRequestRateLimitSuccessCount
+
+		// Choose and invoke the rate-limit handler based on the storage type
+		if common.RedisEnabled {
+			redisRateLimitHandler(duration, totalMaxCount, successMaxCount)(c)
+		} else {
+			memoryRateLimitHandler(duration, totalMaxCount, successMaxCount)(c)
+		}
 	}
 }
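The rewritten constructor returns a single closure and re-reads the enable flag on every request, so toggling the option at runtime takes effect without rebuilding the router. A minimal sketch of that pattern in gin, with a hypothetical `rateLimitEnabled` flag standing in for the real setting:

```go
package main

import (
	"net/http"
	"sync/atomic"

	"github.com/gin-gonic/gin"
)

var rateLimitEnabled atomic.Bool // stands in for setting.ModelRequestRateLimitEnabled

// RateLimit builds the middleware once, but decides per request whether to
// enforce the limit, so the flag can be flipped while the server is running.
func RateLimit(limit func(*gin.Context)) gin.HandlerFunc {
	return func(c *gin.Context) {
		if !rateLimitEnabled.Load() {
			c.Next()
			return
		}
		limit(c) // delegate to the real limiter handler
	}
}

func main() {
	rateLimitEnabled.Store(true)
	r := gin.New()
	r.Use(RateLimit(func(c *gin.Context) {
		// ... real limiting logic would run here ...
		c.Next()
	}))
	r.GET("/ping", func(c *gin.Context) { c.String(http.StatusOK, "pong") })
	_ = r.Run(":8080")
}
```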
|
||||
|
||||
@@ -290,35 +290,42 @@ func (channel *Channel) Delete() error {
|
||||
|
||||
var channelStatusLock sync.Mutex
|
||||
|
||||
func UpdateChannelStatusById(id int, status int, reason string) {
|
||||
func UpdateChannelStatusById(id int, status int, reason string) bool {
|
||||
if common.MemoryCacheEnabled {
|
||||
channelStatusLock.Lock()
|
||||
defer channelStatusLock.Unlock()
|
||||
|
||||
channelCache, _ := CacheGetChannel(id)
|
||||
// 如果缓存渠道存在,且状态已是目标状态,直接返回
|
||||
if channelCache != nil && channelCache.Status == status {
|
||||
channelStatusLock.Unlock()
|
||||
return
|
||||
return false
|
||||
}
|
||||
// 如果缓存渠道不存在(说明已经被禁用),且要设置的状态不为启用,直接返回
|
||||
if channelCache == nil && status != common.ChannelStatusEnabled {
|
||||
channelStatusLock.Unlock()
|
||||
return
|
||||
return false
|
||||
}
|
||||
CacheUpdateChannelStatus(id, status)
|
||||
channelStatusLock.Unlock()
|
||||
}
|
||||
err := UpdateAbilityStatus(id, status == common.ChannelStatusEnabled)
|
||||
if err != nil {
|
||||
common.SysError("failed to update ability status: " + err.Error())
|
||||
return false
|
||||
}
|
||||
channel, err := GetChannelById(id, true)
|
||||
if err != nil {
|
||||
// find channel by id error, directly update status
|
||||
err = DB.Model(&Channel{}).Where("id = ?", id).Update("status", status).Error
|
||||
if err != nil {
|
||||
common.SysError("failed to update channel status: " + err.Error())
|
||||
result := DB.Model(&Channel{}).Where("id = ?", id).Update("status", status)
|
||||
if result.Error != nil {
|
||||
common.SysError("failed to update channel status: " + result.Error.Error())
|
||||
return false
|
||||
}
|
||||
if result.RowsAffected == 0 {
|
||||
return false
|
||||
}
|
||||
} else {
|
||||
if channel.Status == status {
|
||||
return false
|
||||
}
|
||||
// find channel by id success, update status and other info
|
||||
info := channel.GetOtherInfo()
|
||||
info["status_reason"] = reason
|
||||
@@ -328,9 +335,10 @@ func UpdateChannelStatusById(id int, status int, reason string) {
|
||||
err = channel.Save()
|
||||
if err != nil {
|
||||
common.SysError("failed to update channel status: " + err.Error())
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
func EnableChannelByTag(tag string) error {
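With the new bool return, callers of `UpdateChannelStatusById` can tell whether the status actually changed and, for example, only notify on a real transition instead of on every failed request. A hedged sketch of such a caller; the status constant and the notification step are assumptions for illustration, not code from this change set:

```go
package main

import (
	"fmt"

	"one-api/common"
	"one-api/model"
)

// disableChannel only reacts when UpdateChannelStatusById reports a real
// state transition; a false return means the channel was already disabled
// (or the update failed), so duplicate notifications are avoided.
func disableChannel(channelId int, reason string) {
	if model.UpdateChannelStatusById(channelId, common.ChannelStatusAutoDisabled, reason) {
		fmt.Printf("channel %d disabled: %s\n", channelId, reason)
		// notify admins / the channel owner here
	}
}

func main() {
	disableChannel(42, "upstream returned 401")
}
```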
|
||||
|
||||
@@ -2,12 +2,13 @@ package model
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/gin-gonic/gin"
|
||||
"one-api/common"
|
||||
"os"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/gin-gonic/gin"
|
||||
|
||||
"github.com/bytedance/gopkg/util/gopool"
|
||||
"gorm.io/gorm"
|
||||
)
|
||||
@@ -18,7 +19,7 @@ type Log struct {
|
||||
CreatedAt int64 `json:"created_at" gorm:"bigint;index:idx_created_at_id,priority:2;index:idx_created_at_type"`
|
||||
Type int `json:"type" gorm:"index:idx_created_at_type"`
|
||||
Content string `json:"content"`
|
||||
Username string `json:"username" gorm:"index:index_username_model_name,priority:2;default:''"`
|
||||
Username string `json:"username" gorm:"index;index:index_username_model_name,priority:2;default:''"`
|
||||
TokenName string `json:"token_name" gorm:"index;default:''"`
|
||||
ModelName string `json:"model_name" gorm:"index;index:index_username_model_name,priority:1;default:''"`
|
||||
Quota int `json:"quota" gorm:"default:0"`
|
||||
|
||||
@@ -4,6 +4,7 @@ import (
|
||||
"one-api/common"
|
||||
"one-api/setting"
|
||||
"one-api/setting/config"
|
||||
"one-api/setting/operation_setting"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
@@ -87,15 +88,16 @@ func InitOptionMap() {
|
||||
common.OptionMap["QuotaForInviter"] = strconv.Itoa(common.QuotaForInviter)
|
||||
common.OptionMap["QuotaForInvitee"] = strconv.Itoa(common.QuotaForInvitee)
|
||||
common.OptionMap["QuotaRemindThreshold"] = strconv.Itoa(common.QuotaRemindThreshold)
|
||||
common.OptionMap["ShouldPreConsumedQuota"] = strconv.Itoa(common.PreConsumedQuota)
|
||||
common.OptionMap["PreConsumedQuota"] = strconv.Itoa(common.PreConsumedQuota)
|
||||
common.OptionMap["ModelRequestRateLimitCount"] = strconv.Itoa(setting.ModelRequestRateLimitCount)
|
||||
common.OptionMap["ModelRequestRateLimitDurationMinutes"] = strconv.Itoa(setting.ModelRequestRateLimitDurationMinutes)
|
||||
common.OptionMap["ModelRequestRateLimitSuccessCount"] = strconv.Itoa(setting.ModelRequestRateLimitSuccessCount)
|
||||
common.OptionMap["ModelRatio"] = common.ModelRatio2JSONString()
|
||||
common.OptionMap["ModelPrice"] = common.ModelPrice2JSONString()
|
||||
common.OptionMap["ModelRatio"] = operation_setting.ModelRatio2JSONString()
|
||||
common.OptionMap["ModelPrice"] = operation_setting.ModelPrice2JSONString()
|
||||
common.OptionMap["CacheRatio"] = operation_setting.CacheRatio2JSONString()
|
||||
common.OptionMap["GroupRatio"] = setting.GroupRatio2JSONString()
|
||||
common.OptionMap["UserUsableGroups"] = setting.UserUsableGroups2JSONString()
|
||||
common.OptionMap["CompletionRatio"] = common.CompletionRatio2JSONString()
|
||||
common.OptionMap["CompletionRatio"] = operation_setting.CompletionRatio2JSONString()
|
||||
common.OptionMap["TopUpLink"] = common.TopUpLink
|
||||
common.OptionMap["ChatLink"] = common.ChatLink
|
||||
common.OptionMap["ChatLink2"] = common.ChatLink2
|
||||
@@ -110,13 +112,14 @@ func InitOptionMap() {
|
||||
common.OptionMap["MjForwardUrlEnabled"] = strconv.FormatBool(setting.MjForwardUrlEnabled)
|
||||
common.OptionMap["MjActionCheckSuccessEnabled"] = strconv.FormatBool(setting.MjActionCheckSuccessEnabled)
|
||||
common.OptionMap["CheckSensitiveEnabled"] = strconv.FormatBool(setting.CheckSensitiveEnabled)
|
||||
common.OptionMap["DemoSiteEnabled"] = strconv.FormatBool(setting.DemoSiteEnabled)
|
||||
common.OptionMap["DemoSiteEnabled"] = strconv.FormatBool(operation_setting.DemoSiteEnabled)
|
||||
common.OptionMap["SelfUseModeEnabled"] = strconv.FormatBool(operation_setting.SelfUseModeEnabled)
|
||||
common.OptionMap["ModelRequestRateLimitEnabled"] = strconv.FormatBool(setting.ModelRequestRateLimitEnabled)
|
||||
common.OptionMap["CheckSensitiveOnPromptEnabled"] = strconv.FormatBool(setting.CheckSensitiveOnPromptEnabled)
|
||||
common.OptionMap["StopOnSensitiveEnabled"] = strconv.FormatBool(setting.StopOnSensitiveEnabled)
|
||||
common.OptionMap["SensitiveWords"] = setting.SensitiveWordsToString()
|
||||
common.OptionMap["StreamCacheQueueLength"] = strconv.Itoa(setting.StreamCacheQueueLength)
|
||||
common.OptionMap["AutomaticDisableKeywords"] = setting.AutomaticDisableKeywordsToString()
|
||||
common.OptionMap["AutomaticDisableKeywords"] = operation_setting.AutomaticDisableKeywordsToString()
|
||||
|
||||
// 自动添加所有注册的模型配置
|
||||
modelConfigs := config.GlobalConfig.ExportAllConfigs()
|
||||
@@ -242,7 +245,9 @@ func updateOptionMap(key string, value string) (err error) {
|
||||
case "CheckSensitiveEnabled":
|
||||
setting.CheckSensitiveEnabled = boolValue
|
||||
case "DemoSiteEnabled":
|
||||
setting.DemoSiteEnabled = boolValue
|
||||
operation_setting.DemoSiteEnabled = boolValue
|
||||
case "SelfUseModeEnabled":
|
||||
operation_setting.SelfUseModeEnabled = boolValue
|
||||
case "CheckSensitiveOnPromptEnabled":
|
||||
setting.CheckSensitiveOnPromptEnabled = boolValue
|
||||
case "ModelRequestRateLimitEnabled":
|
||||
@@ -325,7 +330,7 @@ func updateOptionMap(key string, value string) (err error) {
|
||||
common.QuotaForInvitee, _ = strconv.Atoi(value)
|
||||
case "QuotaRemindThreshold":
|
||||
common.QuotaRemindThreshold, _ = strconv.Atoi(value)
|
||||
case "ShouldPreConsumedQuota":
|
||||
case "PreConsumedQuota":
|
||||
common.PreConsumedQuota, _ = strconv.Atoi(value)
|
||||
case "ModelRequestRateLimitCount":
|
||||
setting.ModelRequestRateLimitCount, _ = strconv.Atoi(value)
|
||||
@@ -340,15 +345,17 @@ func updateOptionMap(key string, value string) (err error) {
|
||||
case "DataExportDefaultTime":
|
||||
common.DataExportDefaultTime = value
|
||||
case "ModelRatio":
|
||||
err = common.UpdateModelRatioByJSONString(value)
|
||||
err = operation_setting.UpdateModelRatioByJSONString(value)
|
||||
case "GroupRatio":
|
||||
err = setting.UpdateGroupRatioByJSONString(value)
|
||||
case "UserUsableGroups":
|
||||
err = setting.UpdateUserUsableGroupsByJSONString(value)
|
||||
case "CompletionRatio":
|
||||
err = common.UpdateCompletionRatioByJSONString(value)
|
||||
err = operation_setting.UpdateCompletionRatioByJSONString(value)
|
||||
case "ModelPrice":
|
||||
err = common.UpdateModelPriceByJSONString(value)
|
||||
err = operation_setting.UpdateModelPriceByJSONString(value)
|
||||
case "CacheRatio":
|
||||
err = operation_setting.UpdateCacheRatioByJSONString(value)
|
||||
case "TopUpLink":
|
||||
common.TopUpLink = value
|
||||
case "ChatLink":
|
||||
@@ -362,7 +369,7 @@ func updateOptionMap(key string, value string) (err error) {
|
||||
case "SensitiveWords":
|
||||
setting.SensitiveWordsFromString(value)
|
||||
case "AutomaticDisableKeywords":
|
||||
setting.AutomaticDisableKeywordsFromString(value)
|
||||
operation_setting.AutomaticDisableKeywordsFromString(value)
|
||||
case "StreamCacheQueueLength":
|
||||
setting.StreamCacheQueueLength, _ = strconv.Atoi(value)
|
||||
}
|
||||
|
||||
@@ -2,6 +2,7 @@ package model
|
||||
|
||||
import (
|
||||
"one-api/common"
|
||||
"one-api/setting/operation_setting"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
@@ -64,14 +65,14 @@ func updatePricing() {
|
||||
ModelName: model,
|
||||
EnableGroup: groups,
|
||||
}
|
||||
modelPrice, findPrice := common.GetModelPrice(model, false)
|
||||
modelPrice, findPrice := operation_setting.GetModelPrice(model, false)
|
||||
if findPrice {
|
||||
pricing.ModelPrice = modelPrice
|
||||
pricing.QuotaType = 1
|
||||
} else {
|
||||
modelRatio, _ := common.GetModelRatio(model)
|
||||
modelRatio, _ := operation_setting.GetModelRatio(model)
|
||||
pricing.ModelRatio = modelRatio
|
||||
pricing.CompletionRatio = common.GetCompletionRatio(model)
|
||||
pricing.CompletionRatio = operation_setting.GetCompletionRatio(model)
|
||||
pricing.QuotaType = 0
|
||||
}
|
||||
pricingMap = append(pricingMap, pricing)
|
||||
|
||||
@@ -8,6 +8,7 @@ import (
|
||||
"net/http"
|
||||
"one-api/common"
|
||||
"one-api/dto"
|
||||
"one-api/relay/helper"
|
||||
"one-api/service"
|
||||
"strings"
|
||||
)
|
||||
@@ -153,7 +154,7 @@ func aliStreamHandler(c *gin.Context, resp *http.Response) (*dto.OpenAIErrorWith
|
||||
}
|
||||
stopChan <- true
|
||||
}()
|
||||
service.SetEventStreamHeaders(c)
|
||||
helper.SetEventStreamHeaders(c)
|
||||
lastResponseText := ""
|
||||
c.Stream(func(w io.Writer) bool {
|
||||
select {
|
||||
|
||||
@@ -14,7 +14,7 @@ type AwsClaudeRequest struct {
|
||||
TopP float64 `json:"top_p,omitempty"`
|
||||
TopK int `json:"top_k,omitempty"`
|
||||
StopSequences []string `json:"stop_sequences,omitempty"`
|
||||
Tools []claude.Tool `json:"tools,omitempty"`
|
||||
Tools any `json:"tools,omitempty"`
|
||||
ToolChoice any `json:"tool_choice,omitempty"`
|
||||
Thinking *claude.Thinking `json:"thinking,omitempty"`
|
||||
}
|
||||
|
||||
@@ -12,6 +12,7 @@ import (
|
||||
relaymodel "one-api/dto"
|
||||
"one-api/relay/channel/claude"
|
||||
relaycommon "one-api/relay/common"
|
||||
"one-api/relay/helper"
|
||||
"one-api/service"
|
||||
"strings"
|
||||
"time"
|
||||
@@ -203,13 +204,13 @@ func awsStreamHandler(c *gin.Context, resp *http.Response, info *relaycommon.Rel
|
||||
}
|
||||
})
|
||||
if info.ShouldIncludeUsage {
|
||||
response := service.GenerateFinalUsageResponse(id, createdTime, info.UpstreamModelName, usage)
|
||||
err := service.ObjectData(c, response)
|
||||
response := helper.GenerateFinalUsageResponse(id, createdTime, info.UpstreamModelName, usage)
|
||||
err := helper.ObjectData(c, response)
|
||||
if err != nil {
|
||||
common.SysError("send final response failed: " + err.Error())
|
||||
}
|
||||
}
|
||||
service.Done(c)
|
||||
helper.Done(c)
|
||||
if resp != nil {
|
||||
err = resp.Body.Close()
|
||||
if err != nil {
|
||||
|
||||
@@ -11,6 +11,7 @@ import (
|
||||
"one-api/common"
|
||||
"one-api/constant"
|
||||
"one-api/dto"
|
||||
"one-api/relay/helper"
|
||||
"one-api/service"
|
||||
"strings"
|
||||
"sync"
|
||||
@@ -138,7 +139,7 @@ func baiduStreamHandler(c *gin.Context, resp *http.Response) (*dto.OpenAIErrorWi
|
||||
}
|
||||
stopChan <- true
|
||||
}()
|
||||
service.SetEventStreamHeaders(c)
|
||||
helper.SetEventStreamHeaders(c)
|
||||
c.Stream(func(w io.Writer) bool {
|
||||
select {
|
||||
case data := <-dataChan:
|
||||
|
||||
@@ -58,7 +58,7 @@ type ClaudeRequest struct {
|
||||
TopK int `json:"top_k,omitempty"`
|
||||
//ClaudeMetadata `json:"metadata,omitempty"`
|
||||
Stream bool `json:"stream,omitempty"`
|
||||
Tools []Tool `json:"tools,omitempty"`
|
||||
Tools any `json:"tools,omitempty"`
|
||||
ToolChoice any `json:"tool_choice,omitempty"`
|
||||
Thinking *Thinking `json:"thinking,omitempty"`
|
||||
}
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
package claude
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
@@ -9,6 +8,7 @@ import (
|
||||
"one-api/common"
|
||||
"one-api/dto"
|
||||
relaycommon "one-api/relay/common"
|
||||
"one-api/relay/helper"
|
||||
"one-api/service"
|
||||
"one-api/setting/model_setting"
|
||||
"strings"
|
||||
@@ -443,28 +443,18 @@ func ClaudeStreamHandler(c *gin.Context, resp *http.Response, info *relaycommon.
|
||||
usage = &dto.Usage{}
|
||||
responseText := ""
|
||||
createdTime := common.GetTimestamp()
|
||||
scanner := bufio.NewScanner(resp.Body)
|
||||
scanner.Split(bufio.ScanLines)
|
||||
service.SetEventStreamHeaders(c)
|
||||
|
||||
for scanner.Scan() {
|
||||
data := scanner.Text()
|
||||
info.SetFirstResponseTime()
|
||||
if len(data) < 6 || !strings.HasPrefix(data, "data:") {
|
||||
continue
|
||||
}
|
||||
data = strings.TrimPrefix(data, "data:")
|
||||
data = strings.TrimSpace(data)
|
||||
helper.StreamScannerHandler(c, resp, info, func(data string) bool {
|
||||
var claudeResponse ClaudeResponse
|
||||
err := json.Unmarshal([]byte(data), &claudeResponse)
|
||||
if err != nil {
|
||||
common.SysError("error unmarshalling stream response: " + err.Error())
|
||||
continue
|
||||
return true
|
||||
}
|
||||
|
||||
response, claudeUsage := StreamResponseClaude2OpenAI(requestMode, &claudeResponse)
|
||||
if response == nil {
|
||||
continue
|
||||
return true
|
||||
}
|
||||
if requestMode == RequestModeCompletion {
|
||||
responseText += claudeResponse.Completion
|
||||
@@ -481,9 +471,9 @@ func ClaudeStreamHandler(c *gin.Context, resp *http.Response, info *relaycommon.
|
||||
usage.CompletionTokens = claudeUsage.OutputTokens
|
||||
usage.TotalTokens = claudeUsage.InputTokens + claudeUsage.OutputTokens
|
||||
} else if claudeResponse.Type == "content_block_start" {
|
||||
|
||||
return true
|
||||
} else {
|
||||
continue
|
||||
return true
|
||||
}
|
||||
}
|
||||
//response.Id = responseId
|
||||
@@ -491,11 +481,12 @@ func ClaudeStreamHandler(c *gin.Context, resp *http.Response, info *relaycommon.
|
||||
response.Created = createdTime
|
||||
response.Model = info.UpstreamModelName
|
||||
|
||||
err = service.ObjectData(c, response)
|
||||
err = helper.ObjectData(c, response)
|
||||
if err != nil {
|
||||
common.LogError(c, "send_stream_response_failed: "+err.Error())
|
||||
}
|
||||
}
|
||||
return true
|
||||
})
|
||||
|
||||
if requestMode == RequestModeCompletion {
|
||||
usage, _ = service.ResponseText2Usage(responseText, info.UpstreamModelName, info.PromptTokens)
|
||||
@@ -508,14 +499,14 @@ func ClaudeStreamHandler(c *gin.Context, resp *http.Response, info *relaycommon.
|
||||
}
|
||||
}
|
||||
if info.ShouldIncludeUsage {
|
||||
response := service.GenerateFinalUsageResponse(responseId, createdTime, info.UpstreamModelName, *usage)
|
||||
err := service.ObjectData(c, response)
|
||||
response := helper.GenerateFinalUsageResponse(responseId, createdTime, info.UpstreamModelName, *usage)
|
||||
err := helper.ObjectData(c, response)
|
||||
if err != nil {
|
||||
common.SysError("send final response failed: " + err.Error())
|
||||
}
|
||||
}
|
||||
service.Done(c)
|
||||
resp.Body.Close()
|
||||
helper.Done(c)
|
||||
//resp.Body.Close()
|
||||
return nil, usage
|
||||
}
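Several handlers in this change set (Claude above, Gemini and OpenAI below) drop their hand-rolled `bufio.Scanner` loops in favour of `helper.StreamScannerHandler`, which owns the scanning and timeout concerns and hands each SSE `data:` payload to a callback that returns `true` to keep reading. A self-contained sketch of that callback-driven shape (a generic SSE reader, not the project's helper):

```go
package main

import (
	"bufio"
	"fmt"
	"io"
	"strings"
)

// scanSSE reads an SSE body line by line, strips the "data:" prefix, and
// passes each payload to handle; returning false from handle stops reading.
func scanSSE(body io.Reader, handle func(data string) bool) error {
	scanner := bufio.NewScanner(body)
	for scanner.Scan() {
		line := strings.TrimSpace(scanner.Text())
		if !strings.HasPrefix(line, "data:") {
			continue // ignore comments, blank lines, event names
		}
		payload := strings.TrimSpace(strings.TrimPrefix(line, "data:"))
		if payload == "[DONE]" {
			break
		}
		if !handle(payload) {
			break
		}
	}
	return scanner.Err()
}

func main() {
	body := strings.NewReader("data: {\"id\":1}\n\ndata: {\"id\":2}\n\ndata: [DONE]\n")
	_ = scanSSE(body, func(data string) bool {
		fmt.Println("chunk:", data)
		return true // keep streaming
	})
}
```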
|
||||
|
||||
|
||||
@@ -9,6 +9,7 @@ import (
|
||||
"one-api/common"
|
||||
"one-api/dto"
|
||||
relaycommon "one-api/relay/common"
|
||||
"one-api/relay/helper"
|
||||
"one-api/service"
|
||||
"strings"
|
||||
"time"
|
||||
@@ -28,8 +29,8 @@ func cfStreamHandler(c *gin.Context, resp *http.Response, info *relaycommon.Rela
|
||||
scanner := bufio.NewScanner(resp.Body)
|
||||
scanner.Split(bufio.ScanLines)
|
||||
|
||||
service.SetEventStreamHeaders(c)
|
||||
id := service.GetResponseID(c)
|
||||
helper.SetEventStreamHeaders(c)
|
||||
id := helper.GetResponseID(c)
|
||||
var responseText string
|
||||
isFirst := true
|
||||
|
||||
@@ -57,7 +58,7 @@ func cfStreamHandler(c *gin.Context, resp *http.Response, info *relaycommon.Rela
|
||||
}
|
||||
response.Id = id
|
||||
response.Model = info.UpstreamModelName
|
||||
err = service.ObjectData(c, response)
|
||||
err = helper.ObjectData(c, response)
|
||||
if isFirst {
|
||||
isFirst = false
|
||||
info.FirstResponseTime = time.Now()
|
||||
@@ -72,13 +73,13 @@ func cfStreamHandler(c *gin.Context, resp *http.Response, info *relaycommon.Rela
|
||||
}
|
||||
usage, _ := service.ResponseText2Usage(responseText, info.UpstreamModelName, info.PromptTokens)
|
||||
if info.ShouldIncludeUsage {
|
||||
response := service.GenerateFinalUsageResponse(id, info.StartTime.Unix(), info.UpstreamModelName, *usage)
|
||||
err := service.ObjectData(c, response)
|
||||
response := helper.GenerateFinalUsageResponse(id, info.StartTime.Unix(), info.UpstreamModelName, *usage)
|
||||
err := helper.ObjectData(c, response)
|
||||
if err != nil {
|
||||
common.LogError(c, "error_rendering_final_usage_response: "+err.Error())
|
||||
}
|
||||
}
|
||||
service.Done(c)
|
||||
helper.Done(c)
|
||||
|
||||
err := resp.Body.Close()
|
||||
if err != nil {
|
||||
@@ -109,7 +110,7 @@ func cfHandler(c *gin.Context, resp *http.Response, info *relaycommon.RelayInfo)
|
||||
}
|
||||
usage, _ := service.ResponseText2Usage(responseText, info.UpstreamModelName, info.PromptTokens)
|
||||
response.Usage = *usage
|
||||
response.Id = service.GetResponseID(c)
|
||||
response.Id = helper.GetResponseID(c)
|
||||
jsonResponse, err := json.Marshal(response)
|
||||
if err != nil {
|
||||
return service.OpenAIErrorWrapper(err, "marshal_response_body_failed", http.StatusInternalServerError), nil
|
||||
|
||||
@@ -10,6 +10,7 @@ import (
|
||||
"one-api/common"
|
||||
"one-api/dto"
|
||||
relaycommon "one-api/relay/common"
|
||||
"one-api/relay/helper"
|
||||
"one-api/service"
|
||||
"strings"
|
||||
"time"
|
||||
@@ -103,7 +104,7 @@ func cohereStreamHandler(c *gin.Context, resp *http.Response, info *relaycommon.
|
||||
}
|
||||
stopChan <- true
|
||||
}()
|
||||
service.SetEventStreamHeaders(c)
|
||||
helper.SetEventStreamHeaders(c)
|
||||
isFirst := true
|
||||
c.Stream(func(w io.Writer) bool {
|
||||
select {
|
||||
|
||||
@@ -10,6 +10,7 @@ import (
|
||||
"one-api/constant"
|
||||
"one-api/dto"
|
||||
relaycommon "one-api/relay/common"
|
||||
"one-api/relay/helper"
|
||||
"one-api/service"
|
||||
"strings"
|
||||
)
|
||||
@@ -66,7 +67,7 @@ func difyStreamHandler(c *gin.Context, resp *http.Response, info *relaycommon.Re
|
||||
scanner := bufio.NewScanner(resp.Body)
|
||||
scanner.Split(bufio.ScanLines)
|
||||
|
||||
service.SetEventStreamHeaders(c)
|
||||
helper.SetEventStreamHeaders(c)
|
||||
|
||||
for scanner.Scan() {
|
||||
data := scanner.Text()
|
||||
@@ -92,7 +93,7 @@ func difyStreamHandler(c *gin.Context, resp *http.Response, info *relaycommon.Re
|
||||
responseText += openaiResponse.Choices[0].Delta.GetContentString()
|
||||
}
|
||||
}
|
||||
err = service.ObjectData(c, openaiResponse)
|
||||
err = helper.ObjectData(c, openaiResponse)
|
||||
if err != nil {
|
||||
common.SysError(err.Error())
|
||||
}
|
||||
@@ -100,7 +101,7 @@ func difyStreamHandler(c *gin.Context, resp *http.Response, info *relaycommon.Re
|
||||
if err := scanner.Err(); err != nil {
|
||||
common.SysError("error reading stream: " + err.Error())
|
||||
}
|
||||
service.Done(c)
|
||||
helper.Done(c)
|
||||
err := resp.Body.Close()
|
||||
if err != nil {
|
||||
//return service.OpenAIErrorWrapper(err, "close_response_body_failed", http.StatusInternalServerError), nil
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
package gemini
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
@@ -10,6 +9,7 @@ import (
|
||||
"one-api/constant"
|
||||
"one-api/dto"
|
||||
relaycommon "one-api/relay/common"
|
||||
"one-api/relay/helper"
|
||||
"one-api/service"
|
||||
"one-api/setting/model_setting"
|
||||
"strings"
|
||||
@@ -429,10 +429,10 @@ func responseGeminiChat2OpenAI(response *GeminiChatResponse) *dto.OpenAITextResp
|
||||
|
||||
func streamResponseGeminiChat2OpenAI(geminiResponse *GeminiChatResponse) (*dto.ChatCompletionsStreamResponse, bool) {
|
||||
choices := make([]dto.ChatCompletionsStreamResponseChoice, 0, len(geminiResponse.Candidates))
|
||||
is_stop := false
|
||||
isStop := false
|
||||
for _, candidate := range geminiResponse.Candidates {
|
||||
if candidate.FinishReason != nil && *candidate.FinishReason == "STOP" {
|
||||
is_stop = true
|
||||
isStop = true
|
||||
candidate.FinishReason = nil
|
||||
}
|
||||
choice := dto.ChatCompletionsStreamResponseChoice{
|
||||
@@ -482,9 +482,8 @@ func streamResponseGeminiChat2OpenAI(geminiResponse *GeminiChatResponse) (*dto.C
|
||||
|
||||
var response dto.ChatCompletionsStreamResponse
|
||||
response.Object = "chat.completion.chunk"
|
||||
response.Model = "gemini"
|
||||
response.Choices = choices
|
||||
return &response, is_stop
|
||||
return &response, isStop
|
||||
}
|
||||
|
||||
func GeminiChatStreamHandler(c *gin.Context, resp *http.Response, info *relaycommon.RelayInfo) (*dto.OpenAIErrorWithStatusCode, *dto.Usage) {
|
||||
@@ -492,27 +491,16 @@ func GeminiChatStreamHandler(c *gin.Context, resp *http.Response, info *relaycom
|
||||
id := fmt.Sprintf("chatcmpl-%s", common.GetUUID())
|
||||
createAt := common.GetTimestamp()
|
||||
var usage = &dto.Usage{}
|
||||
scanner := bufio.NewScanner(resp.Body)
|
||||
scanner.Split(bufio.ScanLines)
|
||||
|
||||
service.SetEventStreamHeaders(c)
|
||||
for scanner.Scan() {
|
||||
data := scanner.Text()
|
||||
info.SetFirstResponseTime()
|
||||
data = strings.TrimSpace(data)
|
||||
if !strings.HasPrefix(data, "data: ") {
|
||||
continue
|
||||
}
|
||||
data = strings.TrimPrefix(data, "data: ")
|
||||
data = strings.TrimSuffix(data, "\"")
|
||||
helper.StreamScannerHandler(c, resp, info, func(data string) bool {
|
||||
var geminiResponse GeminiChatResponse
|
||||
err := json.Unmarshal([]byte(data), &geminiResponse)
|
||||
if err != nil {
|
||||
common.LogError(c, "error unmarshalling stream response: "+err.Error())
|
||||
continue
|
||||
return false
|
||||
}
|
||||
|
||||
response, is_stop := streamResponseGeminiChat2OpenAI(&geminiResponse)
|
||||
response, isStop := streamResponseGeminiChat2OpenAI(&geminiResponse)
|
||||
response.Id = id
|
||||
response.Created = createAt
|
||||
response.Model = info.UpstreamModelName
|
||||
@@ -521,15 +509,16 @@ func GeminiChatStreamHandler(c *gin.Context, resp *http.Response, info *relaycom
|
||||
usage.PromptTokens = geminiResponse.UsageMetadata.PromptTokenCount
|
||||
usage.CompletionTokens = geminiResponse.UsageMetadata.CandidatesTokenCount
|
||||
}
|
||||
err = service.ObjectData(c, response)
|
||||
err = helper.ObjectData(c, response)
|
||||
if err != nil {
|
||||
common.LogError(c, err.Error())
|
||||
}
|
||||
if is_stop {
|
||||
response := service.GenerateStopResponse(id, createAt, info.UpstreamModelName, constant.FinishReasonStop)
|
||||
service.ObjectData(c, response)
|
||||
if isStop {
|
||||
response := helper.GenerateStopResponse(id, createAt, info.UpstreamModelName, constant.FinishReasonStop)
|
||||
helper.ObjectData(c, response)
|
||||
}
|
||||
}
|
||||
return true
|
||||
})
|
||||
|
||||
var response *dto.ChatCompletionsStreamResponse
|
||||
|
||||
@@ -538,14 +527,14 @@ func GeminiChatStreamHandler(c *gin.Context, resp *http.Response, info *relaycom
|
||||
usage.CompletionTokenDetails.TextTokens = usage.CompletionTokens
|
||||
|
||||
if info.ShouldIncludeUsage {
|
||||
response = service.GenerateFinalUsageResponse(id, createAt, info.UpstreamModelName, *usage)
|
||||
err := service.ObjectData(c, response)
|
||||
response = helper.GenerateFinalUsageResponse(id, createAt, info.UpstreamModelName, *usage)
|
||||
err := helper.ObjectData(c, response)
|
||||
if err != nil {
|
||||
common.SysError("send final response failed: " + err.Error())
|
||||
}
|
||||
}
|
||||
service.Done(c)
|
||||
resp.Body.Close()
|
||||
helper.Done(c)
|
||||
//resp.Body.Close()
|
||||
return nil, usage
|
||||
}
|
||||
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
package openai
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
@@ -14,11 +13,10 @@ import (
|
||||
"one-api/dto"
|
||||
relaycommon "one-api/relay/common"
|
||||
relayconstant "one-api/relay/constant"
|
||||
"one-api/relay/helper"
|
||||
"one-api/service"
|
||||
"os"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/bytedance/gopkg/util/gopool"
|
||||
"github.com/gin-gonic/gin"
|
||||
@@ -32,7 +30,7 @@ func sendStreamData(c *gin.Context, info *relaycommon.RelayInfo, data string, fo
|
||||
}
|
||||
|
||||
if !forceFormat && !thinkToContent {
|
||||
return service.StringData(c, data)
|
||||
return helper.StringData(c, data)
|
||||
}
|
||||
|
||||
var lastStreamResponse dto.ChatCompletionsStreamResponse
|
||||
@@ -41,44 +39,68 @@ func sendStreamData(c *gin.Context, info *relaycommon.RelayInfo, data string, fo
|
||||
}
|
||||
|
||||
if !thinkToContent {
|
||||
return service.ObjectData(c, lastStreamResponse)
|
||||
return helper.ObjectData(c, lastStreamResponse)
|
||||
}
|
||||
|
||||
hasThinkingContent := false
|
||||
hasContent := false
|
||||
var thinkingContent strings.Builder
|
||||
for _, choice := range lastStreamResponse.Choices {
|
||||
if len(choice.Delta.GetReasoningContent()) > 0 {
|
||||
hasThinkingContent = true
|
||||
thinkingContent.WriteString(choice.Delta.GetReasoningContent())
|
||||
}
|
||||
if len(choice.Delta.GetContentString()) > 0 {
|
||||
hasContent = true
|
||||
}
|
||||
}
|
||||
|
||||
// Handle think to content conversion
|
||||
if info.IsFirstResponse {
|
||||
response := lastStreamResponse.Copy()
|
||||
for i := range response.Choices {
|
||||
response.Choices[i].Delta.SetContentString("<think>\n")
|
||||
response.Choices[i].Delta.SetReasoningContent("")
|
||||
if info.ThinkingContentInfo.IsFirstThinkingContent {
|
||||
if hasThinkingContent {
|
||||
response := lastStreamResponse.Copy()
|
||||
for i := range response.Choices {
|
||||
// send `think` tag with thinking content
|
||||
response.Choices[i].Delta.SetContentString("<think>\n" + thinkingContent.String())
|
||||
response.Choices[i].Delta.ReasoningContent = nil
|
||||
response.Choices[i].Delta.Reasoning = nil
|
||||
}
|
||||
info.ThinkingContentInfo.IsFirstThinkingContent = false
|
||||
return helper.ObjectData(c, response)
|
||||
}
|
||||
service.ObjectData(c, response)
|
||||
}
|
||||
|
||||
if lastStreamResponse.Choices == nil || len(lastStreamResponse.Choices) == 0 {
|
||||
return service.ObjectData(c, lastStreamResponse)
|
||||
return helper.ObjectData(c, lastStreamResponse)
|
||||
}
|
||||
|
||||
// Process each choice
|
||||
for i, choice := range lastStreamResponse.Choices {
|
||||
// Handle transition from thinking to content
|
||||
if len(choice.Delta.GetContentString()) > 0 && !info.SendLastReasoningResponse {
|
||||
if hasContent && !info.ThinkingContentInfo.SendLastThinkingContent {
|
||||
response := lastStreamResponse.Copy()
|
||||
for j := range response.Choices {
|
||||
response.Choices[j].Delta.SetContentString("\n</think>")
|
||||
response.Choices[j].Delta.SetReasoningContent("")
|
||||
response.Choices[j].Delta.SetContentString("\n</think>\n")
|
||||
response.Choices[j].Delta.ReasoningContent = nil
|
||||
response.Choices[j].Delta.Reasoning = nil
|
||||
}
|
||||
info.SendLastReasoningResponse = true
|
||||
service.ObjectData(c, response)
|
||||
info.ThinkingContentInfo.SendLastThinkingContent = true
|
||||
helper.ObjectData(c, response)
|
||||
}
|
||||
|
||||
// Convert reasoning content to regular content
|
||||
if len(choice.Delta.GetReasoningContent()) > 0 {
|
||||
lastStreamResponse.Choices[i].Delta.SetContentString(choice.Delta.GetReasoningContent())
|
||||
lastStreamResponse.Choices[i].Delta.SetReasoningContent("")
|
||||
lastStreamResponse.Choices[i].Delta.ReasoningContent = nil
|
||||
lastStreamResponse.Choices[i].Delta.Reasoning = nil
|
||||
} else if !hasThinkingContent && !hasContent {
|
||||
// flush thinking content
|
||||
lastStreamResponse.Choices[i].Delta.ReasoningContent = nil
|
||||
lastStreamResponse.Choices[i].Delta.Reasoning = nil
|
||||
}
|
||||
}
|
||||
|
||||
return service.ObjectData(c, lastStreamResponse)
|
||||
return helper.ObjectData(c, lastStreamResponse)
|
||||
}
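With `thinkToContent` enabled, the rewritten `sendStreamData` turns reasoning deltas into ordinary content wrapped in a `<think>` block: the first reasoning chunk is prefixed with `<think>\n`, later reasoning chunks pass through as content, and the first real content chunk is preceded by a closing `\n</think>\n`. A string-level sketch of the resulting client-visible stream (chunk values are illustrative):

```go
package main

import (
	"fmt"
	"strings"
)

// chunk is a simplified stream delta: either reasoning text or content text.
type chunk struct{ reasoning, content string }

// thinkToContent rewrites reasoning chunks as content inside <think> tags.
func thinkToContent(chunks []chunk) string {
	var out strings.Builder
	openedThink, closedThink := false, false
	for _, ch := range chunks {
		if ch.reasoning != "" {
			if !openedThink {
				out.WriteString("<think>\n") // first reasoning chunk opens the tag
				openedThink = true
			}
			out.WriteString(ch.reasoning)
		}
		if ch.content != "" {
			if openedThink && !closedThink {
				out.WriteString("\n</think>\n") // first content chunk closes the tag
				closedThink = true
			}
			out.WriteString(ch.content)
		}
	}
	return out.String()
}

func main() {
	stream := []chunk{{reasoning: "Let me check."}, {reasoning: " Yes."}, {content: "The answer is 4."}}
	fmt.Println(thinkToContent(stream))
	// <think>
	// Let me check. Yes.
	// </think>
	// The answer is 4.
}
```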
|
||||
|
||||
func OaiStreamHandler(c *gin.Context, resp *http.Response, info *relaycommon.RelayInfo) (*dto.OpenAIErrorWithStatusCode, *dto.Usage) {
|
||||
@@ -108,64 +130,22 @@ func OaiStreamHandler(c *gin.Context, resp *http.Response, info *relaycommon.Rel
|
||||
}
|
||||
|
||||
toolCount := 0
|
||||
scanner := bufio.NewScanner(resp.Body)
|
||||
scanner.Split(bufio.ScanLines)
|
||||
|
||||
service.SetEventStreamHeaders(c)
|
||||
streamingTimeout := time.Duration(constant.StreamingTimeout) * time.Second
|
||||
if strings.HasPrefix(info.UpstreamModelName, "o1") || strings.HasPrefix(info.UpstreamModelName, "o3") {
|
||||
// twice timeout for o1 model
|
||||
streamingTimeout *= 2
|
||||
}
|
||||
ticker := time.NewTicker(streamingTimeout)
|
||||
defer ticker.Stop()
|
||||
|
||||
stopChan := make(chan bool)
|
||||
defer close(stopChan)
|
||||
var (
|
||||
lastStreamData string
|
||||
mu sync.Mutex
|
||||
)
|
||||
gopool.Go(func() {
|
||||
for scanner.Scan() {
|
||||
//info.SetFirstResponseTime()
|
||||
ticker.Reset(time.Duration(constant.StreamingTimeout) * time.Second)
|
||||
data := scanner.Text()
|
||||
if common.DebugEnabled {
|
||||
println(data)
|
||||
}
|
||||
if len(data) < 6 { // ignore blank line or wrong format
|
||||
continue
|
||||
}
|
||||
if data[:5] != "data:" && data[:6] != "[DONE]" {
|
||||
continue
|
||||
}
|
||||
mu.Lock()
|
||||
data = data[5:]
|
||||
data = strings.TrimSpace(data)
|
||||
if !strings.HasPrefix(data, "[DONE]") {
|
||||
if lastStreamData != "" {
|
||||
err := sendStreamData(c, info, lastStreamData, forceFormat, thinkToContent)
|
||||
if err != nil {
|
||||
common.LogError(c, "streaming error: "+err.Error())
|
||||
}
|
||||
info.SetFirstResponseTime()
|
||||
}
|
||||
lastStreamData = data
|
||||
streamItems = append(streamItems, data)
|
||||
}
|
||||
mu.Unlock()
|
||||
}
|
||||
common.SafeSendBool(stopChan, true)
|
||||
})
|
||||
|
||||
select {
|
||||
case <-ticker.C:
|
||||
// 超时处理逻辑
|
||||
common.LogError(c, "streaming timeout")
|
||||
case <-stopChan:
|
||||
// 正常结束
|
||||
}
|
||||
helper.StreamScannerHandler(c, resp, info, func(data string) bool {
|
||||
if lastStreamData != "" {
|
||||
err := sendStreamData(c, info, lastStreamData, forceFormat, thinkToContent)
|
||||
if err != nil {
|
||||
common.LogError(c, "streaming error: "+err.Error())
|
||||
}
|
||||
}
|
||||
lastStreamData = data
|
||||
streamItems = append(streamItems, data)
|
||||
return true
|
||||
})
|
||||
|
||||
shouldSendLastResp := true
|
||||
var lastStreamResponse dto.ChatCompletionsStreamResponse
|
||||
@@ -210,7 +190,10 @@ func OaiStreamHandler(c *gin.Context, resp *http.Response, info *relaycommon.Rel
|
||||
//}
|
||||
for _, choice := range streamResponse.Choices {
|
||||
responseTextBuilder.WriteString(choice.Delta.GetContentString())
|
||||
|
||||
// handle both reasoning_content and reasoning
|
||||
responseTextBuilder.WriteString(choice.Delta.GetReasoningContent())
|
||||
|
||||
if choice.Delta.ToolCalls != nil {
|
||||
if len(choice.Delta.ToolCalls) > toolCount {
|
||||
toolCount = len(choice.Delta.ToolCalls)
|
||||
@@ -231,7 +214,7 @@ func OaiStreamHandler(c *gin.Context, resp *http.Response, info *relaycommon.Rel
|
||||
//}
|
||||
for _, choice := range streamResponse.Choices {
|
||||
responseTextBuilder.WriteString(choice.Delta.GetContentString())
|
||||
responseTextBuilder.WriteString(choice.Delta.GetReasoningContent())
|
||||
responseTextBuilder.WriteString(choice.Delta.GetReasoningContent()) // This will handle both reasoning_content and reasoning
|
||||
if choice.Delta.ToolCalls != nil {
|
||||
if len(choice.Delta.ToolCalls) > toolCount {
|
||||
toolCount = len(choice.Delta.ToolCalls)
|
||||
@@ -274,14 +257,14 @@ func OaiStreamHandler(c *gin.Context, resp *http.Response, info *relaycommon.Rel
|
||||
}
|
||||
|
||||
if info.ShouldIncludeUsage && !containStreamUsage {
|
||||
response := service.GenerateFinalUsageResponse(responseId, createAt, model, *usage)
|
||||
response := helper.GenerateFinalUsageResponse(responseId, createAt, model, *usage)
|
||||
response.SetSystemFingerprint(systemFingerprint)
|
||||
service.ObjectData(c, response)
|
||||
helper.ObjectData(c, response)
|
||||
}
|
||||
|
||||
service.Done(c)
|
||||
helper.Done(c)
|
||||
|
||||
resp.Body.Close()
|
||||
//resp.Body.Close()
|
||||
return nil, usage
|
||||
}
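`OaiStreamHandler` keeps `lastStreamData` one chunk behind the scanner: each incoming payload flushes the previous one, so when the upstream finishes, the final chunk is still in hand and can be inspected, or merged with usage data, before it is sent. A minimal sketch of that buffering pattern:

```go
package main

import "fmt"

func main() {
	chunks := []string{`{"delta":"Hel"}`, `{"delta":"lo"}`, `{"usage":{"total_tokens":7}}`}

	// Hold each chunk back until the next one arrives.
	last := ""
	for _, data := range chunks {
		if last != "" {
			fmt.Println("send:", last) // forward the previous chunk untouched
		}
		last = data
	}

	// The final chunk never entered the loop body above, so it can still be
	// modified (e.g. have usage attached) or dropped before sending.
	fmt.Println("send (possibly rewritten):", last)
}
```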
|
||||
|
||||
@@ -323,7 +306,7 @@ func OpenaiHandler(c *gin.Context, resp *http.Response, promptTokens int, model
|
||||
if simpleResponse.Usage.TotalTokens == 0 || (simpleResponse.Usage.PromptTokens == 0 && simpleResponse.Usage.CompletionTokens == 0) {
|
||||
completionTokens := 0
|
||||
for _, choice := range simpleResponse.Choices {
|
||||
ctkm, _ := service.CountTextToken(choice.Message.StringContent()+choice.Message.ReasoningContent, model)
|
||||
ctkm, _ := service.CountTextToken(choice.Message.StringContent()+choice.Message.ReasoningContent+choice.Message.Reasoning, model)
|
||||
completionTokens += ctkm
|
||||
}
|
||||
simpleResponse.Usage = dto.Usage{
|
||||
@@ -512,7 +495,7 @@ func OpenaiRealtimeHandler(c *gin.Context, info *relaycommon.RelayInfo) (*dto.Op
|
||||
localUsage.InputTokenDetails.TextTokens += textToken
|
||||
localUsage.InputTokenDetails.AudioTokens += audioToken
|
||||
|
||||
err = service.WssString(c, targetConn, string(message))
|
||||
err = helper.WssString(c, targetConn, string(message))
|
||||
if err != nil {
|
||||
errChan <- fmt.Errorf("error writing to target: %v", err)
|
||||
return
|
||||
@@ -618,7 +601,7 @@ func OpenaiRealtimeHandler(c *gin.Context, info *relaycommon.RelayInfo) (*dto.Op
|
||||
localUsage.OutputTokenDetails.AudioTokens += audioToken
|
||||
}
|
||||
|
||||
err = service.WssString(c, clientConn, string(message))
|
||||
err = helper.WssString(c, clientConn, string(message))
|
||||
if err != nil {
|
||||
errChan <- fmt.Errorf("error writing to client: %v", err)
|
||||
return
|
||||
|
||||
@@ -9,6 +9,7 @@ import (
|
||||
"one-api/common"
|
||||
"one-api/constant"
|
||||
"one-api/dto"
|
||||
"one-api/relay/helper"
|
||||
"one-api/service"
|
||||
)
|
||||
|
||||
@@ -112,7 +113,7 @@ func palmStreamHandler(c *gin.Context, resp *http.Response) (*dto.OpenAIErrorWit
|
||||
dataChan <- string(jsonResponse)
|
||||
stopChan <- true
|
||||
}()
|
||||
service.SetEventStreamHeaders(c)
|
||||
helper.SetEventStreamHeaders(c)
|
||||
c.Stream(func(w io.Writer) bool {
|
||||
select {
|
||||
case data := <-dataChan:
|
||||
|
||||
@@ -14,6 +14,7 @@ import (
|
||||
"one-api/common"
|
||||
"one-api/constant"
|
||||
"one-api/dto"
|
||||
"one-api/relay/helper"
|
||||
"one-api/service"
|
||||
"strconv"
|
||||
"strings"
|
||||
@@ -91,7 +92,7 @@ func tencentStreamHandler(c *gin.Context, resp *http.Response) (*dto.OpenAIError
|
||||
scanner := bufio.NewScanner(resp.Body)
|
||||
scanner.Split(bufio.ScanLines)
|
||||
|
||||
service.SetEventStreamHeaders(c)
|
||||
helper.SetEventStreamHeaders(c)
|
||||
|
||||
for scanner.Scan() {
|
||||
data := scanner.Text()
|
||||
@@ -112,7 +113,7 @@ func tencentStreamHandler(c *gin.Context, resp *http.Response) (*dto.OpenAIError
|
||||
responseText += response.Choices[0].Delta.GetContentString()
|
||||
}
|
||||
|
||||
err = service.ObjectData(c, response)
|
||||
err = helper.ObjectData(c, response)
|
||||
if err != nil {
|
||||
common.SysError(err.Error())
|
||||
}
|
||||
@@ -122,7 +123,7 @@ func tencentStreamHandler(c *gin.Context, resp *http.Response) (*dto.OpenAIError
|
||||
common.SysError("error reading stream: " + err.Error())
|
||||
}
|
||||
|
||||
service.Done(c)
|
||||
helper.Done(c)
|
||||
|
||||
err := resp.Body.Close()
|
||||
if err != nil {
|
||||
|
||||
@@ -5,7 +5,6 @@ import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"github.com/gin-gonic/gin"
|
||||
"github.com/jinzhu/copier"
|
||||
"io"
|
||||
"net/http"
|
||||
"one-api/dto"
|
||||
@@ -28,6 +27,7 @@ var claudeModelMap = map[string]string{
|
||||
"claude-3-opus-20240229": "claude-3-opus@20240229",
|
||||
"claude-3-haiku-20240307": "claude-3-haiku@20240307",
|
||||
"claude-3-5-sonnet-20240620": "claude-3-5-sonnet@20240620",
|
||||
"claude-3-5-sonnet-20241022": "claude-3-5-sonnet-v2@20241022",
|
||||
"claude-3-7-sonnet-20250219": "claude-3-7-sonnet@20250219",
|
||||
}
|
||||
|
||||
@@ -86,15 +86,16 @@ func (a *Adaptor) GetRequestURL(info *relaycommon.RelayInfo) (string, error) {
|
||||
} else {
|
||||
suffix = "rawPredict"
|
||||
}
|
||||
model := info.UpstreamModelName
|
||||
if v, ok := claudeModelMap[info.UpstreamModelName]; ok {
|
||||
info.UpstreamModelName = v
|
||||
model = v
|
||||
}
|
||||
return fmt.Sprintf(
|
||||
"https://%s-aiplatform.googleapis.com/v1/projects/%s/locations/%s/publishers/anthropic/models/%s:%s",
|
||||
region,
|
||||
adc.ProjectID,
|
||||
region,
|
||||
info.UpstreamModelName,
|
||||
model,
|
||||
suffix,
|
||||
), nil
|
||||
} else if a.RequestMode == RequestModeLlama {
|
||||
@@ -127,13 +128,9 @@ func (a *Adaptor) ConvertRequest(c *gin.Context, info *relaycommon.RelayInfo, re
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
vertexClaudeReq := &VertexAIClaudeRequest{
|
||||
AnthropicVersion: anthropicVersion,
|
||||
}
|
||||
if err = copier.Copy(vertexClaudeReq, claudeReq); err != nil {
|
||||
return nil, errors.New("failed to copy claude request")
|
||||
}
|
||||
vertexClaudeReq := copyRequest(claudeReq, anthropicVersion)
|
||||
c.Set("request_model", claudeReq.Model)
|
||||
info.UpstreamModelName = claudeReq.Model
|
||||
return vertexClaudeReq, nil
|
||||
} else if a.RequestMode == RequestModeGemini {
|
||||
geminiRequest, err := gemini.CovertGemini2OpenAI(*request)
|
||||
|
||||
@@ -1,17 +1,37 @@
 package vertex

-import "one-api/relay/channel/claude"
+import (
+	"one-api/relay/channel/claude"
+)

 type VertexAIClaudeRequest struct {
 	AnthropicVersion string `json:"anthropic_version"`
 	Messages []claude.ClaudeMessage `json:"messages"`
-	System string `json:"system,omitempty"`
-	MaxTokens int `json:"max_tokens,omitempty"`
+	System any `json:"system,omitempty"`
+	MaxTokens uint `json:"max_tokens,omitempty"`
 	StopSequences []string `json:"stop_sequences,omitempty"`
 	Stream bool `json:"stream,omitempty"`
 	Temperature *float64 `json:"temperature,omitempty"`
 	TopP float64 `json:"top_p,omitempty"`
 	TopK int `json:"top_k,omitempty"`
-	Tools []claude.Tool `json:"tools,omitempty"`
+	Tools any `json:"tools,omitempty"`
 	ToolChoice any `json:"tool_choice,omitempty"`
 	Thinking *claude.Thinking `json:"thinking,omitempty"`
 }
+
+func copyRequest(req *claude.ClaudeRequest, version string) *VertexAIClaudeRequest {
+	return &VertexAIClaudeRequest{
+		AnthropicVersion: version,
+		System: req.System,
+		Messages: req.Messages,
+		MaxTokens: req.MaxTokens,
+		Stream: req.Stream,
+		Temperature: req.Temperature,
+		TopP: req.TopP,
+		TopK: req.TopK,
+		StopSequences: req.StopSequences,
+		Tools: req.Tools,
+		ToolChoice: req.ToolChoice,
+		Thinking: req.Thinking,
+	}
+}
@@ -14,6 +14,7 @@ import (
 	"one-api/common"
 	"one-api/constant"
 	"one-api/dto"
+	"one-api/relay/helper"
 	"one-api/service"
 	"strings"
 	"time"
@@ -132,7 +133,7 @@ func xunfeiStreamHandler(c *gin.Context, textRequest dto.GeneralOpenAIRequest, a
 	if err != nil {
 		return service.OpenAIErrorWrapper(err, "make xunfei request err", http.StatusInternalServerError), nil
 	}
-	service.SetEventStreamHeaders(c)
+	helper.SetEventStreamHeaders(c)
 	var usage dto.Usage
 	c.Stream(func(w io.Writer) bool {
 		select {
@@ -10,6 +10,7 @@ import (
 	"one-api/common"
 	"one-api/constant"
 	"one-api/dto"
+	"one-api/relay/helper"
 	"one-api/service"
 	"strings"
 	"sync"
@@ -177,7 +178,7 @@ func zhipuStreamHandler(c *gin.Context, resp *http.Response) (*dto.OpenAIErrorWi
 		}
 		stopChan <- true
 	}()
-	service.SetEventStreamHeaders(c)
+	helper.SetEventStreamHeaders(c)
 	c.Stream(func(w io.Writer) bool {
 		select {
 		case data := <-dataChan:
@@ -10,6 +10,7 @@ import (
 	"net/http"
 	"one-api/common"
 	"one-api/dto"
+	"one-api/relay/helper"
 	"one-api/service"
 	"strings"
 	"sync"
@@ -197,7 +198,7 @@ func zhipuStreamHandler(c *gin.Context, resp *http.Response) (*dto.OpenAIErrorWi
 		}
 		stopChan <- true
 	}()
-	service.SetEventStreamHeaders(c)
+	helper.SetEventStreamHeaders(c)
 	c.Stream(func(w io.Writer) bool {
 		select {
 		case data := <-dataChan:
@@ -12,25 +12,30 @@ import (
|
||||
"github.com/gorilla/websocket"
|
||||
)
|
||||
|
||||
type ThinkingContentInfo struct {
|
||||
IsFirstThinkingContent bool
|
||||
SendLastThinkingContent bool
|
||||
}
|
||||
|
||||
type RelayInfo struct {
|
||||
ChannelType int
|
||||
ChannelId int
|
||||
TokenId int
|
||||
TokenKey string
|
||||
UserId int
|
||||
Group string
|
||||
TokenUnlimited bool
|
||||
StartTime time.Time
|
||||
FirstResponseTime time.Time
|
||||
IsFirstResponse bool
|
||||
SendLastReasoningResponse bool
|
||||
ApiType int
|
||||
IsStream bool
|
||||
IsPlayground bool
|
||||
UsePrice bool
|
||||
RelayMode int
|
||||
UpstreamModelName string
|
||||
OriginModelName string
|
||||
ChannelType int
|
||||
ChannelId int
|
||||
TokenId int
|
||||
TokenKey string
|
||||
UserId int
|
||||
Group string
|
||||
TokenUnlimited bool
|
||||
StartTime time.Time
|
||||
FirstResponseTime time.Time
|
||||
isFirstResponse bool
|
||||
//SendLastReasoningResponse bool
|
||||
ApiType int
|
||||
IsStream bool
|
||||
IsPlayground bool
|
||||
UsePrice bool
|
||||
RelayMode int
|
||||
UpstreamModelName string
|
||||
OriginModelName string
|
||||
//RecodeModelName string
|
||||
RequestURLPath string
|
||||
ApiVersion string
|
||||
@@ -53,6 +58,7 @@ type RelayInfo struct {
|
||||
UserSetting map[string]interface{}
|
||||
UserEmail string
|
||||
UserQuota int
|
||||
ThinkingContentInfo
|
||||
}
|
||||
|
||||
// 定义支持流式选项的通道类型
|
||||
@@ -95,7 +101,7 @@ func GenRelayInfo(c *gin.Context) *RelayInfo {
|
||||
UserQuota: c.GetInt(constant.ContextKeyUserQuota),
|
||||
UserSetting: c.GetStringMap(constant.ContextKeyUserSetting),
|
||||
UserEmail: c.GetString(constant.ContextKeyUserEmail),
|
||||
IsFirstResponse: true,
|
||||
isFirstResponse: true,
|
||||
RelayMode: relayconstant.Path2RelayMode(c.Request.URL.Path),
|
||||
BaseUrl: c.GetString("base_url"),
|
||||
RequestURLPath: c.Request.URL.String(),
|
||||
@@ -117,6 +123,10 @@ func GenRelayInfo(c *gin.Context) *RelayInfo {
|
||||
ApiKey: strings.TrimPrefix(c.Request.Header.Get("Authorization"), "Bearer "),
|
||||
Organization: c.GetString("channel_organization"),
|
||||
ChannelSetting: channelSetting,
|
||||
ThinkingContentInfo: ThinkingContentInfo{
|
||||
IsFirstThinkingContent: true,
|
||||
SendLastThinkingContent: false,
|
||||
},
|
||||
}
|
||||
if strings.HasPrefix(c.Request.URL.Path, "/pg") {
|
||||
info.IsPlayground = true
|
||||
@@ -147,9 +157,9 @@ func (info *RelayInfo) SetIsStream(isStream bool) {
|
||||
}
|
||||
|
||||
func (info *RelayInfo) SetFirstResponseTime() {
|
||||
if info.IsFirstResponse {
|
||||
if info.isFirstResponse {
|
||||
info.FirstResponseTime = time.Now()
|
||||
info.IsFirstResponse = false
|
||||
info.isFirstResponse = false
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
package service
|
||||
package helper
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
@@ -6,31 +6,42 @@ import (
|
||||
"one-api/common"
|
||||
relaycommon "one-api/relay/common"
|
||||
"one-api/setting"
|
||||
"one-api/setting/operation_setting"
|
||||
)
|
||||
|
||||
type PriceData struct {
|
||||
ModelPrice float64
|
||||
ModelRatio float64
|
||||
CompletionRatio float64
|
||||
CacheRatio float64
|
||||
GroupRatio float64
|
||||
UsePrice bool
|
||||
ShouldPreConsumedQuota int
|
||||
}
|
||||
|
||||
func ModelPriceHelper(c *gin.Context, info *relaycommon.RelayInfo, promptTokens int, maxTokens int) (PriceData, error) {
|
||||
modelPrice, usePrice := common.GetModelPrice(info.OriginModelName, false)
|
||||
modelPrice, usePrice := operation_setting.GetModelPrice(info.OriginModelName, false)
|
||||
groupRatio := setting.GetGroupRatio(info.Group)
|
||||
var preConsumedQuota int
|
||||
var modelRatio float64
|
||||
var completionRatio float64
|
||||
var cacheRatio float64
|
||||
if !usePrice {
|
||||
preConsumedTokens := common.PreConsumedQuota
|
||||
if maxTokens != 0 {
|
||||
preConsumedTokens = promptTokens + maxTokens
|
||||
}
|
||||
var success bool
|
||||
modelRatio, success = common.GetModelRatio(info.OriginModelName)
|
||||
modelRatio, success = operation_setting.GetModelRatio(info.OriginModelName)
|
||||
if !success {
|
||||
return PriceData{}, fmt.Errorf("model %s ratio not found", info.OriginModelName)
|
||||
if info.UserId == 1 {
|
||||
return PriceData{}, fmt.Errorf("模型 %s 倍率或价格未配置,请设置或开始自用模式;Model %s ratio or price not set, please set or start self-use mode", info.OriginModelName, info.OriginModelName)
|
||||
} else {
|
||||
return PriceData{}, fmt.Errorf("模型 %s 倍率或价格未配置, 请联系管理员设置;Model %s ratio or price not set, please contact administrator to set", info.OriginModelName, info.OriginModelName)
|
||||
}
|
||||
}
|
||||
completionRatio = operation_setting.GetCompletionRatio(info.OriginModelName)
|
||||
cacheRatio, _ = operation_setting.GetCacheRatio(info.OriginModelName)
|
||||
ratio := modelRatio * groupRatio
|
||||
preConsumedQuota = int(float64(preConsumedTokens) * ratio)
|
||||
} else {
|
||||
@@ -39,8 +50,10 @@ func ModelPriceHelper(c *gin.Context, info *relaycommon.RelayInfo, promptTokens
|
||||
return PriceData{
|
||||
ModelPrice: modelPrice,
|
||||
ModelRatio: modelRatio,
|
||||
CompletionRatio: completionRatio,
|
||||
GroupRatio: groupRatio,
|
||||
UsePrice: usePrice,
|
||||
CacheRatio: cacheRatio,
|
||||
ShouldPreConsumedQuota: preConsumedQuota,
|
||||
}, nil
|
||||
}
|
||||
|
||||
relay/helper/stream_scanner.go (new file, 91 additions)
@@ -0,0 +1,91 @@
|
||||
package helper
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"context"
|
||||
"io"
|
||||
"net/http"
|
||||
"one-api/common"
|
||||
"one-api/constant"
|
||||
relaycommon "one-api/relay/common"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/gin-gonic/gin"
|
||||
)
|
||||
|
||||
func StreamScannerHandler(c *gin.Context, resp *http.Response, info *relaycommon.RelayInfo, dataHandler func(data string) bool) {
|
||||
|
||||
if resp == nil {
|
||||
return
|
||||
}
|
||||
|
||||
defer resp.Body.Close()
|
||||
|
||||
streamingTimeout := time.Duration(constant.StreamingTimeout) * time.Second
|
||||
if strings.HasPrefix(info.UpstreamModelName, "o1") || strings.HasPrefix(info.UpstreamModelName, "o3") {
|
||||
// twice timeout for thinking model
|
||||
streamingTimeout *= 2
|
||||
}
|
||||
|
||||
var (
|
||||
stopChan = make(chan bool, 2)
|
||||
scanner = bufio.NewScanner(resp.Body)
|
||||
ticker = time.NewTicker(streamingTimeout)
|
||||
)
|
||||
|
||||
defer func() {
|
||||
ticker.Stop()
|
||||
close(stopChan)
|
||||
}()
|
||||
|
||||
scanner.Split(bufio.ScanLines)
|
||||
SetEventStreamHeaders(c)
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
ctx = context.WithValue(ctx, "stop_chan", stopChan)
|
||||
common.RelayCtxGo(ctx, func() {
|
||||
for scanner.Scan() {
|
||||
ticker.Reset(streamingTimeout)
|
||||
data := scanner.Text()
|
||||
if common.DebugEnabled {
|
||||
println(data)
|
||||
}
|
||||
|
||||
if len(data) < 6 {
|
||||
continue
|
||||
}
|
||||
if data[:5] != "data:" && data[:6] != "[DONE]" {
|
||||
continue
|
||||
}
|
||||
data = data[5:]
|
||||
data = strings.TrimLeft(data, " ")
|
||||
data = strings.TrimSuffix(data, "\"")
|
||||
if !strings.HasPrefix(data, "[DONE]") {
|
||||
info.SetFirstResponseTime()
|
||||
success := dataHandler(data)
|
||||
if !success {
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if err := scanner.Err(); err != nil {
|
||||
if err != io.EOF {
|
||||
common.LogError(c, "scanner error: "+err.Error())
|
||||
}
|
||||
}
|
||||
|
||||
common.SafeSendBool(stopChan, true)
|
||||
})
|
||||
|
||||
select {
|
||||
case <-ticker.C:
|
||||
// 超时处理逻辑
|
||||
common.LogError(c, "streaming timeout")
|
||||
case <-stopChan:
|
||||
// 正常结束
|
||||
}
|
||||
}
|
||||
@@ -7,7 +7,6 @@ import (
 	"net/http"
 	"one-api/common"
 	"one-api/dto"
-	"one-api/model"
 	relaycommon "one-api/relay/common"
 	relayconstant "one-api/relay/constant"
 	"one-api/relay/helper"
@@ -80,10 +79,6 @@ func AudioHelper(c *gin.Context) (openaiErr *dto.OpenAIErrorWithStatusCode) {
 		return service.OpenAIErrorWrapperLocal(err, "model_price_error", http.StatusInternalServerError)
 	}

-	userQuota, err := model.GetUserQuota(relayInfo.UserId, false)
-	if err != nil {
-		return service.OpenAIErrorWrapperLocal(err, "get_user_quota_failed", http.StatusInternalServerError)
-	}
 	preConsumedQuota, userQuota, openaiErr := preConsumeQuota(c, priceData.ShouldPreConsumedQuota, relayInfo)
 	if openaiErr != nil {
 		return openaiErr
@@ -15,6 +15,7 @@ import (
|
||||
relayconstant "one-api/relay/constant"
|
||||
"one-api/service"
|
||||
"one-api/setting"
|
||||
"one-api/setting/operation_setting"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
@@ -157,10 +158,10 @@ func RelaySwapFace(c *gin.Context) *dto.MidjourneyResponse {
|
||||
return service.MidjourneyErrorWrapper(constant.MjRequestError, "sour_base64_and_target_base64_is_required")
|
||||
}
|
||||
modelName := service.CoverActionToModelName(constant.MjActionSwapFace)
|
||||
modelPrice, success := common.GetModelPrice(modelName, true)
|
||||
modelPrice, success := operation_setting.GetModelPrice(modelName, true)
|
||||
// 如果没有配置价格,则使用默认价格
|
||||
if !success {
|
||||
defaultPrice, ok := common.GetDefaultModelRatioMap()[modelName]
|
||||
defaultPrice, ok := operation_setting.GetDefaultModelRatioMap()[modelName]
|
||||
if !ok {
|
||||
modelPrice = 0.1
|
||||
} else {
|
||||
@@ -463,10 +464,10 @@ func RelayMidjourneySubmit(c *gin.Context, relayMode int) *dto.MidjourneyRespons
|
||||
fullRequestURL := fmt.Sprintf("%s%s", baseURL, requestURL)
|
||||
|
||||
modelName := service.CoverActionToModelName(midjRequest.Action)
|
||||
modelPrice, success := common.GetModelPrice(modelName, true)
|
||||
modelPrice, success := operation_setting.GetModelPrice(modelName, true)
|
||||
// 如果没有配置价格,则使用默认价格
|
||||
if !success {
|
||||
defaultPrice, ok := common.GetDefaultModelRatioMap()[modelName]
|
||||
defaultPrice, ok := operation_setting.GetDefaultModelRatioMap()[modelName]
|
||||
if !ok {
|
||||
modelPrice = 0.1
|
||||
} else {
|
||||
|
||||
@@ -110,6 +110,7 @@ func TextHelper(c *gin.Context) (openaiErr *dto.OpenAIErrorWithStatusCode) {
|
||||
if err != nil {
|
||||
return service.OpenAIErrorWrapperLocal(err, "model_price_error", http.StatusInternalServerError)
|
||||
}
|
||||
|
||||
// pre-consume quota 预消耗配额
|
||||
preConsumedQuota, userQuota, openaiErr := preConsumeQuota(c, priceData.ShouldPreConsumedQuota, relayInfo)
|
||||
if openaiErr != nil {
|
||||
@@ -303,24 +304,26 @@ func postConsumeQuota(ctx *gin.Context, relayInfo *relaycommon.RelayInfo,
|
||||
CompletionTokens: 0,
|
||||
TotalTokens: relayInfo.PromptTokens,
|
||||
}
|
||||
extraContent += " ,(可能是请求出错)"
|
||||
extraContent += "(可能是请求出错)"
|
||||
}
|
||||
useTimeSeconds := time.Now().Unix() - relayInfo.StartTime.Unix()
|
||||
promptTokens := usage.PromptTokens
|
||||
cacheTokens := usage.PromptTokensDetails.CachedTokens
|
||||
completionTokens := usage.CompletionTokens
|
||||
modelName := relayInfo.OriginModelName
|
||||
|
||||
tokenName := ctx.GetString("token_name")
|
||||
completionRatio := common.GetCompletionRatio(modelName)
|
||||
completionRatio := priceData.CompletionRatio
|
||||
cacheRatio := priceData.CacheRatio
|
||||
ratio := priceData.ModelRatio * priceData.GroupRatio
|
||||
modelRatio := priceData.ModelRatio
|
||||
groupRatio := priceData.GroupRatio
|
||||
modelPrice := priceData.ModelPrice
|
||||
usePrice := priceData.UsePrice
|
||||
|
||||
quota := 0
|
||||
if !priceData.UsePrice {
|
||||
quota = promptTokens + int(math.Round(float64(completionTokens)*completionRatio))
|
||||
quota = (promptTokens - cacheTokens) + int(math.Round(float64(cacheTokens)*cacheRatio))
|
||||
quota += int(math.Round(float64(completionTokens) * completionRatio))
|
||||
quota = int(math.Round(float64(quota) * ratio))
|
||||
if ratio != 0 && quota <= 0 {
|
||||
quota = 1
|
||||
@@ -329,8 +332,9 @@ func postConsumeQuota(ctx *gin.Context, relayInfo *relaycommon.RelayInfo,
|
||||
quota = int(modelPrice * common.QuotaPerUnit * groupRatio)
|
||||
}
|
||||
totalTokens := promptTokens + completionTokens
|
||||
|
||||
var logContent string
|
||||
if !usePrice {
|
||||
if !priceData.UsePrice {
|
||||
logContent = fmt.Sprintf("模型倍率 %.2f,补全倍率 %.2f,分组倍率 %.2f", modelRatio, completionRatio, groupRatio)
|
||||
} else {
|
||||
logContent = fmt.Sprintf("模型价格 %.2f,分组倍率 %.2f", modelPrice, groupRatio)
|
||||
@@ -371,7 +375,7 @@ func postConsumeQuota(ctx *gin.Context, relayInfo *relaycommon.RelayInfo,
|
||||
if extraContent != "" {
|
||||
logContent += ", " + extraContent
|
||||
}
|
||||
other := service.GenerateTextOtherInfo(ctx, relayInfo, modelRatio, groupRatio, completionRatio, modelPrice)
|
||||
other := service.GenerateTextOtherInfo(ctx, relayInfo, modelRatio, groupRatio, completionRatio, cacheTokens, cacheRatio, modelPrice)
|
||||
model.RecordConsumeLog(ctx, relayInfo.UserId, relayInfo.ChannelId, promptTokens, completionTokens, logModel,
|
||||
tokenName, quota, logContent, relayInfo.TokenId, userQuota, int(useTimeSeconds), relayInfo.IsStream, relayInfo.Group, other)
|
||||
|
||||
|
||||
@@ -16,6 +16,7 @@ import (
|
||||
relayconstant "one-api/relay/constant"
|
||||
"one-api/service"
|
||||
"one-api/setting"
|
||||
"one-api/setting/operation_setting"
|
||||
)
|
||||
|
||||
/*
|
||||
@@ -37,9 +38,9 @@ func RelayTaskSubmit(c *gin.Context, relayMode int) (taskErr *dto.TaskError) {
|
||||
}
|
||||
|
||||
modelName := service.CoverTaskActionToModelName(platform, relayInfo.Action)
|
||||
modelPrice, success := common.GetModelPrice(modelName, true)
|
||||
modelPrice, success := operation_setting.GetModelPrice(modelName, true)
|
||||
if !success {
|
||||
defaultPrice, ok := common.GetDefaultModelRatioMap()[modelName]
|
||||
defaultPrice, ok := operation_setting.GetDefaultModelRatioMap()[modelName]
|
||||
if !ok {
|
||||
modelPrice = 0.1
|
||||
} else {
|
||||
|
||||
@@ -11,6 +11,7 @@ import (
|
||||
relaycommon "one-api/relay/common"
|
||||
"one-api/service"
|
||||
"one-api/setting"
|
||||
"one-api/setting/operation_setting"
|
||||
)
|
||||
|
||||
func WssHelper(c *gin.Context, ws *websocket.Conn) (openaiErr *dto.OpenAIErrorWithStatusCode) {
|
||||
@@ -39,7 +40,7 @@ func WssHelper(c *gin.Context, ws *websocket.Conn) (openaiErr *dto.OpenAIErrorWi
|
||||
}
|
||||
}
|
||||
//relayInfo.UpstreamModelName = textRequest.Model
|
||||
modelPrice, getModelPriceSuccess := common.GetModelPrice(relayInfo.UpstreamModelName, false)
|
||||
modelPrice, getModelPriceSuccess := operation_setting.GetModelPrice(relayInfo.UpstreamModelName, false)
|
||||
groupRatio := setting.GetGroupRatio(relayInfo.Group)
|
||||
|
||||
var preConsumedQuota int
|
||||
@@ -65,7 +66,7 @@ func WssHelper(c *gin.Context, ws *websocket.Conn) (openaiErr *dto.OpenAIErrorWi
|
||||
//if realtimeEvent.Session.MaxResponseOutputTokens != 0 {
|
||||
// preConsumedTokens = promptTokens + int(realtimeEvent.Session.MaxResponseOutputTokens)
|
||||
//}
|
||||
modelRatio, _ = common.GetModelRatio(relayInfo.UpstreamModelName)
|
||||
modelRatio, _ = operation_setting.GetModelRatio(relayInfo.UpstreamModelName)
|
||||
ratio = modelRatio * groupRatio
|
||||
preConsumedQuota = int(float64(preConsumedTokens) * ratio)
|
||||
} else {
|
||||
|
||||
@@ -6,23 +6,31 @@ import (
|
||||
"one-api/common"
|
||||
"one-api/dto"
|
||||
"one-api/model"
|
||||
"one-api/setting"
|
||||
"one-api/setting/operation_setting"
|
||||
"strings"
|
||||
)
|
||||
|
||||
func formatNotifyType(channelId int, status int) string {
|
||||
return fmt.Sprintf("%s_%d_%d", dto.NotifyTypeChannelUpdate, channelId, status)
|
||||
}
|
||||
|
||||
// disable & notify
|
||||
func DisableChannel(channelId int, channelName string, reason string) {
|
||||
model.UpdateChannelStatusById(channelId, common.ChannelStatusAutoDisabled, reason)
|
||||
subject := fmt.Sprintf("通道「%s」(#%d)已被禁用", channelName, channelId)
|
||||
content := fmt.Sprintf("通道「%s」(#%d)已被禁用,原因:%s", channelName, channelId, reason)
|
||||
NotifyRootUser(subject, content, dto.NotifyTypeChannelUpdate)
|
||||
success := model.UpdateChannelStatusById(channelId, common.ChannelStatusAutoDisabled, reason)
|
||||
if success {
|
||||
subject := fmt.Sprintf("通道「%s」(#%d)已被禁用", channelName, channelId)
|
||||
content := fmt.Sprintf("通道「%s」(#%d)已被禁用,原因:%s", channelName, channelId, reason)
|
||||
NotifyRootUser(formatNotifyType(channelId, common.ChannelStatusAutoDisabled), subject, content)
|
||||
}
|
||||
}
|
||||
|
||||
func EnableChannel(channelId int, channelName string) {
|
||||
model.UpdateChannelStatusById(channelId, common.ChannelStatusEnabled, "")
|
||||
subject := fmt.Sprintf("通道「%s」(#%d)已被启用", channelName, channelId)
|
||||
content := fmt.Sprintf("通道「%s」(#%d)已被启用", channelName, channelId)
|
||||
NotifyRootUser(subject, content, dto.NotifyTypeChannelUpdate)
|
||||
success := model.UpdateChannelStatusById(channelId, common.ChannelStatusEnabled, "")
|
||||
if success {
|
||||
subject := fmt.Sprintf("通道「%s」(#%d)已被启用", channelName, channelId)
|
||||
content := fmt.Sprintf("通道「%s」(#%d)已被启用", channelName, channelId)
|
||||
NotifyRootUser(formatNotifyType(channelId, common.ChannelStatusEnabled), subject, content)
|
||||
}
|
||||
}
|
||||
|
||||
func ShouldDisableChannel(channelType int, err *dto.OpenAIErrorWithStatusCode) bool {
|
||||
@@ -67,7 +75,7 @@ func ShouldDisableChannel(channelType int, err *dto.OpenAIErrorWithStatusCode) b
|
||||
}
|
||||
|
||||
lowerMessage := strings.ToLower(err.Error.Message)
|
||||
search, _ := AcSearch(lowerMessage, setting.AutomaticDisableKeywords, true)
|
||||
search, _ := AcSearch(lowerMessage, operation_setting.AutomaticDisableKeywords, true)
|
||||
if search {
|
||||
return true
|
||||
}
|
||||
|
||||
@@ -7,7 +7,9 @@ import (
|
||||
"fmt"
|
||||
"image"
|
||||
"io"
|
||||
"net/http"
|
||||
"one-api/common"
|
||||
"one-api/constant"
|
||||
"strings"
|
||||
|
||||
"golang.org/x/image/webp"
|
||||
@@ -23,7 +25,7 @@ func DecodeBase64ImageData(base64String string) (image.Config, string, string, e
|
||||
decodedData, err := base64.StdEncoding.DecodeString(base64String)
|
||||
if err != nil {
|
||||
fmt.Println("Error: Failed to decode base64 string")
|
||||
return image.Config{}, "", "", err
|
||||
return image.Config{}, "", "", fmt.Errorf("failed to decode base64 string: %s", err.Error())
|
||||
}
|
||||
|
||||
// 创建一个bytes.Buffer用于存储解码后的数据
|
||||
@@ -61,20 +63,51 @@ func DecodeBase64FileData(base64String string) (string, string, error) {
|
||||
func GetImageFromUrl(url string) (mimeType string, data string, err error) {
|
||||
resp, err := DoDownloadRequest(url)
|
||||
if err != nil {
|
||||
return "", "", err
|
||||
}
|
||||
if !strings.HasPrefix(resp.Header.Get("Content-Type"), "image/") {
|
||||
return "", "", fmt.Errorf("invalid content type: %s, required image/*", resp.Header.Get("Content-Type"))
|
||||
return "", "", fmt.Errorf("failed to download image: %w", err)
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
buffer := bytes.NewBuffer(nil)
|
||||
_, err = buffer.ReadFrom(resp.Body)
|
||||
if err != nil {
|
||||
return
|
||||
|
||||
// Check HTTP status code
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
return "", "", fmt.Errorf("failed to download image: HTTP %d", resp.StatusCode)
|
||||
}
|
||||
mimeType = resp.Header.Get("Content-Type")
|
||||
|
||||
contentType := resp.Header.Get("Content-Type")
|
||||
if contentType != "application/octet-stream" && !strings.HasPrefix(contentType, "image/") {
|
||||
return "", "", fmt.Errorf("invalid content type: %s, required image/*", contentType)
|
||||
}
|
||||
maxImageSize := int64(constant.MaxFileDownloadMB * 1024 * 1024)
|
||||
|
||||
// Check Content-Length if available
|
||||
if resp.ContentLength > maxImageSize {
|
||||
return "", "", fmt.Errorf("image size %d exceeds maximum allowed size of %d bytes", resp.ContentLength, maxImageSize)
|
||||
}
|
||||
|
||||
// Use LimitReader to prevent reading oversized images
|
||||
limitReader := io.LimitReader(resp.Body, maxImageSize)
|
||||
buffer := &bytes.Buffer{}
|
||||
|
||||
written, err := io.Copy(buffer, limitReader)
|
||||
if err != nil {
|
||||
return "", "", fmt.Errorf("failed to read image data: %w", err)
|
||||
}
|
||||
if written >= maxImageSize {
|
||||
return "", "", fmt.Errorf("image size exceeds maximum allowed size of %d bytes", maxImageSize)
|
||||
}
|
||||
|
||||
data = base64.StdEncoding.EncodeToString(buffer.Bytes())
|
||||
return
|
||||
mimeType = contentType
|
||||
|
||||
// Handle application/octet-stream type
|
||||
if mimeType == "application/octet-stream" {
|
||||
_, format, _, err := DecodeBase64ImageData(data)
|
||||
if err != nil {
|
||||
return "", "", err
|
||||
}
|
||||
mimeType = "image/" + format
|
||||
}
|
||||
|
||||
return mimeType, data, nil
|
||||
}
|
||||
|
||||
func DecodeUrlImageData(imageUrl string) (image.Config, string, error) {
|
||||
@@ -92,7 +125,7 @@ func DecodeUrlImageData(imageUrl string) (image.Config, string, error) {
|
||||
|
||||
mimeType := response.Header.Get("Content-Type")
|
||||
|
||||
if !strings.HasPrefix(mimeType, "image/") {
|
||||
if mimeType != "application/octet-stream" && !strings.HasPrefix(mimeType, "image/") {
|
||||
return image.Config{}, "", fmt.Errorf("invalid content type: %s, required image/*", mimeType)
|
||||
}
|
||||
|
||||
|
||||
@@ -1,16 +1,20 @@
|
||||
package service
|
||||
|
||||
import (
|
||||
"github.com/gin-gonic/gin"
|
||||
"one-api/dto"
|
||||
relaycommon "one-api/relay/common"
|
||||
|
||||
"github.com/gin-gonic/gin"
|
||||
)
|
||||
|
||||
func GenerateTextOtherInfo(ctx *gin.Context, relayInfo *relaycommon.RelayInfo, modelRatio, groupRatio, completionRatio, modelPrice float64) map[string]interface{} {
|
||||
func GenerateTextOtherInfo(ctx *gin.Context, relayInfo *relaycommon.RelayInfo, modelRatio, groupRatio, completionRatio float64,
|
||||
cacheTokens int, cacheRatio float64, modelPrice float64) map[string]interface{} {
|
||||
other := make(map[string]interface{})
|
||||
other["model_ratio"] = modelRatio
|
||||
other["group_ratio"] = groupRatio
|
||||
other["completion_ratio"] = completionRatio
|
||||
other["cache_tokens"] = cacheTokens
|
||||
other["cache_ratio"] = cacheRatio
|
||||
other["model_price"] = modelPrice
|
||||
other["frt"] = float64(relayInfo.FirstResponseTime.UnixMilli() - relayInfo.StartTime.UnixMilli())
|
||||
if relayInfo.ReasoningEffort != "" {
|
||||
@@ -27,7 +31,7 @@ func GenerateTextOtherInfo(ctx *gin.Context, relayInfo *relaycommon.RelayInfo, m
|
||||
}
|
||||
|
||||
func GenerateWssOtherInfo(ctx *gin.Context, relayInfo *relaycommon.RelayInfo, usage *dto.RealtimeUsage, modelRatio, groupRatio, completionRatio, audioRatio, audioCompletionRatio, modelPrice float64) map[string]interface{} {
|
||||
info := GenerateTextOtherInfo(ctx, relayInfo, modelRatio, groupRatio, completionRatio, modelPrice)
|
||||
info := GenerateTextOtherInfo(ctx, relayInfo, modelRatio, groupRatio, completionRatio, 0, 0.0, modelPrice)
|
||||
info["ws"] = true
|
||||
info["audio_input"] = usage.InputTokenDetails.AudioTokens
|
||||
info["audio_output"] = usage.OutputTokenDetails.AudioTokens
|
||||
@@ -39,7 +43,7 @@ func GenerateWssOtherInfo(ctx *gin.Context, relayInfo *relaycommon.RelayInfo, us
|
||||
}
|
||||
|
||||
func GenerateAudioOtherInfo(ctx *gin.Context, relayInfo *relaycommon.RelayInfo, usage *dto.Usage, modelRatio, groupRatio, completionRatio, audioRatio, audioCompletionRatio, modelPrice float64) map[string]interface{} {
|
||||
info := GenerateTextOtherInfo(ctx, relayInfo, modelRatio, groupRatio, completionRatio, modelPrice)
|
||||
info := GenerateTextOtherInfo(ctx, relayInfo, modelRatio, groupRatio, completionRatio, 0, 0.0, modelPrice)
|
||||
info["audio"] = true
|
||||
info["audio_input"] = usage.PromptTokensDetails.AudioTokens
|
||||
info["audio_output"] = usage.CompletionTokenDetails.AudioTokens
|
||||
|
||||
@@ -12,6 +12,7 @@ import (
|
||||
relaycommon "one-api/relay/common"
|
||||
"one-api/relay/helper"
|
||||
"one-api/setting"
|
||||
"one-api/setting/operation_setting"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
@@ -38,9 +39,9 @@ func calculateAudioQuota(info QuotaInfo) int {
|
||||
return int(info.ModelPrice * common.QuotaPerUnit * info.GroupRatio)
|
||||
}
|
||||
|
||||
completionRatio := common.GetCompletionRatio(info.ModelName)
|
||||
audioRatio := common.GetAudioRatio(info.ModelName)
|
||||
audioCompletionRatio := common.GetAudioCompletionRatio(info.ModelName)
|
||||
completionRatio := operation_setting.GetCompletionRatio(info.ModelName)
|
||||
audioRatio := operation_setting.GetAudioRatio(info.ModelName)
|
||||
audioCompletionRatio := operation_setting.GetAudioCompletionRatio(info.ModelName)
|
||||
ratio := info.GroupRatio * info.ModelRatio
|
||||
|
||||
quota := info.InputDetails.TextTokens + int(math.Round(float64(info.OutputDetails.TextTokens)*completionRatio))
|
||||
@@ -75,7 +76,7 @@ func PreWssConsumeQuota(ctx *gin.Context, relayInfo *relaycommon.RelayInfo, usag
|
||||
audioInputTokens := usage.InputTokenDetails.AudioTokens
|
||||
audioOutTokens := usage.OutputTokenDetails.AudioTokens
|
||||
groupRatio := setting.GetGroupRatio(relayInfo.Group)
|
||||
modelRatio, _ := common.GetModelRatio(modelName)
|
||||
modelRatio, _ := operation_setting.GetModelRatio(modelName)
|
||||
|
||||
quotaInfo := QuotaInfo{
|
||||
InputDetails: TokenDetails{
|
||||
@@ -122,9 +123,9 @@ func PostWssConsumeQuota(ctx *gin.Context, relayInfo *relaycommon.RelayInfo, mod
|
||||
audioOutTokens := usage.OutputTokenDetails.AudioTokens
|
||||
|
||||
tokenName := ctx.GetString("token_name")
|
||||
completionRatio := common.GetCompletionRatio(modelName)
|
||||
audioRatio := common.GetAudioRatio(relayInfo.OriginModelName)
|
||||
audioCompletionRatio := common.GetAudioCompletionRatio(modelName)
|
||||
completionRatio := operation_setting.GetCompletionRatio(modelName)
|
||||
audioRatio := operation_setting.GetAudioRatio(relayInfo.OriginModelName)
|
||||
audioCompletionRatio := operation_setting.GetAudioCompletionRatio(modelName)
|
||||
|
||||
quotaInfo := QuotaInfo{
|
||||
InputDetails: TokenDetails{
|
||||
@@ -184,9 +185,9 @@ func PostAudioConsumeQuota(ctx *gin.Context, relayInfo *relaycommon.RelayInfo,
|
||||
audioOutTokens := usage.CompletionTokenDetails.AudioTokens
|
||||
|
||||
tokenName := ctx.GetString("token_name")
|
||||
completionRatio := common.GetCompletionRatio(relayInfo.OriginModelName)
|
||||
audioRatio := common.GetAudioRatio(relayInfo.OriginModelName)
|
||||
audioCompletionRatio := common.GetAudioCompletionRatio(relayInfo.OriginModelName)
|
||||
completionRatio := operation_setting.GetCompletionRatio(relayInfo.OriginModelName)
|
||||
audioRatio := operation_setting.GetAudioRatio(relayInfo.OriginModelName)
|
||||
audioCompletionRatio := operation_setting.GetAudioCompletionRatio(relayInfo.OriginModelName)
|
||||
|
||||
modelRatio := priceData.ModelRatio
|
||||
groupRatio := priceData.GroupRatio
|
||||
|
||||
@@ -10,6 +10,7 @@ import (
 	"one-api/constant"
 	"one-api/dto"
 	relaycommon "one-api/relay/common"
+	"one-api/setting/operation_setting"
 	"strings"
 	"unicode/utf8"
@@ -32,7 +33,7 @@ func InitTokenEncoders() {
 	if err != nil {
 		common.FatalLog(fmt.Sprintf("failed to get gpt-4o token encoder: %s", err.Error()))
 	}
-	for model, _ := range common.GetDefaultModelRatioMap() {
+	for model, _ := range operation_setting.GetDefaultModelRatioMap() {
 		if strings.HasPrefix(model, "gpt-3.5") {
 			tokenEncoderMap[model] = cl100TokenEncoder
 		} else if strings.HasPrefix(model, "gpt-4") {
@@ -11,7 +11,10 @@ import (

 func NotifyRootUser(t string, subject string, content string) {
 	user := model.GetRootUser().ToBaseUser()
-	_ = NotifyUser(user.Id, user.Email, user.GetSetting(), dto.NewNotify(t, subject, content, nil))
+	err := NotifyUser(user.Id, user.Email, user.GetSetting(), dto.NewNotify(t, subject, content, nil))
+	if err != nil {
+		common.SysError(fmt.Sprintf("failed to notify root user: %s", err.Error()))
+	}
 }

 func NotifyUser(userId int, userEmail string, userSetting map[string]interface{}, data dto.Notify) error {
setting/operation_setting/cache_ratio.go (new file, 77 additions)
@@ -0,0 +1,77 @@
|
||||
package operation_setting
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"one-api/common"
|
||||
"sync"
|
||||
)
|
||||
|
||||
var defaultCacheRatio = map[string]float64{
|
||||
"gpt-4": 0.5,
|
||||
"o1-2024-12-17": 0.5,
|
||||
"o1-preview-2024-09-12": 0.5,
|
||||
"o1-mini-2024-09-12": 0.5,
|
||||
"gpt-4o-2024-11-20": 0.5,
|
||||
"gpt-4o-2024-08-06": 0.5,
|
||||
"gpt-4o-mini-2024-07-18": 0.5,
|
||||
"gpt-4o-realtime-preview": 0.5,
|
||||
"gpt-4o-mini-realtime-preview": 0.5,
|
||||
"deepseek-chat": 0.5,
|
||||
"deepseek-reasoner": 0.5,
|
||||
"deepseek-coder": 0.5,
|
||||
}
|
||||
|
||||
var cacheRatioMap map[string]float64
|
||||
var cacheRatioMapMutex sync.RWMutex
|
||||
|
||||
// GetCacheRatioMap returns the cache ratio map
|
||||
func GetCacheRatioMap() map[string]float64 {
|
||||
cacheRatioMapMutex.Lock()
|
||||
defer cacheRatioMapMutex.Unlock()
|
||||
if cacheRatioMap == nil {
|
||||
cacheRatioMap = defaultCacheRatio
|
||||
}
|
||||
return cacheRatioMap
|
||||
}
|
||||
|
||||
// CacheRatio2JSONString converts the cache ratio map to a JSON string
|
||||
func CacheRatio2JSONString() string {
|
||||
GetCacheRatioMap()
|
||||
jsonBytes, err := json.Marshal(cacheRatioMap)
|
||||
if err != nil {
|
||||
common.SysError("error marshalling cache ratio: " + err.Error())
|
||||
}
|
||||
return string(jsonBytes)
|
||||
}
|
||||
|
||||
// UpdateCacheRatioByJSONString updates the cache ratio map from a JSON string
|
||||
func UpdateCacheRatioByJSONString(jsonStr string) error {
|
||||
cacheRatioMapMutex.Lock()
|
||||
defer cacheRatioMapMutex.Unlock()
|
||||
cacheRatioMap = make(map[string]float64)
|
||||
return json.Unmarshal([]byte(jsonStr), &cacheRatioMap)
|
||||
}
|
||||
|
||||
// GetCacheRatio returns the cache ratio for a model
|
||||
func GetCacheRatio(name string) (float64, bool) {
|
||||
GetCacheRatioMap()
|
||||
ratio, ok := cacheRatioMap[name]
|
||||
if !ok {
|
||||
return 0.5, false // Default to 0.5 if not found
|
||||
}
|
||||
return ratio, true
|
||||
}
|
||||
|
||||
// DefaultCacheRatio2JSONString converts the default cache ratio map to a JSON string
|
||||
func DefaultCacheRatio2JSONString() string {
|
||||
jsonBytes, err := json.Marshal(defaultCacheRatio)
|
||||
if err != nil {
|
||||
common.SysError("error marshalling default cache ratio: " + err.Error())
|
||||
}
|
||||
return string(jsonBytes)
|
||||
}
|
||||
|
||||
// GetDefaultCacheRatioMap returns the default cache ratio map
|
||||
func GetDefaultCacheRatioMap() map[string]float64 {
|
||||
return defaultCacheRatio
|
||||
}
|
||||
@@ -1,7 +1,8 @@
|
||||
package common
|
||||
package operation_setting
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"one-api/common"
|
||||
"strings"
|
||||
"sync"
|
||||
)
|
||||
@@ -261,7 +262,7 @@ func ModelPrice2JSONString() string {
|
||||
GetModelPriceMap()
|
||||
jsonBytes, err := json.Marshal(modelPriceMap)
|
||||
if err != nil {
|
||||
SysError("error marshalling model price: " + err.Error())
|
||||
common.SysError("error marshalling model price: " + err.Error())
|
||||
}
|
||||
return string(jsonBytes)
|
||||
}
|
||||
@@ -285,7 +286,7 @@ func GetModelPrice(name string, printErr bool) (float64, bool) {
|
||||
price, ok := modelPriceMap[name]
|
||||
if !ok {
|
||||
if printErr {
|
||||
SysError("model price not found: " + name)
|
||||
common.SysError("model price not found: " + name)
|
||||
}
|
||||
return -1, false
|
||||
}
|
||||
@@ -305,7 +306,7 @@ func ModelRatio2JSONString() string {
|
||||
GetModelRatioMap()
|
||||
jsonBytes, err := json.Marshal(modelRatioMap)
|
||||
if err != nil {
|
||||
SysError("error marshalling model ratio: " + err.Error())
|
||||
common.SysError("error marshalling model ratio: " + err.Error())
|
||||
}
|
||||
return string(jsonBytes)
|
||||
}
|
||||
@@ -324,8 +325,7 @@ func GetModelRatio(name string) (float64, bool) {
|
||||
}
|
||||
ratio, ok := modelRatioMap[name]
|
||||
if !ok {
|
||||
SysError("model ratio not found: " + name)
|
||||
return 37.5, false
|
||||
return 37.5, SelfUseModeEnabled
|
||||
}
|
||||
return ratio, true
|
||||
}
|
||||
@@ -333,7 +333,7 @@ func GetModelRatio(name string) (float64, bool) {
|
||||
func DefaultModelRatio2JSONString() string {
|
||||
jsonBytes, err := json.Marshal(defaultModelRatio)
|
||||
if err != nil {
|
||||
SysError("error marshalling model ratio: " + err.Error())
|
||||
common.SysError("error marshalling model ratio: " + err.Error())
|
||||
}
|
||||
return string(jsonBytes)
|
||||
}
|
||||
@@ -355,7 +355,7 @@ func CompletionRatio2JSONString() string {
|
||||
GetCompletionRatioMap()
|
||||
jsonBytes, err := json.Marshal(CompletionRatio)
|
||||
if err != nil {
|
||||
SysError("error marshalling completion ratio: " + err.Error())
|
||||
common.SysError("error marshalling completion ratio: " + err.Error())
|
||||
}
|
||||
return string(jsonBytes)
|
||||
}
|
||||
@@ -1,8 +1,9 @@
-package setting
+package operation_setting

 import "strings"

 var DemoSiteEnabled = false
+var SelfUseModeEnabled = false

 var AutomaticDisableKeywords = []string{
 	"Your credit balance is too low",
@@ -30,6 +30,7 @@ import { useTranslation } from 'react-i18next';
|
||||
import { StatusContext } from './context/Status';
|
||||
import { setStatusData } from './helpers/data.js';
|
||||
import { API, showError } from './helpers';
|
||||
import PersonalSetting from './components/PersonalSetting.js';
|
||||
|
||||
const Home = lazy(() => import('./pages/Home'));
|
||||
const Detail = lazy(() => import('./pages/Detail'));
|
||||
@@ -177,6 +178,16 @@ function App() {
|
||||
</PrivateRoute>
|
||||
}
|
||||
/>
|
||||
<Route
|
||||
path='/personal'
|
||||
element={
|
||||
<PrivateRoute>
|
||||
<Suspense fallback={<Loading></Loading>}>
|
||||
<PersonalSetting />
|
||||
</Suspense>
|
||||
</PrivateRoute>
|
||||
}
|
||||
/>
|
||||
<Route
|
||||
path='/topup'
|
||||
element={
|
||||
|
||||
@@ -15,7 +15,7 @@ import {
|
||||
getQuotaPerUnit,
|
||||
renderGroup,
|
||||
renderNumberWithPoint,
|
||||
renderQuota, renderQuotaWithPrompt
|
||||
renderQuota, renderQuotaWithPrompt, stringToColor
|
||||
} from '../helpers/render';
|
||||
import {
|
||||
Button, Divider,
|
||||
@@ -378,17 +378,15 @@ const ChannelsTable = () => {
|
||||
>
|
||||
{t('测试')}
|
||||
</Button>
|
||||
<Dropdown
|
||||
trigger="click"
|
||||
position="bottomRight"
|
||||
menu={modelMenuItems} // 使用即时生成的菜单项
|
||||
>
|
||||
<Button
|
||||
style={{ padding: '8px 4px' }}
|
||||
type="primary"
|
||||
icon={<IconTreeTriangleDown />}
|
||||
></Button>
|
||||
</Dropdown>
|
||||
<Button
|
||||
style={{ padding: '8px 4px' }}
|
||||
type="primary"
|
||||
icon={<IconTreeTriangleDown />}
|
||||
onClick={() => {
|
||||
setCurrentTestChannel(record);
|
||||
setShowModelTestModal(true);
|
||||
}}
|
||||
></Button>
|
||||
</SplitButtonGroup>
|
||||
<Popconfirm
|
||||
title={t('确定是否要删除此渠道?')}
|
||||
@@ -522,6 +520,9 @@ const ChannelsTable = () => {
|
||||
const [enableTagMode, setEnableTagMode] = useState(false);
|
||||
const [showBatchSetTag, setShowBatchSetTag] = useState(false);
|
||||
const [batchSetTagValue, setBatchSetTagValue] = useState('');
|
||||
const [showModelTestModal, setShowModelTestModal] = useState(false);
|
||||
const [currentTestChannel, setCurrentTestChannel] = useState(null);
|
||||
const [modelSearchKeyword, setModelSearchKeyword] = useState('');
|
||||
|
||||
|
||||
const removeRecord = (record) => {
|
||||
@@ -1289,6 +1290,77 @@ const ChannelsTable = () => {
|
||||
onChange={(v) => setBatchSetTagValue(v)}
|
||||
/>
|
||||
</Modal>
|
||||
|
||||
{/* 模型测试弹窗 */}
|
||||
<Modal
|
||||
title={t('选择模型进行测试')}
|
||||
visible={showModelTestModal && currentTestChannel !== null}
|
||||
onCancel={() => {
|
||||
setShowModelTestModal(false);
|
||||
setModelSearchKeyword('');
|
||||
}}
|
||||
footer={null}
|
||||
maskClosable={true}
|
||||
centered={true}
|
||||
width={600}
|
||||
>
|
||||
<div style={{ maxHeight: '500px', overflowY: 'auto', padding: '10px' }}>
|
||||
{currentTestChannel && (
|
||||
<div>
|
||||
<Typography.Title heading={6} style={{ marginBottom: '16px' }}>
|
||||
{t('渠道')}: {currentTestChannel.name}
|
||||
</Typography.Title>
|
||||
|
||||
{/* 搜索框 */}
|
||||
<Input
|
||||
placeholder={t('搜索模型...')}
|
||||
value={modelSearchKeyword}
|
||||
onChange={(value) => setModelSearchKeyword(value)}
|
||||
style={{ marginBottom: '16px' }}
|
||||
showClear
|
||||
/>
|
||||
|
||||
<div style={{
|
||||
display: 'grid',
|
||||
gridTemplateColumns: 'repeat(auto-fill, minmax(180px, 1fr))',
|
||||
gap: '10px'
|
||||
}}>
|
||||
{currentTestChannel.models.split(',')
|
||||
.filter(model => model.toLowerCase().includes(modelSearchKeyword.toLowerCase()))
|
||||
.map((model, index) => {
|
||||
|
||||
return (
|
||||
<Button
|
||||
key={index}
|
||||
theme="light"
|
||||
type="tertiary"
|
||||
style={{
|
||||
height: 'auto',
|
||||
padding: '8px 12px',
|
||||
textAlign: 'center',
|
||||
}}
|
||||
onClick={() => {
|
||||
testChannel(currentTestChannel, model);
|
||||
}}
|
||||
>
|
||||
{model}
|
||||
</Button>
|
||||
);
|
||||
})}
|
||||
</div>
|
||||
|
||||
{/* 显示搜索结果数量 */}
|
||||
{modelSearchKeyword && (
|
||||
<Typography.Text type="secondary" style={{ marginTop: '16px', display: 'block' }}>
|
||||
{t('找到')} {currentTestChannel.models.split(',').filter(model =>
|
||||
model.toLowerCase().includes(modelSearchKeyword.toLowerCase())
|
||||
).length} {t('个模型')}
|
||||
</Typography.Text>
|
||||
)}
|
||||
</div>
|
||||
)}
|
||||
</div>
|
||||
</Modal>
|
||||
</>
|
||||
);
|
||||
};
|
||||
|
||||
@@ -21,15 +21,17 @@ import {
|
||||
IconUser,
|
||||
IconLanguage
|
||||
} from '@douyinfe/semi-icons';
|
||||
import { Avatar, Button, Dropdown, Layout, Nav, Switch } from '@douyinfe/semi-ui';
|
||||
import { Avatar, Button, Dropdown, Layout, Nav, Switch, Tag } from '@douyinfe/semi-ui';
|
||||
import { stringToColor } from '../helpers/render';
|
||||
import Text from '@douyinfe/semi-ui/lib/es/typography/text';
|
||||
import { StyleContext } from '../context/Style/index.js';
|
||||
import { StatusContext } from '../context/Status/index.js';
|
||||
|
||||
const HeaderBar = () => {
|
||||
const { t, i18n } = useTranslation();
|
||||
const [userState, userDispatch] = useContext(UserContext);
|
||||
const [styleState, styleDispatch] = useContext(StyleContext);
|
||||
const [statusState, statusDispatch] = useContext(StatusContext);
|
||||
let navigate = useNavigate();
|
||||
const [currentLang, setCurrentLang] = useState(i18n.language);
|
||||
|
||||
@@ -40,6 +42,10 @@ const HeaderBar = () => {
|
||||
const isNewYear =
|
||||
(currentDate.getMonth() === 0 && currentDate.getDate() === 1);
|
||||
|
||||
// Check if self-use mode is enabled
|
||||
const isSelfUseMode = statusState?.status?.self_use_mode_enabled || false;
|
||||
const isDemoSiteMode = statusState?.status?.demo_site_enabled || false;
|
||||
|
||||
let buttons = [
|
||||
{
|
||||
text: t('首页'),
|
||||
@@ -166,7 +172,7 @@ const HeaderBar = () => {
|
||||
onSelect={(key) => {}}
|
||||
header={styleState.isMobile?{
|
||||
logo: (
|
||||
<>
|
||||
<div style={{ display: 'flex', alignItems: 'center', position: 'relative' }}>
|
||||
{
|
||||
!styleState.showSider ?
|
||||
<Button icon={<IconMenu />} theme="light" aria-label={t('展开侧边栏')} onClick={
|
||||
@@ -176,13 +182,52 @@ const HeaderBar = () => {
|
||||
() => styleDispatch({ type: 'SET_SIDER', payload: false })
|
||||
} />
|
||||
}
|
||||
</>
|
||||
{(isSelfUseMode || isDemoSiteMode) && (
|
||||
<Tag
|
||||
color={isSelfUseMode ? 'purple' : 'blue'}
|
||||
style={{
|
||||
position: 'absolute',
|
||||
top: '-8px',
|
||||
right: '-15px',
|
||||
fontSize: '0.7rem',
|
||||
padding: '0 4px',
|
||||
height: 'auto',
|
||||
lineHeight: '1.2',
|
||||
zIndex: 1,
|
||||
pointerEvents: 'none'
|
||||
}}
|
||||
>
|
||||
{isSelfUseMode ? t('自用模式') : t('演示站点')}
|
||||
</Tag>
|
||||
)}
|
||||
</div>
|
||||
),
|
||||
}:{
|
||||
logo: (
|
||||
<img src={logo} alt='logo' />
|
||||
),
|
||||
text: systemName,
|
||||
text: (
|
||||
<div style={{ position: 'relative', display: 'inline-block' }}>
|
||||
{systemName}
|
||||
{(isSelfUseMode || isDemoSiteMode) && (
|
||||
<Tag
|
||||
color={isSelfUseMode ? 'purple' : 'blue'}
|
||||
style={{
|
||||
position: 'absolute',
|
||||
top: '-10px',
|
||||
right: '-25px',
|
||||
fontSize: '0.7rem',
|
||||
padding: '0 4px',
|
||||
whiteSpace: 'nowrap',
|
||||
zIndex: 1,
|
||||
boxShadow: '0 0 3px rgba(255, 255, 255, 0.7)'
|
||||
}}
|
||||
>
|
||||
{isSelfUseMode ? t('自用模式') : t('演示站点')}
|
||||
</Tag>
|
||||
)}
|
||||
</div>
|
||||
),
|
||||
}}
|
||||
items={buttons}
|
||||
footer={
|
||||
@@ -266,7 +311,8 @@ const HeaderBar = () => {
|
||||
icon={<IconUser />}
|
||||
/>
|
||||
{
|
||||
!styleState.isMobile && (
|
||||
// Hide register option in self-use mode
|
||||
!styleState.isMobile && !isSelfUseMode && (
|
||||
<Nav.Item
|
||||
itemKey={'register'}
|
||||
text={t('注册')}
|
||||
|
||||
@@ -464,6 +464,8 @@ const LogsTable = () => {
|
||||
other.model_ratio,
|
||||
other.model_price,
|
||||
other.group_ratio,
|
||||
other.cache_tokens || 0,
|
||||
other.cache_ratio || 1.0,
|
||||
);
|
||||
return (
|
||||
<Paragraph
|
||||
@@ -665,6 +667,8 @@ const LogsTable = () => {
|
||||
other?.audio_ratio,
|
||||
other?.audio_completion_ratio,
|
||||
other.group_ratio,
|
||||
other.cache_tokens || 0,
|
||||
other.cache_ratio || 1.0,
|
||||
);
|
||||
} else {
|
||||
content = renderModelPrice(
|
||||
@@ -674,6 +678,8 @@ const LogsTable = () => {
|
||||
other.model_price,
|
||||
other.completion_ratio,
|
||||
other.group_ratio,
|
||||
other.cache_tokens || 0,
|
||||
other.cache_ratio || 1.0,
|
||||
);
|
||||
}
|
||||
expandDataLocal.push({
|
||||
|
||||
@@ -28,6 +28,7 @@ const OperationSetting = () => {
|
||||
PreConsumedQuota: 0,
|
||||
StreamCacheQueueLength: 0,
|
||||
ModelRatio: '',
|
||||
CacheRatio: '',
|
||||
CompletionRatio: '',
|
||||
ModelPrice: '',
|
||||
GroupRatio: '',
|
||||
@@ -60,6 +61,7 @@ const OperationSetting = () => {
|
||||
RetryTimes: 0,
|
||||
Chats: "[]",
|
||||
DemoSiteEnabled: false,
|
||||
SelfUseModeEnabled: false,
|
||||
AutomaticDisableKeywords: '',
|
||||
});
|
||||
|
||||
@@ -76,7 +78,8 @@ const OperationSetting = () => {
|
||||
item.key === 'GroupRatio' ||
|
||||
item.key === 'UserUsableGroups' ||
|
||||
item.key === 'CompletionRatio' ||
|
||||
item.key === 'ModelPrice'
|
||||
item.key === 'ModelPrice' ||
|
||||
item.key === 'CacheRatio'
|
||||
) {
|
||||
item.value = JSON.stringify(JSON.parse(item.value), null, 2);
|
||||
}
|
||||
|
||||
@@ -1,8 +1,10 @@
|
||||
import React, { useEffect, useRef, useState } from 'react';
|
||||
import { Banner, Button, Col, Form, Row } from '@douyinfe/semi-ui';
|
||||
import { API, showError, showSuccess } from '../helpers';
|
||||
import React, { useContext, useEffect, useRef, useState } from 'react';
|
||||
import { Banner, Button, Col, Form, Row, Modal, Space } from '@douyinfe/semi-ui';
|
||||
import { API, showError, showSuccess, timestamp2string } from '../helpers';
|
||||
import { marked } from 'marked';
|
||||
import { useTranslation } from 'react-i18next';
|
||||
import { StatusContext } from '../context/Status/index.js';
|
||||
import Text from '@douyinfe/semi-ui/lib/es/typography/text';
|
||||
|
||||
const OtherSetting = () => {
|
||||
const { t } = useTranslation();
|
||||
@@ -16,6 +18,7 @@ const OtherSetting = () => {
|
||||
});
|
||||
let [loading, setLoading] = useState(false);
|
||||
const [showUpdateModal, setShowUpdateModal] = useState(false);
|
||||
const [statusState, statusDispatch] = useContext(StatusContext);
|
||||
const [updateData, setUpdateData] = useState({
|
||||
tag_name: '',
|
||||
content: '',
|
||||
@@ -43,6 +46,7 @@ const OtherSetting = () => {
|
||||
HomePageContent: false,
|
||||
About: false,
|
||||
Footer: false,
|
||||
CheckUpdate: false
|
||||
});
|
||||
const handleInputChange = async (value, e) => {
|
||||
const name = e.target.id;
|
||||
@@ -145,23 +149,48 @@ const OtherSetting = () => {
|
||||
}
|
||||
};
|
||||
|
||||
const openGitHubRelease = () => {
|
||||
window.location = 'https://github.com/songquanpeng/one-api/releases/latest';
|
||||
};
|
||||
|
||||
const checkUpdate = async () => {
|
||||
const res = await API.get(
|
||||
'https://api.github.com/repos/songquanpeng/one-api/releases/latest',
|
||||
);
|
||||
const { tag_name, body } = res.data;
|
||||
if (tag_name === process.env.REACT_APP_VERSION) {
|
||||
showSuccess(`已是最新版本:${tag_name}`);
|
||||
} else {
|
||||
setUpdateData({
|
||||
tag_name: tag_name,
|
||||
content: marked.parse(body),
|
||||
});
|
||||
setShowUpdateModal(true);
|
||||
try {
|
||||
setLoadingInput((loadingInput) => ({ ...loadingInput, CheckUpdate: true }));
|
||||
// Use a CORS proxy to avoid direct cross-origin requests to GitHub API
|
||||
// Option 1: Use a public CORS proxy service
|
||||
// const proxyUrl = 'https://cors-anywhere.herokuapp.com/';
|
||||
// const res = await API.get(
|
||||
// `${proxyUrl}https://api.github.com/repos/Calcium-Ion/new-api/releases/latest`,
|
||||
// );
|
||||
|
||||
// Option 2: Use the JSON proxy approach which often works better with GitHub API
|
||||
const res = await fetch(
|
||||
'https://api.github.com/repos/Calcium-Ion/new-api/releases/latest',
|
||||
{
|
||||
headers: {
|
||||
'Accept': 'application/json',
|
||||
'Content-Type': 'application/json',
|
||||
// Adding User-Agent which is often required by GitHub API
|
||||
'User-Agent': 'new-api-update-checker'
|
||||
}
|
||||
}
|
||||
).then(response => response.json());
|
||||
|
||||
// Option 3: Use a local proxy endpoint
|
||||
// Create a cached version of the response to avoid frequent GitHub API calls
|
||||
// const res = await API.get('/api/status/github-latest-release');
|
||||
|
||||
const { tag_name, body } = res;
|
||||
if (tag_name === statusState?.status?.version) {
|
||||
showSuccess(`已是最新版本:${tag_name}`);
|
||||
} else {
|
||||
setUpdateData({
|
||||
tag_name: tag_name,
|
||||
content: marked.parse(body),
|
||||
});
|
||||
setShowUpdateModal(true);
|
||||
}
|
||||
} catch (error) {
|
||||
console.error('Failed to check for updates:', error);
|
||||
showError('检查更新失败,请稍后再试');
|
||||
} finally {
|
||||
setLoadingInput((loadingInput) => ({ ...loadingInput, CheckUpdate: false }));
|
||||
}
|
||||
};
|
||||
const getOptions = async () => {
|
||||
@@ -186,9 +215,41 @@ const OtherSetting = () => {
|
||||
getOptions();
|
||||
}, []);
|
||||
|
||||
// Function to open GitHub release page
|
||||
const openGitHubRelease = () => {
|
||||
window.open(`https://github.com/Calcium-Ion/new-api/releases/tag/${updateData.tag_name}`, '_blank');
|
||||
};
|
||||
|
||||
const getStartTimeString = () => {
|
||||
const timestamp = statusState?.status?.start_time;
|
||||
return statusState.status ? timestamp2string(timestamp) : '';
|
||||
};
|
||||
|
||||
return (
|
||||
<Row>
|
||||
<Col span={24}>
|
||||
{/* 版本信息 */}
|
||||
<Form style={{ marginBottom: 15 }}>
|
||||
<Form.Section text={t('系统信息')}>
|
||||
<Row>
|
||||
<Col span={16}>
|
||||
<Space>
|
||||
<Text>
|
||||
{t('当前版本')}:{statusState?.status?.version || t('未知')}
|
||||
</Text>
|
||||
<Button type="primary" onClick={checkUpdate} loading={loadingInput['CheckUpdate']}>
|
||||
{t('检查更新')}
|
||||
</Button>
|
||||
</Space>
|
||||
</Col>
|
||||
</Row>
|
||||
<Row>
|
||||
<Col span={16}>
|
||||
<Text>{t('启动时间')}:{getStartTimeString()}</Text>
|
||||
</Col>
|
||||
</Row>
|
||||
</Form.Section>
|
||||
</Form>
|
||||
{/* 通用设置 */}
|
||||
<Form
|
||||
values={inputs}
|
||||
@@ -282,28 +343,25 @@ const OtherSetting = () => {
|
||||
</Form.Section>
|
||||
</Form>
|
||||
</Col>
|
||||
{/*<Modal*/}
|
||||
{/* onClose={() => setShowUpdateModal(false)}*/}
|
||||
{/* onOpen={() => setShowUpdateModal(true)}*/}
|
||||
{/* open={showUpdateModal}*/}
|
||||
{/*>*/}
|
||||
{/* <Modal.Header>新版本:{updateData.tag_name}</Modal.Header>*/}
|
||||
{/* <Modal.Content>*/}
|
||||
{/* <Modal.Description>*/}
|
||||
{/* <div dangerouslySetInnerHTML={{ __html: updateData.content }}></div>*/}
|
||||
{/* </Modal.Description>*/}
|
||||
{/* </Modal.Content>*/}
|
||||
{/* <Modal.Actions>*/}
|
||||
{/* <Button onClick={() => setShowUpdateModal(false)}>关闭</Button>*/}
|
||||
{/* <Button*/}
|
||||
{/* content='详情'*/}
|
||||
{/* onClick={() => {*/}
|
||||
{/* setShowUpdateModal(false);*/}
|
||||
{/* openGitHubRelease();*/}
|
||||
{/* }}*/}
|
||||
{/* />*/}
|
||||
{/* </Modal.Actions>*/}
|
||||
{/*</Modal>*/}
|
||||
<Modal
|
||||
title={t('新版本') + ':' + updateData.tag_name}
|
||||
visible={showUpdateModal}
|
||||
onCancel={() => setShowUpdateModal(false)}
|
||||
footer={[
|
||||
<Button
|
||||
key="details"
|
||||
type="primary"
|
||||
onClick={() => {
|
||||
setShowUpdateModal(false);
|
||||
openGitHubRelease();
|
||||
}}
|
||||
>
|
||||
{t('详情')}
|
||||
</Button>
|
||||
]}
|
||||
>
|
||||
<div dangerouslySetInnerHTML={{ __html: updateData.content }}></div>
|
||||
</Modal>
|
||||
</Row>
|
||||
);
|
||||
};
|
||||
|
||||
@@ -69,7 +69,11 @@ const PersonalSetting = () => {
|
||||
const [models, setModels] = useState([]);
|
||||
const [openTransfer, setOpenTransfer] = useState(false);
|
||||
const [transferAmount, setTransferAmount] = useState(0);
|
||||
const [isModelsExpanded, setIsModelsExpanded] = useState(false);
|
||||
const [isModelsExpanded, setIsModelsExpanded] = useState(() => {
|
||||
// Initialize from localStorage if available
|
||||
const savedState = localStorage.getItem('modelsExpanded');
|
||||
return savedState ? JSON.parse(savedState) : false;
|
||||
});
|
||||
const MODELS_DISPLAY_COUNT = 10; // 默认显示的模型数量
|
||||
const [notificationSettings, setNotificationSettings] = useState({
|
||||
warningType: 'email',
|
||||
@@ -124,6 +128,11 @@ const PersonalSetting = () => {
|
||||
}
|
||||
}, [userState?.user?.setting]);
|
||||
|
||||
// Save models expanded state to localStorage whenever it changes
|
||||
useEffect(() => {
|
||||
localStorage.setItem('modelsExpanded', JSON.stringify(isModelsExpanded));
|
||||
}, [isModelsExpanded]);
|
||||
|
||||
const handleInputChange = (name, value) => {
|
||||
setInputs((inputs) => ({...inputs, [name]: value}));
|
||||
};
|
||||
@@ -384,7 +393,7 @@ const PersonalSetting = () => {
|
||||
</div>
|
||||
</div>
|
||||
</Modal>
|
||||
<div style={{marginTop: 20}}>
|
||||
<div>
|
||||
<Card
|
||||
title={
|
||||
<Card.Meta
|
||||
|
||||
@@ -28,7 +28,7 @@ import {
|
||||
IconSetting,
|
||||
IconUser
|
||||
} from '@douyinfe/semi-icons';
|
||||
import { Avatar, Dropdown, Layout, Nav, Switch } from '@douyinfe/semi-ui';
|
||||
import { Avatar, Dropdown, Layout, Nav, Switch, Divider } from '@douyinfe/semi-ui';
|
||||
import { setStatusData } from '../helpers/data.js';
|
||||
import { stringToColor } from '../helpers/render.js';
|
||||
import { useSetTheme, useTheme } from '../context/Theme/index.js';
|
||||
@@ -65,35 +65,11 @@ const SiderBar = () => {
|
||||
pricing: '/pricing',
|
||||
task: '/task',
|
||||
playground: '/playground',
|
||||
personal: '/personal',
|
||||
};
|
||||
|
||||
const headerButtons = useMemo(
|
||||
const workspaceItems = useMemo(
|
||||
() => [
|
||||
{
|
||||
text: 'Playground',
|
||||
itemKey: 'playground',
|
||||
to: '/playground',
|
||||
      icon: <IconCommentStroked />,
    },
    {
      text: t('渠道'),
      itemKey: 'channel',
      to: '/channel',
      icon: <IconLayers />,
      className: isAdmin() ? '' : 'tableHiddle',
    },
    {
      text: t('聊天'),
      itemKey: 'chat',
      items: chatItems,
      icon: <IconComment />,
    },
    {
      text: t('令牌'),
      itemKey: 'token',
      to: '/token',
      icon: <IconKey />,
    },
    {
      text: t('数据看板'),
      itemKey: 'detail',
@@ -105,33 +81,19 @@ const SiderBar = () => {
          : 'tableHiddle',
    },
    {
      text: t('兑换码'),
      itemKey: 'redemption',
      to: '/redemption',
      icon: <IconGift />,
      className: isAdmin() ? '' : 'tableHiddle',
      text: t('API令牌'),
      itemKey: 'token',
      to: '/token',
      icon: <IconKey />,
    },
    {
      text: t('钱包'),
      itemKey: 'topup',
      to: '/topup',
      icon: <IconCreditCard />,
    },
    {
      text: t('用户管理'),
      itemKey: 'user',
      to: '/user',
      icon: <IconUser />,
      className: isAdmin() ? '' : 'tableHiddle',
    },
    {
      text: t('日志'),
      text: t('使用日志'),
      itemKey: 'log',
      to: '/log',
      icon: <IconHistogram />,
    },
    {
      text: t('绘图'),
      text: t('绘图日志'),
      itemKey: 'midjourney',
      to: '/midjourney',
      icon: <IconImage />,
@@ -141,75 +103,148 @@ const SiderBar = () => {
          : 'tableHiddle',
    },
    {
      text: t('异步任务'),
      text: t('任务日志'),
      itemKey: 'task',
      to: '/task',
      icon: <IconChecklistStroked />,
      className:
        localStorage.getItem('enable_task') === 'true'
          ? ''
          : 'tableHiddle',
    },
    {
      text: t('设置'),
      itemKey: 'setting',
      to: '/setting',
      icon: <IconSetting />,
    },
        localStorage.getItem('enable_task') === 'true'
          ? ''
          : 'tableHiddle',
    }
  ],
  [
    localStorage.getItem('enable_data_export'),
    localStorage.getItem('enable_drawing'),
    localStorage.getItem('enable_task'),
    localStorage.getItem('chat_link'),
    chatItems,
    isAdmin(),
    t,
  ],
);

const financeItems = useMemo(
  () => [
    {
      text: t('钱包'),
      itemKey: 'topup',
      to: '/topup',
      icon: <IconCreditCard />,
    },
    {
      text: t('个人设置'),
      itemKey: 'personal',
      to: '/personal',
      icon: <IconUser />,
    },
  ],
  [t],
);

const adminItems = useMemo(
  () => [
    {
      text: t('渠道'),
      itemKey: 'channel',
      to: '/channel',
      icon: <IconLayers />,
      className: isAdmin() ? '' : 'tableHiddle',
    },
    {
      text: t('兑换码'),
      itemKey: 'redemption',
      to: '/redemption',
      icon: <IconGift />,
      className: isAdmin() ? '' : 'tableHiddle',
    },
    {
      text: t('用户管理'),
      itemKey: 'user',
      to: '/user',
      icon: <IconUser />,
    },
    {
      text: t('系统设置'),
      itemKey: 'setting',
      to: '/setting',
      icon: <IconSetting />,
    },
  ],
  [isAdmin(), t],
);

const chatMenuItems = useMemo(
  () => [
    {
      text: 'Playground',
      itemKey: 'playground',
      to: '/playground',
      icon: <IconCommentStroked />,
    },
    {
      text: t('聊天'),
      itemKey: 'chat',
      items: chatItems,
      icon: <IconComment />,
    },
  ],
  [chatItems, t],
);

useEffect(() => {
  let localKey = window.location.pathname.split('/')[1];
  if (localKey === '') {
    localKey = 'home';
  }
  setSelectedKeys([localKey]);

  let chatLink = localStorage.getItem('chat_link');
  if (!chatLink) {
    let chats = localStorage.getItem('chats');
    if (chats) {
      // console.log(chats);
      try {
        chats = JSON.parse(chats);
        if (Array.isArray(chats)) {
          let chatItems = [];
          for (let i = 0; i < chats.length; i++) {
            let chat = {};
            for (let key in chats[i]) {
              chat.text = key;
              chat.itemKey = 'chat' + i;
              chat.to = '/chat/' + i;
            }
            // setRouterMap({ ...routerMap, chat: '/chat/' + i })
            chatItems.push(chat);
          }
          setChatItems(chatItems);
        }
      } catch (e) {
        console.error(e);
        showError('聊天数据解析失败')
  let chats = localStorage.getItem('chats');
  if (chats) {
    // console.log(chats);
    try {
      chats = JSON.parse(chats);
      if (Array.isArray(chats)) {
        let chatItems = [];
        for (let i = 0; i < chats.length; i++) {
          let chat = {};
          for (let key in chats[i]) {
            chat.text = key;
            chat.itemKey = 'chat' + i;
            chat.to = '/chat/' + i;
          }
          // setRouterMap({ ...routerMap, chat: '/chat/' + i })
          chatItems.push(chat);
        }
        setChatItems(chatItems);
      }
    } catch (e) {
      console.error(e);
      showError('聊天数据解析失败')
    }
  }
  }

  setIsCollapsed(localStorage.getItem('default_collapse_sidebar') === 'true');
}, []);
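The effect above derives the chat menu from a `chats` entry in localStorage. Judging from the parsing loop, that entry appears to be a JSON array of single-key objects whose key is the display name (the value is not read here; each entry simply routes to `/chat/<index>`). A minimal sketch of that assumed shape and the items it would produce:

```js
// Illustrative only: the exact shape of `chats` is an assumption based on the
// parsing loop above (array of one-key objects: { "<display name>": "<url>" }).
const chats = [
  { 'ChatGPT-Next-Web': 'https://chat.example.com' },
  { 'Lobe Chat': 'https://lobe.example.com' },
];

// Mirror of the loop above: only the key (display name) and the index are used.
const chatItems = chats.map((entry, i) => {
  const [name] = Object.keys(entry);
  return { text: name, itemKey: 'chat' + i, to: '/chat/' + i };
});

console.log(chatItems);
// [ { text: 'ChatGPT-Next-Web', itemKey: 'chat0', to: '/chat/0' },
//   { text: 'Lobe Chat', itemKey: 'chat1', to: '/chat/1' } ]
```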
// Custom divider style
const dividerStyle = {
  margin: '8px 0',
  opacity: 0.6,
};

// Custom group label style
const groupLabelStyle = {
  padding: '8px 16px',
  color: 'var(--semi-color-text-2)',
  fontSize: '12px',
  fontWeight: 'normal',
};

return (
  <>
    <Nav
      style={{ maxWidth: 220, height: '100%' }}
      style={{ maxWidth: 200, height: '100%' }}
      defaultIsCollapsed={
        localStorage.getItem('default_collapse_sidebar') === 'true'
      }
@@ -219,27 +254,27 @@ const SiderBar = () => {
      }}
      selectedKeys={selectedKeys}
      renderWrapper={({ itemElement, isSubNav, isInSubNav, props }) => {
        let chatLink = localStorage.getItem('chat_link');
        if (!chatLink) {
          let chats = localStorage.getItem('chats');
          if (chats) {
            chats = JSON.parse(chats);
            if (Array.isArray(chats) && chats.length > 0) {
              for (let i = 0; i < chats.length; i++) {
                routerMap['chat' + i] = '/chat/' + i;
              }
              if (chats.length > 1) {
                // delete /chat
                if (routerMap['chat']) {
                  delete routerMap['chat'];
                }
              } else {
                // rename /chat to /chat/0
                routerMap['chat'] = '/chat/0';
              }
            }
        let chatLink = localStorage.getItem('chat_link');
        if (!chatLink) {
          let chats = localStorage.getItem('chats');
          if (chats) {
            chats = JSON.parse(chats);
            if (Array.isArray(chats) && chats.length > 0) {
              for (let i = 0; i < chats.length; i++) {
                routerMap['chat' + i] = '/chat/' + i;
              }
              if (chats.length > 1) {
                // delete /chat
                if (routerMap['chat']) {
                  delete routerMap['chat'];
                }
              } else {
                // rename /chat to /chat/0
                routerMap['chat'] = '/chat/0';
              }
            }
          }
        }
        return (
          <Link
            style={{ textDecoration: 'none' }}
@@ -249,7 +284,6 @@ const SiderBar = () => {
          </Link>
        );
      }}
      items={headerButtons}
      onSelect={(key) => {
        if (key.itemKey.toString().startsWith('chat')) {
          styleDispatch({ type: 'SET_INNER_PADDING', payload: false });
@@ -258,12 +292,101 @@ const SiderBar = () => {
        }
        setSelectedKeys([key.itemKey]);
      }}
      footer={
        <>
        </>
      }
    >
      <Nav.Footer collapseButton={true}></Nav.Footer>
      {/* Chat Section - Only show if there are chat items */}
      {chatItems.length > 0 && (
        <>
          {chatMenuItems.map((item) => {
            if (item.items && item.items.length > 0) {
              return (
                <Nav.Sub
                  key={item.itemKey}
                  itemKey={item.itemKey}
                  text={item.text}
                  icon={item.icon}
                >
                  {item.items.map((subItem) => (
                    <Nav.Item
                      key={subItem.itemKey}
                      itemKey={subItem.itemKey}
                      text={subItem.text}
                    />
                  ))}
                </Nav.Sub>
              );
            } else {
              return (
                <Nav.Item
                  key={item.itemKey}
                  itemKey={item.itemKey}
                  text={item.text}
                  icon={item.icon}
                />
              );
            }
          })}
        </>
      )}

      {/* Divider */}
      <Divider style={dividerStyle} />

      {/* Workspace Section */}
      {!isCollapsed && <div style={groupLabelStyle}>{t('控制台')}</div>}
      {workspaceItems.map((item) => (
        <Nav.Item
          key={item.itemKey}
          itemKey={item.itemKey}
          text={item.text}
          icon={item.icon}
          className={item.className}
        />
      ))}

      {/* Divider */}
      <Divider style={dividerStyle} />

      {/* Finance Management Section */}
      {!isCollapsed && <div style={groupLabelStyle}>{t('个人中心')}</div>}
      {financeItems.map((item) => (
        <Nav.Item
          key={item.itemKey}
          itemKey={item.itemKey}
          text={item.text}
          icon={item.icon}
          className={item.className}
        />
      ))}

      {isAdmin() && (
        <>
          {/* Divider */}
          <Divider style={dividerStyle} />

          {/* Admin Section */}
          {adminItems.map((item) => (
            <Nav.Item
              key={item.itemKey}
              itemKey={item.itemKey}
              text={item.text}
              icon={item.icon}
              className={item.className}
            />
          ))}
        </>
      )}

      <Nav.Footer
        collapseButton={true}
        collapseText={(collapsed)=>
          {
            if(collapsed){
              return t('展开侧边栏')
            }
            return t('收起侧边栏')
          }
        }
      />
    </Nav>
  </>
);
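The `renderWrapper` above rewrites `routerMap` entries according to how many chats are configured: with several chats the generic `/chat` entry is dropped in favour of `/chat/<index>` routes, with a single chat it is pointed at `/chat/0`. A small illustrative sketch of the outcome; the initial `routerMap` contents here are an assumption, not the component's real table:

```js
// Assumed starting point; the real routerMap lives elsewhere in the component.
const baseRouterMap = { home: '/', chat: '/chat' };

// Same branching as in renderWrapper above.
function applyChatRoutes(routerMap, chats) {
  if (!Array.isArray(chats) || chats.length === 0) return routerMap;
  for (let i = 0; i < chats.length; i++) {
    routerMap['chat' + i] = '/chat/' + i;
  }
  if (chats.length > 1) {
    delete routerMap['chat']; // several chats: only indexed routes remain
  } else {
    routerMap['chat'] = '/chat/0'; // single chat: generic entry points at index 0
  }
  return routerMap;
}

console.log(applyChatRoutes({ ...baseRouterMap }, [{ a: 1 }]));
// { home: '/', chat: '/chat/0', chat0: '/chat/0' }
console.log(applyChatRoutes({ ...baseRouterMap }, [{ a: 1 }, { b: 2 }]));
// { home: '/', chat0: '/chat/0', chat1: '/chat/1' }
```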
@@ -82,7 +82,7 @@ export const CHANNEL_OPTIONS = [
  {
    value: 45,
    color: 'blue',
    label: '火山方舟(豆包)'
    label: '字节火山方舟、豆包、DeepSeek通用'
  },
  { value: 25, color: 'green', label: 'Moonshot' },
  { value: 19, color: 'blue', label: '360 智脑' },

@@ -298,6 +298,8 @@ export function renderModelPrice(
  modelPrice = -1,
  completionRatio,
  groupRatio,
  cacheTokens = 0,
  cacheRatio = 1.0,
) {
  if (modelPrice !== -1) {
    return i18next.t('模型价格:${{price}} * 分组倍率:{{ratio}} = ${{total}}', {
@@ -311,9 +313,15 @@ export function renderModelPrice(
  }
  let inputRatioPrice = modelRatio * 2.0;
  let completionRatioPrice = modelRatio * 2.0 * completionRatio;
  let cacheRatioPrice = modelRatio * 2.0 * cacheRatio;

  // Calculate effective input tokens (non-cached + cached with ratio applied)
  const effectiveInputTokens = (inputTokens - cacheTokens) + (cacheTokens * cacheRatio);

  let price =
    (inputTokens / 1000000) * inputRatioPrice * groupRatio +
    (effectiveInputTokens / 1000000) * inputRatioPrice * groupRatio +
    (completionTokens / 1000000) * completionRatioPrice * groupRatio;

  return (
    <>
      <article>
@@ -327,16 +335,36 @@ export function renderModelPrice(
          ratio: groupRatio,
          total: completionRatioPrice * groupRatio
        })}</p>
        {cacheTokens > 0 && (
          <p>{i18next.t('缓存:${{price}} * {{ratio}} = ${{total}} / 1M tokens (缓存比例: {{cacheRatio}})', {
            price: cacheRatioPrice,
            ratio: groupRatio,
            total: cacheRatioPrice * groupRatio,
            cacheRatio: cacheRatio
          })}</p>
        )}
        <p></p>
        <p>
          {i18next.t('提示 {{input}} tokens / 1M tokens * ${{price}} + 补全 {{completion}} tokens / 1M tokens * ${{compPrice}} * 分组 {{ratio}} = ${{total}}', {
            input: inputTokens,
            price: inputRatioPrice,
            completion: completionTokens,
            compPrice: completionRatioPrice,
            ratio: groupRatio,
            total: price.toFixed(6)
          })}
          {cacheTokens > 0 ?
            i18next.t('提示 {{nonCacheInput}} tokens + 缓存 {{cacheInput}} tokens * {{cacheRatio}} / 1M tokens * ${{price}} + 补全 {{completion}} tokens / 1M tokens * ${{compPrice}} * 分组 {{ratio}} = ${{total}}', {
              nonCacheInput: inputTokens - cacheTokens,
              cacheInput: cacheTokens,
              cacheRatio: cacheRatio,
              price: inputRatioPrice,
              completion: completionTokens,
              compPrice: completionRatioPrice,
              ratio: groupRatio,
              total: price.toFixed(6)
            }) :
            i18next.t('提示 {{input}} tokens / 1M tokens * ${{price}} + 补全 {{completion}} tokens / 1M tokens * ${{compPrice}} * 分组 {{ratio}} = ${{total}}', {
              input: inputTokens,
              price: inputRatioPrice,
              completion: completionTokens,
              compPrice: completionRatioPrice,
              ratio: groupRatio,
              total: price.toFixed(6)
            })
          }
        </p>
        <p>{i18next.t('仅供参考,以实际扣费为准')}</p>
      </article>
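The new cache handling above prices cached prompt tokens at `cacheRatio` instead of full price by folding them into `effectiveInputTokens`. A standalone sketch of the same arithmetic, with made-up example values:

```js
// Same formula as renderModelPrice above: 1 ratio = $0.002 / 1K tokens, hence the * 2.0 per 1M.
function estimatePrice({ inputTokens, completionTokens, cacheTokens = 0,
                         modelRatio, completionRatio, cacheRatio = 1.0, groupRatio }) {
  const inputRatioPrice = modelRatio * 2.0;
  const completionRatioPrice = modelRatio * 2.0 * completionRatio;
  // Non-cached tokens at full price, cached tokens discounted by cacheRatio.
  const effectiveInputTokens = (inputTokens - cacheTokens) + cacheTokens * cacheRatio;
  return (
    (effectiveInputTokens / 1000000) * inputRatioPrice * groupRatio +
    (completionTokens / 1000000) * completionRatioPrice * groupRatio
  );
}

// Example: half of a 10k-token prompt served from cache at a 0.5 cache ratio.
console.log(estimatePrice({
  inputTokens: 10000, cacheTokens: 5000, completionTokens: 2000,
  modelRatio: 1.25, completionRatio: 4, cacheRatio: 0.5, groupRatio: 1,
}).toFixed(6)); // "0.038750"
```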
@@ -349,6 +377,8 @@ export function renderModelPriceSimple(
  modelRatio,
  modelPrice = -1,
  groupRatio,
  cacheTokens = 0,
  cacheRatio = 1.0,
) {
  if (modelPrice !== -1) {
    return i18next.t('价格:${{price}} * 分组:{{ratio}}', {
@@ -356,10 +386,18 @@ export function renderModelPriceSimple(
      ratio: groupRatio
    });
  } else {
    return i18next.t('模型: {{ratio}} * 分组: {{groupRatio}}', {
      ratio: modelRatio,
      groupRatio: groupRatio
    });
    if (cacheTokens !== 0) {
      return i18next.t('模型: {{ratio}} * 分组: {{groupRatio}} * 缓存比例: {{cacheRatio}}', {
        ratio: modelRatio,
        groupRatio: groupRatio,
        cacheRatio: cacheRatio
      });
    } else {
      return i18next.t('模型: {{ratio}} * 分组: {{groupRatio}}', {
        ratio: modelRatio,
        groupRatio: groupRatio
      });
    }
  }
}

@@ -374,6 +412,8 @@ export function renderAudioModelPrice(
  audioRatio,
  audioCompletionRatio,
  groupRatio,
  cacheTokens = 0,
  cacheRatio = 1.0,
) {
  // 1 ratio = $0.002 / 1K tokens
  if (modelPrice !== -1) {
@@ -388,8 +428,13 @@ export function renderAudioModelPrice(
  // 这里的 *2 是因为 1倍率=0.002刀,请勿删除
  let inputRatioPrice = modelRatio * 2.0;
  let completionRatioPrice = modelRatio * 2.0 * completionRatio;
  let cacheRatioPrice = modelRatio * 2.0 * cacheRatio;

  // Calculate effective input tokens (non-cached + cached with ratio applied)
  const effectiveInputTokens = (inputTokens - cacheTokens) + (cacheTokens * cacheRatio);

  let price =
    (inputTokens / 1000000) * inputRatioPrice * groupRatio +
    (effectiveInputTokens / 1000000) * inputRatioPrice * groupRatio +
    (completionTokens / 1000000) * completionRatioPrice * groupRatio +
    (audioInputTokens / 1000000) * inputRatioPrice * audioRatio * groupRatio +
    (audioCompletionTokens / 1000000) * inputRatioPrice * audioRatio * audioCompletionRatio * groupRatio;
@@ -406,6 +451,14 @@ export function renderAudioModelPrice(
          ratio: groupRatio,
          total: completionRatioPrice * groupRatio
        })}</p>
        {cacheTokens > 0 && (
          <p>{i18next.t('缓存:${{price}} * {{ratio}} = ${{total}} / 1M tokens (缓存比例: {{cacheRatio}})', {
            price: cacheRatioPrice,
            ratio: groupRatio,
            total: cacheRatioPrice * groupRatio,
            cacheRatio: cacheRatio
          })}</p>
        )}
        <p>{i18next.t('音频提示:${{price}} * {{ratio}} * {{audioRatio}} = ${{total}} / 1M tokens', {
          price: inputRatioPrice,
          ratio: groupRatio,
@@ -420,12 +473,22 @@ export function renderAudioModelPrice(
          total: inputRatioPrice * audioRatio * audioCompletionRatio * groupRatio
        })}</p>
        <p>
          {i18next.t('文字提示 {{input}} tokens / 1M tokens * ${{price}} + 文字补全 {{completion}} tokens / 1M tokens * ${{compPrice}} +', {
            input: inputTokens,
            price: inputRatioPrice,
            completion: completionTokens,
            compPrice: completionRatioPrice
          })}
          {cacheTokens > 0 ?
            i18next.t('文字提示 {{nonCacheInput}} tokens + 文字缓存 {{cacheInput}} tokens * {{cacheRatio}} / 1M tokens * ${{price}} + 文字补全 {{completion}} tokens / 1M tokens * ${{compPrice}} +', {
              nonCacheInput: inputTokens - cacheTokens,
              cacheInput: cacheTokens,
              cacheRatio: cacheRatio,
              price: inputRatioPrice,
              completion: completionTokens,
              compPrice: completionRatioPrice
            }) :
            i18next.t('文字提示 {{input}} tokens / 1M tokens * ${{price}} + 文字补全 {{completion}} tokens / 1M tokens * ${{compPrice}} +', {
              input: inputTokens,
              price: inputRatioPrice,
              completion: completionTokens,
              compPrice: completionRatioPrice
            })
          }
        </p>
        <p>
          {i18next.t('音频提示 {{input}} tokens / 1M tokens * ${{price}} * {{audioRatio}} + 音频补全 {{completion}} tokens / 1M tokens * ${{price}} * {{audioRatio}} * {{audioCompRatio}}', {
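`renderAudioModelPrice` applies the same cache discount to text prompt tokens and then adds separate audio terms, with audio tokens billed at `inputRatioPrice * audioRatio` (and additionally `* audioCompletionRatio` for output audio). A condensed sketch of the full sum; all parameter values below are illustrative:

```js
// Condensed form of the price expression above.
function estimateAudioPrice(p) {
  const inputRatioPrice = p.modelRatio * 2.0;
  const completionRatioPrice = inputRatioPrice * p.completionRatio;
  const effectiveInputTokens =
    (p.inputTokens - p.cacheTokens) + p.cacheTokens * p.cacheRatio;
  return (
    (effectiveInputTokens / 1000000) * inputRatioPrice * p.groupRatio +
    (p.completionTokens / 1000000) * completionRatioPrice * p.groupRatio +
    (p.audioInputTokens / 1000000) * inputRatioPrice * p.audioRatio * p.groupRatio +
    (p.audioCompletionTokens / 1000000) * inputRatioPrice * p.audioRatio *
      p.audioCompletionRatio * p.groupRatio
  );
}

console.log(estimateAudioPrice({
  inputTokens: 1000, cacheTokens: 0, cacheRatio: 1.0, completionTokens: 500,
  audioInputTokens: 1000, audioCompletionTokens: 500,
  modelRatio: 2.5, completionRatio: 2, audioRatio: 20, audioCompletionRatio: 2, groupRatio: 1,
})); // 0.21
```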
@@ -621,6 +621,7 @@
  "窗口等待": "window wait",
  "失败": "Failed",
  "绘图": "Drawing",
  "绘图日志": "Drawing log",
  "放大": "Upscalers",
  "微妙放大": "Upscale (Subtle)",
  "创造放大": "Upscale (Creative)",
@@ -1120,7 +1121,7 @@
  "知识库 ID": "Knowledge Base ID",
  "请输入知识库 ID,例如:123456": "Please enter knowledge base ID, e.g.: 123456",
  "可选值": "Optional value",
  "异步任务": "Async task",
  "任务日志": "Task log",
  "你好": "Hello",
  "你好,请问有什么可以帮助您的吗?": "Hello, how may I help you?",
  "用户分组": "Your default group",
@@ -1317,5 +1318,25 @@
  "当前设置类型: ": "Current setting type: ",
  "固定价格值": "Fixed Price Value",
  "未设置倍率模型": "Models without ratio settings",
  "模型倍率和补全倍率同时设置": "Both model ratio and completion ratio are set"
  "模型倍率和补全倍率同时设置": "Both model ratio and completion ratio are set",
  "自用模式": "Self-use mode",
  "开启后不限制:必须设置模型倍率": "After enabling, no limit: must set model ratio",
  "演示站点模式": "Demo site mode",
  "当前版本": "Current version",
  "Gemini设置": "Gemini settings",
  "Gemini安全设置": "Gemini safety settings",
  "default为默认设置,可单独设置每个分类的安全等级": "\"default\" is the default setting, and each category can be set separately",
  "Gemini版本设置": "Gemini version settings",
  "default为默认设置,可单独设置每个模型的版本": "\"default\" is the default setting, and each model can be set separately",
  "Claude设置": "Claude settings",
  "Claude请求头覆盖": "Claude request header override",
  "示例": "Example",
  "缺省 MaxTokens": "Default MaxTokens",
  "启用Claude思考适配(-thinking后缀)": "Enable Claude thinking adaptation (-thinking suffix)",
  "Claude思考适配 BudgetTokens = MaxTokens * BudgetTokens 百分比": "Claude thinking adaptation BudgetTokens = MaxTokens * BudgetTokens percentage",
  "思考适配 BudgetTokens 百分比": "Thinking adaptation BudgetTokens percentage",
  "0.1-1之间的小数": "Decimal between 0.1 and 1",
  "模型相关设置": "Model related settings",
  "收起侧边栏": "Collapse sidebar",
  "展开侧边栏": "Expand sidebar"
}
@@ -18,6 +18,8 @@ const CLAUDE_HEADER = {

const CLAUDE_DEFAULT_MAX_TOKENS = {
  'default': 8192,
  "claude-3-haiku-20240307": 4096,
  "claude-3-opus-20240229": 4096,
  'claude-3-7-sonnet-20250219-thinking': 8192,
}
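`CLAUDE_DEFAULT_MAX_TOKENS` above provides a per-model fallback for `max_tokens`, and the new locale strings describe a thinking budget computed as a percentage of MaxTokens. A hedged sketch of how the two could combine; the helper names and the way the percentage is applied are assumptions for illustration, not the repository's actual functions:

```js
const CLAUDE_DEFAULT_MAX_TOKENS = {
  default: 8192,
  'claude-3-haiku-20240307': 4096,
  'claude-3-opus-20240229': 4096,
  'claude-3-7-sonnet-20250219-thinking': 8192,
};

// Hypothetical helper: unknown models fall back to the 'default' entry.
function defaultMaxTokens(model) {
  return CLAUDE_DEFAULT_MAX_TOKENS[model] ?? CLAUDE_DEFAULT_MAX_TOKENS.default;
}

// Hypothetical helper: BudgetTokens = MaxTokens * percentage (0.1–1 per the setting text).
function thinkingBudget(model, budgetPercentage = 0.8) {
  return Math.floor(defaultMaxTokens(model) * budgetPercentage);
}

console.log(defaultMaxTokens('claude-3-opus-20240229')); // 4096
console.log(thinkingBudget('claude-3-7-sonnet-20250219-thinking', 0.5)); // 4096
```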
@@ -15,6 +15,7 @@ export default function ModelRatioSettings(props) {
  const [inputs, setInputs] = useState({
    ModelPrice: '',
    ModelRatio: '',
    CacheRatio: '',
    CompletionRatio: '',
  });
  const refForm = useRef();
@@ -139,6 +140,25 @@ export default function ModelRatioSettings(props) {
          />
        </Col>
      </Row>
      <Row gutter={16}>
        <Col span={16}>
          <Form.TextArea
            label={t('提示缓存倍率')}
            placeholder={t('为一个 JSON 文本,键为模型名称,值为倍率')}
            field={'CacheRatio'}
            autosize={{ minRows: 6, maxRows: 12 }}
            trigger='blur'
            stopValidateWithError
            rules={[
              {
                validator: (rule, value) => verifyJSON(value),
                message: '不是合法的 JSON 字符串'
              }
            ]}
            onChange={(value) => setInputs({ ...inputs, CacheRatio: value })}
          />
        </Col>
      </Row>
      <Row gutter={16}>
        <Col span={16}>
          <Form.TextArea
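The new 提示缓存倍率 (prompt cache ratio) textarea expects the same format as the other ratio settings: a JSON object keyed by model name with a numeric ratio as the value, validated by `verifyJSON`. An illustrative value; the model names and numbers below are examples, not shipped defaults:

```js
// Example CacheRatio value: cached prompt tokens for these models would be
// billed at the given fraction of the normal input price.
const cacheRatioExample = {
  'gpt-4o': 0.5,
  'gpt-4o-mini': 0.5,
  'deepseek-chat': 0.1,
};

// The form stores and validates the raw text, so it must remain strict JSON.
console.log(JSON.stringify(cacheRatioExample, null, 2));
```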
@@ -55,18 +55,17 @@ export default function ModelRatioNotSetEditor(props) {
    const unsetModels = enabledModels.filter(modelName => {
      const hasPrice = modelPrice[modelName] !== undefined;
      const hasRatio = modelRatio[modelName] !== undefined;
      const hasCompletionRatio = completionRatio[modelName] !== undefined;

      // 如果模型既没有价格也没有倍率设置,则显示
      return !(hasPrice || (hasRatio && hasCompletionRatio));
      // 如果模型没有价格或者没有倍率设置,则显示
      return !hasPrice && !hasRatio;
    });

    // 创建模型数据
    const modelData = unsetModels.map(name => ({
      name,
      price: '',
      ratio: '',
      completionRatio: ''
      price: modelPrice[name] || '',
      ratio: modelRatio[name] || '',
      completionRatio: completionRatio[name] || ''
    }));

    setModels(modelData);
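The edited filter changes which models appear in the "not set" editor: a model is now listed only when it has neither a price nor a model ratio, and any partial values it already has are carried into the table instead of being blanked. A small sketch of the difference, with made-up data:

```js
// Illustrative per-model settings as they would come from the settings JSON.
const modelPrice = { 'model-a': 0.01 };
const modelRatio = { 'model-b': 1.5 };
const completionRatio = {};
const enabledModels = ['model-a', 'model-b', 'model-c'];

const oldUnset = enabledModels.filter((m) => {
  const hasPrice = modelPrice[m] !== undefined;
  const hasRatio = modelRatio[m] !== undefined;
  const hasCompletionRatio = completionRatio[m] !== undefined;
  return !(hasPrice || (hasRatio && hasCompletionRatio)); // previous rule
});

const newUnset = enabledModels.filter((m) => {
  const hasPrice = modelPrice[m] !== undefined;
  const hasRatio = modelRatio[m] !== undefined;
  return !hasPrice && !hasRatio; // edited rule
});

console.log(oldUnset); // [ 'model-b', 'model-c' ]  (ratio without completion ratio was still listed)
console.log(newUnset); // [ 'model-c' ]             (model-b now counts as configured)
```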
@@ -22,6 +22,7 @@ export default function GeneralSettings(props) {
    DisplayTokenStatEnabled: false,
    DefaultCollapseSidebar: false,
    DemoSiteEnabled: false,
    SelfUseModeEnabled: false,
  });
  const refForm = useRef();
  const [inputsRow, setInputsRow] = useState(inputs);
@@ -205,6 +206,22 @@ export default function GeneralSettings(props) {
            }
          />
        </Col>
        <Col span={8}>
          <Form.Switch
            field={'SelfUseModeEnabled'}
            label={t('自用模式')}
            extraText={t('开启后不限制:必须设置模型倍率')}
            size='default'
            checkedText='|'
            uncheckedText='〇'
            onChange={(value) =>
              setInputs({
                ...inputs,
                SelfUseModeEnabled: value
              })
            }
          />
        </Col>
      </Row>
      <Row>
        <Button size='default' onClick={onSubmit}>
@@ -18,9 +18,6 @@ const Setting = () => {
  const [tabActiveKey, setTabActiveKey] = useState('1');
  let panes = [
    {
      tab: t('个人设置'),
      content: <PersonalSetting />,
      itemKey: 'personal',
    },
  ];

@@ -61,7 +58,7 @@ const Setting = () => {
    if (tab) {
      setTabActiveKey(tab);
    } else {
      onChangeTab('personal');
      onChangeTab('operation');
    }
  }, [location.search]);
  return (