Compare commits

...

8 Commits

Author SHA1 Message Date
CaIon 45e1042e58 update GeneralOpenAIRequest 2023-12-20 21:20:09 +08:00
CaIon f5a36a05e5 log more useful information 2023-12-20 20:53:36 +08:00
CaIon 5730c69385 add change-password feature to personal settings 2023-12-20 20:53:15 +08:00
CaIon 2d33283afb fix gemini 2023-12-19 12:53:56 +08:00
CaIon e057c0e42e add gemini support 2023-12-18 23:46:05 +08:00
CaIon b3f1da44dd fix vision format issue 2023-12-18 23:46:05 +08:00
CaIon f048cefed1 update README.md 2023-12-18 23:46:05 +08:00
CaIon 0226d94ea6 update docker-compose.yml 2023-12-18 23:46:04 +08:00
19 changed files with 485 additions and 32 deletions

View File

@@ -1,5 +1,5 @@
# Neko API
# New API
> [!NOTE]
> This project is open source, developed as a derivative of [One API](https://github.com/songquanpeng/one-api); many thanks to the original author for their selfless contribution.

View File

@@ -190,6 +190,7 @@ const (
ChannelTypeAIProxyLibrary = 21
ChannelTypeFastGPT = 22
ChannelTypeTencent = 23
ChannelTypeGemini = 24
)
var ChannelBaseURLs = []string{
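
Each ChannelType constant doubles as an index into ChannelBaseURLs, so the new value 24 needs a matching slot in the slice whose declaration is shown above; the entry itself is elided from this diff, and the relay code later in this compare also falls back to the official endpoint when the configured base URL is empty. A minimal sketch of the assumed pairing:

// Hypothetical sketch, not the file's actual contents: ChannelBaseURLs is
// indexed by the ChannelType constants, so slot 24 is assumed to carry
// Google's Generative Language endpoint.
var ChannelBaseURLs = []string{
	// ... entries for channel types 0-23 ...
	"https://generativelanguage.googleapis.com", // 24: ChannelTypeGemini
}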

View File

@@ -61,6 +61,7 @@ var ModelRatio = map[string]float64{
"ERNIE-Bot-4": 8.572, // ¥0.12 / 1k tokens
"Embedding-V1": 0.1429, // ¥0.002 / 1k tokens
"PaLM-2": 1,
"gemini-pro": 1, // $0.00025 / 1k characters -> $0.001 / 1k tokens
"chatglm_turbo": 0.3572, // ¥0.005 / 1k tokens
"chatglm_pro": 0.7143, // ¥0.01 / 1k tokens
"chatglm_std": 0.3572, // ¥0.005 / 1k tokens

View File

@@ -29,6 +29,8 @@ func testChannel(channel *model.Channel, request ChatRequest) (err error, openai
fallthrough
case common.ChannelType360:
fallthrough
case common.ChannelTypeGemini:
fallthrough
case common.ChannelTypeXunfei:
return errors.New("该渠道类型当前版本不支持测试,请手动测试"), nil
case common.ChannelTypeAzure:
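
For readers less used to Go: the chained fallthrough statements above funnel several channel types, Gemini now among them, into the single "test manually" branch. A minimal standalone illustration of that pattern:

package main

import "fmt"

func main() {
	// fallthrough unconditionally continues into the next case body,
	// so cases 1, 2 and 3 all share the final branch.
	switch v := 2; v {
	case 1:
		fallthrough
	case 2:
		fallthrough
	case 3:
		fmt.Println("not testable in this version")
	}
}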

View File

@@ -423,6 +423,15 @@ func init() {
Root: "PaLM-2",
Parent: nil,
},
{
Id: "gemini-pro",
Object: "model",
Created: 1677649963,
OwnedBy: "google",
Permission: permission,
Root: "gemini-pro",
Parent: nil,
},
{
Id: "chatglm_turbo",
Object: "model",

View File

@@ -18,7 +18,7 @@ type ClaudeMetadata struct {
type ClaudeRequest struct {
Model string `json:"model"`
Prompt string `json:"prompt"`
MaxTokensToSample int `json:"max_tokens_to_sample"`
MaxTokensToSample uint `json:"max_tokens_to_sample"`
StopSequences []string `json:"stop_sequences,omitempty"`
Temperature float64 `json:"temperature,omitempty"`
TopP float64 `json:"top_p,omitempty"`

controller/relay-gemini.go (new file, 307 lines)
View File

@@ -0,0 +1,307 @@
package controller
import (
"bufio"
"encoding/json"
"fmt"
"io"
"net/http"
"one-api/common"
"strings"
"github.com/gin-gonic/gin"
)
type GeminiChatRequest struct {
Contents []GeminiChatContent `json:"contents"`
SafetySettings []GeminiChatSafetySettings `json:"safety_settings,omitempty"`
GenerationConfig GeminiChatGenerationConfig `json:"generation_config,omitempty"`
Tools []GeminiChatTools `json:"tools,omitempty"`
}
type GeminiInlineData struct {
MimeType string `json:"mimeType"`
Data string `json:"data"`
}
type GeminiPart struct {
Text string `json:"text,omitempty"`
InlineData *GeminiInlineData `json:"inlineData,omitempty"`
}
type GeminiChatContent struct {
Role string `json:"role,omitempty"`
Parts []GeminiPart `json:"parts"`
}
type GeminiChatSafetySettings struct {
Category string `json:"category"`
Threshold string `json:"threshold"`
}
type GeminiChatTools struct {
FunctionDeclarations any `json:"functionDeclarations,omitempty"`
}
type GeminiChatGenerationConfig struct {
Temperature float64 `json:"temperature,omitempty"`
TopP float64 `json:"topP,omitempty"`
TopK float64 `json:"topK,omitempty"`
MaxOutputTokens uint `json:"maxOutputTokens,omitempty"`
CandidateCount int `json:"candidateCount,omitempty"`
StopSequences []string `json:"stopSequences,omitempty"`
}
// The relaxed safety settings below (BLOCK_ONLY_HIGH) are commented out for now, so Gemini's default thresholds apply
func requestOpenAI2Gemini(textRequest GeneralOpenAIRequest) *GeminiChatRequest {
geminiRequest := GeminiChatRequest{
Contents: make([]GeminiChatContent, 0, len(textRequest.Messages)),
//SafetySettings: []GeminiChatSafetySettings{
// {
// Category: "HARM_CATEGORY_HARASSMENT",
// Threshold: "BLOCK_ONLY_HIGH",
// },
// {
// Category: "HARM_CATEGORY_HATE_SPEECH",
// Threshold: "BLOCK_ONLY_HIGH",
// },
// {
// Category: "HARM_CATEGORY_SEXUALLY_EXPLICIT",
// Threshold: "BLOCK_ONLY_HIGH",
// },
// {
// Category: "HARM_CATEGORY_DANGEROUS_CONTENT",
// Threshold: "BLOCK_ONLY_HIGH",
// },
//},
GenerationConfig: GeminiChatGenerationConfig{
Temperature: textRequest.Temperature,
TopP: textRequest.TopP,
MaxOutputTokens: textRequest.MaxTokens,
},
}
if textRequest.Functions != nil {
geminiRequest.Tools = []GeminiChatTools{
{
FunctionDeclarations: textRequest.Functions,
},
}
}
shouldAddDummyModelMessage := false
for _, message := range textRequest.Messages {
content := GeminiChatContent{
Role: message.Role,
Parts: []GeminiPart{
{
Text: string(message.Content),
},
},
}
// Gemini has no assistant role, and the API rejects any role other than user or model
if content.Role == "assistant" {
content.Role = "model"
}
// convert the system prompt into a user message for the same reason
if content.Role == "system" {
content.Role = "user"
shouldAddDummyModelMessage = true
}
geminiRequest.Contents = append(geminiRequest.Contents, content)
// follow each converted system message with a dummy model reply so that two user turns never sit back to back
if shouldAddDummyModelMessage {
geminiRequest.Contents = append(geminiRequest.Contents, GeminiChatContent{
Role: "model",
Parts: []GeminiPart{
{
Text: "Okay",
},
},
})
shouldAddDummyModelMessage = false
}
}
return &geminiRequest
}
type GeminiChatResponse struct {
Candidates []GeminiChatCandidate `json:"candidates"`
PromptFeedback GeminiChatPromptFeedback `json:"promptFeedback"`
}
func (g *GeminiChatResponse) GetResponseText() string {
if g == nil {
return ""
}
if len(g.Candidates) > 0 && len(g.Candidates[0].Content.Parts) > 0 {
return g.Candidates[0].Content.Parts[0].Text
}
return ""
}
type GeminiChatCandidate struct {
Content GeminiChatContent `json:"content"`
FinishReason string `json:"finishReason"`
Index int64 `json:"index"`
SafetyRatings []GeminiChatSafetyRating `json:"safetyRatings"`
}
type GeminiChatSafetyRating struct {
Category string `json:"category"`
Probability string `json:"probability"`
}
type GeminiChatPromptFeedback struct {
SafetyRatings []GeminiChatSafetyRating `json:"safetyRatings"`
}
func responseGeminiChat2OpenAI(response *GeminiChatResponse) *OpenAITextResponse {
fullTextResponse := OpenAITextResponse{
Id: fmt.Sprintf("chatcmpl-%s", common.GetUUID()),
Object: "chat.completion",
Created: common.GetTimestamp(),
Choices: make([]OpenAITextResponseChoice, 0, len(response.Candidates)),
}
for i, candidate := range response.Candidates {
content, _ := json.Marshal("")
if len(candidate.Content.Parts) > 0 {
// guard before indexing Parts[0]; an empty candidate would otherwise panic
content, _ = json.Marshal(candidate.Content.Parts[0].Text)
}
choice := OpenAITextResponseChoice{
Index: i,
Message: Message{
Role: "assistant",
Content: content,
},
FinishReason: stopFinishReason,
}
fullTextResponse.Choices = append(fullTextResponse.Choices, choice)
}
return &fullTextResponse
}
func streamResponseGeminiChat2OpenAI(geminiResponse *GeminiChatResponse) *ChatCompletionsStreamResponse {
var choice ChatCompletionsStreamResponseChoice
choice.Delta.Content = geminiResponse.GetResponseText()
choice.FinishReason = &stopFinishReason
var response ChatCompletionsStreamResponse
response.Object = "chat.completion.chunk"
response.Model = "gemini"
response.Choices = []ChatCompletionsStreamResponseChoice{choice}
return &response
}
func geminiChatStreamHandler(c *gin.Context, resp *http.Response) (*OpenAIErrorWithStatusCode, string) {
responseText := ""
dataChan := make(chan string)
stopChan := make(chan bool)
scanner := bufio.NewScanner(resp.Body)
scanner.Split(func(data []byte, atEOF bool) (advance int, token []byte, err error) {
if atEOF && len(data) == 0 {
return 0, nil, nil
}
if i := strings.Index(string(data), "\n"); i >= 0 {
return i + 1, data[0:i], nil
}
if atEOF {
return len(data), data, nil
}
return 0, nil, nil
})
go func() {
for scanner.Scan() {
data := scanner.Text()
data = strings.TrimSpace(data)
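// assumes the non-SSE streamGenerateContent response is pretty-printed JSON,
// so each "text" field arrives on a line of its own; keep only those lines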
if !strings.HasPrefix(data, "\"text\": \"") {
continue
}
data = strings.TrimPrefix(data, "\"text\": \"")
data = strings.TrimSuffix(data, "\"")
dataChan <- data
}
stopChan <- true
}()
setEventStreamHeaders(c)
c.Stream(func(w io.Writer) bool {
select {
case data := <-dataChan:
// wrap the raw fragment in a JSON object and unmarshal it so escape
// sequences (\n, \", \\ ...) are decoded correctly
data = fmt.Sprintf("{\"content\": \"%s\"}", data)
type dummyStruct struct {
Content string `json:"content"`
}
var dummy dummyStruct
err := json.Unmarshal([]byte(data), &dummy)
if err != nil {
common.SysError("error unmarshalling stream fragment: " + err.Error())
return true
}
responseText += dummy.Content
var choice ChatCompletionsStreamResponseChoice
choice.Delta.Content = dummy.Content
response := ChatCompletionsStreamResponse{
Id: fmt.Sprintf("chatcmpl-%s", common.GetUUID()),
Object: "chat.completion.chunk",
Created: common.GetTimestamp(),
Model: "gemini-pro",
Choices: []ChatCompletionsStreamResponseChoice{choice},
}
jsonResponse, err := json.Marshal(response)
if err != nil {
common.SysError("error marshalling stream response: " + err.Error())
return true
}
c.Render(-1, common.CustomEvent{Data: "data: " + string(jsonResponse)})
return true
case <-stopChan:
c.Render(-1, common.CustomEvent{Data: "data: [DONE]"})
return false
}
})
err := resp.Body.Close()
if err != nil {
return errorWrapper(err, "close_response_body_failed", http.StatusInternalServerError), ""
}
return nil, responseText
}
func geminiChatHandler(c *gin.Context, resp *http.Response, promptTokens int, model string) (*OpenAIErrorWithStatusCode, *Usage) {
responseBody, err := io.ReadAll(resp.Body)
if err != nil {
return errorWrapper(err, "read_response_body_failed", http.StatusInternalServerError), nil
}
err = resp.Body.Close()
if err != nil {
return errorWrapper(err, "close_response_body_failed", http.StatusInternalServerError), nil
}
var geminiResponse GeminiChatResponse
err = json.Unmarshal(responseBody, &geminiResponse)
if err != nil {
return errorWrapper(err, "unmarshal_response_body_failed", http.StatusInternalServerError), nil
}
if len(geminiResponse.Candidates) == 0 {
return &OpenAIErrorWithStatusCode{
OpenAIError: OpenAIError{
Message: "No candidates returned",
Type: "server_error",
Param: "",
Code: 500,
},
StatusCode: resp.StatusCode,
}, nil
}
fullTextResponse := responseGeminiChat2OpenAI(&geminiResponse)
completionTokens := countTokenText(geminiResponse.GetResponseText(), model)
usage := Usage{
PromptTokens: promptTokens,
CompletionTokens: completionTokens,
TotalTokens: promptTokens + completionTokens,
}
fullTextResponse.Usage = usage
jsonResponse, err := json.Marshal(fullTextResponse)
if err != nil {
return errorWrapper(err, "marshal_response_body_failed", http.StatusInternalServerError), nil
}
c.Writer.Header().Set("Content-Type", "application/json")
c.Writer.WriteHeader(resp.StatusCode)
_, err = c.Writer.Write(jsonResponse)
return nil, &usage
}
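
To make the conversion concrete: a standalone sketch of the role mapping performed by requestOpenAI2Gemini, using local stand-ins for the part/content shapes above (the package's own types aren't importable from here). Assistant turns become model turns, a system prompt is demoted to a user turn, and a dummy "Okay" model turn is inserted right after it:

package main

import (
	"encoding/json"
	"fmt"
)

// local stand-ins for GeminiPart / GeminiChatContent above
type part struct {
	Text string `json:"text,omitempty"`
}

type content struct {
	Role  string `json:"role,omitempty"`
	Parts []part `json:"parts"`
}

func main() {
	messages := []struct{ Role, Text string }{
		{"system", "You are terse."},
		{"user", "Hi"},
		{"assistant", "Hello"},
	}
	var contents []content
	for _, m := range messages {
		role := m.Role
		switch role {
		case "assistant":
			role = "model" // Gemini only accepts user and model
		case "system":
			role = "user" // system prompts become user turns
		}
		contents = append(contents, content{Role: role, Parts: []part{{Text: m.Text}}})
		if m.Role == "system" {
			// dummy model reply so two user turns don't end up adjacent
			contents = append(contents, content{Role: "model", Parts: []part{{Text: "Okay"}}})
		}
	}
	out, _ := json.MarshalIndent(contents, "", "  ")
	fmt.Println(string(out)) // user("You are terse."), model("Okay"), user("Hi"), model("Hello")
}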

View File

@@ -31,7 +31,7 @@ type PaLMChatRequest struct {
Temperature float64 `json:"temperature,omitempty"`
CandidateCount int `json:"candidateCount,omitempty"`
TopP float64 `json:"topP,omitempty"`
TopK int `json:"topK,omitempty"`
TopK uint `json:"topK,omitempty"`
}
type PaLMError struct {

View File

@@ -26,6 +26,7 @@ const (
APITypeXunfei
APITypeAIProxyLibrary
APITypeTencent
APITypeGemini
)
var httpClient *http.Client
@@ -119,6 +120,8 @@ func relayTextHelper(c *gin.Context, relayMode int) *OpenAIErrorWithStatusCode {
apiType = APITypeAIProxyLibrary
case common.ChannelTypeTencent:
apiType = APITypeTencent
case common.ChannelTypeGemini:
apiType = APITypeGemini
}
baseURL := common.ChannelBaseURLs[channelType]
requestURL := c.Request.URL.String()
@@ -180,6 +183,25 @@ func relayTextHelper(c *gin.Context, relayMode int) *OpenAIErrorWithStatusCode {
apiKey := c.Request.Header.Get("Authorization")
apiKey = strings.TrimPrefix(apiKey, "Bearer ")
fullRequestURL += "?key=" + apiKey
case APITypeGemini:
requestBaseURL := "https://generativelanguage.googleapis.com"
if baseURL != "" {
requestBaseURL = baseURL
}
version := "v1beta"
if c.GetString("api_version") != "" {
version = c.GetString("api_version")
}
action := "generateContent"
if textRequest.Stream {
action = "streamGenerateContent"
}
fullRequestURL = fmt.Sprintf("%s/%s/models/%s:%s", requestBaseURL, version, textRequest.Model, action)
apiKey := c.Request.Header.Get("Authorization")
apiKey = strings.TrimPrefix(apiKey, "Bearer ")
fullRequestURL += "?key=" + apiKey
//log.Println(fullRequestURL)
case APITypeZhipu:
method := "invoke"
if textRequest.Stream {
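
With the defaults above (the official host, and v1beta unless the channel's api_version overrides it), a gemini-pro request targets a URL of this shape; the relayed bearer token is moved into the key query parameter, and <API_KEY> below is a placeholder:

https://generativelanguage.googleapis.com/v1beta/models/gemini-pro:generateContent?key=<API_KEY>
https://generativelanguage.googleapis.com/v1beta/models/gemini-pro:streamGenerateContent?key=<API_KEY>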
@@ -211,7 +233,7 @@ func relayTextHelper(c *gin.Context, relayMode int) *OpenAIErrorWithStatusCode {
}
preConsumedTokens := common.PreConsumedQuota
if textRequest.MaxTokens != 0 {
preConsumedTokens = promptTokens + textRequest.MaxTokens
preConsumedTokens = promptTokens + int(textRequest.MaxTokens)
}
modelRatio := common.GetModelRatio(textRequest.Model)
groupRatio := common.GetGroupRatio(group)
@@ -280,6 +302,13 @@ func relayTextHelper(c *gin.Context, relayMode int) *OpenAIErrorWithStatusCode {
return errorWrapper(err, "marshal_text_request_failed", http.StatusInternalServerError)
}
requestBody = bytes.NewBuffer(jsonStr)
case APITypeGemini:
geminiChatRequest := requestOpenAI2Gemini(textRequest)
jsonStr, err := json.Marshal(geminiChatRequest)
if err != nil {
return errorWrapper(err, "marshal_text_request_failed", http.StatusInternalServerError)
}
requestBody = bytes.NewBuffer(jsonStr)
case APITypeZhipu:
zhipuRequest := requestOpenAI2Zhipu(textRequest)
jsonStr, err := json.Marshal(zhipuRequest)
@@ -539,6 +568,25 @@ func relayTextHelper(c *gin.Context, relayMode int) *OpenAIErrorWithStatusCode {
}
return nil
}
case APITypeGemini:
if textRequest.Stream {
err, responseText := geminiChatStreamHandler(c, resp)
if err != nil {
return err
}
textResponse.Usage.PromptTokens = promptTokens
textResponse.Usage.CompletionTokens = countTokenText(responseText, textRequest.Model)
return nil
} else {
err, usage := geminiChatHandler(c, resp, promptTokens, textRequest.Model)
if err != nil {
return err
}
if usage != nil {
textResponse.Usage = *usage
}
return nil
}
case APITypeZhipu:
if isStream {
err, usage := zhipuStreamHandler(c, resp)

View File

@@ -160,10 +160,27 @@ func countTokenMessages(messages []Message, model string) (int, error) {
} else {
for _, m := range arrayContent {
if m.Type == "image_url" {
imageTokenNum, err := getImageToken(&m.ImageUrl)
var imageTokenNum int
if str, ok := m.ImageUrl.(string); ok {
imageTokenNum, err = getImageToken(&MessageImageUrl{Url: str, Detail: "auto"})
} else {
imageUrlMap := m.ImageUrl.(map[string]interface{})
detail, ok := imageUrlMap["detail"]
if ok {
imageUrlMap["detail"] = detail.(string)
} else {
imageUrlMap["detail"] = "auto"
}
imageUrl := MessageImageUrl{
Url: imageUrlMap["url"].(string),
Detail: imageUrlMap["detail"].(string),
}
imageTokenNum, err = getImageToken(&imageUrl)
}
if err != nil {
return 0, err
}
tokenNum += imageTokenNum
log.Printf("image token num: %d", imageTokenNum)
} else {
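
The branch above now accepts image_url in either of the two shapes vision-style clients send, defaulting a missing detail to "auto". Illustrative payload fragments (the URL is a placeholder):

{"type": "image_url", "image_url": "https://example.com/cat.png"}
{"type": "image_url", "image_url": {"url": "https://example.com/cat.png", "detail": "high"}}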

View File

@@ -33,7 +33,7 @@ type XunfeiChatRequest struct {
Domain string `json:"domain,omitempty"`
Temperature float64 `json:"temperature,omitempty"`
TopK int `json:"top_k,omitempty"`
MaxTokens int `json:"max_tokens,omitempty"`
MaxTokens uint `json:"max_tokens,omitempty"`
Auditing bool `json:"auditing,omitempty"`
} `json:"chat"`
} `json:"parameter"`

View File

@@ -19,9 +19,9 @@ type Message struct {
}
type MediaMessage struct {
Type string `json:"type"`
Text string `json:"text"`
ImageUrl MessageImageUrl `json:"image_url,omitempty"`
Type string `json:"type"`
Text string `json:"text"`
ImageUrl any `json:"image_url,omitempty"`
}
type MessageImageUrl struct {
@@ -53,7 +53,7 @@ type GeneralOpenAIRequest struct {
Messages []Message `json:"messages,omitempty"`
Prompt any `json:"prompt,omitempty"`
Stream bool `json:"stream,omitempty"`
MaxTokens int `json:"max_tokens,omitempty"`
MaxTokens uint `json:"max_tokens,omitempty"`
Temperature float64 `json:"temperature,omitempty"`
TopP float64 `json:"top_p,omitempty"`
N int `json:"n,omitempty"`
@@ -91,14 +91,14 @@ type AudioRequest struct {
type ChatRequest struct {
Model string `json:"model"`
Messages []Message `json:"messages"`
MaxTokens int `json:"max_tokens"`
MaxTokens uint `json:"max_tokens"`
}
type TextRequest struct {
Model string `json:"model"`
Messages []Message `json:"messages"`
Prompt string `json:"prompt"`
MaxTokens int `json:"max_tokens"`
MaxTokens uint `json:"max_tokens"`
//Stream bool `json:"stream"`
}
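
The recurring int to uint switch in this diff (Claude's max_tokens_to_sample, Xunfei's max_tokens, PaLM's topK, and the request types here) presumably makes negative values fail at decode time instead of being forwarded to providers. A small demonstration of the standard-library behavior this relies on:

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	type req struct {
		MaxTokens uint `json:"max_tokens,omitempty"`
	}
	var r req
	err := json.Unmarshal([]byte(`{"max_tokens": -1}`), &r)
	fmt.Println(err) // json: cannot unmarshal number -1 into Go struct field req.max_tokens of type uint
}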

View File

@@ -1,9 +1,9 @@
version: '3.4'
services:
one-api:
new-api:
image: calciumion/new-api:latest
container_name: one-api
container_name: new-api
restart: always
command: --log-dir /app/logs
ports:
@@ -12,7 +12,7 @@ services:
- ./data:/data
- ./logs:/app/logs
environment:
- SQL_DSN=root:123456@tcp(host.docker.internal:3306)/one-api # edit this line, or comment it out to use SQLite as the database
- SQL_DSN=root:123456@tcp(host.docker.internal:3306)/new-api # edit this line, or comment it out to use SQLite as the database
- REDIS_CONN_STRING=redis://redis
- SESSION_SECRET=random_string # change this to a random string
- TZ=Asia/Shanghai
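
A typical bring-up with the renamed service, assuming the file above is saved as docker-compose.yml (edit SQL_DSN first, or comment it out to fall back to SQLite):

docker compose up -d
docker compose logs -f new-api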

View File

@@ -107,6 +107,8 @@ func Distribute() func(c *gin.Context) {
c.Set("api_version", channel.Other)
case common.ChannelTypeAIProxyLibrary:
c.Set("library_id", channel.Other)
case common.ChannelTypeGemini:
c.Set("api_version", channel.Other)
}
c.Next()
}

View File

@@ -99,6 +99,7 @@ func BatchDeleteChannels(ids []int) error {
if err != nil {
// roll back the transaction
tx.Rollback()
return err
}
// commit the transaction
tx.Commit()

View File

@@ -45,7 +45,7 @@ func ValidateUserToken(key string) (token *Token, err error) {
token, err = CacheGetTokenByKey(key)
if err == nil {
if token.Status == common.TokenStatusExhausted {
return nil, errors.New("该令牌额度已用尽")
return nil, errors.New("该令牌额度已用尽 token.Status == common.TokenStatusExhausted " + key)
} else if token.Status == common.TokenStatusExpired {
return nil, errors.New("该令牌已过期")
}
@@ -71,7 +71,7 @@ func ValidateUserToken(key string) (token *Token, err error) {
common.SysError("failed to update token status" + err.Error())
}
}
return nil, errors.New("该令牌额度已用尽")
return nil, errors.New(fmt.Sprintf("%s 该令牌额度已用尽 !token.UnlimitedQuota && token.RemainQuota = %d", token.Key, token.RemainQuota))
}
return token, nil
}

View File

@@ -20,6 +20,7 @@ import {
import {getQuotaPerUnit, renderQuota, renderQuotaWithPrompt, stringToColor} from "../helpers/render";
import EditToken from "../pages/Token/EditToken";
import EditUser from "../pages/User/EditUser";
import passwordResetConfirm from "./PasswordResetConfirm";
const PersonalSetting = () => {
const [userState, userDispatch] = useContext(UserContext);
@@ -29,9 +30,12 @@ const PersonalSetting = () => {
wechat_verification_code: '',
email_verification_code: '',
email: '',
self_account_deletion_confirmation: ''
self_account_deletion_confirmation: '',
set_new_password: '',
set_new_password_confirmation: '',
});
const [status, setStatus] = useState({});
const [showChangePasswordModal, setShowChangePasswordModal] = useState(false);
const [showWeChatBindModal, setShowWeChatBindModal] = useState(false);
const [showEmailBindModal, setShowEmailBindModal] = useState(false);
const [showAccountDeleteModal, setShowAccountDeleteModal] = useState(false);
@@ -180,6 +184,27 @@ const PersonalSetting = () => {
}
};
const changePassword = async () => {
if (inputs.set_new_password !== inputs.set_new_password_confirmation) {
showError('The two passwords entered do not match!');
return;
}
const res = await API.put(
`/api/user/self`,
{
password: inputs.set_new_password
}
);
const {success, message} = res.data;
if (success) {
showSuccess('Password changed successfully!');
} else {
showError(message);
}
setShowChangePasswordModal(false);
};
const transfer = async () => {
if (transferAmount < getQuotaPerUnit()) {
showError('The minimum transfer amount is ' + renderQuota(getQuotaPerUnit()));
@@ -420,6 +445,9 @@ const PersonalSetting = () => {
<Space>
<Button onClick={generateAccessToken}>Generate System Access Token</Button>
<Button onClick={() => {
setShowChangePasswordModal(true);
}}>Change Password</Button>
<Button type={'danger'} onClick={() => {
setShowAccountDeleteModal(true);
}}>Delete Personal Account</Button>
</Space>
@@ -543,6 +571,39 @@ const PersonalSetting = () => {
)}
</div>
</Modal>
<Modal
onCancel={() => setShowChangePasswordModal(false)}
visible={showChangePasswordModal}
size={'small'}
centered={true}
onOk={changePassword}
>
<div style={{marginTop: 20}}>
<Input
name='set_new_password'
placeholder='New password'
value={inputs.set_new_password}
onChange={(value)=>handleInputChange('set_new_password', value)}
/>
<Input
style={{marginTop: 20}}
name='set_new_password_confirmation'
placeholder='Confirm new password'
value={inputs.set_new_password_confirmation}
onChange={(value)=>handleInputChange('set_new_password_confirmation', value)}
/>
{turnstileEnabled ? (
<Turnstile
sitekey={turnstileSiteKey}
onVerify={(token) => {
setTurnstileToken(token);
}}
/>
) : (
<></>
)}
</div>
</Modal>
</div>
</Layout.Content>

View File

@@ -1,16 +1,17 @@
export const CHANNEL_OPTIONS = [
{ key: 1, text: 'OpenAI', value: 1, color: 'green', label: 'OpenAI' },
{ key: 24, text: 'Midjourney Proxy', value: 24, color: 'light-blue', label: 'Midjourney Proxy' },
{ key: 14, text: 'Anthropic Claude', value: 14, color: 'black', label: 'Anthropic Claude' },
{ key: 3, text: 'Azure OpenAI', value: 3, color: 'olive', label: 'Azure OpenAI' },
{ key: 11, text: 'Google PaLM2', value: 11, color: 'orange', label: 'Google PaLM2' },
{ key: 15, text: '百度文心千帆', value: 15, color: 'blue', label: '百度文心千帆' },
{ key: 17, text: '阿里通义千问', value: 17, color: 'orange', label: '阿里通义千问' },
{ key: 18, text: '讯飞星火认知', value: 18, color: 'blue', label: '讯飞星火认知' },
{ key: 16, text: '智谱 ChatGLM', value: 16, color: 'violet', label: '智谱 ChatGLM' },
{ key: 19, text: '360 智脑', value: 19, color: 'blue', label: '360 智脑' },
{ key: 23, text: '腾讯混元', value: 23, color: 'teal', label: '腾讯混元' },
{ key: 8, text: '自定义渠道', value: 8, color: 'pink', label: '自定义渠道' },
{ key: 22, text: '知识库FastGPT', value: 22, color: 'blue', label: '知识库FastGPT' },
{ key: 21, text: '知识库:AI Proxy', value: 21, color: 'purple', label: '知识库:AI Proxy' },
{key: 1, text: 'OpenAI', value: 1, color: 'green', label: 'OpenAI'},
{key: 2, text: 'Midjourney Proxy', value: 2, color: 'light-blue', label: 'Midjourney Proxy'},
{key: 14, text: 'Anthropic Claude', value: 14, color: 'black', label: 'Anthropic Claude'},
{key: 3, text: 'Azure OpenAI', value: 3, color: 'olive', label: 'Azure OpenAI'},
{key: 11, text: 'Google PaLM2', value: 11, color: 'orange', label: 'Google PaLM2'},
{key: 24, text: 'Google Gemini', value: 24, color: 'orange', label: 'Google Gemini'},
{key: 15, text: '百度文心千帆', value: 15, color: 'blue', label: '百度文心千帆'},
{key: 17, text: '阿里通义千问', value: 17, color: 'orange', label: '阿里通义千问'},
{key: 18, text: '讯飞星火认知', value: 18, color: 'blue', label: '讯飞星火认知'},
{key: 16, text: '智谱 ChatGLM', value: 16, color: 'violet', label: '智谱 ChatGLM'},
{key: 19, text: '360 智脑', value: 19, color: 'blue', label: '360 智脑'},
{key: 23, text: '腾讯混元', value: 23, color: 'teal', label: '腾讯混元'},
{key: 8, text: '自定义渠道', value: 8, color: 'pink', label: '自定义渠道'},
{key: 22, text: '知识库:FastGPT', value: 22, color: 'blue', label: '知识库:FastGPT'},
{key: 21, text: '知识库AI Proxy', value: 21, color: 'purple', label: '知识库AI Proxy'},
];

View File

@@ -86,6 +86,9 @@ const EditChannel = (props) => {
case 23:
localModels = ['hunyuan'];
break;
case 24:
localModels = ['gemini-pro'];
break;
}
setInputs((inputs) => ({...inputs, models: localModels}));
}