From 51c4cd9ab52a5b9e7263b3e3ed483594a0900cd3 Mon Sep 17 00:00:00 2001
From: somnifex <98788152+somnifex@users.noreply.github.com>
Date: Mon, 15 Sep 2025 23:01:14 +0800
Subject: [PATCH 01/18] feat: refactor Ollama channel request handling
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 relay/channel/ollama/adaptor.go      |  31 ++--
 relay/channel/ollama/dto.go          |  91 +++++---
 relay/channel/ollama/relay-ollama.go | 263 ++++++++++++++++-----------
 relay/channel/ollama/stream.go       | 165 +++++++++++++++++
 4 files changed, 400 insertions(+), 150 deletions(-)
 create mode 100644 relay/channel/ollama/stream.go

diff --git a/relay/channel/ollama/adaptor.go b/relay/channel/ollama/adaptor.go
index d6b5b697e..3732be91b 100644
--- a/relay/channel/ollama/adaptor.go
+++ b/relay/channel/ollama/adaptor.go
@@ -10,6 +10,7 @@ import (
 	relaycommon "one-api/relay/common"
 	relayconstant "one-api/relay/constant"
 	"one-api/types"
+	"strings"
 
 	"github.com/gin-gonic/gin"
 )
@@ -48,15 +49,15 @@ func (a *Adaptor) Init(info *relaycommon.RelayInfo) {
 }
 
 func (a *Adaptor) GetRequestURL(info *relaycommon.RelayInfo) (string, error) {
-	if info.RelayFormat == types.RelayFormatClaude {
-		return info.ChannelBaseUrl + "/v1/chat/completions", nil
-	}
-	switch info.RelayMode {
-	case relayconstant.RelayModeEmbeddings:
+	// embeddings fixed endpoint
+	if info.RelayMode == relayconstant.RelayModeEmbeddings {
 		return info.ChannelBaseUrl + "/api/embed", nil
-	default:
-		return relaycommon.GetFullRequestURL(info.ChannelBaseUrl, info.RequestURLPath, info.ChannelType), nil
 	}
+	// For chat vs generate: if original path contains "/v1/completions" map to generate; otherwise chat
+	if strings.Contains(info.RequestURLPath, "/v1/completions") || info.RelayMode == relayconstant.RelayModeCompletions {
+		return info.ChannelBaseUrl + "/api/generate", nil
+	}
+	return info.ChannelBaseUrl + "/api/chat", nil
 }
 
 func (a *Adaptor) SetupRequestHeader(c *gin.Context, req *http.Header, info *relaycommon.RelayInfo) error {
@@ -66,10 +67,12 @@ func (a *Adaptor) SetupRequestHeader(c *gin.Context, req *http.Header, info *rel
 }
 
 func (a *Adaptor) ConvertOpenAIRequest(c *gin.Context, info *relaycommon.RelayInfo, request *dto.GeneralOpenAIRequest) (any, error) {
-	if request == nil {
-		return nil, errors.New("request is nil")
+	if request == nil { return nil, errors.New("request is nil") }
+	// decide generate or chat
+	if strings.Contains(info.RequestURLPath, "/v1/completions") || info.RelayMode == relayconstant.RelayModeCompletions {
+		return openAIToGenerate(c, request)
 	}
-	return requestOpenAI2Ollama(c, request)
+	return openAIChatToOllamaChat(c, request)
 }
 
 func (a *Adaptor) ConvertRerankRequest(c *gin.Context, relayMode int, request dto.RerankRequest) (any, error) {
@@ -92,15 +95,13 @@ func (a *Adaptor) DoRequest(c *gin.Context, info *relaycommon.RelayInfo, request
 func (a *Adaptor) DoResponse(c *gin.Context, resp *http.Response, info *relaycommon.RelayInfo) (usage any, err *types.NewAPIError) {
 	switch info.RelayMode {
 	case relayconstant.RelayModeEmbeddings:
-		usage, err = ollamaEmbeddingHandler(c, info, resp)
+		return ollamaEmbeddingHandler(c, info, resp)
 	default:
 		if info.IsStream {
-			usage, err = openai.OaiStreamHandler(c, info, resp)
-		} else {
-			usage, err = openai.OpenaiHandler(c, info, resp)
+			return ollamaStreamHandler(c, info, resp)
 		}
+		return ollamaChatHandler(c, info, resp)
 	}
-	return
 }
 
 func (a *Adaptor) GetModelList() []string {
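Editor's note: the adaptor diff above replaces the pass-through URL with an explicit endpoint-selection rule. A minimal standalone sketch of that rule, with illustrative names (ollamaEndpoint is not part of the patch):

package main

import (
	"fmt"
	"strings"
)

// ollamaEndpoint mirrors the routing introduced in GetRequestURL:
// embeddings use a fixed endpoint, legacy text completions map to
// /api/generate, and everything else maps to /api/chat.
func ollamaEndpoint(baseURL, requestPath string, isEmbedding bool) string {
	if isEmbedding {
		return baseURL + "/api/embed"
	}
	if strings.Contains(requestPath, "/v1/completions") {
		return baseURL + "/api/generate"
	}
	return baseURL + "/api/chat"
}

func main() {
	fmt.Println(ollamaEndpoint("http://localhost:11434", "/v1/completions", false))      // .../api/generate
	fmt.Println(ollamaEndpoint("http://localhost:11434", "/v1/chat/completions", false)) // .../api/chat
}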
diff --git a/relay/channel/ollama/dto.go b/relay/channel/ollama/dto.go
index 317c2a4a1..b3d083dce 100644
--- a/relay/channel/ollama/dto.go
+++ b/relay/channel/ollama/dto.go
@@ -5,45 +5,70 @@ import (
 	"one-api/dto"
 )
 
-type OllamaRequest struct {
-	Model            string                `json:"model,omitempty"`
-	Messages         []dto.Message         `json:"messages,omitempty"`
-	Stream           bool                  `json:"stream,omitempty"`
-	Temperature      *float64              `json:"temperature,omitempty"`
-	Seed             float64               `json:"seed,omitempty"`
-	Topp             float64               `json:"top_p,omitempty"`
-	TopK             int                   `json:"top_k,omitempty"`
-	Stop             any                   `json:"stop,omitempty"`
-	MaxTokens        uint                  `json:"max_tokens,omitempty"`
-	Tools            []dto.ToolCallRequest `json:"tools,omitempty"`
-	ResponseFormat   any                   `json:"response_format,omitempty"`
-	FrequencyPenalty float64               `json:"frequency_penalty,omitempty"`
-	PresencePenalty  float64               `json:"presence_penalty,omitempty"`
-	Suffix           any                   `json:"suffix,omitempty"`
-	StreamOptions    *dto.StreamOptions    `json:"stream_options,omitempty"`
-	Prompt           any                   `json:"prompt,omitempty"`
-	Think            json.RawMessage       `json:"think,omitempty"`
+// OllamaChatMessage represents a single chat message
+type OllamaChatMessage struct {
+	Role      string           `json:"role"`
+	Content   string           `json:"content,omitempty"`
+	Images    []string         `json:"images,omitempty"`
+	ToolCalls []OllamaToolCall `json:"tool_calls,omitempty"`
+	ToolName  string           `json:"tool_name,omitempty"`
+	Thinking  json.RawMessage  `json:"thinking,omitempty"`
 }
 
-type Options struct {
-	Seed             int      `json:"seed,omitempty"`
-	Temperature      *float64 `json:"temperature,omitempty"`
-	TopK             int      `json:"top_k,omitempty"`
-	TopP             float64  `json:"top_p,omitempty"`
-	FrequencyPenalty float64  `json:"frequency_penalty,omitempty"`
-	PresencePenalty  float64  `json:"presence_penalty,omitempty"`
-	NumPredict       int      `json:"num_predict,omitempty"`
-	NumCtx           int      `json:"num_ctx,omitempty"`
+type OllamaToolFunction struct {
+	Name        string      `json:"name"`
+	Description string      `json:"description,omitempty"`
+	Parameters  interface{} `json:"parameters,omitempty"`
+}
+
+type OllamaTool struct {
+	Type     string             `json:"type"`
+	Function OllamaToolFunction `json:"function"`
+}
+
+type OllamaToolCall struct {
+	Function struct {
+		Name      string      `json:"name"`
+		Arguments interface{} `json:"arguments"`
+	} `json:"function"`
+}
+
+// OllamaChatRequest -> /api/chat
+type OllamaChatRequest struct {
+	Model     string              `json:"model"`
+	Messages  []OllamaChatMessage `json:"messages"`
+	Tools     interface{}         `json:"tools,omitempty"`
+	Format    interface{}         `json:"format,omitempty"`
+	Stream    bool                `json:"stream,omitempty"`
+	Options   map[string]any      `json:"options,omitempty"`
+	KeepAlive interface{}         `json:"keep_alive,omitempty"`
+	Think     json.RawMessage     `json:"think,omitempty"`
+}
+
+// OllamaGenerateRequest -> /api/generate
+type OllamaGenerateRequest struct {
+	Model     string          `json:"model"`
+	Prompt    string          `json:"prompt,omitempty"`
+	Suffix    string          `json:"suffix,omitempty"`
+	Images    []string        `json:"images,omitempty"`
+	Format    interface{}     `json:"format,omitempty"`
+	Stream    bool            `json:"stream,omitempty"`
+	Options   map[string]any  `json:"options,omitempty"`
+	KeepAlive interface{}     `json:"keep_alive,omitempty"`
+	Think     json.RawMessage `json:"think,omitempty"`
 }
 
 type OllamaEmbeddingRequest struct {
-	Model   string   `json:"model,omitempty"`
-	Input   []string `json:"input"`
-	Options *Options `json:"options,omitempty"`
+	Model      string         `json:"model"`
+	Input      interface{}    `json:"input"`
+	Options    map[string]any `json:"options,omitempty"`
+	Dimensions int            `json:"dimensions,omitempty"`
 }
 
 type OllamaEmbeddingResponse struct {
-	Error     string      `json:"error,omitempty"`
-	Model     string      `json:"model"`
-	Embedding [][]float64 `json:"embeddings,omitempty"`
+	Error           string      `json:"error,omitempty"`
+	Model           string      `json:"model"`
+	Embeddings      [][]float64 `json:"embeddings"`
+	PromptEvalCount int         `json:"prompt_eval_count,omitempty"`
 }
+
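Editor's note: the new DTOs target Ollama's native /api/chat schema rather than the OpenAI shape. A small sketch of the wire format they produce, using a trimmed copy of the structs above (the trimmed types are illustrative, not the patch's own):

package main

import (
	"encoding/json"
	"fmt"
)

type chatMessage struct {
	Role    string   `json:"role"`
	Content string   `json:"content,omitempty"`
	Images  []string `json:"images,omitempty"`
}

type chatRequest struct {
	Model    string         `json:"model"`
	Messages []chatMessage  `json:"messages"`
	Stream   bool           `json:"stream,omitempty"`
	Options  map[string]any `json:"options,omitempty"`
}

func main() {
	req := chatRequest{
		Model:    "llama3.1",
		Messages: []chatMessage{{Role: "user", Content: "Hello"}},
		Options:  map[string]any{"temperature": 0.7, "num_predict": 128},
	}
	out, _ := json.MarshalIndent(req, "", "  ")
	fmt.Println(string(out)) // prints the indented /api/chat request body
}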
diff --git a/relay/channel/ollama/relay-ollama.go b/relay/channel/ollama/relay-ollama.go
index 27c67b4ec..897e22cbd 100644
--- a/relay/channel/ollama/relay-ollama.go
+++ b/relay/channel/ollama/relay-ollama.go
@@ -1,6 +1,7 @@
 package ollama
 
 import (
+	"encoding/json"
 	"fmt"
 	"io"
 	"net/http"
@@ -14,121 +15,179 @@ import (
 	"github.com/gin-gonic/gin"
 )
 
-func requestOpenAI2Ollama(c *gin.Context, request *dto.GeneralOpenAIRequest) (*OllamaRequest, error) {
-	messages := make([]dto.Message, 0, len(request.Messages))
-	for _, message := range request.Messages {
-		if !message.IsStringContent() {
-			mediaMessages := message.ParseContent()
-			for j, mediaMessage := range mediaMessages {
-				if mediaMessage.Type == dto.ContentTypeImageURL {
-					imageUrl := mediaMessage.GetImageMedia()
-					// check if not base64
-					if strings.HasPrefix(imageUrl.Url, "http") {
-						fileData, err := service.GetFileBase64FromUrl(c, imageUrl.Url, "formatting image for Ollama")
-						if err != nil {
-							return nil, err
+// openAIChatToOllamaChat converts OpenAI-style chat request to Ollama chat
+func openAIChatToOllamaChat(c *gin.Context, r *dto.GeneralOpenAIRequest) (*OllamaChatRequest, error) {
+	chatReq := &OllamaChatRequest{
+		Model:   r.Model,
+		Stream:  r.Stream,
+		Options: map[string]any{},
+		Think:   r.Think,
+	}
+	// format mapping
+	if r.ResponseFormat != nil {
+		if r.ResponseFormat.Type == "json" {
+			chatReq.Format = "json"
+		} else if r.ResponseFormat.Type == "json_schema" {
+			// supply schema object directly
+			if len(r.ResponseFormat.JsonSchema) > 0 {
+				var schema any
+				_ = json.Unmarshal(r.ResponseFormat.JsonSchema, &schema)
+				chatReq.Format = schema
+			}
+		}
+	}
+
+	// options mapping
+	if r.Temperature != nil { chatReq.Options["temperature"] = r.Temperature }
+	if r.TopP != 0 { chatReq.Options["top_p"] = r.TopP }
+	if r.TopK != 0 { chatReq.Options["top_k"] = r.TopK }
+	if r.FrequencyPenalty != 0 { chatReq.Options["frequency_penalty"] = r.FrequencyPenalty }
+	if r.PresencePenalty != 0 { chatReq.Options["presence_penalty"] = r.PresencePenalty }
+	if r.Seed != 0 { chatReq.Options["seed"] = int(r.Seed) }
+	if mt := r.GetMaxTokens(); mt != 0 { chatReq.Options["num_predict"] = int(mt) }
+
+	// Stop -> options.stop (array)
+	if r.Stop != nil {
+		switch v := r.Stop.(type) {
+		case string:
+			chatReq.Options["stop"] = []string{v}
+		case []string:
+			chatReq.Options["stop"] = v
+		case []any:
+			arr := make([]string, 0, len(v))
+			for _, i := range v { if s, ok := i.(string); ok { arr = append(arr, s) } }
+			if len(arr) > 0 { chatReq.Options["stop"] = arr }
+		}
+	}
+
+	// tools
+	if len(r.Tools) > 0 {
+		tools := make([]OllamaTool, 0, len(r.Tools))
+		for _, t := range r.Tools {
+			tools = append(tools, OllamaTool{Type: "function", Function: OllamaToolFunction{Name: t.Function.Name, Description: t.Function.Description, Parameters: t.Function.Parameters}})
+		}
+		chatReq.Tools = tools
+	}
+
+	// messages
+	chatReq.Messages = make([]OllamaChatMessage, 0, len(r.Messages))
+	for _, m := range r.Messages {
+		// gather text parts & images
+		var textBuilder strings.Builder
+		var images []string
+		if m.IsStringContent() {
+			textBuilder.WriteString(m.StringContent())
+		} else {
+			parts := m.ParseContent()
+			for _, part := range parts {
+				if part.Type == dto.ContentTypeImageURL {
+					img := part.GetImageMedia()
+					if img != nil && img.Url != "" {
+						// ensure base64 dataURL
+						if strings.HasPrefix(img.Url, "http") {
+							fileData, err := service.GetFileBase64FromUrl(c, img.Url, "fetch image for ollama chat")
+							if err != nil { return nil, err }
+							img.Url = fmt.Sprintf("data:%s;base64,%s", fileData.MimeType, fileData.Base64Data)
 						}
-						imageUrl.Url = fmt.Sprintf("data:%s;base64,%s", fileData.MimeType, fileData.Base64Data)
+						images = append(images, img.Url)
 					}
-					mediaMessage.ImageUrl = imageUrl
-					mediaMessages[j] = mediaMessage
+				} else if part.Type == dto.ContentTypeText {
+					textBuilder.WriteString(part.Text)
 				}
 			}
-			message.SetMediaContent(mediaMessages)
 		}
-		messages = append(messages, dto.Message{
-			Role:       message.Role,
-			Content:    message.Content,
-			ToolCalls:  message.ToolCalls,
-			ToolCallId: message.ToolCallId,
-		})
+		cm := OllamaChatMessage{Role: m.Role, Content: textBuilder.String()}
+		if len(images)>0 { cm.Images = images }
+		// history tool call result message
+		if m.Role == "tool" && m.Name != nil { cm.ToolName = *m.Name }
+		// tool calls from assistant previous message
+		if len(m.ToolCalls)>0 {
+			calls := make([]OllamaToolCall,0,len(m.ToolCalls))
+			for _, tc := range m.ToolCalls {
+				var args interface{}
+				if tc.Function.Arguments != "" { _ = json.Unmarshal([]byte(tc.Function.Arguments), &args) }
+				oc := OllamaToolCall{}
+				oc.Function.Name = tc.Function.Name
+				if args==nil { args = map[string]any{} }
+				oc.Function.Arguments = args
+				calls = append(calls, oc)
+			}
+			cm.ToolCalls = calls
+		}
+		chatReq.Messages = append(chatReq.Messages, cm)
 	}
-	str, ok := request.Stop.(string)
-	var Stop []string
-	if ok {
-		Stop = []string{str}
-	} else {
-		Stop, _ = request.Stop.([]string)
-	}
-	ollamaRequest := &OllamaRequest{
-		Model:            request.Model,
-		Messages:         messages,
-		Stream:           request.Stream,
-		Temperature:      request.Temperature,
-		Seed:             request.Seed,
-		Topp:             request.TopP,
-		TopK:             request.TopK,
-		Stop:             Stop,
-		Tools:            request.Tools,
-		MaxTokens:        request.GetMaxTokens(),
-		ResponseFormat:   request.ResponseFormat,
-		FrequencyPenalty: request.FrequencyPenalty,
-		PresencePenalty:  request.PresencePenalty,
-		Prompt:           request.Prompt,
-		StreamOptions:    request.StreamOptions,
-		Suffix:           request.Suffix,
-	}
-	ollamaRequest.Think = request.Think
-	return ollamaRequest, nil
+	return chatReq, nil
 }
 
-func requestOpenAI2Embeddings(request dto.EmbeddingRequest) *OllamaEmbeddingRequest {
-	return &OllamaEmbeddingRequest{
-		Model: request.Model,
-		Input: request.ParseInput(),
-		Options: &Options{
-			Seed:             int(request.Seed),
-			Temperature:      request.Temperature,
-			TopP:             request.TopP,
-			FrequencyPenalty: request.FrequencyPenalty,
-			PresencePenalty:  request.PresencePenalty,
-		},
+// openAIToGenerate converts OpenAI completions request to Ollama generate
+func openAIToGenerate(c *gin.Context, r *dto.GeneralOpenAIRequest) (*OllamaGenerateRequest, error) {
+	gen := &OllamaGenerateRequest{
+		Model:   r.Model,
+		Stream:  r.Stream,
+		Options: map[string]any{},
+		Think:   r.Think,
 	}
+	// Prompt may be in r.Prompt (string or []any)
+	if r.Prompt != nil {
+		switch v := r.Prompt.(type) {
+		case string:
+			gen.Prompt = v
+		case []any:
+			var sb strings.Builder
+			for _, it := range v { if s, ok := it.(string); ok { sb.WriteString(s) } }
+			gen.Prompt = sb.String()
+		default:
+			gen.Prompt = fmt.Sprintf("%v", r.Prompt)
+		}
+	}
+	if r.Suffix != nil { if s, ok := r.Suffix.(string); ok { gen.Suffix = s } }
+	if r.ResponseFormat != nil {
+		if r.ResponseFormat.Type == "json" { gen.Format = "json" } else if r.ResponseFormat.Type == "json_schema" { var schema any; _ = json.Unmarshal(r.ResponseFormat.JsonSchema, &schema); gen.Format = schema }
+	}
+	if r.Temperature != nil { gen.Options["temperature"] = r.Temperature }
+	if r.TopP != 0 { gen.Options["top_p"] = r.TopP }
+	if r.TopK != 0 { gen.Options["top_k"] = r.TopK }
+	if r.FrequencyPenalty != 0 { gen.Options["frequency_penalty"] = r.FrequencyPenalty }
+	if r.PresencePenalty != 0 { gen.Options["presence_penalty"] = r.PresencePenalty }
+	if r.Seed != 0 { gen.Options["seed"] = int(r.Seed) }
+	if mt := r.GetMaxTokens(); mt != 0 { gen.Options["num_predict"] = int(mt) }
+	if r.Stop != nil {
+		switch v := r.Stop.(type) {
+		case string: gen.Options["stop"] = []string{v}
+		case []string: gen.Options["stop"] = v
+		case []any: arr := make([]string, 0, len(v)); for _, i := range v { if s, ok := i.(string); ok { arr = append(arr, s) } }; if len(arr) > 0 { gen.Options["stop"] = arr }
+		}
+	}
+	return gen, nil
+}
+
+func requestOpenAI2Embeddings(r dto.EmbeddingRequest) *OllamaEmbeddingRequest {
+	opts := map[string]any{}
+	if r.Temperature != nil { opts["temperature"] = r.Temperature }
+	if r.TopP != 0 { opts["top_p"] = r.TopP }
+	if r.TopK != 0 { opts["top_k"] = r.TopK }
+	if r.FrequencyPenalty != 0 { opts["frequency_penalty"] = r.FrequencyPenalty }
+	if r.PresencePenalty != 0 { opts["presence_penalty"] = r.PresencePenalty }
+	if r.Seed != 0 { opts["seed"] = int(r.Seed) }
+	if r.Dimensions != 0 { opts["dimensions"] = r.Dimensions }
+	input := r.ParseInput()
+	if len(input) == 1 { return &OllamaEmbeddingRequest{Model: r.Model, Input: input[0], Options: opts, Dimensions: r.Dimensions} }
+	return &OllamaEmbeddingRequest{Model: r.Model, Input: input, Options: opts, Dimensions: r.Dimensions}
 }
 
 func ollamaEmbeddingHandler(c *gin.Context, info *relaycommon.RelayInfo, resp *http.Response) (*dto.Usage, *types.NewAPIError) {
-	var ollamaEmbeddingResponse OllamaEmbeddingResponse
-	responseBody, err := io.ReadAll(resp.Body)
-	if err != nil {
-		return nil, types.NewOpenAIError(err, types.ErrorCodeBadResponseBody, http.StatusInternalServerError)
-	}
+	var oResp OllamaEmbeddingResponse
+	body, err := io.ReadAll(resp.Body)
+	if err != nil { return nil, types.NewOpenAIError(err, types.ErrorCodeBadResponseBody, http.StatusInternalServerError) }
 	service.CloseResponseBodyGracefully(resp)
-	err = common.Unmarshal(responseBody, &ollamaEmbeddingResponse)
-	if err != nil {
-		return nil, types.NewOpenAIError(err, types.ErrorCodeBadResponseBody, http.StatusInternalServerError)
-	}
-	if ollamaEmbeddingResponse.Error != "" {
-		return nil, types.NewOpenAIError(fmt.Errorf("ollama error: %s", ollamaEmbeddingResponse.Error), types.ErrorCodeBadResponseBody, http.StatusInternalServerError)
-	}
-	flattenedEmbeddings := flattenEmbeddings(ollamaEmbeddingResponse.Embedding)
-	data := make([]dto.OpenAIEmbeddingResponseItem, 0, 1)
-	data = append(data, dto.OpenAIEmbeddingResponseItem{
-		Embedding: flattenedEmbeddings,
-		Object:    "embedding",
-	})
-	usage := &dto.Usage{
-		TotalTokens:      info.PromptTokens,
-		CompletionTokens: 0,
-		PromptTokens:     info.PromptTokens,
-	}
-	embeddingResponse := &dto.OpenAIEmbeddingResponse{
-		Object: "list",
-		Data:   data,
-		Model:  info.UpstreamModelName,
-		Usage:  *usage,
-	}
-	doResponseBody, err := common.Marshal(embeddingResponse)
-	if err != nil {
-		return nil, types.NewOpenAIError(err, types.ErrorCodeBadResponseBody, http.StatusInternalServerError)
-	}
-	service.IOCopyBytesGracefully(c, resp, doResponseBody)
+	if err = common.Unmarshal(body, &oResp); err != nil { return nil, types.NewOpenAIError(err, types.ErrorCodeBadResponseBody, http.StatusInternalServerError) }
+	if oResp.Error != "" { return nil, types.NewOpenAIError(fmt.Errorf("ollama error: %s", oResp.Error), types.ErrorCodeBadResponseBody, http.StatusInternalServerError) }
+	data := make([]dto.OpenAIEmbeddingResponseItem, 0, len(oResp.Embeddings))
+	for i, emb := range oResp.Embeddings { data = append(data, dto.OpenAIEmbeddingResponseItem{Index: i, Object: "embedding", Embedding: emb}) }
+	usage := &dto.Usage{PromptTokens: oResp.PromptEvalCount, CompletionTokens: 0, TotalTokens: oResp.PromptEvalCount}
+	embResp := &dto.OpenAIEmbeddingResponse{Object: "list", Data: data, Model: info.UpstreamModelName, Usage: *usage}
+	out, _ := common.Marshal(embResp)
+	service.IOCopyBytesGracefully(c, resp, out)
 	return usage, nil
 }
-
-func flattenEmbeddings(embeddings [][]float64) []float64 {
-	flattened := []float64{}
-	for _, row := range embeddings {
-		flattened = append(flattened, row...)
-	}
-	return flattened
-}
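Editor's note: both converters above normalize the OpenAI stop parameter, which may arrive as a string, a []string, or a []any after JSON decoding. A standalone sketch of that coercion (normalizeStop is an illustrative name):

package main

import "fmt"

// normalizeStop mirrors the switch used in the converters: strings become a
// one-element slice, string slices pass through, and []any keeps only strings.
func normalizeStop(stop any) []string {
	switch v := stop.(type) {
	case string:
		return []string{v}
	case []string:
		return v
	case []any:
		out := make([]string, 0, len(v))
		for _, item := range v {
			if s, ok := item.(string); ok {
				out = append(out, s)
			}
		}
		return out
	}
	return nil
}

func main() {
	fmt.Println(normalizeStop("END"))              // [END]
	fmt.Println(normalizeStop([]any{"a", 1, "b"})) // [a b]
}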
diff --git a/relay/channel/ollama/stream.go b/relay/channel/ollama/stream.go
new file mode 100644
index 000000000..3ae9c6d04
--- /dev/null
+++ b/relay/channel/ollama/stream.go
@@ -0,0 +1,165 @@
+package ollama
+
+import (
+	"bufio"
+	"encoding/json"
+	"fmt"
+	"io"
+	"net/http"
+	"one-api/common"
+	"one-api/dto"
+	"one-api/logger"
+	relaycommon "one-api/relay/common"
+	"one-api/relay/helper"
+	"one-api/service"
+	"one-api/types"
+	"strings"
+	"time"
+
+	"github.com/gin-gonic/gin"
+)
+
+// Ollama streaming chunk (chat or generate)
+type ollamaChatStreamChunk struct {
+	Model     string `json:"model"`
+	CreatedAt string `json:"created_at"`
+	// chat
+	Message *struct {
+		Role    string `json:"role"`
+		Content string `json:"content"`
+		ToolCalls []struct { `json:"tool_calls"`
+			Function struct {
+				Name      string      `json:"name"`
+				Arguments interface{} `json:"arguments"`
+			} `json:"function"`
+		} `json:"tool_calls"`
+	} `json:"message"`
+	// generate
+	Response        string `json:"response"`
+	Done            bool   `json:"done"`
+	DoneReason      string `json:"done_reason"`
+	TotalDuration   int64  `json:"total_duration"`
+	LoadDuration    int64  `json:"load_duration"`
+	PromptEvalCount int    `json:"prompt_eval_count"`
+	EvalCount       int    `json:"eval_count"`
+	// generate mode may use these
+	PromptEvalDuration int64 `json:"prompt_eval_duration"`
+	EvalDuration       int64 `json:"eval_duration"`
+}
+
+func toUnix(ts string) int64 { // parse RFC3339 / variant; fallback time.Now
+	if ts == "" { return time.Now().Unix() }
+	// try time.RFC3339 or with nanoseconds
+	t, err := time.Parse(time.RFC3339Nano, ts)
+	if err != nil { t2, err2 := time.Parse(time.RFC3339, ts); if err2 == nil { return t2.Unix() }; return time.Now().Unix() }
+	return t.Unix()
+}
+
+// streaming handler: convert Ollama stream -> OpenAI SSE
+func ollamaStreamHandler(c *gin.Context, info *relaycommon.RelayInfo, resp *http.Response) (*dto.Usage, *types.NewAPIError) {
+	if resp == nil || resp.Body == nil { return nil, types.NewOpenAIError(fmt.Errorf("empty response"), types.ErrorCodeBadResponse, http.StatusBadRequest) }
+	defer service.CloseResponseBodyGracefully(resp)
+
+	helper.SetEventStreamHeaders(c)
+	scanner := bufio.NewScanner(resp.Body)
+	usage := &dto.Usage{}
+	var model = info.UpstreamModelName
+	var responseId = common.GetUUID()
+	var created = time.Now().Unix()
+	var aggregatedText strings.Builder
+	var toolCallIndex int
+	// send start event
+	start := helper.GenerateStartEmptyResponse(responseId, created, model, nil)
+	if data, err := common.Marshal(start); err == nil { _ = helper.StringData(c, string(data)) }
+
+	for scanner.Scan() {
+		line := scanner.Text()
+		line = strings.TrimSpace(line)
+		if line == "" { continue }
+		var chunk ollamaChatStreamChunk
+		if err := json.Unmarshal([]byte(line), &chunk); err != nil {
+			logger.LogError(c, "ollama stream json decode error: "+err.Error()+" line="+line)
+			return usage, types.NewOpenAIError(err, types.ErrorCodeBadResponseBody, http.StatusInternalServerError)
+		}
+		if chunk.Model != "" { model = chunk.Model }
+		created = toUnix(chunk.CreatedAt)
+
+		if !chunk.Done {
+			// delta content
+			var content string
+			if chunk.Message != nil { content = chunk.Message.Content } else { content = chunk.Response }
+			if content != "" { aggregatedText.WriteString(content) }
+			delta := dto.ChatCompletionsStreamResponse{
+				Id:      responseId,
+				Object:  "chat.completion.chunk",
+				Created: created,
+				Model:   model,
+				Choices: []dto.ChatCompletionsStreamResponseChoice{ {
+					Index: 0,
+					Delta: dto.ChatCompletionsStreamResponseChoiceDelta{ Role: "assistant" },
+				} },
+			}
+			if content != "" { delta.Choices[0].Delta.SetContentString(content) }
+			// tool calls
+			if chunk.Message != nil && len(chunk.Message.ToolCalls) > 0 {
+				delta.Choices[0].Delta.ToolCalls = make([]dto.ToolCallResponse, 0, len(chunk.Message.ToolCalls))
+				for _, tc := range chunk.Message.ToolCalls {
+					// arguments -> string
+					argBytes, _ := json.Marshal(tc.Function.Arguments)
+					tr := dto.ToolCallResponse{ID: "", Type: nil, Function: dto.FunctionResponse{Name: tc.Function.Name, Arguments: string(argBytes)}}
+					tr.SetIndex(toolCallIndex)
+					toolCallIndex++
+					delta.Choices[0].Delta.ToolCalls = append(delta.Choices[0].Delta.ToolCalls, tr)
+				}
+			}
+			if data, err := common.Marshal(delta); err == nil { _ = helper.StringData(c, string(data)) }
+			continue
+		}
+		// done frame
+		usage.PromptTokens = chunk.PromptEvalCount
+		usage.CompletionTokens = chunk.EvalCount
+		usage.TotalTokens = usage.PromptTokens + usage.CompletionTokens
+		finishReason := chunk.DoneReason
+		if finishReason == "" { finishReason = "stop" }
+		stop := helper.GenerateStopResponse(responseId, created, model, finishReason)
+		if data, err := common.Marshal(stop); err == nil { _ = helper.StringData(c, string(data)) }
+		final := helper.GenerateFinalUsageResponse(responseId, created, model, *usage)
+		if data, err := common.Marshal(final); err == nil { _ = helper.StringData(c, string(data)) }
+	}
+	if err := scanner.Err(); err != nil && err != io.EOF { logger.LogError(c, "ollama stream scan error: "+err.Error()) }
+	return usage, nil
+}
+
+// non-stream handler for chat/generate
+func ollamaChatHandler(c *gin.Context, info *relaycommon.RelayInfo, resp *http.Response) (*dto.Usage, *types.NewAPIError) {
+	body, err := io.ReadAll(resp.Body)
+	if err != nil { return nil, types.NewOpenAIError(err, types.ErrorCodeReadResponseBodyFailed, http.StatusInternalServerError) }
+	service.CloseResponseBodyGracefully(resp)
+	if common.DebugEnabled { println("ollama non-stream resp:", string(body)) }
+	var chunk ollamaChatStreamChunk
+	if err = json.Unmarshal(body, &chunk); err != nil { return nil, types.NewOpenAIError(err, types.ErrorCodeBadResponseBody, http.StatusInternalServerError) }
+	model := chunk.Model
+	if model == "" { model = info.UpstreamModelName }
+	created := toUnix(chunk.CreatedAt)
+	content := ""
+	if chunk.Message != nil { content = chunk.Message.Content } else { content = chunk.Response }
+	usage := &dto.Usage{PromptTokens: chunk.PromptEvalCount, CompletionTokens: chunk.EvalCount, TotalTokens: chunk.PromptEvalCount + chunk.EvalCount}
+	// Build OpenAI style response
+	full := dto.OpenAITextResponse{
+		Id:      common.GetUUID(),
+		Model:   model,
+		Object:  "chat.completion",
+		Created: created,
+		Choices: []dto.OpenAITextResponseChoice{ {
+			Index:        0,
+			Message:      dto.Message{Role: "assistant", Content: contentPtr(content)},
+			FinishReason: func() string { if chunk.DoneReason == "" { return "stop" }; return chunk.DoneReason }(),
+		} },
+		Usage: *usage,
+	}
+	out, _ := common.Marshal(full)
+	service.IOCopyBytesGracefully(c, resp, out)
+	return usage, nil
+}
+
+func contentPtr(s string) *string { if s == "" { return nil }; return &s }

From fc38c480a1fd764bc5b9d0d7c3fd07acd7bf0694 Mon Sep 17 00:00:00 2001
From: somnifex <98788152+somnifex@users.noreply.github.com>
Date: Mon, 15 Sep 2025 23:09:10 +0800
Subject: [PATCH 02/18] fix: clean up ollamaChatStreamChunk struct field
 formatting
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 relay/channel/ollama/stream.go | 7 +++----
 1 file changed, 3 insertions(+), 4 deletions(-)

diff --git a/relay/channel/ollama/stream.go b/relay/channel/ollama/stream.go
index 3ae9c6d04..db615e8bd 100644
--- a/relay/channel/ollama/stream.go
+++ b/relay/channel/ollama/stream.go
@@ -25,9 +25,9 @@ type ollamaChatStreamChunk struct {
 	CreatedAt string `json:"created_at"`
 	// chat
 	Message *struct {
-		Role    string `json:"role"`
-		Content string `json:"content"`
-		ToolCalls []struct { `json:"tool_calls"`
+		Role      string `json:"role"`
+		Content   string `json:"content"`
+		ToolCalls []struct {
 			Function struct {
 				Name      string      `json:"name"`
 				Arguments interface{} `json:"arguments"`
@@ -66,7 +66,6 @@ func ollamaStreamHandler(c *gin.Context, info *relaycommon.RelayInfo, resp *http
 	var responseId = common.GetUUID()
 	var created = time.Now().Unix()
-	var aggregatedText strings.Builder
 	var toolCallIndex int
 	// send start event
 	start := helper.GenerateStartEmptyResponse(responseId, created, model, nil)
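Editor's note: unlike OpenAI's SSE, Ollama streams newline-delimited JSON objects, which is why the handler in patch 01 scans one JSON document per line. A minimal sketch of that read loop under that assumption (field names follow the chunk struct above; the payload is fabricated for illustration):

package main

import (
	"bufio"
	"encoding/json"
	"fmt"
	"strings"
)

func main() {
	// Simulated NDJSON body: two content chunks, then a final "done" chunk.
	body := strings.NewReader(
		`{"response":"Hel","done":false}` + "\n" +
			`{"response":"lo","done":false}` + "\n" +
			`{"done":true,"eval_count":2}` + "\n")
	scanner := bufio.NewScanner(body)
	for scanner.Scan() {
		var chunk struct {
			Response  string `json:"response"`
			Done      bool   `json:"done"`
			EvalCount int    `json:"eval_count"`
		}
		if err := json.Unmarshal(scanner.Bytes(), &chunk); err != nil {
			continue // a real handler surfaces this as an API error
		}
		if chunk.Done {
			fmt.Println("\ncompletion tokens:", chunk.EvalCount)
			break
		}
		fmt.Print(chunk.Response) // forwarded as an OpenAI-style delta
	}
}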
From 7d6ba52d85fbe88244b1e8e5c4866b63da3cae6a Mon Sep 17 00:00:00 2001
From: somnifex <98788152+somnifex@users.noreply.github.com>
Date: Mon, 15 Sep 2025 23:15:46 +0800
Subject: [PATCH 03/18] refactor: update request conversion logic and improve
 tool call parsing
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 relay/channel/ollama/adaptor.go      |  3 ++-
 relay/channel/ollama/dto.go          |  1 -
 relay/channel/ollama/relay-ollama.go | 26 ++++++++++++++------------
 relay/channel/ollama/stream.go       |  1 -
 4 files changed, 16 insertions(+), 15 deletions(-)

diff --git a/relay/channel/ollama/adaptor.go b/relay/channel/ollama/adaptor.go
index 3732be91b..d66839f7b 100644
--- a/relay/channel/ollama/adaptor.go
+++ b/relay/channel/ollama/adaptor.go
@@ -32,7 +32,8 @@ func (a *Adaptor) ConvertClaudeRequest(c *gin.Context, info *relaycommon.RelayIn
 	openaiRequest.(*dto.GeneralOpenAIRequest).StreamOptions = &dto.StreamOptions{
 		IncludeUsage: true,
 	}
-	return requestOpenAI2Ollama(c, openaiRequest.(*dto.GeneralOpenAIRequest))
+	// map to ollama chat request (Claude -> OpenAI -> Ollama chat)
+	return openAIChatToOllamaChat(c, openaiRequest.(*dto.GeneralOpenAIRequest))
 }
 
 func (a *Adaptor) ConvertAudioRequest(c *gin.Context, info *relaycommon.RelayInfo, request dto.AudioRequest) (io.Reader, error) {
diff --git a/relay/channel/ollama/dto.go b/relay/channel/ollama/dto.go
index b3d083dce..a3e325e2f 100644
--- a/relay/channel/ollama/dto.go
+++ b/relay/channel/ollama/dto.go
@@ -2,7 +2,6 @@ package ollama
 
 import (
 	"encoding/json"
-	"one-api/dto"
 )
 
 // OllamaChatMessage represents a single chat message
diff --git a/relay/channel/ollama/relay-ollama.go b/relay/channel/ollama/relay-ollama.go
index 897e22cbd..45424633c 100644
--- a/relay/channel/ollama/relay-ollama.go
+++ b/relay/channel/ollama/relay-ollama.go
@@ -101,18 +101,21 @@ func openAIChatToOllamaChat(c *gin.Context, r *dto.GeneralOpenAIRequest) (*Ollam
 		// history tool call result message
 		if m.Role == "tool" && m.Name != nil { cm.ToolName = *m.Name }
 		// tool calls from assistant previous message
-		if len(m.ToolCalls)>0 {
-			calls := make([]OllamaToolCall,0,len(m.ToolCalls))
-			for _, tc := range m.ToolCalls {
-				var args interface{}
-				if tc.Function.Arguments != "" { _ = json.Unmarshal([]byte(tc.Function.Arguments), &args) }
-				oc := OllamaToolCall{}
-				oc.Function.Name = tc.Function.Name
-				if args==nil { args = map[string]any{} }
-				oc.Function.Arguments = args
-				calls = append(calls, oc)
+		if m.ToolCalls != nil && len(m.ToolCalls) > 0 {
+			parsed := m.ParseToolCalls()
+			if len(parsed) > 0 {
+				calls := make([]OllamaToolCall, 0, len(parsed))
+				for _, tc := range parsed {
+					var args interface{}
+					if tc.Function.Arguments != "" { _ = json.Unmarshal([]byte(tc.Function.Arguments), &args) }
+					if args == nil { args = map[string]any{} }
+					oc := OllamaToolCall{}
+					oc.Function.Name = tc.Function.Name
+					oc.Function.Arguments = args
+					calls = append(calls, oc)
+				}
+				cm.ToolCalls = calls
 			}
-			cm.ToolCalls = calls
 		}
 		chatReq.Messages = append(chatReq.Messages, cm)
 	}
@@ -165,7 +168,6 @@ func requestOpenAI2Embeddings(r dto.EmbeddingRequest) *OllamaEmbeddingRequest {
 	opts := map[string]any{}
 	if r.Temperature != nil { opts["temperature"] = r.Temperature }
 	if r.TopP != 0 { opts["top_p"] = r.TopP }
-	if r.TopK != 0 { opts["top_k"] = r.TopK }
 	if r.FrequencyPenalty != 0 { opts["frequency_penalty"] = r.FrequencyPenalty }
 	if r.PresencePenalty != 0 { opts["presence_penalty"] = r.PresencePenalty }
 	if r.Seed != 0 { opts["seed"] = int(r.Seed) }
diff --git a/relay/channel/ollama/stream.go b/relay/channel/ollama/stream.go
index db615e8bd..d5b104d6f 100644
--- a/relay/channel/ollama/stream.go
+++ b/relay/channel/ollama/stream.go
@@ -87,7 +87,6 @@ func ollamaStreamHandler(c *gin.Context, info *relaycommon.RelayInfo, resp *http
 			// delta content
 			var content string
 			if chunk.Message != nil { content = chunk.Message.Content } else { content = chunk.Response }
-			if content != "" { aggregatedText.WriteString(content) }
 			delta := dto.ChatCompletionsStreamResponse{
 				Id:      responseId,
 				Object:  "chat.completion.chunk",
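Editor's note: OpenAI tool calls carry arguments as a JSON-encoded string, while Ollama expects a structured object, so the conversion above re-decodes the string and falls back to an empty object. A standalone sketch of that decoding (decodeArgs is an illustrative name):

package main

import (
	"encoding/json"
	"fmt"
)

// decodeArgs turns an OpenAI-style argument string into the object form
// Ollama expects; malformed or empty input degrades to an empty object.
func decodeArgs(raw string) any {
	var args any
	if raw != "" {
		_ = json.Unmarshal([]byte(raw), &args)
	}
	if args == nil {
		args = map[string]any{}
	}
	return args
}

func main() {
	fmt.Println(decodeArgs(`{"city":"Paris"}`)) // map[city:Paris]
	fmt.Println(decodeArgs(""))                 // map[]
}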
From 176fd6eda13537b57e6e1dc4c0f718a5ad523498 Mon Sep 17 00:00:00 2001
From: somnifex <98788152+somnifex@users.noreply.github.com>
Date: Mon, 15 Sep 2025 23:23:53 +0800
Subject: [PATCH 04/18] fix: improve stop and final-usage response handling in
 ollamaStreamHandler
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 relay/channel/ollama/stream.go | 16 ++++++++++++----
 1 file changed, 12 insertions(+), 4 deletions(-)

diff --git a/relay/channel/ollama/stream.go b/relay/channel/ollama/stream.go
index d5b104d6f..4e17f12d7 100644
--- a/relay/channel/ollama/stream.go
+++ b/relay/channel/ollama/stream.go
@@ -114,15 +114,23 @@ func ollamaStreamHandler(c *gin.Context, info *relaycommon.RelayInfo, resp *http
 			continue
 		}
 		// done frame
+		// finalize once and break loop
 		usage.PromptTokens = chunk.PromptEvalCount
 		usage.CompletionTokens = chunk.EvalCount
 		usage.TotalTokens = usage.PromptTokens + usage.CompletionTokens
 		finishReason := chunk.DoneReason
 		if finishReason == "" { finishReason = "stop" }
-		stop := helper.GenerateStopResponse(responseId, created, model, finishReason)
-		if data, err := common.Marshal(stop); err == nil { _ = helper.StringData(c, string(data)) }
-		final := helper.GenerateFinalUsageResponse(responseId, created, model, *usage)
-		if data, err := common.Marshal(final); err == nil { _ = helper.StringData(c, string(data)) }
+		// emit stop delta
+		if stop := helper.GenerateStopResponse(responseId, created, model, finishReason); stop != nil {
+			if data, err := common.Marshal(stop); err == nil { _ = helper.StringData(c, string(data)) }
+		}
+		// emit usage frame
+		if final := helper.GenerateFinalUsageResponse(responseId, created, model, *usage); final != nil {
+			if data, err := common.Marshal(final); err == nil { _ = helper.StringData(c, string(data)) }
+		}
+		// send [DONE]
+		helper.Done(c)
+		break
 	}
 	if err := scanner.Err(); err != nil && err != io.EOF { logger.LogError(c, "ollama stream scan error: "+err.Error()) }
 	return usage, nil
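Editor's note: the fix above pins down the termination sequence an OpenAI-compatible stream expects: one stop delta, one usage frame, then the SSE terminator, and no further reads. A sketch of the frame order written to the client, with abbreviated payloads:

package main

import "fmt"

func main() {
	frames := []string{
		`{"choices":[{"delta":{},"finish_reason":"stop"}]}`, // stop delta
		`{"choices":[],"usage":{"total_tokens":42}}`,        // usage frame
		`[DONE]`,                                            // SSE terminator
	}
	for _, f := range frames {
		fmt.Printf("data: %s\n\n", f) // SSE event framing: "data: <payload>" + blank line
	}
}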
From f7d393fc721278774469b3e25eca2df35f25127f Mon Sep 17 00:00:00 2001
From: somnifex <98788152+somnifex@users.noreply.github.com>
Date: Mon, 15 Sep 2025 23:41:09 +0800
Subject: [PATCH 05/18] refactor: simplify request conversion functions and
 stream handling logic
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 relay/channel/ollama/adaptor.go      | 32 ++++------------
 relay/channel/ollama/dto.go          |  3 --
 relay/channel/ollama/relay-ollama.go |  9 -----
 relay/channel/ollama/stream.go       | 56 +++++++++++++++++++++-------
 4 files changed, 49 insertions(+), 51 deletions(-)

diff --git a/relay/channel/ollama/adaptor.go b/relay/channel/ollama/adaptor.go
index d66839f7b..bafe73b92 100644
--- a/relay/channel/ollama/adaptor.go
+++ b/relay/channel/ollama/adaptor.go
@@ -18,10 +18,7 @@ import (
 type Adaptor struct {
 }
 
-func (a *Adaptor) ConvertGeminiRequest(*gin.Context, *relaycommon.RelayInfo, *dto.GeminiChatRequest) (any, error) {
-	//TODO implement me
-	return nil, errors.New("not implemented")
-}
+func (a *Adaptor) ConvertGeminiRequest(*gin.Context, *relaycommon.RelayInfo, *dto.GeminiChatRequest) (any, error) { return nil, errors.New("not implemented") }
 
 func (a *Adaptor) ConvertClaudeRequest(c *gin.Context, info *relaycommon.RelayInfo, request *dto.ClaudeRequest) (any, error) {
 	openaiAdaptor := openai.Adaptor{}
@@ -36,29 +33,17 @@ func (a *Adaptor) ConvertClaudeRequest(c *gin.Context, info *relaycommon.RelayIn
 	return openAIChatToOllamaChat(c, openaiRequest.(*dto.GeneralOpenAIRequest))
 }
 
-func (a *Adaptor) ConvertAudioRequest(c *gin.Context, info *relaycommon.RelayInfo, request dto.AudioRequest) (io.Reader, error) {
-	//TODO implement me
-	return nil, errors.New("not implemented")
-}
+func (a *Adaptor) ConvertAudioRequest(c *gin.Context, info *relaycommon.RelayInfo, request dto.AudioRequest) (io.Reader, error) { return nil, errors.New("not implemented") }
 
-func (a *Adaptor) ConvertImageRequest(c *gin.Context, info *relaycommon.RelayInfo, request dto.ImageRequest) (any, error) {
-	//TODO implement me
-	return nil, errors.New("not implemented")
-}
+func (a *Adaptor) ConvertImageRequest(c *gin.Context, info *relaycommon.RelayInfo, request dto.ImageRequest) (any, error) { return nil, errors.New("not implemented") }
 
 func (a *Adaptor) Init(info *relaycommon.RelayInfo) {
 }
 
 func (a *Adaptor) GetRequestURL(info *relaycommon.RelayInfo) (string, error) {
-	// embeddings fixed endpoint
-	if info.RelayMode == relayconstant.RelayModeEmbeddings {
-		return info.ChannelBaseUrl + "/api/embed", nil
-	}
-	// For chat vs generate: if original path contains "/v1/completions" map to generate; otherwise chat
-	if strings.Contains(info.RequestURLPath, "/v1/completions") || info.RelayMode == relayconstant.RelayModeCompletions {
-		return info.ChannelBaseUrl + "/api/generate", nil
-	}
-	return info.ChannelBaseUrl + "/api/chat", nil
+	if info.RelayMode == relayconstant.RelayModeEmbeddings { return info.ChannelBaseUrl + "/api/embed", nil }
+	if strings.Contains(info.RequestURLPath, "/v1/completions") || info.RelayMode == relayconstant.RelayModeCompletions { return info.ChannelBaseUrl + "/api/generate", nil }
+	return info.ChannelBaseUrl + "/api/chat", nil
 }
 
 func (a *Adaptor) SetupRequestHeader(c *gin.Context, req *http.Header, info *relaycommon.RelayInfo) error {
@@ -84,10 +69,7 @@ func (a *Adaptor) ConvertEmbeddingRequest(c *gin.Context, info *relaycommon.Rela
 	return requestOpenAI2Embeddings(request), nil
 }
 
-func (a *Adaptor) ConvertOpenAIResponsesRequest(c *gin.Context, info *relaycommon.RelayInfo, request dto.OpenAIResponsesRequest) (any, error) {
-	// TODO implement me
-	return nil, errors.New("not implemented")
-}
+func (a *Adaptor) ConvertOpenAIResponsesRequest(c *gin.Context, info *relaycommon.RelayInfo, request dto.OpenAIResponsesRequest) (any, error) { return nil, errors.New("not implemented") }
 
 func (a *Adaptor) DoRequest(c *gin.Context, info *relaycommon.RelayInfo, requestBody io.Reader) (any, error) {
 	return channel.DoApiRequest(a, c, info, requestBody)
diff --git a/relay/channel/ollama/dto.go b/relay/channel/ollama/dto.go
index a3e325e2f..45e49ab43 100644
--- a/relay/channel/ollama/dto.go
+++ b/relay/channel/ollama/dto.go
@@ -4,7 +4,6 @@ import (
 	"encoding/json"
 )
 
-// OllamaChatMessage represents a single chat message
 type OllamaChatMessage struct {
 	Role    string `json:"role"`
 	Content string `json:"content,omitempty"`
@@ -32,7 +31,6 @@ type OllamaToolCall struct {
 	} `json:"function"`
 }
 
-// OllamaChatRequest -> /api/chat
 type OllamaChatRequest struct {
 	Model    string              `json:"model"`
 	Messages []OllamaChatMessage `json:"messages"`
@@ -44,7 +42,6 @@ type OllamaChatRequest struct {
 	Think json.RawMessage `json:"think,omitempty"`
 }
 
-// OllamaGenerateRequest -> /api/generate
 type OllamaGenerateRequest struct {
 	Model  string `json:"model"`
 	Prompt string `json:"prompt,omitempty"`
diff --git a/relay/channel/ollama/relay-ollama.go b/relay/channel/ollama/relay-ollama.go
index 45424633c..c79f98760 100644
--- a/relay/channel/ollama/relay-ollama.go
+++ b/relay/channel/ollama/relay-ollama.go
@@ -15,7 +15,6 @@ import (
 	"github.com/gin-gonic/gin"
 )
 
-// openAIChatToOllamaChat converts OpenAI-style chat request to Ollama chat
 func openAIChatToOllamaChat(c *gin.Context, r *dto.GeneralOpenAIRequest) (*OllamaChatRequest, error) {
 	chatReq := &OllamaChatRequest{
 		Model:   r.Model,
@@ -23,12 +22,10 @@ func openAIChatToOllamaChat(c *gin.Context, r *dto.GeneralOpenAIRequest) (*Ollam
 		Options: map[string]any{},
 		Think:   r.Think,
 	}
-	// format mapping
 	if r.ResponseFormat != nil {
 		if r.ResponseFormat.Type == "json" {
 			chatReq.Format = "json"
 		} else if r.ResponseFormat.Type == "json_schema" {
-			// supply schema object directly
 			if len(r.ResponseFormat.JsonSchema) > 0 {
 				var schema any
 				_ = json.Unmarshal(r.ResponseFormat.JsonSchema, &schema)
@@ -46,7 +43,6 @@ func openAIChatToOllamaChat(c *gin.Context, r *dto.GeneralOpenAIRequest) (*Ollam
 	if r.Seed != 0 { chatReq.Options["seed"] = int(r.Seed) }
 	if mt := r.GetMaxTokens(); mt != 0 { chatReq.Options["num_predict"] = int(mt) }
 
-	// Stop -> options.stop (array)
 	if r.Stop != nil {
 		switch v := r.Stop.(type) {
 		case string:
@@ -60,7 +56,6 @@ func openAIChatToOllamaChat(c *gin.Context, r *dto.GeneralOpenAIRequest) (*Ollam
 		}
 	}
 
-	// tools
 	if len(r.Tools) > 0 {
 		tools := make([]OllamaTool, 0, len(r.Tools))
 		for _, t := range r.Tools {
@@ -69,10 +64,8 @@ func openAIChatToOllamaChat(c *gin.Context, r *dto.GeneralOpenAIRequest) (*Ollam
 		chatReq.Tools = tools
 	}
 
-	// messages
 	chatReq.Messages = make([]OllamaChatMessage, 0, len(r.Messages))
 	for _, m := range r.Messages {
-		// gather text parts & images
 		var textBuilder strings.Builder
 		var images []string
 		if m.IsStringContent() {
@@ -98,9 +91,7 @@ func openAIChatToOllamaChat(c *gin.Context, r *dto.GeneralOpenAIRequest) (*Ollam
 		}
 		cm := OllamaChatMessage{Role: m.Role, Content: textBuilder.String()}
 		if len(images)>0 { cm.Images = images }
-		// history tool call result message
 		if m.Role == "tool" && m.Name != nil { cm.ToolName = *m.Name }
-		// tool calls from assistant previous message
 		if m.ToolCalls != nil && len(m.ToolCalls) > 0 {
 			parsed := m.ParseToolCalls()
 			if len(parsed) > 0 {
diff --git a/relay/channel/ollama/stream.go b/relay/channel/ollama/stream.go
index 4e17f12d7..167c676d6 100644
--- a/relay/channel/ollama/stream.go
+++ b/relay/channel/ollama/stream.go
@@ -19,7 +19,6 @@ import (
 	"github.com/gin-gonic/gin"
 )
 
-// Ollama streaming chunk (chat or generate)
 type ollamaChatStreamChunk struct {
 	Model     string `json:"model"`
 	CreatedAt string `json:"created_at"`
@@ -47,7 +46,7 @@ type ollamaChatStreamChunk struct {
 	EvalDuration int64 `json:"eval_duration"`
 }
 
-func toUnix(ts string) int64 { // parse RFC3339 / variant; fallback time.Now
+func toUnix(ts string) int64 {
 	if ts == "" { return time.Now().Unix() }
 	// try time.RFC3339 or with nanoseconds
 	t, err := time.Parse(time.RFC3339Nano, ts)
@@ -55,7 +54,6 @@ func toUnix(ts string) int64 { // parse RFC3339 / variant; fallback time.Now
 	return t.Unix()
 }
 
-// streaming handler: convert Ollama stream -> OpenAI SSE
 func ollamaStreamHandler(c *gin.Context, info *relaycommon.RelayInfo, resp *http.Response) (*dto.Usage, *types.NewAPIError) {
 	if resp == nil || resp.Body == nil { return nil, types.NewOpenAIError(fmt.Errorf("empty response"), types.ErrorCodeBadResponse, http.StatusBadRequest) }
 	defer service.CloseResponseBodyGracefully(resp)
@@ -67,7 +65,6 @@ func ollamaStreamHandler(c *gin.Context, info *relaycommon.RelayInfo, resp *http
 	var responseId = common.GetUUID()
 	var created = time.Now().Unix()
 	var toolCallIndex int
-	// send start event
 	start := helper.GenerateStartEmptyResponse(responseId, created, model, nil)
 	if data, err := common.Marshal(start); err == nil { _ = helper.StringData(c, string(data)) }
 
@@ -141,16 +138,47 @@ func ollamaChatHandler(c *gin.Context, info *relaycommon.RelayInfo, resp *http.R
 	body, err := io.ReadAll(resp.Body)
 	if err != nil { return nil, types.NewOpenAIError(err, types.ErrorCodeReadResponseBodyFailed, http.StatusInternalServerError) }
 	service.CloseResponseBodyGracefully(resp)
-	if common.DebugEnabled { println("ollama non-stream resp:", string(body)) }
-	var chunk ollamaChatStreamChunk
-	if err = json.Unmarshal(body, &chunk); err != nil { return nil, types.NewOpenAIError(err, types.ErrorCodeBadResponseBody, http.StatusInternalServerError) }
-	model := chunk.Model
+	raw := string(body)
+	if common.DebugEnabled { println("ollama non-stream raw resp:", raw) }
+
+	lines := strings.Split(raw, "\n")
+	var (
+		aggContent strings.Builder
+		lastChunk  ollamaChatStreamChunk
+		parsedAny  bool
+	)
+	for _, ln := range lines {
+		ln = strings.TrimSpace(ln)
+		if ln == "" { continue }
+		var ck ollamaChatStreamChunk
+		if err := json.Unmarshal([]byte(ln), &ck); err != nil {
+			if len(lines) == 1 { return nil, types.NewOpenAIError(err, types.ErrorCodeBadResponseBody, http.StatusInternalServerError) }
+			continue
+		}
+		parsedAny = true
+		lastChunk = ck
+		if !ck.Done {
+			if ck.Message != nil && ck.Message.Content != "" { aggContent.WriteString(ck.Message.Content) } else if ck.Response != "" { aggContent.WriteString(ck.Response) }
+		} else {
+			if ck.Message != nil && ck.Message.Content != "" { aggContent.WriteString(ck.Message.Content) } else if ck.Response != "" { aggContent.WriteString(ck.Response) }
+		}
+	}
+
+	if !parsedAny {
+		var single ollamaChatStreamChunk
+		if err := json.Unmarshal(body, &single); err != nil { return nil, types.NewOpenAIError(err, types.ErrorCodeBadResponseBody, http.StatusInternalServerError) }
+		lastChunk = single
+		if single.Message != nil { aggContent.WriteString(single.Message.Content) } else { aggContent.WriteString(single.Response) }
+	}
+
+	model := lastChunk.Model
 	if model == "" { model = info.UpstreamModelName }
-	created := toUnix(chunk.CreatedAt)
-	content := ""
-	if chunk.Message != nil { content = chunk.Message.Content } else { content = chunk.Response }
-	usage := &dto.Usage{PromptTokens: chunk.PromptEvalCount, CompletionTokens: chunk.EvalCount, TotalTokens: chunk.PromptEvalCount + chunk.EvalCount}
-	// Build OpenAI style response
+	created := toUnix(lastChunk.CreatedAt)
+	usage := &dto.Usage{PromptTokens: lastChunk.PromptEvalCount, CompletionTokens: lastChunk.EvalCount, TotalTokens: lastChunk.PromptEvalCount + lastChunk.EvalCount}
+	content := aggContent.String()
+	finishReason := lastChunk.DoneReason
+	if finishReason == "" { finishReason = "stop" }
+
 	full := dto.OpenAITextResponse{
 		Id:      common.GetUUID(),
 		Model:   model,
@@ -159,7 +187,7 @@ func ollamaChatHandler(c *gin.Context, info *relaycommon.RelayInfo, resp *http.R
 		Choices: []dto.OpenAITextResponseChoice{ {
 			Index:        0,
 			Message:      dto.Message{Role: "assistant", Content: contentPtr(content)},
-			FinishReason: func() string { if chunk.DoneReason == "" { return "stop" }; return chunk.DoneReason }(),
+			FinishReason: &finishReason,
 		} },
 		Usage: *usage,
 	}
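Editor's note: the reworked non-stream handler tolerates a body that is either a single JSON object or several NDJSON lines, concatenating content across chunks and taking usage from the final one. A condensed sketch of that aggregation (the chunk type is trimmed for illustration):

package main

import (
	"encoding/json"
	"fmt"
	"strings"
)

type chunk struct {
	Response  string `json:"response"`
	Done      bool   `json:"done"`
	EvalCount int    `json:"eval_count"`
}

// aggregate concatenates content from every parseable line and keeps the
// last chunk, which carries the token counts in Ollama's final frame.
func aggregate(raw string) (string, int) {
	var text strings.Builder
	var last chunk
	for _, ln := range strings.Split(raw, "\n") {
		ln = strings.TrimSpace(ln)
		if ln == "" {
			continue
		}
		var ck chunk
		if json.Unmarshal([]byte(ln), &ck) != nil {
			continue
		}
		text.WriteString(ck.Response)
		last = ck
	}
	return text.String(), last.EvalCount
}

func main() {
	body := `{"response":"Hi ","done":false}` + "\n" + `{"response":"there","done":true,"eval_count":2}`
	text, tokens := aggregate(body)
	fmt.Println(text, tokens) // Hi there 2
}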
From 9d952e0d78569bee80446c4b6ff2881d04159eb6 Mon Sep 17 00:00:00 2001
From: somnifex <98788152+somnifex@users.noreply.github.com>
Date: Mon, 15 Sep 2025 23:43:39 +0800
Subject: [PATCH 06/18] fix: correct FinishReason field assignment in
 ollamaChatHandler
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 relay/channel/ollama/stream.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/relay/channel/ollama/stream.go b/relay/channel/ollama/stream.go
index 167c676d6..ad12e7f83 100644
--- a/relay/channel/ollama/stream.go
+++ b/relay/channel/ollama/stream.go
@@ -187,7 +187,7 @@ func ollamaChatHandler(c *gin.Context, info *relaycommon.RelayInfo, resp *http.R
 		Choices: []dto.OpenAITextResponseChoice{ {
 			Index:        0,
 			Message:      dto.Message{Role: "assistant", Content: contentPtr(content)},
-			FinishReason: &finishReason,
+			FinishReason: finishReason,
 		} },
 		Usage: *usage,
 	}

From 4eeca081fee46faaeb5551c7af355c1ef03de47b Mon Sep 17 00:00:00 2001
From: somnifex <98788152+somnifex@users.noreply.github.com>
Date: Tue, 16 Sep 2025 08:13:28 +0800
Subject: [PATCH 07/18] fix: correct image URL handling so base64 data is
 generated properly
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 relay/channel/ollama/relay-ollama.go | 10 +++++++---
 1 file changed, 7 insertions(+), 3 deletions(-)

diff --git a/relay/channel/ollama/relay-ollama.go b/relay/channel/ollama/relay-ollama.go
index c79f98760..3b67f9525 100644
--- a/relay/channel/ollama/relay-ollama.go
+++ b/relay/channel/ollama/relay-ollama.go
@@ -76,13 +76,17 @@ func openAIChatToOllamaChat(c *gin.Context, r *dto.GeneralOpenAIRequest) (*Ollam
 				if part.Type == dto.ContentTypeImageURL {
 					img := part.GetImageMedia()
 					if img != nil && img.Url != "" {
-						// ensure base64 dataURL
+						var base64Data string
 						if strings.HasPrefix(img.Url, "http") {
 							fileData, err := service.GetFileBase64FromUrl(c, img.Url, "fetch image for ollama chat")
 							if err != nil { return nil, err }
-							img.Url = fmt.Sprintf("data:%s;base64,%s", fileData.MimeType, fileData.Base64Data)
+							base64Data = fileData.Base64Data
+						} else if strings.HasPrefix(img.Url, "data:") {
+							if idx := strings.Index(img.Url, ","); idx != -1 && idx+1 < len(img.Url) { base64Data = img.Url[idx+1:] }
+						} else {
+							base64Data = img.Url
 						}
-						images = append(images, img.Url)
+						if base64Data != "" { images = append(images, base64Data) }
 					}
 				} else if part.Type == dto.ContentTypeText {
 					textBuilder.WriteString(part.Text)
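Editor's note: patch 07 matters because Ollama's images field takes raw base64 payloads, not data URLs; the fix strips the "data:<mime>;base64," prefix instead of forwarding the whole URL. An equivalent standalone sketch of that extraction (toRawBase64 is an illustrative name):

package main

import (
	"fmt"
	"strings"
)

// toRawBase64 keeps only the payload after the comma of a data URL;
// anything else is assumed to already be raw base64 (http URLs are
// fetched and encoded upstream in the converter).
func toRawBase64(url string) string {
	if strings.HasPrefix(url, "data:") {
		if idx := strings.Index(url, ","); idx != -1 && idx+1 < len(url) {
			return url[idx+1:]
		}
		return ""
	}
	return url
}

func main() {
	fmt.Println(toRawBase64("data:image/png;base64,iVBORw0KGgo=")) // iVBORw0KGgo=
}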
From 62549717e0652a9914c83ad5e025360e63c0f9c3 Mon Sep 17 00:00:00 2001
From: somnifex <98788152+somnifex@users.noreply.github.com>
Date: Tue, 16 Sep 2025 08:51:29 +0800
Subject: [PATCH 08/18] fix: handle the Thinking field so reasoning content is
 passed through correctly
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 relay/channel/ollama/stream.go | 31 +++++++++++++++++++++----------
 1 file changed, 21 insertions(+), 10 deletions(-)

diff --git a/relay/channel/ollama/stream.go b/relay/channel/ollama/stream.go
index ad12e7f83..cea458444 100644
--- a/relay/channel/ollama/stream.go
+++ b/relay/channel/ollama/stream.go
@@ -26,6 +26,7 @@ type ollamaChatStreamChunk struct {
 	Message *struct {
 		Role    string `json:"role"`
 		Content string `json:"content"`
+		Thinking json.RawMessage `json:"thinking"`
 		ToolCalls []struct {
 			Function struct {
 				Name      string      `json:"name"`
 				Arguments interface{} `json:"arguments"`
@@ -41,7 +42,6 @@ type ollamaChatStreamChunk struct {
 	LoadDuration    int64 `json:"load_duration"`
 	PromptEvalCount int   `json:"prompt_eval_count"`
 	EvalCount       int   `json:"eval_count"`
-	// generate mode may use these
 	PromptEvalDuration int64 `json:"prompt_eval_duration"`
 	EvalDuration       int64 `json:"eval_duration"`
 }
@@ -95,13 +95,18 @@ func ollamaStreamHandler(c *gin.Context, info *relaycommon.RelayInfo, resp *http
 			} },
 		}
 		if content != "" { delta.Choices[0].Delta.SetContentString(content) }
+		if chunk.Message != nil && len(chunk.Message.Thinking) > 0 {
+			raw := strings.TrimSpace(string(chunk.Message.Thinking))
+			if raw != "" && raw != "null" { delta.Choices[0].Delta.SetReasoningContent(raw) }
+		}
 		// tool calls
 		if chunk.Message != nil && len(chunk.Message.ToolCalls) > 0 {
 			delta.Choices[0].Delta.ToolCalls = make([]dto.ToolCallResponse, 0, len(chunk.Message.ToolCalls))
 			for _, tc := range chunk.Message.ToolCalls {
 				// arguments -> string
 				argBytes, _ := json.Marshal(tc.Function.Arguments)
-				tr := dto.ToolCallResponse{ID:"", Type:nil, Function: dto.FunctionResponse{Name: tc.Function.Name, Arguments: string(argBytes)}}
+				toolId := fmt.Sprintf("call_%d", toolCallIndex)
+				tr := dto.ToolCallResponse{ID:toolId, Type:"function", Function: dto.FunctionResponse{Name: tc.Function.Name, Arguments: string(argBytes)}}
 				tr.SetIndex(toolCallIndex)
 				toolCallIndex++
 				delta.Choices[0].Delta.ToolCalls = append(delta.Choices[0].Delta.ToolCalls, tr)
@@ -115,8 +120,8 @@ func ollamaStreamHandler(c *gin.Context, info *relaycommon.RelayInfo, resp *http
 		usage.PromptTokens = chunk.PromptEvalCount
 		usage.CompletionTokens = chunk.EvalCount
 		usage.TotalTokens = usage.PromptTokens + usage.CompletionTokens
-		finishReason := chunk.DoneReason
-		if finishReason == "" { finishReason = "stop" }
+		finishReason := chunk.DoneReason
+		if finishReason == "" { finishReason = "stop" }
 		// emit stop delta
 		if stop := helper.GenerateStopResponse(responseId, created, model, finishReason); stop != nil {
 			if data, err := common.Marshal(stop); err == nil { _ = helper.StringData(c, string(data)) }
@@ -144,6 +149,7 @@ func ollamaChatHandler(c *gin.Context, info *relaycommon.RelayInfo, resp *http.R
 	lines := strings.Split(raw, "\n")
 	var (
 		aggContent strings.Builder
+		reasoningBuilder strings.Builder
 		lastChunk  ollamaChatStreamChunk
 		parsedAny  bool
 	)
@@ -157,18 +163,21 @@ func ollamaChatHandler(c *gin.Context, info *relaycommon.RelayInfo, resp *http.R
 		}
 		parsedAny = true
 		lastChunk = ck
-		if !ck.Done {
-			if ck.Message != nil && ck.Message.Content != "" { aggContent.WriteString(ck.Message.Content) } else if ck.Response != "" { aggContent.WriteString(ck.Response) }
-		} else {
-			if ck.Message != nil && ck.Message.Content != "" { aggContent.WriteString(ck.Message.Content) } else if ck.Response != "" { aggContent.WriteString(ck.Response) }
+		if ck.Message != nil && len(ck.Message.Thinking) > 0 {
+			raw := strings.TrimSpace(string(ck.Message.Thinking))
+			if raw != "" && raw != "null" { reasoningBuilder.WriteString(raw) }
 		}
+		if ck.Message != nil && ck.Message.Content != "" { aggContent.WriteString(ck.Message.Content) } else if ck.Response != "" { aggContent.WriteString(ck.Response) }
 	}
 
 	if !parsedAny {
 		var single ollamaChatStreamChunk
 		if err := json.Unmarshal(body, &single); err != nil { return nil, types.NewOpenAIError(err, types.ErrorCodeBadResponseBody, http.StatusInternalServerError) }
 		lastChunk = single
-		if single.Message != nil { aggContent.WriteString(single.Message.Content) } else { aggContent.WriteString(single.Response) }
+		if single.Message != nil {
+			if len(single.Message.Thinking) > 0 { raw := strings.TrimSpace(string(single.Message.Thinking)); if raw != "" && raw != "null" { reasoningBuilder.WriteString(raw) } }
+			aggContent.WriteString(single.Message.Content)
+		} else { aggContent.WriteString(single.Response) }
 	}
 
 	model := lastChunk.Model
@@ -179,6 +188,8 @@ func ollamaChatHandler(c *gin.Context, info *relaycommon.RelayInfo, resp *http.R
 	finishReason := lastChunk.DoneReason
 	if finishReason == "" { finishReason = "stop" }
 
+	msg := dto.Message{Role: "assistant", Content: contentPtr(content)}
+	if rc := reasoningBuilder.String(); rc != "" { msg.ReasoningContent = &rc }
 	full := dto.OpenAITextResponse{
 		Id:    common.GetUUID(),
 		Model: model,
@@ -186,7 +197,7 @@ func ollamaChatHandler(c *gin.Context, info *relaycommon.RelayInfo, resp *http.R
 		Created: created,
 		Choices: []dto.OpenAITextResponseChoice{ {
 			Index:        0,
-			Message:      dto.Message{Role: "assistant", Content: contentPtr(content)},
+			Message:      msg,
 			FinishReason: finishReason,
 		} },
 		Usage: *usage,

From 1dd78b83b7d80e408062f9b18b3db8f860c0a0c9 Mon Sep 17 00:00:00 2001
From: somnifex <98788152+somnifex@users.noreply.github.com>
Date: Tue, 16 Sep 2025 08:54:34 +0800
Subject: [PATCH 09/18] fix: correct ReasoningContent field assignment in
 ollamaChatHandler
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 relay/channel/ollama/stream.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/relay/channel/ollama/stream.go b/relay/channel/ollama/stream.go
index cea458444..964f11d90 100644
--- a/relay/channel/ollama/stream.go
+++ b/relay/channel/ollama/stream.go
@@ -189,7 +189,7 @@ func ollamaChatHandler(c *gin.Context, info *relaycommon.RelayInfo, resp *http.R
 	if finishReason == "" { finishReason = "stop" }
 
 	msg := dto.Message{Role: "assistant", Content: contentPtr(content)}
-	if rc := reasoningBuilder.String(); rc != "" { msg.ReasoningContent = &rc }
+	if rc := reasoningBuilder.String(); rc != "" { msg.ReasoningContent = rc }
 	full := dto.OpenAITextResponse{
 		Id:    common.GetUUID(),
 		Model: model,
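Editor's note: patches 08-09 forward Ollama's "thinking" field, which arrives as raw JSON (usually a string, possibly null), as OpenAI-style reasoning content only when it is non-empty and non-null. A standalone sketch of that guard (reasoningFrom is an illustrative name; like the patch, it forwards the trimmed raw JSON as-is):

package main

import (
	"encoding/json"
	"fmt"
	"strings"
)

// reasoningFrom reports whether a thinking payload should be surfaced,
// filtering out empty and JSON-null values exactly as the handlers do.
func reasoningFrom(thinking json.RawMessage) (string, bool) {
	raw := strings.TrimSpace(string(thinking))
	if raw == "" || raw == "null" {
		return "", false
	}
	return raw, true
}

func main() {
	if rc, ok := reasoningFrom(json.RawMessage(`"step 1..."`)); ok {
		fmt.Println("reasoning:", rc)
	}
	if _, ok := reasoningFrom(json.RawMessage(`null`)); !ok {
		fmt.Println("null thinking ignored")
	}
}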
`json:"prompt_eval_duration"` + EvalDuration int64 `json:"eval_duration"` +} + +func toUnix(ts string) int64 { + if ts == "" { return time.Now().Unix() } + // try time.RFC3339 or with nanoseconds + t, err := time.Parse(time.RFC3339Nano, ts) + if err != nil { t2, err2 := time.Parse(time.RFC3339, ts); if err2==nil { return t2.Unix() }; return time.Now().Unix() } + return t.Unix() +} + +func ollamaStreamHandler(c *gin.Context, info *relaycommon.RelayInfo, resp *http.Response) (*dto.Usage, *types.NewAPIError) { + if resp == nil || resp.Body == nil { return nil, types.NewOpenAIError(fmt.Errorf("empty response"), types.ErrorCodeBadResponse, http.StatusBadRequest) } + defer service.CloseResponseBodyGracefully(resp) + + helper.SetEventStreamHeaders(c) + scanner := bufio.NewScanner(resp.Body) + usage := &dto.Usage{} + var model = info.UpstreamModelName + var responseId = common.GetUUID() + var created = time.Now().Unix() + var toolCallIndex int + start := helper.GenerateStartEmptyResponse(responseId, created, model, nil) + if data, err := common.Marshal(start); err == nil { _ = helper.StringData(c, string(data)) } + + for scanner.Scan() { + line := scanner.Text() + line = strings.TrimSpace(line) + if line == "" { continue } + var chunk ollamaChatStreamChunk + if err := json.Unmarshal([]byte(line), &chunk); err != nil { + logger.LogError(c, "ollama stream json decode error: "+err.Error()+" line="+line) + return usage, types.NewOpenAIError(err, types.ErrorCodeBadResponseBody, http.StatusInternalServerError) + } + if chunk.Model != "" { model = chunk.Model } + created = toUnix(chunk.CreatedAt) + + if !chunk.Done { + // delta content + var content string + if chunk.Message != nil { content = chunk.Message.Content } else { content = chunk.Response } + delta := dto.ChatCompletionsStreamResponse{ + Id: responseId, + Object: "chat.completion.chunk", + Created: created, + Model: model, + Choices: []dto.ChatCompletionsStreamResponseChoice{ { + Index: 0, + Delta: dto.ChatCompletionsStreamResponseChoiceDelta{ Role: "assistant" }, + } }, + } + if content != "" { delta.Choices[0].Delta.SetContentString(content) } + if chunk.Message != nil && len(chunk.Message.Thinking) > 0 { + raw := strings.TrimSpace(string(chunk.Message.Thinking)) + if raw != "" && raw != "null" { delta.Choices[0].Delta.SetReasoningContent(raw) } + } + // tool calls + if chunk.Message != nil && len(chunk.Message.ToolCalls) > 0 { + delta.Choices[0].Delta.ToolCalls = make([]dto.ToolCallResponse,0,len(chunk.Message.ToolCalls)) + for _, tc := range chunk.Message.ToolCalls { + // arguments -> string + argBytes, _ := json.Marshal(tc.Function.Arguments) + toolId := fmt.Sprintf("call_%d", toolCallIndex) + tr := dto.ToolCallResponse{ID:toolId, Type:"function", Function: dto.FunctionResponse{Name: tc.Function.Name, Arguments: string(argBytes)}} + tr.SetIndex(toolCallIndex) + toolCallIndex++ + delta.Choices[0].Delta.ToolCalls = append(delta.Choices[0].Delta.ToolCalls, tr) + } + } + if data, err := common.Marshal(delta); err == nil { _ = helper.StringData(c, string(data)) } + continue + } + // done frame + // finalize once and break loop + usage.PromptTokens = chunk.PromptEvalCount + usage.CompletionTokens = chunk.EvalCount + usage.TotalTokens = usage.PromptTokens + usage.CompletionTokens + finishReason := chunk.DoneReason + if finishReason == "" { finishReason = "stop" } + // emit stop delta + if stop := helper.GenerateStopResponse(responseId, created, model, finishReason); stop != nil { + if data, err := common.Marshal(stop); err == nil { _ = 
helper.StringData(c, string(data)) } + } + // emit usage frame + if final := helper.GenerateFinalUsageResponse(responseId, created, model, *usage); final != nil { + if data, err := common.Marshal(final); err == nil { _ = helper.StringData(c, string(data)) } + } + // send [DONE] + helper.Done(c) + break + } + if err := scanner.Err(); err != nil && err != io.EOF { logger.LogError(c, "ollama stream scan error: "+err.Error()) } + return usage, nil +} + +// non-stream handler for chat/generate +func ollamaChatHandler(c *gin.Context, info *relaycommon.RelayInfo, resp *http.Response) (*dto.Usage, *types.NewAPIError) { + body, err := io.ReadAll(resp.Body) + if err != nil { return nil, types.NewOpenAIError(err, types.ErrorCodeReadResponseBodyFailed, http.StatusInternalServerError) } + service.CloseResponseBodyGracefully(resp) + raw := string(body) + if common.DebugEnabled { println("ollama non-stream raw resp:", raw) } + + lines := strings.Split(raw, "\n") + var ( + aggContent strings.Builder + reasoningBuilder strings.Builder + lastChunk ollamaChatStreamChunk + parsedAny bool + ) + for _, ln := range lines { + ln = strings.TrimSpace(ln) + if ln == "" { continue } + var ck ollamaChatStreamChunk + if err := json.Unmarshal([]byte(ln), &ck); err != nil { + if len(lines) == 1 { return nil, types.NewOpenAIError(err, types.ErrorCodeBadResponseBody, http.StatusInternalServerError) } + continue + } + parsedAny = true + lastChunk = ck + if ck.Message != nil && len(ck.Message.Thinking) > 0 { + raw := strings.TrimSpace(string(ck.Message.Thinking)) + if raw != "" && raw != "null" { reasoningBuilder.WriteString(raw) } + } + if ck.Message != nil && ck.Message.Content != "" { aggContent.WriteString(ck.Message.Content) } else if ck.Response != "" { aggContent.WriteString(ck.Response) } + } + + if !parsedAny { + var single ollamaChatStreamChunk + if err := json.Unmarshal(body, &single); err != nil { return nil, types.NewOpenAIError(err, types.ErrorCodeBadResponseBody, http.StatusInternalServerError) } + lastChunk = single + if single.Message != nil { + if len(single.Message.Thinking) > 0 { raw := strings.TrimSpace(string(single.Message.Thinking)); if raw != "" && raw != "null" { reasoningBuilder.WriteString(raw) } } + aggContent.WriteString(single.Message.Content) + } else { aggContent.WriteString(single.Response) } + } + + model := lastChunk.Model + if model == "" { model = info.UpstreamModelName } + created := toUnix(lastChunk.CreatedAt) + usage := &dto.Usage{PromptTokens: lastChunk.PromptEvalCount, CompletionTokens: lastChunk.EvalCount, TotalTokens: lastChunk.PromptEvalCount + lastChunk.EvalCount} + content := aggContent.String() + finishReason := lastChunk.DoneReason + if finishReason == "" { finishReason = "stop" } + + msg := dto.Message{Role: "assistant", Content: contentPtr(content)} + if rc := reasoningBuilder.String(); rc != "" { msg.ReasoningContent = rc } + full := dto.OpenAITextResponse{ + Id: common.GetUUID(), + Model: model, + Object: "chat.completion", + Created: created, + Choices: []dto.OpenAITextResponseChoice{ { + Index: 0, + Message: msg, + FinishReason: finishReason, + } }, + Usage: *usage, + } + out, _ := common.Marshal(full) + service.IOCopyBytesGracefully(c, resp, out) + return usage, nil +} + +func contentPtr(s string) *string { if s=="" { return nil }; return &s } diff --git a/.history/relay/channel/ollama/stream_20250916085435.go b/.history/relay/channel/ollama/stream_20250916085435.go new file mode 100644 index 000000000..964f11d90 --- /dev/null +++ 
b/.history/relay/channel/ollama/stream_20250916085435.go @@ -0,0 +1,210 @@ +package ollama + +import ( + "bufio" + "encoding/json" + "fmt" + "io" + "net/http" + "one-api/common" + "one-api/dto" + "one-api/logger" + relaycommon "one-api/relay/common" + "one-api/relay/helper" + "one-api/service" + "one-api/types" + "strings" + "time" + + "github.com/gin-gonic/gin" +) + +type ollamaChatStreamChunk struct { + Model string `json:"model"` + CreatedAt string `json:"created_at"` + // chat + Message *struct { + Role string `json:"role"` + Content string `json:"content"` + Thinking json.RawMessage `json:"thinking"` + ToolCalls []struct { + Function struct { + Name string `json:"name"` + Arguments interface{} `json:"arguments"` + } `json:"function"` + } `json:"tool_calls"` + } `json:"message"` + // generate + Response string `json:"response"` + Done bool `json:"done"` + DoneReason string `json:"done_reason"` + TotalDuration int64 `json:"total_duration"` + LoadDuration int64 `json:"load_duration"` + PromptEvalCount int `json:"prompt_eval_count"` + EvalCount int `json:"eval_count"` + PromptEvalDuration int64 `json:"prompt_eval_duration"` + EvalDuration int64 `json:"eval_duration"` +} + +func toUnix(ts string) int64 { + if ts == "" { return time.Now().Unix() } + // try time.RFC3339 or with nanoseconds + t, err := time.Parse(time.RFC3339Nano, ts) + if err != nil { t2, err2 := time.Parse(time.RFC3339, ts); if err2==nil { return t2.Unix() }; return time.Now().Unix() } + return t.Unix() +} + +func ollamaStreamHandler(c *gin.Context, info *relaycommon.RelayInfo, resp *http.Response) (*dto.Usage, *types.NewAPIError) { + if resp == nil || resp.Body == nil { return nil, types.NewOpenAIError(fmt.Errorf("empty response"), types.ErrorCodeBadResponse, http.StatusBadRequest) } + defer service.CloseResponseBodyGracefully(resp) + + helper.SetEventStreamHeaders(c) + scanner := bufio.NewScanner(resp.Body) + usage := &dto.Usage{} + var model = info.UpstreamModelName + var responseId = common.GetUUID() + var created = time.Now().Unix() + var toolCallIndex int + start := helper.GenerateStartEmptyResponse(responseId, created, model, nil) + if data, err := common.Marshal(start); err == nil { _ = helper.StringData(c, string(data)) } + + for scanner.Scan() { + line := scanner.Text() + line = strings.TrimSpace(line) + if line == "" { continue } + var chunk ollamaChatStreamChunk + if err := json.Unmarshal([]byte(line), &chunk); err != nil { + logger.LogError(c, "ollama stream json decode error: "+err.Error()+" line="+line) + return usage, types.NewOpenAIError(err, types.ErrorCodeBadResponseBody, http.StatusInternalServerError) + } + if chunk.Model != "" { model = chunk.Model } + created = toUnix(chunk.CreatedAt) + + if !chunk.Done { + // delta content + var content string + if chunk.Message != nil { content = chunk.Message.Content } else { content = chunk.Response } + delta := dto.ChatCompletionsStreamResponse{ + Id: responseId, + Object: "chat.completion.chunk", + Created: created, + Model: model, + Choices: []dto.ChatCompletionsStreamResponseChoice{ { + Index: 0, + Delta: dto.ChatCompletionsStreamResponseChoiceDelta{ Role: "assistant" }, + } }, + } + if content != "" { delta.Choices[0].Delta.SetContentString(content) } + if chunk.Message != nil && len(chunk.Message.Thinking) > 0 { + raw := strings.TrimSpace(string(chunk.Message.Thinking)) + if raw != "" && raw != "null" { delta.Choices[0].Delta.SetReasoningContent(raw) } + } + // tool calls + if chunk.Message != nil && len(chunk.Message.ToolCalls) > 0 { + 
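// Ollama emits tool-call arguments as a JSON object and assigns no call ids;
// OpenAI-style clients expect stringified arguments plus an id, so each call
// below is re-marshalled to a string and given a synthetic incremental "call_N" id.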
delta.Choices[0].Delta.ToolCalls = make([]dto.ToolCallResponse,0,len(chunk.Message.ToolCalls)) + for _, tc := range chunk.Message.ToolCalls { + // arguments -> string + argBytes, _ := json.Marshal(tc.Function.Arguments) + toolId := fmt.Sprintf("call_%d", toolCallIndex) + tr := dto.ToolCallResponse{ID:toolId, Type:"function", Function: dto.FunctionResponse{Name: tc.Function.Name, Arguments: string(argBytes)}} + tr.SetIndex(toolCallIndex) + toolCallIndex++ + delta.Choices[0].Delta.ToolCalls = append(delta.Choices[0].Delta.ToolCalls, tr) + } + } + if data, err := common.Marshal(delta); err == nil { _ = helper.StringData(c, string(data)) } + continue + } + // done frame + // finalize once and break loop + usage.PromptTokens = chunk.PromptEvalCount + usage.CompletionTokens = chunk.EvalCount + usage.TotalTokens = usage.PromptTokens + usage.CompletionTokens + finishReason := chunk.DoneReason + if finishReason == "" { finishReason = "stop" } + // emit stop delta + if stop := helper.GenerateStopResponse(responseId, created, model, finishReason); stop != nil { + if data, err := common.Marshal(stop); err == nil { _ = helper.StringData(c, string(data)) } + } + // emit usage frame + if final := helper.GenerateFinalUsageResponse(responseId, created, model, *usage); final != nil { + if data, err := common.Marshal(final); err == nil { _ = helper.StringData(c, string(data)) } + } + // send [DONE] + helper.Done(c) + break + } + if err := scanner.Err(); err != nil && err != io.EOF { logger.LogError(c, "ollama stream scan error: "+err.Error()) } + return usage, nil +} + +// non-stream handler for chat/generate +func ollamaChatHandler(c *gin.Context, info *relaycommon.RelayInfo, resp *http.Response) (*dto.Usage, *types.NewAPIError) { + body, err := io.ReadAll(resp.Body) + if err != nil { return nil, types.NewOpenAIError(err, types.ErrorCodeReadResponseBodyFailed, http.StatusInternalServerError) } + service.CloseResponseBodyGracefully(resp) + raw := string(body) + if common.DebugEnabled { println("ollama non-stream raw resp:", raw) } + + lines := strings.Split(raw, "\n") + var ( + aggContent strings.Builder + reasoningBuilder strings.Builder + lastChunk ollamaChatStreamChunk + parsedAny bool + ) + for _, ln := range lines { + ln = strings.TrimSpace(ln) + if ln == "" { continue } + var ck ollamaChatStreamChunk + if err := json.Unmarshal([]byte(ln), &ck); err != nil { + if len(lines) == 1 { return nil, types.NewOpenAIError(err, types.ErrorCodeBadResponseBody, http.StatusInternalServerError) } + continue + } + parsedAny = true + lastChunk = ck + if ck.Message != nil && len(ck.Message.Thinking) > 0 { + raw := strings.TrimSpace(string(ck.Message.Thinking)) + if raw != "" && raw != "null" { reasoningBuilder.WriteString(raw) } + } + if ck.Message != nil && ck.Message.Content != "" { aggContent.WriteString(ck.Message.Content) } else if ck.Response != "" { aggContent.WriteString(ck.Response) } + } + + if !parsedAny { + var single ollamaChatStreamChunk + if err := json.Unmarshal(body, &single); err != nil { return nil, types.NewOpenAIError(err, types.ErrorCodeBadResponseBody, http.StatusInternalServerError) } + lastChunk = single + if single.Message != nil { + if len(single.Message.Thinking) > 0 { raw := strings.TrimSpace(string(single.Message.Thinking)); if raw != "" && raw != "null" { reasoningBuilder.WriteString(raw) } } + aggContent.WriteString(single.Message.Content) + } else { aggContent.WriteString(single.Response) } + } + + model := lastChunk.Model + if model == "" { model = info.UpstreamModelName } + created := 
toUnix(lastChunk.CreatedAt) + usage := &dto.Usage{PromptTokens: lastChunk.PromptEvalCount, CompletionTokens: lastChunk.EvalCount, TotalTokens: lastChunk.PromptEvalCount + lastChunk.EvalCount} + content := aggContent.String() + finishReason := lastChunk.DoneReason + if finishReason == "" { finishReason = "stop" } + + msg := dto.Message{Role: "assistant", Content: contentPtr(content)} + if rc := reasoningBuilder.String(); rc != "" { msg.ReasoningContent = rc } + full := dto.OpenAITextResponse{ + Id: common.GetUUID(), + Model: model, + Object: "chat.completion", + Created: created, + Choices: []dto.OpenAITextResponseChoice{ { + Index: 0, + Message: msg, + FinishReason: finishReason, + } }, + Usage: *usage, + } + out, _ := common.Marshal(full) + service.IOCopyBytesGracefully(c, resp, out) + return usage, nil +} + +func contentPtr(s string) *string { if s=="" { return nil }; return &s } From f19b5b8680f5078e2516e856670ecba463c010b5 Mon Sep 17 00:00:00 2001 From: somnifex <98788152+somnifex@users.noreply.github.com> Date: Tue, 16 Sep 2025 08:58:19 +0800 Subject: [PATCH 11/18] =?UTF-8?q?chore:=20=E5=88=A0=E9=99=A4=E5=8E=86?= =?UTF-8?q?=E5=8F=B2=E6=96=87=E4=BB=B6?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../channel/ollama/stream_20250916085416.go | 210 ------------------ .../channel/ollama/stream_20250916085435.go | 210 ------------------ 2 files changed, 420 deletions(-) delete mode 100644 .history/relay/channel/ollama/stream_20250916085416.go delete mode 100644 .history/relay/channel/ollama/stream_20250916085435.go diff --git a/.history/relay/channel/ollama/stream_20250916085416.go b/.history/relay/channel/ollama/stream_20250916085416.go deleted file mode 100644 index 964f11d90..000000000 --- a/.history/relay/channel/ollama/stream_20250916085416.go +++ /dev/null @@ -1,210 +0,0 @@ -package ollama - -import ( - "bufio" - "encoding/json" - "fmt" - "io" - "net/http" - "one-api/common" - "one-api/dto" - "one-api/logger" - relaycommon "one-api/relay/common" - "one-api/relay/helper" - "one-api/service" - "one-api/types" - "strings" - "time" - - "github.com/gin-gonic/gin" -) - -type ollamaChatStreamChunk struct { - Model string `json:"model"` - CreatedAt string `json:"created_at"` - // chat - Message *struct { - Role string `json:"role"` - Content string `json:"content"` - Thinking json.RawMessage `json:"thinking"` - ToolCalls []struct { - Function struct { - Name string `json:"name"` - Arguments interface{} `json:"arguments"` - } `json:"function"` - } `json:"tool_calls"` - } `json:"message"` - // generate - Response string `json:"response"` - Done bool `json:"done"` - DoneReason string `json:"done_reason"` - TotalDuration int64 `json:"total_duration"` - LoadDuration int64 `json:"load_duration"` - PromptEvalCount int `json:"prompt_eval_count"` - EvalCount int `json:"eval_count"` - PromptEvalDuration int64 `json:"prompt_eval_duration"` - EvalDuration int64 `json:"eval_duration"` -} - -func toUnix(ts string) int64 { - if ts == "" { return time.Now().Unix() } - // try time.RFC3339 or with nanoseconds - t, err := time.Parse(time.RFC3339Nano, ts) - if err != nil { t2, err2 := time.Parse(time.RFC3339, ts); if err2==nil { return t2.Unix() }; return time.Now().Unix() } - return t.Unix() -} - -func ollamaStreamHandler(c *gin.Context, info *relaycommon.RelayInfo, resp *http.Response) (*dto.Usage, *types.NewAPIError) { - if resp == nil || resp.Body == nil { return nil, types.NewOpenAIError(fmt.Errorf("empty response"), types.ErrorCodeBadResponse, 
http.StatusBadRequest) } - defer service.CloseResponseBodyGracefully(resp) - - helper.SetEventStreamHeaders(c) - scanner := bufio.NewScanner(resp.Body) - usage := &dto.Usage{} - var model = info.UpstreamModelName - var responseId = common.GetUUID() - var created = time.Now().Unix() - var toolCallIndex int - start := helper.GenerateStartEmptyResponse(responseId, created, model, nil) - if data, err := common.Marshal(start); err == nil { _ = helper.StringData(c, string(data)) } - - for scanner.Scan() { - line := scanner.Text() - line = strings.TrimSpace(line) - if line == "" { continue } - var chunk ollamaChatStreamChunk - if err := json.Unmarshal([]byte(line), &chunk); err != nil { - logger.LogError(c, "ollama stream json decode error: "+err.Error()+" line="+line) - return usage, types.NewOpenAIError(err, types.ErrorCodeBadResponseBody, http.StatusInternalServerError) - } - if chunk.Model != "" { model = chunk.Model } - created = toUnix(chunk.CreatedAt) - - if !chunk.Done { - // delta content - var content string - if chunk.Message != nil { content = chunk.Message.Content } else { content = chunk.Response } - delta := dto.ChatCompletionsStreamResponse{ - Id: responseId, - Object: "chat.completion.chunk", - Created: created, - Model: model, - Choices: []dto.ChatCompletionsStreamResponseChoice{ { - Index: 0, - Delta: dto.ChatCompletionsStreamResponseChoiceDelta{ Role: "assistant" }, - } }, - } - if content != "" { delta.Choices[0].Delta.SetContentString(content) } - if chunk.Message != nil && len(chunk.Message.Thinking) > 0 { - raw := strings.TrimSpace(string(chunk.Message.Thinking)) - if raw != "" && raw != "null" { delta.Choices[0].Delta.SetReasoningContent(raw) } - } - // tool calls - if chunk.Message != nil && len(chunk.Message.ToolCalls) > 0 { - delta.Choices[0].Delta.ToolCalls = make([]dto.ToolCallResponse,0,len(chunk.Message.ToolCalls)) - for _, tc := range chunk.Message.ToolCalls { - // arguments -> string - argBytes, _ := json.Marshal(tc.Function.Arguments) - toolId := fmt.Sprintf("call_%d", toolCallIndex) - tr := dto.ToolCallResponse{ID:toolId, Type:"function", Function: dto.FunctionResponse{Name: tc.Function.Name, Arguments: string(argBytes)}} - tr.SetIndex(toolCallIndex) - toolCallIndex++ - delta.Choices[0].Delta.ToolCalls = append(delta.Choices[0].Delta.ToolCalls, tr) - } - } - if data, err := common.Marshal(delta); err == nil { _ = helper.StringData(c, string(data)) } - continue - } - // done frame - // finalize once and break loop - usage.PromptTokens = chunk.PromptEvalCount - usage.CompletionTokens = chunk.EvalCount - usage.TotalTokens = usage.PromptTokens + usage.CompletionTokens - finishReason := chunk.DoneReason - if finishReason == "" { finishReason = "stop" } - // emit stop delta - if stop := helper.GenerateStopResponse(responseId, created, model, finishReason); stop != nil { - if data, err := common.Marshal(stop); err == nil { _ = helper.StringData(c, string(data)) } - } - // emit usage frame - if final := helper.GenerateFinalUsageResponse(responseId, created, model, *usage); final != nil { - if data, err := common.Marshal(final); err == nil { _ = helper.StringData(c, string(data)) } - } - // send [DONE] - helper.Done(c) - break - } - if err := scanner.Err(); err != nil && err != io.EOF { logger.LogError(c, "ollama stream scan error: "+err.Error()) } - return usage, nil -} - -// non-stream handler for chat/generate -func ollamaChatHandler(c *gin.Context, info *relaycommon.RelayInfo, resp *http.Response) (*dto.Usage, *types.NewAPIError) { - body, err := 
io.ReadAll(resp.Body) - if err != nil { return nil, types.NewOpenAIError(err, types.ErrorCodeReadResponseBodyFailed, http.StatusInternalServerError) } - service.CloseResponseBodyGracefully(resp) - raw := string(body) - if common.DebugEnabled { println("ollama non-stream raw resp:", raw) } - - lines := strings.Split(raw, "\n") - var ( - aggContent strings.Builder - reasoningBuilder strings.Builder - lastChunk ollamaChatStreamChunk - parsedAny bool - ) - for _, ln := range lines { - ln = strings.TrimSpace(ln) - if ln == "" { continue } - var ck ollamaChatStreamChunk - if err := json.Unmarshal([]byte(ln), &ck); err != nil { - if len(lines) == 1 { return nil, types.NewOpenAIError(err, types.ErrorCodeBadResponseBody, http.StatusInternalServerError) } - continue - } - parsedAny = true - lastChunk = ck - if ck.Message != nil && len(ck.Message.Thinking) > 0 { - raw := strings.TrimSpace(string(ck.Message.Thinking)) - if raw != "" && raw != "null" { reasoningBuilder.WriteString(raw) } - } - if ck.Message != nil && ck.Message.Content != "" { aggContent.WriteString(ck.Message.Content) } else if ck.Response != "" { aggContent.WriteString(ck.Response) } - } - - if !parsedAny { - var single ollamaChatStreamChunk - if err := json.Unmarshal(body, &single); err != nil { return nil, types.NewOpenAIError(err, types.ErrorCodeBadResponseBody, http.StatusInternalServerError) } - lastChunk = single - if single.Message != nil { - if len(single.Message.Thinking) > 0 { raw := strings.TrimSpace(string(single.Message.Thinking)); if raw != "" && raw != "null" { reasoningBuilder.WriteString(raw) } } - aggContent.WriteString(single.Message.Content) - } else { aggContent.WriteString(single.Response) } - } - - model := lastChunk.Model - if model == "" { model = info.UpstreamModelName } - created := toUnix(lastChunk.CreatedAt) - usage := &dto.Usage{PromptTokens: lastChunk.PromptEvalCount, CompletionTokens: lastChunk.EvalCount, TotalTokens: lastChunk.PromptEvalCount + lastChunk.EvalCount} - content := aggContent.String() - finishReason := lastChunk.DoneReason - if finishReason == "" { finishReason = "stop" } - - msg := dto.Message{Role: "assistant", Content: contentPtr(content)} - if rc := reasoningBuilder.String(); rc != "" { msg.ReasoningContent = rc } - full := dto.OpenAITextResponse{ - Id: common.GetUUID(), - Model: model, - Object: "chat.completion", - Created: created, - Choices: []dto.OpenAITextResponseChoice{ { - Index: 0, - Message: msg, - FinishReason: finishReason, - } }, - Usage: *usage, - } - out, _ := common.Marshal(full) - service.IOCopyBytesGracefully(c, resp, out) - return usage, nil -} - -func contentPtr(s string) *string { if s=="" { return nil }; return &s } diff --git a/.history/relay/channel/ollama/stream_20250916085435.go b/.history/relay/channel/ollama/stream_20250916085435.go deleted file mode 100644 index 964f11d90..000000000 --- a/.history/relay/channel/ollama/stream_20250916085435.go +++ /dev/null @@ -1,210 +0,0 @@ -package ollama - -import ( - "bufio" - "encoding/json" - "fmt" - "io" - "net/http" - "one-api/common" - "one-api/dto" - "one-api/logger" - relaycommon "one-api/relay/common" - "one-api/relay/helper" - "one-api/service" - "one-api/types" - "strings" - "time" - - "github.com/gin-gonic/gin" -) - -type ollamaChatStreamChunk struct { - Model string `json:"model"` - CreatedAt string `json:"created_at"` - // chat - Message *struct { - Role string `json:"role"` - Content string `json:"content"` - Thinking json.RawMessage `json:"thinking"` - ToolCalls []struct { - Function struct { - Name 
string `json:"name"` - Arguments interface{} `json:"arguments"` - } `json:"function"` - } `json:"tool_calls"` - } `json:"message"` - // generate - Response string `json:"response"` - Done bool `json:"done"` - DoneReason string `json:"done_reason"` - TotalDuration int64 `json:"total_duration"` - LoadDuration int64 `json:"load_duration"` - PromptEvalCount int `json:"prompt_eval_count"` - EvalCount int `json:"eval_count"` - PromptEvalDuration int64 `json:"prompt_eval_duration"` - EvalDuration int64 `json:"eval_duration"` -} - -func toUnix(ts string) int64 { - if ts == "" { return time.Now().Unix() } - // try time.RFC3339 or with nanoseconds - t, err := time.Parse(time.RFC3339Nano, ts) - if err != nil { t2, err2 := time.Parse(time.RFC3339, ts); if err2==nil { return t2.Unix() }; return time.Now().Unix() } - return t.Unix() -} - -func ollamaStreamHandler(c *gin.Context, info *relaycommon.RelayInfo, resp *http.Response) (*dto.Usage, *types.NewAPIError) { - if resp == nil || resp.Body == nil { return nil, types.NewOpenAIError(fmt.Errorf("empty response"), types.ErrorCodeBadResponse, http.StatusBadRequest) } - defer service.CloseResponseBodyGracefully(resp) - - helper.SetEventStreamHeaders(c) - scanner := bufio.NewScanner(resp.Body) - usage := &dto.Usage{} - var model = info.UpstreamModelName - var responseId = common.GetUUID() - var created = time.Now().Unix() - var toolCallIndex int - start := helper.GenerateStartEmptyResponse(responseId, created, model, nil) - if data, err := common.Marshal(start); err == nil { _ = helper.StringData(c, string(data)) } - - for scanner.Scan() { - line := scanner.Text() - line = strings.TrimSpace(line) - if line == "" { continue } - var chunk ollamaChatStreamChunk - if err := json.Unmarshal([]byte(line), &chunk); err != nil { - logger.LogError(c, "ollama stream json decode error: "+err.Error()+" line="+line) - return usage, types.NewOpenAIError(err, types.ErrorCodeBadResponseBody, http.StatusInternalServerError) - } - if chunk.Model != "" { model = chunk.Model } - created = toUnix(chunk.CreatedAt) - - if !chunk.Done { - // delta content - var content string - if chunk.Message != nil { content = chunk.Message.Content } else { content = chunk.Response } - delta := dto.ChatCompletionsStreamResponse{ - Id: responseId, - Object: "chat.completion.chunk", - Created: created, - Model: model, - Choices: []dto.ChatCompletionsStreamResponseChoice{ { - Index: 0, - Delta: dto.ChatCompletionsStreamResponseChoiceDelta{ Role: "assistant" }, - } }, - } - if content != "" { delta.Choices[0].Delta.SetContentString(content) } - if chunk.Message != nil && len(chunk.Message.Thinking) > 0 { - raw := strings.TrimSpace(string(chunk.Message.Thinking)) - if raw != "" && raw != "null" { delta.Choices[0].Delta.SetReasoningContent(raw) } - } - // tool calls - if chunk.Message != nil && len(chunk.Message.ToolCalls) > 0 { - delta.Choices[0].Delta.ToolCalls = make([]dto.ToolCallResponse,0,len(chunk.Message.ToolCalls)) - for _, tc := range chunk.Message.ToolCalls { - // arguments -> string - argBytes, _ := json.Marshal(tc.Function.Arguments) - toolId := fmt.Sprintf("call_%d", toolCallIndex) - tr := dto.ToolCallResponse{ID:toolId, Type:"function", Function: dto.FunctionResponse{Name: tc.Function.Name, Arguments: string(argBytes)}} - tr.SetIndex(toolCallIndex) - toolCallIndex++ - delta.Choices[0].Delta.ToolCalls = append(delta.Choices[0].Delta.ToolCalls, tr) - } - } - if data, err := common.Marshal(delta); err == nil { _ = helper.StringData(c, string(data)) } - continue - } - // done frame - // 
finalize once and break loop - usage.PromptTokens = chunk.PromptEvalCount - usage.CompletionTokens = chunk.EvalCount - usage.TotalTokens = usage.PromptTokens + usage.CompletionTokens - finishReason := chunk.DoneReason - if finishReason == "" { finishReason = "stop" } - // emit stop delta - if stop := helper.GenerateStopResponse(responseId, created, model, finishReason); stop != nil { - if data, err := common.Marshal(stop); err == nil { _ = helper.StringData(c, string(data)) } - } - // emit usage frame - if final := helper.GenerateFinalUsageResponse(responseId, created, model, *usage); final != nil { - if data, err := common.Marshal(final); err == nil { _ = helper.StringData(c, string(data)) } - } - // send [DONE] - helper.Done(c) - break - } - if err := scanner.Err(); err != nil && err != io.EOF { logger.LogError(c, "ollama stream scan error: "+err.Error()) } - return usage, nil -} - -// non-stream handler for chat/generate -func ollamaChatHandler(c *gin.Context, info *relaycommon.RelayInfo, resp *http.Response) (*dto.Usage, *types.NewAPIError) { - body, err := io.ReadAll(resp.Body) - if err != nil { return nil, types.NewOpenAIError(err, types.ErrorCodeReadResponseBodyFailed, http.StatusInternalServerError) } - service.CloseResponseBodyGracefully(resp) - raw := string(body) - if common.DebugEnabled { println("ollama non-stream raw resp:", raw) } - - lines := strings.Split(raw, "\n") - var ( - aggContent strings.Builder - reasoningBuilder strings.Builder - lastChunk ollamaChatStreamChunk - parsedAny bool - ) - for _, ln := range lines { - ln = strings.TrimSpace(ln) - if ln == "" { continue } - var ck ollamaChatStreamChunk - if err := json.Unmarshal([]byte(ln), &ck); err != nil { - if len(lines) == 1 { return nil, types.NewOpenAIError(err, types.ErrorCodeBadResponseBody, http.StatusInternalServerError) } - continue - } - parsedAny = true - lastChunk = ck - if ck.Message != nil && len(ck.Message.Thinking) > 0 { - raw := strings.TrimSpace(string(ck.Message.Thinking)) - if raw != "" && raw != "null" { reasoningBuilder.WriteString(raw) } - } - if ck.Message != nil && ck.Message.Content != "" { aggContent.WriteString(ck.Message.Content) } else if ck.Response != "" { aggContent.WriteString(ck.Response) } - } - - if !parsedAny { - var single ollamaChatStreamChunk - if err := json.Unmarshal(body, &single); err != nil { return nil, types.NewOpenAIError(err, types.ErrorCodeBadResponseBody, http.StatusInternalServerError) } - lastChunk = single - if single.Message != nil { - if len(single.Message.Thinking) > 0 { raw := strings.TrimSpace(string(single.Message.Thinking)); if raw != "" && raw != "null" { reasoningBuilder.WriteString(raw) } } - aggContent.WriteString(single.Message.Content) - } else { aggContent.WriteString(single.Response) } - } - - model := lastChunk.Model - if model == "" { model = info.UpstreamModelName } - created := toUnix(lastChunk.CreatedAt) - usage := &dto.Usage{PromptTokens: lastChunk.PromptEvalCount, CompletionTokens: lastChunk.EvalCount, TotalTokens: lastChunk.PromptEvalCount + lastChunk.EvalCount} - content := aggContent.String() - finishReason := lastChunk.DoneReason - if finishReason == "" { finishReason = "stop" } - - msg := dto.Message{Role: "assistant", Content: contentPtr(content)} - if rc := reasoningBuilder.String(); rc != "" { msg.ReasoningContent = rc } - full := dto.OpenAITextResponse{ - Id: common.GetUUID(), - Model: model, - Object: "chat.completion", - Created: created, - Choices: []dto.OpenAITextResponseChoice{ { - Index: 0, - Message: msg, - FinishReason: 
finishReason, - } }, - Usage: *usage, - } - out, _ := common.Marshal(full) - service.IOCopyBytesGracefully(c, resp, out) - return usage, nil -} - -func contentPtr(s string) *string { if s=="" { return nil }; return &s } From 4b98773e9a97fbf6af1799c1287fb6dfc6cd6574 Mon Sep 17 00:00:00 2001 From: RixAPI Date: Tue, 16 Sep 2025 20:03:10 +0800 Subject: [PATCH 12/18] =?UTF-8?q?=E4=BC=98=E5=8C=96=E6=B8=A0=E9=81=93?= =?UTF-8?q?=E6=B5=8B=E8=AF=95?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit 增加并发支持 --- web/src/hooks/channels/useChannelsData.jsx | 363 +++++++++++---------- 1 file changed, 198 insertions(+), 165 deletions(-) diff --git a/web/src/hooks/channels/useChannelsData.jsx b/web/src/hooks/channels/useChannelsData.jsx index 65460a06b..7d09d4dfd 100644 --- a/web/src/hooks/channels/useChannelsData.jsx +++ b/web/src/hooks/channels/useChannelsData.jsx @@ -25,13 +25,9 @@ import { showInfo, showSuccess, loadChannelModels, - copy, + copy } from '../../helpers'; -import { - CHANNEL_OPTIONS, - ITEMS_PER_PAGE, - MODEL_TABLE_PAGE_SIZE, -} from '../../constants'; +import { CHANNEL_OPTIONS, ITEMS_PER_PAGE, MODEL_TABLE_PAGE_SIZE } from '../../constants'; import { useIsMobile } from '../common/useIsMobile'; import { useTableCompactMode } from '../common/useTableCompactMode'; import { Modal } from '@douyinfe/semi-ui'; @@ -68,7 +64,7 @@ export const useChannelsData = () => { // Status filter const [statusFilter, setStatusFilter] = useState( - localStorage.getItem('channel-status-filter') || 'all', + localStorage.getItem('channel-status-filter') || 'all' ); // Type tabs states @@ -83,9 +79,10 @@ export const useChannelsData = () => { const [testingModels, setTestingModels] = useState(new Set()); const [selectedModelKeys, setSelectedModelKeys] = useState([]); const [isBatchTesting, setIsBatchTesting] = useState(false); - const [testQueue, setTestQueue] = useState([]); - const [isProcessingQueue, setIsProcessingQueue] = useState(false); const [modelTablePage, setModelTablePage] = useState(1); + + // 使用 ref 来避免闭包问题,类似旧版实现 + const shouldStopBatchTestingRef = useRef(false); // Multi-key management states const [showMultiKeyManageModal, setShowMultiKeyManageModal] = useState(false); @@ -119,12 +116,9 @@ export const useChannelsData = () => { // Initialize from localStorage useEffect(() => { const localIdSort = localStorage.getItem('id-sort') === 'true'; - const localPageSize = - parseInt(localStorage.getItem('page-size')) || ITEMS_PER_PAGE; - const localEnableTagMode = - localStorage.getItem('enable-tag-mode') === 'true'; - const localEnableBatchDelete = - localStorage.getItem('enable-batch-delete') === 'true'; + const localPageSize = parseInt(localStorage.getItem('page-size')) || ITEMS_PER_PAGE; + const localEnableTagMode = localStorage.getItem('enable-tag-mode') === 'true'; + const localEnableBatchDelete = localStorage.getItem('enable-batch-delete') === 'true'; setIdSort(localIdSort); setPageSize(localPageSize); @@ -182,10 +176,7 @@ export const useChannelsData = () => { // Save column preferences useEffect(() => { if (Object.keys(visibleColumns).length > 0) { - localStorage.setItem( - 'channels-table-columns', - JSON.stringify(visibleColumns), - ); + localStorage.setItem('channels-table-columns', JSON.stringify(visibleColumns)); } }, [visibleColumns]); @@ -299,21 +290,14 @@ export const useChannelsData = () => { const { searchKeyword, searchGroup, searchModel } = getFormValues(); if (searchKeyword !== '' || searchGroup !== '' || searchModel !== '') { setLoading(true); 
- await searchChannels( - enableTagMode, - typeKey, - statusF, - page, - pageSize, - idSort, - ); + await searchChannels(enableTagMode, typeKey, statusF, page, pageSize, idSort); setLoading(false); return; } const reqId = ++requestCounter.current; setLoading(true); - const typeParam = typeKey !== 'all' ? `&type=${typeKey}` : ''; + const typeParam = (typeKey !== 'all') ? `&type=${typeKey}` : ''; const statusParam = statusF !== 'all' ? `&status=${statusF}` : ''; const res = await API.get( `/api/channel/?p=${page}&page_size=${pageSize}&id_sort=${idSort}&tag_mode=${enableTagMode}${typeParam}${statusParam}`, @@ -327,10 +311,7 @@ export const useChannelsData = () => { if (success) { const { items, total, type_counts } = data; if (type_counts) { - const sumAll = Object.values(type_counts).reduce( - (acc, v) => acc + v, - 0, - ); + const sumAll = Object.values(type_counts).reduce((acc, v) => acc + v, 0); setTypeCounts({ ...type_counts, all: sumAll }); } setChannelFormat(items, enableTagMode); @@ -354,18 +335,11 @@ export const useChannelsData = () => { setSearching(true); try { if (searchKeyword === '' && searchGroup === '' && searchModel === '') { - await loadChannels( - page, - pageSz, - sortFlag, - enableTagMode, - typeKey, - statusF, - ); + await loadChannels(page, pageSz, sortFlag, enableTagMode, typeKey, statusF); return; } - const typeParam = typeKey !== 'all' ? `&type=${typeKey}` : ''; + const typeParam = (typeKey !== 'all') ? `&type=${typeKey}` : ''; const statusParam = statusF !== 'all' ? `&status=${statusF}` : ''; const res = await API.get( `/api/channel/search?keyword=${searchKeyword}&group=${searchGroup}&model=${searchModel}&id_sort=${sortFlag}&tag_mode=${enableTagMode}&p=${page}&page_size=${pageSz}${typeParam}${statusParam}`, @@ -373,10 +347,7 @@ export const useChannelsData = () => { const { success, message, data } = res.data; if (success) { const { items = [], total = 0, type_counts = {} } = data; - const sumAll = Object.values(type_counts).reduce( - (acc, v) => acc + v, - 0, - ); + const sumAll = Object.values(type_counts).reduce((acc, v) => acc + v, 0); setTypeCounts({ ...type_counts, all: sumAll }); setChannelFormat(items, enableTagMode); setChannelCount(total); @@ -395,14 +366,7 @@ export const useChannelsData = () => { if (searchKeyword === '' && searchGroup === '' && searchModel === '') { await loadChannels(page, pageSize, idSort, enableTagMode); } else { - await searchChannels( - enableTagMode, - activeTypeKey, - statusFilter, - page, - pageSize, - idSort, - ); + await searchChannels(enableTagMode, activeTypeKey, statusFilter, page, pageSize, idSort); } }; @@ -488,16 +452,9 @@ export const useChannelsData = () => { const { searchKeyword, searchGroup, searchModel } = getFormValues(); setActivePage(page); if (searchKeyword === '' && searchGroup === '' && searchModel === '') { - loadChannels(page, pageSize, idSort, enableTagMode).then(() => {}); + loadChannels(page, pageSize, idSort, enableTagMode).then(() => { }); } else { - searchChannels( - enableTagMode, - activeTypeKey, - statusFilter, - page, - pageSize, - idSort, - ); + searchChannels(enableTagMode, activeTypeKey, statusFilter, page, pageSize, idSort); } }; @@ -513,14 +470,7 @@ export const useChannelsData = () => { showError(reason); }); } else { - searchChannels( - enableTagMode, - activeTypeKey, - statusFilter, - 1, - size, - idSort, - ); + searchChannels(enableTagMode, activeTypeKey, statusFilter, 1, size, idSort); } }; @@ -551,10 +501,7 @@ export const useChannelsData = () => { showError(res?.data?.message || 
t('渠道复制失败')); } } catch (error) { - showError( - t('渠道复制失败: ') + - (error?.response?.data?.message || error?.message || error), - ); + showError(t('渠道复制失败: ') + (error?.response?.data?.message || error?.message || error)); } }; @@ -593,11 +540,7 @@ export const useChannelsData = () => { data.priority = parseInt(data.priority); break; case 'weight': - if ( - data.weight === undefined || - data.weight < 0 || - data.weight === '' - ) { + if (data.weight === undefined || data.weight < 0 || data.weight === '') { showInfo('权重必须是非负整数!'); return; } @@ -740,136 +683,226 @@ export const useChannelsData = () => { const res = await API.post(`/api/channel/fix`); const { success, message, data } = res.data; if (success) { - showSuccess( - t('已修复 ${success} 个通道,失败 ${fails} 个通道。') - .replace('${success}', data.success) - .replace('${fails}', data.fails), - ); + showSuccess(t('已修复 ${success} 个通道,失败 ${fails} 个通道。').replace('${success}', data.success).replace('${fails}', data.fails)); await refresh(); } else { showError(message); } }; - // Test channel + // Test channel - 单个模型测试,参考旧版实现 const testChannel = async (record, model) => { - setTestQueue((prev) => [...prev, { channel: record, model }]); - if (!isProcessingQueue) { - setIsProcessingQueue(true); + const testKey = `${record.id}-${model}`; + + // 检查是否应该停止批量测试 + if (shouldStopBatchTestingRef.current && isBatchTesting) { + return Promise.resolve(); } - }; - // Process test queue - const processTestQueue = async () => { - if (!isProcessingQueue || testQueue.length === 0) return; - - const { channel, model, indexInFiltered } = testQueue[0]; - - if (currentTestChannel && currentTestChannel.id === channel.id) { - let pageNo; - if (indexInFiltered !== undefined) { - pageNo = Math.floor(indexInFiltered / MODEL_TABLE_PAGE_SIZE) + 1; - } else { - const filteredModelsList = currentTestChannel.models - .split(',') - .filter((m) => - m.toLowerCase().includes(modelSearchKeyword.toLowerCase()), - ); - const modelIdx = filteredModelsList.indexOf(model); - pageNo = - modelIdx !== -1 - ? 
Math.floor(modelIdx / MODEL_TABLE_PAGE_SIZE) + 1 - : 1; - } - setModelTablePage(pageNo); - } + // 添加到正在测试的模型集合 + setTestingModels(prev => new Set([...prev, model])); try { - setTestingModels((prev) => new Set([...prev, model])); - const res = await API.get( - `/api/channel/test/${channel.id}?model=${model}`, - ); + const res = await API.get(`/api/channel/test/${record.id}?model=${model}`); + + // 检查是否在请求期间被停止 + if (shouldStopBatchTestingRef.current && isBatchTesting) { + return Promise.resolve(); + } + const { success, message, time } = res.data; - setModelTestResults((prev) => ({ + // 更新测试结果 + setModelTestResults(prev => ({ ...prev, - [`${channel.id}-${model}`]: { success, time }, + [testKey]: { + success, + message, + time: time || 0, + timestamp: Date.now() + } })); if (success) { - updateChannelProperty(channel.id, (ch) => { - ch.response_time = time * 1000; - ch.test_time = Date.now() / 1000; + // 更新渠道响应时间 + updateChannelProperty(record.id, (channel) => { + channel.response_time = time * 1000; + channel.test_time = Date.now() / 1000; }); - if (!model) { + + if (!model || model === '') { showInfo( t('通道 ${name} 测试成功,耗时 ${time.toFixed(2)} 秒。') - .replace('${name}', channel.name) + .replace('${name}', record.name) + .replace('${time.toFixed(2)}', time.toFixed(2)), + ); + } else { + showInfo( + t('通道 ${name} 测试成功,模型 ${model} 耗时 ${time.toFixed(2)} 秒。') + .replace('${name}', record.name) + .replace('${model}', model) .replace('${time.toFixed(2)}', time.toFixed(2)), ); } } else { - showError(message); + showError(`${t('模型')} ${model}: ${message}`); } } catch (error) { - showError(error.message); + // 处理网络错误 + const testKey = `${record.id}-${model}`; + setModelTestResults(prev => ({ + ...prev, + [testKey]: { + success: false, + message: error.message || t('网络错误'), + time: 0, + timestamp: Date.now() + } + })); + showError(`${t('模型')} ${model}: ${error.message || t('测试失败')}`); } finally { - setTestingModels((prev) => { + // 从正在测试的模型集合中移除 + setTestingModels(prev => { const newSet = new Set(prev); newSet.delete(model); return newSet; }); } - - setTestQueue((prev) => prev.slice(1)); }; - // Monitor queue changes - useEffect(() => { - if (testQueue.length > 0 && isProcessingQueue) { - processTestQueue(); - } else if (testQueue.length === 0 && isProcessingQueue) { - setIsProcessingQueue(false); - setIsBatchTesting(false); - } - }, [testQueue, isProcessingQueue]); - - // Batch test models + // 批量测试单个渠道的所有模型,参考旧版实现 const batchTestModels = async () => { - if (!currentTestChannel) return; + if (!currentTestChannel || !currentTestChannel.models) { + showError(t('渠道模型信息不完整')); + return; + } + + const models = currentTestChannel.models.split(',').filter(model => + model.toLowerCase().includes(modelSearchKeyword.toLowerCase()) + ); + + if (models.length === 0) { + showError(t('没有找到匹配的模型')); + return; + } setIsBatchTesting(true); - setModelTablePage(1); + shouldStopBatchTestingRef.current = false; // 重置停止标志 - const filteredModels = currentTestChannel.models - .split(',') - .filter((model) => - model.toLowerCase().includes(modelSearchKeyword.toLowerCase()), - ); + // 清空该渠道之前的测试结果 + setModelTestResults(prev => { + const newResults = { ...prev }; + models.forEach(model => { + const testKey = `${currentTestChannel.id}-${model}`; + delete newResults[testKey]; + }); + return newResults; + }); - setTestQueue( - filteredModels.map((model, idx) => ({ - channel: currentTestChannel, - model, - indexInFiltered: idx, - })), - ); - setIsProcessingQueue(true); + try { + showInfo(t('开始批量测试 ${count} 
个模型,已清空上次结果...').replace('${count}', models.length)); + + // 提高并发数量以加快测试速度,参考旧版的并发限制 + const concurrencyLimit = 5; + const results = []; + + for (let i = 0; i < models.length; i += concurrencyLimit) { + // 检查是否应该停止 + if (shouldStopBatchTestingRef.current) { + showInfo(t('批量测试已停止')); + break; + } + + const batch = models.slice(i, i + concurrencyLimit); + showInfo(t('正在测试第 ${current} - ${end} 个模型 (共 ${total} 个)') + .replace('${current}', i + 1) + .replace('${end}', Math.min(i + concurrencyLimit, models.length)) + .replace('${total}', models.length) + ); + + const batchPromises = batch.map(model => testChannel(currentTestChannel, model)); + const batchResults = await Promise.allSettled(batchPromises); + results.push(...batchResults); + + // 再次检查是否应该停止 + if (shouldStopBatchTestingRef.current) { + showInfo(t('批量测试已停止')); + break; + } + + // 短暂延迟避免过于频繁的请求 + if (i + concurrencyLimit < models.length) { + await new Promise(resolve => setTimeout(resolve, 100)); + } + } + + if (!shouldStopBatchTestingRef.current) { + // 等待一小段时间确保所有结果都已更新 + await new Promise(resolve => setTimeout(resolve, 300)); + + // 使用当前状态重新计算结果统计 + setModelTestResults(currentResults => { + let successCount = 0; + let failCount = 0; + + models.forEach(model => { + const testKey = `${currentTestChannel.id}-${model}`; + const result = currentResults[testKey]; + if (result && result.success) { + successCount++; + } else { + failCount++; + } + }); + + // 显示完成消息 + setTimeout(() => { + showSuccess(t('批量测试完成!成功: ${success}, 失败: ${fail}, 总计: ${total}') + .replace('${success}', successCount) + .replace('${fail}', failCount) + .replace('${total}', models.length) + ); + }, 100); + + return currentResults; // 不修改状态,只是为了获取最新值 + }); + } + } catch (error) { + showError(t('批量测试过程中发生错误: ') + error.message); + } finally { + setIsBatchTesting(false); + } + }; + + // 停止批量测试 + const stopBatchTesting = () => { + shouldStopBatchTestingRef.current = true; + setIsBatchTesting(false); + setTestingModels(new Set()); + showInfo(t('已停止批量测试')); + }; + + // 清空测试结果 + const clearTestResults = () => { + setModelTestResults({}); + showInfo(t('已清空测试结果')); }; // Handle close modal const handleCloseModal = () => { + // 如果正在批量测试,先停止测试 if (isBatchTesting) { - setTestQueue([]); - setIsProcessingQueue(false); - setIsBatchTesting(false); - showSuccess(t('已停止测试')); - } else { - setShowModelTestModal(false); - setModelSearchKeyword(''); - setSelectedModelKeys([]); - setModelTablePage(1); + shouldStopBatchTestingRef.current = true; + showInfo(t('关闭弹窗,已停止批量测试')); } + + setShowModelTestModal(false); + setModelSearchKeyword(''); + setIsBatchTesting(false); + setTestingModels(new Set()); + setSelectedModelKeys([]); + setModelTablePage(1); + // 可选择性保留测试结果,这里不清空以便用户查看 }; // Type counts @@ -1012,4 +1045,4 @@ export const useChannelsData = () => { setCompactMode, setActivePage, }; -}; +}; \ No newline at end of file From 6c0b1681f9d64140a3682586cc9d53d9d06cada7 Mon Sep 17 00:00:00 2001 From: joesonshaw Date: Fri, 19 Sep 2025 10:49:47 +0800 Subject: [PATCH 13/18] =?UTF-8?q?fix(relay-xunfei):=20=E4=BF=AE=E5=A4=8D?= =?UTF-8?q?=E8=AE=AF=E9=A3=9E=E6=B8=A0=E9=81=93=E6=97=A0=E6=B3=95=E4=BD=BF?= =?UTF-8?q?=E7=94=A8=E9=97=AE=E9=A2=98=20#1740?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit 将连接延迟关闭逻辑调整到协程中执行,防止在完全接收到所有数据前提前关闭 --- relay/channel/xunfei/relay-xunfei.go | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/relay/channel/xunfei/relay-xunfei.go b/relay/channel/xunfei/relay-xunfei.go index 9d5c190fe..9503d5d39 100644 --- 
a/relay/channel/xunfei/relay-xunfei.go +++ b/relay/channel/xunfei/relay-xunfei.go @@ -207,10 +207,6 @@ func xunfeiMakeRequest(textRequest dto.GeneralOpenAIRequest, domain, authUrl, ap return nil, nil, err } - defer func() { - conn.Close() - }() - data := requestOpenAI2Xunfei(textRequest, appId, domain) err = conn.WriteJSON(data) if err != nil { @@ -220,6 +216,9 @@ func xunfeiMakeRequest(textRequest dto.GeneralOpenAIRequest, domain, authUrl, ap dataChan := make(chan XunfeiChatResponse) stopChan := make(chan bool) go func() { + defer func() { + conn.Close() + }() for { _, msg, err := conn.ReadMessage() if err != nil { From ee7ce5a476c2ff787325fcd94527e19130cec2d1 Mon Sep 17 00:00:00 2001 From: creamlike1024 Date: Sun, 28 Sep 2025 09:35:07 +0800 Subject: [PATCH 14/18] =?UTF-8?q?feat:=20=E6=B7=BB=E5=8A=A0=20openrouter-e?= =?UTF-8?q?nterprise=20=E6=94=AF=E6=8C=81?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- relay/channel/openai/relay-openai.go | 15 ++++++++++++++- relay/channel/openrouter/dto.go | 7 +++++++ 2 files changed, 21 insertions(+), 1 deletion(-) diff --git a/relay/channel/openai/relay-openai.go b/relay/channel/openai/relay-openai.go index 4b13a7df1..b8b120541 100644 --- a/relay/channel/openai/relay-openai.go +++ b/relay/channel/openai/relay-openai.go @@ -12,6 +12,7 @@ import ( "one-api/constant" "one-api/dto" "one-api/logger" + "one-api/relay/channel/openrouter" relaycommon "one-api/relay/common" "one-api/relay/helper" "one-api/service" @@ -185,7 +186,19 @@ func OpenaiHandler(c *gin.Context, info *relaycommon.RelayInfo, resp *http.Respo if common.DebugEnabled { println("upstream response body:", string(responseBody)) } - err = common.Unmarshal(responseBody, &simpleResponse) + // Unmarshal to simpleResponse + if info.ChannelType == constant.ChannelTypeOpenRouter { + // 尝试解析为 openrouter enterprise + var enterpriseResponse openrouter.OpenRouterEnterpriseResponse + if err2 := common.Unmarshal(responseBody, &enterpriseResponse); err2 == nil && enterpriseResponse.Data != nil { + err = common.Unmarshal(enterpriseResponse.Data, &simpleResponse) + } else { + // treat as normal openrouter + err = common.Unmarshal(responseBody, &simpleResponse) + } + } else { + err = common.Unmarshal(responseBody, &simpleResponse) + } if err != nil { return nil, types.NewOpenAIError(err, types.ErrorCodeBadResponseBody, http.StatusInternalServerError) } diff --git a/relay/channel/openrouter/dto.go b/relay/channel/openrouter/dto.go index 607f495bf..a32499852 100644 --- a/relay/channel/openrouter/dto.go +++ b/relay/channel/openrouter/dto.go @@ -1,5 +1,7 @@ package openrouter +import "encoding/json" + type RequestReasoning struct { // One of the following (not both): Effort string `json:"effort,omitempty"` // Can be "high", "medium", or "low" (OpenAI-style) @@ -7,3 +9,8 @@ type RequestReasoning struct { // Optional: Default is false. All models support this. 
Exclude bool `json:"exclude,omitempty"` // Set to true to exclude reasoning tokens from response } + +type OpenRouterEnterpriseResponse struct { + Data json.RawMessage `json:"data"` + Success bool `json:"success"` +} From 6e6a96d19f830111b3b08da3c34c91d5219d44c7 Mon Sep 17 00:00:00 2001 From: CaIon Date: Sun, 28 Sep 2025 15:23:27 +0800 Subject: [PATCH 15/18] feat: enhance OpenRouter enterprise support with new settings and response handling --- dto/channel_settings.go | 8 ++++ relay/channel/api_request.go | 1 + relay/channel/openai/relay-openai.go | 23 ++++++++-- relay/channel/openrouter/dto.go | 7 ++++ .../channels/modals/EditChannelModal.jsx | 42 +++++++++++++++++++ 5 files changed, 78 insertions(+), 3 deletions(-) diff --git a/dto/channel_settings.go b/dto/channel_settings.go index 8791f516e..d6d6e0848 100644 --- a/dto/channel_settings.go +++ b/dto/channel_settings.go @@ -19,4 +19,12 @@ const ( type ChannelOtherSettings struct { AzureResponsesVersion string `json:"azure_responses_version,omitempty"` VertexKeyType VertexKeyType `json:"vertex_key_type,omitempty"` // "json" or "api_key" + OpenRouterEnterprise *bool `json:"openrouter_enterprise,omitempty"` +} + +func (s *ChannelOtherSettings) IsOpenRouterEnterprise() bool { + if s == nil || s.OpenRouterEnterprise == nil { + return false + } + return *s.OpenRouterEnterprise } diff --git a/relay/channel/api_request.go b/relay/channel/api_request.go index a065caff7..79a0f7060 100644 --- a/relay/channel/api_request.go +++ b/relay/channel/api_request.go @@ -265,6 +265,7 @@ func doRequest(c *gin.Context, req *http.Request, info *common.RelayInfo) (*http resp, err := client.Do(req) if err != nil { + logger.LogError(c, "do request failed: "+err.Error()) return nil, types.NewError(err, types.ErrorCodeDoRequestFailed, types.ErrOptionWithHideErrMsg("upstream error: do request failed")) } if resp == nil { diff --git a/relay/channel/openai/relay-openai.go b/relay/channel/openai/relay-openai.go index 4b13a7df1..26a7f40cc 100644 --- a/relay/channel/openai/relay-openai.go +++ b/relay/channel/openai/relay-openai.go @@ -12,6 +12,7 @@ import ( "one-api/constant" "one-api/dto" "one-api/logger" + "one-api/relay/channel/openrouter" relaycommon "one-api/relay/common" "one-api/relay/helper" "one-api/service" @@ -185,9 +186,25 @@ func OpenaiHandler(c *gin.Context, info *relaycommon.RelayInfo, resp *http.Respo if common.DebugEnabled { println("upstream response body:", string(responseBody)) } - err = common.Unmarshal(responseBody, &simpleResponse) - if err != nil { - return nil, types.NewOpenAIError(err, types.ErrorCodeBadResponseBody, http.StatusInternalServerError) + // Unmarshal to simpleResponse + if info.ChannelType == constant.ChannelTypeOpenRouter && info.ChannelOtherSettings.IsOpenRouterEnterprise() { + // 尝试解析为 openrouter enterprise + var enterpriseResponse openrouter.OpenRouterEnterpriseResponse + err = common.Unmarshal(responseBody, &enterpriseResponse) + if err != nil { + return nil, types.NewOpenAIError(err, types.ErrorCodeBadResponseBody, http.StatusInternalServerError) + } + if enterpriseResponse.Success { + responseBody = enterpriseResponse.Data + } else { + logger.LogError(c, fmt.Sprintf("openrouter enterprise response success=false, data: %s", enterpriseResponse.Data)) + return nil, types.NewOpenAIError(fmt.Errorf("openrouter response success=false"), types.ErrorCodeBadResponseBody, http.StatusInternalServerError) + } + } else { + err = common.Unmarshal(responseBody, &simpleResponse) + if err != nil { + return nil, types.NewOpenAIError(err, 
types.ErrorCodeBadResponseBody, http.StatusInternalServerError) + } } if oaiError := simpleResponse.GetOpenAIError(); oaiError != nil && oaiError.Type != "" { return nil, types.WithOpenAIError(*oaiError, resp.StatusCode) diff --git a/relay/channel/openrouter/dto.go b/relay/channel/openrouter/dto.go index 607f495bf..a32499852 100644 --- a/relay/channel/openrouter/dto.go +++ b/relay/channel/openrouter/dto.go @@ -1,5 +1,7 @@ package openrouter +import "encoding/json" + type RequestReasoning struct { // One of the following (not both): Effort string `json:"effort,omitempty"` // Can be "high", "medium", or "low" (OpenAI-style) @@ -7,3 +9,8 @@ type RequestReasoning struct { // Optional: Default is false. All models support this. Exclude bool `json:"exclude,omitempty"` // Set to true to exclude reasoning tokens from response } + +type OpenRouterEnterpriseResponse struct { + Data json.RawMessage `json:"data"` + Success bool `json:"success"` +} diff --git a/web/src/components/table/channels/modals/EditChannelModal.jsx b/web/src/components/table/channels/modals/EditChannelModal.jsx index dd620fe01..25ef68c61 100644 --- a/web/src/components/table/channels/modals/EditChannelModal.jsx +++ b/web/src/components/table/channels/modals/EditChannelModal.jsx @@ -164,6 +164,8 @@ const EditChannelModal = (props) => { settings: '', // 仅 Vertex: 密钥格式(存入 settings.vertex_key_type) vertex_key_type: 'json', + // 企业账户设置 + is_enterprise_account: false, }; const [batch, setBatch] = useState(false); const [multiToSingle, setMultiToSingle] = useState(false); @@ -189,6 +191,7 @@ const EditChannelModal = (props) => { const [channelSearchValue, setChannelSearchValue] = useState(''); const [useManualInput, setUseManualInput] = useState(false); // 是否使用手动输入模式 const [keyMode, setKeyMode] = useState('append'); // 密钥模式:replace(覆盖)或 append(追加) + const [isEnterpriseAccount, setIsEnterpriseAccount] = useState(false); // 是否为企业账户 // 2FA验证查看密钥相关状态 const [twoFAState, setTwoFAState] = useState({ @@ -437,15 +440,19 @@ const EditChannelModal = (props) => { parsedSettings.azure_responses_version || ''; // 读取 Vertex 密钥格式 data.vertex_key_type = parsedSettings.vertex_key_type || 'json'; + // 读取企业账户设置 + data.is_enterprise_account = parsedSettings.openrouter_enterprise === true; } catch (error) { console.error('解析其他设置失败:', error); data.azure_responses_version = ''; data.region = ''; data.vertex_key_type = 'json'; + data.is_enterprise_account = false; } } else { // 兼容历史数据:老渠道没有 settings 时,默认按 json 展示 data.vertex_key_type = 'json'; + data.is_enterprise_account = false; } setInputs(data); @@ -457,6 +464,8 @@ const EditChannelModal = (props) => { } else { setAutoBan(true); } + // 同步企业账户状态 + setIsEnterpriseAccount(data.is_enterprise_account || false); setBasicModels(getChannelModels(data.type)); // 同步更新channelSettings状态显示 setChannelSettings({ @@ -716,6 +725,8 @@ const EditChannelModal = (props) => { }); // 重置密钥模式状态 setKeyMode('append'); + // 重置企业账户状态 + setIsEnterpriseAccount(false); // 清空表单中的key_mode字段 if (formApiRef.current) { formApiRef.current.setValue('key_mode', undefined); @@ -879,6 +890,21 @@ const EditChannelModal = (props) => { }; localInputs.setting = JSON.stringify(channelExtraSettings); + // 处理type === 20的企业账户设置 + if (localInputs.type === 20) { + let settings = {}; + if (localInputs.settings) { + try { + settings = JSON.parse(localInputs.settings); + } catch (error) { + console.error('解析settings失败:', error); + } + } + // 设置企业账户标识,无论是true还是false都要传到后端 + settings.openrouter_enterprise = localInputs.is_enterprise_account === true; + 
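// Re-serializing keeps the flag inside the channel's settings JSON, which the
// backend decodes into ChannelOtherSettings and reads via IsOpenRouterEnterprise();
// enterprise upstreams wrap the normal body as {"success": true, "data": {...}},
// which OpenaiHandler unwraps before parsing the usual completion response.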
localInputs.settings = JSON.stringify(settings); + } + // 清理不需要发送到后端的字段 delete localInputs.force_format; delete localInputs.thinking_to_content; @@ -886,6 +912,7 @@ delete localInputs.pass_through_body_enabled; delete localInputs.system_prompt; delete localInputs.system_prompt_override; + delete localInputs.is_enterprise_account; // 顶层的 vertex_key_type 不应发送给后端 delete localInputs.vertex_key_type; @@ -1203,6 +1230,21 @@ const EditChannelModal = (props) => { onChange={(value) => handleInputChange('type', value)} /> + {inputs.type === 20 && ( + <Form.Switch field='is_enterprise_account' onChange={(value) => { + setIsEnterpriseAccount(value); + handleInputChange('is_enterprise_account', value); + }} + extraText={t('企业账户为特殊返回格式,需要特殊处理,如果非企业账户,请勿勾选')} + initValue={inputs.is_enterprise_account} + /> + )} + Date: Sun, 28 Sep 2025 15:29:01 +0800 Subject: [PATCH 16/18] fix: streamline error handling in OpenRouter response processing --- relay/channel/openai/relay-openai.go | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/relay/channel/openai/relay-openai.go b/relay/channel/openai/relay-openai.go index 26a7f40cc..a88b68502 100644 --- a/relay/channel/openai/relay-openai.go +++ b/relay/channel/openai/relay-openai.go @@ -200,12 +200,13 @@ func OpenaiHandler(c *gin.Context, info *relaycommon.RelayInfo, resp *http.Respo logger.LogError(c, fmt.Sprintf("openrouter enterprise response success=false, data: %s", enterpriseResponse.Data)) return nil, types.NewOpenAIError(fmt.Errorf("openrouter response success=false"), types.ErrorCodeBadResponseBody, http.StatusInternalServerError) } - } else { - err = common.Unmarshal(responseBody, &simpleResponse) - if err != nil { - return nil, types.NewOpenAIError(err, types.ErrorCodeBadResponseBody, http.StatusInternalServerError) - } } + + err = common.Unmarshal(responseBody, &simpleResponse) + if err != nil { + return nil, types.NewOpenAIError(err, types.ErrorCodeBadResponseBody, http.StatusInternalServerError) + } + if oaiError := simpleResponse.GetOpenAIError(); oaiError != nil && oaiError.Type != "" { return nil, types.WithOpenAIError(*oaiError, resp.StatusCode) }
From 93e30703d4a19a54a38373a47665b8573058bd4c Mon Sep 17 00:00:00 2001 From: bubblepipe42 Date: Fri, 3 Oct 2025 13:55:19 +0800 Subject: [PATCH 17/18] action --- .github/workflows/electron-build.yml | 104 ++++++++++++ .gitignore | 6 +- electron/README.md | 73 ++++++++ electron/build.sh | 41 +++++ electron/create-tray-icon.js | 60 +++++++ electron/entitlements.mac.plist | 18 ++ electron/icon.png | Bin 0 -> 31262 bytes electron/main.js | 239 +++++++++++++++++++++++++++ electron/package.json | 100 +++++++++++ electron/preload.js | 6 + electron/tray-icon-windows.png | Bin 0 -> 1203 bytes electron/tray-iconTemplate.png | Bin 0 -> 459 bytes electron/tray-iconTemplate@2x.png | Bin 0 -> 754 bytes web/package.json | 1 + 14 files changed, 647 insertions(+), 1 deletion(-) create mode 100644 .github/workflows/electron-build.yml create mode 100644 electron/README.md create mode 100755 electron/build.sh create mode 100644 electron/create-tray-icon.js create mode 100644 electron/entitlements.mac.plist create mode 100644 electron/icon.png create mode 100644 electron/main.js create mode 100644 electron/package.json create mode 100644 electron/preload.js create mode 100644 electron/tray-icon-windows.png create mode 100644 electron/tray-iconTemplate.png create mode 100644 electron/tray-iconTemplate@2x.png diff --git a/.github/workflows/electron-build.yml b/.github/workflows/electron-build.yml new file mode 100644 index
diff --git a/.github/workflows/electron-build.yml b/.github/workflows/electron-build.yml
new file mode 100644
index 000000000..b274db857
--- /dev/null
+++ b/.github/workflows/electron-build.yml
@@ -0,0 +1,104 @@
+name: Build Electron App
+
+on:
+  push:
+    tags:
+      - 'v*.*.*'  # Triggers on version tags like v1.0.0
+  workflow_dispatch:  # Allows manual triggering
+
+jobs:
+  build:
+    strategy:
+      matrix:
+        os: [macos-latest, windows-latest]
+
+    runs-on: ${{ matrix.os }}
+
+    steps:
+      - name: Checkout code
+        uses: actions/checkout@v4
+
+      - name: Setup Node.js
+        uses: actions/setup-node@v4
+        with:
+          node-version: '20'
+
+      - name: Setup Go
+        uses: actions/setup-go@v5
+        with:
+          go-version: '1.21'
+
+      - name: Build frontend
+        run: |
+          cd web
+          npm install --legacy-peer-deps
+          npm run build
+        env:
+          DISABLE_ESLINT_PLUGIN: 'true'
+          NODE_OPTIONS: '--max_old_space_size=4096'
+
+      - name: Build Go binary (macOS/Linux)
+        if: runner.os != 'Windows'
+        run: |
+          go build -ldflags="-s -w" -o new-api
+
+      - name: Build Go binary (Windows)
+        if: runner.os == 'Windows'
+        run: |
+          go build -ldflags="-s -w" -o new-api.exe
+
+      - name: Install Electron dependencies
+        run: |
+          cd electron
+          npm install
+
+      - name: Build Electron app (macOS)
+        if: runner.os == 'macOS'
+        run: |
+          cd electron
+          npm run build:mac
+        env:
+          CSC_IDENTITY_AUTO_DISCOVERY: false  # Skip code signing
+
+      - name: Build Electron app (Windows)
+        if: runner.os == 'Windows'
+        run: |
+          cd electron
+          npm run build:win
+
+      - name: Upload artifacts (macOS)
+        if: runner.os == 'macOS'
+        uses: actions/upload-artifact@v4
+        with:
+          name: macos-build
+          path: |
+            electron/dist/*.dmg
+            electron/dist/*.zip
+
+      - name: Upload artifacts (Windows)
+        if: runner.os == 'Windows'
+        uses: actions/upload-artifact@v4
+        with:
+          name: windows-build
+          path: |
+            electron/dist/*.exe
+
+  release:
+    needs: build
+    runs-on: ubuntu-latest
+    if: startsWith(github.ref, 'refs/tags/')
+
+    steps:
+      - name: Download all artifacts
+        uses: actions/download-artifact@v4
+
+      - name: Create Release
+        uses: softprops/action-gh-release@v1
+        with:
+          files: |
+            macos-build/*
+            windows-build/*
+          draft: false
+          prerelease: false
+        env:
+          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
\ No newline at end of file
diff --git a/.gitignore b/.gitignore
index 1382829fd..570a4385b 100644
--- a/.gitignore
+++ b/.gitignore
@@ -11,4 +11,8 @@ web/dist
 one-api
 .DS_Store
 tiktoken_cache
-.eslintcache
\ No newline at end of file
+.eslintcache
+
+electron/node_modules
+electron/dist
+electron/package-lock.json
\ No newline at end of file
diff --git a/electron/README.md b/electron/README.md
new file mode 100644
index 000000000..88463b8ae
--- /dev/null
+++ b/electron/README.md
@@ -0,0 +1,73 @@
+# New API Electron Desktop App
+
+This directory contains the Electron wrapper for New API, providing a native desktop application with system tray support for Windows, macOS, and Linux.
+
+## Prerequisites
+
+### 1. Go Binary (Required)
+The Electron app requires the compiled Go binary to function. You have two options:
+
+**Option A: Use existing binary (without Go installed)**
+```bash
+# If you have a pre-built binary (e.g., new-api-macos)
+cp ../new-api-macos ../new-api
+```
+
+**Option B: Build from source (requires Go)**
+Build the backend binary from the repository root, as shown below, then continue with step 2.
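+A minimal sketch, assuming Go 1.21+ is installed; this mirrors the command `build.sh` and the CI workflow run (`CGO_ENABLED=1` is what `build.sh` sets for the SQLite driver):
+
+```bash
+cd ..   # repository root
+CGO_ENABLED=1 go build -ldflags="-s -w" -o new-api
+```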
+
+### 2. Electron Dependencies
+```bash
+cd electron
+npm install
+```
+
+## Development
+
+Run the app in development mode:
+```bash
+npm start
+```
+
+This will:
+- Start the Go backend on port 3000
+- Open an Electron window with DevTools enabled
+- Create a system tray icon (menu bar on macOS)
+- Store database in `../data/new-api.db`
+
+## Building for Production
+
+### Quick Build
+```bash
+# Ensure Go binary exists in parent directory
+ls ../new-api  # Should exist
+
+# Build for current platform
+npm run build
+
+# Platform-specific builds
+npm run build:mac    # Creates .dmg and .zip
+npm run build:win    # Creates .exe installer
+npm run build:linux  # Creates .AppImage and .deb
+```
+
+### Build Output
+- Built applications are in `electron/dist/`
+- macOS: `.dmg` (installer) and `.zip` (portable)
+- Windows: `.exe` (installer) and portable exe
+- Linux: `.AppImage` and `.deb`
+
+## Configuration
+
+### Port
+Default port is 3000. To change it, edit `main.js`:
+```javascript
+const PORT = 3000; // Change to desired port
+```
+
+### Database Location
+- **Development**: `../data/new-api.db` (project directory)
+- **Production**:
+  - macOS: `~/Library/Application Support/New API/data/`
+  - Windows: `%APPDATA%/New API/data/`
+  - Linux: `~/.config/New API/data/`
diff --git a/electron/build.sh b/electron/build.sh
new file mode 100755
index 000000000..cef714328
--- /dev/null
+++ b/electron/build.sh
@@ -0,0 +1,41 @@
+#!/bin/bash
+
+set -e
+
+echo "Building New API Electron App..."
+
+echo "Step 1: Building frontend..."
+cd ../web
+DISABLE_ESLINT_PLUGIN='true' bun run build
+cd ../electron
+
+echo "Step 2: Building Go backend..."
+cd ..
+
+if [[ "$OSTYPE" == "darwin"* ]]; then
+  echo "Building for macOS..."
+  CGO_ENABLED=1 go build -ldflags="-s -w" -o new-api
+  cd electron
+  npm install
+  npm run build:mac
+elif [[ "$OSTYPE" == "linux-gnu"* ]]; then
+  echo "Building for Linux..."
+  CGO_ENABLED=1 go build -ldflags="-s -w" -o new-api
+  cd electron
+  npm install
+  npm run build:linux
+elif [[ "$OSTYPE" == "msys" || "$OSTYPE" == "cygwin" || "$OSTYPE" == "win32" ]]; then
+  echo "Building for Windows..."
+  CGO_ENABLED=1 go build -ldflags="-s -w" -o new-api.exe
+  cd electron
+  npm install
+  npm run build:win
+else
+  echo "Unknown OS, building for current platform..."
+  CGO_ENABLED=1 go build -ldflags="-s -w" -o new-api
+  cd electron
+  npm install
+  npm run build
+fi
+
+echo "Build complete! Check electron/dist/ for output."
\ No newline at end of file
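One usage note on the script above: its first step is `cd ../web`, so it assumes it is launched from the `electron/` directory. A typical invocation from the repository root:

```bash
cd electron
./build.sh   # builds the frontend, the Go backend, then the Electron bundle for this OS
```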
diff --git a/electron/create-tray-icon.js b/electron/create-tray-icon.js
new file mode 100644
index 000000000..517393b2e
--- /dev/null
+++ b/electron/create-tray-icon.js
@@ -0,0 +1,60 @@
+// Create a simple tray icon for macOS
+// Run: node create-tray-icon.js
+
+const fs = require('fs');
+const { createCanvas } = require('canvas');
+
+function createTrayIcon() {
+  // For macOS, we use a Template image (black and white).
+  // Base menu-bar size is 22x22; a true @2x Retina asset would be 44x44.
+  const canvas = createCanvas(22, 22);
+  const ctx = canvas.getContext('2d');
+
+  // Clear canvas
+  ctx.clearRect(0, 0, 22, 22);
+
+  // Draw a simple "API" icon
+  ctx.fillStyle = '#000000';
+  ctx.font = 'bold 10px system-ui';
+  ctx.textAlign = 'center';
+  ctx.textBaseline = 'middle';
+  ctx.fillText('API', 11, 11);
+
+  // Save as PNG
+  const buffer = canvas.toBuffer('image/png');
+  fs.writeFileSync('tray-icon.png', buffer);
+
+  // Template images on macOS adapt to the menu bar theme.
+  // Note: this reuses the 22x22 buffer for the @2x file as well.
+  fs.writeFileSync('tray-iconTemplate.png', buffer);
+  fs.writeFileSync('tray-iconTemplate@2x.png', buffer);
+
+  console.log('Tray icon created successfully!');
+}
+
+// Check if canvas is installed
+try {
+  createTrayIcon();
+} catch (err) {
+  console.log('Canvas module not installed.');
+  console.log('For now, creating a placeholder. Install canvas with: npm install canvas');
+
+  // Create a minimal 1x1 transparent PNG as placeholder
+  const minimalPNG = Buffer.from([
+    0x89, 0x50, 0x4E, 0x47, 0x0D, 0x0A, 0x1A, 0x0A,
+    0x00, 0x00, 0x00, 0x0D, 0x49, 0x48, 0x44, 0x52,
+    0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01,
+    0x01, 0x03, 0x00, 0x00, 0x00, 0x25, 0xDB, 0x56,
+    0xCA, 0x00, 0x00, 0x00, 0x03, 0x50, 0x4C, 0x54,
+    0x45, 0x00, 0x00, 0x00, 0xA7, 0x7A, 0x3D, 0xDA,
+    0x00, 0x00, 0x00, 0x01, 0x74, 0x52, 0x4E, 0x53,
+    0x00, 0x40, 0xE6, 0xD8, 0x66, 0x00, 0x00, 0x00,
+    0x0A, 0x49, 0x44, 0x41, 0x54, 0x08, 0x1D, 0x62,
+    0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00,
+    0x00, 0x01, 0x0A, 0x2D, 0xCB, 0x59, 0x00, 0x00,
+    0x00, 0x00, 0x49, 0x45, 0x4E, 0x44, 0xAE, 0x42,
+    0x60, 0x82
+  ]);
+
+  fs.writeFileSync('tray-icon.png', minimalPNG);
+  console.log('Created placeholder tray icon.');
+}
\ No newline at end of file
diff --git a/electron/entitlements.mac.plist b/electron/entitlements.mac.plist
new file mode 100644
index 000000000..a00aebcd0
--- /dev/null
+++ b/electron/entitlements.mac.plist
@@ -0,0 +1,18 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
+<plist version="1.0">
+<dict>
+    <key>com.apple.security.cs.allow-unsigned-executable-memory</key>
+    <true/>
+    <key>com.apple.security.cs.allow-jit</key>
+    <true/>
+    <key>com.apple.security.cs.disable-library-validation</key>
+    <true/>
+    <key>com.apple.security.cs.allow-dyld-environment-variables</key>
+    <true/>
+    <key>com.apple.security.network.client</key>
+    <true/>
+    <key>com.apple.security.network.server</key>
+    <true/>
+</dict>
+</plist>
\ No newline at end of file
diff --git a/electron/icon.png b/electron/icon.png
new file mode 100644
index 0000000000000000000000000000000000000000..c63ac2d77598af361f898c561ed40938719c8391
GIT binary patch
literal 31262
(base85-encoded image data omitted)

literal 0
HcmV?d00001
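The main.js diff that follows spawns the Go backend with PORT and SQLITE_PATH in its environment. A hedged sketch of that contract on the server side (illustrative only; new-api's actual startup code is not part of this series, and the names and defaults below are assumptions):

```go
// Sketch: how a Go server can honor the PORT and SQLITE_PATH environment
// variables the Electron wrapper sets. Not new-api's real configuration code.
package main

import (
	"fmt"
	"net/http"
	"os"
)

func main() {
	port := os.Getenv("PORT")
	if port == "" {
		port = "3000" // matches the wrapper's default
	}
	dbPath := os.Getenv("SQLITE_PATH")
	if dbPath == "" {
		dbPath = "new-api.db"
	}
	fmt.Printf("listening on :%s, sqlite at %s\n", port, dbPath)
	_ = http.ListenAndServe(":"+port, nil)
}
```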
diff --git a/electron/main.js b/electron/main.js
new file mode 100644
--- /dev/null
+++ b/electron/main.js
@@ -0,0 +1,239 @@
+const { app, BrowserWindow, Tray, Menu, dialog } = require('electron');
+const { spawn } = require('child_process');
+const path = require('path');
+const fs = require('fs');
+const http = require('http');
+
+const PORT = 3000;
+
+let mainWindow = null;
+let tray = null;
+let serverProcess = null;
+
+// Resolve the Go backend binary: the repository root in development, the
+// bundled copy under resources/bin in packaged builds (see the
+// extraResources entries in package.json).
+function getBinaryPath() {
+  const name = process.platform === 'win32' ? 'new-api.exe' : 'new-api';
+  if (process.env.NODE_ENV === 'development') {
+    return path.join(__dirname, '..', name);
+  }
+  return path.join(process.resourcesPath, 'bin', name);
+}
+
+function startServer() {
+  return new Promise((resolve, reject) => {
+    const binaryPath = getBinaryPath();
+    const isDev = process.env.NODE_ENV === 'development';
+
+    console.log('Starting server from:', binaryPath);
+
+    const env = { ...process.env, PORT: PORT.toString() };
+
+    let dataDir;
+    if (isDev) {
+      dataDir = path.join(__dirname, '..', 'data');
+    } else {
+      const userDataPath = app.getPath('userData');
+      dataDir = path.join(userDataPath, 'data');
+    }
+
+    if (!fs.existsSync(dataDir)) {
+      fs.mkdirSync(dataDir, { recursive: true });
+    }
+
+    env.SQLITE_PATH = path.join(dataDir, 'new-api.db');
+
+    const workingDir = isDev
+      ?
path.join(__dirname, '..') + : process.resourcesPath; + + serverProcess = spawn(binaryPath, [], { + env, + cwd: workingDir + }); + + serverProcess.stdout.on('data', (data) => { + console.log(`Server: ${data}`); + }); + + serverProcess.stderr.on('data', (data) => { + console.error(`Server Error: ${data}`); + }); + + serverProcess.on('error', (err) => { + console.error('Failed to start server:', err); + reject(err); + }); + + serverProcess.on('close', (code) => { + console.log(`Server process exited with code ${code}`); + if (mainWindow && !mainWindow.isDestroyed()) { + mainWindow.close(); + } + }); + + waitForServer(resolve, reject); + }); +} + +function waitForServer(resolve, reject, retries = 30) { + if (retries === 0) { + reject(new Error('Server failed to start within timeout')); + return; + } + + const req = http.get(`http://localhost:${PORT}`, (res) => { + console.log('Server is ready'); + resolve(); + }); + + req.on('error', () => { + setTimeout(() => waitForServer(resolve, reject, retries - 1), 1000); + }); + + req.end(); +} + +function createWindow() { + mainWindow = new BrowserWindow({ + width: 1400, + height: 900, + webPreferences: { + preload: path.join(__dirname, 'preload.js'), + nodeIntegration: false, + contextIsolation: true + }, + title: 'New API', + icon: path.join(__dirname, 'icon.png') + }); + + mainWindow.loadURL(`http://localhost:${PORT}`); + + if (process.env.NODE_ENV === 'development') { + mainWindow.webContents.openDevTools(); + } + + // Close to tray instead of quitting + mainWindow.on('close', (event) => { + if (!app.isQuitting) { + event.preventDefault(); + mainWindow.hide(); + if (process.platform === 'darwin') { + app.dock.hide(); + } + } + }); + + mainWindow.on('closed', () => { + mainWindow = null; + }); +} + +function createTray() { + // Use template icon for macOS (black with transparency, auto-adapts to theme) + // Use colored icon for Windows + const trayIconPath = process.platform === 'darwin' + ? path.join(__dirname, 'tray-iconTemplate.png') + : path.join(__dirname, 'tray-icon-windows.png'); + + tray = new Tray(trayIconPath); + + const contextMenu = Menu.buildFromTemplate([ + { + label: 'Show New API', + click: () => { + if (mainWindow === null) { + createWindow(); + } else { + mainWindow.show(); + if (process.platform === 'darwin') { + app.dock.show(); + } + } + } + }, + { type: 'separator' }, + { + label: 'Quit', + click: () => { + app.isQuitting = true; + app.quit(); + } + } + ]); + + tray.setToolTip('New API'); + tray.setContextMenu(contextMenu); + + // On macOS, clicking the tray icon shows the window + tray.on('click', () => { + if (mainWindow === null) { + createWindow(); + } else { + mainWindow.isVisible() ? 
mainWindow.hide() : mainWindow.show(); + if (mainWindow.isVisible() && process.platform === 'darwin') { + app.dock.show(); + } + } + }); +} + +app.whenReady().then(async () => { + try { + await startServer(); + createTray(); + createWindow(); + } catch (err) { + console.error('Failed to start application:', err); + dialog.showErrorBox('Startup Error', `Failed to start server: ${err.message}`); + app.quit(); + } +}); + +app.on('window-all-closed', () => { + // Don't quit when window is closed, keep running in tray + // Only quit when explicitly choosing Quit from tray menu +}); + +app.on('activate', () => { + if (BrowserWindow.getAllWindows().length === 0) { + createWindow(); + } +}); + +app.on('before-quit', (event) => { + if (serverProcess) { + event.preventDefault(); + + console.log('Shutting down server...'); + serverProcess.kill('SIGTERM'); + + setTimeout(() => { + if (serverProcess) { + serverProcess.kill('SIGKILL'); + } + app.exit(); + }, 5000); + + serverProcess.on('close', () => { + serverProcess = null; + app.exit(); + }); + } +}); \ No newline at end of file diff --git a/electron/package.json b/electron/package.json new file mode 100644 index 000000000..9cdf3d12f --- /dev/null +++ b/electron/package.json @@ -0,0 +1,100 @@ +{ + "name": "new-api-electron", + "version": "1.0.0", + "description": "New API - AI Model Gateway Desktop Application", + "main": "main.js", + "scripts": { + "start": "set NODE_ENV=development&& electron .", + "build": "electron-builder", + "build:mac": "electron-builder --mac", + "build:win": "electron-builder --win", + "build:linux": "electron-builder --linux" + }, + "keywords": [ + "ai", + "api", + "gateway", + "openai", + "claude" + ], + "author": "", + "license": "MIT", + "repository": { + "type": "git", + "url": "https://github.com/Calcium-Ion/new-api" + }, + "devDependencies": { + "electron": "^28.0.0", + "electron-builder": "^24.9.1" + }, + "build": { + "appId": "com.newapi.desktop", + "productName": "New API", + "publish": null, + "directories": { + "output": "dist" + }, + "files": [ + "main.js", + "preload.js", + "icon.png", + "tray-iconTemplate.png", + "tray-iconTemplate@2x.png", + "tray-icon-windows.png" + ], + "mac": { + "category": "public.app-category.developer-tools", + "icon": "icon.png", + "identity": null, + "hardenedRuntime": false, + "gatekeeperAssess": false, + "entitlements": "entitlements.mac.plist", + "entitlementsInherit": "entitlements.mac.plist", + "target": [ + "dmg", + "zip" + ], + "extraResources": [ + { + "from": "../new-api", + "to": "bin/new-api" + }, + { + "from": "../web/dist", + "to": "web/dist" + } + ] + }, + "win": { + "icon": "icon.png", + "target": [ + "nsis", + "portable" + ], + "extraResources": [ + { + "from": "../new-api.exe", + "to": "bin/new-api.exe" + } + ] + }, + "linux": { + "icon": "icon.png", + "target": [ + "AppImage", + "deb" + ], + "category": "Development", + "extraResources": [ + { + "from": "../new-api", + "to": "bin/new-api" + } + ] + }, + "nsis": { + "oneClick": false, + "allowToChangeInstallationDirectory": true + } + } +} \ No newline at end of file diff --git a/electron/preload.js b/electron/preload.js new file mode 100644 index 000000000..6d8b6daa0 --- /dev/null +++ b/electron/preload.js @@ -0,0 +1,6 @@ +const { contextBridge } = require('electron'); + +contextBridge.exposeInMainWorld('electron', { + version: process.versions.electron, + platform: process.platform +}); \ No newline at end of file diff --git a/electron/tray-icon-windows.png b/electron/tray-icon-windows.png new file mode 100644 
index 0000000000000000000000000000000000000000..57df8ead031450e7787bb01bec1c0be0e906f0f6
GIT binary patch
literal 1203
(base85-encoded image data omitted)

literal 0
HcmV?d00001

diff --git a/electron/tray-iconTemplate.png b/electron/tray-iconTemplate.png
new file mode 100644
GIT binary patch
literal 459
(base85-encoded image data omitted)

literal 0
HcmV?d00001

diff --git a/electron/tray-iconTemplate@2x.png b/electron/tray-iconTemplate@2x.png
new file mode 100644
index 0000000000000000000000000000000000000000..d5666a04e55b0233bb3c3981a361d1f61ea575e8
GIT binary patch
literal 754
(base85-encoded image data omitted)

literal 0
HcmV?d00001

diff --git a/web/package.json b/web/package.json
index f014d84b9..b94445f3c 100644
--- a/web/package.json
+++ b/web/package.json
@@ -10,6 +10,7 @@
     "@visactor/react-vchart": "~1.8.8",
     "@visactor/vchart": "~1.8.8",
     "@visactor/vchart-semi-theme": "~1.8.8",
+    "antd": "^5.27.4",
     "axios": "^0.27.2",
     "clsx": "^2.1.1",
     "country-flag-icons": "^1.5.19",

From d2492d2af963b391faf6b61f128bb6242f6c4ea4 Mon Sep 17 00:00:00 2001
From: bubblepipe42
Date: Fri, 3 Oct 2025 14:28:29 +0800
Subject: [PATCH 18/18] fix deps: drop the antd dependency added in the
 previous patch
---
 web/package.json | 1 -
 1 file changed, 1 deletion(-)

diff --git a/web/package.json b/web/package.json
index b94445f3c..f014d84b9 100644
--- a/web/package.json
+++ b/web/package.json
@@ -10,7 +10,6 @@
     "@visactor/react-vchart": "~1.8.8",
     "@visactor/vchart": "~1.8.8",
     "@visactor/vchart-semi-theme": "~1.8.8",
-    "antd": "^5.27.4",
     "axios": "^0.27.2",
     "clsx": "^2.1.1",
     "country-flag-icons": "^1.5.19",