fix: preserve explicit zero values in native relay requests
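Why the fix is needed: with Go's encoding/json, a value-typed field tagged omitempty cannot distinguish "explicitly zero" from "not set" — both marshal to nothing. When the relay unmarshals a client request and re-marshals it for the upstream, an explicit "temperature": 0 or "stream": false is therefore silently dropped. Switching the affected fields to pointer types preserves the distinction: a nil pointer means "not set", while a non-nil pointer to a zero value survives the round trip. A minimal, self-contained sketch of the behavior (the structs here are illustrative, not code from this repository):

package main

import (
	"encoding/json"
	"fmt"
)

// before mirrors the old value-typed field: omitempty drops a zero.
type before struct {
	Temperature float64 `json:"temperature,omitempty"`
}

// after mirrors the new pointer-typed field: nil means "unset",
// a non-nil pointer to 0 means "explicitly zero".
type after struct {
	Temperature *float64 `json:"temperature,omitempty"`
}

func main() {
	in := []byte(`{"temperature": 0}`) // client explicitly asks for temperature 0

	var b before
	_ = json.Unmarshal(in, &b)
	outB, _ := json.Marshal(b)
	fmt.Println(string(outB)) // {} — the explicit zero is lost on re-marshal

	var a after
	_ = json.Unmarshal(in, &a)
	outA, _ := json.Marshal(a)
	fmt.Println(string(outA)) // {"temperature":0} — the explicit zero is preserved
}

The diff below applies this pattern to GeneralOpenAIRequest and OpenAIResponsesRequest, reading the new pointer fields through lo.FromPtrOr from github.com/samber/lo.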
@@ -7,6 +7,7 @@ import (
 	"github.com/QuantumNous/new-api/common"
 	"github.com/QuantumNous/new-api/types"
+	"github.com/samber/lo"
 
 	"github.com/gin-gonic/gin"
 )
@@ -31,26 +32,26 @@ type GeneralOpenAIRequest struct {
 	Prompt              any               `json:"prompt,omitempty"`
 	Prefix              any               `json:"prefix,omitempty"`
 	Suffix              any               `json:"suffix,omitempty"`
-	Stream              bool              `json:"stream,omitempty"`
+	Stream              *bool             `json:"stream,omitempty"`
 	StreamOptions       *StreamOptions    `json:"stream_options,omitempty"`
-	MaxTokens           uint              `json:"max_tokens,omitempty"`
-	MaxCompletionTokens uint              `json:"max_completion_tokens,omitempty"`
+	MaxTokens           *uint             `json:"max_tokens,omitempty"`
+	MaxCompletionTokens *uint             `json:"max_completion_tokens,omitempty"`
 	ReasoningEffort     string            `json:"reasoning_effort,omitempty"`
 	Verbosity           json.RawMessage   `json:"verbosity,omitempty"` // gpt-5
 	Temperature         *float64          `json:"temperature,omitempty"`
-	TopP                float64           `json:"top_p,omitempty"`
-	TopK                int               `json:"top_k,omitempty"`
+	TopP                *float64          `json:"top_p,omitempty"`
+	TopK                *int              `json:"top_k,omitempty"`
 	Stop                any               `json:"stop,omitempty"`
-	N                   int               `json:"n,omitempty"`
+	N                   *int              `json:"n,omitempty"`
 	Input               any               `json:"input,omitempty"`
 	Instruction         string            `json:"instruction,omitempty"`
 	Size                string            `json:"size,omitempty"`
 	Functions           json.RawMessage   `json:"functions,omitempty"`
-	FrequencyPenalty    float64           `json:"frequency_penalty,omitempty"`
-	PresencePenalty     float64           `json:"presence_penalty,omitempty"`
+	FrequencyPenalty    *float64          `json:"frequency_penalty,omitempty"`
+	PresencePenalty     *float64          `json:"presence_penalty,omitempty"`
 	ResponseFormat      *ResponseFormat   `json:"response_format,omitempty"`
 	EncodingFormat      json.RawMessage   `json:"encoding_format,omitempty"`
-	Seed                float64           `json:"seed,omitempty"`
+	Seed                *float64          `json:"seed,omitempty"`
 	ParallelTooCalls    *bool             `json:"parallel_tool_calls,omitempty"`
 	Tools               []ToolCallRequest `json:"tools,omitempty"`
 	ToolChoice          any               `json:"tool_choice,omitempty"`
@@ -59,9 +60,9 @@ type GeneralOpenAIRequest struct {
 	// ServiceTier specifies upstream service level and may affect billing.
 	// This field is filtered by default and can be enabled via channel setting allow_service_tier.
 	ServiceTier string          `json:"service_tier,omitempty"`
-	LogProbs    bool            `json:"logprobs,omitempty"`
-	TopLogProbs int             `json:"top_logprobs,omitempty"`
-	Dimensions  int             `json:"dimensions,omitempty"`
+	LogProbs    *bool           `json:"logprobs,omitempty"`
+	TopLogProbs *int            `json:"top_logprobs,omitempty"`
+	Dimensions  *int            `json:"dimensions,omitempty"`
 	Modalities  json.RawMessage `json:"modalities,omitempty"`
 	Audio       json.RawMessage `json:"audio,omitempty"`
 	// Safety identifier, used to help OpenAI detect application users who may violate its usage policies
@@ -100,8 +101,8 @@ type GeneralOpenAIRequest struct {
 	// pplx Params
 	SearchDomainFilter     json.RawMessage `json:"search_domain_filter,omitempty"`
 	SearchRecencyFilter    string          `json:"search_recency_filter,omitempty"`
-	ReturnImages           bool            `json:"return_images,omitempty"`
-	ReturnRelatedQuestions bool            `json:"return_related_questions,omitempty"`
+	ReturnImages           *bool           `json:"return_images,omitempty"`
+	ReturnRelatedQuestions *bool           `json:"return_related_questions,omitempty"`
 	SearchMode             string          `json:"search_mode,omitempty"`
 	// Minimax
 	ReasoningSplit         json.RawMessage `json:"reasoning_split,omitempty"`
@@ -140,10 +141,12 @@ func (r *GeneralOpenAIRequest) GetTokenCountMeta() *types.TokenCountMeta {
 		texts = append(texts, inputs...)
 	}
 
-	if r.MaxCompletionTokens > r.MaxTokens {
-		tokenCountMeta.MaxTokens = int(r.MaxCompletionTokens)
+	maxTokens := lo.FromPtrOr(r.MaxTokens, uint(0))
+	maxCompletionTokens := lo.FromPtrOr(r.MaxCompletionTokens, uint(0))
+	if maxCompletionTokens > maxTokens {
+		tokenCountMeta.MaxTokens = int(maxCompletionTokens)
 	} else {
-		tokenCountMeta.MaxTokens = int(r.MaxTokens)
+		tokenCountMeta.MaxTokens = int(maxTokens)
 	}
 
 	for _, message := range r.Messages {
@@ -222,7 +225,7 @@ func (r *GeneralOpenAIRequest) GetTokenCountMeta() *types.TokenCountMeta {
 }
 
 func (r *GeneralOpenAIRequest) IsStream(c *gin.Context) bool {
-	return r.Stream
+	return lo.FromPtrOr(r.Stream, false)
 }
 
 func (r *GeneralOpenAIRequest) SetModelName(modelName string) {
@@ -273,10 +276,11 @@ type StreamOptions struct {
 }
 
 func (r *GeneralOpenAIRequest) GetMaxTokens() uint {
-	if r.MaxCompletionTokens != 0 {
-		return r.MaxCompletionTokens
+	maxCompletionTokens := lo.FromPtrOr(r.MaxCompletionTokens, uint(0))
+	if maxCompletionTokens != 0 {
+		return maxCompletionTokens
 	}
-	return r.MaxTokens
+	return lo.FromPtrOr(r.MaxTokens, uint(0))
 }
 
 func (r *GeneralOpenAIRequest) ParseInput() []string {
@@ -816,7 +820,7 @@ type OpenAIResponsesRequest struct {
 	Conversation      json.RawMessage `json:"conversation,omitempty"`
 	ContextManagement json.RawMessage `json:"context_management,omitempty"`
 	Instructions      json.RawMessage `json:"instructions,omitempty"`
-	MaxOutputTokens   uint            `json:"max_output_tokens,omitempty"`
+	MaxOutputTokens   *uint           `json:"max_output_tokens,omitempty"`
 	TopLogProbs       *int            `json:"top_logprobs,omitempty"`
 	Metadata          json.RawMessage `json:"metadata,omitempty"`
 	ParallelToolCalls json.RawMessage `json:"parallel_tool_calls,omitempty"`
@@ -833,7 +837,7 @@ type OpenAIResponsesRequest struct {
 	// SafetyIdentifier carries client identity for policy abuse detection.
 	// This field is filtered by default and can be enabled via channel setting allow_safety_identifier.
 	SafetyIdentifier string          `json:"safety_identifier,omitempty"`
-	Stream           bool            `json:"stream,omitempty"`
+	Stream           *bool           `json:"stream,omitempty"`
 	StreamOptions    *StreamOptions  `json:"stream_options,omitempty"`
 	Temperature      *float64        `json:"temperature,omitempty"`
 	Text             json.RawMessage `json:"text,omitempty"`
@@ -842,7 +846,7 @@ type OpenAIResponsesRequest struct {
 	TopP           *float64        `json:"top_p,omitempty"`
 	Truncation     string          `json:"truncation,omitempty"`
 	User           string          `json:"user,omitempty"`
-	MaxToolCalls   uint            `json:"max_tool_calls,omitempty"`
+	MaxToolCalls   *uint           `json:"max_tool_calls,omitempty"`
 	Prompt         json.RawMessage `json:"prompt,omitempty"`
 	// qwen
 	EnableThinking json.RawMessage `json:"enable_thinking,omitempty"`
@@ -905,12 +909,12 @@ func (r *OpenAIResponsesRequest) GetTokenCountMeta() *types.TokenCountMeta {
 	return &types.TokenCountMeta{
 		CombineText: strings.Join(texts, "\n"),
 		Files:       fileMeta,
-		MaxTokens:   int(r.MaxOutputTokens),
+		MaxTokens:   int(lo.FromPtrOr(r.MaxOutputTokens, uint(0))),
 	}
 }
 
 func (r *OpenAIResponsesRequest) IsStream(c *gin.Context) bool {
-	return r.Stream
+	return lo.FromPtrOr(r.Stream, false)
 }
 
 func (r *OpenAIResponsesRequest) SetModelName(modelName string) {
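For reference, a minimal sketch of the samber/lo helpers the new code leans on (the req struct and values are illustrative, not from this repository): lo.ToPtr wraps a value in a pointer, and lo.FromPtrOr dereferences a pointer with a fallback, so every read of the new pointer fields stays nil-safe.

package main

import (
	"fmt"

	"github.com/samber/lo"
)

type req struct {
	MaxTokens *uint
	Stream    *bool
}

func main() {
	// Explicit zero for MaxTokens; Stream deliberately left unset (nil).
	r := req{MaxTokens: lo.ToPtr(uint(0))}

	// FromPtrOr returns *p when p is non-nil, otherwise the fallback.
	fmt.Println(lo.FromPtrOr(r.MaxTokens, uint(0))) // 0 — dereferenced explicit zero
	fmt.Println(lo.FromPtrOr(r.Stream, false))      // false — nil falls back to the default
}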