Repository: https://github.com/QuantumNous/new-api.git
feat: update Gemini API response handling to include block reason and improve error reporting

@@ -293,11 +293,12 @@ type GeminiChatSafetyRating struct {
 
 type GeminiChatPromptFeedback struct {
 	SafetyRatings []GeminiChatSafetyRating `json:"safetyRatings"`
+	BlockReason   *string                  `json:"blockReason,omitempty"`
 }
 
 type GeminiChatResponse struct {
 	Candidates     []GeminiChatCandidate     `json:"candidates"`
-	PromptFeedback GeminiChatPromptFeedback  `json:"promptFeedback"`
+	PromptFeedback *GeminiChatPromptFeedback `json:"promptFeedback,omitempty"`
 	UsageMetadata  GeminiUsageMetadata       `json:"usageMetadata"`
 }
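
Why the struct changes matter: with PromptFeedback as a pointer plus omitempty, a decoded response can distinguish "Gemini sent no feedback" (nil) from "feedback present with a block reason". A minimal sketch of that decoding behavior, using trimmed-down copies of the dto structs (GeminiChatCandidate is replaced by json.RawMessage purely to keep the example self-contained):

package main

import (
	"encoding/json"
	"fmt"
)

// Trimmed-down copies of the dto structs, for illustration only.
type GeminiChatPromptFeedback struct {
	BlockReason *string `json:"blockReason,omitempty"`
}

type GeminiChatResponse struct {
	Candidates     []json.RawMessage         `json:"candidates"`
	PromptFeedback *GeminiChatPromptFeedback `json:"promptFeedback,omitempty"`
}

func main() {
	// A safety-blocked response: no candidates, but blockReason is set.
	blocked := []byte(`{"candidates":[],"promptFeedback":{"blockReason":"SAFETY"}}`)
	// An empty response with no promptFeedback key at all.
	empty := []byte(`{"candidates":[]}`)

	var a, b GeminiChatResponse
	_ = json.Unmarshal(blocked, &a)
	_ = json.Unmarshal(empty, &b)

	// A pointer field stays nil when its JSON key is absent, so the
	// handler can tell the two cases apart.
	fmt.Println(a.PromptFeedback != nil && a.PromptFeedback.BlockReason != nil) // true
	fmt.Println(b.PromptFeedback == nil)                                        // true
}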

@@ -1050,7 +1050,12 @@ func GeminiChatHandler(c *gin.Context, info *relaycommon.RelayInfo, resp *http.R
 		return nil, types.NewOpenAIError(err, types.ErrorCodeBadResponseBody, http.StatusInternalServerError)
 	}
 	if len(geminiResponse.Candidates) == 0 {
-		return nil, types.NewOpenAIError(errors.New("no candidates returned"), types.ErrorCodeBadResponseBody, http.StatusInternalServerError)
+		//return nil, types.NewOpenAIError(errors.New("no candidates returned"), types.ErrorCodeBadResponseBody, http.StatusInternalServerError)
+		if geminiResponse.PromptFeedback != nil && geminiResponse.PromptFeedback.BlockReason != nil {
+			return nil, types.NewOpenAIError(errors.New("request blocked by Gemini API: "+*geminiResponse.PromptFeedback.BlockReason), types.ErrorCodePromptBlocked, http.StatusBadRequest)
+		} else {
+			return nil, types.NewOpenAIError(errors.New("empty response from Gemini API"), types.ErrorCodeEmptyResponse, http.StatusInternalServerError)
+		}
 	}
 	fullTextResponse := responseGeminiChat2OpenAI(c, &geminiResponse)
 	fullTextResponse.Model = info.UpstreamModelName
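
This handler hunk is what turns that distinction into better error reporting: an empty candidate list with a block reason now becomes a 400 carrying Gemini's reason, while a genuinely empty response stays a 500. A rough, self-contained sketch of just that branch (classifyEmptyResponse is a hypothetical helper; the real code inlines this logic with types.NewOpenAIError inside GeminiChatHandler):

package main

import (
	"fmt"
	"net/http"
)

// Trimmed-down structs, as in the first sketch.
type GeminiChatPromptFeedback struct {
	BlockReason *string `json:"blockReason,omitempty"`
}

type GeminiChatResponse struct {
	PromptFeedback *GeminiChatPromptFeedback `json:"promptFeedback,omitempty"`
}

// classifyEmptyResponse mirrors the new branch in GeminiChatHandler:
// blocked prompts are the caller's problem (400); everything else is
// treated as an upstream failure (500).
func classifyEmptyResponse(resp *GeminiChatResponse) (int, string) {
	if resp.PromptFeedback != nil && resp.PromptFeedback.BlockReason != nil {
		return http.StatusBadRequest, "request blocked by Gemini API: " + *resp.PromptFeedback.BlockReason
	}
	return http.StatusInternalServerError, "empty response from Gemini API"
}

func main() {
	reason := "SAFETY"
	status, msg := classifyEmptyResponse(&GeminiChatResponse{
		PromptFeedback: &GeminiChatPromptFeedback{BlockReason: &reason},
	})
	fmt.Println(status, msg) // 400 request blocked by Gemini API: SAFETY
}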

@@ -636,9 +636,6 @@ func extractTextFromGeminiParts(parts []dto.GeminiPart) string {
 func ResponseOpenAI2Gemini(openAIResponse *dto.OpenAITextResponse, info *relaycommon.RelayInfo) *dto.GeminiChatResponse {
 	geminiResponse := &dto.GeminiChatResponse{
 		Candidates: make([]dto.GeminiChatCandidate, 0, len(openAIResponse.Choices)),
-		PromptFeedback: dto.GeminiChatPromptFeedback{
-			SafetyRatings: []dto.GeminiChatSafetyRating{},
-		},
 		UsageMetadata: dto.GeminiUsageMetadata{
 			PromptTokenCount:     openAIResponse.PromptTokens,
 			CandidatesTokenCount: openAIResponse.CompletionTokens,

@@ -735,9 +732,6 @@ func StreamResponseOpenAI2Gemini(openAIResponse *dto.ChatCompletionsStreamRespon
 
 	geminiResponse := &dto.GeminiChatResponse{
 		Candidates: make([]dto.GeminiChatCandidate, 0, len(openAIResponse.Choices)),
-		PromptFeedback: dto.GeminiChatPromptFeedback{
-			SafetyRatings: []dto.GeminiChatSafetyRating{},
-		},
 		UsageMetadata: dto.GeminiUsageMetadata{
 			PromptTokenCount:     info.PromptTokens,
 			CandidatesTokenCount: 0, // streaming responses may not carry complete usage info
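
These two converter hunks are the flip side of the pointer change: since a nil PromptFeedback now disappears from the marshaled JSON via omitempty, ResponseOpenAI2Gemini and StreamResponseOpenAI2Gemini no longer need to pre-fill it with an empty SafetyRatings slice. A quick check of that serialization behavior, appending a couple of lines to the first sketch's main function:

	// Leaving PromptFeedback nil drops the key entirely, so converted
	// responses no longer carry an always-empty "promptFeedback" object.
	out, _ := json.Marshal(GeminiChatResponse{Candidates: []json.RawMessage{}})
	fmt.Println(string(out)) // {"candidates":[]}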

@@ -69,6 +69,7 @@ const (
 	ErrorCodeEmptyResponse  ErrorCode = "empty_response"
 	ErrorCodeAwsInvokeError ErrorCode = "aws_invoke_error"
 	ErrorCodeModelNotFound  ErrorCode = "model_not_found"
+	ErrorCodePromptBlocked  ErrorCode = "prompt_blocked"
 
 	// sql error
 	ErrorCodeQueryDataError ErrorCode = "query_data_error"
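
The new ErrorCodePromptBlocked gives API consumers a stable code to branch on instead of string-matching the error message. Assuming the relay surfaces the code in an OpenAI-style error envelope (the field layout below is an assumption, not confirmed against new-api's actual wire format), a client-side check might look like:

package main

import (
	"encoding/json"
	"fmt"
)

// Assumed OpenAI-style error body; field names are illustrative.
type errorEnvelope struct {
	Error struct {
		Message string `json:"message"`
		Code    string `json:"code"`
	} `json:"error"`
}

func main() {
	body := []byte(`{"error":{"message":"request blocked by Gemini API: SAFETY","code":"prompt_blocked"}}`)
	var e errorEnvelope
	if err := json.Unmarshal(body, &e); err == nil && e.Error.Code == "prompt_blocked" {
		// Match on the stable code rather than the human-readable text.
		fmt.Println("prompt was blocked:", e.Error.Message)
	}
}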