feat: refactor token estimation logic

- Introduced new OpenAI text models in `common/model.go`.
- Added an `IsOpenAITextModel` helper to check whether a model name is an OpenAI text model (a sketch follows this list).
- Refactored token estimation across the relay channels to use estimated prompt tokens instead of direct prompt token counts.
- Updated related functions and structures to accommodate the new estimation approach in usage accounting.
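For reference, a helper like `IsOpenAITextModel` can be a simple set lookup. The sketch below is illustrative only; the map name and the model list are assumptions, not the committed code.

// common/model.go (illustrative sketch, not the committed code)
package common

// openAITextModels is a hypothetical lookup set; the real commit
// defines its own list of OpenAI text models.
var openAITextModels = map[string]bool{
	"gpt-4o":        true,
	"gpt-4o-mini":   true,
	"gpt-3.5-turbo": true,
}

// IsOpenAITextModel reports whether the given model name is a known
// OpenAI text model. Missing keys return false, so unknown models
// fall through without a separate existence check.
func IsOpenAITextModel(model string) bool {
	return openAITextModels[model]
}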
Author: CaIon
Date:   2025-12-02 21:34:39 +08:00
Parent: d3ca454c3b
Commit: f5b409d74f
26 changed files with 389 additions and 275 deletions


@@ -105,7 +105,7 @@ func tencentStreamHandler(c *gin.Context, info *relaycommon.RelayInfo, resp *http.Response
 			data = strings.TrimPrefix(data, "data:")
 			var tencentResponse TencentChatResponse
-			err := json.Unmarshal([]byte(data), &tencentResponse)
+			err := common.Unmarshal([]byte(data), &tencentResponse)
 			if err != nil {
 				common.SysLog("error unmarshalling stream response: " + err.Error())
 				continue
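The `json.Unmarshal` to `common.Unmarshal` swap suggests a project-local JSON wrapper, so the backing decoder can be changed in one place rather than at every call site. A minimal sketch, assuming the wrapper simply delegates to the standard library (the real one may use a faster drop-in decoder):

// common/json.go (sketch; the delegation target is an assumption)
package common

import "encoding/json"

// Unmarshal is a thin indirection over JSON decoding so the decoder
// can be replaced project-wide without touching call sites.
func Unmarshal(data []byte, v any) error {
	return json.Unmarshal(data, v)
}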
@@ -130,7 +130,7 @@ func tencentStreamHandler(c *gin.Context, info *relaycommon.RelayInfo, resp *http.Response
 	service.CloseResponseBodyGracefully(resp)
-	return service.ResponseText2Usage(c, responseText, info.UpstreamModelName, info.PromptTokens), nil
+	return service.ResponseText2Usage(c, responseText, info.UpstreamModelName, info.GetEstimatePromptTokens()), nil
 }

 func tencentHandler(c *gin.Context, info *relaycommon.RelayInfo, resp *http.Response) (*dto.Usage, *types.NewAPIError) {
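The recurring `info.PromptTokens` to `info.GetEstimatePromptTokens()` substitution indicates the estimate now sits behind an accessor on `relaycommon.RelayInfo`. A minimal sketch, assuming a dedicated estimate field with a fallback to the direct count; the field names and fallback behavior are assumptions, not the committed code:

// package relaycommon (illustrative sketch; fields are hypothetical)
type RelayInfo struct {
	PromptTokens         int // tokens counted directly from the request
	EstimatePromptTokens int // hypothetical: locally estimated count
	// ... other relay metadata
}

// GetEstimatePromptTokens returns the estimated prompt token count,
// falling back to the direct count when no estimate was recorded.
func (info *RelayInfo) GetEstimatePromptTokens() int {
	if info.EstimatePromptTokens > 0 {
		return info.EstimatePromptTokens
	}
	return info.PromptTokens
}

Call sites such as `ResponseText2Usage` then consume the accessor instead of reading the field directly, which is exactly the substitution visible in both hunks above.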