mirror of
https://github.com/Wei-Shaw/claude-relay-service.git
synced 2026-01-22 16:43:35 +00:00
refactor: 重构 handleChatCompletions 函数模块化
- 使用策略模式处理不同后端(Claude/OpenAI/Gemini)
- 添加 OpenAI chat/completions 兼容支持
This commit is contained in:
@@ -12,9 +12,28 @@ const logger = require('../utils/logger')
|
||||
const redis = require('../models/redis')
|
||||
const { getEffectiveModel, parseVendorPrefixedModel } = require('../utils/modelHelper')
|
||||
const sessionHelper = require('../utils/sessionHelper')
|
||||
const openaiToClaude = require('../services/openaiToClaude')
|
||||
const claudeCodeHeadersService = require('../services/claudeCodeHeadersService')
|
||||
|
||||
const router = express.Router()
|
||||
|
||||
// 🔍 Detect which backend service a model name belongs to.
// Unknown, empty, or missing model names fall back to Claude.
function detectBackendFromModel(modelName) {
  // Prefix → backend lookup table; first match wins.
  const prefixToBackend = [
    ['claude-', 'claude'],
    ['gpt-', 'openai'],
    ['gemini-', 'gemini']
  ]
  if (modelName) {
    for (const [prefix, backend] of prefixToBackend) {
      if (modelName.startsWith(prefix)) {
        return backend
      }
    }
  }
  return 'claude' // default backend
}
|
||||
|
||||
// 🔧 共享的消息处理函数
|
||||
async function handleMessagesRequest(req, res) {
|
||||
try {
|
||||
@@ -778,25 +797,20 @@ router.post('/v1/messages', authenticateApiKey, handleMessagesRequest)
|
||||
// 🚀 Claude API messages 端点 - /claude/v1/messages (别名)
|
||||
router.post('/claude/v1/messages', authenticateApiKey, handleMessagesRequest)
|
||||
|
||||
// 📋 模型列表端点 - Claude Code 客户端需要
|
||||
// 📋 模型列表端点 - OpenAI 兼容,返回所有支持的模型
|
||||
router.get('/v1/models', authenticateApiKey, async (req, res) => {
|
||||
try {
|
||||
// 返回支持的模型列表
|
||||
// 返回支持的模型列表(Claude + OpenAI + Gemini)
|
||||
const models = [
|
||||
// Claude 模型
|
||||
{
|
||||
id: 'claude-3-5-sonnet-20241022',
|
||||
id: 'claude-sonnet-4-5-20250929',
|
||||
object: 'model',
|
||||
created: 1669599635,
|
||||
owned_by: 'anthropic'
|
||||
},
|
||||
{
|
||||
id: 'claude-3-5-haiku-20241022',
|
||||
object: 'model',
|
||||
created: 1669599635,
|
||||
owned_by: 'anthropic'
|
||||
},
|
||||
{
|
||||
id: 'claude-3-opus-20240229',
|
||||
id: 'claude-opus-4-1-20250805',
|
||||
object: 'model',
|
||||
created: 1669599635,
|
||||
owned_by: 'anthropic'
|
||||
@@ -806,6 +820,92 @@ router.get('/v1/models', authenticateApiKey, async (req, res) => {
|
||||
object: 'model',
|
||||
created: 1669599635,
|
||||
owned_by: 'anthropic'
|
||||
},
|
||||
{
|
||||
id: 'claude-opus-4-20250514',
|
||||
object: 'model',
|
||||
created: 1669599635,
|
||||
owned_by: 'anthropic'
|
||||
},
|
||||
{
|
||||
id: 'claude-3-7-sonnet-20250219',
|
||||
object: 'model',
|
||||
created: 1669599635,
|
||||
owned_by: 'anthropic'
|
||||
},
|
||||
{
|
||||
id: 'claude-3-5-sonnet-20241022',
|
||||
object: 'model',
|
||||
created: 1729036800,
|
||||
owned_by: 'anthropic'
|
||||
},
|
||||
{
|
||||
id: 'claude-3-5-haiku-20241022',
|
||||
object: 'model',
|
||||
created: 1729036800,
|
||||
owned_by: 'anthropic'
|
||||
},
|
||||
{
|
||||
id: 'claude-3-haiku-20240307',
|
||||
object: 'model',
|
||||
created: 1709251200,
|
||||
owned_by: 'anthropic'
|
||||
},
|
||||
{
|
||||
id: 'claude-3-opus-20240229',
|
||||
object: 'model',
|
||||
created: 1736726400,
|
||||
owned_by: 'anthropic'
|
||||
},
|
||||
// OpenAI 模型
|
||||
{
|
||||
id: 'gpt-4o',
|
||||
object: 'model',
|
||||
created: 1715367600,
|
||||
owned_by: 'openai'
|
||||
},
|
||||
{
|
||||
id: 'gpt-4o-mini',
|
||||
object: 'model',
|
||||
created: 1721088000,
|
||||
owned_by: 'openai'
|
||||
},
|
||||
{
|
||||
id: 'gpt-4-turbo',
|
||||
object: 'model',
|
||||
created: 1712102400,
|
||||
owned_by: 'openai'
|
||||
},
|
||||
{
|
||||
id: 'gpt-4',
|
||||
object: 'model',
|
||||
created: 1687132800,
|
||||
owned_by: 'openai'
|
||||
},
|
||||
{
|
||||
id: 'gpt-3.5-turbo',
|
||||
object: 'model',
|
||||
created: 1677649200,
|
||||
owned_by: 'openai'
|
||||
},
|
||||
// Gemini 模型
|
||||
{
|
||||
id: 'gemini-1.5-pro',
|
||||
object: 'model',
|
||||
created: 1707868800,
|
||||
owned_by: 'google'
|
||||
},
|
||||
{
|
||||
id: 'gemini-1.5-flash',
|
||||
object: 'model',
|
||||
created: 1715990400,
|
||||
owned_by: 'google'
|
||||
},
|
||||
{
|
||||
id: 'gemini-2.0-flash-exp',
|
||||
object: 'model',
|
||||
created: 1733011200,
|
||||
owned_by: 'google'
|
||||
}
|
||||
]
|
||||
|
||||
@@ -1038,5 +1138,385 @@ router.post('/v1/messages/count_tokens', authenticateApiKey, async (req, res) =>
|
||||
}
|
||||
})
|
||||
|
||||
// 🔍 Validate an OpenAI chat/completions request body.
// Returns { valid: true } on success, or { valid: false, error } with an
// OpenAI-style error object describing the first problem found.
function validateChatCompletionRequest(body) {
  // All validation failures share the same error type/code.
  const invalid = (message) => ({
    valid: false,
    error: {
      message,
      type: 'invalid_request_error',
      code: 'invalid_request'
    }
  })

  if (!Array.isArray(body?.messages)) {
    return invalid('Missing or invalid field: messages (must be an array)')
  }

  if (body.messages.length === 0) {
    return invalid('Messages array cannot be empty')
  }

  return { valid: true }
}
|
||||
|
||||
// 🌊 Handle a streaming Claude request on behalf of an OpenAI-format client.
// Sets SSE headers, relays through claudeRelayService, and converts each
// Claude SSE chunk into an OpenAI chat.completion.chunk on the fly.
// Returns { abortController } so the caller can reference the cancellation handle.
async function handleClaudeStreamRequest(
  claudeRequest,
  apiKeyData,
  req,
  res,
  accountId,
  requestedModel
) {
  logger.info(`🌊 Processing OpenAI stream request for model: ${requestedModel}`)

  // SSE response headers; X-Accel-Buffering: no disables proxy buffering
  res.setHeader('Content-Type', 'text/event-stream')
  res.setHeader('Cache-Control', 'no-cache')
  res.setHeader('Connection', 'keep-alive')
  res.setHeader('X-Accel-Buffering', 'no')

  // Abort controller for client-initiated cancellation.
  // NOTE(review): this controller is aborted on disconnect below but is not
  // visibly passed to relayStreamRequestWithUsageCapture — confirm the relay
  // actually observes this signal, otherwise the upstream request keeps running.
  const abortController = new AbortController()

  // Abort when the client disconnects mid-stream
  req.on('close', () => {
    if (abortController && !abortController.signal.aborted) {
      logger.info('🔌 Client disconnected, aborting Claude request')
      abortController.abort()
    }
  })

  // Claude Code headers previously captured for this account
  const claudeCodeHeaders = await claudeCodeHeadersService.getAccountHeaders(accountId)

  // Relay upstream with usage capture and a chunk transformer
  await claudeRelayService.relayStreamRequestWithUsageCapture(
    claudeRequest,
    apiKeyData,
    res,
    claudeCodeHeaders,
    (usage) => {
      // Record usage only once both token counts are present
      if (usage && usage.input_tokens !== undefined && usage.output_tokens !== undefined) {
        const model = usage.model || claudeRequest.model

        // Fire-and-forget: usage-recording failures are logged, never raised
        apiKeyService
          .recordUsageWithDetails(apiKeyData.id, usage, model, accountId)
          .catch((error) => {
            logger.error('❌ Failed to record usage:', error)
          })
      }
    },
    // Stream transformer: Claude SSE -> OpenAI SSE. The IIFE fixes a single
    // completion id (chatcmpl-...) shared across every chunk of this response.
    (() => {
      const sessionId = `chatcmpl-${Math.random().toString(36).substring(2, 15)}${Math.random().toString(36).substring(2, 15)}`
      return (chunk) => openaiToClaude.convertStreamChunk(chunk, requestedModel, sessionId)
    })(),
    {
      betaHeader:
        'oauth-2025-04-20,claude-code-20250219,interleaved-thinking-2025-05-14,fine-grained-tool-streaming-2025-05-14'
    }
  )

  return { abortController }
}
|
||||
|
||||
// 📄 Handle a non-streaming Claude request on behalf of an OpenAI-format client.
// Relays the request, converts the Claude reply to OpenAI format, and records
// usage. Returns { success, data } on success or { error: { status, data } }.
async function handleClaudeNonStreamRequest(
  claudeRequest,
  apiKeyData,
  req,
  res,
  accountId,
  requestedModel
) {
  logger.info(`📄 Processing OpenAI non-stream request for model: ${requestedModel}`)

  // Build an OpenAI-style error result envelope.
  const errorResult = (status, message, type, code) => ({
    error: { status, data: { error: { message, type, code } } }
  })

  // Claude Code headers previously captured for this account
  const claudeCodeHeaders = await claudeCodeHeadersService.getAccountHeaders(accountId)

  // Relay the request upstream to Claude
  const claudeResponse = await claudeRelayService.relayRequest(
    claudeRequest,
    apiKeyData,
    req,
    res,
    claudeCodeHeaders,
    { betaHeader: 'oauth-2025-04-20' }
  )

  // Parse the upstream body; a malformed payload maps to a 502
  let claudeData
  try {
    claudeData = JSON.parse(claudeResponse.body)
  } catch (error) {
    logger.error('❌ Failed to parse Claude response:', error)
    return errorResult(502, 'Invalid response from Claude API', 'api_error', 'invalid_response')
  }

  // Propagate upstream error responses in OpenAI error shape
  if (claudeResponse.statusCode >= 400) {
    return errorResult(
      claudeResponse.statusCode,
      claudeData.error?.message || 'Claude API error',
      claudeData.error?.type || 'api_error',
      claudeData.error?.code || 'unknown_error'
    )
  }

  // Translate the Claude response into OpenAI chat-completion format
  const openaiResponse = openaiToClaude.convertResponse(claudeData, requestedModel)

  // Record token usage asynchronously; failures are logged, never raised
  if (claudeData.usage) {
    const { usage } = claudeData
    apiKeyService
      .recordUsageWithDetails(apiKeyData.id, usage, claudeRequest.model, accountId)
      .catch((error) => {
        logger.error('❌ Failed to record usage:', error)
      })
  }

  return { success: true, data: openaiResponse }
}
|
||||
|
||||
// 🤖 Route an OpenAI-format request to the Claude backend.
// Converts the payload, enforces per-key model restrictions, selects an
// account (sticky via session hash), and dispatches to the streaming or
// non-streaming pipeline.
async function handleClaudeBackend(req, res, apiKeyData, requestedModel) {
  // Translate the OpenAI payload into Claude's native request format
  const claudeRequest = openaiToClaude.convertRequest(req.body)

  // Enforce per-key model restrictions (only when a non-empty list is configured)
  const modelBlocked =
    apiKeyData.enableModelRestriction &&
    apiKeyData.restrictedModels?.length > 0 &&
    !apiKeyData.restrictedModels.includes(claudeRequest.model)
  if (modelBlocked) {
    return res.status(403).json({
      error: {
        message: `Model ${requestedModel} is not allowed for this API key`,
        type: 'invalid_request_error',
        code: 'model_not_allowed'
      }
    })
  }

  // Session hash keeps sticky routing to the same account
  const sessionHash = sessionHelper.generateSessionHash(claudeRequest)

  // Pick an available Claude account for this key/session/model
  const accountSelection = await unifiedClaudeScheduler.selectAccountForApiKey(
    apiKeyData,
    sessionHash,
    claudeRequest.model
  )
  const { accountId } = accountSelection

  // Streaming pipeline: returns the abort controller to the caller
  if (claudeRequest.stream) {
    const { abortController } = await handleClaudeStreamRequest(
      claudeRequest,
      apiKeyData,
      req,
      res,
      accountId,
      requestedModel
    )
    return { abortController }
  }

  // Non-streaming pipeline: send either the error or the converted response
  const result = await handleClaudeNonStreamRequest(
    claudeRequest,
    apiKeyData,
    req,
    res,
    accountId,
    requestedModel
  )

  if (result.error) {
    return res.status(result.error.status).json(result.error.data)
  }

  return res.json(result.data)
}
|
||||
|
||||
// 🔧 OpenAI backend handler — placeholder until native OpenAI relay support
// exists for this endpoint; always answers 501 Not Implemented.
async function handleOpenAIBackend(req, res, _apiKeyData, _requestedModel) {
  const payload = {
    error: {
      message: 'OpenAI backend not yet implemented for this endpoint',
      type: 'not_implemented',
      code: 'not_implemented'
    }
  }
  return res.status(501).json(payload)
}
|
||||
|
||||
// 💎 Gemini backend handler — placeholder until native Gemini relay support
// exists for this endpoint; always answers 501 Not Implemented.
async function handleGeminiBackend(req, res, _apiKeyData, _requestedModel) {
  const payload = {
    error: {
      message: 'Gemini backend not yet implemented for this endpoint',
      type: 'not_implemented',
      code: 'not_implemented'
    }
  }
  return res.status(501).json(payload)
}
|
||||
|
||||
// 🗺️ Strategy map: backend name (from detectBackendFromModel) → handler.
// Each handler shares the signature (req, res, apiKeyData, requestedModel).
const backendHandlers = {
  claude: handleClaudeBackend,
  openai: handleOpenAIBackend,
  gemini: handleGeminiBackend
}
|
||||
|
||||
// 🚀 OpenAI-compatible chat/completions handler with smart backend routing.
// Validates the request, detects the target backend from the model name, and
// dispatches to the matching strategy in backendHandlers. Fix: the previous
// version kept the stream handler's abort controller in a local and nulled it
// in a finally block — dead code, since nulling a local performs no cleanup;
// the stream handler itself already aborts on client disconnect.
async function handleChatCompletions(req, res) {
  const startTime = Date.now()

  try {
    const apiKeyData = req.apiKey

    // Validate required parameters (messages array)
    const validation = validateChatCompletionRequest(req.body)
    if (!validation.valid) {
      return res.status(400).json({ error: validation.error })
    }

    // Detect the backend from the requested model (defaults to Claude)
    const requestedModel = req.body.model || 'claude-3-5-sonnet-20241022'
    const backend = detectBackendFromModel(requestedModel)

    logger.debug(
      `📥 Received OpenAI format request for model: ${requestedModel}, backend: ${backend}`
    )

    // Strategy dispatch to the backend-specific handler
    const handler = backendHandlers[backend]
    if (!handler) {
      return res.status(500).json({
        error: {
          message: `Unsupported backend: ${backend}`,
          type: 'server_error',
          code: 'unsupported_backend'
        }
      })
    }

    // The handler writes the response itself; its return value (possibly an
    // abort controller from the streaming path) needs no handling here.
    await handler(req, res, apiKeyData, requestedModel)

    const duration = Date.now() - startTime
    logger.info(`✅ OpenAI chat/completions request completed in ${duration}ms`)
    return undefined
  } catch (error) {
    logger.error('❌ OpenAI chat/completions error:', error)

    // Only write an error response if nothing was sent yet (streaming may
    // have already started)
    const status = error.status || 500
    if (!res.headersSent) {
      res.status(status).json({
        error: {
          message: error.message || 'Internal server error',
          type: 'server_error',
          code: 'internal_error'
        }
      })
    }
    return undefined
  }
}
|
||||
|
||||
// 🔧 OpenAI-compatible legacy completions handler: converts a prompt-style
// request into chat format and delegates to handleChatCompletions.
// Fix: a request with no body at all previously threw a TypeError inside the
// try block and surfaced as a 500; it is now rejected up front as a 400.
async function handleCompletions(req, res) {
  try {
    // Validate required parameters (guard the body itself as well)
    if (!req.body || !req.body.prompt) {
      return res.status(400).json({
        error: {
          message: 'Prompt is required',
          type: 'invalid_request_error',
          code: 'invalid_request'
        }
      })
    }

    // Map the legacy completions payload onto the chat/completions shape:
    // the prompt becomes a single user message, other parameters pass through.
    const chatRequest = {
      model: req.body.model || 'claude-3-5-sonnet-20241022',
      messages: [
        {
          role: 'user',
          content: req.body.prompt
        }
      ],
      max_tokens: req.body.max_tokens,
      temperature: req.body.temperature,
      top_p: req.body.top_p,
      stream: req.body.stream,
      stop: req.body.stop,
      n: req.body.n || 1,
      presence_penalty: req.body.presence_penalty,
      frequency_penalty: req.body.frequency_penalty,
      logit_bias: req.body.logit_bias,
      user: req.body.user
    }

    // Reuse the chat/completions pipeline (backend detection, relay, streaming)
    req.body = chatRequest
    await handleChatCompletions(req, res)
    return undefined
  } catch (error) {
    logger.error('❌ OpenAI completions error:', error)
    if (!res.headersSent) {
      res.status(500).json({
        error: {
          message: 'Failed to process completion request',
          type: 'server_error',
          code: 'internal_error'
        }
      })
    }
    return undefined
  }
}
|
||||
|
||||
// 📋 OpenAI-compatible chat/completions endpoint
router.post('/v1/chat/completions', authenticateApiKey, handleChatCompletions)

// 🔧 OpenAI-compatible completions endpoint (legacy prompt format)
router.post('/v1/completions', authenticateApiKey, handleCompletions)

module.exports = router
// Also expose the shared messages handler so other route modules can mount it
module.exports.handleMessagesRequest = handleMessagesRequest
|
||||
|
||||
@@ -31,10 +31,25 @@ class OpenAIToClaudeConverter {
|
||||
stream: openaiRequest.stream || false
|
||||
}
|
||||
|
||||
// Claude Code 必需的系统消息
|
||||
// 定义 Claude Code 的默认系统提示词
|
||||
const claudeCodeSystemMessage = "You are Claude Code, Anthropic's official CLI for Claude."
|
||||
|
||||
claudeRequest.system = claudeCodeSystemMessage
|
||||
// 如果 OpenAI 请求中包含系统消息,提取并检查
|
||||
const systemMessage = this._extractSystemMessage(openaiRequest.messages)
|
||||
if (systemMessage && systemMessage.includes('You are currently in Xcode')) {
|
||||
// Xcode 系统提示词
|
||||
claudeRequest.system = systemMessage
|
||||
logger.info(
|
||||
`🔍 Xcode request detected, using Xcode system prompt (${systemMessage.length} chars)`
|
||||
)
|
||||
logger.debug(`📋 System prompt preview: ${systemMessage.substring(0, 150)}...`)
|
||||
} else {
|
||||
// 使用 Claude Code 默认系统提示词
|
||||
claudeRequest.system = claudeCodeSystemMessage
|
||||
logger.debug(
|
||||
`📋 Using Claude Code default system prompt${systemMessage ? ' (ignored custom prompt)' : ''}`
|
||||
)
|
||||
}
|
||||
|
||||
// 处理停止序列
|
||||
if (openaiRequest.stop) {
|
||||
|
||||
Reference in New Issue
Block a user