From c8f2abfedc0983d9e6cad31f4a7360adc192839a Mon Sep 17 00:00:00 2001 From: mouyong Date: Mon, 4 Aug 2025 17:40:57 +0800 Subject: [PATCH 1/3] =?UTF-8?q?feat:=20=E5=A4=84=E7=90=86=20openai=20?= =?UTF-8?q?=E6=A0=BC=E5=BC=8F=E8=AF=B7=E6=B1=82?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/routes/openaiGeminiRoutes.js | 41 +++++++++++++++++++++++----- src/utils/logger.js | 47 ++++++++++++++++++++++++++++++-- 2 files changed, 79 insertions(+), 9 deletions(-) diff --git a/src/routes/openaiGeminiRoutes.js b/src/routes/openaiGeminiRoutes.js index 7a48975c..7599b98c 100644 --- a/src/routes/openaiGeminiRoutes.js +++ b/src/routes/openaiGeminiRoutes.js @@ -27,6 +27,7 @@ function checkPermissions(apiKeyData, requiredPermission = 'gemini') { router.post('/v1/chat/completions', authenticateApiKey, async (req, res) => { const startTime = Date.now(); let abortController = null; + let account = null; // Declare account outside try block for error handling try { const apiKeyData = req.apiKey; @@ -41,16 +42,42 @@ router.post('/v1/chat/completions', authenticateApiKey, async (req, res) => { } }); } + // 处理请求体结构 - 支持多种格式 + let requestBody = req.body; + + // 如果请求体被包装在 body 字段中,解包它 + if (req.body.body && typeof req.body.body === 'object') { + requestBody = req.body.body; + } + + // 从 URL 路径中提取模型信息(如果存在) + let urlModel = null; + const urlPath = req.body?.config?.url || req.originalUrl || req.url; + const modelMatch = urlPath.match(/\/([^\/]+):(?:stream)?[Gg]enerateContent/); + if (modelMatch) { + urlModel = modelMatch[1]; + logger.debug(`Extracted model from URL: ${urlModel}`); + } // 提取请求参数 const { - messages, - model = 'gemini-2.0-flash-exp', + messages: requestMessages, + contents, + model: bodyModel = 'gemini-2.0-flash-exp', temperature = 0.7, max_tokens = 4096, stream = false - } = req.body; - + } = requestBody; + + // 优先使用 URL 中的模型,其次是请求体中的模型 + const model = urlModel || bodyModel; + + // 支持两种格式: OpenAI 的 messages 或 Gemini 的 contents + let messages = requestMessages; + if (contents && Array.isArray(contents)) { + messages = contents; + } + // 验证必需参数 if (!messages || !Array.isArray(messages) || messages.length === 0) { return res.status(400).json({ @@ -79,7 +106,7 @@ router.post('/v1/chat/completions', authenticateApiKey, async (req, res) => { const sessionHash = generateSessionHash(req); // 选择可用的 Gemini 账户 - const account = await geminiAccountService.selectAvailableAccount( + account = await geminiAccountService.selectAvailableAccount( apiKeyData.id, sessionHash ); @@ -153,8 +180,8 @@ router.post('/v1/chat/completions', authenticateApiKey, async (req, res) => { // 处理速率限制 if (error.status === 429) { - if (req.apiKey && req.account) { - await geminiAccountService.setAccountRateLimited(req.account.id, true); + if (req.apiKey && account) { + await geminiAccountService.setAccountRateLimited(account.id, true); } } diff --git a/src/utils/logger.js b/src/utils/logger.js index e905e90a..87765dad 100644 --- a/src/utils/logger.js +++ b/src/utils/logger.js @@ -5,6 +5,49 @@ const path = require('path'); const fs = require('fs'); const os = require('os'); +// 安全的 JSON 序列化函数,处理循环引用 +const safeStringify = (obj, maxDepth = 3) => { + const seen = new WeakSet(); + + const replacer = (key, value, depth = 0) => { + if (depth > maxDepth) return '[Max Depth Reached]'; + + if (value !== null && typeof value === 'object') { + if (seen.has(value)) { + return '[Circular Reference]'; + } + seen.add(value); + + // 过滤掉常见的循环引用对象 + if (value.constructor) { + const constructorName = 
value.constructor.name; + if (['Socket', 'TLSSocket', 'HTTPParser', 'IncomingMessage', 'ServerResponse'].includes(constructorName)) { + return `[${constructorName} Object]`; + } + } + + // 递归处理对象属性 + if (Array.isArray(value)) { + return value.map((item, index) => replacer(index, item, depth + 1)); + } else { + const result = {}; + for (const [k, v] of Object.entries(value)) { + result[k] = replacer(k, v, depth + 1); + } + return result; + } + } + + return value; + }; + + try { + return JSON.stringify(replacer('', obj)); + } catch (error) { + return JSON.stringify({ error: 'Failed to serialize object', message: error.message }); + } +}; + // 📝 增强的日志格式 const createLogFormat = (colorize = false) => { const formats = [ @@ -31,7 +74,7 @@ const createLogFormat = (colorize = false) => { // 添加元数据 if (metadata && Object.keys(metadata).length > 0) { - logMessage += ` | ${JSON.stringify(metadata)}`; + logMessage += ` | ${safeStringify(metadata)}`; } // 添加其他属性 @@ -42,7 +85,7 @@ const createLogFormat = (colorize = false) => { delete additionalData.stack; if (Object.keys(additionalData).length > 0) { - logMessage += ` | ${JSON.stringify(additionalData)}`; + logMessage += ` | ${safeStringify(additionalData)}`; } return stack ? `${logMessage}\n${stack}` : logMessage; From 2eee902988cabdfc86ec8fdcfba7d75568c4f541 Mon Sep 17 00:00:00 2001 From: mouyong Date: Mon, 4 Aug 2025 18:20:39 +0800 Subject: [PATCH 2/3] =?UTF-8?q?feat:=20=E5=A4=84=E7=90=86=20openai=20?= =?UTF-8?q?=E6=A0=BC=E5=BC=8F=E8=AF=B7=E6=B1=82?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/routes/openaiGeminiRoutes.has-convert.js | 424 +++++++++++++++ src/routes/openaiGeminiRoutes.js | 307 ++++++++++- ...naiGeminiRoutes.js.nohas-convert copy.back | 318 ++++++++++++ .../openaiGeminiRoutes.js.version-08041512 | 490 ++++++++++++++++++ src/services/geminiAccountService.js | 6 +- 5 files changed, 1517 insertions(+), 28 deletions(-) create mode 100644 src/routes/openaiGeminiRoutes.has-convert.js create mode 100644 src/routes/openaiGeminiRoutes.js.nohas-convert copy.back create mode 100644 src/routes/openaiGeminiRoutes.js.version-08041512 diff --git a/src/routes/openaiGeminiRoutes.has-convert.js b/src/routes/openaiGeminiRoutes.has-convert.js new file mode 100644 index 00000000..8267ee24 --- /dev/null +++ b/src/routes/openaiGeminiRoutes.has-convert.js @@ -0,0 +1,424 @@ +const express = require('express'); +const router = express.Router(); +const logger = require('../utils/logger'); +const { authenticateApiKey } = require('../middleware/auth'); +const geminiAccountService = require('../services/geminiAccountService'); +const unifiedGeminiScheduler = require('../services/unifiedGeminiScheduler'); +const sessionHelper = require('../utils/sessionHelper'); + +// 检查 API Key 权限 +function checkPermissions(apiKeyData, requiredPermission = 'gemini') { + const permissions = apiKeyData.permissions || 'all'; + return permissions === 'all' || permissions === requiredPermission; +} + +// 转换 OpenAI 消息格式到 Gemini 格式 +function convertMessagesToGemini(messages) { + const contents = []; + let systemInstruction = ''; + + // 辅助函数:提取文本内容 + function extractTextContent(content) { + if (typeof content === 'string') { + return content; + } + + if (Array.isArray(content)) { + return content.map(item => { + if (typeof item === 'string') { + return item; + } + if (typeof item === 'object' && item.type === 'text' && item.text) { + return item.text; + } + if (typeof item === 'object' && item.text) { + return item.text; + } + return 
''; + }).join(''); + } + + if (typeof content === 'object' && content.text) { + return content.text; + } + + return String(content); + } + + for (const message of messages) { + const textContent = extractTextContent(message.content); + + if (message.role === 'system') { + systemInstruction += (systemInstruction ? '\n\n' : '') + textContent; + } else if (message.role === 'user') { + contents.push({ + role: 'user', + parts: [{ text: textContent }] + }); + } else if (message.role === 'assistant') { + contents.push({ + role: 'model', + parts: [{ text: textContent }] + }); + } + } + + return { contents, systemInstruction }; +} + +// 转换 Gemini 响应到 OpenAI 格式 +function convertGeminiResponseToOpenAI(geminiResponse, model, stream = false) { + if (stream) { + // 处理流式响应 - 原样返回 SSE 数据 + return geminiResponse; + } else { + // 非流式响应转换 + if (geminiResponse.candidates && geminiResponse.candidates.length > 0) { + const candidate = geminiResponse.candidates[0]; + const content = candidate.content?.parts?.[0]?.text || ''; + const finishReason = candidate.finishReason?.toLowerCase() || 'stop'; + + // 计算 token 使用量 + const usage = geminiResponse.usageMetadata || { + promptTokenCount: 0, + candidatesTokenCount: 0, + totalTokenCount: 0 + }; + + return { + id: `chatcmpl-${Date.now()}`, + object: 'chat.completion', + created: Math.floor(Date.now() / 1000), + model: model, + choices: [{ + index: 0, + message: { + role: 'assistant', + content: content + }, + finish_reason: finishReason + }], + usage: { + prompt_tokens: usage.promptTokenCount, + completion_tokens: usage.candidatesTokenCount, + total_tokens: usage.totalTokenCount + } + }; + } else { + throw new Error('No response from Gemini'); + } + } +} + +// OpenAI 兼容的聊天完成端点 +router.post('/v1/chat/completions', authenticateApiKey, async (req, res) => { + const startTime = Date.now(); + let abortController = null; + + try { + const apiKeyData = req.apiKey; + + // 检查权限 + if (!checkPermissions(apiKeyData, 'gemini')) { + return res.status(403).json({ + error: { + message: 'This API key does not have permission to access Gemini', + type: 'permission_denied', + code: 'permission_denied' + } + }); + } + + // 提取请求参数 + const { + messages, + model = 'gemini-2.0-flash-exp', + temperature = 0.7, + max_tokens = 4096, + stream = false + } = req.body; + + // 验证必需参数 + if (!messages || !Array.isArray(messages) || messages.length === 0) { + return res.status(400).json({ + error: { + message: 'Messages array is required', + type: 'invalid_request_error', + code: 'invalid_request' + } + }); + } + + // 检查模型限制 + if (apiKeyData.enableModelRestriction && apiKeyData.restrictedModels.length > 0) { + if (!apiKeyData.restrictedModels.includes(model)) { + return res.status(403).json({ + error: { + message: `Model ${model} is not allowed for this API key`, + type: 'invalid_request_error', + code: 'model_not_allowed' + } + }); + } + } + + // 转换消息格式 + const { contents, systemInstruction } = convertMessagesToGemini(messages); + + // 构建 Gemini 请求体 + const geminiRequestBody = { + contents, + generationConfig: { + temperature, + maxOutputTokens: max_tokens, + candidateCount: 1 + } + }; + + if (systemInstruction) { + geminiRequestBody.systemInstruction = { parts: [{ text: systemInstruction }] }; + } + + // 生成会话哈希 + const sessionHash = sessionHelper.generateSessionHash(req.body); + + // 使用统一调度选择账号 + const { accountId } = await unifiedGeminiScheduler.selectAccountForApiKey(req.apiKey, sessionHash, model); + const account = await geminiAccountService.getAccount(accountId); + const { accessToken, 
refreshToken } = account; + + logger.info(`Using Gemini account: ${accountId} for API key: ${apiKeyData.id}`); + + // 创建中止控制器 + abortController = new AbortController(); + + // 处理客户端断开连接 + req.on('close', () => { + if (abortController && !abortController.signal.aborted) { + logger.info('Client disconnected, aborting Gemini request'); + abortController.abort(); + } + }); + + const client = await geminiAccountService.getOauthClient(accessToken, refreshToken); + + if (stream) { + // 流式响应 + logger.info('StreamGenerateContent request', { + model: model, + projectId: account.projectId, + apiKeyId: req.apiKey?.id || 'unknown' + }); + + const streamResponse = await geminiAccountService.generateContentStream( + client, + { model, request: geminiRequestBody }, + null, // user_prompt_id + account.projectId, + req.apiKey?.id, // 使用 API Key ID 作为 session ID + abortController.signal // 传递中止信号 + ); + + // 设置 SSE 响应头 + res.setHeader('Content-Type', 'text/event-stream'); + res.setHeader('Cache-Control', 'no-cache'); + res.setHeader('Connection', 'keep-alive'); + res.setHeader('X-Accel-Buffering', 'no'); + + // 处理流式响应,转换为 OpenAI 格式 + let buffer = ''; + + streamResponse.on('data', (chunk) => { + try { + buffer += chunk.toString(); + const lines = buffer.split('\n'); + buffer = lines.pop() || ''; // 保留最后一个不完整的行 + + for (const line of lines) { + if (!line.trim()) continue; + + // 处理 SSE 格式 + let jsonData = line; + if (line.startsWith('data: ')) { + jsonData = line.substring(6).trim(); + } + + if (!jsonData || jsonData === '[DONE]') continue; + + try { + const data = JSON.parse(jsonData); + + // 转换为 OpenAI 流式格式 + if (data.candidates && data.candidates.length > 0) { + const candidate = data.candidates[0]; + const content = candidate.content?.parts?.[0]?.text || ''; + const finishReason = candidate.finishReason?.toLowerCase(); + + const openaiChunk = { + id: `chatcmpl-${Date.now()}`, + object: 'chat.completion.chunk', + created: Math.floor(Date.now() / 1000), + model: model, + choices: [{ + index: 0, + delta: { + content: content + }, + finish_reason: finishReason === 'stop' ? 
'stop' : null + }] + }; + + res.write(`data: ${JSON.stringify(openaiChunk)}\n\n`); + + // 如果结束了,发送最终的 [DONE] + if (finishReason === 'stop') { + res.write('data: [DONE]\n\n'); + } + } + } catch (e) { + logger.debug('Error parsing JSON line:', e.message); + } + } + } catch (error) { + logger.error('Stream processing error:', error); + if (!res.headersSent) { + res.status(500).json({ + error: { + message: error.message || 'Stream error', + type: 'api_error' + } + }); + } + } + }); + + streamResponse.on('end', () => { + logger.info('Stream completed successfully'); + if (!res.headersSent) { + res.write('data: [DONE]\n\n'); + } + res.end(); + }); + + streamResponse.on('error', (error) => { + logger.error('Stream error:', error); + if (!res.headersSent) { + res.status(500).json({ + error: { + message: error.message || 'Stream error', + type: 'api_error' + } + }); + } else { + res.end(); + } + }); + + } else { + // 非流式响应 + logger.info('GenerateContent request', { + model: model, + projectId: account.projectId, + apiKeyId: req.apiKey?.id || 'unknown' + }); + + const response = await geminiAccountService.generateContent( + client, + { model, request: geminiRequestBody }, + null, // user_prompt_id + account.projectId, + req.apiKey?.id // 使用 API Key ID 作为 session ID + ); + + // 转换为 OpenAI 格式并返回 + const openaiResponse = convertGeminiResponseToOpenAI(response, model, false); + res.json(openaiResponse); + } + + const duration = Date.now() - startTime; + logger.info(`OpenAI-Gemini request completed in ${duration}ms`); + + } catch (error) { + logger.error('OpenAI-Gemini request error:', error); + + // 返回 OpenAI 格式的错误响应 + const status = error.status || 500; + const errorResponse = { + error: { + message: error.message || 'Internal server error', + type: 'server_error', + code: 'internal_error' + } + }; + + if (!res.headersSent) { + res.status(status).json(errorResponse); + } + } finally { + // 清理资源 + if (abortController) { + abortController = null; + } + } +}); + +// 获取模型列表端点(OpenAI 兼容) +router.get('/v1/models', authenticateApiKey, async (req, res) => { + try { + const apiKeyData = req.apiKey; + + // 检查权限 + if (!checkPermissions(apiKeyData, 'gemini')) { + return res.status(403).json({ + error: { + message: 'This API key does not have permission to access Gemini', + type: 'permission_denied', + code: 'permission_denied' + } + }); + } + + // 返回支持的 Gemini 模型列表(OpenAI 格式) + const models = [ + { + id: 'gemini-2.0-flash-exp', + object: 'model', + created: Math.floor(Date.now() / 1000), + owned_by: 'google' + }, + { + id: 'gemini-2.0-flash-thinking-exp', + object: 'model', + created: Math.floor(Date.now() / 1000), + owned_by: 'google' + }, + { + id: 'gemini-1.5-pro', + object: 'model', + created: Math.floor(Date.now() / 1000), + owned_by: 'google' + }, + { + id: 'gemini-1.5-flash', + object: 'model', + created: Math.floor(Date.now() / 1000), + owned_by: 'google' + } + ]; + + res.json({ + object: 'list', + data: models + }); + } catch (error) { + logger.error('Error getting models:', error); + res.status(500).json({ + error: { + message: error.message || 'Internal server error', + type: 'server_error' + } + }); + } +}); + +module.exports = router; \ No newline at end of file diff --git a/src/routes/openaiGeminiRoutes.js b/src/routes/openaiGeminiRoutes.js index 7599b98c..b0eca2b9 100644 --- a/src/routes/openaiGeminiRoutes.js +++ b/src/routes/openaiGeminiRoutes.js @@ -3,7 +3,7 @@ const router = express.Router(); const logger = require('../utils/logger'); const { authenticateApiKey } = require('../middleware/auth'); 
const geminiAccountService = require('../services/geminiAccountService'); -const { sendGeminiRequest, getAvailableModels } = require('../services/geminiRelayService'); +const { getAvailableModels } = require('../services/geminiRelayService'); const crypto = require('crypto'); // 生成会话哈希 @@ -23,6 +23,142 @@ function checkPermissions(apiKeyData, requiredPermission = 'gemini') { return permissions === 'all' || permissions === requiredPermission; } +// 转换 OpenAI 消息格式到 Gemini 格式 +function convertMessagesToGemini(messages) { + const contents = []; + let systemInstruction = ''; + + // 辅助函数:提取文本内容 + function extractTextContent(content) { + // 处理 null 或 undefined + if (content == null) { + return ''; + } + + // 处理字符串 + if (typeof content === 'string') { + return content; + } + + // 处理数组格式的内容 + if (Array.isArray(content)) { + return content.map(item => { + if (item == null) return ''; + if (typeof item === 'string') { + return item; + } + if (typeof item === 'object') { + // 处理 {type: 'text', text: '...'} 格式 + if (item.type === 'text' && item.text) { + return item.text; + } + // 处理 {text: '...'} 格式 + if (item.text) { + return item.text; + } + // 处理嵌套的对象或数组 + if (item.content) { + return extractTextContent(item.content); + } + } + return ''; + }).join(''); + } + + // 处理对象格式的内容 + if (typeof content === 'object') { + // 处理 {text: '...'} 格式 + if (content.text) { + return content.text; + } + // 处理 {content: '...'} 格式 + if (content.content) { + return extractTextContent(content.content); + } + // 处理 {parts: [{text: '...'}]} 格式 + if (content.parts && Array.isArray(content.parts)) { + return content.parts.map(part => { + if (part && part.text) { + return part.text; + } + return ''; + }).join(''); + } + } + + // 最后的后备选项:只有在内容确实不为空且有意义时才转换为字符串 + if (content !== undefined && content !== null && content !== '' && typeof content !== 'object') { + return String(content); + } + + return ''; + } + + for (const message of messages) { + const textContent = extractTextContent(message.content); + + if (message.role === 'system') { + systemInstruction += (systemInstruction ? 
'\n\n' : '') + textContent; + } else if (message.role === 'user') { + contents.push({ + role: 'user', + parts: [{ text: textContent }] + }); + } else if (message.role === 'assistant') { + contents.push({ + role: 'model', + parts: [{ text: textContent }] + }); + } + } + + return { contents, systemInstruction }; +} + +// 转换 Gemini 响应到 OpenAI 格式 +function convertGeminiResponseToOpenAI(geminiResponse, model, stream = false) { + if (stream) { + // 处理流式响应 - 原样返回 SSE 数据 + return geminiResponse; + } else { + // 非流式响应转换 + if (geminiResponse.candidates && geminiResponse.candidates.length > 0) { + const candidate = geminiResponse.candidates[0]; + const content = candidate.content?.parts?.[0]?.text || ''; + const finishReason = candidate.finishReason?.toLowerCase() || 'stop'; + + // 计算 token 使用量 + const usage = geminiResponse.usageMetadata || { + promptTokenCount: 0, + candidatesTokenCount: 0, + totalTokenCount: 0 + }; + + return { + id: `chatcmpl-${Date.now()}`, + object: 'chat.completion', + created: Math.floor(Date.now() / 1000), + model: model, + choices: [{ + index: 0, + message: { + role: 'assistant', + content: content + }, + finish_reason: finishReason + }], + usage: { + prompt_tokens: usage.promptTokenCount, + completion_tokens: usage.candidatesTokenCount, + total_tokens: usage.totalTokenCount + } + }; + } else { + throw new Error('No response from Gemini'); + } + } +} + // OpenAI 兼容的聊天完成端点 router.post('/v1/chat/completions', authenticateApiKey, async (req, res) => { const startTime = Date.now(); @@ -62,7 +198,7 @@ router.post('/v1/chat/completions', authenticateApiKey, async (req, res) => { // 提取请求参数 const { messages: requestMessages, - contents, + contents: requestContents, model: bodyModel = 'gemini-2.0-flash-exp', temperature = 0.7, max_tokens = 4096, @@ -74,8 +210,8 @@ router.post('/v1/chat/completions', authenticateApiKey, async (req, res) => { // 支持两种格式: OpenAI 的 messages 或 Gemini 的 contents let messages = requestMessages; - if (contents && Array.isArray(contents)) { - messages = contents; + if (requestContents && Array.isArray(requestContents)) { + messages = requestContents; } // 验证必需参数 @@ -102,6 +238,23 @@ router.post('/v1/chat/completions', authenticateApiKey, async (req, res) => { } } + // 转换消息格式 + const { contents: geminiContents, systemInstruction } = convertMessagesToGemini(messages); + + // 构建 Gemini 请求体 + const geminiRequestBody = { + contents: geminiContents, + generationConfig: { + temperature, + maxOutputTokens: max_tokens, + candidateCount: 1 + } + }; + + if (systemInstruction) { + geminiRequestBody.systemInstruction = { parts: [{ text: systemInstruction }] }; + } + // 生成会话哈希用于粘性会话 const sessionHash = generateSessionHash(req); @@ -137,39 +290,141 @@ router.post('/v1/chat/completions', authenticateApiKey, async (req, res) => { } }); - // 发送请求到 Gemini(已经返回 OpenAI 格式) - const geminiResponse = await sendGeminiRequest({ - messages, - model, - temperature, - maxTokens: max_tokens, - stream, - accessToken: account.accessToken, - proxy: account.proxy, - apiKeyId: apiKeyData.id, - signal: abortController.signal, - projectId: account.projectId - }); + // 获取OAuth客户端 + const client = await geminiAccountService.getOauthClient(account.accessToken, account.refreshToken); + let project = 'verdant-wares-464411-k9'; if (stream) { + // 流式响应 + logger.info('StreamGenerateContent request', { + model: model, + projectId: account.projectId, + apiKeyId: apiKeyData.id + }); + + const streamResponse = await geminiAccountService.generateContentStream( + client, + { model, request: geminiRequestBody 
}, + null, // user_prompt_id + project || account.projectId, + apiKeyData.id, // 使用 API Key ID 作为 session ID + abortController.signal // 传递中止信号 + ); + // 设置流式响应头 res.setHeader('Content-Type', 'text/event-stream'); res.setHeader('Cache-Control', 'no-cache'); res.setHeader('Connection', 'keep-alive'); res.setHeader('X-Accel-Buffering', 'no'); - // 流式传输响应 - for await (const chunk of geminiResponse) { - if (abortController.signal.aborted) { - break; - } - res.write(chunk); - } + // 处理流式响应,转换为 OpenAI 格式 + let buffer = ''; + + streamResponse.on('data', (chunk) => { + try { + buffer += chunk.toString(); + const lines = buffer.split('\n'); + buffer = lines.pop() || ''; // 保留最后一个不完整的行 + + for (const line of lines) { + if (!line.trim()) continue; + + // 处理 SSE 格式 + let jsonData = line; + if (line.startsWith('data: ')) { + jsonData = line.substring(6).trim(); + } + + if (!jsonData || jsonData === '[DONE]') continue; + + try { + const data = JSON.parse(jsonData); + + // 转换为 OpenAI 流式格式 + if (data.candidates && data.candidates.length > 0) { + const candidate = data.candidates[0]; + const content = candidate.content?.parts?.[0]?.text || ''; + const finishReason = candidate.finishReason?.toLowerCase(); + + const openaiChunk = { + id: `chatcmpl-${Date.now()}`, + object: 'chat.completion.chunk', + created: Math.floor(Date.now() / 1000), + model: model, + choices: [{ + index: 0, + delta: { + content: content + }, + finish_reason: finishReason === 'stop' ? 'stop' : null + }] + }; + + res.write(`data: ${JSON.stringify(openaiChunk)}\n\n`); + + // 如果结束了,发送最终的 [DONE] + if (finishReason === 'stop') { + res.write('data: [DONE]\n\n'); + } + } + } catch (e) { + logger.debug('Error parsing JSON line:', e.message); + } + } + } catch (error) { + logger.error('Stream processing error:', error); + if (!res.headersSent) { + res.status(500).json({ + error: { + message: error.message || 'Stream error', + type: 'api_error' + } + }); + } + } + }); + + streamResponse.on('end', () => { + logger.info('Stream completed successfully'); + if (!res.headersSent) { + res.write('data: [DONE]\n\n'); + } + res.end(); + }); + + streamResponse.on('error', (error) => { + logger.error('Stream error:', error); + if (!res.headersSent) { + res.status(500).json({ + error: { + message: error.message || 'Stream error', + type: 'api_error' + } + }); + } else { + res.end(); + } + }); - res.end(); } else { // 非流式响应 - res.json(geminiResponse); + logger.info('GenerateContent request', { + model: model, + projectId: account.projectId, + apiKeyId: apiKeyData.id + }); + + const response = await geminiAccountService.generateContent( + client, + { model, request: geminiRequestBody }, + null, // user_prompt_id + account.projectId, + apiKeyData.id // 使用 API Key ID 作为 session ID + ); + + // 转换为 OpenAI 格式并返回 + const openaiResponse = convertGeminiResponseToOpenAI(response, model, false); + res.json(openaiResponse); } const duration = Date.now() - startTime; diff --git a/src/routes/openaiGeminiRoutes.js.nohas-convert copy.back b/src/routes/openaiGeminiRoutes.js.nohas-convert copy.back new file mode 100644 index 00000000..7599b98c --- /dev/null +++ b/src/routes/openaiGeminiRoutes.js.nohas-convert copy.back @@ -0,0 +1,318 @@ +const express = require('express'); +const router = express.Router(); +const logger = require('../utils/logger'); +const { authenticateApiKey } = require('../middleware/auth'); +const geminiAccountService = require('../services/geminiAccountService'); +const { sendGeminiRequest, getAvailableModels } = require('../services/geminiRelayService'); 
+const crypto = require('crypto'); + +// 生成会话哈希 +function generateSessionHash(req) { + const sessionData = [ + req.headers['user-agent'], + req.ip, + req.headers['authorization']?.substring(0, 20) + ].filter(Boolean).join(':'); + + return crypto.createHash('sha256').update(sessionData).digest('hex'); +} + +// 检查 API Key 权限 +function checkPermissions(apiKeyData, requiredPermission = 'gemini') { + const permissions = apiKeyData.permissions || 'all'; + return permissions === 'all' || permissions === requiredPermission; +} + +// OpenAI 兼容的聊天完成端点 +router.post('/v1/chat/completions', authenticateApiKey, async (req, res) => { + const startTime = Date.now(); + let abortController = null; + let account = null; // Declare account outside try block for error handling + + try { + const apiKeyData = req.apiKey; + + // 检查权限 + if (!checkPermissions(apiKeyData, 'gemini')) { + return res.status(403).json({ + error: { + message: 'This API key does not have permission to access Gemini', + type: 'permission_denied', + code: 'permission_denied' + } + }); + } + // 处理请求体结构 - 支持多种格式 + let requestBody = req.body; + + // 如果请求体被包装在 body 字段中,解包它 + if (req.body.body && typeof req.body.body === 'object') { + requestBody = req.body.body; + } + + // 从 URL 路径中提取模型信息(如果存在) + let urlModel = null; + const urlPath = req.body?.config?.url || req.originalUrl || req.url; + const modelMatch = urlPath.match(/\/([^\/]+):(?:stream)?[Gg]enerateContent/); + if (modelMatch) { + urlModel = modelMatch[1]; + logger.debug(`Extracted model from URL: ${urlModel}`); + } + + // 提取请求参数 + const { + messages: requestMessages, + contents, + model: bodyModel = 'gemini-2.0-flash-exp', + temperature = 0.7, + max_tokens = 4096, + stream = false + } = requestBody; + + // 优先使用 URL 中的模型,其次是请求体中的模型 + const model = urlModel || bodyModel; + + // 支持两种格式: OpenAI 的 messages 或 Gemini 的 contents + let messages = requestMessages; + if (contents && Array.isArray(contents)) { + messages = contents; + } + + // 验证必需参数 + if (!messages || !Array.isArray(messages) || messages.length === 0) { + return res.status(400).json({ + error: { + message: 'Messages array is required', + type: 'invalid_request_error', + code: 'invalid_request' + } + }); + } + + // 检查模型限制 + if (apiKeyData.enableModelRestriction && apiKeyData.restrictedModels.length > 0) { + if (!apiKeyData.restrictedModels.includes(model)) { + return res.status(403).json({ + error: { + message: `Model ${model} is not allowed for this API key`, + type: 'invalid_request_error', + code: 'model_not_allowed' + } + }); + } + } + + // 生成会话哈希用于粘性会话 + const sessionHash = generateSessionHash(req); + + // 选择可用的 Gemini 账户 + account = await geminiAccountService.selectAvailableAccount( + apiKeyData.id, + sessionHash + ); + + if (!account) { + return res.status(503).json({ + error: { + message: 'No available Gemini accounts', + type: 'service_unavailable', + code: 'service_unavailable' + } + }); + } + + logger.info(`Using Gemini account: ${account.id} for API key: ${apiKeyData.id}`); + + // 标记账户被使用 + await geminiAccountService.markAccountUsed(account.id); + + // 创建中止控制器 + abortController = new AbortController(); + + // 处理客户端断开连接 + req.on('close', () => { + if (abortController && !abortController.signal.aborted) { + logger.info('Client disconnected, aborting Gemini request'); + abortController.abort(); + } + }); + + // 发送请求到 Gemini(已经返回 OpenAI 格式) + const geminiResponse = await sendGeminiRequest({ + messages, + model, + temperature, + maxTokens: max_tokens, + stream, + accessToken: account.accessToken, + proxy: account.proxy, + 
apiKeyId: apiKeyData.id, + signal: abortController.signal, + projectId: account.projectId + }); + + if (stream) { + // 设置流式响应头 + res.setHeader('Content-Type', 'text/event-stream'); + res.setHeader('Cache-Control', 'no-cache'); + res.setHeader('Connection', 'keep-alive'); + res.setHeader('X-Accel-Buffering', 'no'); + + // 流式传输响应 + for await (const chunk of geminiResponse) { + if (abortController.signal.aborted) { + break; + } + res.write(chunk); + } + + res.end(); + } else { + // 非流式响应 + res.json(geminiResponse); + } + + const duration = Date.now() - startTime; + logger.info(`OpenAI-Gemini request completed in ${duration}ms`); + + } catch (error) { + logger.error('OpenAI-Gemini request error:', error); + + // 处理速率限制 + if (error.status === 429) { + if (req.apiKey && account) { + await geminiAccountService.setAccountRateLimited(account.id, true); + } + } + + // 返回 OpenAI 格式的错误响应 + const status = error.status || 500; + const errorResponse = { + error: error.error || { + message: error.message || 'Internal server error', + type: 'server_error', + code: 'internal_error' + } + }; + + res.status(status).json(errorResponse); + } finally { + // 清理资源 + if (abortController) { + abortController = null; + } + } +}); + +// OpenAI 兼容的模型列表端点 +router.get('/v1/models', authenticateApiKey, async (req, res) => { + try { + const apiKeyData = req.apiKey; + + // 检查权限 + if (!checkPermissions(apiKeyData, 'gemini')) { + return res.status(403).json({ + error: { + message: 'This API key does not have permission to access Gemini', + type: 'permission_denied', + code: 'permission_denied' + } + }); + } + + // 选择账户获取模型列表 + const account = await geminiAccountService.selectAvailableAccount(apiKeyData.id); + + let models = []; + + if (account) { + // 获取实际的模型列表 + models = await getAvailableModels(account.accessToken, account.proxy); + } else { + // 返回默认模型列表 + models = [ + { + id: 'gemini-2.0-flash-exp', + object: 'model', + created: Math.floor(Date.now() / 1000), + owned_by: 'google' + } + ]; + } + + // 如果启用了模型限制,过滤模型列表 + if (apiKeyData.enableModelRestriction && apiKeyData.restrictedModels.length > 0) { + models = models.filter(model => apiKeyData.restrictedModels.includes(model.id)); + } + + res.json({ + object: 'list', + data: models + }); + + } catch (error) { + logger.error('Failed to get OpenAI-Gemini models:', error); + res.status(500).json({ + error: { + message: 'Failed to retrieve models', + type: 'server_error', + code: 'internal_error' + } + }); + } +}); + +// OpenAI 兼容的模型详情端点 +router.get('/v1/models/:model', authenticateApiKey, async (req, res) => { + try { + const apiKeyData = req.apiKey; + const modelId = req.params.model; + + // 检查权限 + if (!checkPermissions(apiKeyData, 'gemini')) { + return res.status(403).json({ + error: { + message: 'This API key does not have permission to access Gemini', + type: 'permission_denied', + code: 'permission_denied' + } + }); + } + + // 检查模型限制 + if (apiKeyData.enableModelRestriction && apiKeyData.restrictedModels.length > 0) { + if (!apiKeyData.restrictedModels.includes(modelId)) { + return res.status(404).json({ + error: { + message: `Model '${modelId}' not found`, + type: 'invalid_request_error', + code: 'model_not_found' + } + }); + } + } + + // 返回模型信息 + res.json({ + id: modelId, + object: 'model', + created: Math.floor(Date.now() / 1000), + owned_by: 'google', + permission: [], + root: modelId, + parent: null + }); + + } catch (error) { + logger.error('Failed to get model details:', error); + res.status(500).json({ + error: { + message: 'Failed to retrieve model details', + 
type: 'server_error', + code: 'internal_error' + } + }); + } +}); + +module.exports = router; \ No newline at end of file diff --git a/src/routes/openaiGeminiRoutes.js.version-08041512 b/src/routes/openaiGeminiRoutes.js.version-08041512 new file mode 100644 index 00000000..5c0aae65 --- /dev/null +++ b/src/routes/openaiGeminiRoutes.js.version-08041512 @@ -0,0 +1,490 @@ +const express = require('express'); +const router = express.Router(); +const logger = require('../utils/logger'); +const { authenticateApiKey } = require('../middleware/auth'); +const geminiAccountService = require('../services/geminiAccountService'); +const unifiedGeminiScheduler = require('../services/unifiedGeminiScheduler'); +const sessionHelper = require('../utils/sessionHelper'); + +// 检查 API Key 权限 +function checkPermissions(apiKeyData, requiredPermission = 'gemini') { + const permissions = apiKeyData.permissions || 'all'; + return permissions === 'all' || permissions === requiredPermission; +} + +// 转换 OpenAI 消息格式到 Gemini 格式 +function convertMessagesToGemini(messages) { + const contents = []; + let systemInstruction = ''; + + // 辅助函数:提取文本内容 + function extractTextContent(content) { + // 处理 null 或 undefined + if (content == null) { + return ''; + } + + // 处理字符串 + if (typeof content === 'string') { + return content; + } + + // 处理数组格式的内容 + if (Array.isArray(content)) { + return content.map(item => { + if (item == null) return ''; + if (typeof item === 'string') { + return item; + } + if (typeof item === 'object') { + // 处理 {type: 'text', text: '...'} 格式 + if (item.type === 'text' && item.text) { + return item.text; + } + // 处理 {text: '...'} 格式 + if (item.text) { + return item.text; + } + // 处理嵌套的对象或数组 + if (item.content) { + return extractTextContent(item.content); + } + } + return ''; + }).join(''); + } + + // 处理对象格式的内容 + if (typeof content === 'object') { + // 处理 {text: '...'} 格式 + if (content.text) { + return content.text; + } + // 处理 {content: '...'} 格式 + if (content.content) { + return extractTextContent(content.content); + } + // 处理 {parts: [{text: '...'}]} 格式 + if (content.parts && Array.isArray(content.parts)) { + return content.parts.map(part => { + if (part && part.text) { + return part.text; + } + return ''; + }).join(''); + } + } + + // 最后的后备选项:只有在内容确实不为空且有意义时才转换为字符串 + if (content !== undefined && content !== null && content !== '' && typeof content !== 'object') { + return String(content); + } + + return ''; + } + + for (const message of messages) { + const textContent = extractTextContent(message.content); + + if (message.role === 'system') { + systemInstruction += (systemInstruction ? 
'\n\n' : '') + textContent; + } else if (message.role === 'user') { + contents.push({ + role: 'user', + parts: [{ text: textContent }] + }); + } else if (message.role === 'assistant') { + contents.push({ + role: 'model', + parts: [{ text: textContent }] + }); + } + } + + return { contents, systemInstruction }; +} + +// 转换 Gemini 响应到 OpenAI 格式 +function convertGeminiResponseToOpenAI(geminiResponse, model, stream = false) { + if (stream) { + // 处理流式响应 - 原样返回 SSE 数据 + return geminiResponse; + } else { + // 非流式响应转换 + if (geminiResponse.candidates && geminiResponse.candidates.length > 0) { + const candidate = geminiResponse.candidates[0]; + const content = candidate.content?.parts?.[0]?.text || ''; + const finishReason = candidate.finishReason?.toLowerCase() || 'stop'; + + // 计算 token 使用量 + const usage = geminiResponse.usageMetadata || { + promptTokenCount: 0, + candidatesTokenCount: 0, + totalTokenCount: 0 + }; + + return { + id: `chatcmpl-${Date.now()}`, + object: 'chat.completion', + created: Math.floor(Date.now() / 1000), + model: model, + choices: [{ + index: 0, + message: { + role: 'assistant', + content: content + }, + finish_reason: finishReason + }], + usage: { + prompt_tokens: usage.promptTokenCount, + completion_tokens: usage.candidatesTokenCount, + total_tokens: usage.totalTokenCount + } + }; + } else { + throw new Error('No response from Gemini'); + } + } +} + +// OpenAI 兼容的聊天完成端点 +router.post('/v1/chat/completions', authenticateApiKey, async (req, res) => { + const startTime = Date.now(); + let abortController = null; + + try { + const apiKeyData = req.apiKey; + + // 检查权限 + if (!checkPermissions(apiKeyData, 'gemini')) { + return res.status(403).json({ + error: { + message: 'This API key does not have permission to access Gemini', + type: 'permission_denied', + code: 'permission_denied' + } + }); + } + + // 处理请求体结构 - 支持多种格式 + let requestBody = req.body; + + // 如果请求体被包装在 body 字段中,解包它 + if (req.body.body && typeof req.body.body === 'object') { + requestBody = req.body.body; + } + + // 从 URL 路径中提取模型信息(如果存在) + let urlModel = null; + const urlPath = req.body?.config?.url || req.originalUrl || req.url; + const modelMatch = urlPath.match(/\/([^\/]+):(?:stream)?[Gg]enerateContent/); + if (modelMatch) { + urlModel = modelMatch[1]; + logger.debug(`Extracted model from URL: ${urlModel}`); + } + + // 提取请求参数 + const { + messages: requestMessages, + contents: requestContents, + model: bodyModel = 'gemini-2.0-flash-exp', + temperature = 0.7, + max_tokens = 4096, + stream = false + } = requestBody; + + // 优先使用 URL 中的模型,其次是请求体中的模型 + const model = urlModel || bodyModel; + + // 支持两种格式: OpenAI 的 messages 或 Gemini 的 contents + let messages = requestMessages; + if (requestContents && Array.isArray(requestContents)) { + messages = requestContents; + } + + // 验证必需参数 + if (!messages || !Array.isArray(messages) || messages.length === 0) { + return res.status(400).json({ + error: { + message: 'Messages array is required', + type: 'invalid_request_error', + code: 'invalid_request' + } + }); + } + + // 检查模型限制 + if (apiKeyData.enableModelRestriction && apiKeyData.restrictedModels.length > 0) { + if (!apiKeyData.restrictedModels.includes(model)) { + return res.status(403).json({ + error: { + message: `Model ${model} is not allowed for this API key`, + type: 'invalid_request_error', + code: 'model_not_allowed' + } + }); + } + } + + // 转换消息格式 + const { contents, systemInstruction } = convertMessagesToGemini(messages); + + // 构建 Gemini 请求体 + const geminiRequestBody = { + contents, + generationConfig: { + 
temperature, + maxOutputTokens: max_tokens, + candidateCount: 1 + } + }; + + if (systemInstruction) { + geminiRequestBody.systemInstruction = { parts: [{ text: systemInstruction }] }; + } + + // 生成会话哈希 + const sessionHash = sessionHelper.generateSessionHash(req.body); + + // 使用统一调度选择账号 + const { accountId } = await unifiedGeminiScheduler.selectAccountForApiKey(req.apiKey, sessionHash, model); + const account = await geminiAccountService.getAccount(accountId); + const { accessToken, refreshToken } = account; + + logger.info(`Using Gemini account: ${accountId} for API key: ${apiKeyData.id}`); + + // 创建中止控制器 + abortController = new AbortController(); + + // 处理客户端断开连接 + req.on('close', () => { + if (abortController && !abortController.signal.aborted) { + logger.info('Client disconnected, aborting Gemini request'); + abortController.abort(); + } + }); + + const client = await geminiAccountService.getOauthClient(accessToken, refreshToken); + + const project = 'verdant-wares-464411-k9'; + if (stream) { + // 流式响应 + logger.info('StreamGenerateContent request', { + model: model, + projectId: project || account.projectId, + apiKeyId: req.apiKey?.id || 'unknown' + }); + + const streamResponse = await geminiAccountService.generateContentStream( + client, + { model, request: geminiRequestBody }, + null, // user_prompt_id + project || account.projectId, + req.apiKey?.id, // 使用 API Key ID 作为 session ID + abortController.signal // 传递中止信号 + ); + + // 设置 SSE 响应头 + res.setHeader('Content-Type', 'text/event-stream'); + res.setHeader('Cache-Control', 'no-cache'); + res.setHeader('Connection', 'keep-alive'); + res.setHeader('X-Accel-Buffering', 'no'); + + // 处理流式响应,转换为 OpenAI 格式 + let buffer = ''; + + streamResponse.on('data', (chunk) => { + try { + buffer += chunk.toString(); + const lines = buffer.split('\n'); + buffer = lines.pop() || ''; // 保留最后一个不完整的行 + + for (const line of lines) { + if (!line.trim()) continue; + + // 处理 SSE 格式 + let jsonData = line; + if (line.startsWith('data: ')) { + jsonData = line.substring(6).trim(); + } + + if (!jsonData || jsonData === '[DONE]') continue; + + try { + const data = JSON.parse(jsonData); + + // 转换为 OpenAI 流式格式 + if (data.candidates && data.candidates.length > 0) { + const candidate = data.candidates[0]; + const content = candidate.content?.parts?.[0]?.text || ''; + const finishReason = candidate.finishReason?.toLowerCase(); + + const openaiChunk = { + id: `chatcmpl-${Date.now()}`, + object: 'chat.completion.chunk', + created: Math.floor(Date.now() / 1000), + model: model, + choices: [{ + index: 0, + delta: { + content: content + }, + finish_reason: finishReason === 'stop' ? 
'stop' : null + }] + }; + + res.write(`data: ${JSON.stringify(openaiChunk)}\n\n`); + + // 如果结束了,发送最终的 [DONE] + if (finishReason === 'stop') { + res.write('data: [DONE]\n\n'); + } + } + } catch (e) { + logger.debug('Error parsing JSON line:', e.message); + } + } + } catch (error) { + logger.error('Stream processing error:', error); + if (!res.headersSent) { + res.status(500).json({ + error: { + message: error.message || 'Stream error', + type: 'api_error' + } + }); + } + } + }); + + streamResponse.on('end', () => { + logger.info('Stream completed successfully'); + if (!res.headersSent) { + res.write('data: [DONE]\n\n'); + } + res.end(); + }); + + streamResponse.on('error', (error) => { + logger.error('Stream error:', error); + if (!res.headersSent) { + res.status(500).json({ + error: { + message: error.message || 'Stream error', + type: 'api_error' + } + }); + } else { + res.end(); + } + }); + + } else { + // 非流式响应 + logger.info('GenerateContent request', { + model: model, + projectId: project || account.projectId, + apiKeyId: req.apiKey?.id || 'unknown' + }); + + const response = await geminiAccountService.generateContent( + client, + { model, request: geminiRequestBody }, + null, // user_prompt_id + project || account.projectId, + req.apiKey?.id // 使用 API Key ID 作为 session ID + ); + + // 转换为 OpenAI 格式并返回 + const openaiResponse = convertGeminiResponseToOpenAI(response, model, false); + res.json(openaiResponse); + } + + const duration = Date.now() - startTime; + logger.info(`OpenAI-Gemini request completed in ${duration}ms`); + + } catch (error) { + logger.error('OpenAI-Gemini request error:', error); + + // 返回 OpenAI 格式的错误响应 + const status = error.status || 500; + const errorResponse = { + error: { + message: error.message || 'Internal server error', + type: 'server_error', + code: 'internal_error' + } + }; + + if (!res.headersSent) { + res.status(status).json(errorResponse); + } + } finally { + // 清理资源 + if (abortController) { + abortController = null; + } + } +}); + +// 获取模型列表端点(OpenAI 兼容) +router.get('/v1/models', authenticateApiKey, async (req, res) => { + try { + const apiKeyData = req.apiKey; + + // 检查权限 + if (!checkPermissions(apiKeyData, 'gemini')) { + return res.status(403).json({ + error: { + message: 'This API key does not have permission to access Gemini', + type: 'permission_denied', + code: 'permission_denied' + } + }); + } + + // 返回支持的 Gemini 模型列表(OpenAI 格式) + const models = [ + { + id: 'gemini-2.0-flash-exp', + object: 'model', + created: Math.floor(Date.now() / 1000), + owned_by: 'google' + }, + { + id: 'gemini-2.0-flash-thinking-exp', + object: 'model', + created: Math.floor(Date.now() / 1000), + owned_by: 'google' + }, + { + id: 'gemini-1.5-pro', + object: 'model', + created: Math.floor(Date.now() / 1000), + owned_by: 'google' + }, + { + id: 'gemini-1.5-flash', + object: 'model', + created: Math.floor(Date.now() / 1000), + owned_by: 'google' + } + ]; + + res.json({ + object: 'list', + data: models + }); + } catch (error) { + logger.error('Error getting models:', error); + res.status(500).json({ + error: { + message: error.message || 'Internal server error', + type: 'server_error' + } + }); + } +}); + +module.exports = router; \ No newline at end of file diff --git a/src/services/geminiAccountService.js b/src/services/geminiAccountService.js index 348fdf3c..0aeb4587 100644 --- a/src/services/geminiAccountService.js +++ b/src/services/geminiAccountService.js @@ -977,7 +977,7 @@ async function generateContent(client, requestData, userPromptId, projectId = nu sessionId }); - 
const response = await axios({ + const axiosConfig = { url: `${CODE_ASSIST_ENDPOINT}/${CODE_ASSIST_API_VERSION}:generateContent`, method: 'POST', headers: { @@ -986,7 +986,9 @@ async function generateContent(client, requestData, userPromptId, projectId = nu }, data: request, timeout: 60000, // 生成内容可能需要更长时间 - }); + }; + + const response = await axios(axiosConfig); logger.info('✅ generateContent API调用成功'); return response.data; From 54ad8452c3cc3334e3608ab3fdf7beeeed71db2c Mon Sep 17 00:00:00 2001 From: mouyong Date: Mon, 4 Aug 2025 20:34:17 +0800 Subject: [PATCH 3/3] =?UTF-8?q?feat:=20gemini=20=E6=B5=81=E5=BC=8F?= =?UTF-8?q?=E5=93=8D=E5=BA=94?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/routes/openaiGeminiRoutes.has-convert.js | 424 --------------- src/routes/openaiGeminiRoutes.js | 113 ++-- ...naiGeminiRoutes.js.nohas-convert copy.back | 318 ------------ .../openaiGeminiRoutes.js.version-08041512 | 490 ------------------ src/services/geminiAccountService.js | 2 +- .../src/components/accounts/AccountForm.vue | 36 +- 6 files changed, 100 insertions(+), 1283 deletions(-) delete mode 100644 src/routes/openaiGeminiRoutes.has-convert.js delete mode 100644 src/routes/openaiGeminiRoutes.js.nohas-convert copy.back delete mode 100644 src/routes/openaiGeminiRoutes.js.version-08041512 diff --git a/src/routes/openaiGeminiRoutes.has-convert.js b/src/routes/openaiGeminiRoutes.has-convert.js deleted file mode 100644 index 8267ee24..00000000 --- a/src/routes/openaiGeminiRoutes.has-convert.js +++ /dev/null @@ -1,424 +0,0 @@ -const express = require('express'); -const router = express.Router(); -const logger = require('../utils/logger'); -const { authenticateApiKey } = require('../middleware/auth'); -const geminiAccountService = require('../services/geminiAccountService'); -const unifiedGeminiScheduler = require('../services/unifiedGeminiScheduler'); -const sessionHelper = require('../utils/sessionHelper'); - -// 检查 API Key 权限 -function checkPermissions(apiKeyData, requiredPermission = 'gemini') { - const permissions = apiKeyData.permissions || 'all'; - return permissions === 'all' || permissions === requiredPermission; -} - -// 转换 OpenAI 消息格式到 Gemini 格式 -function convertMessagesToGemini(messages) { - const contents = []; - let systemInstruction = ''; - - // 辅助函数:提取文本内容 - function extractTextContent(content) { - if (typeof content === 'string') { - return content; - } - - if (Array.isArray(content)) { - return content.map(item => { - if (typeof item === 'string') { - return item; - } - if (typeof item === 'object' && item.type === 'text' && item.text) { - return item.text; - } - if (typeof item === 'object' && item.text) { - return item.text; - } - return ''; - }).join(''); - } - - if (typeof content === 'object' && content.text) { - return content.text; - } - - return String(content); - } - - for (const message of messages) { - const textContent = extractTextContent(message.content); - - if (message.role === 'system') { - systemInstruction += (systemInstruction ? 
'\n\n' : '') + textContent; - } else if (message.role === 'user') { - contents.push({ - role: 'user', - parts: [{ text: textContent }] - }); - } else if (message.role === 'assistant') { - contents.push({ - role: 'model', - parts: [{ text: textContent }] - }); - } - } - - return { contents, systemInstruction }; -} - -// 转换 Gemini 响应到 OpenAI 格式 -function convertGeminiResponseToOpenAI(geminiResponse, model, stream = false) { - if (stream) { - // 处理流式响应 - 原样返回 SSE 数据 - return geminiResponse; - } else { - // 非流式响应转换 - if (geminiResponse.candidates && geminiResponse.candidates.length > 0) { - const candidate = geminiResponse.candidates[0]; - const content = candidate.content?.parts?.[0]?.text || ''; - const finishReason = candidate.finishReason?.toLowerCase() || 'stop'; - - // 计算 token 使用量 - const usage = geminiResponse.usageMetadata || { - promptTokenCount: 0, - candidatesTokenCount: 0, - totalTokenCount: 0 - }; - - return { - id: `chatcmpl-${Date.now()}`, - object: 'chat.completion', - created: Math.floor(Date.now() / 1000), - model: model, - choices: [{ - index: 0, - message: { - role: 'assistant', - content: content - }, - finish_reason: finishReason - }], - usage: { - prompt_tokens: usage.promptTokenCount, - completion_tokens: usage.candidatesTokenCount, - total_tokens: usage.totalTokenCount - } - }; - } else { - throw new Error('No response from Gemini'); - } - } -} - -// OpenAI 兼容的聊天完成端点 -router.post('/v1/chat/completions', authenticateApiKey, async (req, res) => { - const startTime = Date.now(); - let abortController = null; - - try { - const apiKeyData = req.apiKey; - - // 检查权限 - if (!checkPermissions(apiKeyData, 'gemini')) { - return res.status(403).json({ - error: { - message: 'This API key does not have permission to access Gemini', - type: 'permission_denied', - code: 'permission_denied' - } - }); - } - - // 提取请求参数 - const { - messages, - model = 'gemini-2.0-flash-exp', - temperature = 0.7, - max_tokens = 4096, - stream = false - } = req.body; - - // 验证必需参数 - if (!messages || !Array.isArray(messages) || messages.length === 0) { - return res.status(400).json({ - error: { - message: 'Messages array is required', - type: 'invalid_request_error', - code: 'invalid_request' - } - }); - } - - // 检查模型限制 - if (apiKeyData.enableModelRestriction && apiKeyData.restrictedModels.length > 0) { - if (!apiKeyData.restrictedModels.includes(model)) { - return res.status(403).json({ - error: { - message: `Model ${model} is not allowed for this API key`, - type: 'invalid_request_error', - code: 'model_not_allowed' - } - }); - } - } - - // 转换消息格式 - const { contents, systemInstruction } = convertMessagesToGemini(messages); - - // 构建 Gemini 请求体 - const geminiRequestBody = { - contents, - generationConfig: { - temperature, - maxOutputTokens: max_tokens, - candidateCount: 1 - } - }; - - if (systemInstruction) { - geminiRequestBody.systemInstruction = { parts: [{ text: systemInstruction }] }; - } - - // 生成会话哈希 - const sessionHash = sessionHelper.generateSessionHash(req.body); - - // 使用统一调度选择账号 - const { accountId } = await unifiedGeminiScheduler.selectAccountForApiKey(req.apiKey, sessionHash, model); - const account = await geminiAccountService.getAccount(accountId); - const { accessToken, refreshToken } = account; - - logger.info(`Using Gemini account: ${accountId} for API key: ${apiKeyData.id}`); - - // 创建中止控制器 - abortController = new AbortController(); - - // 处理客户端断开连接 - req.on('close', () => { - if (abortController && !abortController.signal.aborted) { - logger.info('Client disconnected, aborting Gemini 
request'); - abortController.abort(); - } - }); - - const client = await geminiAccountService.getOauthClient(accessToken, refreshToken); - - if (stream) { - // 流式响应 - logger.info('StreamGenerateContent request', { - model: model, - projectId: account.projectId, - apiKeyId: req.apiKey?.id || 'unknown' - }); - - const streamResponse = await geminiAccountService.generateContentStream( - client, - { model, request: geminiRequestBody }, - null, // user_prompt_id - account.projectId, - req.apiKey?.id, // 使用 API Key ID 作为 session ID - abortController.signal // 传递中止信号 - ); - - // 设置 SSE 响应头 - res.setHeader('Content-Type', 'text/event-stream'); - res.setHeader('Cache-Control', 'no-cache'); - res.setHeader('Connection', 'keep-alive'); - res.setHeader('X-Accel-Buffering', 'no'); - - // 处理流式响应,转换为 OpenAI 格式 - let buffer = ''; - - streamResponse.on('data', (chunk) => { - try { - buffer += chunk.toString(); - const lines = buffer.split('\n'); - buffer = lines.pop() || ''; // 保留最后一个不完整的行 - - for (const line of lines) { - if (!line.trim()) continue; - - // 处理 SSE 格式 - let jsonData = line; - if (line.startsWith('data: ')) { - jsonData = line.substring(6).trim(); - } - - if (!jsonData || jsonData === '[DONE]') continue; - - try { - const data = JSON.parse(jsonData); - - // 转换为 OpenAI 流式格式 - if (data.candidates && data.candidates.length > 0) { - const candidate = data.candidates[0]; - const content = candidate.content?.parts?.[0]?.text || ''; - const finishReason = candidate.finishReason?.toLowerCase(); - - const openaiChunk = { - id: `chatcmpl-${Date.now()}`, - object: 'chat.completion.chunk', - created: Math.floor(Date.now() / 1000), - model: model, - choices: [{ - index: 0, - delta: { - content: content - }, - finish_reason: finishReason === 'stop' ? 'stop' : null - }] - }; - - res.write(`data: ${JSON.stringify(openaiChunk)}\n\n`); - - // 如果结束了,发送最终的 [DONE] - if (finishReason === 'stop') { - res.write('data: [DONE]\n\n'); - } - } - } catch (e) { - logger.debug('Error parsing JSON line:', e.message); - } - } - } catch (error) { - logger.error('Stream processing error:', error); - if (!res.headersSent) { - res.status(500).json({ - error: { - message: error.message || 'Stream error', - type: 'api_error' - } - }); - } - } - }); - - streamResponse.on('end', () => { - logger.info('Stream completed successfully'); - if (!res.headersSent) { - res.write('data: [DONE]\n\n'); - } - res.end(); - }); - - streamResponse.on('error', (error) => { - logger.error('Stream error:', error); - if (!res.headersSent) { - res.status(500).json({ - error: { - message: error.message || 'Stream error', - type: 'api_error' - } - }); - } else { - res.end(); - } - }); - - } else { - // 非流式响应 - logger.info('GenerateContent request', { - model: model, - projectId: account.projectId, - apiKeyId: req.apiKey?.id || 'unknown' - }); - - const response = await geminiAccountService.generateContent( - client, - { model, request: geminiRequestBody }, - null, // user_prompt_id - account.projectId, - req.apiKey?.id // 使用 API Key ID 作为 session ID - ); - - // 转换为 OpenAI 格式并返回 - const openaiResponse = convertGeminiResponseToOpenAI(response, model, false); - res.json(openaiResponse); - } - - const duration = Date.now() - startTime; - logger.info(`OpenAI-Gemini request completed in ${duration}ms`); - - } catch (error) { - logger.error('OpenAI-Gemini request error:', error); - - // 返回 OpenAI 格式的错误响应 - const status = error.status || 500; - const errorResponse = { - error: { - message: error.message || 'Internal server error', - type: 'server_error', - 
code: 'internal_error' - } - }; - - if (!res.headersSent) { - res.status(status).json(errorResponse); - } - } finally { - // 清理资源 - if (abortController) { - abortController = null; - } - } -}); - -// 获取模型列表端点(OpenAI 兼容) -router.get('/v1/models', authenticateApiKey, async (req, res) => { - try { - const apiKeyData = req.apiKey; - - // 检查权限 - if (!checkPermissions(apiKeyData, 'gemini')) { - return res.status(403).json({ - error: { - message: 'This API key does not have permission to access Gemini', - type: 'permission_denied', - code: 'permission_denied' - } - }); - } - - // 返回支持的 Gemini 模型列表(OpenAI 格式) - const models = [ - { - id: 'gemini-2.0-flash-exp', - object: 'model', - created: Math.floor(Date.now() / 1000), - owned_by: 'google' - }, - { - id: 'gemini-2.0-flash-thinking-exp', - object: 'model', - created: Math.floor(Date.now() / 1000), - owned_by: 'google' - }, - { - id: 'gemini-1.5-pro', - object: 'model', - created: Math.floor(Date.now() / 1000), - owned_by: 'google' - }, - { - id: 'gemini-1.5-flash', - object: 'model', - created: Math.floor(Date.now() / 1000), - owned_by: 'google' - } - ]; - - res.json({ - object: 'list', - data: models - }); - } catch (error) { - logger.error('Error getting models:', error); - res.status(500).json({ - error: { - message: error.message || 'Internal server error', - type: 'server_error' - } - }); - } -}); - -module.exports = router; \ No newline at end of file diff --git a/src/routes/openaiGeminiRoutes.js b/src/routes/openaiGeminiRoutes.js index b0eca2b9..8b1af00b 100644 --- a/src/routes/openaiGeminiRoutes.js +++ b/src/routes/openaiGeminiRoutes.js @@ -122,13 +122,16 @@ function convertGeminiResponseToOpenAI(geminiResponse, model, stream = false) { return geminiResponse; } else { // 非流式响应转换 - if (geminiResponse.candidates && geminiResponse.candidates.length > 0) { - const candidate = geminiResponse.candidates[0]; + // 处理嵌套的 response 结构 + const actualResponse = geminiResponse.response || geminiResponse; + + if (actualResponse.candidates && actualResponse.candidates.length > 0) { + const candidate = actualResponse.candidates[0]; const content = candidate.content?.parts?.[0]?.text || ''; const finishReason = candidate.finishReason?.toLowerCase() || 'stop'; // 计算 token 使用量 - const usage = geminiResponse.usageMetadata || { + const usage = actualResponse.usageMetadata || { promptTokenCount: 0, candidatesTokenCount: 0, totalTokenCount: 0 @@ -189,7 +192,7 @@ router.post('/v1/chat/completions', authenticateApiKey, async (req, res) => { // 从 URL 路径中提取模型信息(如果存在) let urlModel = null; const urlPath = req.body?.config?.url || req.originalUrl || req.url; - const modelMatch = urlPath.match(/\/([^\/]+):(?:stream)?[Gg]enerateContent/); + const modelMatch = urlPath.match(/\/([^/]+):(?:stream)?[Gg]enerateContent/); if (modelMatch) { urlModel = modelMatch[1]; logger.debug(`Extracted model from URL: ${urlModel}`); @@ -204,6 +207,10 @@ router.post('/v1/chat/completions', authenticateApiKey, async (req, res) => { max_tokens = 4096, stream = false } = requestBody; + + // 检查URL中是否包含stream标识 + const isStreamFromUrl = urlPath && urlPath.includes('streamGenerateContent'); + const actualStream = stream || isStreamFromUrl; // 优先使用 URL 中的模型,其次是请求体中的模型 const model = urlModel || bodyModel; @@ -292,9 +299,7 @@ router.post('/v1/chat/completions', authenticateApiKey, async (req, res) => { // 获取OAuth客户端 const client = await geminiAccountService.getOauthClient(account.accessToken, account.refreshToken); - - let project = 'verdant-wares-464411-k9'; - if (stream) { + if (actualStream) { // 
流式响应 logger.info('StreamGenerateContent request', { model: model, @@ -306,7 +311,7 @@ router.post('/v1/chat/completions', authenticateApiKey, async (req, res) => { client, { model, request: geminiRequestBody }, null, // user_prompt_id - project || account.projectId, + account.projectId, // 使用有权限的项目ID apiKeyData.id, // 使用 API Key ID 作为 session ID abortController.signal // 传递中止信号 ); @@ -320,9 +325,29 @@ router.post('/v1/chat/completions', authenticateApiKey, async (req, res) => { // 处理流式响应,转换为 OpenAI 格式 let buffer = ''; + // 发送初始的空消息,符合 OpenAI 流式格式 + const initialChunk = { + id: `chatcmpl-${Date.now()}`, + object: 'chat.completion.chunk', + created: Math.floor(Date.now() / 1000), + model: model, + choices: [{ + index: 0, + delta: { role: 'assistant' }, + finish_reason: null + }] + }; + res.write(`data: ${JSON.stringify(initialChunk)}\n\n`); + streamResponse.on('data', (chunk) => { try { - buffer += chunk.toString(); + const chunkStr = chunk.toString(); + + if (!chunkStr.trim()) { + return; + } + + buffer += chunkStr; const lines = buffer.split('\n'); buffer = lines.pop() || ''; // 保留最后一个不完整的行 @@ -341,30 +366,51 @@ router.post('/v1/chat/completions', authenticateApiKey, async (req, res) => { const data = JSON.parse(jsonData); // 转换为 OpenAI 流式格式 - if (data.candidates && data.candidates.length > 0) { - const candidate = data.candidates[0]; + if (data.response?.candidates && data.response.candidates.length > 0) { + const candidate = data.response.candidates[0]; const content = candidate.content?.parts?.[0]?.text || ''; - const finishReason = candidate.finishReason?.toLowerCase(); + const finishReason = candidate.finishReason; - const openaiChunk = { - id: `chatcmpl-${Date.now()}`, - object: 'chat.completion.chunk', - created: Math.floor(Date.now() / 1000), - model: model, - choices: [{ - index: 0, - delta: { - content: content - }, - finish_reason: finishReason === 'stop' ? 'stop' : null - }] - }; - - res.write(`data: ${JSON.stringify(openaiChunk)}\n\n`); - - // 如果结束了,发送最终的 [DONE] - if (finishReason === 'stop') { - res.write('data: [DONE]\n\n'); + // 只有当有内容或者是结束标记时才发送数据 + if (content || finishReason === 'STOP') { + const openaiChunk = { + id: `chatcmpl-${Date.now()}`, + object: 'chat.completion.chunk', + created: Math.floor(Date.now() / 1000), + model: model, + choices: [{ + index: 0, + delta: content ? { content: content } : {}, + finish_reason: finishReason === 'STOP' ? 
'stop' : null + }] + }; + + res.write(`data: ${JSON.stringify(openaiChunk)}\n\n`); + + // 如果结束了,添加 usage 信息并发送最终的 [DONE] + if (finishReason === 'STOP') { + // 如果有 usage 数据,添加到最后一个 chunk + if (data.response.usageMetadata) { + const usageChunk = { + id: `chatcmpl-${Date.now()}`, + object: 'chat.completion.chunk', + created: Math.floor(Date.now() / 1000), + model: model, + choices: [{ + index: 0, + delta: {}, + finish_reason: 'stop' + }], + usage: { + prompt_tokens: data.response.usageMetadata.promptTokenCount || 0, + completion_tokens: data.response.usageMetadata.candidatesTokenCount || 0, + total_tokens: data.response.usageMetadata.totalTokenCount || 0 + } + }; + res.write(`data: ${JSON.stringify(usageChunk)}\n\n`); + } + res.write('data: [DONE]\n\n'); + } } } } catch (e) { @@ -402,6 +448,9 @@ router.post('/v1/chat/completions', authenticateApiKey, async (req, res) => { } }); } else { + // 如果已经开始发送流数据,发送错误事件 + res.write(`data: {"error": {"message": "${error.message || 'Stream error'}"}}\n\n`); + res.write('data: [DONE]\n\n'); res.end(); } }); @@ -418,7 +467,7 @@ router.post('/v1/chat/completions', authenticateApiKey, async (req, res) => { client, { model, request: geminiRequestBody }, null, // user_prompt_id - account.projectId, + account.projectId, // 使用有权限的项目ID apiKeyData.id // 使用 API Key ID 作为 session ID ); diff --git a/src/routes/openaiGeminiRoutes.js.nohas-convert copy.back b/src/routes/openaiGeminiRoutes.js.nohas-convert copy.back deleted file mode 100644 index 7599b98c..00000000 --- a/src/routes/openaiGeminiRoutes.js.nohas-convert copy.back +++ /dev/null @@ -1,318 +0,0 @@ -const express = require('express'); -const router = express.Router(); -const logger = require('../utils/logger'); -const { authenticateApiKey } = require('../middleware/auth'); -const geminiAccountService = require('../services/geminiAccountService'); -const { sendGeminiRequest, getAvailableModels } = require('../services/geminiRelayService'); -const crypto = require('crypto'); - -// 生成会话哈希 -function generateSessionHash(req) { - const sessionData = [ - req.headers['user-agent'], - req.ip, - req.headers['authorization']?.substring(0, 20) - ].filter(Boolean).join(':'); - - return crypto.createHash('sha256').update(sessionData).digest('hex'); -} - -// 检查 API Key 权限 -function checkPermissions(apiKeyData, requiredPermission = 'gemini') { - const permissions = apiKeyData.permissions || 'all'; - return permissions === 'all' || permissions === requiredPermission; -} - -// OpenAI 兼容的聊天完成端点 -router.post('/v1/chat/completions', authenticateApiKey, async (req, res) => { - const startTime = Date.now(); - let abortController = null; - let account = null; // Declare account outside try block for error handling - - try { - const apiKeyData = req.apiKey; - - // 检查权限 - if (!checkPermissions(apiKeyData, 'gemini')) { - return res.status(403).json({ - error: { - message: 'This API key does not have permission to access Gemini', - type: 'permission_denied', - code: 'permission_denied' - } - }); - } - // 处理请求体结构 - 支持多种格式 - let requestBody = req.body; - - // 如果请求体被包装在 body 字段中,解包它 - if (req.body.body && typeof req.body.body === 'object') { - requestBody = req.body.body; - } - - // 从 URL 路径中提取模型信息(如果存在) - let urlModel = null; - const urlPath = req.body?.config?.url || req.originalUrl || req.url; - const modelMatch = urlPath.match(/\/([^\/]+):(?:stream)?[Gg]enerateContent/); - if (modelMatch) { - urlModel = modelMatch[1]; - logger.debug(`Extracted model from URL: ${urlModel}`); - } - - // 提取请求参数 - const { - messages: requestMessages, - 
contents, - model: bodyModel = 'gemini-2.0-flash-exp', - temperature = 0.7, - max_tokens = 4096, - stream = false - } = requestBody; - - // 优先使用 URL 中的模型,其次是请求体中的模型 - const model = urlModel || bodyModel; - - // 支持两种格式: OpenAI 的 messages 或 Gemini 的 contents - let messages = requestMessages; - if (contents && Array.isArray(contents)) { - messages = contents; - } - - // 验证必需参数 - if (!messages || !Array.isArray(messages) || messages.length === 0) { - return res.status(400).json({ - error: { - message: 'Messages array is required', - type: 'invalid_request_error', - code: 'invalid_request' - } - }); - } - - // 检查模型限制 - if (apiKeyData.enableModelRestriction && apiKeyData.restrictedModels.length > 0) { - if (!apiKeyData.restrictedModels.includes(model)) { - return res.status(403).json({ - error: { - message: `Model ${model} is not allowed for this API key`, - type: 'invalid_request_error', - code: 'model_not_allowed' - } - }); - } - } - - // 生成会话哈希用于粘性会话 - const sessionHash = generateSessionHash(req); - - // 选择可用的 Gemini 账户 - account = await geminiAccountService.selectAvailableAccount( - apiKeyData.id, - sessionHash - ); - - if (!account) { - return res.status(503).json({ - error: { - message: 'No available Gemini accounts', - type: 'service_unavailable', - code: 'service_unavailable' - } - }); - } - - logger.info(`Using Gemini account: ${account.id} for API key: ${apiKeyData.id}`); - - // 标记账户被使用 - await geminiAccountService.markAccountUsed(account.id); - - // 创建中止控制器 - abortController = new AbortController(); - - // 处理客户端断开连接 - req.on('close', () => { - if (abortController && !abortController.signal.aborted) { - logger.info('Client disconnected, aborting Gemini request'); - abortController.abort(); - } - }); - - // 发送请求到 Gemini(已经返回 OpenAI 格式) - const geminiResponse = await sendGeminiRequest({ - messages, - model, - temperature, - maxTokens: max_tokens, - stream, - accessToken: account.accessToken, - proxy: account.proxy, - apiKeyId: apiKeyData.id, - signal: abortController.signal, - projectId: account.projectId - }); - - if (stream) { - // 设置流式响应头 - res.setHeader('Content-Type', 'text/event-stream'); - res.setHeader('Cache-Control', 'no-cache'); - res.setHeader('Connection', 'keep-alive'); - res.setHeader('X-Accel-Buffering', 'no'); - - // 流式传输响应 - for await (const chunk of geminiResponse) { - if (abortController.signal.aborted) { - break; - } - res.write(chunk); - } - - res.end(); - } else { - // 非流式响应 - res.json(geminiResponse); - } - - const duration = Date.now() - startTime; - logger.info(`OpenAI-Gemini request completed in ${duration}ms`); - - } catch (error) { - logger.error('OpenAI-Gemini request error:', error); - - // 处理速率限制 - if (error.status === 429) { - if (req.apiKey && account) { - await geminiAccountService.setAccountRateLimited(account.id, true); - } - } - - // 返回 OpenAI 格式的错误响应 - const status = error.status || 500; - const errorResponse = { - error: error.error || { - message: error.message || 'Internal server error', - type: 'server_error', - code: 'internal_error' - } - }; - - res.status(status).json(errorResponse); - } finally { - // 清理资源 - if (abortController) { - abortController = null; - } - } -}); - -// OpenAI 兼容的模型列表端点 -router.get('/v1/models', authenticateApiKey, async (req, res) => { - try { - const apiKeyData = req.apiKey; - - // 检查权限 - if (!checkPermissions(apiKeyData, 'gemini')) { - return res.status(403).json({ - error: { - message: 'This API key does not have permission to access Gemini', - type: 'permission_denied', - code: 'permission_denied' - } - }); - } - - 
// 选择账户获取模型列表 - const account = await geminiAccountService.selectAvailableAccount(apiKeyData.id); - - let models = []; - - if (account) { - // 获取实际的模型列表 - models = await getAvailableModels(account.accessToken, account.proxy); - } else { - // 返回默认模型列表 - models = [ - { - id: 'gemini-2.0-flash-exp', - object: 'model', - created: Math.floor(Date.now() / 1000), - owned_by: 'google' - } - ]; - } - - // 如果启用了模型限制,过滤模型列表 - if (apiKeyData.enableModelRestriction && apiKeyData.restrictedModels.length > 0) { - models = models.filter(model => apiKeyData.restrictedModels.includes(model.id)); - } - - res.json({ - object: 'list', - data: models - }); - - } catch (error) { - logger.error('Failed to get OpenAI-Gemini models:', error); - res.status(500).json({ - error: { - message: 'Failed to retrieve models', - type: 'server_error', - code: 'internal_error' - } - }); - } -}); - -// OpenAI 兼容的模型详情端点 -router.get('/v1/models/:model', authenticateApiKey, async (req, res) => { - try { - const apiKeyData = req.apiKey; - const modelId = req.params.model; - - // 检查权限 - if (!checkPermissions(apiKeyData, 'gemini')) { - return res.status(403).json({ - error: { - message: 'This API key does not have permission to access Gemini', - type: 'permission_denied', - code: 'permission_denied' - } - }); - } - - // 检查模型限制 - if (apiKeyData.enableModelRestriction && apiKeyData.restrictedModels.length > 0) { - if (!apiKeyData.restrictedModels.includes(modelId)) { - return res.status(404).json({ - error: { - message: `Model '${modelId}' not found`, - type: 'invalid_request_error', - code: 'model_not_found' - } - }); - } - } - - // 返回模型信息 - res.json({ - id: modelId, - object: 'model', - created: Math.floor(Date.now() / 1000), - owned_by: 'google', - permission: [], - root: modelId, - parent: null - }); - - } catch (error) { - logger.error('Failed to get model details:', error); - res.status(500).json({ - error: { - message: 'Failed to retrieve model details', - type: 'server_error', - code: 'internal_error' - } - }); - } -}); - -module.exports = router; \ No newline at end of file diff --git a/src/routes/openaiGeminiRoutes.js.version-08041512 b/src/routes/openaiGeminiRoutes.js.version-08041512 deleted file mode 100644 index 5c0aae65..00000000 --- a/src/routes/openaiGeminiRoutes.js.version-08041512 +++ /dev/null @@ -1,490 +0,0 @@ -const express = require('express'); -const router = express.Router(); -const logger = require('../utils/logger'); -const { authenticateApiKey } = require('../middleware/auth'); -const geminiAccountService = require('../services/geminiAccountService'); -const unifiedGeminiScheduler = require('../services/unifiedGeminiScheduler'); -const sessionHelper = require('../utils/sessionHelper'); - -// 检查 API Key 权限 -function checkPermissions(apiKeyData, requiredPermission = 'gemini') { - const permissions = apiKeyData.permissions || 'all'; - return permissions === 'all' || permissions === requiredPermission; -} - -// 转换 OpenAI 消息格式到 Gemini 格式 -function convertMessagesToGemini(messages) { - const contents = []; - let systemInstruction = ''; - - // 辅助函数:提取文本内容 - function extractTextContent(content) { - // 处理 null 或 undefined - if (content == null) { - return ''; - } - - // 处理字符串 - if (typeof content === 'string') { - return content; - } - - // 处理数组格式的内容 - if (Array.isArray(content)) { - return content.map(item => { - if (item == null) return ''; - if (typeof item === 'string') { - return item; - } - if (typeof item === 'object') { - // 处理 {type: 'text', text: '...'} 格式 - if (item.type === 'text' && item.text) { - return 
item.text; - } - // 处理 {text: '...'} 格式 - if (item.text) { - return item.text; - } - // 处理嵌套的对象或数组 - if (item.content) { - return extractTextContent(item.content); - } - } - return ''; - }).join(''); - } - - // 处理对象格式的内容 - if (typeof content === 'object') { - // 处理 {text: '...'} 格式 - if (content.text) { - return content.text; - } - // 处理 {content: '...'} 格式 - if (content.content) { - return extractTextContent(content.content); - } - // 处理 {parts: [{text: '...'}]} 格式 - if (content.parts && Array.isArray(content.parts)) { - return content.parts.map(part => { - if (part && part.text) { - return part.text; - } - return ''; - }).join(''); - } - } - - // 最后的后备选项:只有在内容确实不为空且有意义时才转换为字符串 - if (content !== undefined && content !== null && content !== '' && typeof content !== 'object') { - return String(content); - } - - return ''; - } - - for (const message of messages) { - const textContent = extractTextContent(message.content); - - if (message.role === 'system') { - systemInstruction += (systemInstruction ? '\n\n' : '') + textContent; - } else if (message.role === 'user') { - contents.push({ - role: 'user', - parts: [{ text: textContent }] - }); - } else if (message.role === 'assistant') { - contents.push({ - role: 'model', - parts: [{ text: textContent }] - }); - } - } - - return { contents, systemInstruction }; -} - -// 转换 Gemini 响应到 OpenAI 格式 -function convertGeminiResponseToOpenAI(geminiResponse, model, stream = false) { - if (stream) { - // 处理流式响应 - 原样返回 SSE 数据 - return geminiResponse; - } else { - // 非流式响应转换 - if (geminiResponse.candidates && geminiResponse.candidates.length > 0) { - const candidate = geminiResponse.candidates[0]; - const content = candidate.content?.parts?.[0]?.text || ''; - const finishReason = candidate.finishReason?.toLowerCase() || 'stop'; - - // 计算 token 使用量 - const usage = geminiResponse.usageMetadata || { - promptTokenCount: 0, - candidatesTokenCount: 0, - totalTokenCount: 0 - }; - - return { - id: `chatcmpl-${Date.now()}`, - object: 'chat.completion', - created: Math.floor(Date.now() / 1000), - model: model, - choices: [{ - index: 0, - message: { - role: 'assistant', - content: content - }, - finish_reason: finishReason - }], - usage: { - prompt_tokens: usage.promptTokenCount, - completion_tokens: usage.candidatesTokenCount, - total_tokens: usage.totalTokenCount - } - }; - } else { - throw new Error('No response from Gemini'); - } - } -} - -// OpenAI 兼容的聊天完成端点 -router.post('/v1/chat/completions', authenticateApiKey, async (req, res) => { - const startTime = Date.now(); - let abortController = null; - - try { - const apiKeyData = req.apiKey; - - // 检查权限 - if (!checkPermissions(apiKeyData, 'gemini')) { - return res.status(403).json({ - error: { - message: 'This API key does not have permission to access Gemini', - type: 'permission_denied', - code: 'permission_denied' - } - }); - } - - // 处理请求体结构 - 支持多种格式 - let requestBody = req.body; - - // 如果请求体被包装在 body 字段中,解包它 - if (req.body.body && typeof req.body.body === 'object') { - requestBody = req.body.body; - } - - // 从 URL 路径中提取模型信息(如果存在) - let urlModel = null; - const urlPath = req.body?.config?.url || req.originalUrl || req.url; - const modelMatch = urlPath.match(/\/([^\/]+):(?:stream)?[Gg]enerateContent/); - if (modelMatch) { - urlModel = modelMatch[1]; - logger.debug(`Extracted model from URL: ${urlModel}`); - } - - // 提取请求参数 - const { - messages: requestMessages, - contents: requestContents, - model: bodyModel = 'gemini-2.0-flash-exp', - temperature = 0.7, - max_tokens = 4096, - stream = false - } = requestBody; - - 
// 优先使用 URL 中的模型,其次是请求体中的模型 - const model = urlModel || bodyModel; - - // 支持两种格式: OpenAI 的 messages 或 Gemini 的 contents - let messages = requestMessages; - if (requestContents && Array.isArray(requestContents)) { - messages = requestContents; - } - - // 验证必需参数 - if (!messages || !Array.isArray(messages) || messages.length === 0) { - return res.status(400).json({ - error: { - message: 'Messages array is required', - type: 'invalid_request_error', - code: 'invalid_request' - } - }); - } - - // 检查模型限制 - if (apiKeyData.enableModelRestriction && apiKeyData.restrictedModels.length > 0) { - if (!apiKeyData.restrictedModels.includes(model)) { - return res.status(403).json({ - error: { - message: `Model ${model} is not allowed for this API key`, - type: 'invalid_request_error', - code: 'model_not_allowed' - } - }); - } - } - - // 转换消息格式 - const { contents, systemInstruction } = convertMessagesToGemini(messages); - - // 构建 Gemini 请求体 - const geminiRequestBody = { - contents, - generationConfig: { - temperature, - maxOutputTokens: max_tokens, - candidateCount: 1 - } - }; - - if (systemInstruction) { - geminiRequestBody.systemInstruction = { parts: [{ text: systemInstruction }] }; - } - - // 生成会话哈希 - const sessionHash = sessionHelper.generateSessionHash(req.body); - - // 使用统一调度选择账号 - const { accountId } = await unifiedGeminiScheduler.selectAccountForApiKey(req.apiKey, sessionHash, model); - const account = await geminiAccountService.getAccount(accountId); - const { accessToken, refreshToken } = account; - - logger.info(`Using Gemini account: ${accountId} for API key: ${apiKeyData.id}`); - - // 创建中止控制器 - abortController = new AbortController(); - - // 处理客户端断开连接 - req.on('close', () => { - if (abortController && !abortController.signal.aborted) { - logger.info('Client disconnected, aborting Gemini request'); - abortController.abort(); - } - }); - - const client = await geminiAccountService.getOauthClient(accessToken, refreshToken); - - const project = 'verdant-wares-464411-k9'; - if (stream) { - // 流式响应 - logger.info('StreamGenerateContent request', { - model: model, - projectId: project || account.projectId, - apiKeyId: req.apiKey?.id || 'unknown' - }); - - const streamResponse = await geminiAccountService.generateContentStream( - client, - { model, request: geminiRequestBody }, - null, // user_prompt_id - project || account.projectId, - req.apiKey?.id, // 使用 API Key ID 作为 session ID - abortController.signal // 传递中止信号 - ); - - // 设置 SSE 响应头 - res.setHeader('Content-Type', 'text/event-stream'); - res.setHeader('Cache-Control', 'no-cache'); - res.setHeader('Connection', 'keep-alive'); - res.setHeader('X-Accel-Buffering', 'no'); - - // 处理流式响应,转换为 OpenAI 格式 - let buffer = ''; - - streamResponse.on('data', (chunk) => { - try { - buffer += chunk.toString(); - const lines = buffer.split('\n'); - buffer = lines.pop() || ''; // 保留最后一个不完整的行 - - for (const line of lines) { - if (!line.trim()) continue; - - // 处理 SSE 格式 - let jsonData = line; - if (line.startsWith('data: ')) { - jsonData = line.substring(6).trim(); - } - - if (!jsonData || jsonData === '[DONE]') continue; - - try { - const data = JSON.parse(jsonData); - - // 转换为 OpenAI 流式格式 - if (data.candidates && data.candidates.length > 0) { - const candidate = data.candidates[0]; - const content = candidate.content?.parts?.[0]?.text || ''; - const finishReason = candidate.finishReason?.toLowerCase(); - - const openaiChunk = { - id: `chatcmpl-${Date.now()}`, - object: 'chat.completion.chunk', - created: Math.floor(Date.now() / 1000), - model: model, - choices: 
[{ - index: 0, - delta: { - content: content - }, - finish_reason: finishReason === 'stop' ? 'stop' : null - }] - }; - - res.write(`data: ${JSON.stringify(openaiChunk)}\n\n`); - - // 如果结束了,发送最终的 [DONE] - if (finishReason === 'stop') { - res.write('data: [DONE]\n\n'); - } - } - } catch (e) { - logger.debug('Error parsing JSON line:', e.message); - } - } - } catch (error) { - logger.error('Stream processing error:', error); - if (!res.headersSent) { - res.status(500).json({ - error: { - message: error.message || 'Stream error', - type: 'api_error' - } - }); - } - } - }); - - streamResponse.on('end', () => { - logger.info('Stream completed successfully'); - if (!res.headersSent) { - res.write('data: [DONE]\n\n'); - } - res.end(); - }); - - streamResponse.on('error', (error) => { - logger.error('Stream error:', error); - if (!res.headersSent) { - res.status(500).json({ - error: { - message: error.message || 'Stream error', - type: 'api_error' - } - }); - } else { - res.end(); - } - }); - - } else { - // 非流式响应 - logger.info('GenerateContent request', { - model: model, - projectId: project || account.projectId, - apiKeyId: req.apiKey?.id || 'unknown' - }); - - const response = await geminiAccountService.generateContent( - client, - { model, request: geminiRequestBody }, - null, // user_prompt_id - project || account.projectId, - req.apiKey?.id // 使用 API Key ID 作为 session ID - ); - - // 转换为 OpenAI 格式并返回 - const openaiResponse = convertGeminiResponseToOpenAI(response, model, false); - res.json(openaiResponse); - } - - const duration = Date.now() - startTime; - logger.info(`OpenAI-Gemini request completed in ${duration}ms`); - - } catch (error) { - logger.error('OpenAI-Gemini request error:', error); - - // 返回 OpenAI 格式的错误响应 - const status = error.status || 500; - const errorResponse = { - error: { - message: error.message || 'Internal server error', - type: 'server_error', - code: 'internal_error' - } - }; - - if (!res.headersSent) { - res.status(status).json(errorResponse); - } - } finally { - // 清理资源 - if (abortController) { - abortController = null; - } - } -}); - -// 获取模型列表端点(OpenAI 兼容) -router.get('/v1/models', authenticateApiKey, async (req, res) => { - try { - const apiKeyData = req.apiKey; - - // 检查权限 - if (!checkPermissions(apiKeyData, 'gemini')) { - return res.status(403).json({ - error: { - message: 'This API key does not have permission to access Gemini', - type: 'permission_denied', - code: 'permission_denied' - } - }); - } - - // 返回支持的 Gemini 模型列表(OpenAI 格式) - const models = [ - { - id: 'gemini-2.0-flash-exp', - object: 'model', - created: Math.floor(Date.now() / 1000), - owned_by: 'google' - }, - { - id: 'gemini-2.0-flash-thinking-exp', - object: 'model', - created: Math.floor(Date.now() / 1000), - owned_by: 'google' - }, - { - id: 'gemini-1.5-pro', - object: 'model', - created: Math.floor(Date.now() / 1000), - owned_by: 'google' - }, - { - id: 'gemini-1.5-flash', - object: 'model', - created: Math.floor(Date.now() / 1000), - owned_by: 'google' - } - ]; - - res.json({ - object: 'list', - data: models - }); - } catch (error) { - logger.error('Error getting models:', error); - res.status(500).json({ - error: { - message: error.message || 'Internal server error', - type: 'server_error' - } - }); - } -}); - -module.exports = router; \ No newline at end of file diff --git a/src/services/geminiAccountService.js b/src/services/geminiAccountService.js index 0aeb4587..d11fea1f 100644 --- a/src/services/geminiAccountService.js +++ b/src/services/geminiAccountService.js @@ -294,7 +294,7 @@ 
async function createAccount(accountData) { // 代理设置 proxy: accountData.proxy ? JSON.stringify(accountData.proxy) : '', - // 项目编号(Google Cloud/Workspace 账号需要) + // 项目 ID(Google Cloud/Workspace 账号需要) projectId: accountData.projectId || '', // 支持的模型列表(可选) diff --git a/web/admin-spa/src/components/accounts/AccountForm.vue b/web/admin-spa/src/components/accounts/AccountForm.vue index eb252e55..763dca6d 100644 --- a/web/admin-spa/src/components/accounts/AccountForm.vue +++ b/web/admin-spa/src/components/accounts/AccountForm.vue @@ -210,26 +210,26 @@ - +
[The HTML markup of the AccountForm.vue template hunks (this hunk plus @@ -239,9 +239,9 @@ and @@ -595,17 +595,17 @@) was stripped during extraction, so those diff lines cannot be reproduced verbatim. The recoverable changes are wording-only: the project field's label, placeholder and hint now read "项目 ID" instead of "项目编号"; the help text keeps explaining that some Google accounts (especially ones bound to Google Cloud) are treated as Workspace accounts and therefore need this value, now phrased as "需要提供额外的项目 ID"; the heading "如何获取项目编号:" becomes "如何获取项目 ID:"; the how-to step "复制项目编号(Project Number),通常是12位纯数字" becomes "复制项目 ID(Project ID),通常是字符串格式"; the warning is inverted to "⚠️ 注意:要复制项目 ID(Project ID),不要复制项目编号(Project Number)!"; and the edit-account hint becomes "Google Cloud/Workspace 账号可能需要提供项目 ID".]
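To make the Project ID / Project Number distinction concrete, here is an illustrative sketch only — not code from this repository; the field names follow the public Cloud Resource Manager project resource and the sample values are invented:

// A Google Cloud project exposes both identifiers; the form now asks for the
// string projectId, not the numeric projectNumber.
const exampleProject = {
  projectId: 'example-project-123456', // string ID — what should be entered in the form
  projectNumber: '123456789012',       // numeric ID — what the old UI text asked for
  name: 'Example Project'
};

console.log(`Enter "${exampleProject.projectId}", not "${exampleProject.projectNumber}"`);
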
@@ -914,13 +914,13 @@ const nextStep = async () => { return } - // 对于Gemini账户,检查项目编号 + // 对于Gemini账户,检查项目 ID if (form.value.platform === 'gemini' && oauthStep.value === 1 && form.value.addType === 'oauth') { if (!form.value.projectId || form.value.projectId.trim() === '') { // 使用自定义确认弹窗 const confirmed = await showConfirm( - '项目编号未填写', - '您尚未填写项目编号。\n\n如果您的Google账号绑定了Google Cloud或被识别为Workspace账号,需要提供项目编号。\n如果您使用的是普通个人账号,可以继续不填写。', + '项目 ID 未填写', + '您尚未填写项目 ID。\n\n如果您的Google账号绑定了Google Cloud或被识别为Workspace账号,需要提供项目 ID。\n如果您使用的是普通个人账号,可以继续不填写。', '继续', '返回填写' ) @@ -1110,13 +1110,13 @@ const updateAccount = async () => { return } - // 对于Gemini账户,检查项目编号 + // 对于Gemini账户,检查项目 ID if (form.value.platform === 'gemini') { if (!form.value.projectId || form.value.projectId.trim() === '') { // 使用自定义确认弹窗 const confirmed = await showConfirm( - '项目编号未填写', - '您尚未填写项目编号。\n\n如果您的Google账号绑定了Google Cloud或被识别为Workspace账号,需要提供项目编号。\n如果您使用的是普通个人账号,可以继续不填写。', + '项目 ID 未填写', + '您尚未填写项目 ID。\n\n如果您的Google账号绑定了Google Cloud或被识别为Workspace账号,需要提供项目 ID。\n如果您使用的是普通个人账号,可以继续不填写。', '继续保存', '返回填写' )
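
For reference, a minimal sketch of how a client could exercise the OpenAI-compatible endpoint these patches build out. The base URL, the API key value, the Bearer header, and any mount prefix in front of /v1/chat/completions are assumptions for illustration — none of them are specified in this patch series.

// Node 18+ (global fetch). Streams the OpenAI-style SSE chunks produced by the route.
const BASE_URL = 'http://localhost:3000'; // assumption: where the relay listens
const API_KEY = 'your-api-key';           // assumption: a key with 'gemini' or 'all' permission

async function main() {
  const res = await fetch(`${BASE_URL}/v1/chat/completions`, {
    method: 'POST',
    headers: {
      'Content-Type': 'application/json',
      Authorization: `Bearer ${API_KEY}` // assumption: adjust to whatever authenticateApiKey expects
    },
    body: JSON.stringify({
      model: 'gemini-2.0-flash-exp',
      messages: [{ role: 'user', content: 'Hello' }],
      stream: true
    })
  });

  // The route answers with "data: {...}" chunks terminated by "data: [DONE]".
  const reader = res.body.getReader();
  const decoder = new TextDecoder();
  for (;;) {
    const { value, done } = await reader.read();
    if (done) break;
    process.stdout.write(decoder.decode(value));
  }
}

main().catch(console.error);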