From f2312893d49fcd1125ee21a2900119b5cef8afba Mon Sep 17 00:00:00 2001 From: mouyong Date: Mon, 4 Aug 2025 22:36:36 +0800 Subject: [PATCH 1/9] =?UTF-8?q?feat:=20gemini=20=E6=B5=81=E5=BC=8F?= =?UTF-8?q?=E5=93=8D=E5=BA=94?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/routes/geminiRoutes.js | 72 +++++++++++++++++++++++++++----------- 1 file changed, 51 insertions(+), 21 deletions(-) diff --git a/src/routes/geminiRoutes.js b/src/routes/geminiRoutes.js index eefc48a9..f3dd598f 100644 --- a/src/routes/geminiRoutes.js +++ b/src/routes/geminiRoutes.js @@ -291,9 +291,9 @@ router.get('/key-info', authenticateApiKey, async (req, res) => { } }); -router.post('/v1internal\\:loadCodeAssist', authenticateApiKey, async (req, res) => { +// 共用的 loadCodeAssist 处理函数 +async function handleLoadCodeAssist(req, res) { try { - const sessionHash = sessionHelper.generateSessionHash(req.body); // 使用统一调度选择账号(传递请求的模型) @@ -304,7 +304,8 @@ router.post('/v1internal\\:loadCodeAssist', authenticateApiKey, async (req, res) const { metadata, cloudaicompanionProject } = req.body; - logger.info('LoadCodeAssist request', { + const version = req.path.includes('v1beta') ? 'v1beta' : 'v1internal'; + logger.info(`LoadCodeAssist request (${version})`, { metadata: metadata || {}, cloudaicompanionProject: cloudaicompanionProject || null, apiKeyId: req.apiKey?.id || 'unknown' @@ -314,15 +315,17 @@ router.post('/v1internal\\:loadCodeAssist', authenticateApiKey, async (req, res) const response = await geminiAccountService.loadCodeAssist(client, cloudaicompanionProject); res.json(response); } catch (error) { - logger.error('Error in loadCodeAssist endpoint', { error: error.message }); + const version = req.path.includes('v1beta') ? 'v1beta' : 'v1internal'; + logger.error(`Error in loadCodeAssist endpoint (${version})`, { error: error.message }); res.status(500).json({ error: 'Internal server error', message: error.message }); } -}); +} -router.post('/v1internal\\:onboardUser', authenticateApiKey, async (req, res) => { +// 共用的 onboardUser 处理函数 +async function handleOnboardUser(req, res) { try { const { tierId, cloudaicompanionProject, metadata } = req.body; const sessionHash = sessionHelper.generateSessionHash(req.body); @@ -332,7 +335,8 @@ router.post('/v1internal\\:onboardUser', authenticateApiKey, async (req, res) => const { accountId } = await unifiedGeminiScheduler.selectAccountForApiKey(req.apiKey, sessionHash, requestedModel); const { accessToken, refreshToken } = await geminiAccountService.getAccount(accountId); - logger.info('OnboardUser request', { + const version = req.path.includes('v1beta') ? 'v1beta' : 'v1internal'; + logger.info(`OnboardUser request (${version})`, { tierId: tierId || 'not provided', cloudaicompanionProject: cloudaicompanionProject || null, metadata: metadata || {}, @@ -351,15 +355,17 @@ router.post('/v1internal\\:onboardUser', authenticateApiKey, async (req, res) => res.json(response); } } catch (error) { - logger.error('Error in onboardUser endpoint', { error: error.message }); + const version = req.path.includes('v1beta') ? 
'v1beta' : 'v1internal'; + logger.error(`Error in onboardUser endpoint (${version})`, { error: error.message }); res.status(500).json({ error: 'Internal server error', message: error.message }); } -}); +} -router.post('/v1internal\\:countTokens', authenticateApiKey, async (req, res) => { +// 共用的 countTokens 处理函数 +async function handleCountTokens(req, res) { try { // 处理请求体结构,支持直接 contents 或 request.contents const requestData = req.body.request || req.body; @@ -380,7 +386,8 @@ router.post('/v1internal\\:countTokens', authenticateApiKey, async (req, res) => const { accountId } = await unifiedGeminiScheduler.selectAccountForApiKey(req.apiKey, sessionHash, model); const { accessToken, refreshToken } = await geminiAccountService.getAccount(accountId); - logger.info('CountTokens request', { + const version = req.path.includes('v1beta') ? 'v1beta' : 'v1internal'; + logger.info(`CountTokens request (${version})`, { model: model, contentsLength: contents.length, apiKeyId: req.apiKey?.id || 'unknown' @@ -391,7 +398,8 @@ router.post('/v1internal\\:countTokens', authenticateApiKey, async (req, res) => res.json(response); } catch (error) { - logger.error('Error in countTokens endpoint', { error: error.message }); + const version = req.path.includes('v1beta') ? 'v1beta' : 'v1internal'; + logger.error(`Error in countTokens endpoint (${version})`, { error: error.message }); res.status(500).json({ error: { message: error.message || 'Internal server error', @@ -399,9 +407,10 @@ router.post('/v1internal\\:countTokens', authenticateApiKey, async (req, res) => } }); } -}); +} -router.post('/v1internal\\:generateContent', authenticateApiKey, async (req, res) => { +// 共用的 generateContent 处理函数 +async function handleGenerateContent(req, res) { try { const { model, project, user_prompt_id, request: requestData } = req.body; const sessionHash = sessionHelper.generateSessionHash(req.body); @@ -421,7 +430,8 @@ router.post('/v1internal\\:generateContent', authenticateApiKey, async (req, res const account = await geminiAccountService.getAccount(accountId); const { accessToken, refreshToken } = account; - logger.info('GenerateContent request', { + const version = req.path.includes('v1beta') ? 'v1beta' : 'v1internal'; + logger.info(`GenerateContent request (${version})`, { model: model, userPromptId: user_prompt_id, projectId: project || account.projectId, @@ -439,7 +449,8 @@ router.post('/v1internal\\:generateContent', authenticateApiKey, async (req, res res.json(response); } catch (error) { - logger.error('Error in generateContent endpoint', { error: error.message }); + const version = req.path.includes('v1beta') ? 'v1beta' : 'v1internal'; + logger.error(`Error in generateContent endpoint (${version})`, { error: error.message }); res.status(500).json({ error: { message: error.message || 'Internal server error', @@ -447,9 +458,10 @@ router.post('/v1internal\\:generateContent', authenticateApiKey, async (req, res } }); } -}); +} -router.post('/v1internal\\:streamGenerateContent', authenticateApiKey, async (req, res) => { +// 共用的 streamGenerateContent 处理函数 +async function handleStreamGenerateContent(req, res) { let abortController = null; try { @@ -471,7 +483,8 @@ router.post('/v1internal\\:streamGenerateContent', authenticateApiKey, async (re const account = await geminiAccountService.getAccount(accountId); const { accessToken, refreshToken } = account; - logger.info('StreamGenerateContent request', { + const version = req.path.includes('v1beta') ? 
'v1beta' : 'v1internal'; + logger.info(`StreamGenerateContent request (${version})`, { model: model, userPromptId: user_prompt_id, projectId: project || account.projectId, @@ -528,7 +541,8 @@ router.post('/v1internal\\:streamGenerateContent', authenticateApiKey, async (re }); } catch (error) { - logger.error('Error in streamGenerateContent endpoint', { error: error.message }); + const version = req.path.includes('v1beta') ? 'v1beta' : 'v1internal'; + logger.error(`Error in streamGenerateContent endpoint (${version})`, { error: error.message }); if (!res.headersSent) { res.status(500).json({ @@ -544,6 +558,22 @@ router.post('/v1internal\\:streamGenerateContent', authenticateApiKey, async (re abortController = null; } } -}); +} + +// v1internal 和 v1beta 端点注册 +router.post('/v1internal\\:loadCodeAssist', authenticateApiKey, handleLoadCodeAssist); +router.post('/v1beta\\:loadCodeAssist', authenticateApiKey, handleLoadCodeAssist); + +router.post('/v1internal\\:onboardUser', authenticateApiKey, handleOnboardUser); +router.post('/v1beta\\:onboardUser', authenticateApiKey, handleOnboardUser); + +router.post('/v1internal\\:countTokens', authenticateApiKey, handleCountTokens); +router.post('/v1beta\\:countTokens', authenticateApiKey, handleCountTokens); + +router.post('/v1internal\\:generateContent', authenticateApiKey, handleGenerateContent); +router.post('/v1beta\\:generateContent', authenticateApiKey, handleGenerateContent); + +router.post('/v1internal\\:streamGenerateContent', authenticateApiKey, handleStreamGenerateContent); +router.post('/v1beta\\:streamGenerateContent', authenticateApiKey, handleStreamGenerateContent); module.exports = router; \ No newline at end of file From ae9f6158d195bbc55d3706e3d99f688910b9589b Mon Sep 17 00:00:00 2001 From: mouyong Date: Mon, 4 Aug 2025 22:37:06 +0800 Subject: [PATCH 2/9] =?UTF-8?q?feat:=20gemini=20=E6=B5=81=E5=BC=8F?= =?UTF-8?q?=E5=93=8D=E5=BA=94?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/routes/geminiRoutes.js | 17 ++++++++--------- 1 file changed, 8 insertions(+), 9 deletions(-) diff --git a/src/routes/geminiRoutes.js b/src/routes/geminiRoutes.js index f3dd598f..36051b3d 100644 --- a/src/routes/geminiRoutes.js +++ b/src/routes/geminiRoutes.js @@ -560,20 +560,19 @@ async function handleStreamGenerateContent(req, res) { } } -// v1internal 和 v1beta 端点注册 +// 注册所有路由端点 +// v1internal 版本的端点 router.post('/v1internal\\:loadCodeAssist', authenticateApiKey, handleLoadCodeAssist); -router.post('/v1beta\\:loadCodeAssist', authenticateApiKey, handleLoadCodeAssist); - router.post('/v1internal\\:onboardUser', authenticateApiKey, handleOnboardUser); -router.post('/v1beta\\:onboardUser', authenticateApiKey, handleOnboardUser); - router.post('/v1internal\\:countTokens', authenticateApiKey, handleCountTokens); -router.post('/v1beta\\:countTokens', authenticateApiKey, handleCountTokens); - router.post('/v1internal\\:generateContent', authenticateApiKey, handleGenerateContent); -router.post('/v1beta\\:generateContent', authenticateApiKey, handleGenerateContent); - router.post('/v1internal\\:streamGenerateContent', authenticateApiKey, handleStreamGenerateContent); + +// v1beta 版本的端点 +router.post('/v1beta\\:loadCodeAssist', authenticateApiKey, handleLoadCodeAssist); +router.post('/v1beta\\:onboardUser', authenticateApiKey, handleOnboardUser); +router.post('/v1beta\\:countTokens', authenticateApiKey, handleCountTokens); +router.post('/v1beta\\:generateContent', authenticateApiKey, handleGenerateContent); 
router.post('/v1beta\\:streamGenerateContent', authenticateApiKey, handleStreamGenerateContent); module.exports = router; \ No newline at end of file From 08a962833b9e05437d3b5b7e07623bcd935e62de Mon Sep 17 00:00:00 2001 From: mouyong Date: Mon, 4 Aug 2025 22:38:58 +0800 Subject: [PATCH 3/9] =?UTF-8?q?feat:=20gemini=20=E6=B5=81=E5=BC=8F?= =?UTF-8?q?=E5=93=8D=E5=BA=94?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/routes/geminiRoutes.js | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/src/routes/geminiRoutes.js b/src/routes/geminiRoutes.js index 36051b3d..223a66d4 100644 --- a/src/routes/geminiRoutes.js +++ b/src/routes/geminiRoutes.js @@ -569,10 +569,10 @@ router.post('/v1internal\\:generateContent', authenticateApiKey, handleGenerateC router.post('/v1internal\\:streamGenerateContent', authenticateApiKey, handleStreamGenerateContent); // v1beta 版本的端点 -router.post('/v1beta\\:loadCodeAssist', authenticateApiKey, handleLoadCodeAssist); -router.post('/v1beta\\:onboardUser', authenticateApiKey, handleOnboardUser); -router.post('/v1beta\\:countTokens', authenticateApiKey, handleCountTokens); -router.post('/v1beta\\:generateContent', authenticateApiKey, handleGenerateContent); -router.post('/v1beta\\:streamGenerateContent', authenticateApiKey, handleStreamGenerateContent); +router.post('/v1beta/models\\:loadCodeAssist', authenticateApiKey, handleLoadCodeAssist); +router.post('/v1beta/models\\:onboardUser', authenticateApiKey, handleOnboardUser); +router.post('/v1beta/models\\:countTokens', authenticateApiKey, handleCountTokens); +router.post('/v1beta/models\\:generateContent', authenticateApiKey, handleGenerateContent); +router.post('/v1beta/models\\:streamGenerateContent', authenticateApiKey, handleStreamGenerateContent); module.exports = router; \ No newline at end of file From 49221dfbc5c898103f73b0e8dd2d0780a7d56e16 Mon Sep 17 00:00:00 2001 From: mouyong Date: Mon, 4 Aug 2025 22:45:11 +0800 Subject: [PATCH 4/9] =?UTF-8?q?feat:=20gemini=20=E6=B5=81=E5=BC=8F?= =?UTF-8?q?=E5=93=8D=E5=BA=94?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/routes/geminiRoutes.js | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/src/routes/geminiRoutes.js b/src/routes/geminiRoutes.js index 223a66d4..abd4379e 100644 --- a/src/routes/geminiRoutes.js +++ b/src/routes/geminiRoutes.js @@ -568,11 +568,11 @@ router.post('/v1internal\\:countTokens', authenticateApiKey, handleCountTokens); router.post('/v1internal\\:generateContent', authenticateApiKey, handleGenerateContent); router.post('/v1internal\\:streamGenerateContent', authenticateApiKey, handleStreamGenerateContent); -// v1beta 版本的端点 -router.post('/v1beta/models\\:loadCodeAssist', authenticateApiKey, handleLoadCodeAssist); -router.post('/v1beta/models\\:onboardUser', authenticateApiKey, handleOnboardUser); -router.post('/v1beta/models\\:countTokens', authenticateApiKey, handleCountTokens); -router.post('/v1beta/models\\:generateContent', authenticateApiKey, handleGenerateContent); -router.post('/v1beta/models\\:streamGenerateContent', authenticateApiKey, handleStreamGenerateContent); +// v1beta 版本的端点 - 支持动态模型名称 +router.post('/v1beta/models/:modelName\\:loadCodeAssist', authenticateApiKey, handleLoadCodeAssist); +router.post('/v1beta/models/:modelName\\:onboardUser', authenticateApiKey, handleOnboardUser); +router.post('/v1beta/models/:modelName\\:countTokens', authenticateApiKey, handleCountTokens); 
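Both API surfaces are wired to the same handler functions; only the path prefix differs, and each handler recovers the version label from req.path for logging. A minimal sketch of that pattern, assuming Express and using a placeholder /ping route instead of the escaped-colon paths registered above:

const express = require('express');
const router = express.Router();

// One handler serves both surfaces; the log label is derived from the request path,
// exactly as the handlers in the patch do with req.path.includes('v1beta').
function handlePing(req, res) {
  const version = req.path.includes('v1beta') ? 'v1beta' : 'v1internal';
  res.json({ version });
}

// Register the same handler under both prefixes (placeholder paths, not the real routes).
router.post('/v1internal/ping', handlePing);
router.post('/v1beta/ping', handlePing);

module.exports = router;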
+router.post('/v1beta/models/:modelName\\:generateContent', authenticateApiKey, handleGenerateContent); +router.post('/v1beta/models/:modelName\\:streamGenerateContent', authenticateApiKey, handleStreamGenerateContent); module.exports = router; \ No newline at end of file From 74f428d99003e6709eb639a0c822930d385dce05 Mon Sep 17 00:00:00 2001 From: mouyong Date: Mon, 4 Aug 2025 22:57:31 +0800 Subject: [PATCH 5/9] =?UTF-8?q?feat:=20gemini=20=E6=B5=81=E5=BC=8F?= =?UTF-8?q?=E5=93=8D=E5=BA=94?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/routes/geminiRoutes.js | 1 + 1 file changed, 1 insertion(+) diff --git a/src/routes/geminiRoutes.js b/src/routes/geminiRoutes.js index abd4379e..86932821 100644 --- a/src/routes/geminiRoutes.js +++ b/src/routes/geminiRoutes.js @@ -414,6 +414,7 @@ async function handleGenerateContent(req, res) { try { const { model, project, user_prompt_id, request: requestData } = req.body; const sessionHash = sessionHelper.generateSessionHash(req.body); + console.log(321, requestData); // 验证必需参数 if (!requestData || !requestData.contents) { From e4b53640612fc6f0a79696fbba39dda41dcbfa3e Mon Sep 17 00:00:00 2001 From: mouyong Date: Mon, 4 Aug 2025 23:04:40 +0800 Subject: [PATCH 6/9] =?UTF-8?q?feat:=20gemini=20=E6=B5=81=E5=BC=8F?= =?UTF-8?q?=E5=93=8D=E5=BA=94?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/routes/geminiRoutes.js | 44 ++++++++++++++++++++++++++++++++++---- 1 file changed, 40 insertions(+), 4 deletions(-) diff --git a/src/routes/geminiRoutes.js b/src/routes/geminiRoutes.js index 86932821..ca5eb7d9 100644 --- a/src/routes/geminiRoutes.js +++ b/src/routes/geminiRoutes.js @@ -415,9 +415,27 @@ async function handleGenerateContent(req, res) { const { model, project, user_prompt_id, request: requestData } = req.body; const sessionHash = sessionHelper.generateSessionHash(req.body); console.log(321, requestData); + + // 处理 OpenAI 格式请求(没有 request 字段的情况) + let actualRequestData = requestData; + if (!requestData && req.body.messages) { + // 这是 OpenAI 格式的请求,构建 Gemini 格式的 request 对象 + actualRequestData = { + contents: req.body.messages.map(msg => ({ + role: msg.role === 'assistant' ? 'model' : msg.role, + parts: [{ text: msg.content }] + })), + generationConfig: { + temperature: req.body.temperature, + maxOutputTokens: req.body.max_tokens, + topP: req.body.top_p, + topK: req.body.top_k + } + }; + } // 验证必需参数 - if (!requestData || !requestData.contents) { + if (!actualRequestData || !actualRequestData.contents) { return res.status(400).json({ error: { message: 'Request contents are required', @@ -442,7 +460,7 @@ async function handleGenerateContent(req, res) { const client = await geminiAccountService.getOauthClient(accessToken, refreshToken); const response = await geminiAccountService.generateContent( client, - { model, request: requestData }, + { model, request: actualRequestData }, user_prompt_id, project || account.projectId, req.apiKey?.id // 使用 API Key ID 作为 session ID @@ -469,8 +487,26 @@ async function handleStreamGenerateContent(req, res) { const { model, project, user_prompt_id, request: requestData } = req.body; const sessionHash = sessionHelper.generateSessionHash(req.body); + // 处理 OpenAI 格式请求(没有 request 字段的情况) + let actualRequestData = requestData; + if (!requestData && req.body.messages) { + // 这是 OpenAI 格式的请求,构建 Gemini 格式的 request 对象 + actualRequestData = { + contents: req.body.messages.map(msg => ({ + role: msg.role === 'assistant' ? 
'model' : msg.role, + parts: [{ text: msg.content }] + })), + generationConfig: { + temperature: req.body.temperature, + maxOutputTokens: req.body.max_tokens, + topP: req.body.top_p, + topK: req.body.top_k + } + }; + } + // 验证必需参数 - if (!requestData || !requestData.contents) { + if (!actualRequestData || !actualRequestData.contents) { return res.status(400).json({ error: { message: 'Request contents are required', @@ -506,7 +542,7 @@ async function handleStreamGenerateContent(req, res) { const client = await geminiAccountService.getOauthClient(accessToken, refreshToken); const streamResponse = await geminiAccountService.generateContentStream( client, - { model, request: requestData }, + { model, request: actualRequestData }, user_prompt_id, project || account.projectId, req.apiKey?.id, // 使用 API Key ID 作为 session ID From 71b0d743fd560f5381f92fe845583a3fddf11235 Mon Sep 17 00:00:00 2001 From: mouyong Date: Mon, 4 Aug 2025 23:05:51 +0800 Subject: [PATCH 7/9] =?UTF-8?q?feat:=20gemini=20=E6=B5=81=E5=BC=8F?= =?UTF-8?q?=E5=93=8D=E5=BA=94?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/routes/geminiRoutes.js | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/routes/geminiRoutes.js b/src/routes/geminiRoutes.js index ca5eb7d9..60b93ee6 100644 --- a/src/routes/geminiRoutes.js +++ b/src/routes/geminiRoutes.js @@ -414,7 +414,6 @@ async function handleGenerateContent(req, res) { try { const { model, project, user_prompt_id, request: requestData } = req.body; const sessionHash = sessionHelper.generateSessionHash(req.body); - console.log(321, requestData); // 处理 OpenAI 格式请求(没有 request 字段的情况) let actualRequestData = requestData; @@ -433,6 +432,7 @@ async function handleGenerateContent(req, res) { } }; } + console.log(321, actualRequestData); // 验证必需参数 if (!actualRequestData || !actualRequestData.contents) { From c214d72befb1205a3ef84bb58f6b73bab9a80156 Mon Sep 17 00:00:00 2001 From: mouyong Date: Mon, 4 Aug 2025 23:10:53 +0800 Subject: [PATCH 8/9] =?UTF-8?q?feat:=20gemini=20=E6=B5=81=E5=BC=8F?= =?UTF-8?q?=E5=93=8D=E5=BA=94?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/routes/geminiRoutes.js | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/src/routes/geminiRoutes.js b/src/routes/geminiRoutes.js index 60b93ee6..c7c85d51 100644 --- a/src/routes/geminiRoutes.js +++ b/src/routes/geminiRoutes.js @@ -425,10 +425,10 @@ async function handleGenerateContent(req, res) { parts: [{ text: msg.content }] })), generationConfig: { - temperature: req.body.temperature, - maxOutputTokens: req.body.max_tokens, - topP: req.body.top_p, - topK: req.body.top_k + temperature: req.body.temperature !== undefined ? req.body.temperature : 0.7, + maxOutputTokens: req.body.max_tokens !== undefined ? req.body.max_tokens : 4096, + topP: req.body.top_p !== undefined ? req.body.top_p : 0.95, + topK: req.body.top_k !== undefined ? req.body.top_k : 40 } }; } @@ -497,10 +497,10 @@ async function handleStreamGenerateContent(req, res) { parts: [{ text: msg.content }] })), generationConfig: { - temperature: req.body.temperature, - maxOutputTokens: req.body.max_tokens, - topP: req.body.top_p, - topK: req.body.top_k + temperature: req.body.temperature !== undefined ? req.body.temperature : 0.7, + maxOutputTokens: req.body.max_tokens !== undefined ? req.body.max_tokens : 4096, + topP: req.body.top_p !== undefined ? req.body.top_p : 0.95, + topK: req.body.top_k !== undefined ? 
req.body.top_k : 40 } }; } From 9fca0b0c204e97885ad8b6f5987f45e1a31247ac Mon Sep 17 00:00:00 2001 From: mouyong Date: Tue, 5 Aug 2025 01:19:23 +0800 Subject: [PATCH 9/9] =?UTF-8?q?feat:=20gemini=20=E6=94=AF=E6=8C=81?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/middleware/auth.js | 1 + src/routes/geminiRoutes.js | 72 ++++++++++++++++++++++---------------- 2 files changed, 42 insertions(+), 31 deletions(-) diff --git a/src/middleware/auth.js b/src/middleware/auth.js index 47bd4333..dc441781 100644 --- a/src/middleware/auth.js +++ b/src/middleware/auth.js @@ -11,6 +11,7 @@ const authenticateApiKey = async (req, res, next) => { try { // 安全提取API Key,支持多种格式 const apiKey = req.headers['x-api-key'] || + req.headers['x-goog-api-key'] || req.headers['authorization']?.replace(/^Bearer\s+/i, '') || req.headers['api-key']; diff --git a/src/routes/geminiRoutes.js b/src/routes/geminiRoutes.js index c7c85d51..ba10dd01 100644 --- a/src/routes/geminiRoutes.js +++ b/src/routes/geminiRoutes.js @@ -415,24 +415,28 @@ async function handleGenerateContent(req, res) { const { model, project, user_prompt_id, request: requestData } = req.body; const sessionHash = sessionHelper.generateSessionHash(req.body); - // 处理 OpenAI 格式请求(没有 request 字段的情况) + // 处理不同格式的请求 let actualRequestData = requestData; - if (!requestData && req.body.messages) { - // 这是 OpenAI 格式的请求,构建 Gemini 格式的 request 对象 - actualRequestData = { - contents: req.body.messages.map(msg => ({ - role: msg.role === 'assistant' ? 'model' : msg.role, - parts: [{ text: msg.content }] - })), - generationConfig: { - temperature: req.body.temperature !== undefined ? req.body.temperature : 0.7, - maxOutputTokens: req.body.max_tokens !== undefined ? req.body.max_tokens : 4096, - topP: req.body.top_p !== undefined ? req.body.top_p : 0.95, - topK: req.body.top_k !== undefined ? req.body.top_k : 40 - } - }; + if (!requestData) { + if (req.body.messages) { + // 这是 OpenAI 格式的请求,构建 Gemini 格式的 request 对象 + actualRequestData = { + contents: req.body.messages.map(msg => ({ + role: msg.role === 'assistant' ? 'model' : msg.role, + parts: [{ text: msg.content }] + })), + generationConfig: { + temperature: req.body.temperature !== undefined ? req.body.temperature : 0.7, + maxOutputTokens: req.body.max_tokens !== undefined ? req.body.max_tokens : 4096, + topP: req.body.top_p !== undefined ? req.body.top_p : 0.95, + topK: req.body.top_k !== undefined ? req.body.top_k : 40 + } + }; + } else if (req.body.contents) { + // 直接的 Gemini 格式请求(没有 request 包装) + actualRequestData = req.body; + } } - console.log(321, actualRequestData); // 验证必需参数 if (!actualRequestData || !actualRequestData.contents) { @@ -468,6 +472,7 @@ async function handleGenerateContent(req, res) { res.json(response); } catch (error) { + console.log(321, error.response); const version = req.path.includes('v1beta') ? 'v1beta' : 'v1internal'; logger.error(`Error in generateContent endpoint (${version})`, { error: error.message }); res.status(500).json({ @@ -487,22 +492,27 @@ async function handleStreamGenerateContent(req, res) { const { model, project, user_prompt_id, request: requestData } = req.body; const sessionHash = sessionHelper.generateSessionHash(req.body); - // 处理 OpenAI 格式请求(没有 request 字段的情况) + // 处理不同格式的请求 let actualRequestData = requestData; - if (!requestData && req.body.messages) { - // 这是 OpenAI 格式的请求,构建 Gemini 格式的 request 对象 - actualRequestData = { - contents: req.body.messages.map(msg => ({ - role: msg.role === 'assistant' ? 
'model' : msg.role, - parts: [{ text: msg.content }] - })), - generationConfig: { - temperature: req.body.temperature !== undefined ? req.body.temperature : 0.7, - maxOutputTokens: req.body.max_tokens !== undefined ? req.body.max_tokens : 4096, - topP: req.body.top_p !== undefined ? req.body.top_p : 0.95, - topK: req.body.top_k !== undefined ? req.body.top_k : 40 - } - }; + if (!requestData) { + if (req.body.messages) { + // 这是 OpenAI 格式的请求,构建 Gemini 格式的 request 对象 + actualRequestData = { + contents: req.body.messages.map(msg => ({ + role: msg.role === 'assistant' ? 'model' : msg.role, + parts: [{ text: msg.content }] + })), + generationConfig: { + temperature: req.body.temperature !== undefined ? req.body.temperature : 0.7, + maxOutputTokens: req.body.max_tokens !== undefined ? req.body.max_tokens : 4096, + topP: req.body.top_p !== undefined ? req.body.top_p : 0.95, + topK: req.body.top_k !== undefined ? req.body.top_k : 40 + } + }; + } else if (req.body.contents) { + // 直接的 Gemini 格式请求(没有 request 包装) + actualRequestData = req.body; + } } // 验证必需参数
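For reference, the OpenAI-to-Gemini request mapping that patches 6 through 9 build inline can be read as one small function. The role rename, the parts wrapping, and the 0.7 / 4096 / 0.95 / 40 defaults below are taken directly from the diff; only the wrapper function name is illustrative:

// Sketch of the conversion used by handleGenerateContent / handleStreamGenerateContent
// when the body carries OpenAI-style "messages" instead of a Gemini "request" object.
function toGeminiRequest(body) {
  return {
    contents: body.messages.map((msg) => ({
      // OpenAI's "assistant" role becomes Gemini's "model"; other roles pass through.
      role: msg.role === 'assistant' ? 'model' : msg.role,
      parts: [{ text: msg.content }]
    })),
    generationConfig: {
      temperature: body.temperature !== undefined ? body.temperature : 0.7,
      maxOutputTokens: body.max_tokens !== undefined ? body.max_tokens : 4096,
      topP: body.top_p !== undefined ? body.top_p : 0.95,
      topK: body.top_k !== undefined ? body.top_k : 40
    }
  };
}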