From 9fca0b0c204e97885ad8b6f5987f45e1a31247ac Mon Sep 17 00:00:00 2001
From: mouyong
Date: Tue, 5 Aug 2025 01:19:23 +0800
Subject: [PATCH] feat: gemini support
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 src/middleware/auth.js     |  1 +
 src/routes/geminiRoutes.js | 72 ++++++++++++++++++++++----------------
 2 files changed, 42 insertions(+), 31 deletions(-)

diff --git a/src/middleware/auth.js b/src/middleware/auth.js
index 47bd4333..dc441781 100644
--- a/src/middleware/auth.js
+++ b/src/middleware/auth.js
@@ -11,6 +11,7 @@ const authenticateApiKey = async (req, res, next) => {
   try {
     // Safely extract the API key; several header formats are supported
     const apiKey = req.headers['x-api-key'] ||
+                   req.headers['x-goog-api-key'] ||
                    req.headers['authorization']?.replace(/^Bearer\s+/i, '') ||
                    req.headers['api-key'];
 
diff --git a/src/routes/geminiRoutes.js b/src/routes/geminiRoutes.js
index c7c85d51..ba10dd01 100644
--- a/src/routes/geminiRoutes.js
+++ b/src/routes/geminiRoutes.js
@@ -415,24 +415,28 @@ async function handleGenerateContent(req, res) {
     const { model, project, user_prompt_id, request: requestData } = req.body;
     const sessionHash = sessionHelper.generateSessionHash(req.body);
 
-    // Handle OpenAI-format requests (the case with no request field)
+    // Handle requests in different formats
     let actualRequestData = requestData;
-    if (!requestData && req.body.messages) {
-      // This is an OpenAI-format request; build a Gemini-format request object
-      actualRequestData = {
-        contents: req.body.messages.map(msg => ({
-          role: msg.role === 'assistant' ? 'model' : msg.role,
-          parts: [{ text: msg.content }]
-        })),
-        generationConfig: {
-          temperature: req.body.temperature !== undefined ? req.body.temperature : 0.7,
-          maxOutputTokens: req.body.max_tokens !== undefined ? req.body.max_tokens : 4096,
-          topP: req.body.top_p !== undefined ? req.body.top_p : 0.95,
-          topK: req.body.top_k !== undefined ? req.body.top_k : 40
-        }
-      };
+    if (!requestData) {
+      if (req.body.messages) {
+        // This is an OpenAI-format request; build a Gemini-format request object
+        actualRequestData = {
+          contents: req.body.messages.map(msg => ({
+            role: msg.role === 'assistant' ? 'model' : msg.role,
+            parts: [{ text: msg.content }]
+          })),
+          generationConfig: {
+            temperature: req.body.temperature !== undefined ? req.body.temperature : 0.7,
+            maxOutputTokens: req.body.max_tokens !== undefined ? req.body.max_tokens : 4096,
+            topP: req.body.top_p !== undefined ? req.body.top_p : 0.95,
+            topK: req.body.top_k !== undefined ? req.body.top_k : 40
+          }
+        };
+      } else if (req.body.contents) {
+        // A bare Gemini-format request (no request wrapper)
+        actualRequestData = req.body;
+      }
     }
-    console.log(321, actualRequestData);
 
     // Validate required parameters
     if (!actualRequestData || !actualRequestData.contents) {
@@ -468,6 +472,7 @@
 
     res.json(response);
   } catch (error) {
+    console.log(321, error.response);
     const version = req.path.includes('v1beta') ? 'v1beta' : 'v1internal';
     logger.error(`Error in generateContent endpoint (${version})`, { error: error.message });
     res.status(500).json({
@@ -487,22 +492,27 @@ async function handleStreamGenerateContent(req, res) {
     const { model, project, user_prompt_id, request: requestData } = req.body;
     const sessionHash = sessionHelper.generateSessionHash(req.body);
 
-    // Handle OpenAI-format requests (the case with no request field)
+    // Handle requests in different formats
     let actualRequestData = requestData;
-    if (!requestData && req.body.messages) {
-      // This is an OpenAI-format request; build a Gemini-format request object
-      actualRequestData = {
-        contents: req.body.messages.map(msg => ({
-          role: msg.role === 'assistant' ? 'model' : msg.role,
-          parts: [{ text: msg.content }]
-        })),
-        generationConfig: {
-          temperature: req.body.temperature !== undefined ? req.body.temperature : 0.7,
-          maxOutputTokens: req.body.max_tokens !== undefined ? req.body.max_tokens : 4096,
-          topP: req.body.top_p !== undefined ? req.body.top_p : 0.95,
-          topK: req.body.top_k !== undefined ? req.body.top_k : 40
-        }
-      };
+    if (!requestData) {
+      if (req.body.messages) {
+        // This is an OpenAI-format request; build a Gemini-format request object
+        actualRequestData = {
+          contents: req.body.messages.map(msg => ({
+            role: msg.role === 'assistant' ? 'model' : msg.role,
+            parts: [{ text: msg.content }]
+          })),
+          generationConfig: {
+            temperature: req.body.temperature !== undefined ? req.body.temperature : 0.7,
+            maxOutputTokens: req.body.max_tokens !== undefined ? req.body.max_tokens : 4096,
+            topP: req.body.top_p !== undefined ? req.body.top_p : 0.95,
+            topK: req.body.top_k !== undefined ? req.body.top_k : 40
+          }
+        };
+      } else if (req.body.contents) {
+        // A bare Gemini-format request (no request wrapper)
+        actualRequestData = req.body;
+      }
     }
 
     // Validate required parameters
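
Reviewer note: a minimal standalone sketch of the behaviour this patch adds, for illustration only. It shows the request-shape normalization now done in handleGenerateContent/handleStreamGenerateContent (wrapped Gemini request, OpenAI-style messages body, or bare Gemini contents body), plus an example call using the newly accepted x-goog-api-key header. The base URL, port, route path, and model name below are assumptions, not taken from the patch.

// Sketch of the normalization logic; not the actual route module.
function normalizeGeminiRequest(body) {
  // 1) Wrapped Gemini request: { model, request: { contents, ... } }
  if (body.request) return body.request;

  // 2) OpenAI chat format: { messages: [...], temperature, max_tokens, ... }
  if (body.messages) {
    return {
      contents: body.messages.map(msg => ({
        role: msg.role === 'assistant' ? 'model' : msg.role,
        parts: [{ text: msg.content }]
      })),
      generationConfig: {
        temperature: body.temperature !== undefined ? body.temperature : 0.7,
        maxOutputTokens: body.max_tokens !== undefined ? body.max_tokens : 4096,
        topP: body.top_p !== undefined ? body.top_p : 0.95,
        topK: body.top_k !== undefined ? body.top_k : 40
      }
    };
  }

  // 3) Bare Gemini format: { contents: [...], generationConfig: ... }
  if (body.contents) return body;

  // Anything else fails the later "contents is required" validation.
  return undefined;
}

// Example request through the proxy using the x-goog-api-key header
// that authenticateApiKey accepts after this patch. URL/path assumed.
async function example() {
  const res = await fetch('http://localhost:3000/gemini/v1beta/models/gemini-pro:generateContent', {
    method: 'POST',
    headers: {
      'Content-Type': 'application/json',
      'x-goog-api-key': 'YOUR_API_KEY'
    },
    body: JSON.stringify({
      contents: [{ role: 'user', parts: [{ text: 'Hello' }] }]
    })
  });
  console.log(await res.json());
}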