mirror of
https://github.com/Wei-Shaw/claude-relay-service.git
synced 2026-03-30 00:33:35 +00:00
Merge pull request #1015 from DragonFSKY/feat/unified-endpoint-response-format [skip ci]
feat: /v1/chat/completions 返回标准 OpenAI Chat Completions 格式
This commit is contained in:
File diff suppressed because one or more lines are too long
@@ -8,7 +8,10 @@ const {
|
||||
handleStreamGenerateContent: geminiHandleStreamGenerateContent
|
||||
} = require('../handlers/geminiHandlers')
|
||||
const openaiRoutes = require('./openaiRoutes')
|
||||
const { CODEX_CLI_INSTRUCTIONS } = require('./openaiRoutes')
|
||||
const apiKeyService = require('../services/apiKeyService')
|
||||
const GeminiToOpenAIConverter = require('../services/geminiToOpenAI')
|
||||
const CodexToOpenAIConverter = require('../services/codexToOpenAI')
|
||||
|
||||
const router = express.Router()
|
||||
|
||||
@@ -71,6 +74,147 @@ async function routeToBackend(req, res, requestedModel) {
|
||||
}
|
||||
})
|
||||
}
|
||||
// 响应格式拦截:Codex/Responses → OpenAI Chat Completions
|
||||
const codexConverter = new CodexToOpenAIConverter()
|
||||
const originalJson = res.json.bind(res)
|
||||
|
||||
// 流式:patch res.write/res.end 拦截 SSE 事件
|
||||
// 与 openaiRoutes 保持一致:stream 缺省时视为流式(stream !== false)
|
||||
if (req.body.stream !== false) {
|
||||
const streamState = codexConverter.createStreamState()
|
||||
const sseBuffer = { data: '' }
|
||||
const originalWrite = res.write.bind(res)
|
||||
const originalEnd = res.end.bind(res)
|
||||
|
||||
res.write = function (chunk, encoding, callback) {
|
||||
if (res.statusCode >= 400) {
|
||||
return originalWrite(chunk, encoding, callback)
|
||||
}
|
||||
|
||||
const str = (typeof chunk === 'string' ? chunk : chunk.toString()).replace(/\r\n/g, '\n')
|
||||
sseBuffer.data += str
|
||||
|
||||
let idx
|
||||
while ((idx = sseBuffer.data.indexOf('\n\n')) !== -1) {
|
||||
const event = sseBuffer.data.slice(0, idx)
|
||||
sseBuffer.data = sseBuffer.data.slice(idx + 2)
|
||||
|
||||
if (!event.trim()) {
|
||||
continue
|
||||
}
|
||||
|
||||
const lines = event.split('\n')
|
||||
for (const line of lines) {
|
||||
if (line.startsWith('data: ')) {
|
||||
const jsonStr = line.slice(6)
|
||||
if (!jsonStr || jsonStr === '[DONE]') {
|
||||
continue
|
||||
}
|
||||
|
||||
try {
|
||||
const eventData = JSON.parse(jsonStr)
|
||||
if (eventData.error) {
|
||||
originalWrite(`data: ${jsonStr}\n\n`)
|
||||
continue
|
||||
}
|
||||
const converted = codexConverter.convertStreamChunk(
|
||||
eventData,
|
||||
requestedModel,
|
||||
streamState
|
||||
)
|
||||
for (const c of converted) {
|
||||
originalWrite(c)
|
||||
}
|
||||
} catch (e) {
|
||||
originalWrite(`data: ${jsonStr}\n\n`)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (typeof callback === 'function') {
|
||||
callback()
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
res.end = function (chunk, encoding, callback) {
|
||||
if (res.statusCode < 400) {
|
||||
// 处理 res.end(chunk) 传入的最后一块数据
|
||||
if (chunk) {
|
||||
const str = (typeof chunk === 'string' ? chunk : chunk.toString()).replace(
|
||||
/\r\n/g,
|
||||
'\n'
|
||||
)
|
||||
sseBuffer.data += str
|
||||
chunk = undefined
|
||||
}
|
||||
|
||||
if (sseBuffer.data.trim()) {
|
||||
const remaining = `${sseBuffer.data}\n\n`
|
||||
sseBuffer.data = ''
|
||||
|
||||
const lines = remaining.split('\n')
|
||||
for (const line of lines) {
|
||||
if (line.startsWith('data: ')) {
|
||||
const jsonStr = line.slice(6)
|
||||
if (!jsonStr || jsonStr === '[DONE]') {
|
||||
continue
|
||||
}
|
||||
try {
|
||||
const eventData = JSON.parse(jsonStr)
|
||||
if (eventData.error) {
|
||||
originalWrite(`data: ${jsonStr}\n\n`)
|
||||
} else {
|
||||
const converted = codexConverter.convertStreamChunk(
|
||||
eventData,
|
||||
requestedModel,
|
||||
streamState
|
||||
)
|
||||
for (const c of converted) {
|
||||
originalWrite(c)
|
||||
}
|
||||
}
|
||||
} catch (e) {
|
||||
originalWrite(`data: ${jsonStr}\n\n`)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
originalWrite('data: [DONE]\n\n')
|
||||
}
|
||||
return originalEnd(chunk, encoding, callback)
|
||||
}
|
||||
}
|
||||
|
||||
// 非流式:patch res.json 拦截 JSON 响应
|
||||
// chatgpt.com 后端返回 { type: "response.completed", response: {...} }
|
||||
// api.openai.com 后端返回标准 Response 对象 { object: "response", status, output, ... }
|
||||
res.json = function (data) {
|
||||
if (res.statusCode >= 400) {
|
||||
return originalJson(data)
|
||||
}
|
||||
if (data && (data.type === 'response.completed' || data.object === 'response')) {
|
||||
try {
|
||||
return originalJson(codexConverter.convertResponse(data, requestedModel))
|
||||
} catch (e) {
|
||||
logger.debug('Codex response conversion failed, passing through:', e.message)
|
||||
return originalJson(data)
|
||||
}
|
||||
}
|
||||
return originalJson(data)
|
||||
}
|
||||
|
||||
// 输入转换:Chat Completions → Responses API 格式
|
||||
req.body = codexConverter.buildRequestFromOpenAI(req.body)
|
||||
// 注入 Codex CLI 系统提示词(与 handleResponses 非 Codex CLI 适配一致)
|
||||
req.body.instructions = CODEX_CLI_INSTRUCTIONS
|
||||
req._fromUnifiedEndpoint = true
|
||||
// 修正请求路径:body 已转为 Responses 格式,路径需与之匹配
|
||||
// Express req.path 是只读 getter(派生自 req.url),需改 req.url
|
||||
req.url = '/v1/responses'
|
||||
|
||||
return await openaiRoutes.handleResponses(req, res)
|
||||
} else if (backend === 'gemini') {
|
||||
// Gemini 后端
|
||||
@@ -84,20 +228,101 @@ async function routeToBackend(req, res, requestedModel) {
|
||||
})
|
||||
}
|
||||
|
||||
// 转换为 Gemini 格式
|
||||
const geminiRequest = {
|
||||
model: requestedModel,
|
||||
messages: req.body.messages,
|
||||
temperature: req.body.temperature || 0.7,
|
||||
max_tokens: req.body.max_tokens || 4096,
|
||||
stream: req.body.stream || false
|
||||
// 将 OpenAI Chat Completions 参数转换为 Gemini 原生格式
|
||||
// standard 处理器从 req.body 根层解构 contents/generationConfig 等字段
|
||||
const geminiRequest = buildGeminiRequestFromOpenAI(req.body)
|
||||
|
||||
// standard 处理器从 req.params.modelName 获取模型名
|
||||
req.params = req.params || {}
|
||||
req.params.modelName = requestedModel
|
||||
|
||||
// 平铺到 req.body 根层(保留 messages/stream 等原始字段给 sessionHelper 计算 hash)
|
||||
req.body.contents = geminiRequest.contents
|
||||
req.body.generationConfig = geminiRequest.generationConfig || {}
|
||||
req.body.safetySettings = geminiRequest.safetySettings
|
||||
// standard 处理器读取 camelCase: systemInstruction
|
||||
if (geminiRequest.system_instruction) {
|
||||
req.body.systemInstruction = geminiRequest.system_instruction
|
||||
}
|
||||
if (geminiRequest.tools) {
|
||||
req.body.tools = geminiRequest.tools
|
||||
}
|
||||
if (geminiRequest.toolConfig) {
|
||||
req.body.toolConfig = geminiRequest.toolConfig
|
||||
}
|
||||
|
||||
req.body = geminiRequest
|
||||
if (req.body.stream) {
|
||||
// 响应格式拦截:Gemini SSE → OpenAI Chat Completions chunk
|
||||
const geminiConverter = new GeminiToOpenAIConverter()
|
||||
const geminiStreamState = geminiConverter.createStreamState()
|
||||
const geminiOriginalWrite = res.write.bind(res)
|
||||
const geminiOriginalEnd = res.end.bind(res)
|
||||
|
||||
res.write = function (chunk, encoding, callback) {
|
||||
if (res.statusCode >= 400) {
|
||||
return geminiOriginalWrite(chunk, encoding, callback)
|
||||
}
|
||||
|
||||
const converted = geminiConverter.convertStreamChunk(
|
||||
chunk,
|
||||
requestedModel,
|
||||
geminiStreamState
|
||||
)
|
||||
if (converted) {
|
||||
return geminiOriginalWrite(converted, encoding, callback)
|
||||
}
|
||||
if (typeof callback === 'function') {
|
||||
callback()
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
res.end = function (chunk, encoding, callback) {
|
||||
if (res.statusCode < 400) {
|
||||
// 处理 res.end(chunk) 传入的最后一块数据
|
||||
if (chunk) {
|
||||
const converted = geminiConverter.convertStreamChunk(
|
||||
chunk,
|
||||
requestedModel,
|
||||
geminiStreamState
|
||||
)
|
||||
if (converted) {
|
||||
geminiOriginalWrite(converted)
|
||||
}
|
||||
chunk = undefined
|
||||
}
|
||||
// 刷新 converter 内部 buffer 中的残留数据
|
||||
if (geminiStreamState.buffer.trim()) {
|
||||
const remaining = geminiConverter.convertStreamChunk(
|
||||
'\n\n',
|
||||
requestedModel,
|
||||
geminiStreamState
|
||||
)
|
||||
if (remaining) {
|
||||
geminiOriginalWrite(remaining)
|
||||
}
|
||||
}
|
||||
geminiOriginalWrite('data: [DONE]\n\n')
|
||||
}
|
||||
return geminiOriginalEnd(chunk, encoding, callback)
|
||||
}
|
||||
|
||||
if (geminiRequest.stream) {
|
||||
return await geminiHandleStreamGenerateContent(req, res)
|
||||
} else {
|
||||
// 响应格式拦截:Gemini JSON → OpenAI chat.completion
|
||||
const geminiConverter = new GeminiToOpenAIConverter()
|
||||
const geminiOriginalJson = res.json.bind(res)
|
||||
|
||||
res.json = function (data) {
|
||||
if (res.statusCode >= 400) {
|
||||
return geminiOriginalJson(data)
|
||||
}
|
||||
if (data && (data.candidates || data.response?.candidates)) {
|
||||
return geminiOriginalJson(geminiConverter.convertResponse(data, requestedModel))
|
||||
}
|
||||
return geminiOriginalJson(data)
|
||||
}
|
||||
|
||||
return await geminiHandleGenerateContent(req, res)
|
||||
}
|
||||
} else {
|
||||
@@ -198,6 +423,280 @@ router.post('/v1/completions', authenticateApiKey, async (req, res) => {
|
||||
}
|
||||
})
|
||||
|
||||
// --- OpenAI Chat Completions → Gemini 原生请求转换(OpenAI → Gemini 格式映射) ---
|
||||
|
||||
/**
 * Convert an OpenAI Chat Completions request body to a Gemini native
 * generateContent request.
 *
 * Produces: contents, system_instruction, generationConfig, tools,
 * toolConfig and default safetySettings.
 *
 * @param {Object} body - OpenAI Chat Completions request body
 * @returns {Object} Gemini-format request object
 */
function buildGeminiRequestFromOpenAI(body) {
  const request = {}
  const generationConfig = {}
  const messages = body.messages || []

  // Pass 1: collect assistant tool_calls id→name mapping so later "tool"
  // messages can be linked back to the function that produced them.
  const toolCallNames = Object.create(null)
  for (const msg of messages) {
    if (msg.role === 'assistant' && msg.tool_calls) {
      for (const tc of msg.tool_calls) {
        if (tc.id && tc.function?.name) {
          toolCallNames[tc.id] = tc.function.name
        }
      }
    }
  }

  // Pass 2: build contents + system_instruction.
  const systemParts = []
  const contents = []

  for (const msg of messages) {
    if (msg.role === 'system' || msg.role === 'developer') {
      const text = extractTextContent(msg.content)
      if (text) {
        systemParts.push({ text })
      }
    } else if (msg.role === 'user') {
      const parts = buildContentParts(msg.content)
      if (parts.length > 0) {
        contents.push({ role: 'user', parts })
      }
    } else if (msg.role === 'assistant') {
      // Assistant content keeps text + image parts (multimodal).
      const parts = buildContentParts(msg.content)
      // tool_calls → functionCall parts.
      if (msg.tool_calls) {
        for (const tc of msg.tool_calls) {
          if (tc.function) {
            let args
            try {
              args = JSON.parse(tc.function.arguments || '{}')
            } catch {
              // Arguments are not valid JSON — preserve the raw string.
              args = tc.function.arguments ? { _raw: tc.function.arguments } : {}
            }
            parts.push({
              functionCall: { name: tc.function.name, args }
            })
          }
        }
      }
      if (parts.length > 0) {
        contents.push({ role: 'model', parts })
      }
    } else if (msg.role === 'tool') {
      // Tool response → functionResponse (Gemini expects these on user role).
      const name = toolCallNames[msg.tool_call_id] || msg.name || 'unknown'
      let responseContent
      try {
        responseContent =
          typeof msg.content === 'string' ? JSON.parse(msg.content) : msg.content || {}
      } catch {
        responseContent = { result: msg.content }
      }
      contents.push({
        role: 'user',
        parts: [{ functionResponse: { name, response: responseContent } }]
      })
    }
  }

  if (systemParts.length > 0) {
    if (contents.length === 0) {
      // System-only conversations: send the system text as user content so
      // Gemini does not reject an empty contents array.
      contents.push({ role: 'user', parts: systemParts })
    } else {
      request.system_instruction = { parts: systemParts }
    }
  }
  request.contents = contents

  // Generation config.
  if (body.temperature !== undefined) {
    generationConfig.temperature = body.temperature
  }
  const maxTokens = body.max_completion_tokens || body.max_tokens
  if (maxTokens !== undefined) {
    generationConfig.maxOutputTokens = maxTokens
  }
  if (body.top_p !== undefined) {
    generationConfig.topP = body.top_p
  }
  if (body.top_k !== undefined) {
    generationConfig.topK = body.top_k
  }
  if (body.n !== undefined && body.n > 1) {
    generationConfig.candidateCount = body.n
  }
  if (body.stop) {
    generationConfig.stopSequences = Array.isArray(body.stop) ? body.stop : [body.stop]
  }

  // modalities → responseModalities (text→TEXT, image→IMAGE, audio→AUDIO).
  if (Array.isArray(body.modalities)) {
    const modalityMap = { text: 'TEXT', image: 'IMAGE', audio: 'AUDIO' }
    const mapped = body.modalities
      // Fix: skip non-string entries instead of throwing on m.toLowerCase().
      .filter((m) => typeof m === 'string')
      .map((m) => modalityMap[m.toLowerCase()])
      .filter(Boolean)
    if (mapped.length > 0) {
      generationConfig.responseModalities = mapped
    }
  }

  // image_config → imageConfig (aspect_ratio→aspectRatio, image_size→imageSize).
  if (body.image_config) {
    const imageConfig = {}
    if (body.image_config.aspect_ratio) {
      imageConfig.aspectRatio = body.image_config.aspect_ratio
    }
    if (body.image_config.image_size) {
      imageConfig.imageSize = body.image_config.image_size
    }
    if (Object.keys(imageConfig).length > 0) {
      generationConfig.imageConfig = imageConfig
    }
  }

  // reasoning_effort → thinkingConfig.
  if (body.reasoning_effort) {
    const effort = body.reasoning_effort.toLowerCase()
    if (effort === 'none') {
      generationConfig.thinkingConfig = { thinkingLevel: 'none', includeThoughts: false }
    } else if (effort === 'auto') {
      // auto → thinkingBudget: -1 (let the model decide).
      generationConfig.thinkingConfig = { thinkingBudget: -1, includeThoughts: true }
    } else {
      generationConfig.thinkingConfig = { thinkingLevel: effort, includeThoughts: true }
    }
  }

  // response_format → responseMimeType / responseSchema.
  if (body.response_format) {
    if (body.response_format.type === 'json_object') {
      generationConfig.responseMimeType = 'application/json'
    } else if (
      body.response_format.type === 'json_schema' &&
      body.response_format.json_schema?.schema
    ) {
      generationConfig.responseMimeType = 'application/json'
      generationConfig.responseSchema = body.response_format.json_schema.schema
    }
  }

  if (Object.keys(generationConfig).length > 0) {
    request.generationConfig = generationConfig
  }

  // Tools: OpenAI function tools → Gemini functionDeclarations.
  if (body.tools && body.tools.length > 0) {
    const functionDeclarations = []
    const extraTools = []
    for (const tool of body.tools) {
      if (tool.type === 'function' && tool.function) {
        const decl = {
          name: tool.function.name,
          description: tool.function.description || ''
        }
        if (tool.function.parameters) {
          // parameters → parametersJsonSchema; drop OpenAI-only "strict".
          const schema = { ...tool.function.parameters }
          delete schema.strict
          decl.parametersJsonSchema = schema
        } else {
          decl.parametersJsonSchema = { type: 'object', properties: {} }
        }
        functionDeclarations.push(decl)
      } else if (
        tool.type === 'google_search' ||
        tool.type === 'code_execution' ||
        tool.type === 'url_context'
      ) {
        // Non-function tools pass through, snake_case → camelCase.
        const typeMap = {
          google_search: 'googleSearch',
          code_execution: 'codeExecution',
          url_context: 'urlContext'
        }
        const geminiType = typeMap[tool.type]
        extraTools.push({ [geminiType]: tool[tool.type] || {} })
      }
    }
    const toolsArray = []
    if (functionDeclarations.length > 0) {
      toolsArray.push({ functionDeclarations })
    }
    toolsArray.push(...extraTools)
    if (toolsArray.length > 0) {
      request.tools = toolsArray
    }
  }

  // tool_choice → toolConfig.functionCallingConfig.
  if (body.tool_choice) {
    if (body.tool_choice === 'none') {
      request.toolConfig = { functionCallingConfig: { mode: 'NONE' } }
    } else if (body.tool_choice === 'auto') {
      request.toolConfig = { functionCallingConfig: { mode: 'AUTO' } }
    } else if (body.tool_choice === 'required') {
      request.toolConfig = { functionCallingConfig: { mode: 'ANY' } }
    } else if (typeof body.tool_choice === 'object' && body.tool_choice.function?.name) {
      request.toolConfig = {
        functionCallingConfig: {
          mode: 'ANY',
          allowedFunctionNames: [body.tool_choice.function.name]
        }
      }
    }
  }

  // Default safety settings: maximally permissive, to avoid unnecessary
  // content blocking on relayed requests.
  if (!request.safetySettings) {
    request.safetySettings = [
      { category: 'HARM_CATEGORY_HARASSMENT', threshold: 'OFF' },
      { category: 'HARM_CATEGORY_HATE_SPEECH', threshold: 'OFF' },
      { category: 'HARM_CATEGORY_SEXUALLY_EXPLICIT', threshold: 'OFF' },
      { category: 'HARM_CATEGORY_DANGEROUS_CONTENT', threshold: 'OFF' },
      { category: 'HARM_CATEGORY_CIVIC_INTEGRITY', threshold: 'BLOCK_NONE' }
    ]
  }

  return request
}
|
||||
|
||||
// Extract the plain-text portion of an OpenAI message `content` field.
// Accepts either a bare string or an array of content parts; every part
// whose type is "text" contributes its text, in order. Anything else
// (null, undefined, objects) yields the empty string.
function extractTextContent(content) {
  if (typeof content === 'string') {
    return content
  }
  if (Array.isArray(content)) {
    let text = ''
    for (const part of content) {
      if (part.type === 'text') {
        text += part.text ?? ''
      }
    }
    return text
  }
  return ''
}
|
||||
|
||||
// Convert an OpenAI message `content` field into an array of Gemini parts.
// - string            → one { text } part
// - array of parts    → text parts plus base64 data-URL images as inlineData
//                       (non-data image URLs are dropped)
// - null/undefined/0  → empty array
// - anything else     → stringified into a single { text } part
function buildContentParts(content) {
  if (typeof content === 'string') {
    return [{ text: content }]
  }
  if (Array.isArray(content)) {
    return content.flatMap((item) => {
      if (item.type === 'text') {
        return [{ text: item.text }]
      }
      if (item.type === 'image_url' && item.image_url?.url) {
        // Only base64 data URLs can be embedded; the regex anchors on "data:".
        const match = item.image_url.url.match(/^data:([^;]+);base64,(.+)$/)
        if (match) {
          return [{ inlineData: { mimeType: match[1], data: match[2] } }]
        }
      }
      return []
    })
  }
  return content ? [{ text: String(content) }] : []
}
|
||||
|
||||
// Export the Express router (for app.use) plus named helpers so other route
// modules can reuse model-based backend detection and dispatch.
module.exports = router
module.exports.detectBackendFromModel = detectBackendFromModel
module.exports.routeToBackend = routeToBackend
|
||||
|
||||
717
src/services/codexToOpenAI.js
Normal file
717
src/services/codexToOpenAI.js
Normal file
@@ -0,0 +1,717 @@
|
||||
/**
 * Codex Responses API → OpenAI Chat Completions converter.
 * Turns SSE events and response objects from the Codex/OpenAI Responses API
 * into standard chat.completion / chat.completion.chunk payloads.
 */
|
||||
|
||||
class CodexToOpenAIConverter {
|
||||
constructor() {
  // Reverse map of shortened tool names back to their originals.
  // Populated when building the upstream request; consulted by
  // _restoreToolName when converting responses back.
  this._reverseToolNameMap = {}
}
|
||||
|
||||
// Fresh per-stream accumulator, threaded through convertStreamChunk().
createStreamState() {
  return {
    responseId: '', // id from response.created; replayed on every chunk
    createdAt: 0, // epoch seconds from response.created
    model: '', // model reported by the upstream response
    functionCallIndex: -1, // index of the current tool call (-1 = none yet)
    hasReceivedArgumentsDelta: false, // incremental arguments were streamed
    hasToolCallAnnounced: false, // output_item.added already announced the call
    roleSent: false // first chunk carried role: "assistant"
  }
}
|
||||
|
||||
/**
 * Streaming conversion: one parsed Responses-API SSE event → zero or more
 * OpenAI chat.completion.chunk SSE strings.
 * @param {Object} eventData - parsed SSE JSON object
 * @param {string} model - model name from the original request
 * @param {Object} state - accumulator returned by createStreamState()
 * @returns {string[]} "data: {...}\n\n" strings (possibly empty)
 */
convertStreamChunk(eventData, model, state) {
  const { type } = eventData
  if (!type) {
    return []
  }

  switch (type) {
    case 'response.created':
      // Captures id/created/model into state; emits nothing itself.
      return this._handleResponseCreated(eventData, state)

    case 'response.reasoning_summary_text.delta':
      return this._emitChunk(state, model, { reasoning_content: eventData.delta })

    case 'response.reasoning_summary_text.done':
      // Terminal marker only — content already streamed via .delta events.
      return []

    case 'response.output_text.delta':
      return this._emitChunk(state, model, { content: eventData.delta })

    case 'response.output_item.added':
      return this._handleOutputItemAdded(eventData, model, state)

    case 'response.function_call_arguments.delta':
      return this._handleArgumentsDelta(eventData, model, state)

    case 'response.function_call_arguments.done':
      return this._handleArgumentsDone(eventData, model, state)

    case 'response.output_item.done':
      return this._handleOutputItemDone(eventData, model, state)

    case 'response.completed':
      return this._handleResponseCompleted(eventData, model, state)

    case 'response.failed':
    case 'response.incomplete':
      return this._handleResponseError(eventData, model, state)

    case 'error':
      return this._handleStreamError(eventData, model, state)

    default:
      // Unknown event types are dropped silently.
      return []
  }
}
|
||||
|
||||
/**
 * Non-streaming conversion: a complete Codex response → OpenAI chat.completion.
 * @param {Object} responseData - raw Response object, or a response.completed event
 * @param {string} model - model name from the original request
 * @returns {Object} chat.completion object, or { error } when the response failed
 */
convertResponse(responseData, model) {
  // Auto-detect: response.completed event wrapper vs a bare Response object.
  const resp = responseData.type === 'response.completed' ? responseData.response : responseData

  const message = { role: 'assistant', content: null }
  const toolCalls = []

  const output = resp.output || []
  for (const item of output) {
    if (item.type === 'reasoning') {
      // Concatenate all summary_text fragments into reasoning_content.
      const summaryTexts = (item.summary || [])
        .filter((s) => s.type === 'summary_text')
        .map((s) => s.text)
      if (summaryTexts.length > 0) {
        message.reasoning_content = (message.reasoning_content || '') + summaryTexts.join('')
      }
    } else if (item.type === 'message') {
      // Concatenate all output_text fragments into content.
      const contentTexts = (item.content || [])
        .filter((c) => c.type === 'output_text')
        .map((c) => c.text)
      if (contentTexts.length > 0) {
        message.content = (message.content || '') + contentTexts.join('')
      }
    } else if (item.type === 'function_call') {
      toolCalls.push({
        id: item.call_id || item.id,
        type: 'function',
        function: {
          // Restore the original tool name if it was shortened upstream.
          name: this._restoreToolName(item.name),
          arguments: item.arguments || '{}'
        }
      })
    }
  }

  if (toolCalls.length > 0) {
    message.tool_calls = toolCalls
  }

  // status "failed" → error structure (mirrors streaming _handleResponseError).
  if (resp.status === 'failed') {
    const err = resp.error || {}
    return {
      error: {
        message: err.message || 'Response failed',
        type: err.type || 'server_error',
        code: err.code || null
      }
    }
  }

  // Any tool call forces finish_reason "tool_calls"; otherwise map the status.
  const finishReason = toolCalls.length > 0 ? 'tool_calls' : this._mapResponseStatus(resp)

  const result = {
    id: resp.id || `chatcmpl-${Date.now()}`,
    object: 'chat.completion',
    created: this._parseCreatedAt(resp.created_at),
    model: resp.model || model,
    choices: [{ index: 0, message, finish_reason: finishReason }]
  }

  const usage = this._mapUsage(resp.usage)
  if (usage) {
    result.usage = usage
  }

  return result
}
|
||||
|
||||
// --- Internal: streaming event handlers ---
|
||||
|
||||
_handleResponseCreated(eventData, state) {
|
||||
const resp = eventData.response || {}
|
||||
state.responseId = resp.id || ''
|
||||
if (resp.created_at) {
|
||||
state.createdAt = this._parseCreatedAt(resp.created_at)
|
||||
}
|
||||
state.model = resp.model || ''
|
||||
return []
|
||||
}
|
||||
|
||||
// A new output item started. Only function_call items matter here: announce
// the tool call (index, id, restored name, empty arguments) so clients can
// start accumulating argument deltas against that index.
_handleOutputItemAdded(eventData, model, state) {
  const { item } = eventData
  if (!item || item.type !== 'function_call') {
    return []
  }

  state.functionCallIndex++
  state.hasReceivedArgumentsDelta = false
  state.hasToolCallAnnounced = true

  return this._emitChunk(state, model, {
    tool_calls: [
      {
        index: state.functionCallIndex,
        id: item.call_id || item.id,
        type: 'function',
        function: { name: this._restoreToolName(item.name), arguments: '' }
      }
    ]
  })
}
|
||||
|
||||
_handleArgumentsDelta(eventData, model, state) {
|
||||
state.hasReceivedArgumentsDelta = true
|
||||
return this._emitChunk(state, model, {
|
||||
tool_calls: [
|
||||
{
|
||||
index: state.functionCallIndex,
|
||||
function: { arguments: eventData.delta }
|
||||
}
|
||||
]
|
||||
})
|
||||
}
|
||||
|
||||
// Arguments finished streaming. If deltas already carried the payload this is
// a no-op; otherwise emit the complete arguments string in a single chunk.
_handleArgumentsDone(eventData, model, state) {
  // Deltas were received — the full payload has already been sent.
  if (state.hasReceivedArgumentsDelta) {
    return []
  }

  // No deltas arrived: emit the full arguments in one chunk.
  return this._emitChunk(state, model, {
    tool_calls: [
      {
        index: state.functionCallIndex,
        function: { arguments: eventData.arguments || '{}' }
      }
    ]
  })
}
|
||||
|
||||
// A function_call output item finished. Normally output_item.added already
// announced it, so just clear the flag; otherwise fall back to emitting the
// complete tool call (id, name and arguments) in one chunk.
_handleOutputItemDone(eventData, model, state) {
  const { item } = eventData
  if (!item || item.type !== 'function_call') {
    return []
  }

  // Already announced via output_item.added — do not emit it twice.
  if (state.hasToolCallAnnounced) {
    state.hasToolCallAnnounced = false
    return []
  }

  // Fallback: no "added" event was seen, emit the complete tool call.
  state.functionCallIndex++
  return this._emitChunk(state, model, {
    tool_calls: [
      {
        index: state.functionCallIndex,
        id: item.call_id || item.id,
        type: 'function',
        function: {
          name: this._restoreToolName(item.name),
          arguments: item.arguments || '{}'
        }
      }
    ]
  })
}
|
||||
|
||||
// Stream completed successfully: emit the terminal chunk carrying
// finish_reason ("tool_calls" if any tool call was streamed, else the
// mapped response status) plus usage when the upstream reported it.
_handleResponseCompleted(eventData, model, state) {
  const resp = eventData.response || {}
  const chunk = this._makeChunk(state, model)

  if (state.functionCallIndex >= 0) {
    chunk.choices[0].finish_reason = 'tool_calls'
  } else {
    chunk.choices[0].finish_reason = this._mapResponseStatus(resp)
  }

  const usage = this._mapUsage(resp.usage)
  if (usage) {
    chunk.usage = usage
  }

  return [`data: ${JSON.stringify(chunk)}\n\n`]
}
|
||||
|
||||
// Terminal error/incomplete events:
// - status "failed"  → forward an OpenAI-style error SSE event (keeps the
//   error semantics for the client);
// - any other status (e.g. "incomplete") → emit a normal terminal chunk with
//   a mapped finish_reason plus usage, so clients see a clean stream end.
_handleResponseError(eventData, model, state) {
  const resp = eventData.response || {}
  const results = []

  // response.failed → error SSE event.
  if (resp.status === 'failed') {
    const err = resp.error || {}
    results.push(
      `data: ${JSON.stringify({
        error: {
          message: err.message || 'Response failed',
          type: err.type || 'server_error',
          code: err.code || null
        }
      })}\n\n`
    )
  }

  // response.incomplete and other non-failed statuses → terminal chunk.
  if (resp.status !== 'failed') {
    const chunk = this._makeChunk(state, model)
    if (state.functionCallIndex >= 0) {
      chunk.choices[0].finish_reason = 'tool_calls'
    } else {
      chunk.choices[0].finish_reason = this._mapResponseStatus(resp)
    }
    const usage = this._mapUsage(resp.usage)
    if (usage) {
      chunk.usage = usage
    }
    results.push(`data: ${JSON.stringify(chunk)}\n\n`)
  }

  return results
}
|
||||
|
||||
_handleStreamError(eventData) {
|
||||
// type: "error" → 转为 OpenAI 格式的 error SSE 事件
|
||||
const errorObj = {
|
||||
error: {
|
||||
message: eventData.message || 'Unknown error',
|
||||
type: 'server_error',
|
||||
code: eventData.code || null
|
||||
}
|
||||
}
|
||||
return [`data: ${JSON.stringify(errorObj)}\n\n`]
|
||||
}
|
||||
|
||||
// --- Utility helpers ---
|
||||
|
||||
_emitChunk(state, model, delta) {
|
||||
const chunk = this._makeChunk(state, model)
|
||||
if (!state.roleSent) {
|
||||
delta.role = 'assistant'
|
||||
state.roleSent = true
|
||||
}
|
||||
chunk.choices[0].delta = delta
|
||||
return [`data: ${JSON.stringify(chunk)}\n\n`]
|
||||
}
|
||||
|
||||
_makeChunk(state, model) {
|
||||
return {
|
||||
id: state.responseId || `chatcmpl-${Date.now()}`,
|
||||
object: 'chat.completion.chunk',
|
||||
created: state.createdAt || Math.floor(Date.now() / 1000),
|
||||
model: state.model || model,
|
||||
choices: [{ index: 0, delta: {}, finish_reason: null }]
|
||||
}
|
||||
}
|
||||
|
||||
_mapResponseStatus(resp) {
|
||||
const { status } = resp
|
||||
if (!status || status === 'completed') {
|
||||
return 'stop'
|
||||
}
|
||||
if (status === 'incomplete') {
|
||||
const reason = resp.incomplete_details?.reason
|
||||
if (reason === 'max_output_tokens') {
|
||||
return 'length'
|
||||
}
|
||||
if (reason === 'content_filter') {
|
||||
return 'content_filter'
|
||||
}
|
||||
return 'length'
|
||||
}
|
||||
// failed, cancelled, etc.
|
||||
return 'stop'
|
||||
}
|
||||
|
||||
_parseCreatedAt(createdAt) {
|
||||
if (!createdAt) {
|
||||
return Math.floor(Date.now() / 1000)
|
||||
}
|
||||
if (typeof createdAt === 'number') {
|
||||
return createdAt
|
||||
}
|
||||
const ts = Math.floor(new Date(createdAt).getTime() / 1000)
|
||||
return isNaN(ts) ? Math.floor(Date.now() / 1000) : ts
|
||||
}
|
||||
|
||||
_mapUsage(usage) {
|
||||
if (!usage) {
|
||||
return undefined
|
||||
}
|
||||
const result = {
|
||||
prompt_tokens: usage.input_tokens || 0,
|
||||
completion_tokens: usage.output_tokens || 0,
|
||||
total_tokens: usage.total_tokens || 0
|
||||
}
|
||||
if (usage.input_tokens_details?.cached_tokens > 0) {
|
||||
result.prompt_tokens_details = { cached_tokens: usage.input_tokens_details.cached_tokens }
|
||||
}
|
||||
if (usage.output_tokens_details?.reasoning_tokens > 0) {
|
||||
result.completion_tokens_details = {
|
||||
reasoning_tokens: usage.output_tokens_details.reasoning_tokens
|
||||
}
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
// =============================================
// Request conversion: Chat Completions → Responses API
// =============================================
||||
|
||||
/**
|
||||
* 将 OpenAI Chat Completions 请求体转为 Responses API 格式
|
||||
* @param {Object} chatBody - Chat Completions 格式请求体
|
||||
* @returns {Object} Responses API 格式请求体
|
||||
*/
|
||||
buildRequestFromOpenAI(chatBody) {
|
||||
const result = {}
|
||||
|
||||
if (chatBody.model) {
|
||||
result.model = chatBody.model
|
||||
}
|
||||
if (chatBody.stream !== undefined) {
|
||||
result.stream = chatBody.stream
|
||||
}
|
||||
|
||||
// messages → input(instructions 由调用方设置,此处只转换消息到 input)
|
||||
const input = []
|
||||
|
||||
for (const msg of chatBody.messages || []) {
|
||||
switch (msg.role) {
|
||||
case 'system':
|
||||
case 'developer':
|
||||
input.push({
|
||||
type: 'message',
|
||||
role: 'developer',
|
||||
content: this._wrapContent(msg.content, 'user')
|
||||
})
|
||||
break
|
||||
|
||||
case 'user':
|
||||
input.push({
|
||||
type: 'message',
|
||||
role: 'user',
|
||||
content: this._wrapContent(msg.content, 'user')
|
||||
})
|
||||
break
|
||||
|
||||
case 'assistant':
|
||||
if (msg.content) {
|
||||
input.push({
|
||||
type: 'message',
|
||||
role: 'assistant',
|
||||
content: this._wrapContent(msg.content, 'assistant')
|
||||
})
|
||||
}
|
||||
if (msg.tool_calls && msg.tool_calls.length > 0) {
|
||||
for (const tc of msg.tool_calls) {
|
||||
if (tc.type === 'function') {
|
||||
input.push({
|
||||
type: 'function_call',
|
||||
call_id: tc.id,
|
||||
name: this._shortenToolName(tc.function?.name || ''),
|
||||
arguments:
|
||||
typeof tc.function?.arguments === 'string'
|
||||
? tc.function.arguments
|
||||
: JSON.stringify(tc.function?.arguments ?? {})
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
break
|
||||
|
||||
case 'tool':
|
||||
input.push({
|
||||
type: 'function_call_output',
|
||||
call_id: msg.tool_call_id,
|
||||
output: typeof msg.content === 'string' ? msg.content : JSON.stringify(msg.content)
|
||||
})
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
result.input = input
|
||||
|
||||
// temperature/top_p/max_output_tokens 不透传,与上游 Codex API 行为保持一致
|
||||
|
||||
// reasoning 配置
|
||||
result.reasoning = {
|
||||
effort: chatBody.reasoning_effort || 'medium',
|
||||
summary: 'auto'
|
||||
}
|
||||
|
||||
// 固定值
|
||||
result.parallel_tool_calls = true
|
||||
result.include = ['reasoning.encrypted_content']
|
||||
result.store = false
|
||||
|
||||
// 收集所有工具名(tools + assistant.tool_calls),统一构建缩短映射
|
||||
const allToolNames = new Set()
|
||||
if (chatBody.tools) {
|
||||
for (const t of chatBody.tools) {
|
||||
if (t.type === 'function' && t.function?.name) {
|
||||
allToolNames.add(t.function.name)
|
||||
}
|
||||
}
|
||||
}
|
||||
for (const msg of chatBody.messages || []) {
|
||||
if (msg.role === 'assistant' && msg.tool_calls) {
|
||||
for (const tc of msg.tool_calls) {
|
||||
if (tc.type === 'function' && tc.function?.name) {
|
||||
allToolNames.add(tc.function.name)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
if (allToolNames.size > 0) {
|
||||
this._toolNameMap = this._buildShortNameMap([...allToolNames])
|
||||
this._reverseToolNameMap = {}
|
||||
for (const [orig, short] of Object.entries(this._toolNameMap)) {
|
||||
if (orig !== short) {
|
||||
this._reverseToolNameMap[short] = orig
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// tools 展平
|
||||
if (chatBody.tools && chatBody.tools.length > 0) {
|
||||
result.tools = this._convertTools(chatBody.tools)
|
||||
}
|
||||
|
||||
// tool_choice
|
||||
if (chatBody.tool_choice !== undefined) {
|
||||
result.tool_choice = this._convertToolChoice(chatBody.tool_choice)
|
||||
}
|
||||
|
||||
// response_format → text.format
|
||||
if (chatBody.response_format) {
|
||||
const text = this._convertResponseFormat(chatBody.response_format)
|
||||
if (text && Object.keys(text).length > 0) {
|
||||
result.text = text
|
||||
}
|
||||
}
|
||||
|
||||
// session 字段透传(handleResponses 用于 session hash)
|
||||
if (chatBody.session_id) {
|
||||
result.session_id = chatBody.session_id
|
||||
}
|
||||
if (chatBody.conversation_id) {
|
||||
result.conversation_id = chatBody.conversation_id
|
||||
}
|
||||
if (chatBody.prompt_cache_key) {
|
||||
result.prompt_cache_key = chatBody.prompt_cache_key
|
||||
}
|
||||
|
||||
return result
|
||||
}
|
||||
|
||||
// --- Request-conversion helpers ---
|
||||
|
||||
_extractTextContent(content) {
|
||||
if (typeof content === 'string') {
|
||||
return content
|
||||
}
|
||||
if (Array.isArray(content)) {
|
||||
return content
|
||||
.filter((c) => c.type === 'text')
|
||||
.map((c) => c.text)
|
||||
.join('')
|
||||
}
|
||||
return String(content || '')
|
||||
}
|
||||
|
||||
_wrapContent(content, role) {
|
||||
const textType = role === 'assistant' ? 'output_text' : 'input_text'
|
||||
if (typeof content === 'string') {
|
||||
return [{ type: textType, text: content }]
|
||||
}
|
||||
if (Array.isArray(content)) {
|
||||
return content
|
||||
.map((item) => {
|
||||
switch (item.type) {
|
||||
case 'text':
|
||||
return { type: textType, text: item.text }
|
||||
case 'image_url':
|
||||
return {
|
||||
type: 'input_image',
|
||||
image_url: item.image_url?.url || item.image_url
|
||||
}
|
||||
default:
|
||||
return item
|
||||
}
|
||||
})
|
||||
.filter(Boolean)
|
||||
}
|
||||
return [{ type: textType, text: String(content || '') }]
|
||||
}
|
||||
|
||||
_convertTools(tools) {
|
||||
return tools
|
||||
.filter((t) => t && t.type)
|
||||
.map((t) => {
|
||||
// 非 function 工具(web_search、code_interpreter 等)原样透传
|
||||
if (t.type !== 'function') {
|
||||
return t
|
||||
}
|
||||
// function 工具展平:去除 function wrapper
|
||||
if (!t.function) {
|
||||
return null
|
||||
}
|
||||
const tool = { type: 'function', name: this._shortenToolName(t.function.name) }
|
||||
if (t.function.description) {
|
||||
tool.description = t.function.description
|
||||
}
|
||||
if (t.function.parameters) {
|
||||
tool.parameters = t.function.parameters
|
||||
}
|
||||
if (t.function.strict !== undefined) {
|
||||
tool.strict = t.function.strict
|
||||
}
|
||||
return tool
|
||||
})
|
||||
.filter(Boolean)
|
||||
}
|
||||
|
||||
_convertToolChoice(tc) {
|
||||
if (typeof tc === 'string') {
|
||||
return tc
|
||||
}
|
||||
if (tc && typeof tc === 'object') {
|
||||
if (tc.type === 'function' && tc.function?.name) {
|
||||
return { type: 'function', name: this._shortenToolName(tc.function.name) }
|
||||
}
|
||||
return tc
|
||||
}
|
||||
return tc
|
||||
}
|
||||
|
||||
_convertResponseFormat(rf) {
|
||||
if (!rf || !rf.type) {
|
||||
return {}
|
||||
}
|
||||
if (rf.type === 'text') {
|
||||
return { format: { type: 'text' } }
|
||||
}
|
||||
if (rf.type === 'json_schema' && rf.json_schema) {
|
||||
const format = { type: 'json_schema' }
|
||||
if (rf.json_schema.name) {
|
||||
format.name = rf.json_schema.name
|
||||
}
|
||||
if (rf.json_schema.strict !== undefined) {
|
||||
format.strict = rf.json_schema.strict
|
||||
}
|
||||
if (rf.json_schema.schema) {
|
||||
format.schema = rf.json_schema.schema
|
||||
}
|
||||
return { format }
|
||||
}
|
||||
return {}
|
||||
}
|
||||
|
||||
/**
|
||||
* 工具名缩短:优先使用唯一化 map,无 map 时做简单截断
|
||||
*/
|
||||
_shortenToolName(name) {
|
||||
if (!name) {
|
||||
return name
|
||||
}
|
||||
if (this._toolNameMap && this._toolNameMap[name]) {
|
||||
return this._toolNameMap[name]
|
||||
}
|
||||
const LIMIT = 64
|
||||
if (name.length <= LIMIT) {
|
||||
return name
|
||||
}
|
||||
if (name.startsWith('mcp__')) {
|
||||
const idx = name.lastIndexOf('__')
|
||||
if (idx > 0) {
|
||||
const candidate = `mcp__${name.slice(idx + 2)}`
|
||||
return candidate.length > LIMIT ? candidate.slice(0, LIMIT) : candidate
|
||||
}
|
||||
}
|
||||
return name.slice(0, LIMIT)
|
||||
}
|
||||
|
||||
/**
|
||||
* 构建工具名缩短映射(保证唯一)
|
||||
* 构建唯一缩短名映射,处理碰撞
|
||||
* @returns {Object} { originalName: shortName }
|
||||
*/
|
||||
_buildShortNameMap(names) {
|
||||
const LIMIT = 64
|
||||
const used = new Set()
|
||||
const map = {}
|
||||
|
||||
const baseCandidate = (n) => {
|
||||
if (n.length <= LIMIT) {
|
||||
return n
|
||||
}
|
||||
if (n.startsWith('mcp__')) {
|
||||
const idx = n.lastIndexOf('__')
|
||||
if (idx > 0) {
|
||||
const cand = `mcp__${n.slice(idx + 2)}`
|
||||
return cand.length > LIMIT ? cand.slice(0, LIMIT) : cand
|
||||
}
|
||||
}
|
||||
return n.slice(0, LIMIT)
|
||||
}
|
||||
|
||||
const makeUnique = (cand) => {
|
||||
if (!used.has(cand)) {
|
||||
return cand
|
||||
}
|
||||
const base = cand
|
||||
for (let i = 1; ; i++) {
|
||||
const suffix = `_${i}`
|
||||
const allowed = LIMIT - suffix.length
|
||||
const tmp = (base.length > allowed ? base.slice(0, allowed) : base) + suffix
|
||||
if (!used.has(tmp)) {
|
||||
return tmp
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for (const n of names) {
|
||||
const short = makeUnique(baseCandidate(n))
|
||||
used.add(short)
|
||||
map[n] = short
|
||||
}
|
||||
return map
|
||||
}
|
||||
|
||||
/**
|
||||
* 逆向恢复工具名(用于响应转换)
|
||||
*/
|
||||
_restoreToolName(shortName) {
|
||||
return this._reverseToolNameMap[shortName] || shortName
|
||||
}
|
||||
}
|
||||
|
||||
module.exports = CodexToOpenAIConverter
|
||||
392
src/services/geminiToOpenAI.js
Normal file
392
src/services/geminiToOpenAI.js
Normal file
@@ -0,0 +1,392 @@
|
||||
/**
 * Gemini response format → OpenAI Chat Completions format converter.
 * Translates native Gemini API responses into standard OpenAI
 * chat.completion / chat.completion.chunk objects, for both streaming
 * (SSE) and non-streaming responses.
 */

class GeminiToOpenAIConverter {
  /**
   * Create the per-request mutable state used by convertStreamChunk.
   * @returns {Object} state: SSE reassembly buffer, chunk id/created
   *   metadata, tool-call index bookkeeping, and whether the assistant
   *   role delta has been emitted yet.
   */
  createStreamState() {
    return {
      buffer: '',
      id: `chatcmpl-${Date.now()}`,
      created: Math.floor(Date.now() / 1000),
      functionIndex: 0,
      candidatesWithFunctionCalls: new Set(),
      roleSent: false
    }
  }

  /**
   * Streaming conversion: intercept a res.write chunk and translate it
   * into OpenAI-format SSE text.
   * @param {Buffer|string} rawChunk - raw bytes from the upstream stream
   * @param {string} model - model name fallback for emitted chunks
   * @param {Object} state - state object returned by createStreamState()
   * @returns {string|null} converted SSE string, or null when nothing
   *   complete could be emitted yet (caller should skip the write)
   */
  convertStreamChunk(rawChunk, model, state) {
    // Normalize CRLF so event splitting below only has to handle '\n\n'.
    const str = (typeof rawChunk === 'string' ? rawChunk : rawChunk.toString()).replace(
      /\r\n/g,
      '\n'
    )

    // Heartbeat passthrough: only forward pure whitespace when the buffer is
    // empty. When the buffer holds data, whitespace must be processed — it
    // may be the '\n\n' separator completing a buffered SSE event.
    if (!str.trim() && !state.buffer) {
      return str
    }

    state.buffer += str
    let output = ''

    // Split the buffer into complete SSE events on '\n\n'.
    let idx
    while ((idx = state.buffer.indexOf('\n\n')) !== -1) {
      const event = state.buffer.slice(0, idx)
      state.buffer = state.buffer.slice(idx + 2)

      if (!event.trim()) {
        continue
      }

      const lines = event.split('\n')
      for (const line of lines) {
        if (!line.startsWith('data: ')) {
          continue
        }

        const jsonStr = line.slice(6).trim()
        if (!jsonStr) {
          continue
        }

        // Consume [DONE] here; the res.end patch emits the final [DONE].
        if (jsonStr === '[DONE]') {
          continue
        }

        let geminiData
        try {
          geminiData = JSON.parse(jsonStr)
        } catch (e) {
          // Unparseable payload: pass through unchanged.
          output += `data: ${jsonStr}\n\n`
          continue
        }

        // Error events pass through unchanged.
        if (geminiData.error) {
          output += `data: ${jsonStr}\n\n`
          continue
        }

        const chunks = this._convertGeminiChunkToOpenAI(geminiData, model, state)
        for (const c of chunks) {
          output += `data: ${JSON.stringify(c)}\n\n`
        }
      }
    }

    return output || null
  }

  /**
   * Non-streaming conversion: Gemini JSON → OpenAI chat.completion.
   * @param {Object} geminiData - { candidates, usageMetadata, modelVersion,
   *   responseId } or a v1internal `{ response: {...} }` wrapper
   * @param {string} model - model name fallback
   * @returns {Object} OpenAI chat.completion object
   */
  convertResponse(geminiData, model) {
    // Accept the v1internal wrapper format { response: { candidates: [...] } }.
    const data = geminiData.response || geminiData
    const candidates = data.candidates || []
    const choices = candidates.map((candidate, i) => {
      const parts = candidate.content?.parts || []
      const textParts = []
      const thoughtParts = []
      const toolCalls = []
      const images = []
      let fnIndex = 0

      for (const part of parts) {
        // Skip signature-only parts (thoughtSignature with no payload).
        if (
          part.thoughtSignature &&
          !part.text &&
          !part.functionCall &&
          !part.inlineData &&
          !part.inline_data
        ) {
          continue
        }

        if (part.functionCall) {
          // Gemini has no call id; synthesize one from name + timestamp + index.
          toolCalls.push({
            id: `${part.functionCall.name}-${Date.now()}-${fnIndex}`,
            type: 'function',
            function: {
              name: part.functionCall.name,
              arguments: JSON.stringify(part.functionCall.args || {})
            }
          })
          fnIndex++
        } else if (part.text !== undefined) {
          // `thought` parts map to reasoning_content, plain text to content.
          if (part.thought) {
            thoughtParts.push(part.text)
          } else {
            textParts.push(part.text)
          }
        } else if (part.inlineData || part.inline_data) {
          // Inline binary data (both camelCase and snake_case spellings)
          // becomes a data-URL image entry.
          const inlineData = part.inlineData || part.inline_data
          const imgData = inlineData.data
          if (imgData) {
            const mimeType = inlineData.mimeType || inlineData.mime_type || 'image/png'
            images.push({
              type: 'image_url',
              index: images.length,
              image_url: { url: `data:${mimeType};base64,${imgData}` }
            })
          }
        }
      }

      const message = { role: 'assistant' }
      if (textParts.length > 0) {
        message.content = textParts.join('')
      } else {
        message.content = null
      }
      if (thoughtParts.length > 0) {
        message.reasoning_content = thoughtParts.join('')
      }
      if (toolCalls.length > 0) {
        message.tool_calls = toolCalls
      }
      if (images.length > 0) {
        message.images = images
      }

      // Any tool call forces finish_reason 'tool_calls', otherwise map
      // Gemini's finishReason (default 'stop').
      let finishReason = 'stop'
      if (toolCalls.length > 0) {
        finishReason = 'tool_calls'
      } else if (candidate.finishReason) {
        finishReason = this._mapFinishReason(candidate.finishReason)
      }

      return {
        index: candidate.index !== undefined ? candidate.index : i,
        message,
        finish_reason: finishReason
      }
    })

    const result = {
      id: data.responseId || `chatcmpl-${Date.now()}`,
      object: 'chat.completion',
      created: this._parseCreateTime(data.createTime),
      model: data.modelVersion || model,
      choices
    }

    const usage = this._mapUsage(data.usageMetadata)
    if (usage) {
      result.usage = usage
    }

    return result
  }

  // --- Internal helpers ---

  /**
   * Convert one parsed Gemini stream payload into zero or more OpenAI
   * chat.completion.chunk objects, updating `state` as a side effect
   * (id/model/created metadata, role emission, tool-call indices).
   */
  _convertGeminiChunkToOpenAI(geminiData, model, state) {
    // Accept the v1internal wrapper format { response: { candidates: [...] } }.
    const data = geminiData.response || geminiData

    // Update stream metadata from whatever this payload provides.
    if (data.responseId) {
      state.id = data.responseId
    }
    if (data.modelVersion) {
      state.model = data.modelVersion
    }
    if (data.createTime) {
      const ts = this._parseCreateTime(data.createTime)
      // _parseCreateTime falls back to "now" on parse failure; only adopt the
      // timestamp when it differs from that fallback (heuristic — a createTime
      // equal to the current second is ignored).
      if (ts !== Math.floor(Date.now() / 1000)) {
        state.created = ts
      }
    }

    const candidates = data.candidates || []
    if (candidates.length === 0 && data.usageMetadata) {
      // Usage-only final chunk.
      const chunk = this._makeChunk(state, model)
      chunk.choices[0].finish_reason = 'stop'
      chunk.usage = this._mapUsage(data.usageMetadata)
      return [chunk]
    }

    const results = []
    for (let i = 0; i < candidates.length; i++) {
      const candidate = candidates[i]
      const candidateIndex = candidate.index !== undefined ? candidate.index : i
      const parts = candidate.content?.parts || []

      for (const part of parts) {
        // Skip signature-only parts (thoughtSignature with no payload).
        if (
          part.thoughtSignature &&
          !part.text &&
          !part.functionCall &&
          !part.inlineData &&
          !part.inline_data
        ) {
          continue
        }

        const chunk = this._makeChunk(state, model)
        chunk.choices[0].index = candidateIndex

        if (part.functionCall) {
          // Remember this candidate made a tool call so its finish_reason
          // becomes 'tool_calls' below.
          state.candidatesWithFunctionCalls.add(candidateIndex)
          chunk.choices[0].delta = {
            tool_calls: [
              {
                index: state.functionIndex,
                id: `${part.functionCall.name}-${Date.now()}-${state.functionIndex}`,
                type: 'function',
                function: {
                  name: part.functionCall.name,
                  arguments: JSON.stringify(part.functionCall.args || {})
                }
              }
            ]
          }
          this._injectRole(state, chunk.choices[0].delta)
          state.functionIndex++
          results.push(chunk)
        } else if (part.text !== undefined) {
          if (part.thought) {
            chunk.choices[0].delta = { reasoning_content: part.text }
          } else {
            chunk.choices[0].delta = { content: part.text }
          }
          this._injectRole(state, chunk.choices[0].delta)
          results.push(chunk)
        } else if (part.inlineData || part.inline_data) {
          const inlineData = part.inlineData || part.inline_data
          const imgData = inlineData.data
          if (imgData) {
            const mimeType = inlineData.mimeType || inlineData.mime_type || 'image/png'
            chunk.choices[0].delta = {
              images: [
                {
                  type: 'image_url',
                  index: 0,
                  image_url: { url: `data:${mimeType};base64,${imgData}` }
                }
              ]
            }
            this._injectRole(state, chunk.choices[0].delta)
            results.push(chunk)
          }
        }
      }

      // finish_reason chunk for this candidate.
      if (candidate.finishReason) {
        const chunk = this._makeChunk(state, model)
        chunk.choices[0].index = candidateIndex

        if (state.candidatesWithFunctionCalls.has(candidateIndex)) {
          chunk.choices[0].finish_reason = 'tool_calls'
        } else {
          chunk.choices[0].finish_reason = this._mapFinishReason(candidate.finishReason)
        }

        if (data.usageMetadata) {
          chunk.usage = this._mapUsage(data.usageMetadata)
        }

        results.push(chunk)
      }
    }

    return results
  }

  /**
   * Map a Gemini FinishReason enum value to the OpenAI finish_reason set
   * ('stop' | 'length' | 'content_filter').
   */
  _mapFinishReason(geminiReason) {
    // Complete mapping over the official Gemini FinishReason enum.
    const fr = geminiReason.toLowerCase()
    if (fr === 'stop') {
      return 'stop'
    }
    if (fr === 'max_tokens') {
      return 'length'
    }
    // Tool-call anomalies → stop (a failed call is not a content filter).
    if (
      fr === 'malformed_function_call' ||
      fr === 'too_many_tool_calls' ||
      fr === 'unexpected_tool_call'
    ) {
      return 'stop'
    }
    // Content-policy / safety blocks → content_filter.
    if (
      fr === 'safety' ||
      fr === 'recitation' ||
      fr === 'blocklist' ||
      fr === 'prohibited_content' ||
      fr === 'spii' ||
      fr === 'image_safety' ||
      fr === 'language'
    ) {
      return 'content_filter'
    }
    // FINISH_REASON_UNSPECIFIED, OTHER, unknown values → stop.
    return 'stop'
  }

  // Build an empty chat.completion.chunk skeleton from the stream state.
  _makeChunk(state, model) {
    return {
      id: state.id,
      object: 'chat.completion.chunk',
      created: state.created,
      model: state.model || model,
      choices: [{ index: 0, delta: {}, finish_reason: null }]
    }
  }

  // Attach role: 'assistant' to the first delta of the stream only.
  _injectRole(state, delta) {
    if (!state.roleSent) {
      delta.role = 'assistant'
      state.roleSent = true
    }
  }

  // Parse Gemini createTime into a Unix-seconds timestamp, falling back to
  // "now" when absent or unparseable.
  _parseCreateTime(createTime) {
    if (!createTime) {
      return Math.floor(Date.now() / 1000)
    }
    // Per Gemini docs, createTime is an RFC 3339 timestamp string.
    const ts = Math.floor(new Date(createTime).getTime() / 1000)
    return isNaN(ts) ? Math.floor(Date.now() / 1000) : ts
  }

  /**
   * Map Gemini usageMetadata to the OpenAI usage object.
   * completion_tokens includes thought tokens; reasoning/cached token
   * details are added only when present and positive.
   * @returns {Object|undefined} undefined when no metadata was provided
   */
  _mapUsage(meta) {
    if (!meta) {
      return undefined
    }
    const completionTokens = (meta.candidatesTokenCount || 0) + (meta.thoughtsTokenCount || 0)
    const result = {
      prompt_tokens: meta.promptTokenCount || 0,
      completion_tokens: completionTokens,
      total_tokens: meta.totalTokenCount || 0
    }
    if (meta.thoughtsTokenCount > 0) {
      result.completion_tokens_details = { reasoning_tokens: meta.thoughtsTokenCount }
    }
    if (meta.cachedContentTokenCount > 0) {
      result.prompt_tokens_details = { cached_tokens: meta.cachedContentTokenCount }
    }
    return result
  }
}
|
||||
|
||||
module.exports = GeminiToOpenAIConverter
|
||||
Reference in New Issue
Block a user