Mirror of https://github.com/Wei-Shaw/claude-relay-service.git, synced 2026-01-22 16:43:35 +00:00
fix: prevent service crash when a client disconnects
When a client disconnects in the middle of a streaming response, the catch block's attempt to send a JSON error response triggers ERR_HTTP_HEADERS_SENT, and the resulting unhandledRejection crashes the service.

Files fixed:
- src/routes/openaiClaudeRoutes.js
- src/routes/openaiGeminiRoutes.js

Changes:
- Check res.headersSent so we never try to send again after the response has already gone out
- Log client disconnects at INFO level instead of ERROR
- Answer client disconnects with status 499 (Client Closed Request)

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
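For context on the pattern the commit applies, here is a minimal standalone sketch of the same error-handling logic before the diffs below. It is not code from this repository: the helper name `sendErrorResponse` and its signature are hypothetical, and it assumes an Express-style `res` object and a Winston-style `logger`.

```js
// Minimal sketch (hypothetical helper, not from this commit): skip writing if
// headers were already sent, and answer client disconnects with 499 instead of
// treating them as server errors.
function sendErrorResponse(res, error, logger) {
  const isClientDisconnect = error.message === 'Client disconnected'

  if (isClientDisconnect) {
    logger.info('🔌 Stream ended: Client disconnected')
  } else {
    logger.error('❌ Request error:', error)
  }

  // If the streaming response already wrote headers, writing again would
  // throw ERR_HTTP_HEADERS_SENT, so bail out here.
  if (res.headersSent) {
    return
  }

  if (isClientDisconnect) {
    // 499 Client Closed Request (nginx convention)
    res.status(499).end()
    return
  }

  res.status(error.status || 500).json({
    error: {
      message: error.message || 'Internal server error',
      type: 'server_error',
      code: 'internal_error'
    }
  })
}
```

The order of the checks is the point: `res.headersSent` is consulted before any write, so a disconnect that happens mid-stream is only logged, while a disconnect before any output gets a 499.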
src/routes/openaiClaudeRoutes.js

```diff
@@ -402,16 +402,29 @@ async function handleChatCompletion(req, res, apiKeyData) {
     const duration = Date.now() - startTime
     logger.info(`✅ OpenAI-Claude request completed in ${duration}ms`)
   } catch (error) {
-    logger.error('❌ OpenAI-Claude request error:', error)
-
-    const status = error.status || 500
-    res.status(status).json({
-      error: {
-        message: error.message || 'Internal server error',
-        type: 'server_error',
-        code: 'internal_error'
-      }
-    })
+    // A client disconnecting on its own is normal; log it at INFO level
+    if (error.message === 'Client disconnected') {
+      logger.info('🔌 OpenAI-Claude stream ended: Client disconnected')
+    } else {
+      logger.error('❌ OpenAI-Claude request error:', error)
+    }
+
+    // Check whether the response was already sent (streaming case) to avoid ERR_HTTP_HEADERS_SENT
+    if (!res.headersSent) {
+      // Use status 499 (Client Closed Request) for client disconnects
+      if (error.message === 'Client disconnected') {
+        res.status(499).end()
+      } else {
+        const status = error.status || 500
+        res.status(status).json({
+          error: {
+            message: error.message || 'Internal server error',
+            type: 'server_error',
+            code: 'internal_error'
+          }
+        })
+      }
+    }
   } finally {
     // Clean up resources
     if (abortController) {
```
src/routes/openaiGeminiRoutes.js

```diff
@@ -604,7 +604,12 @@ router.post('/v1/chat/completions', authenticateApiKey, async (req, res) => {
     const duration = Date.now() - startTime
     logger.info(`OpenAI-Gemini request completed in ${duration}ms`)
   } catch (error) {
-    logger.error('OpenAI-Gemini request error:', error)
+    // A client disconnecting on its own is normal; log it at INFO level
+    if (error.message === 'Client disconnected') {
+      logger.info('🔌 OpenAI-Gemini stream ended: Client disconnected')
+    } else {
+      logger.error('OpenAI-Gemini request error:', error)
+    }
 
     // Handle rate limiting
     if (error.status === 429) {
@@ -613,17 +618,24 @@ router.post('/v1/chat/completions', authenticateApiKey, async (req, res) => {
       }
     }
 
-    // Return an error response in OpenAI format
-    const status = error.status || 500
-    const errorResponse = {
-      error: error.error || {
-        message: error.message || 'Internal server error',
-        type: 'server_error',
-        code: 'internal_error'
-      }
-    }
-
-    res.status(status).json(errorResponse)
+    // Check whether the response was already sent (streaming case) to avoid ERR_HTTP_HEADERS_SENT
+    if (!res.headersSent) {
+      // Use status 499 (Client Closed Request) for client disconnects
+      if (error.message === 'Client disconnected') {
+        res.status(499).end()
+      } else {
+        // Return an error response in OpenAI format
+        const status = error.status || 500
+        const errorResponse = {
+          error: error.error || {
+            message: error.message || 'Internal server error',
+            type: 'server_error',
+            code: 'internal_error'
+          }
+        }
+        res.status(status).json(errorResponse)
+      }
+    }
   } finally {
     // Clean up resources
     if (abortController) {
```
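To see why the guard matters, the following self-contained sketch reproduces the failure mode and the fix. It is hypothetical and not taken from claude-relay-service: the route path, the simulated streaming loop, and the way the disconnect is turned into a 'Client disconnected' error are assumptions for illustration only (the real handlers stream through the relay services and an abortController, as the finally blocks above suggest).

```js
// Hypothetical reproduction, not code from this repository.
const express = require('express')

const app = express()

app.post('/v1/chat/completions', async (req, res) => {
  const abortController = new AbortController()

  // Turn a dropped client connection into an abort of the in-flight work.
  req.on('close', () => {
    if (!res.writableEnded) {
      abortController.abort()
    }
  })

  try {
    res.setHeader('Content-Type', 'text/event-stream')

    for (let i = 0; i < 10; i++) {
      if (abortController.signal.aborted) {
        // Same error message the routes above check for.
        throw new Error('Client disconnected')
      }
      res.write(`data: chunk ${i}\n\n`)
      await new Promise((resolve) => setTimeout(resolve, 500))
    }
    res.end()
  } catch (error) {
    if (error.message === 'Client disconnected') {
      console.info('Stream ended: Client disconnected')
    } else {
      console.error('Request error:', error)
    }

    // Without this guard, calling res.status(...).json(...) after res.write(...)
    // throws ERR_HTTP_HEADERS_SENT inside the async handler, which surfaces as an
    // unhandledRejection and can bring the whole process down.
    if (!res.headersSent) {
      if (error.message === 'Client disconnected') {
        res.status(499).end()
      } else {
        res.status(error.status || 500).json({
          error: { message: error.message || 'Internal server error' }
        })
      }
    }
  }
})

app.listen(3000)
```

Aborting a `curl -N` request mid-stream against this handler just logs an INFO line and the process stays up; swap the guarded block for the pre-fix version that calls res.status(...).json(...) unconditionally and the same disconnect raises ERR_HTTP_HEADERS_SENT, as described in the commit message.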