Mirror of https://github.com/Wei-Shaw/claude-relay-service.git (synced 2026-01-22 16:43:35 +00:00)
Merge branch 'bottotl/main' into dev

19 src/app.js
@@ -14,6 +14,7 @@ const cacheMonitor = require('./utils/cacheMonitor')
// Import routes
const apiRoutes = require('./routes/api')
+const unifiedRoutes = require('./routes/unified')
const adminRoutes = require('./routes/admin')
const webRoutes = require('./routes/web')
const apiStatsRoutes = require('./routes/apiStats')
@@ -55,6 +56,11 @@ class Application {
logger.info('🔄 Initializing pricing service...')
await pricingService.initialize()
+
+// 📋 Initialize the model service
+logger.info('🔄 Initializing model service...')
+const modelService = require('./services/modelService')
+await modelService.initialize()

// 📊 Initialize cache monitoring
await this.initializeCacheMonitoring()
@@ -251,6 +257,7 @@ class Application {

// 🛣️ Routes
this.app.use('/api', apiRoutes)
+this.app.use('/api', unifiedRoutes) // Unified smart routing (supports /v1/chat/completions, etc.)
this.app.use('/claude', apiRoutes) // /claude route alias, same functionality as /api
this.app.use('/admin', adminRoutes)
this.app.use('/users', userRoutes)
@@ -262,7 +269,8 @@ class Application {
this.app.use('/gemini', geminiRoutes) // Keep the original path for backward compatibility
this.app.use('/openai/gemini', openaiGeminiRoutes)
this.app.use('/openai/claude', openaiClaudeRoutes)
-this.app.use('/openai', openaiRoutes)
+this.app.use('/openai', unifiedRoutes) // Reuse the unified smart routing to support /openai/v1/chat/completions
+this.app.use('/openai', openaiRoutes) // Codex API routes (/openai/responses, /openai/v1/responses)
// Droid routes: support multiple Factory.ai endpoints
this.app.use('/droid', droidRoutes) // Droid (Factory.ai) API forwarding
this.app.use('/azure', azureOpenaiRoutes)
@@ -630,6 +638,15 @@ class Application {
logger.error('❌ Error cleaning up pricing service:', error)
}
+
+// Clean up the model service's file watcher
+try {
+  const modelService = require('./services/modelService')
+  modelService.cleanup()
+  logger.info('📋 Model service cleaned up')
+} catch (error) {
+  logger.error('❌ Error cleaning up model service:', error)
+}

// Stop the rate limit cleanup service
try {
  const rateLimitCleanupService = require('./services/rateLimitCleanupService')
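
Taken together, the app.js changes wire the new model service into the application lifecycle: it is initialized at startup right after the pricing service, and its file watcher is closed during shutdown. A condensed sketch of the resulting order (illustrative, not the literal file):

// Startup (Application.initialize, condensed):
await pricingService.initialize()
const modelService = require('./services/modelService')
await modelService.initialize() // loads supported_models.json and starts the watcher
await this.initializeCacheMonitoring()

// Shutdown (cleanup, condensed):
modelService.cleanup() // closes the file watcher so the process can exit cleanly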
@@ -11,7 +11,6 @@ const logger = require('../utils/logger')
const { getEffectiveModel, parseVendorPrefixedModel } = require('../utils/modelHelper')
const sessionHelper = require('../utils/sessionHelper')
const { updateRateLimitCounters } = require('../utils/rateLimitHelper')

const router = express.Router()

function queueRateLimitUpdate(rateLimitInfo, usageSummary, model, context = '') {
@@ -722,40 +721,23 @@ router.post('/v1/messages', authenticateApiKey, handleMessagesRequest)
// 🚀 Claude API messages endpoint - /claude/v1/messages (alias)
router.post('/claude/v1/messages', authenticateApiKey, handleMessagesRequest)

-// 📋 Model list endpoint - required by the Claude Code client
+// 📋 Model list endpoint - supports Claude, OpenAI, Gemini
router.get('/v1/models', authenticateApiKey, async (req, res) => {
  try {
-    // Return the list of supported models
-    const models = [
-      {
-        id: 'claude-3-5-sonnet-20241022',
-        object: 'model',
-        created: 1669599635,
-        owned_by: 'anthropic'
-      },
-      {
-        id: 'claude-3-5-haiku-20241022',
-        object: 'model',
-        created: 1669599635,
-        owned_by: 'anthropic'
-      },
-      {
-        id: 'claude-3-opus-20240229',
-        object: 'model',
-        created: 1669599635,
-        owned_by: 'anthropic'
-      },
-      {
-        id: 'claude-sonnet-4-20250514',
-        object: 'model',
-        created: 1669599635,
-        owned_by: 'anthropic'
-      }
-    ]
+    const modelService = require('../services/modelService')
+
+    // Get all supported models from modelService
+    const models = modelService.getAllModels()
+
+    // Optional: filter by the API key's model restrictions
+    let filteredModels = models
+    if (req.apiKey.enableModelRestriction && req.apiKey.restrictedModels?.length > 0) {
+      filteredModels = models.filter((model) => req.apiKey.restrictedModels.includes(model.id))
+    }
+
    res.json({
      object: 'list',
-      data: models
+      data: filteredModels
    })
  } catch (error) {
    logger.error('❌ Models list error:', error)
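
For reference, a minimal client sketch of what the rewritten endpoint returns (the base URL and auth header are placeholders; adapt them to your deployment):

// Node 18+ sketch: fetch is built in. URL and header values are illustrative.
;(async () => {
  const res = await fetch('http://localhost:3000/api/v1/models', {
    headers: { Authorization: 'Bearer <your-api-key>' }
  })
  const { object, data } = await res.json()
  console.log(object) // 'list'
  console.log(data[0]) // { id, object: 'model', created, owned_by }
  // Keys with enableModelRestriction set only see their restrictedModels subset.
})()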
@@ -490,3 +490,4 @@ router.post('/v1/completions', authenticateApiKey, async (req, res) => {
})

module.exports = router
+module.exports.handleChatCompletion = handleChatCompletion
@@ -919,3 +919,4 @@ router.get('/key-info', authenticateApiKey, async (req, res) => {
})

module.exports = router
+module.exports.handleResponses = handleResponses

225 src/routes/unified.js (new file)
@@ -0,0 +1,225 @@
const express = require('express')
const { authenticateApiKey } = require('../middleware/auth')
const logger = require('../utils/logger')
const { handleChatCompletion } = require('./openaiClaudeRoutes')
const {
  handleGenerateContent: geminiHandleGenerateContent,
  handleStreamGenerateContent: geminiHandleStreamGenerateContent
} = require('./geminiRoutes')
const openaiRoutes = require('./openaiRoutes')

const router = express.Router()

// 🔍 Detect the backend type from the model name
function detectBackendFromModel(modelName) {
  if (!modelName) {
    return 'claude' // Default to Claude
  }

  // First, try to look up the model's provider via modelService
  try {
    const modelService = require('../services/modelService')
    const provider = modelService.getModelProvider(modelName)

    if (provider === 'anthropic') {
      return 'claude'
    }
    if (provider === 'openai') {
      return 'openai'
    }
    if (provider === 'google') {
      return 'gemini'
    }
  } catch (error) {
    logger.warn(`⚠️ Failed to detect backend from modelService: ${error.message}`)
  }

  // Fall back to prefix matching
  const model = modelName.toLowerCase()

  // Claude models
  if (model.startsWith('claude-')) {
    return 'claude'
  }

  // OpenAI models
  if (
    model.startsWith('gpt-') ||
    model.startsWith('o1-') ||
    model.startsWith('o3-') ||
    model === 'chatgpt-4o-latest'
  ) {
    return 'openai'
  }

  // Gemini models
  if (model.startsWith('gemini-')) {
    return 'gemini'
  }

  // Default to Claude
  return 'claude'
}

// 🚀 Smart backend routing handler
async function routeToBackend(req, res, requestedModel) {
  const backend = detectBackendFromModel(requestedModel)

  logger.info(`🔀 Routing request - Model: ${requestedModel}, Backend: ${backend}`)

  // Check permissions
  const permissions = req.apiKey.permissions || 'all'

  if (backend === 'claude') {
    // Claude backend: via the OpenAI compatibility layer
    if (permissions !== 'all' && permissions !== 'claude') {
      return res.status(403).json({
        error: {
          message: 'This API key does not have permission to access Claude',
          type: 'permission_denied',
          code: 'permission_denied'
        }
      })
    }
    await handleChatCompletion(req, res, req.apiKey)
  } else if (backend === 'openai') {
    // OpenAI backend
    if (permissions !== 'all' && permissions !== 'openai') {
      return res.status(403).json({
        error: {
          message: 'This API key does not have permission to access OpenAI',
          type: 'permission_denied',
          code: 'permission_denied'
        }
      })
    }
    return await openaiRoutes.handleResponses(req, res)
  } else if (backend === 'gemini') {
    // Gemini backend
    if (permissions !== 'all' && permissions !== 'gemini') {
      return res.status(403).json({
        error: {
          message: 'This API key does not have permission to access Gemini',
          type: 'permission_denied',
          code: 'permission_denied'
        }
      })
    }

    // Convert to Gemini format
    const geminiRequest = {
      model: requestedModel,
      messages: req.body.messages,
      temperature: req.body.temperature || 0.7,
      max_tokens: req.body.max_tokens || 4096,
      stream: req.body.stream || false
    }

    req.body = geminiRequest

    if (geminiRequest.stream) {
      return await geminiHandleStreamGenerateContent(req, res)
    } else {
      return await geminiHandleGenerateContent(req, res)
    }
  } else {
    return res.status(500).json({
      error: {
        message: `Unsupported backend: ${backend}`,
        type: 'server_error',
        code: 'unsupported_backend'
      }
    })
  }
}

// 🔄 OpenAI-compatible chat/completions endpoint (smart backend routing)
router.post('/v1/chat/completions', authenticateApiKey, async (req, res) => {
  try {
    // Validate required parameters
    if (!req.body.messages || !Array.isArray(req.body.messages) || req.body.messages.length === 0) {
      return res.status(400).json({
        error: {
          message: 'Messages array is required and cannot be empty',
          type: 'invalid_request_error',
          code: 'invalid_request'
        }
      })
    }

    const requestedModel = req.body.model || 'claude-3-5-sonnet-20241022'
    req.body.model = requestedModel // Ensure the model is set

    // Use the unified backend routing handler
    await routeToBackend(req, res, requestedModel)
  } catch (error) {
    logger.error('❌ OpenAI chat/completions error:', error)
    if (!res.headersSent) {
      res.status(500).json({
        error: {
          message: 'Internal server error',
          type: 'server_error',
          code: 'internal_error'
        }
      })
    }
  }
})

// 🔄 OpenAI-compatible completions endpoint (legacy format, smart backend routing)
router.post('/v1/completions', authenticateApiKey, async (req, res) => {
  try {
    // Validate required parameters
    if (!req.body.prompt) {
      return res.status(400).json({
        error: {
          message: 'Prompt is required',
          type: 'invalid_request_error',
          code: 'invalid_request'
        }
      })
    }

    // Convert the legacy completions format to chat format
    const originalBody = req.body
    const requestedModel = originalBody.model || 'claude-3-5-sonnet-20241022'

    req.body = {
      model: requestedModel,
      messages: [
        {
          role: 'user',
          content: originalBody.prompt
        }
      ],
      max_tokens: originalBody.max_tokens,
      temperature: originalBody.temperature,
      top_p: originalBody.top_p,
      stream: originalBody.stream,
      stop: originalBody.stop,
      n: originalBody.n || 1,
      presence_penalty: originalBody.presence_penalty,
      frequency_penalty: originalBody.frequency_penalty,
      logit_bias: originalBody.logit_bias,
      user: originalBody.user
    }

    // Use the unified backend routing handler
    await routeToBackend(req, res, requestedModel)
  } catch (error) {
    logger.error('❌ OpenAI completions error:', error)
    if (!res.headersSent) {
      res.status(500).json({
        error: {
          message: 'Failed to process completion request',
          type: 'server_error',
          code: 'internal_error'
        }
      })
    }
  }
})

module.exports = router
module.exports.detectBackendFromModel = detectBackendFromModel
module.exports.routeToBackend = routeToBackend
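
For orientation, a minimal sketch of how the exported helper resolves backends, assuming modelService has no entry for the name so the prefix fallback applies (the require path is illustrative):

const { detectBackendFromModel } = require('./src/routes/unified')

console.log(detectBackendFromModel('claude-3-5-sonnet-20241022')) // 'claude'
console.log(detectBackendFromModel('gpt-4o')) // 'openai'
console.log(detectBackendFromModel('gemini-2.5-pro')) // 'gemini'
console.log(detectBackendFromModel('some-unknown-model')) // 'claude' (default)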

266 src/services/modelService.js (new file)
@@ -0,0 +1,266 @@
const fs = require('fs')
const path = require('path')
const logger = require('../utils/logger')

/**
 * Model service
 * Manages the list of AI models supported by the system
 * Independent from pricingService: focused on "which models are supported", not "how they are billed"
 */
class ModelService {
  constructor() {
    this.modelsFile = path.join(process.cwd(), 'data', 'supported_models.json')
    this.supportedModels = null
    this.fileWatcher = null
  }

  /**
   * Initialize the model service
   */
  async initialize() {
    try {
      this.loadModels()
      this.setupFileWatcher()
      logger.success('✅ Model service initialized successfully')
    } catch (error) {
      logger.error('❌ Failed to initialize model service:', error)
    }
  }

  /**
   * Load the supported models configuration
   */
  loadModels() {
    try {
      if (fs.existsSync(this.modelsFile)) {
        const data = fs.readFileSync(this.modelsFile, 'utf8')
        this.supportedModels = JSON.parse(data)

        const totalModels = Object.values(this.supportedModels).reduce(
          (sum, config) => sum + config.models.length,
          0
        )

        logger.info(`📋 Loaded ${totalModels} supported models from configuration`)
      } else {
        logger.warn('⚠️ Supported models file not found, using defaults')
        this.supportedModels = this.getDefaultModels()

        // Create the default configuration file
        this.saveDefaultConfig()
      }
    } catch (error) {
      logger.error('❌ Failed to load supported models:', error)
      this.supportedModels = this.getDefaultModels()
    }
  }

  /**
   * Get the default model configuration (fallback)
   */
  getDefaultModels() {
    return {
      claude: {
        provider: 'anthropic',
        description: 'Claude models from Anthropic',
        models: [
          'claude-sonnet-4-5-20250929',
          'claude-opus-4-1-20250805',
          'claude-sonnet-4-20250514',
          'claude-opus-4-20250514',
          'claude-3-7-sonnet-20250219',
          'claude-3-5-sonnet-20241022',
          'claude-3-5-haiku-20241022',
          'claude-3-opus-20240229',
          'claude-3-haiku-20240307'
        ]
      },
      openai: {
        provider: 'openai',
        description: 'OpenAI GPT models',
        models: [
          'gpt-4o',
          'gpt-4o-mini',
          'gpt-4.1',
          'gpt-4.1-mini',
          'gpt-4.1-nano',
          'gpt-4-turbo',
          'gpt-4',
          'gpt-3.5-turbo',
          'o3',
          'o4-mini',
          'chatgpt-4o-latest'
        ]
      },
      gemini: {
        provider: 'google',
        description: 'Google Gemini models',
        models: [
          'gemini-1.5-pro',
          'gemini-1.5-flash',
          'gemini-2.0-flash',
          'gemini-2.0-flash-exp',
          'gemini-2.0-flash-thinking',
          'gemini-2.0-flash-thinking-exp',
          'gemini-2.0-pro',
          'gemini-2.5-flash',
          'gemini-2.5-flash-lite',
          'gemini-2.5-pro'
        ]
      }
    }
  }

  /**
   * Save the default configuration to disk
   */
  saveDefaultConfig() {
    try {
      const dataDir = path.dirname(this.modelsFile)
      if (!fs.existsSync(dataDir)) {
        fs.mkdirSync(dataDir, { recursive: true })
      }

      fs.writeFileSync(this.modelsFile, JSON.stringify(this.supportedModels, null, 2))
      logger.info('💾 Created default supported_models.json configuration')
    } catch (error) {
      logger.error('❌ Failed to save default config:', error)
    }
  }

  /**
   * Get all supported models (OpenAI API format)
   */
  getAllModels() {
    const models = []
    const now = Math.floor(Date.now() / 1000)

    for (const [_service, config] of Object.entries(this.supportedModels)) {
      for (const modelId of config.models) {
        models.push({
          id: modelId,
          object: 'model',
          created: now,
          owned_by: config.provider
        })
      }
    }

    return models.sort((a, b) => {
      // Sort by provider first, then by model id
      if (a.owned_by !== b.owned_by) {
        return a.owned_by.localeCompare(b.owned_by)
      }
      return a.id.localeCompare(b.id)
    })
  }

  /**
   * Get models by provider
   * @param {string} provider - 'anthropic', 'openai', 'google', etc.
   */
  getModelsByProvider(provider) {
    return this.getAllModels().filter((m) => m.owned_by === provider)
  }

  /**
   * Check whether a model is supported
   * @param {string} modelId - model ID
   */
  isModelSupported(modelId) {
    if (!modelId) {
      return false
    }
    return this.getAllModels().some((m) => m.id === modelId)
  }

  /**
   * Get a model's provider
   * @param {string} modelId - model ID
   */
  getModelProvider(modelId) {
    const model = this.getAllModels().find((m) => m.id === modelId)
    return model ? model.owned_by : null
  }

  /**
   * Reload the models configuration
   */
  reloadModels() {
    logger.info('🔄 Reloading supported models configuration...')
    this.loadModels()
  }

  /**
   * Set up a file watcher (watches the configuration file for changes)
   */
  setupFileWatcher() {
    try {
      // Close any existing watcher first
      if (this.fileWatcher) {
        this.fileWatcher.close()
        this.fileWatcher = null
      }

      // Only set up the watcher if the file exists
      if (!fs.existsSync(this.modelsFile)) {
        logger.debug('📋 Models file does not exist yet, skipping file watcher setup')
        return
      }

      // Use fs.watchFile to poll for changes
      const watchOptions = {
        persistent: true,
        interval: 60000 // check every 60 seconds
      }

      let lastMtime = fs.statSync(this.modelsFile).mtimeMs

      fs.watchFile(this.modelsFile, watchOptions, (curr, _prev) => {
        if (curr.mtimeMs !== lastMtime) {
          lastMtime = curr.mtimeMs
          logger.info('📋 Detected change in supported_models.json, reloading...')
          this.reloadModels()
        }
      })

      // Keep a handle for cleanup
      this.fileWatcher = {
        close: () => fs.unwatchFile(this.modelsFile)
      }

      logger.info('👁️ File watcher set up for supported_models.json')
    } catch (error) {
      logger.error('❌ Failed to setup file watcher:', error)
    }
  }

  /**
   * Get the service status
   */
  getStatus() {
    const totalModels = this.supportedModels
      ? Object.values(this.supportedModels).reduce((sum, config) => sum + config.models.length, 0)
      : 0

    return {
      initialized: this.supportedModels !== null,
      totalModels,
      providers: this.supportedModels ? Object.keys(this.supportedModels) : [],
      fileExists: fs.existsSync(this.modelsFile)
    }
  }

  /**
   * Clean up resources
   */
  cleanup() {
    if (this.fileWatcher) {
      this.fileWatcher.close()
      this.fileWatcher = null
      logger.debug('📋 Model service file watcher closed')
    }
  }
}

module.exports = new ModelService()
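
As a quick orientation, a minimal usage sketch of the singleton exported above (illustrative; assumes the default configuration written to data/supported_models.json):

const modelService = require('./src/services/modelService')

async function main() {
  await modelService.initialize() // loads data/supported_models.json, or writes the defaults

  console.log(modelService.isModelSupported('gpt-4o')) // true with the default config
  console.log(modelService.getModelProvider('gemini-2.5-pro')) // 'google'
  console.log(modelService.getModelsByProvider('anthropic').length) // 9 default Claude models
  console.log(modelService.getStatus()) // { initialized, totalModels, providers, fileExists }

  modelService.cleanup() // stops the fs.watchFile poller so the process can exit
}

main()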
@@ -31,10 +31,25 @@ class OpenAIToClaudeConverter {
  stream: openaiRequest.stream || false
}

-// System message required by Claude Code
+// Define the default Claude Code system prompt
const claudeCodeSystemMessage = "You are Claude Code, Anthropic's official CLI for Claude."

-claudeRequest.system = claudeCodeSystemMessage
+// If the OpenAI request includes a system message, extract and inspect it
+const systemMessage = this._extractSystemMessage(openaiRequest.messages)
+if (systemMessage && systemMessage.includes('You are currently in Xcode')) {
+  // Xcode system prompt
+  claudeRequest.system = systemMessage
+  logger.info(
+    `🔍 Xcode request detected, using Xcode system prompt (${systemMessage.length} chars)`
+  )
+  logger.debug(`📋 System prompt preview: ${systemMessage.substring(0, 150)}...`)
+} else {
+  // Use the default Claude Code system prompt
+  claudeRequest.system = claudeCodeSystemMessage
+  logger.debug(
+    `📋 Using Claude Code default system prompt${systemMessage ? ' (ignored custom prompt)' : ''}`
+  )
+}

// Handle stop sequences
if (openaiRequest.stop) {
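
A small sketch of the resulting behavior (hypothetical inputs; convertRequest is a stand-in name for the converter method containing this hunk, and _extractSystemMessage pulls the system-role content from the OpenAI messages, as used above):

// An Xcode request keeps its own system prompt:
convertRequest({
  model: 'claude-3-5-sonnet-20241022',
  messages: [
    { role: 'system', content: 'You are currently in Xcode ...' },
    { role: 'user', content: 'Explain optionals.' }
  ]
})
// -> claudeRequest.system === the Xcode prompt, passed through verbatim

// Any other custom system message is ignored in favor of the default:
convertRequest({
  model: 'claude-3-5-sonnet-20241022',
  messages: [
    { role: 'system', content: 'You are a pirate.' },
    { role: 'user', content: 'Hi' }
  ]
})
// -> claudeRequest.system === "You are Claude Code, Anthropic's official CLI for Claude."
//    (the custom prompt is dropped and noted at debug level)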