Merge branch 'upstream-main' into feature/pyro

This commit is contained in:
Seefs
2025-12-25 17:08:02 +08:00
65 changed files with 1069 additions and 432 deletions

1
.gitignore vendored
View File

@@ -16,6 +16,7 @@ new-api
tiktoken_cache tiktoken_cache
.eslintcache .eslintcache
.gocache .gocache
.gomodcache/
.cache .cache
web/bun.lock web/bun.lock

View File

@@ -146,7 +146,7 @@ docker run --name new-api -d --restart always \
🎉 After deployment is complete, visit `http://localhost:3000` to start using! 🎉 After deployment is complete, visit `http://localhost:3000` to start using!
📖 For more deployment methods, please refer to [Deployment Guide](https://docs.newapi.pro/installation) 📖 For more deployment methods, please refer to [Deployment Guide](https://docs.newapi.pro/en/docs/installation)
--- ---
@@ -154,7 +154,7 @@ docker run --name new-api -d --restart always \
<div align="center"> <div align="center">
### 📖 [Official Documentation](https://docs.newapi.pro/) | [![Ask DeepWiki](https://deepwiki.com/badge.svg)](https://deepwiki.com/QuantumNous/new-api) ### 📖 [Official Documentation](https://docs.newapi.pro/en/docs) | [![Ask DeepWiki](https://deepwiki.com/badge.svg)](https://deepwiki.com/QuantumNous/new-api)
</div> </div>
@@ -162,17 +162,17 @@ docker run --name new-api -d --restart always \
| Category | Link | | Category | Link |
|------|------| |------|------|
| 🚀 Deployment Guide | [Installation Documentation](https://docs.newapi.pro/installation) | | 🚀 Deployment Guide | [Installation Documentation](https://docs.newapi.pro/en/docs/installation) |
| ⚙️ Environment Configuration | [Environment Variables](https://docs.newapi.pro/installation/environment-variables) | | ⚙️ Environment Configuration | [Environment Variables](https://docs.newapi.pro/en/docs/installation/config-maintenance/environment-variables) |
| 📡 API Documentation | [API Documentation](https://docs.newapi.pro/api) | | 📡 API Documentation | [API Documentation](https://docs.newapi.pro/en/docs/api) |
| ❓ FAQ | [FAQ](https://docs.newapi.pro/support/faq) | | ❓ FAQ | [FAQ](https://docs.newapi.pro/en/docs/support/faq) |
| 💬 Community Interaction | [Communication Channels](https://docs.newapi.pro/support/community-interaction) | | 💬 Community Interaction | [Communication Channels](https://docs.newapi.pro/en/docs/support/community-interaction) |
--- ---
## ✨ Key Features ## ✨ Key Features
> For detailed features, please refer to [Features Introduction](https://docs.newapi.pro/wiki/features-introduction) > For detailed features, please refer to [Features Introduction](https://docs.newapi.pro/en/docs/guide/wiki/basic-concepts/features-introduction)
### 🎨 Core Functions ### 🎨 Core Functions
@@ -201,11 +201,11 @@ docker run --name new-api -d --restart always \
### 🚀 Advanced Features ### 🚀 Advanced Features
**API Format Support:** **API Format Support:**
- ⚡ [OpenAI Responses](https://docs.newapi.pro/api/openai-responses) - ⚡ [OpenAI Responses](https://docs.newapi.pro/en/docs/api/ai-model/chat/openai/create-response)
- ⚡ [OpenAI Realtime API](https://docs.newapi.pro/api/openai-realtime) (including Azure) - ⚡ [OpenAI Realtime API](https://docs.newapi.pro/en/docs/api/ai-model/realtime/create-realtime-session) (including Azure)
- ⚡ [Claude Messages](https://docs.newapi.pro/api/anthropic-chat) - ⚡ [Claude Messages](https://docs.newapi.pro/en/docs/api/ai-model/chat/create-message)
- ⚡ [Google Gemini](https://docs.newapi.pro/api/google-gemini-chat/) - ⚡ [Google Gemini](https://docs.newapi.pro/en/api/google-gemini-chat)
- 🔄 [Rerank Models](https://docs.newapi.pro/api/jinaai-rerank) (Cohere, Jina) - 🔄 [Rerank Models](https://docs.newapi.pro/en/docs/api/ai-model/rerank/create-rerank) (Cohere, Jina)
**Intelligent Routing:** **Intelligent Routing:**
- ⚖️ Channel weighted random - ⚖️ Channel weighted random
@@ -246,16 +246,16 @@ docker run --name new-api -d --restart always \
## 🤖 Model Support ## 🤖 Model Support
> For details, please refer to [API Documentation - Relay Interface](https://docs.newapi.pro/api) > For details, please refer to [API Documentation - Relay Interface](https://docs.newapi.pro/en/docs/api)
| Model Type | Description | Documentation | | Model Type | Description | Documentation |
|---------|------|------| |---------|------|------|
| 🤖 OpenAI GPTs | gpt-4-gizmo-* series | - | | 🤖 OpenAI GPTs | gpt-4-gizmo-* series | - |
| 🎨 Midjourney-Proxy | [Midjourney-Proxy(Plus)](https://github.com/novicezk/midjourney-proxy) | [Documentation](https://docs.newapi.pro/api/midjourney-proxy-image) | | 🎨 Midjourney-Proxy | [Midjourney-Proxy(Plus)](https://github.com/novicezk/midjourney-proxy) | [Documentation](https://docs.newapi.pro/en/api/midjourney-proxy-image) |
| 🎵 Suno-API | [Suno API](https://github.com/Suno-API/Suno-API) | [Documentation](https://docs.newapi.pro/api/suno-music) | | 🎵 Suno-API | [Suno API](https://github.com/Suno-API/Suno-API) | [Documentation](https://docs.newapi.pro/en/api/suno-music) |
| 🔄 Rerank | Cohere, Jina | [Documentation](https://docs.newapi.pro/api/jinaai-rerank) | | 🔄 Rerank | Cohere, Jina | [Documentation](https://docs.newapi.pro/en/docs/api/ai-model/rerank/create-rerank) |
| 💬 Claude | Messages format | [Documentation](https://docs.newapi.pro/api/anthropic-chat) | | 💬 Claude | Messages format | [Documentation](https://docs.newapi.pro/en/docs/api/ai-model/chat/create-message) |
| 🌐 Gemini | Google Gemini format | [Documentation](https://docs.newapi.pro/api/google-gemini-chat/) | | 🌐 Gemini | Google Gemini format | [Documentation](https://docs.newapi.pro/en/api/google-gemini-chat) |
| 🔧 Dify | ChatFlow mode | - | | 🔧 Dify | ChatFlow mode | - |
| 🎯 Custom | Supports complete call address | - | | 🎯 Custom | Supports complete call address | - |
@@ -264,16 +264,16 @@ docker run --name new-api -d --restart always \
<details> <details>
<summary>View complete interface list</summary> <summary>View complete interface list</summary>
- [Chat Interface (Chat Completions)](https://docs.newapi.pro/api/openai-chat) - [Chat Interface (Chat Completions)](https://docs.newapi.pro/en/docs/api/ai-model/chat/openai/create-chat-completion)
- [Response Interface (Responses)](https://docs.newapi.pro/api/openai-responses) - [Response Interface (Responses)](https://docs.newapi.pro/en/docs/api/ai-model/chat/openai/create-response)
- [Image Interface (Image)](https://docs.newapi.pro/api/openai-image) - [Image Interface (Image)](https://docs.newapi.pro/en/docs/api/ai-model/images/openai/v1-images-generations--post)
- [Audio Interface (Audio)](https://docs.newapi.pro/api/openai-audio) - [Audio Interface (Audio)](https://docs.newapi.pro/en/docs/api/ai-model/audio/openai/create-transcription)
- [Video Interface (Video)](https://docs.newapi.pro/api/openai-video) - [Video Interface (Video)](https://docs.newapi.pro/en/docs/api/ai-model/videos/create-video-generation)
- [Embedding Interface (Embeddings)](https://docs.newapi.pro/api/openai-embeddings) - [Embedding Interface (Embeddings)](https://docs.newapi.pro/en/docs/api/ai-model/embeddings/create-embedding)
- [Rerank Interface (Rerank)](https://docs.newapi.pro/api/jinaai-rerank) - [Rerank Interface (Rerank)](https://docs.newapi.pro/en/docs/api/ai-model/rerank/create-rerank)
- [Realtime Conversation (Realtime)](https://docs.newapi.pro/api/openai-realtime) - [Realtime Conversation (Realtime)](https://docs.newapi.pro/en/docs/api/ai-model/realtime/create-realtime-session)
- [Claude Chat](https://docs.newapi.pro/api/anthropic-chat) - [Claude Chat](https://docs.newapi.pro/en/docs/api/ai-model/chat/create-message)
- [Google Gemini Chat](https://docs.newapi.pro/api/google-gemini-chat/) - [Google Gemini Chat](https://docs.newapi.pro/en/api/google-gemini-chat)
</details> </details>
@@ -305,6 +305,7 @@ docker run --name new-api -d --restart always \
| `REDIS_CONN_STRING` | Redis connection string | - | | `REDIS_CONN_STRING` | Redis connection string | - |
| `STREAMING_TIMEOUT` | Streaming timeout (seconds) | `300` | | `STREAMING_TIMEOUT` | Streaming timeout (seconds) | `300` |
| `STREAM_SCANNER_MAX_BUFFER_MB` | Max per-line buffer (MB) for the stream scanner; increase when upstream sends huge image/base64 payloads | `64` | | `STREAM_SCANNER_MAX_BUFFER_MB` | Max per-line buffer (MB) for the stream scanner; increase when upstream sends huge image/base64 payloads | `64` |
| `MAX_REQUEST_BODY_MB` | Max request body size (MB, counted **after decompression**; prevents huge requests/zip bombs from exhausting memory). Exceeding it returns `413` | `32` |
| `AZURE_DEFAULT_API_VERSION` | Azure API version | `2025-04-01-preview` | | `AZURE_DEFAULT_API_VERSION` | Azure API version | `2025-04-01-preview` |
| `ERROR_LOG_ENABLED` | Error log switch | `false` | | `ERROR_LOG_ENABLED` | Error log switch | `false` |
| `PYROSCOPE_URL` | Pyroscope server address | - | | `PYROSCOPE_URL` | Pyroscope server address | - |
@@ -315,7 +316,7 @@ docker run --name new-api -d --restart always \
| `PYROSCOPE_BLOCK_RATE` | Pyroscope block sampling rate | `5` | | `PYROSCOPE_BLOCK_RATE` | Pyroscope block sampling rate | `5` |
| `HOSTNAME` | Hostname tag for Pyroscope | `new-api` | | `HOSTNAME` | Hostname tag for Pyroscope | `new-api` |
📖 **Complete configuration:** [Environment Variables Documentation](https://docs.newapi.pro/installation/environment-variables) 📖 **Complete configuration:** [Environment Variables Documentation](https://docs.newapi.pro/en/docs/installation/config-maintenance/environment-variables)
</details> </details>
@@ -417,10 +418,10 @@ docker run --name new-api -d --restart always \
| Resource | Link | | Resource | Link |
|------|------| |------|------|
| 📘 FAQ | [FAQ](https://docs.newapi.pro/support/faq) | | 📘 FAQ | [FAQ](https://docs.newapi.pro/en/docs/support/faq) |
| 💬 Community Interaction | [Communication Channels](https://docs.newapi.pro/support/community-interaction) | | 💬 Community Interaction | [Communication Channels](https://docs.newapi.pro/en/docs/support/community-interaction) |
| 🐛 Issue Feedback | [Issue Feedback](https://docs.newapi.pro/support/feedback-issues) | | 🐛 Issue Feedback | [Issue Feedback](https://docs.newapi.pro/en/docs/support/feedback-issues) |
| 📚 Complete Documentation | [Official Documentation](https://docs.newapi.pro/support) | | 📚 Complete Documentation | [Official Documentation](https://docs.newapi.pro/en/docs) |
### 🤝 Contribution Guide ### 🤝 Contribution Guide
@@ -449,7 +450,7 @@ Welcome all forms of contribution!
If this project is helpful to you, welcome to give us a ⭐️ Star If this project is helpful to you, welcome to give us a ⭐️ Star
**[Official Documentation](https://docs.newapi.pro/)** • **[Issue Feedback](https://github.com/Calcium-Ion/new-api/issues)** • **[Latest Release](https://github.com/Calcium-Ion/new-api/releases)** **[Official Documentation](https://docs.newapi.pro/en/docs)** • **[Issue Feedback](https://github.com/Calcium-Ion/new-api/issues)** • **[Latest Release](https://github.com/Calcium-Ion/new-api/releases)**
<sub>Built with ❤️ by QuantumNous</sub> <sub>Built with ❤️ by QuantumNous</sub>

View File

@@ -146,7 +146,7 @@ docker run --name new-api -d --restart always \
🎉 Après le déploiement, visitez `http://localhost:3000` pour commencer à utiliser! 🎉 Après le déploiement, visitez `http://localhost:3000` pour commencer à utiliser!
📖 Pour plus de méthodes de déploiement, veuillez vous référer à [Guide de déploiement](https://docs.newapi.pro/installation) 📖 Pour plus de méthodes de déploiement, veuillez vous référer à [Guide de déploiement](https://docs.newapi.pro/en/docs/installation)
--- ---
@@ -154,7 +154,7 @@ docker run --name new-api -d --restart always \
<div align="center"> <div align="center">
### 📖 [Documentation officielle](https://docs.newapi.pro/) | [![Demander à DeepWiki](https://deepwiki.com/badge.svg)](https://deepwiki.com/QuantumNous/new-api) ### 📖 [Documentation officielle](https://docs.newapi.pro/en/docs) | [![Demander à DeepWiki](https://deepwiki.com/badge.svg)](https://deepwiki.com/QuantumNous/new-api)
</div> </div>
@@ -162,17 +162,17 @@ docker run --name new-api -d --restart always \
| Catégorie | Lien | | Catégorie | Lien |
|------|------| |------|------|
| 🚀 Guide de déploiement | [Documentation d'installation](https://docs.newapi.pro/installation) | | 🚀 Guide de déploiement | [Documentation d'installation](https://docs.newapi.pro/en/docs/installation) |
| ⚙️ Configuration de l'environnement | [Variables d'environnement](https://docs.newapi.pro/installation/environment-variables) | | ⚙️ Configuration de l'environnement | [Variables d'environnement](https://docs.newapi.pro/en/docs/installation/config-maintenance/environment-variables) |
| 📡 Documentation de l'API | [Documentation de l'API](https://docs.newapi.pro/api) | | 📡 Documentation de l'API | [Documentation de l'API](https://docs.newapi.pro/en/docs/api) |
| ❓ FAQ | [FAQ](https://docs.newapi.pro/support/faq) | | ❓ FAQ | [FAQ](https://docs.newapi.pro/en/docs/support/faq) |
| 💬 Interaction avec la communauté | [Canaux de communication](https://docs.newapi.pro/support/community-interaction) | | 💬 Interaction avec la communauté | [Canaux de communication](https://docs.newapi.pro/en/docs/support/community-interaction) |
--- ---
## ✨ Fonctionnalités clés ## ✨ Fonctionnalités clés
> Pour les fonctionnalités détaillées, veuillez vous référer à [Présentation des fonctionnalités](https://docs.newapi.pro/wiki/features-introduction) | > Pour les fonctionnalités détaillées, veuillez vous référer à [Présentation des fonctionnalités](https://docs.newapi.pro/en/docs/guide/wiki/basic-concepts/features-introduction)
### 🎨 Fonctions principales ### 🎨 Fonctions principales
@@ -200,11 +200,11 @@ docker run --name new-api -d --restart always \
### 🚀 Fonctionnalités avancées ### 🚀 Fonctionnalités avancées
**Prise en charge des formats d'API:** **Prise en charge des formats d'API:**
- ⚡ [OpenAI Responses](https://docs.newapi.pro/api/openai-responses) - ⚡ [OpenAI Responses](https://docs.newapi.pro/en/docs/api/ai-model/chat/openai/create-response)
- ⚡ [OpenAI Realtime API](https://docs.newapi.pro/api/openai-realtime) (y compris Azure) - ⚡ [OpenAI Realtime API](https://docs.newapi.pro/en/docs/api/ai-model/realtime/create-realtime-session) (y compris Azure)
- ⚡ [Claude Messages](https://docs.newapi.pro/api/anthropic-chat) - ⚡ [Claude Messages](https://docs.newapi.pro/en/docs/api/ai-model/chat/create-message)
- ⚡ [Google Gemini](https://docs.newapi.pro/api/google-gemini-chat/) - ⚡ [Google Gemini](https://docs.newapi.pro/en/api/google-gemini-chat)
- 🔄 [Modèles Rerank](https://docs.newapi.pro/api/jinaai-rerank) (Cohere, Jina) - 🔄 [Modèles Rerank](https://docs.newapi.pro/en/docs/api/ai-model/rerank/create-rerank) (Cohere, Jina)
**Routage intelligent:** **Routage intelligent:**
- ⚖️ Sélection aléatoire pondérée des canaux - ⚖️ Sélection aléatoire pondérée des canaux
@@ -242,16 +242,16 @@ docker run --name new-api -d --restart always \
## 🤖 Prise en charge des modèles ## 🤖 Prise en charge des modèles
> Pour les détails, veuillez vous référer à [Documentation de l'API - Interface de relais](https://docs.newapi.pro/api) > Pour les détails, veuillez vous référer à [Documentation de l'API - Interface de relais](https://docs.newapi.pro/en/docs/api)
| Type de modèle | Description | Documentation | | Type de modèle | Description | Documentation |
|---------|------|------| |---------|------|------|
| 🤖 OpenAI GPTs | série gpt-4-gizmo-* | - | | 🤖 OpenAI GPTs | série gpt-4-gizmo-* | - |
| 🎨 Midjourney-Proxy | [Midjourney-Proxy(Plus)](https://github.com/novicezk/midjourney-proxy) | [Documentation](https://docs.newapi.pro/api/midjourney-proxy-image) | | 🎨 Midjourney-Proxy | [Midjourney-Proxy(Plus)](https://github.com/novicezk/midjourney-proxy) | [Documentation](https://docs.newapi.pro/en/api/midjourney-proxy-image) |
| 🎵 Suno-API | [Suno API](https://github.com/Suno-API/Suno-API) | [Documentation](https://docs.newapi.pro/api/suno-music) | | 🎵 Suno-API | [Suno API](https://github.com/Suno-API/Suno-API) | [Documentation](https://docs.newapi.pro/en/api/suno-music) |
| 🔄 Rerank | Cohere, Jina | [Documentation](https://docs.newapi.pro/api/jinaai-rerank) | | 🔄 Rerank | Cohere, Jina | [Documentation](https://docs.newapi.pro/en/docs/api/ai-model/rerank/create-rerank) |
| 💬 Claude | Format Messages | [Documentation](https://docs.newapi.pro/api/anthropic-chat) | | 💬 Claude | Format Messages | [Documentation](https://docs.newapi.pro/en/docs/api/ai-model/chat/create-message) |
| 🌐 Gemini | Format Google Gemini | [Documentation](https://docs.newapi.pro/api/google-gemini-chat/) | | 🌐 Gemini | Format Google Gemini | [Documentation](https://docs.newapi.pro/en/api/google-gemini-chat) |
| 🔧 Dify | Mode ChatFlow | - | | 🔧 Dify | Mode ChatFlow | - |
| 🎯 Personnalisé | Prise en charge de l'adresse d'appel complète | - | | 🎯 Personnalisé | Prise en charge de l'adresse d'appel complète | - |
@@ -260,16 +260,16 @@ docker run --name new-api -d --restart always \
<details> <details>
<summary>Voir la liste complète des interfaces</summary> <summary>Voir la liste complète des interfaces</summary>
- [Interface de discussion (Chat Completions)](https://docs.newapi.pro/api/openai-chat) - [Interface de discussion (Chat Completions)](https://docs.newapi.pro/en/docs/api/ai-model/chat/openai/create-chat-completion)
- [Interface de réponse (Responses)](https://docs.newapi.pro/api/openai-responses) - [Interface de réponse (Responses)](https://docs.newapi.pro/en/docs/api/ai-model/chat/openai/create-response)
- [Interface d'image (Image)](https://docs.newapi.pro/api/openai-image) - [Interface d'image (Image)](https://docs.newapi.pro/en/docs/api/ai-model/images/openai/v1-images-generations--post)
- [Interface audio (Audio)](https://docs.newapi.pro/api/openai-audio) - [Interface audio (Audio)](https://docs.newapi.pro/en/docs/api/ai-model/audio/openai/create-transcription)
- [Interface vidéo (Video)](https://docs.newapi.pro/api/openai-video) - [Interface vidéo (Video)](https://docs.newapi.pro/en/docs/api/ai-model/videos/create-video-generation)
- [Interface d'incorporation (Embeddings)](https://docs.newapi.pro/api/openai-embeddings) - [Interface d'incorporation (Embeddings)](https://docs.newapi.pro/en/docs/api/ai-model/embeddings/create-embedding)
- [Interface de rerank (Rerank)](https://docs.newapi.pro/api/jinaai-rerank) - [Interface de rerank (Rerank)](https://docs.newapi.pro/en/docs/api/ai-model/rerank/create-rerank)
- [Conversation en temps réel (Realtime)](https://docs.newapi.pro/api/openai-realtime) - [Conversation en temps réel (Realtime)](https://docs.newapi.pro/en/docs/api/ai-model/realtime/create-realtime-session)
- [Discussion Claude](https://docs.newapi.pro/api/anthropic-chat) - [Discussion Claude](https://docs.newapi.pro/en/docs/api/ai-model/chat/create-message)
- [Discussion Google Gemini](https://docs.newapi.pro/api/google-gemini-chat/) - [Discussion Google Gemini](https://docs.newapi.pro/en/api/google-gemini-chat)
</details> </details>
@@ -301,6 +301,7 @@ docker run --name new-api -d --restart always \
| `REDIS_CONN_STRING` | Chaine de connexion Redis | - | | `REDIS_CONN_STRING` | Chaine de connexion Redis | - |
| `STREAMING_TIMEOUT` | Délai d'expiration du streaming (secondes) | `300` | | `STREAMING_TIMEOUT` | Délai d'expiration du streaming (secondes) | `300` |
| `STREAM_SCANNER_MAX_BUFFER_MB` | Taille max du buffer par ligne (Mo) pour le scanner SSE ; à augmenter quand les sorties image/base64 sont très volumineuses (ex. images 4K) | `64` | | `STREAM_SCANNER_MAX_BUFFER_MB` | Taille max du buffer par ligne (Mo) pour le scanner SSE ; à augmenter quand les sorties image/base64 sont très volumineuses (ex. images 4K) | `64` |
| `MAX_REQUEST_BODY_MB` | Taille maximale du corps de requête (Mo, comptée **après décompression** ; évite les requêtes énormes/zip bombs qui saturent la mémoire). Dépassement ⇒ `413` | `32` |
| `AZURE_DEFAULT_API_VERSION` | Version de l'API Azure | `2025-04-01-preview` | | `AZURE_DEFAULT_API_VERSION` | Version de l'API Azure | `2025-04-01-preview` |
| `ERROR_LOG_ENABLED` | Interrupteur du journal d'erreurs | `false` | | `ERROR_LOG_ENABLED` | Interrupteur du journal d'erreurs | `false` |
| `PYROSCOPE_URL` | Adresse du serveur Pyroscope | - | | `PYROSCOPE_URL` | Adresse du serveur Pyroscope | - |
@@ -311,7 +312,7 @@ docker run --name new-api -d --restart always \
| `PYROSCOPE_BLOCK_RATE` | Taux d'échantillonnage block Pyroscope | `5` | | `PYROSCOPE_BLOCK_RATE` | Taux d'échantillonnage block Pyroscope | `5` |
| `HOSTNAME` | Nom d'hôte tagué pour Pyroscope | `new-api` | | `HOSTNAME` | Nom d'hôte tagué pour Pyroscope | `new-api` |
📖 **Configuration complète:** [Documentation des variables d'environnement](https://docs.newapi.pro/installation/environment-variables) 📖 **Configuration complète:** [Documentation des variables d'environnement](https://docs.newapi.pro/en/docs/installation/config-maintenance/environment-variables)
</details> </details>
@@ -411,10 +412,10 @@ docker run --name new-api -d --restart always \
| Ressource | Lien | | Ressource | Lien |
|------|------| |------|------|
| 📘 FAQ | [FAQ](https://docs.newapi.pro/support/faq) | | 📘 FAQ | [FAQ](https://docs.newapi.pro/en/docs/support/faq) |
| 💬 Interaction avec la communauté | [Canaux de communication](https://docs.newapi.pro/support/community-interaction) | | 💬 Interaction avec la communauté | [Canaux de communication](https://docs.newapi.pro/en/docs/support/community-interaction) |
| 🐛 Commentaires sur les problèmes | [Commentaires sur les problèmes](https://docs.newapi.pro/support/feedback-issues) | | 🐛 Commentaires sur les problèmes | [Commentaires sur les problèmes](https://docs.newapi.pro/en/docs/support/feedback-issues) |
| 📚 Documentation complète | [Documentation officielle](https://docs.newapi.pro/support) | | 📚 Documentation complète | [Documentation officielle](https://docs.newapi.pro/en/docs) |
### 🤝 Guide de contribution ### 🤝 Guide de contribution
@@ -443,7 +444,7 @@ Bienvenue à toutes les formes de contribution!
Si ce projet vous est utile, bienvenue à nous donner une ⭐️ Étoile Si ce projet vous est utile, bienvenue à nous donner une ⭐️ Étoile
**[Documentation officielle](https://docs.newapi.pro/)** • **[Commentaires sur les problèmes](https://github.com/Calcium-Ion/new-api/issues)** • **[Dernière version](https://github.com/Calcium-Ion/new-api/releases)** **[Documentation officielle](https://docs.newapi.pro/en/docs)** • **[Commentaires sur les problèmes](https://github.com/Calcium-Ion/new-api/issues)** • **[Dernière version](https://github.com/Calcium-Ion/new-api/releases)**
<sub>Construit avec ❤️ par QuantumNous</sub> <sub>Construit avec ❤️ par QuantumNous</sub>

View File

@@ -146,7 +146,7 @@ docker run --name new-api -d --restart always \
🎉 デプロイが完了したら、`http://localhost:3000` にアクセスして使用を開始してください! 🎉 デプロイが完了したら、`http://localhost:3000` にアクセスして使用を開始してください!
📖 その他のデプロイ方法については[デプロイガイド](https://docs.newapi.pro/installation)を参照してください。 📖 その他のデプロイ方法については[デプロイガイド](https://docs.newapi.pro/ja/docs/installation)を参照してください。
--- ---
@@ -154,7 +154,7 @@ docker run --name new-api -d --restart always \
<div align="center"> <div align="center">
### 📖 [公式ドキュメント](https://docs.newapi.pro/) | [![Ask DeepWiki](https://deepwiki.com/badge.svg)](https://deepwiki.com/QuantumNous/new-api) ### 📖 [公式ドキュメント](https://docs.newapi.pro/ja/docs) | [![Ask DeepWiki](https://deepwiki.com/badge.svg)](https://deepwiki.com/QuantumNous/new-api)
</div> </div>
@@ -162,17 +162,17 @@ docker run --name new-api -d --restart always \
| カテゴリ | リンク | | カテゴリ | リンク |
|------|------| |------|------|
| 🚀 デプロイガイド | [インストールドキュメント](https://docs.newapi.pro/installation) | | 🚀 デプロイガイド | [インストールドキュメント](https://docs.newapi.pro/ja/docs/installation) |
| ⚙️ 環境設定 | [環境変数](https://docs.newapi.pro/installation/environment-variables) | | ⚙️ 環境設定 | [環境変数](https://docs.newapi.pro/ja/docs/installation/config-maintenance/environment-variables) |
| 📡 APIドキュメント | [APIドキュメント](https://docs.newapi.pro/api) | | 📡 APIドキュメント | [APIドキュメント](https://docs.newapi.pro/ja/docs/api) |
| ❓ よくある質問 | [FAQ](https://docs.newapi.pro/support/faq) | | ❓ よくある質問 | [FAQ](https://docs.newapi.pro/ja/docs/support/faq) |
| 💬 コミュニティ交流 | [交流チャネル](https://docs.newapi.pro/support/community-interaction) | | 💬 コミュニティ交流 | [交流チャネル](https://docs.newapi.pro/ja/docs/support/community-interaction) |
--- ---
## ✨ 主な機能 ## ✨ 主な機能
> 詳細な機能については[機能説明](https://docs.newapi.pro/wiki/features-introduction)を参照してください。 > 詳細な機能については[機能説明](https://docs.newapi.pro/ja/docs/guide/wiki/basic-concepts/features-introduction)を参照してください。
### 🎨 コア機能 ### 🎨 コア機能
@@ -202,15 +202,15 @@ docker run --name new-api -d --restart always \
### 🚀 高度な機能 ### 🚀 高度な機能
**APIフォーマットサポート:** **APIフォーマットサポート:**
- ⚡ [OpenAI Responses](https://docs.newapi.pro/api/openai-responses) - ⚡ [OpenAI Responses](https://docs.newapi.pro/ja/docs/api/ai-model/chat/openai/create-response)
- ⚡ [OpenAI Realtime API](https://docs.newapi.pro/api/openai-realtime)Azureを含む - ⚡ [OpenAI Realtime API](https://docs.newapi.pro/ja/docs/api/ai-model/realtime/create-realtime-session)Azureを含む
- ⚡ [Claude Messages](https://docs.newapi.pro/api/anthropic-chat) - ⚡ [Claude Messages](https://docs.newapi.pro/ja/docs/api/ai-model/chat/create-message)
- ⚡ [Google Gemini](https://docs.newapi.pro/api/google-gemini-chat/) - ⚡ [Google Gemini](https://docs.newapi.pro/ja/api/google-gemini-chat)
- 🔄 [Rerankモデル](https://docs.newapi.pro/api/jinaai-rerank) - 🔄 [Rerankモデル](https://docs.newapi.pro/ja/docs/api/ai-model/rerank/create-rerank)
- ⚡ [OpenAI Realtime API](https://docs.newapi.pro/api/openai-realtime) - ⚡ [OpenAI Realtime API](https://docs.newapi.pro/ja/docs/api/ai-model/realtime/create-realtime-session)
- ⚡ [Claude Messages](https://docs.newapi.pro/api/anthropic-chat) - ⚡ [Claude Messages](https://docs.newapi.pro/ja/docs/api/ai-model/chat/create-message)
- ⚡ [Google Gemini](https://docs.newapi.pro/api/google-gemini-chat/) - ⚡ [Google Gemini](https://docs.newapi.pro/ja/api/google-gemini-chat)
- 🔄 [Rerankモデル](https://docs.newapi.pro/api/jinaai-rerank)Cohere、Jina - 🔄 [Rerankモデル](https://docs.newapi.pro/ja/docs/api/ai-model/rerank/create-rerank)Cohere、Jina
**インテリジェントルーティング:** **インテリジェントルーティング:**
- ⚖️ チャネル重み付けランダム - ⚖️ チャネル重み付けランダム
@@ -251,16 +251,16 @@ docker run --name new-api -d --restart always \
## 🤖 モデルサポート ## 🤖 モデルサポート
> 詳細については[APIドキュメント - 中継インターフェース](https://docs.newapi.pro/api) > 詳細については[APIドキュメント - 中継インターフェース](https://docs.newapi.pro/ja/docs/api)
| モデルタイプ | 説明 | ドキュメント | | モデルタイプ | 説明 | ドキュメント |
|---------|------|------| |---------|------|------|
| 🤖 OpenAI GPTs | gpt-4-gizmo-* シリーズ | - | | 🤖 OpenAI GPTs | gpt-4-gizmo-* シリーズ | - |
| 🎨 Midjourney-Proxy | [Midjourney-Proxy(Plus)](https://github.com/novicezk/midjourney-proxy) | [ドキュメント](https://docs.newapi.pro/api/midjourney-proxy-image) | | 🎨 Midjourney-Proxy | [Midjourney-Proxy(Plus)](https://github.com/novicezk/midjourney-proxy) | [ドキュメント](https://docs.newapi.pro/ja/api/midjourney-proxy-image) |
| 🎵 Suno-API | [Suno API](https://github.com/Suno-API/Suno-API) | [ドキュメント](https://docs.newapi.pro/api/suno-music) | | 🎵 Suno-API | [Suno API](https://github.com/Suno-API/Suno-API) | [ドキュメント](https://docs.newapi.pro/ja/api/suno-music) |
| 🔄 Rerank | Cohere、Jina | [ドキュメント](https://docs.newapi.pro/api/jinaai-rerank) | | 🔄 Rerank | Cohere、Jina | [ドキュメント](https://docs.newapi.pro/ja/docs/api/ai-model/rerank/create-rerank) |
| 💬 Claude | Messagesフォーマット | [ドキュメント](https://docs.newapi.pro/api/suno-music) | | 💬 Claude | Messagesフォーマット | [ドキュメント](https://docs.newapi.pro/ja/docs/api/ai-model/chat/create-message) |
| 🌐 Gemini | Google Geminiフォーマット | [ドキュメント](https://docs.newapi.pro/api/google-gemini-chat/) | | 🌐 Gemini | Google Geminiフォーマット | [ドキュメント](https://docs.newapi.pro/ja/api/google-gemini-chat) |
| 🔧 Dify | ChatFlowモード | - | | 🔧 Dify | ChatFlowモード | - |
| 🎯 カスタム | 完全な呼び出しアドレスの入力をサポート | - | | 🎯 カスタム | 完全な呼び出しアドレスの入力をサポート | - |
@@ -269,16 +269,16 @@ docker run --name new-api -d --restart always \
<details> <details>
<summary>完全なインターフェースリストを表示</summary> <summary>完全なインターフェースリストを表示</summary>
- [チャットインターフェース (Chat Completions)](https://docs.newapi.pro/api/openai-chat) - [チャットインターフェース (Chat Completions)](https://docs.newapi.pro/ja/docs/api/ai-model/chat/openai/create-chat-completion)
- [レスポンスインターフェース (Responses)](https://docs.newapi.pro/api/openai-responses) - [レスポンスインターフェース (Responses)](https://docs.newapi.pro/ja/docs/api/ai-model/chat/openai/create-response)
- [イメージインターフェース (Image)](https://docs.newapi.pro/api/openai-image) - [イメージインターフェース (Image)](https://docs.newapi.pro/ja/docs/api/ai-model/images/openai/v1-images-generations--post)
- [オーディオインターフェース (Audio)](https://docs.newapi.pro/api/openai-audio) - [オーディオインターフェース (Audio)](https://docs.newapi.pro/ja/docs/api/ai-model/audio/openai/create-transcription)
- [ビデオインターフェース (Video)](https://docs.newapi.pro/api/openai-video) - [ビデオインターフェース (Video)](https://docs.newapi.pro/ja/docs/api/ai-model/videos/create-video-generation)
- [エンベッドインターフェース (Embeddings)](https://docs.newapi.pro/api/openai-embeddings) - [エンベッドインターフェース (Embeddings)](https://docs.newapi.pro/ja/docs/api/ai-model/embeddings/create-embedding)
- [再ランク付けインターフェース (Rerank)](https://docs.newapi.pro/api/jinaai-rerank) - [再ランク付けインターフェース (Rerank)](https://docs.newapi.pro/ja/docs/api/ai-model/rerank/create-rerank)
- [リアルタイム対話インターフェース (Realtime)](https://docs.newapi.pro/api/openai-realtime) - [リアルタイム対話インターフェース (Realtime)](https://docs.newapi.pro/ja/docs/api/ai-model/realtime/create-realtime-session)
- [Claudeチャット](https://docs.newapi.pro/api/anthropic-chat) - [Claudeチャット](https://docs.newapi.pro/ja/docs/api/ai-model/chat/create-message)
- [Google Geminiチャット](https://docs.newapi.pro/api/google-gemini-chat/) - [Google Geminiチャット](https://docs.newapi.pro/ja/api/google-gemini-chat)
</details> </details>
@@ -310,6 +310,7 @@ docker run --name new-api -d --restart always \
| `REDIS_CONN_STRING` | Redis接続文字列 | - | | `REDIS_CONN_STRING` | Redis接続文字列 | - |
| `STREAMING_TIMEOUT` | ストリーミング応答のタイムアウト時間(秒) | `300` | | `STREAMING_TIMEOUT` | ストリーミング応答のタイムアウト時間(秒) | `300` |
| `STREAM_SCANNER_MAX_BUFFER_MB` | ストリームスキャナの1行あたりバッファ上限MB。4K画像など巨大なbase64 `data:` ペイロードを扱う場合は値を増加させてください | `64` | | `STREAM_SCANNER_MAX_BUFFER_MB` | ストリームスキャナの1行あたりバッファ上限MB。4K画像など巨大なbase64 `data:` ペイロードを扱う場合は値を増加させてください | `64` |
| `MAX_REQUEST_BODY_MB` | リクエストボディ最大サイズMB、**解凍後**に計測。巨大リクエスト/zip bomb によるメモリ枯渇を防止)。超過時は `413` | `32` |
| `AZURE_DEFAULT_API_VERSION` | Azure APIバージョン | `2025-04-01-preview` | | `AZURE_DEFAULT_API_VERSION` | Azure APIバージョン | `2025-04-01-preview` |
| `ERROR_LOG_ENABLED` | エラーログスイッチ | `false` | | `ERROR_LOG_ENABLED` | エラーログスイッチ | `false` |
| `PYROSCOPE_URL` | Pyroscopeサーバーのアドレス | - | | `PYROSCOPE_URL` | Pyroscopeサーバーのアドレス | - |
@@ -320,7 +321,7 @@ docker run --name new-api -d --restart always \
| `PYROSCOPE_BLOCK_RATE` | Pyroscope blockサンプリング率 | `5` | | `PYROSCOPE_BLOCK_RATE` | Pyroscope blockサンプリング率 | `5` |
| `HOSTNAME` | Pyroscope用のホスト名タグ | `new-api` | | `HOSTNAME` | Pyroscope用のホスト名タグ | `new-api` |
📖 **完全な設定:** [環境変数ドキュメント](https://docs.newapi.pro/installation/environment-variables) 📖 **完全な設定:** [環境変数ドキュメント](https://docs.newapi.pro/ja/docs/installation/config-maintenance/environment-variables)
</details> </details>
@@ -420,10 +421,10 @@ docker run --name new-api -d --restart always \
| リソース | リンク | | リソース | リンク |
|------|------| |------|------|
| 📘 よくある質問 | [FAQ](https://docs.newapi.pro/support/faq) | | 📘 よくある質問 | [FAQ](https://docs.newapi.pro/ja/docs/support/faq) |
| 💬 コミュニティ交流 | [交流チャネル](https://docs.newapi.pro/support/community-interaction) | | 💬 コミュニティ交流 | [交流チャネル](https://docs.newapi.pro/ja/docs/support/community-interaction) |
| 🐛 問題のフィードバック | [問題フィードバック](https://docs.newapi.pro/support/feedback-issues) | | 🐛 問題のフィードバック | [問題フィードバック](https://docs.newapi.pro/ja/docs/support/feedback-issues) |
| 📚 完全なドキュメント | [公式ドキュメント](https://docs.newapi.pro/support) | | 📚 完全なドキュメント | [公式ドキュメント](https://docs.newapi.pro/ja/docs) |
### 🤝 貢献ガイド ### 🤝 貢献ガイド
@@ -452,7 +453,7 @@ docker run --name new-api -d --restart always \
このプロジェクトがあなたのお役に立てたなら、ぜひ ⭐️ スターをください! このプロジェクトがあなたのお役に立てたなら、ぜひ ⭐️ スターをください!
**[公式ドキュメント](https://docs.newapi.pro/)** • **[問題フィードバック](https://github.com/Calcium-Ion/new-api/issues)** • **[最新リリース](https://github.com/Calcium-Ion/new-api/releases)** **[公式ドキュメント](https://docs.newapi.pro/ja/docs)** • **[問題フィードバック](https://github.com/Calcium-Ion/new-api/issues)** • **[最新リリース](https://github.com/Calcium-Ion/new-api/releases)**
<sub>❤️ で構築された QuantumNous</sub> <sub>❤️ で構築された QuantumNous</sub>

View File

@@ -146,7 +146,7 @@ docker run --name new-api -d --restart always \
🎉 部署完成后,访问 `http://localhost:3000` 即可使用! 🎉 部署完成后,访问 `http://localhost:3000` 即可使用!
📖 更多部署方式请参考 [部署指南](https://docs.newapi.pro/installation) 📖 更多部署方式请参考 [部署指南](https://docs.newapi.pro/zh/docs/installation)
--- ---
@@ -154,7 +154,7 @@ docker run --name new-api -d --restart always \
<div align="center"> <div align="center">
### 📖 [官方文档](https://docs.newapi.pro/) | [![Ask DeepWiki](https://deepwiki.com/badge.svg)](https://deepwiki.com/QuantumNous/new-api) ### 📖 [官方文档](https://docs.newapi.pro/zh/docs) | [![Ask DeepWiki](https://deepwiki.com/badge.svg)](https://deepwiki.com/QuantumNous/new-api)
</div> </div>
@@ -162,17 +162,17 @@ docker run --name new-api -d --restart always \
| 分类 | 链接 | | 分类 | 链接 |
|------|------| |------|------|
| 🚀 部署指南 | [安装文档](https://docs.newapi.pro/installation) | | 🚀 部署指南 | [安装文档](https://docs.newapi.pro/zh/docs/installation) |
| ⚙️ 环境配置 | [环境变量](https://docs.newapi.pro/installation/environment-variables) | | ⚙️ 环境配置 | [环境变量](https://docs.newapi.pro/zh/docs/installation/config-maintenance/environment-variables) |
| 📡 接口文档 | [API 文档](https://docs.newapi.pro/api) | | 📡 接口文档 | [API 文档](https://docs.newapi.pro/zh/docs/api) |
| ❓ 常见问题 | [FAQ](https://docs.newapi.pro/support/faq) | | ❓ 常见问题 | [FAQ](https://docs.newapi.pro/zh/docs/support/faq) |
| 💬 社区交流 | [交流渠道](https://docs.newapi.pro/support/community-interaction) | | 💬 社区交流 | [交流渠道](https://docs.newapi.pro/zh/docs/support/community-interaction) |
--- ---
## ✨ 主要特性 ## ✨ 主要特性
> 详细特性请参考 [特性说明](https://docs.newapi.pro/wiki/features-introduction) > 详细特性请参考 [特性说明](https://docs.newapi.pro/zh/docs/guide/wiki/basic-concepts/features-introduction)
### 🎨 核心功能 ### 🎨 核心功能
@@ -202,11 +202,11 @@ docker run --name new-api -d --restart always \
### 🚀 高级功能 ### 🚀 高级功能
**API 格式支持:** **API 格式支持:**
- ⚡ [OpenAI Responses](https://docs.newapi.pro/api/openai-responses) - ⚡ [OpenAI Responses](https://docs.newapi.pro/zh/docs/api/ai-model/chat/openai/create-response)
- ⚡ [OpenAI Realtime API](https://docs.newapi.pro/api/openai-realtime)(含 Azure - ⚡ [OpenAI Realtime API](https://docs.newapi.pro/zh/docs/api/ai-model/realtime/create-realtime-session)(含 Azure
- ⚡ [Claude Messages](https://docs.newapi.pro/api/anthropic-chat) - ⚡ [Claude Messages](https://docs.newapi.pro/zh/docs/api/ai-model/chat/create-message)
- ⚡ [Google Gemini](https://docs.newapi.pro/api/google-gemini-chat/) - ⚡ [Google Gemini](https://doc.newapi.pro/api/google-gemini-chat)
- 🔄 [Rerank 模型](https://docs.newapi.pro/api/jinaai-rerank)Cohere、Jina - 🔄 [Rerank 模型](https://docs.newapi.pro/zh/docs/api/ai-model/rerank/create-rerank)Cohere、Jina
**智能路由:** **智能路由:**
- ⚖️ 渠道加权随机 - ⚖️ 渠道加权随机
@@ -247,16 +247,16 @@ docker run --name new-api -d --restart always \
## 🤖 模型支持 ## 🤖 模型支持
> 详情请参考 [接口文档 - 中继接口](https://docs.newapi.pro/api) > 详情请参考 [接口文档 - 中继接口](https://docs.newapi.pro/zh/docs/api)
| 模型类型 | 说明 | 文档 | | 模型类型 | 说明 | 文档 |
|---------|------|------| |---------|------|------|
| 🤖 OpenAI GPTs | gpt-4-gizmo-* 系列 | - | | 🤖 OpenAI GPTs | gpt-4-gizmo-* 系列 | - |
| 🎨 Midjourney-Proxy | [Midjourney-Proxy(Plus)](https://github.com/novicezk/midjourney-proxy) | [文档](https://docs.newapi.pro/api/midjourney-proxy-image) | | 🎨 Midjourney-Proxy | [Midjourney-Proxy(Plus)](https://github.com/novicezk/midjourney-proxy) | [文档](https://doc.newapi.pro/api/midjourney-proxy-image) |
| 🎵 Suno-API | [Suno API](https://github.com/Suno-API/Suno-API) | [文档](https://docs.newapi.pro/api/suno-music) | | 🎵 Suno-API | [Suno API](https://github.com/Suno-API/Suno-API) | [文档](https://doc.newapi.pro/api/suno-music) |
| 🔄 Rerank | Cohere、Jina | [文档](https://docs.newapi.pro/api/jinaai-rerank) | | 🔄 Rerank | Cohere、Jina | [文档](https://docs.newapi.pro/zh/docs/api/ai-model/rerank/create-rerank) |
| 💬 Claude | Messages 格式 | [文档](https://docs.newapi.pro/api/anthropic-chat) | | 💬 Claude | Messages 格式 | [文档](https://docs.newapi.pro/zh/docs/api/ai-model/chat/create-message) |
| 🌐 Gemini | Google Gemini 格式 | [文档](https://docs.newapi.pro/api/google-gemini-chat/) | | 🌐 Gemini | Google Gemini 格式 | [文档](https://doc.newapi.pro/api/google-gemini-chat) |
| 🔧 Dify | ChatFlow 模式 | - | | 🔧 Dify | ChatFlow 模式 | - |
| 🎯 自定义 | 支持完整调用地址 | - | | 🎯 自定义 | 支持完整调用地址 | - |
@@ -265,16 +265,16 @@ docker run --name new-api -d --restart always \
<details> <details>
<summary>查看完整接口列表</summary> <summary>查看完整接口列表</summary>
- [聊天接口 (Chat Completions)](https://docs.newapi.pro/api/openai-chat) - [聊天接口 (Chat Completions)](https://docs.newapi.pro/zh/docs/api/ai-model/chat/openai/create-chat-completion)
- [响应接口 (Responses)](https://docs.newapi.pro/api/openai-responses) - [响应接口 (Responses)](https://docs.newapi.pro/zh/docs/api/ai-model/chat/openai/create-response)
- [图像接口 (Image)](https://docs.newapi.pro/api/openai-image) - [图像接口 (Image)](https://docs.newapi.pro/zh/docs/api/ai-model/images/openai/v1-images-generations--post)
- [音频接口 (Audio)](https://docs.newapi.pro/api/openai-audio) - [音频接口 (Audio)](https://docs.newapi.pro/zh/docs/api/ai-model/audio/openai/create-transcription)
- [视频接口 (Video)](https://docs.newapi.pro/api/openai-video) - [视频接口 (Video)](https://docs.newapi.pro/zh/docs/api/ai-model/videos/create-video-generation)
- [嵌入接口 (Embeddings)](https://docs.newapi.pro/api/openai-embeddings) - [嵌入接口 (Embeddings)](https://docs.newapi.pro/zh/docs/api/ai-model/embeddings/create-embedding)
- [重排序接口 (Rerank)](https://docs.newapi.pro/api/jinaai-rerank) - [重排序接口 (Rerank)](https://docs.newapi.pro/zh/docs/api/ai-model/rerank/create-rerank)
- [实时对话 (Realtime)](https://docs.newapi.pro/api/openai-realtime) - [实时对话 (Realtime)](https://docs.newapi.pro/zh/docs/api/ai-model/realtime/create-realtime-session)
- [Claude 聊天](https://docs.newapi.pro/api/anthropic-chat) - [Claude 聊天](https://docs.newapi.pro/zh/docs/api/ai-model/chat/create-message)
- [Google Gemini 聊天](https://docs.newapi.pro/api/google-gemini-chat) - [Google Gemini 聊天](https://doc.newapi.pro/api/google-gemini-chat)
</details> </details>
@@ -306,6 +306,7 @@ docker run --name new-api -d --restart always \
| `REDIS_CONN_STRING` | Redis 连接字符串 | - | | `REDIS_CONN_STRING` | Redis 连接字符串 | - |
| `STREAMING_TIMEOUT` | 流式超时时间(秒) | `300` | | `STREAMING_TIMEOUT` | 流式超时时间(秒) | `300` |
| `STREAM_SCANNER_MAX_BUFFER_MB` | 流式扫描器单行最大缓冲MB图像生成等超大 `data:` 片段(如 4K 图片 base64需适当调大 | `64` | | `STREAM_SCANNER_MAX_BUFFER_MB` | 流式扫描器单行最大缓冲MB图像生成等超大 `data:` 片段(如 4K 图片 base64需适当调大 | `64` |
| `MAX_REQUEST_BODY_MB` | 请求体最大大小MB**解压后**计;防止超大请求/zip bomb 导致内存暴涨),超过将返回 `413` | `32` |
| `AZURE_DEFAULT_API_VERSION` | Azure API 版本 | `2025-04-01-preview` | | `AZURE_DEFAULT_API_VERSION` | Azure API 版本 | `2025-04-01-preview` |
| `ERROR_LOG_ENABLED` | 错误日志开关 | `false` | | `ERROR_LOG_ENABLED` | 错误日志开关 | `false` |
| `PYROSCOPE_URL` | Pyroscope 服务地址 | - | | `PYROSCOPE_URL` | Pyroscope 服务地址 | - |
@@ -316,7 +317,7 @@ docker run --name new-api -d --restart always \
| `PYROSCOPE_BLOCK_RATE` | Pyroscope block 采样率 | `5` | | `PYROSCOPE_BLOCK_RATE` | Pyroscope block 采样率 | `5` |
| `HOSTNAME` | Pyroscope 标签里的主机名 | `new-api` | | `HOSTNAME` | Pyroscope 标签里的主机名 | `new-api` |
📖 **完整配置:** [环境变量文档](https://docs.newapi.pro/installation/environment-variables) 📖 **完整配置:** [环境变量文档](https://docs.newapi.pro/zh/docs/installation/config-maintenance/environment-variables)
</details> </details>
@@ -418,10 +419,10 @@ docker run --name new-api -d --restart always \
| 资源 | 链接 | | 资源 | 链接 |
|------|------| |------|------|
| 📘 常见问题 | [FAQ](https://docs.newapi.pro/support/faq) | | 📘 常见问题 | [FAQ](https://docs.newapi.pro/zh/docs/support/faq) |
| 💬 社区交流 | [交流渠道](https://docs.newapi.pro/support/community-interaction) | | 💬 社区交流 | [交流渠道](https://docs.newapi.pro/zh/docs/support/community-interaction) |
| 🐛 反馈问题 | [问题反馈](https://docs.newapi.pro/support/feedback-issues) | | 🐛 反馈问题 | [问题反馈](https://docs.newapi.pro/zh/docs/support/feedback-issues) |
| 📚 完整文档 | [官方文档](https://docs.newapi.pro/support) | | 📚 完整文档 | [官方文档](https://docs.newapi.pro/zh/docs) |
### 🤝 贡献指南 ### 🤝 贡献指南
@@ -450,7 +451,7 @@ docker run --name new-api -d --restart always \
如果这个项目对你有帮助,欢迎给我们一个 ⭐️ Star 如果这个项目对你有帮助,欢迎给我们一个 ⭐️ Star
**[官方文档](https://docs.newapi.pro/)** • **[问题反馈](https://github.com/Calcium-Ion/new-api/issues)** • **[最新发布](https://github.com/Calcium-Ion/new-api/releases)** **[官方文档](https://docs.newapi.pro/zh/docs)** • **[问题反馈](https://github.com/Calcium-Ion/new-api/issues)** • **[最新发布](https://github.com/Calcium-Ion/new-api/releases)**
<sub>Built with ❤️ by QuantumNous</sub> <sub>Built with ❤️ by QuantumNous</sub>

View File

@@ -71,15 +71,66 @@ func getMP3Duration(r io.Reader) (float64, error) {
// getWAVDuration 解析 WAV 文件头以获取时长。 // getWAVDuration 解析 WAV 文件头以获取时长。
func getWAVDuration(r io.ReadSeeker) (float64, error) { func getWAVDuration(r io.ReadSeeker) (float64, error) {
// 1. 强制复位指针
r.Seek(0, io.SeekStart)
dec := wav.NewDecoder(r) dec := wav.NewDecoder(r)
// IsValidFile 会读取 fmt 块
if !dec.IsValidFile() { if !dec.IsValidFile() {
return 0, errors.New("invalid wav file") return 0, errors.New("invalid wav file")
} }
d, err := dec.Duration()
if err != nil { // 尝试寻找 data 块
return 0, errors.Wrap(err, "failed to get wav duration") if err := dec.FwdToPCM(); err != nil {
return 0, errors.Wrap(err, "failed to find PCM data chunk")
} }
return d.Seconds(), nil
pcmSize := int64(dec.PCMSize)
// 如果读出来的 Size 是 0尝试用文件大小反推
if pcmSize == 0 {
// 获取文件总大小
currentPos, _ := r.Seek(0, io.SeekCurrent) // 当前通常在 data chunk header 之后
endPos, _ := r.Seek(0, io.SeekEnd)
fileSize := endPos
// 恢复位置(虽然如果不继续读也没关系)
r.Seek(currentPos, io.SeekStart)
// 数据区大小 ≈ 文件总大小 - 当前指针位置(即Header大小)
// 注意FwdToPCM 成功后CurrentPos 应该刚好指向 Data 区数据的开始
// 或者是 Data Chunk ID + Size 之后。
// WAV Header 一般 44 字节。
if fileSize > 44 {
// 如果 FwdToPCM 成功Reader 应该位于 data 块的数据起始处
// 所以剩余的所有字节理论上都是音频数据
pcmSize = fileSize - currentPos
// 简单的兜底如果算出来还是负数或0强制按文件大小-44计算
if pcmSize <= 0 {
pcmSize = fileSize - 44
}
}
}
numChans := int64(dec.NumChans)
bitDepth := int64(dec.BitDepth)
sampleRate := float64(dec.SampleRate)
if sampleRate == 0 || numChans == 0 || bitDepth == 0 {
return 0, errors.New("invalid wav header metadata")
}
bytesPerFrame := numChans * (bitDepth / 8)
if bytesPerFrame == 0 {
return 0, errors.New("invalid byte depth calculation")
}
totalFrames := pcmSize / bytesPerFrame
durationSeconds := float64(totalFrames) / sampleRate
return durationSeconds, nil
} }
// getFLACDuration 解析 FLAC 文件的 STREAMINFO 块。 // getFLACDuration 解析 FLAC 文件的 STREAMINFO 块。

View File

@@ -2,7 +2,7 @@ package common
import ( import (
"bytes" "bytes"
"errors" "fmt"
"io" "io"
"mime" "mime"
"mime/multipart" "mime/multipart"
@@ -12,24 +12,61 @@ import (
"time" "time"
"github.com/QuantumNous/new-api/constant" "github.com/QuantumNous/new-api/constant"
"github.com/pkg/errors"
"github.com/gin-gonic/gin" "github.com/gin-gonic/gin"
) )
const KeyRequestBody = "key_request_body" const KeyRequestBody = "key_request_body"
func GetRequestBody(c *gin.Context) ([]byte, error) { var ErrRequestBodyTooLarge = errors.New("request body too large")
requestBody, _ := c.Get(KeyRequestBody)
if requestBody != nil { func IsRequestBodyTooLargeError(err error) bool {
return requestBody.([]byte), nil if err == nil {
return false
} }
requestBody, err := io.ReadAll(c.Request.Body) if errors.Is(err, ErrRequestBodyTooLarge) {
return true
}
var mbe *http.MaxBytesError
return errors.As(err, &mbe)
}
func GetRequestBody(c *gin.Context) ([]byte, error) {
cached, exists := c.Get(KeyRequestBody)
if exists && cached != nil {
if b, ok := cached.([]byte); ok {
return b, nil
}
}
maxMB := constant.MaxRequestBodyMB
if maxMB < 0 {
// no limit
body, err := io.ReadAll(c.Request.Body)
_ = c.Request.Body.Close()
if err != nil {
return nil, err
}
c.Set(KeyRequestBody, body)
return body, nil
}
maxBytes := int64(maxMB) << 20
limited := io.LimitReader(c.Request.Body, maxBytes+1)
body, err := io.ReadAll(limited)
if err != nil { if err != nil {
_ = c.Request.Body.Close()
if IsRequestBodyTooLargeError(err) {
return nil, errors.Wrap(ErrRequestBodyTooLarge, fmt.Sprintf("request body exceeds %d MB", maxMB))
}
return nil, err return nil, err
} }
_ = c.Request.Body.Close() _ = c.Request.Body.Close()
c.Set(KeyRequestBody, requestBody) if int64(len(body)) > maxBytes {
return requestBody.([]byte), nil return nil, errors.Wrap(ErrRequestBodyTooLarge, fmt.Sprintf("request body exceeds %d MB", maxMB))
}
c.Set(KeyRequestBody, body)
return body, nil
} }
func UnmarshalBodyReusable(c *gin.Context, v any) error { func UnmarshalBodyReusable(c *gin.Context, v any) error {

View File

@@ -117,6 +117,8 @@ func initConstantEnv() {
constant.DifyDebug = GetEnvOrDefaultBool("DIFY_DEBUG", true) constant.DifyDebug = GetEnvOrDefaultBool("DIFY_DEBUG", true)
constant.MaxFileDownloadMB = GetEnvOrDefault("MAX_FILE_DOWNLOAD_MB", 20) constant.MaxFileDownloadMB = GetEnvOrDefault("MAX_FILE_DOWNLOAD_MB", 20)
constant.StreamScannerMaxBufferMB = GetEnvOrDefault("STREAM_SCANNER_MAX_BUFFER_MB", 64) constant.StreamScannerMaxBufferMB = GetEnvOrDefault("STREAM_SCANNER_MAX_BUFFER_MB", 64)
// MaxRequestBodyMB 请求体最大大小(解压后),用于防止超大请求/zip bomb导致内存暴涨
constant.MaxRequestBodyMB = GetEnvOrDefault("MAX_REQUEST_BODY_MB", 64)
// ForceStreamOption 覆盖请求参数强制返回usage信息 // ForceStreamOption 覆盖请求参数强制返回usage信息
constant.ForceStreamOption = GetEnvOrDefaultBool("FORCE_STREAM_OPTION", true) constant.ForceStreamOption = GetEnvOrDefaultBool("FORCE_STREAM_OPTION", true)
constant.CountToken = GetEnvOrDefaultBool("CountToken", true) constant.CountToken = GetEnvOrDefaultBool("CountToken", true)

View File

@@ -2,6 +2,15 @@ package common
import "net" import "net"
func IsIP(s string) bool {
ip := net.ParseIP(s)
return ip != nil
}
func ParseIP(s string) net.IP {
return net.ParseIP(s)
}
func IsPrivateIP(ip net.IP) bool { func IsPrivateIP(ip net.IP) bool {
if ip.IsLoopback() || ip.IsLinkLocalUnicast() || ip.IsLinkLocalMulticast() { if ip.IsLoopback() || ip.IsLinkLocalUnicast() || ip.IsLinkLocalMulticast() {
return true return true
@@ -20,3 +29,23 @@ func IsPrivateIP(ip net.IP) bool {
} }
return false return false
} }
func IsIpInCIDRList(ip net.IP, cidrList []string) bool {
for _, cidr := range cidrList {
_, network, err := net.ParseCIDR(cidr)
if err != nil {
// 尝试作为单个IP处理
if whitelistIP := net.ParseIP(cidr); whitelistIP != nil {
if ip.Equal(whitelistIP) {
return true
}
}
continue
}
if network.Contains(ip) {
return true
}
}
return false
}

View File

@@ -186,23 +186,7 @@ func isIPListed(ip net.IP, list []string) bool {
return false return false
} }
for _, whitelistCIDR := range list { return IsIpInCIDRList(ip, list)
_, network, err := net.ParseCIDR(whitelistCIDR)
if err != nil {
// 尝试作为单个IP处理
if whitelistIP := net.ParseIP(whitelistCIDR); whitelistIP != nil {
if ip.Equal(whitelistIP) {
return true
}
}
continue
}
if network.Contains(ip) {
return true
}
}
return false
} }
// IsIPAccessAllowed 检查IP是否允许访问 // IsIPAccessAllowed 检查IP是否允许访问

View File

@@ -217,11 +217,6 @@ func IntMax(a int, b int) int {
} }
} }
func IsIP(s string) bool {
ip := net.ParseIP(s)
return ip != nil
}
func GetUUID() string { func GetUUID() string {
code := uuid.New().String() code := uuid.New().String()
code = strings.Replace(code, "-", "", -1) code = strings.Replace(code, "-", "", -1)

View File

@@ -21,7 +21,6 @@ const (
ContextKeyTokenCrossGroupRetry ContextKey = "token_cross_group_retry" ContextKeyTokenCrossGroupRetry ContextKey = "token_cross_group_retry"
/* channel related keys */ /* channel related keys */
ContextKeyAutoGroupIndex ContextKey = "auto_group_index"
ContextKeyChannelId ContextKey = "channel_id" ContextKeyChannelId ContextKey = "channel_id"
ContextKeyChannelName ContextKey = "channel_name" ContextKeyChannelName ContextKey = "channel_name"
ContextKeyChannelCreateTime ContextKey = "channel_create_time" ContextKeyChannelCreateTime ContextKey = "channel_create_time"
@@ -39,6 +38,10 @@ const (
ContextKeyChannelMultiKeyIndex ContextKey = "channel_multi_key_index" ContextKeyChannelMultiKeyIndex ContextKey = "channel_multi_key_index"
ContextKeyChannelKey ContextKey = "channel_key" ContextKeyChannelKey ContextKey = "channel_key"
ContextKeyAutoGroup ContextKey = "auto_group"
ContextKeyAutoGroupIndex ContextKey = "auto_group_index"
ContextKeyAutoGroupRetryIndex ContextKey = "auto_group_retry_index"
/* user related keys */ /* user related keys */
ContextKeyUserId ContextKey = "id" ContextKeyUserId ContextKey = "id"
ContextKeyUserSetting ContextKey = "user_setting" ContextKeyUserSetting ContextKey = "user_setting"

View File

@@ -9,6 +9,7 @@ var CountToken bool
var GetMediaToken bool var GetMediaToken bool
var GetMediaTokenNotStream bool var GetMediaTokenNotStream bool
var UpdateTask bool var UpdateTask bool
var MaxRequestBodyMB int
var AzureDefaultAPIVersion string var AzureDefaultAPIVersion string
var GeminiVisionMaxImageNum int var GeminiVisionMaxImageNum int
var NotifyLimitCount int var NotifyLimitCount int

View File

@@ -2,9 +2,9 @@ package controller
import ( import (
"github.com/QuantumNous/new-api/common" "github.com/QuantumNous/new-api/common"
"github.com/QuantumNous/new-api/dto"
"github.com/QuantumNous/new-api/model" "github.com/QuantumNous/new-api/model"
"github.com/QuantumNous/new-api/setting/operation_setting" "github.com/QuantumNous/new-api/setting/operation_setting"
"github.com/QuantumNous/new-api/types"
"github.com/gin-gonic/gin" "github.com/gin-gonic/gin"
) )
@@ -29,7 +29,7 @@ func GetSubscription(c *gin.Context) {
expiredTime = 0 expiredTime = 0
} }
if err != nil { if err != nil {
openAIError := dto.OpenAIError{ openAIError := types.OpenAIError{
Message: err.Error(), Message: err.Error(),
Type: "upstream_error", Type: "upstream_error",
} }
@@ -81,7 +81,7 @@ func GetUsage(c *gin.Context) {
quota, err = model.GetUserUsedQuota(userId) quota, err = model.GetUserUsedQuota(userId)
} }
if err != nil { if err != nil {
openAIError := dto.OpenAIError{ openAIError := types.OpenAIError{
Message: err.Error(), Message: err.Error(),
Type: "new_api_error", Type: "new_api_error",
} }

View File

@@ -114,7 +114,7 @@ func DiscordOAuth(c *gin.Context) {
DiscordBind(c) DiscordBind(c)
return return
} }
if !system_setting.GetDiscordSettings().Enabled { if !system_setting.GetDiscordSettings().Enabled {
c.JSON(http.StatusOK, gin.H{ c.JSON(http.StatusOK, gin.H{
"success": false, "success": false,
"message": "管理员未开启通过 Discord 登录以及注册", "message": "管理员未开启通过 Discord 登录以及注册",

View File

@@ -18,6 +18,7 @@ import (
"github.com/QuantumNous/new-api/service" "github.com/QuantumNous/new-api/service"
"github.com/QuantumNous/new-api/setting/operation_setting" "github.com/QuantumNous/new-api/setting/operation_setting"
"github.com/QuantumNous/new-api/setting/ratio_setting" "github.com/QuantumNous/new-api/setting/ratio_setting"
"github.com/QuantumNous/new-api/types"
"github.com/gin-gonic/gin" "github.com/gin-gonic/gin"
"github.com/samber/lo" "github.com/samber/lo"
) )
@@ -275,7 +276,7 @@ func RetrieveModel(c *gin.Context, modelType int) {
c.JSON(200, aiModel) c.JSON(200, aiModel)
} }
} else { } else {
openAIError := dto.OpenAIError{ openAIError := types.OpenAIError{
Message: fmt.Sprintf("The model '%s' does not exist", modelId), Message: fmt.Sprintf("The model '%s' does not exist", modelId),
Type: "invalid_request_error", Type: "invalid_request_error",
Param: "model", Param: "model",

View File

@@ -3,10 +3,7 @@ package controller
import ( import (
"errors" "errors"
"fmt" "fmt"
"time"
"github.com/QuantumNous/new-api/common"
"github.com/QuantumNous/new-api/constant"
"github.com/QuantumNous/new-api/middleware" "github.com/QuantumNous/new-api/middleware"
"github.com/QuantumNous/new-api/model" "github.com/QuantumNous/new-api/model"
relaycommon "github.com/QuantumNous/new-api/relay/common" relaycommon "github.com/QuantumNous/new-api/relay/common"
@@ -54,12 +51,6 @@ func Playground(c *gin.Context) {
Group: relayInfo.UsingGroup, Group: relayInfo.UsingGroup,
} }
_ = middleware.SetupContextForToken(c, tempToken) _ = middleware.SetupContextForToken(c, tempToken)
_, newAPIError = getChannel(c, relayInfo, 0)
if newAPIError != nil {
return
}
//middleware.SetupContextForSelectedChannel(c, channel, playgroundRequest.Model)
common.SetContextKey(c, constant.ContextKeyRequestStartTime, time.Now())
Relay(c, types.RelayFormatOpenAI) Relay(c, types.RelayFormatOpenAI)
} }

View File

@@ -2,6 +2,7 @@ package controller
import ( import (
"bytes" "bytes"
"errors"
"fmt" "fmt"
"io" "io"
"log" "log"
@@ -104,7 +105,12 @@ func Relay(c *gin.Context, relayFormat types.RelayFormat) {
request, err := helper.GetAndValidateRequest(c, relayFormat) request, err := helper.GetAndValidateRequest(c, relayFormat)
if err != nil { if err != nil {
newAPIError = types.NewError(err, types.ErrorCodeInvalidRequest) // Map "request body too large" to 413 so clients can handle it correctly
if common.IsRequestBodyTooLargeError(err) || errors.Is(err, common.ErrRequestBodyTooLarge) {
newAPIError = types.NewErrorWithStatusCode(err, types.ErrorCodeReadRequestBodyFailed, http.StatusRequestEntityTooLarge, types.ErrOptionWithSkipRetry())
} else {
newAPIError = types.NewError(err, types.ErrorCodeInvalidRequest)
}
return return
} }
@@ -114,9 +120,17 @@ func Relay(c *gin.Context, relayFormat types.RelayFormat) {
return return
} }
meta := request.GetTokenCountMeta() needSensitiveCheck := setting.ShouldCheckPromptSensitive()
needCountToken := constant.CountToken
// Avoid building huge CombineText (strings.Join) when token counting and sensitive check are both disabled.
var meta *types.TokenCountMeta
if needSensitiveCheck || needCountToken {
meta = request.GetTokenCountMeta()
} else {
meta = fastTokenCountMetaForPricing(request)
}
if setting.ShouldCheckPromptSensitive() { if needSensitiveCheck && meta != nil {
contains, words := service.CheckSensitiveText(meta.CombineText) contains, words := service.CheckSensitiveText(meta.CombineText)
if contains { if contains {
logger.LogWarn(c, fmt.Sprintf("user sensitive words detected: %s", strings.Join(words, ", "))) logger.LogWarn(c, fmt.Sprintf("user sensitive words detected: %s", strings.Join(words, ", ")))
@@ -157,16 +171,32 @@ func Relay(c *gin.Context, relayFormat types.RelayFormat) {
} }
}() }()
for i := 0; i <= common.RetryTimes; i++ { retryParam := &service.RetryParam{
channel, err := getChannel(c, relayInfo, i) Ctx: c,
if err != nil { TokenGroup: relayInfo.TokenGroup,
logger.LogError(c, err.Error()) ModelName: relayInfo.OriginModelName,
newAPIError = err Retry: common.GetPointer(0),
}
for ; retryParam.GetRetry() <= common.RetryTimes; retryParam.IncreaseRetry() {
channel, channelErr := getChannel(c, relayInfo, retryParam)
if channelErr != nil {
logger.LogError(c, channelErr.Error())
newAPIError = channelErr
break break
} }
addUsedChannel(c, channel.Id) addUsedChannel(c, channel.Id)
requestBody, _ := common.GetRequestBody(c) requestBody, bodyErr := common.GetRequestBody(c)
if bodyErr != nil {
// Ensure consistent 413 for oversized bodies even when error occurs later (e.g., retry path)
if common.IsRequestBodyTooLargeError(bodyErr) || errors.Is(bodyErr, common.ErrRequestBodyTooLarge) {
newAPIError = types.NewErrorWithStatusCode(bodyErr, types.ErrorCodeReadRequestBodyFailed, http.StatusRequestEntityTooLarge, types.ErrOptionWithSkipRetry())
} else {
newAPIError = types.NewErrorWithStatusCode(bodyErr, types.ErrorCodeReadRequestBodyFailed, http.StatusBadRequest, types.ErrOptionWithSkipRetry())
}
break
}
c.Request.Body = io.NopCloser(bytes.NewBuffer(requestBody)) c.Request.Body = io.NopCloser(bytes.NewBuffer(requestBody))
switch relayFormat { switch relayFormat {
@@ -186,7 +216,7 @@ func Relay(c *gin.Context, relayFormat types.RelayFormat) {
processChannelError(c, *types.NewChannelError(channel.Id, channel.Type, channel.Name, channel.ChannelInfo.IsMultiKey, common.GetContextKeyString(c, constant.ContextKeyChannelKey), channel.GetAutoBan()), newAPIError) processChannelError(c, *types.NewChannelError(channel.Id, channel.Type, channel.Name, channel.ChannelInfo.IsMultiKey, common.GetContextKeyString(c, constant.ContextKeyChannelKey), channel.GetAutoBan()), newAPIError)
if !shouldRetry(c, newAPIError, common.RetryTimes-i) { if !shouldRetry(c, newAPIError, common.RetryTimes-retryParam.GetRetry()) {
break break
} }
} }
@@ -211,8 +241,35 @@ func addUsedChannel(c *gin.Context, channelId int) {
c.Set("use_channel", useChannel) c.Set("use_channel", useChannel)
} }
func getChannel(c *gin.Context, info *relaycommon.RelayInfo, retryCount int) (*model.Channel, *types.NewAPIError) { func fastTokenCountMetaForPricing(request dto.Request) *types.TokenCountMeta {
if retryCount == 0 { if request == nil {
return &types.TokenCountMeta{}
}
meta := &types.TokenCountMeta{
TokenType: types.TokenTypeTokenizer,
}
switch r := request.(type) {
case *dto.GeneralOpenAIRequest:
if r.MaxCompletionTokens > r.MaxTokens {
meta.MaxTokens = int(r.MaxCompletionTokens)
} else {
meta.MaxTokens = int(r.MaxTokens)
}
case *dto.OpenAIResponsesRequest:
meta.MaxTokens = int(r.MaxOutputTokens)
case *dto.ClaudeRequest:
meta.MaxTokens = int(r.MaxTokens)
case *dto.ImageRequest:
// Pricing for image requests depends on ImagePriceRatio; safe to compute even when CountToken is disabled.
return r.GetTokenCountMeta()
default:
// Best-effort: leave CombineText empty to avoid large allocations.
}
return meta
}
func getChannel(c *gin.Context, info *relaycommon.RelayInfo, retryParam *service.RetryParam) (*model.Channel, *types.NewAPIError) {
if info.ChannelMeta == nil {
autoBan := c.GetBool("auto_ban") autoBan := c.GetBool("auto_ban")
autoBanInt := 1 autoBanInt := 1
if !autoBan { if !autoBan {
@@ -225,7 +282,7 @@ func getChannel(c *gin.Context, info *relaycommon.RelayInfo, retryCount int) (*m
AutoBan: &autoBanInt, AutoBan: &autoBanInt,
}, nil }, nil
} }
channel, selectGroup, err := service.CacheGetRandomSatisfiedChannel(c, info.TokenGroup, info.OriginModelName, retryCount) channel, selectGroup, err := service.CacheGetRandomSatisfiedChannel(retryParam)
info.PriceData.GroupRatioInfo = helper.HandleGroupRatio(c, info) info.PriceData.GroupRatioInfo = helper.HandleGroupRatio(c, info)
@@ -370,7 +427,7 @@ func RelayMidjourney(c *gin.Context) {
} }
func RelayNotImplemented(c *gin.Context) { func RelayNotImplemented(c *gin.Context) {
err := dto.OpenAIError{ err := types.OpenAIError{
Message: "API not implemented", Message: "API not implemented",
Type: "new_api_error", Type: "new_api_error",
Param: "", Param: "",
@@ -382,7 +439,7 @@ func RelayNotImplemented(c *gin.Context) {
} }
func RelayNotFound(c *gin.Context) { func RelayNotFound(c *gin.Context) {
err := dto.OpenAIError{ err := types.OpenAIError{
Message: fmt.Sprintf("Invalid URL (%s %s)", c.Request.Method, c.Request.URL.Path), Message: fmt.Sprintf("Invalid URL (%s %s)", c.Request.Method, c.Request.URL.Path),
Type: "invalid_request_error", Type: "invalid_request_error",
Param: "", Param: "",
@@ -405,8 +462,14 @@ func RelayTask(c *gin.Context) {
if taskErr == nil { if taskErr == nil {
retryTimes = 0 retryTimes = 0
} }
for i := 0; shouldRetryTaskRelay(c, channelId, taskErr, retryTimes) && i < retryTimes; i++ { retryParam := &service.RetryParam{
channel, newAPIError := getChannel(c, relayInfo, i) Ctx: c,
TokenGroup: relayInfo.TokenGroup,
ModelName: relayInfo.OriginModelName,
Retry: common.GetPointer(0),
}
for ; shouldRetryTaskRelay(c, channelId, taskErr, retryTimes) && retryParam.GetRetry() < retryTimes; retryParam.IncreaseRetry() {
channel, newAPIError := getChannel(c, relayInfo, retryParam)
if newAPIError != nil { if newAPIError != nil {
logger.LogError(c, fmt.Sprintf("CacheGetRandomSatisfiedChannel failed: %s", newAPIError.Error())) logger.LogError(c, fmt.Sprintf("CacheGetRandomSatisfiedChannel failed: %s", newAPIError.Error()))
taskErr = service.TaskErrorWrapperLocal(newAPIError.Err, "get_channel_failed", http.StatusInternalServerError) taskErr = service.TaskErrorWrapperLocal(newAPIError.Err, "get_channel_failed", http.StatusInternalServerError)
@@ -416,10 +479,18 @@ func RelayTask(c *gin.Context) {
useChannel := c.GetStringSlice("use_channel") useChannel := c.GetStringSlice("use_channel")
useChannel = append(useChannel, fmt.Sprintf("%d", channelId)) useChannel = append(useChannel, fmt.Sprintf("%d", channelId))
c.Set("use_channel", useChannel) c.Set("use_channel", useChannel)
logger.LogInfo(c, fmt.Sprintf("using channel #%d to retry (remain times %d)", channel.Id, i)) logger.LogInfo(c, fmt.Sprintf("using channel #%d to retry (remain times %d)", channel.Id, retryParam.GetRetry()))
//middleware.SetupContextForSelectedChannel(c, channel, originalModel) //middleware.SetupContextForSelectedChannel(c, channel, originalModel)
requestBody, _ := common.GetRequestBody(c) requestBody, err := common.GetRequestBody(c)
if err != nil {
if common.IsRequestBodyTooLargeError(err) || errors.Is(err, common.ErrRequestBodyTooLarge) {
taskErr = service.TaskErrorWrapperLocal(err, "read_request_body_failed", http.StatusRequestEntityTooLarge)
} else {
taskErr = service.TaskErrorWrapperLocal(err, "read_request_body_failed", http.StatusBadRequest)
}
break
}
c.Request.Body = io.NopCloser(bytes.NewBuffer(requestBody)) c.Request.Body = io.NopCloser(bytes.NewBuffer(requestBody))
taskErr = taskRelayHandler(c, relayInfo) taskErr = taskRelayHandler(c, relayInfo)
} }

View File

@@ -88,7 +88,7 @@ func UpdateSunoTaskAll(ctx context.Context, taskChannelM map[int][]string, taskM
for channelId, taskIds := range taskChannelM { for channelId, taskIds := range taskChannelM {
err := updateSunoTaskAll(ctx, channelId, taskIds, taskM) err := updateSunoTaskAll(ctx, channelId, taskIds, taskM)
if err != nil { if err != nil {
logger.LogError(ctx, fmt.Sprintf("渠道 #%d 更新异步任务失败: %d", channelId, err.Error())) logger.LogError(ctx, fmt.Sprintf("渠道 #%d 更新异步任务失败: %s", channelId, err.Error()))
} }
} }
return nil return nil
@@ -141,7 +141,7 @@ func updateSunoTaskAll(ctx context.Context, channelId int, taskIds []string, tas
return err return err
} }
if !responseItems.IsSuccess() { if !responseItems.IsSuccess() {
common.SysLog(fmt.Sprintf("渠道 #%d 未完成的任务有: %d, 成功获取到任务数: %d", channelId, len(taskIds), string(responseBody))) common.SysLog(fmt.Sprintf("渠道 #%d 未完成的任务有: %d, 成功获取到任务数: %s", channelId, len(taskIds), string(responseBody)))
return err return err
} }

View File

@@ -171,6 +171,7 @@ func AddToken(c *gin.Context) {
ModelLimits: token.ModelLimits, ModelLimits: token.ModelLimits,
AllowIps: token.AllowIps, AllowIps: token.AllowIps,
Group: token.Group, Group: token.Group,
CrossGroupRetry: token.CrossGroupRetry,
} }
err = cleanToken.Insert() err = cleanToken.Insert()
if err != nil { if err != nil {

View File

@@ -7,12 +7,12 @@ import (
"encoding/hex" "encoding/hex"
"encoding/json" "encoding/json"
"fmt" "fmt"
"io"
"log"
"net/http"
"github.com/QuantumNous/new-api/common" "github.com/QuantumNous/new-api/common"
"github.com/QuantumNous/new-api/model" "github.com/QuantumNous/new-api/model"
"github.com/QuantumNous/new-api/setting" "github.com/QuantumNous/new-api/setting"
"io"
"log"
"net/http"
"time" "time"
"github.com/gin-gonic/gin" "github.com/gin-gonic/gin"

View File

@@ -110,18 +110,17 @@ func setupLogin(user *model.User, c *gin.Context) {
}) })
return return
} }
cleanUser := model.User{
Id: user.Id,
Username: user.Username,
DisplayName: user.DisplayName,
Role: user.Role,
Status: user.Status,
Group: user.Group,
}
c.JSON(http.StatusOK, gin.H{ c.JSON(http.StatusOK, gin.H{
"message": "", "message": "",
"success": true, "success": true,
"data": cleanUser, "data": map[string]any{
"id": user.Id,
"username": user.Username,
"display_name": user.DisplayName,
"role": user.Role,
"status": user.Status,
"group": user.Group,
},
}) })
} }

View File

@@ -2,6 +2,7 @@ package dto
import ( import (
"encoding/json" "encoding/json"
"strings"
"github.com/QuantumNous/new-api/types" "github.com/QuantumNous/new-api/types"
@@ -24,11 +25,14 @@ func (r *AudioRequest) GetTokenCountMeta() *types.TokenCountMeta {
CombineText: r.Input, CombineText: r.Input,
TokenType: types.TokenTypeTextNumber, TokenType: types.TokenTypeTextNumber,
} }
if strings.Contains(r.Model, "gpt") {
meta.TokenType = types.TokenTypeTokenizer
}
return meta return meta
} }
func (r *AudioRequest) IsStream(c *gin.Context) bool { func (r *AudioRequest) IsStream(c *gin.Context) bool {
return false return r.StreamFormat == "sse"
} }
func (r *AudioRequest) SetModelName(modelName string) { func (r *AudioRequest) SetModelName(modelName string) {

View File

@@ -1,26 +1,31 @@
package dto package dto
import "github.com/QuantumNous/new-api/types" import (
"encoding/json"
type OpenAIError struct { "github.com/QuantumNous/new-api/common"
Message string `json:"message"` "github.com/QuantumNous/new-api/types"
Type string `json:"type"` )
Param string `json:"param"`
Code any `json:"code"` //type OpenAIError struct {
} // Message string `json:"message"`
// Type string `json:"type"`
// Param string `json:"param"`
// Code any `json:"code"`
//}
type OpenAIErrorWithStatusCode struct { type OpenAIErrorWithStatusCode struct {
Error OpenAIError `json:"error"` Error types.OpenAIError `json:"error"`
StatusCode int `json:"status_code"` StatusCode int `json:"status_code"`
LocalError bool LocalError bool
} }
type GeneralErrorResponse struct { type GeneralErrorResponse struct {
Error types.OpenAIError `json:"error"` Error json.RawMessage `json:"error"`
Message string `json:"message"` Message string `json:"message"`
Msg string `json:"msg"` Msg string `json:"msg"`
Err string `json:"err"` Err string `json:"err"`
ErrorMsg string `json:"error_msg"` ErrorMsg string `json:"error_msg"`
Header struct { Header struct {
Message string `json:"message"` Message string `json:"message"`
} `json:"header"` } `json:"header"`
@@ -31,9 +36,35 @@ type GeneralErrorResponse struct {
} `json:"response"` } `json:"response"`
} }
// TryToOpenAIError attempts to decode the raw `error` payload of this
// upstream response as a structured OpenAI-style error object.
//
// It returns a pointer to the decoded types.OpenAIError only when the
// payload is non-empty, unmarshals cleanly, and carries a non-empty
// Message; in every other case (empty payload, malformed JSON, or an
// object without a message) it returns nil so callers can fall back to
// the other message fields.
func (e GeneralErrorResponse) TryToOpenAIError() *types.OpenAIError {
	var openAIError types.OpenAIError
	if len(e.Error) > 0 {
		err := common.Unmarshal(e.Error, &openAIError)
		if err == nil && openAIError.Message != "" {
			return &openAIError
		}
	}
	return nil
}
func (e GeneralErrorResponse) ToMessage() string { func (e GeneralErrorResponse) ToMessage() string {
if e.Error.Message != "" { if len(e.Error) > 0 {
return e.Error.Message switch common.GetJsonType(e.Error) {
case "object":
var openAIError types.OpenAIError
err := common.Unmarshal(e.Error, &openAIError)
if err == nil && openAIError.Message != "" {
return openAIError.Message
}
case "string":
var msg string
err := common.Unmarshal(e.Error, &msg)
if err == nil && msg != "" {
return msg
}
default:
return string(e.Error)
}
} }
if e.Message != "" { if e.Message != "" {
return e.Message return e.Message

View File

@@ -2,12 +2,14 @@ package middleware
import ( import (
"fmt" "fmt"
"net"
"net/http" "net/http"
"strconv" "strconv"
"strings" "strings"
"github.com/QuantumNous/new-api/common" "github.com/QuantumNous/new-api/common"
"github.com/QuantumNous/new-api/constant" "github.com/QuantumNous/new-api/constant"
"github.com/QuantumNous/new-api/logger"
"github.com/QuantumNous/new-api/model" "github.com/QuantumNous/new-api/model"
"github.com/QuantumNous/new-api/service" "github.com/QuantumNous/new-api/service"
"github.com/QuantumNous/new-api/setting/ratio_setting" "github.com/QuantumNous/new-api/setting/ratio_setting"
@@ -240,13 +242,20 @@ func TokenAuth() func(c *gin.Context) {
return return
} }
allowIpsMap := token.GetIpLimitsMap() allowIps := token.GetIpLimits()
if len(allowIpsMap) != 0 { if len(allowIps) > 0 {
clientIp := c.ClientIP() clientIp := c.ClientIP()
if _, ok := allowIpsMap[clientIp]; !ok { logger.LogDebug(c, "Token has IP restrictions, checking client IP %s", clientIp)
ip := net.ParseIP(clientIp)
if ip == nil {
abortWithOpenAiMessage(c, http.StatusForbidden, "无法解析客户端 IP 地址")
return
}
if common.IsIpInCIDRList(ip, allowIps) == false {
abortWithOpenAiMessage(c, http.StatusForbidden, "您的 IP 不在令牌允许访问的列表中") abortWithOpenAiMessage(c, http.StatusForbidden, "您的 IP 不在令牌允许访问的列表中")
return return
} }
logger.LogDebug(c, "Client IP %s passed the token IP restrictions check", clientIp)
} }
userCache, err := model.GetUserCache(token.UserId) userCache, err := model.GetUserCache(token.UserId)
@@ -308,7 +317,7 @@ func SetupContextForToken(c *gin.Context, token *model.Token, parts ...string) e
c.Set("token_model_limit_enabled", false) c.Set("token_model_limit_enabled", false)
} }
common.SetContextKey(c, constant.ContextKeyTokenGroup, token.Group) common.SetContextKey(c, constant.ContextKeyTokenGroup, token.Group)
c.Set("token_cross_group_retry", token.CrossGroupRetry) common.SetContextKey(c, constant.ContextKeyTokenCrossGroupRetry, token.CrossGroupRetry)
if len(parts) > 1 { if len(parts) > 1 {
if model.IsAdmin(token.UserId) { if model.IsAdmin(token.UserId) {
c.Set("specific_channel_id", parts[1]) c.Set("specific_channel_id", parts[1])

View File

@@ -97,7 +97,12 @@ func Distribute() func(c *gin.Context) {
common.SetContextKey(c, constant.ContextKeyUsingGroup, usingGroup) common.SetContextKey(c, constant.ContextKeyUsingGroup, usingGroup)
} }
} }
channel, selectGroup, err = service.CacheGetRandomSatisfiedChannel(c, usingGroup, modelRequest.Model, 0) channel, selectGroup, err = service.CacheGetRandomSatisfiedChannel(&service.RetryParam{
Ctx: c,
ModelName: modelRequest.Model,
TokenGroup: usingGroup,
Retry: common.GetPointer(0),
})
if err != nil { if err != nil {
showGroup := usingGroup showGroup := usingGroup
if usingGroup == "auto" { if usingGroup == "auto" {
@@ -157,7 +162,7 @@ func getModelRequest(c *gin.Context) (*ModelRequest, bool, error) {
} }
midjourneyModel, mjErr, success := service.GetMjRequestModel(relayMode, &midjourneyRequest) midjourneyModel, mjErr, success := service.GetMjRequestModel(relayMode, &midjourneyRequest)
if mjErr != nil { if mjErr != nil {
return nil, false, fmt.Errorf(mjErr.Description) return nil, false, fmt.Errorf("%s", mjErr.Description)
} }
if midjourneyModel == "" { if midjourneyModel == "" {
if !success { if !success {

View File

@@ -5,32 +5,69 @@ import (
"io" "io"
"net/http" "net/http"
"github.com/QuantumNous/new-api/constant"
"github.com/andybalholm/brotli" "github.com/andybalholm/brotli"
"github.com/gin-gonic/gin" "github.com/gin-gonic/gin"
) )
// readCloser wraps an io.Reader with a custom close callback so that a
// decompressing reader and the underlying request body can both be
// released when the wrapped body is closed.
type readCloser struct {
	io.Reader
	closeFn func() error // invoked on Close; may be nil (no-op close)
}
// Close runs the configured closeFn, if any; with a nil closeFn it is a
// no-op and reports success.
func (rc *readCloser) Close() error {
	if rc.closeFn == nil {
		return nil
	}
	return rc.closeFn()
}
func DecompressRequestMiddleware() gin.HandlerFunc { func DecompressRequestMiddleware() gin.HandlerFunc {
return func(c *gin.Context) { return func(c *gin.Context) {
if c.Request.Body == nil || c.Request.Method == http.MethodGet { if c.Request.Body == nil || c.Request.Method == http.MethodGet {
c.Next() c.Next()
return return
} }
maxMB := constant.MaxRequestBodyMB
if maxMB <= 0 {
maxMB = 32
}
maxBytes := int64(maxMB) << 20
origBody := c.Request.Body
wrapMaxBytes := func(body io.ReadCloser) io.ReadCloser {
return http.MaxBytesReader(c.Writer, body, maxBytes)
}
switch c.GetHeader("Content-Encoding") { switch c.GetHeader("Content-Encoding") {
case "gzip": case "gzip":
gzipReader, err := gzip.NewReader(c.Request.Body) gzipReader, err := gzip.NewReader(origBody)
if err != nil { if err != nil {
_ = origBody.Close()
c.AbortWithStatus(http.StatusBadRequest) c.AbortWithStatus(http.StatusBadRequest)
return return
} }
defer gzipReader.Close() // Replace the request body with the decompressed data, and enforce a max size (post-decompression).
c.Request.Body = wrapMaxBytes(&readCloser{
// Replace the request body with the decompressed data Reader: gzipReader,
c.Request.Body = io.NopCloser(gzipReader) closeFn: func() error {
_ = gzipReader.Close()
return origBody.Close()
},
})
c.Request.Header.Del("Content-Encoding") c.Request.Header.Del("Content-Encoding")
case "br": case "br":
reader := brotli.NewReader(c.Request.Body) reader := brotli.NewReader(origBody)
c.Request.Body = io.NopCloser(reader) c.Request.Body = wrapMaxBytes(&readCloser{
Reader: reader,
closeFn: func() error {
return origBody.Close()
},
})
c.Request.Header.Del("Content-Encoding") c.Request.Header.Del("Content-Encoding")
default:
// Even for uncompressed bodies, enforce a max size to avoid huge request allocations.
c.Request.Body = wrapMaxBytes(origBody)
} }
// Continue processing the request // Continue processing the request

View File

@@ -254,6 +254,9 @@ func (channel *Channel) Save() error {
} }
func (channel *Channel) SaveWithoutKey() error { func (channel *Channel) SaveWithoutKey() error {
if channel.Id == 0 {
return errors.New("channel ID is 0")
}
return DB.Omit("key").Save(channel).Error return DB.Omit("key").Save(channel).Error
} }

View File

@@ -6,7 +6,6 @@ import (
"strings" "strings"
"github.com/QuantumNous/new-api/common" "github.com/QuantumNous/new-api/common"
"github.com/bytedance/gopkg/util/gopool" "github.com/bytedance/gopkg/util/gopool"
"gorm.io/gorm" "gorm.io/gorm"
) )
@@ -35,26 +34,26 @@ func (token *Token) Clean() {
token.Key = "" token.Key = ""
} }
func (token *Token) GetIpLimitsMap() map[string]any { func (token *Token) GetIpLimits() []string {
// delete empty spaces // delete empty spaces
//split with \n //split with \n
ipLimitsMap := make(map[string]any) ipLimits := make([]string, 0)
if token.AllowIps == nil { if token.AllowIps == nil {
return ipLimitsMap return ipLimits
} }
cleanIps := strings.ReplaceAll(*token.AllowIps, " ", "") cleanIps := strings.ReplaceAll(*token.AllowIps, " ", "")
if cleanIps == "" { if cleanIps == "" {
return ipLimitsMap return ipLimits
} }
ips := strings.Split(cleanIps, "\n") ips := strings.Split(cleanIps, "\n")
for _, ip := range ips { for _, ip := range ips {
ip = strings.TrimSpace(ip) ip = strings.TrimSpace(ip)
ip = strings.ReplaceAll(ip, ",", "") ip = strings.ReplaceAll(ip, ",", "")
if common.IsIP(ip) { if ip != "" {
ipLimitsMap[ip] = true ipLimits = append(ipLimits, ip)
} }
} }
return ipLimitsMap return ipLimits
} }
func GetAllUserTokens(userId int, startIdx int, num int) ([]*Token, error) { func GetAllUserTokens(userId int, startIdx int, num int) ([]*Token, error) {
@@ -113,7 +112,12 @@ func ValidateUserToken(key string) (token *Token, err error) {
} }
return token, nil return token, nil
} }
return nil, errors.New("无效的令牌") common.SysLog("ValidateUserToken: failed to get token: " + err.Error())
if errors.Is(err, gorm.ErrRecordNotFound) {
return nil, errors.New("无效的令牌")
} else {
return nil, errors.New("无效的令牌,数据库查询出错,请联系管理员")
}
} }
func GetTokenByIds(id int, userId int) (*Token, error) { func GetTokenByIds(id int, userId int) (*Token, error) {

View File

@@ -67,8 +67,11 @@ func AudioHelper(c *gin.Context, info *relaycommon.RelayInfo) (newAPIError *type
service.ResetStatusCode(newAPIError, statusCodeMappingStr) service.ResetStatusCode(newAPIError, statusCodeMappingStr)
return newAPIError return newAPIError
} }
if usage.(*dto.Usage).CompletionTokenDetails.AudioTokens > 0 || usage.(*dto.Usage).PromptTokensDetails.AudioTokens > 0 {
postConsumeQuota(c, info, usage.(*dto.Usage), "") service.PostAudioConsumeQuota(c, info, usage.(*dto.Usage), "")
} else {
postConsumeQuota(c, info, usage.(*dto.Usage), "")
}
return nil return nil
} }

View File

@@ -18,7 +18,7 @@ var awsModelIDMap = map[string]string{
"claude-opus-4-1-20250805": "anthropic.claude-opus-4-1-20250805-v1:0", "claude-opus-4-1-20250805": "anthropic.claude-opus-4-1-20250805-v1:0",
"claude-sonnet-4-5-20250929": "anthropic.claude-sonnet-4-5-20250929-v1:0", "claude-sonnet-4-5-20250929": "anthropic.claude-sonnet-4-5-20250929-v1:0",
"claude-haiku-4-5-20251001": "anthropic.claude-haiku-4-5-20251001-v1:0", "claude-haiku-4-5-20251001": "anthropic.claude-haiku-4-5-20251001-v1:0",
"claude-opus-4-5-20251101": "anthropic.claude-opus-4-5-20251101-v1:0", "claude-opus-4-5-20251101": "anthropic.claude-opus-4-5-20251101-v1:0",
// Nova models // Nova models
"nova-micro-v1:0": "amazon.nova-micro-v1:0", "nova-micro-v1:0": "amazon.nova-micro-v1:0",
"nova-lite-v1:0": "amazon.nova-lite-v1:0", "nova-lite-v1:0": "amazon.nova-lite-v1:0",

View File

@@ -150,7 +150,7 @@ func baiduHandler(c *gin.Context, info *relaycommon.RelayInfo, resp *http.Respon
return types.NewError(err, types.ErrorCodeBadResponseBody), nil return types.NewError(err, types.ErrorCodeBadResponseBody), nil
} }
if baiduResponse.ErrorMsg != "" { if baiduResponse.ErrorMsg != "" {
return types.NewError(fmt.Errorf(baiduResponse.ErrorMsg), types.ErrorCodeBadResponseBody), nil return types.NewError(fmt.Errorf("%s", baiduResponse.ErrorMsg), types.ErrorCodeBadResponseBody), nil
} }
fullTextResponse := responseBaidu2OpenAI(&baiduResponse) fullTextResponse := responseBaidu2OpenAI(&baiduResponse)
jsonResponse, err := json.Marshal(fullTextResponse) jsonResponse, err := json.Marshal(fullTextResponse)
@@ -175,7 +175,7 @@ func baiduEmbeddingHandler(c *gin.Context, info *relaycommon.RelayInfo, resp *ht
return types.NewError(err, types.ErrorCodeBadResponseBody), nil return types.NewError(err, types.ErrorCodeBadResponseBody), nil
} }
if baiduResponse.ErrorMsg != "" { if baiduResponse.ErrorMsg != "" {
return types.NewError(fmt.Errorf(baiduResponse.ErrorMsg), types.ErrorCodeBadResponseBody), nil return types.NewError(fmt.Errorf("%s", baiduResponse.ErrorMsg), types.ErrorCodeBadResponseBody), nil
} }
fullTextResponse := embeddingResponseBaidu2OpenAI(&baiduResponse) fullTextResponse := embeddingResponseBaidu2OpenAI(&baiduResponse)
jsonResponse, err := json.Marshal(fullTextResponse) jsonResponse, err := json.Marshal(fullTextResponse)

View File

@@ -483,9 +483,11 @@ func StreamResponseClaude2OpenAI(reqMode int, claudeResponse *dto.ClaudeResponse
} }
} }
} else if claudeResponse.Type == "message_delta" { } else if claudeResponse.Type == "message_delta" {
finishReason := stopReasonClaude2OpenAI(*claudeResponse.Delta.StopReason) if claudeResponse.Delta != nil && claudeResponse.Delta.StopReason != nil {
if finishReason != "null" { finishReason := stopReasonClaude2OpenAI(*claudeResponse.Delta.StopReason)
choice.FinishReason = &finishReason if finishReason != "null" {
choice.FinishReason = &finishReason
}
} }
//claudeUsage = &claudeResponse.Usage //claudeUsage = &claudeResponse.Usage
} else if claudeResponse.Type == "message_stop" { } else if claudeResponse.Type == "message_stop" {

View File

@@ -208,7 +208,7 @@ func handleCozeEvent(c *gin.Context, event string, data string, responseText *st
return return
} }
common.SysLog(fmt.Sprintf("stream event error: ", errorData.Code, errorData.Message)) common.SysLog(fmt.Sprintf("stream event error: %v %v", errorData.Code, errorData.Message))
} }
} }

View File

@@ -0,0 +1,145 @@
package openai
import (
"bytes"
"fmt"
"io"
"math"
"net/http"
"github.com/QuantumNous/new-api/common"
"github.com/QuantumNous/new-api/constant"
"github.com/QuantumNous/new-api/dto"
"github.com/QuantumNous/new-api/logger"
relaycommon "github.com/QuantumNous/new-api/relay/common"
"github.com/QuantumNous/new-api/relay/helper"
"github.com/QuantumNous/new-api/service"
"github.com/QuantumNous/new-api/types"
"github.com/gin-gonic/gin"
)
// OpenaiTTSHandler relays an upstream OpenAI text-to-speech response back to
// the client and builds a dto.Usage record for quota accounting. Prompt
// tokens come from the pre-request estimate; completion tokens are derived
// from the generated audio — upstream-reported usage (stream), measured
// duration, or a size-based fallback, in that order of preference.
//
// the status code has been judged before, if there is a body reading failure,
// it should be regarded as a non-recoverable error, so it should not return err for external retry.
// Analogous to nginx's load balancing, it will only retry if it can't be requested or
// if the upstream returns a specific status code, once the upstream has already written the header,
// the subsequent failure of the response body should be regarded as a non-recoverable error,
// and can be terminated directly.
func OpenaiTTSHandler(c *gin.Context, resp *http.Response, info *relaycommon.RelayInfo) *dto.Usage {
	defer service.CloseResponseBodyGracefully(resp)

	// Seed usage with the estimated prompt tokens; completion tokens are
	// filled in below once the audio payload is known.
	usage := &dto.Usage{}
	usage.PromptTokens = info.GetEstimatePromptTokens()
	usage.TotalTokens = info.GetEstimatePromptTokens()

	// Mirror upstream headers (first value only) and the status code.
	for k, v := range resp.Header {
		c.Writer.Header().Set(k, v[0])
	}
	c.Writer.WriteHeader(resp.StatusCode)

	if info.IsStream {
		// Streaming mode: forward each event and opportunistically pick up
		// a usage payload if the upstream emits one.
		helper.StreamScannerHandler(c, resp, info, func(data string) bool {
			if service.SundaySearch(data, "usage") {
				var simpleResponse dto.SimpleResponse
				err := common.Unmarshal([]byte(data), &simpleResponse)
				if err != nil {
					logger.LogError(c, err.Error())
				}
				if simpleResponse.Usage.TotalTokens != 0 {
					usage.PromptTokens = simpleResponse.Usage.InputTokens
					usage.CompletionTokens = simpleResponse.OutputTokens
					usage.TotalTokens = simpleResponse.TotalTokens
				}
			}
			_ = helper.StringData(c, data)
			return true
		})
	} else {
		common.SetContextKey(c, constant.ContextKeyLocalCountTokens, true)
		// Read the response body into a buffer.
		bodyBytes, err := io.ReadAll(resp.Body)
		if err != nil {
			logger.LogError(c, fmt.Sprintf("failed to read TTS response body: %v", err))
			c.Writer.WriteHeaderNow()
			return usage
		}

		// Write the buffered response to the client.
		c.Writer.WriteHeaderNow()
		_, err = c.Writer.Write(bodyBytes)
		if err != nil {
			logger.LogError(c, fmt.Sprintf("failed to write TTS response: %v", err))
		}

		// Compute the audio duration and update usage accordingly.
		audioFormat := "mp3" // default format
		if audioReq, ok := info.Request.(*dto.AudioRequest); ok && audioReq.ResponseFormat != "" {
			audioFormat = audioReq.ResponseFormat
		}

		var duration float64
		var durationErr error

		if audioFormat == "pcm" {
			// PCM has no file header; derive the duration from OpenAI TTS
			// PCM parameters:
			// sample rate: 24000 Hz, bit depth: 16-bit (2 bytes), channels: 1.
			const sampleRate = 24000
			const bytesPerSample = 2
			const channels = 1
			duration = float64(len(bodyBytes)) / float64(sampleRate*bytesPerSample*channels)
		} else {
			ext := "." + audioFormat
			reader := bytes.NewReader(bodyBytes)
			duration, durationErr = common.GetAudioDuration(c.Request.Context(), reader, ext)
		}

		usage.PromptTokensDetails.TextTokens = usage.PromptTokens
		if durationErr != nil {
			logger.LogWarn(c, fmt.Sprintf("failed to get audio duration: %v", durationErr))
			// Duration unavailable: fall back to a floor estimate of
			// CompletionTokens computed from the body size.
			sizeInKB := float64(len(bodyBytes)) / 1000.0
			estimatedTokens := int(math.Ceil(sizeInKB)) // rough estimate: ~1 token per KB
			usage.CompletionTokens = estimatedTokens
			usage.CompletionTokenDetails.AudioTokens = estimatedTokens
		} else if duration > 0 {
			// tokens = ceil(duration) / 60.0 * 1000, i.e. 1000 tokens per minute.
			completionTokens := int(math.Round(math.Ceil(duration) / 60.0 * 1000))
			usage.CompletionTokens = completionTokens
			usage.CompletionTokenDetails.AudioTokens = completionTokens
		}
		usage.TotalTokens = usage.PromptTokens + usage.CompletionTokens
	}
	return usage
}
// OpenaiSTTHandler relays an upstream speech-to-text response to the client
// and derives a dto.Usage for billing. It prefers the upstream-reported
// usage object (normalizing input/output token aliases); when none is
// present it falls back to the estimated prompt tokens with zero completion
// tokens. The responseFormat parameter is currently unused in this handler.
func OpenaiSTTHandler(c *gin.Context, resp *http.Response, info *relaycommon.RelayInfo, responseFormat string) (*types.NewAPIError, *dto.Usage) {
	defer service.CloseResponseBodyGracefully(resp)
	responseBody, err := io.ReadAll(resp.Body)
	if err != nil {
		return types.NewOpenAIError(err, types.ErrorCodeReadResponseBodyFailed, http.StatusInternalServerError), nil
	}

	// Write the upstream body back to the client unchanged.
	service.IOCopyBytesGracefully(c, resp, responseBody)

	// Prefer usage reported by the upstream, when present and non-zero.
	var responseData struct {
		Usage *dto.Usage `json:"usage"`
	}
	if err := common.Unmarshal(responseBody, &responseData); err == nil && responseData.Usage != nil {
		if responseData.Usage.TotalTokens > 0 {
			usage := responseData.Usage
			// Normalize alternative field names (input/output tokens) into
			// the prompt/completion fields used downstream.
			if usage.PromptTokens == 0 {
				usage.PromptTokens = usage.InputTokens
			}
			if usage.CompletionTokens == 0 {
				usage.CompletionTokens = usage.OutputTokens
			}
			return nil, usage
		}
	}

	// Fallback: bill only the estimated prompt tokens.
	usage := &dto.Usage{}
	usage.PromptTokens = info.GetEstimatePromptTokens()
	usage.CompletionTokens = 0
	usage.TotalTokens = usage.PromptTokens + usage.CompletionTokens
	return nil, usage
}

View File

@@ -1,7 +1,6 @@
package openai package openai
import ( import (
"encoding/json"
"fmt" "fmt"
"io" "io"
"net/http" "net/http"
@@ -151,7 +150,7 @@ func OaiStreamHandler(c *gin.Context, info *relaycommon.RelayInfo, resp *http.Re
var streamResp struct { var streamResp struct {
Usage *dto.Usage `json:"usage"` Usage *dto.Usage `json:"usage"`
} }
err := json.Unmarshal([]byte(secondLastStreamData), &streamResp) err := common.Unmarshal([]byte(secondLastStreamData), &streamResp)
if err == nil && streamResp.Usage != nil && service.ValidUsage(streamResp.Usage) { if err == nil && streamResp.Usage != nil && service.ValidUsage(streamResp.Usage) {
usage = streamResp.Usage usage = streamResp.Usage
containStreamUsage = true containStreamUsage = true
@@ -327,68 +326,6 @@ func streamTTSResponse(c *gin.Context, resp *http.Response) {
} }
} }
func OpenaiTTSHandler(c *gin.Context, resp *http.Response, info *relaycommon.RelayInfo) *dto.Usage {
// the status code has been judged before, if there is a body reading failure,
// it should be regarded as a non-recoverable error, so it should not return err for external retry.
// Analogous to nginx's load balancing, it will only retry if it can't be requested or
// if the upstream returns a specific status code, once the upstream has already written the header,
// the subsequent failure of the response body should be regarded as a non-recoverable error,
// and can be terminated directly.
defer service.CloseResponseBodyGracefully(resp)
usage := &dto.Usage{}
usage.PromptTokens = info.GetEstimatePromptTokens()
usage.TotalTokens = info.GetEstimatePromptTokens()
for k, v := range resp.Header {
c.Writer.Header().Set(k, v[0])
}
c.Writer.WriteHeader(resp.StatusCode)
isStreaming := resp.ContentLength == -1 || resp.Header.Get("Content-Length") == ""
if isStreaming {
streamTTSResponse(c, resp)
} else {
c.Writer.WriteHeaderNow()
_, err := io.Copy(c.Writer, resp.Body)
if err != nil {
logger.LogError(c, err.Error())
}
}
return usage
}
func OpenaiSTTHandler(c *gin.Context, resp *http.Response, info *relaycommon.RelayInfo, responseFormat string) (*types.NewAPIError, *dto.Usage) {
defer service.CloseResponseBodyGracefully(resp)
responseBody, err := io.ReadAll(resp.Body)
if err != nil {
return types.NewOpenAIError(err, types.ErrorCodeReadResponseBodyFailed, http.StatusInternalServerError), nil
}
// 写入新的 response body
service.IOCopyBytesGracefully(c, resp, responseBody)
var responseData struct {
Usage *dto.Usage `json:"usage"`
}
if err := json.Unmarshal(responseBody, &responseData); err == nil && responseData.Usage != nil {
if responseData.Usage.TotalTokens > 0 {
usage := responseData.Usage
if usage.PromptTokens == 0 {
usage.PromptTokens = usage.InputTokens
}
if usage.CompletionTokens == 0 {
usage.CompletionTokens = usage.OutputTokens
}
return nil, usage
}
}
usage := &dto.Usage{}
usage.PromptTokens = info.GetEstimatePromptTokens()
usage.CompletionTokens = 0
usage.TotalTokens = usage.PromptTokens + usage.CompletionTokens
return nil, usage
}
func OpenaiRealtimeHandler(c *gin.Context, info *relaycommon.RelayInfo) (*types.NewAPIError, *dto.RealtimeUsage) { func OpenaiRealtimeHandler(c *gin.Context, info *relaycommon.RelayInfo) (*types.NewAPIError, *dto.RealtimeUsage) {
if info == nil || info.ClientWs == nil || info.TargetWs == nil { if info == nil || info.ClientWs == nil || info.TargetWs == nil {
return types.NewError(fmt.Errorf("invalid websocket connection"), types.ErrorCodeBadResponse), nil return types.NewError(fmt.Errorf("invalid websocket connection"), types.ErrorCodeBadResponse), nil
@@ -659,7 +596,7 @@ func applyUsagePostProcessing(info *relaycommon.RelayInfo, usage *dto.Usage, res
if usage.PromptTokensDetails.CachedTokens == 0 && usage.PromptCacheHitTokens != 0 { if usage.PromptTokensDetails.CachedTokens == 0 && usage.PromptCacheHitTokens != 0 {
usage.PromptTokensDetails.CachedTokens = usage.PromptCacheHitTokens usage.PromptTokensDetails.CachedTokens = usage.PromptCacheHitTokens
} }
case constant.ChannelTypeZhipu_v4: case constant.ChannelTypeZhipu_v4, constant.ChannelTypeMoonshot:
if usage.PromptTokensDetails.CachedTokens == 0 { if usage.PromptTokensDetails.CachedTokens == 0 {
if usage.InputTokensDetails != nil && usage.InputTokensDetails.CachedTokens > 0 { if usage.InputTokensDetails != nil && usage.InputTokensDetails.CachedTokens > 0 {
usage.PromptTokensDetails.CachedTokens = usage.InputTokensDetails.CachedTokens usage.PromptTokensDetails.CachedTokens = usage.InputTokensDetails.CachedTokens
@@ -687,7 +624,7 @@ func extractCachedTokensFromBody(body []byte) (int, bool) {
} `json:"usage"` } `json:"usage"`
} }
if err := json.Unmarshal(body, &payload); err != nil { if err := common.Unmarshal(body, &payload); err != nil {
return 0, false return 0, false
} }

View File

@@ -196,7 +196,7 @@ func (a *TaskAdaptor) DoResponse(c *gin.Context, resp *http.Response, info *rela
} }
if jResp.Code != 10000 { if jResp.Code != 10000 {
taskErr = service.TaskErrorWrapper(fmt.Errorf(jResp.Message), fmt.Sprintf("%d", jResp.Code), http.StatusInternalServerError) taskErr = service.TaskErrorWrapper(fmt.Errorf("%s", jResp.Message), fmt.Sprintf("%d", jResp.Code), http.StatusInternalServerError)
return return
} }

View File

@@ -186,7 +186,7 @@ func (a *TaskAdaptor) DoResponse(c *gin.Context, resp *http.Response, info *rela
return return
} }
if kResp.Code != 0 { if kResp.Code != 0 {
taskErr = service.TaskErrorWrapperLocal(fmt.Errorf(kResp.Message), "task_failed", http.StatusBadRequest) taskErr = service.TaskErrorWrapperLocal(fmt.Errorf("%s", kResp.Message), "task_failed", http.StatusBadRequest)
return return
} }
ov := dto.NewOpenAIVideo() ov := dto.NewOpenAIVideo()

View File

@@ -105,7 +105,7 @@ func (a *TaskAdaptor) DoResponse(c *gin.Context, resp *http.Response, info *rela
return return
} }
if !sunoResponse.IsSuccess() { if !sunoResponse.IsSuccess() {
taskErr = service.TaskErrorWrapper(fmt.Errorf(sunoResponse.Message), sunoResponse.Code, http.StatusInternalServerError) taskErr = service.TaskErrorWrapper(fmt.Errorf("%s", sunoResponse.Message), sunoResponse.Code, http.StatusInternalServerError)
return return
} }

View File

@@ -51,10 +51,43 @@ type Adaptor struct {
} }
func (a *Adaptor) ConvertGeminiRequest(c *gin.Context, info *relaycommon.RelayInfo, request *dto.GeminiChatRequest) (any, error) { func (a *Adaptor) ConvertGeminiRequest(c *gin.Context, info *relaycommon.RelayInfo, request *dto.GeminiChatRequest) (any, error) {
// Vertex AI does not support functionResponse.id; keep it stripped here for consistency.
if model_setting.GetGeminiSettings().RemoveFunctionResponseIdEnabled {
removeFunctionResponseID(request)
}
geminiAdaptor := gemini.Adaptor{} geminiAdaptor := gemini.Adaptor{}
return geminiAdaptor.ConvertGeminiRequest(c, info, request) return geminiAdaptor.ConvertGeminiRequest(c, info, request)
} }
// removeFunctionResponseID strips the functionResponse.id field from every
// part of the request, recursing into any nested batch Requests. Vertex AI
// does not accept this field, so it must be cleared before forwarding.
// A nil request is a no-op.
func removeFunctionResponseID(request *dto.GeminiChatRequest) {
	if request == nil {
		return
	}
	for i := range request.Contents {
		parts := request.Contents[i].Parts
		for j := range parts {
			fr := parts[j].FunctionResponse
			if fr == nil {
				continue
			}
			if len(fr.ID) > 0 {
				fr.ID = nil
			}
		}
	}
	for i := range request.Requests {
		removeFunctionResponseID(&request.Requests[i])
	}
}
func (a *Adaptor) ConvertClaudeRequest(c *gin.Context, info *relaycommon.RelayInfo, request *dto.ClaudeRequest) (any, error) { func (a *Adaptor) ConvertClaudeRequest(c *gin.Context, info *relaycommon.RelayInfo, request *dto.ClaudeRequest) (any, error) {
if v, ok := claudeModelMap[info.UpstreamModelName]; ok { if v, ok := claudeModelMap[info.UpstreamModelName]; ok {
c.Set("request_model", v) c.Set("request_model", v)

View File

@@ -4,6 +4,7 @@ import (
"time" "time"
"github.com/QuantumNous/new-api/dto" "github.com/QuantumNous/new-api/dto"
"github.com/QuantumNous/new-api/types"
) )
// type ZhipuMessage struct { // type ZhipuMessage struct {
@@ -37,7 +38,7 @@ type ZhipuV4Response struct {
Model string `json:"model"` Model string `json:"model"`
TextResponseChoices []dto.OpenAITextResponseChoice `json:"choices"` TextResponseChoices []dto.OpenAITextResponseChoice `json:"choices"`
Usage dto.Usage `json:"usage"` Usage dto.Usage `json:"usage"`
Error dto.OpenAIError `json:"error"` Error types.OpenAIError `json:"error"`
} }
// //

View File

@@ -11,6 +11,7 @@ import (
"github.com/QuantumNous/new-api/constant" "github.com/QuantumNous/new-api/constant"
"github.com/QuantumNous/new-api/dto" "github.com/QuantumNous/new-api/dto"
relayconstant "github.com/QuantumNous/new-api/relay/constant" relayconstant "github.com/QuantumNous/new-api/relay/constant"
"github.com/QuantumNous/new-api/setting/model_setting"
"github.com/QuantumNous/new-api/types" "github.com/QuantumNous/new-api/types"
"github.com/gin-gonic/gin" "github.com/gin-gonic/gin"
@@ -634,3 +635,47 @@ func RemoveDisabledFields(jsonData []byte, channelOtherSettings dto.ChannelOther
} }
return jsonDataAfter, nil return jsonDataAfter, nil
} }
// RemoveGeminiDisabledFields removes disabled fields from Gemini request JSON data
// Currently supports removing functionResponse.id field which Vertex AI does not support
// RemoveGeminiDisabledFields removes disabled fields from Gemini request
// JSON data. Currently it strips the functionResponse.id field, which
// Vertex AI does not support. The transformation is best-effort: on any
// unmarshal/marshal error the original payload is returned unchanged with
// a nil error, so the caller never fails because of this cleanup.
func RemoveGeminiDisabledFields(jsonData []byte) ([]byte, error) {
	// Fast path: nothing to do unless the setting is enabled.
	if !model_setting.GetGeminiSettings().RemoveFunctionResponseIdEnabled {
		return jsonData, nil
	}

	var data map[string]interface{}
	if err := common.Unmarshal(jsonData, &data); err != nil {
		common.SysError("RemoveGeminiDisabledFields Unmarshal error: " + err.Error())
		return jsonData, nil
	}

	// Process contents array.
	// Handle both camelCase (functionResponse) and snake_case (function_response);
	// every type assertion is guarded so malformed shapes are silently skipped.
	if contents, ok := data["contents"].([]interface{}); ok {
		for _, content := range contents {
			if contentMap, ok := content.(map[string]interface{}); ok {
				if parts, ok := contentMap["parts"].([]interface{}); ok {
					for _, part := range parts {
						if partMap, ok := part.(map[string]interface{}); ok {
							// Check functionResponse (camelCase)
							if funcResp, ok := partMap["functionResponse"].(map[string]interface{}); ok {
								delete(funcResp, "id")
							}
							// Check function_response (snake_case)
							if funcResp, ok := partMap["function_response"].(map[string]interface{}); ok {
								delete(funcResp, "id")
							}
						}
					}
				}
			}
		}
	}

	jsonDataAfter, err := common.Marshal(data)
	if err != nil {
		common.SysError("RemoveGeminiDisabledFields Marshal error: " + err.Error())
		return jsonData, nil
	}
	return jsonDataAfter, nil
}

View File

@@ -181,7 +181,7 @@ func TextHelper(c *gin.Context, info *relaycommon.RelayInfo) (newAPIError *types
return newApiErr return newApiErr
} }
if strings.HasPrefix(info.OriginModelName, "gpt-4o-audio") { if usage.(*dto.Usage).CompletionTokenDetails.AudioTokens > 0 || usage.(*dto.Usage).PromptTokensDetails.AudioTokens > 0 {
service.PostAudioConsumeQuota(c, info, usage.(*dto.Usage), "") service.PostAudioConsumeQuota(c, info, usage.(*dto.Usage), "")
} else { } else {
postConsumeQuota(c, info, usage.(*dto.Usage), "") postConsumeQuota(c, info, usage.(*dto.Usage), "")
@@ -300,14 +300,20 @@ func postConsumeQuota(ctx *gin.Context, relayInfo *relaycommon.RelayInfo, usage
if !relayInfo.PriceData.UsePrice { if !relayInfo.PriceData.UsePrice {
baseTokens := dPromptTokens baseTokens := dPromptTokens
// 减去 cached tokens // 减去 cached tokens
// Anthropic API 的 input_tokens 已经不包含缓存 tokens不需要减去
// OpenAI/OpenRouter 等 API 的 prompt_tokens 包含缓存 tokens需要减去
var cachedTokensWithRatio decimal.Decimal var cachedTokensWithRatio decimal.Decimal
if !dCacheTokens.IsZero() { if !dCacheTokens.IsZero() {
baseTokens = baseTokens.Sub(dCacheTokens) if relayInfo.ChannelType != constant.ChannelTypeAnthropic {
baseTokens = baseTokens.Sub(dCacheTokens)
}
cachedTokensWithRatio = dCacheTokens.Mul(dCacheRatio) cachedTokensWithRatio = dCacheTokens.Mul(dCacheRatio)
} }
var dCachedCreationTokensWithRatio decimal.Decimal var dCachedCreationTokensWithRatio decimal.Decimal
if !dCachedCreationTokens.IsZero() { if !dCachedCreationTokens.IsZero() {
baseTokens = baseTokens.Sub(dCachedCreationTokens) if relayInfo.ChannelType != constant.ChannelTypeAnthropic {
baseTokens = baseTokens.Sub(dCachedCreationTokens)
}
dCachedCreationTokensWithRatio = dCachedCreationTokens.Mul(dCachedCreationRatio) dCachedCreationTokensWithRatio = dCachedCreationTokens.Mul(dCachedCreationRatio)
} }

View File

@@ -196,7 +196,7 @@ func RelayTaskSubmit(c *gin.Context, info *relaycommon.RelayInfo) (taskErr *dto.
// handle response // handle response
if resp != nil && resp.StatusCode != http.StatusOK { if resp != nil && resp.StatusCode != http.StatusOK {
responseBody, _ := io.ReadAll(resp.Body) responseBody, _ := io.ReadAll(resp.Body)
taskErr = service.TaskErrorWrapper(fmt.Errorf(string(responseBody)), "fail_to_fetch_task", resp.StatusCode) taskErr = service.TaskErrorWrapper(fmt.Errorf("%s", string(responseBody)), "fail_to_fetch_task", resp.StatusCode)
return return
} }

View File

@@ -11,50 +11,151 @@ import (
"github.com/gin-gonic/gin" "github.com/gin-gonic/gin"
) )
type RetryParam struct {
Ctx *gin.Context
TokenGroup string
ModelName string
Retry *int
resetNextTry bool
}
func (p *RetryParam) GetRetry() int {
if p.Retry == nil {
return 0
}
return *p.Retry
}
func (p *RetryParam) SetRetry(retry int) {
p.Retry = &retry
}
func (p *RetryParam) IncreaseRetry() {
if p.resetNextTry {
p.resetNextTry = false
return
}
if p.Retry == nil {
p.Retry = new(int)
}
*p.Retry++
}
func (p *RetryParam) ResetRetryNextTry() {
p.resetNextTry = true
}
// CacheGetRandomSatisfiedChannel tries to get a random channel that satisfies the requirements. // CacheGetRandomSatisfiedChannel tries to get a random channel that satisfies the requirements.
func CacheGetRandomSatisfiedChannel(c *gin.Context, tokenGroup string, modelName string, retry int) (*model.Channel, string, error) { // 尝试获取一个满足要求的随机渠道。
//
// For "auto" tokenGroup with cross-group Retry enabled:
// 对于启用了跨分组重试的 "auto" tokenGroup
//
// - Each group will exhaust all its priorities before moving to the next group.
// 每个分组会用完所有优先级后才会切换到下一个分组。
//
// - Uses ContextKeyAutoGroupIndex to track current group index.
// 使用 ContextKeyAutoGroupIndex 跟踪当前分组索引。
//
// - Uses ContextKeyAutoGroupRetryIndex to track the global Retry count when current group started.
// 使用 ContextKeyAutoGroupRetryIndex 跟踪当前分组开始时的全局重试次数。
//
// - priorityRetry = Retry - startRetryIndex, represents the priority level within current group.
// priorityRetry = Retry - startRetryIndex表示当前分组内的优先级级别。
//
// - When GetRandomSatisfiedChannel returns nil (priorities exhausted), moves to next group.
// 当 GetRandomSatisfiedChannel 返回 nil优先级用完切换到下一个分组。
//
// Example flow (2 groups, each with 2 priorities, RetryTimes=3):
// 示例流程2个分组每个有2个优先级RetryTimes=3
//
// Retry=0: GroupA, priority0 (startRetryIndex=0, priorityRetry=0)
// 分组A, 优先级0
//
// Retry=1: GroupA, priority1 (startRetryIndex=0, priorityRetry=1)
// 分组A, 优先级1
//
// Retry=2: GroupA exhausted → GroupB, priority0 (startRetryIndex=2, priorityRetry=0)
// 分组A用完 → 分组B, 优先级0
//
// Retry=3: GroupB, priority1 (startRetryIndex=2, priorityRetry=1)
// 分组B, 优先级1
func CacheGetRandomSatisfiedChannel(param *RetryParam) (*model.Channel, string, error) {
var channel *model.Channel var channel *model.Channel
var err error var err error
selectGroup := tokenGroup selectGroup := param.TokenGroup
userGroup := common.GetContextKeyString(c, constant.ContextKeyUserGroup) userGroup := common.GetContextKeyString(param.Ctx, constant.ContextKeyUserGroup)
if tokenGroup == "auto" {
if param.TokenGroup == "auto" {
if len(setting.GetAutoGroups()) == 0 { if len(setting.GetAutoGroups()) == 0 {
return nil, selectGroup, errors.New("auto groups is not enabled") return nil, selectGroup, errors.New("auto groups is not enabled")
} }
autoGroups := GetUserAutoGroup(userGroup) autoGroups := GetUserAutoGroup(userGroup)
startIndex := 0
priorityRetry := retry // startGroupIndex: the group index to start searching from
crossGroupRetry := common.GetContextKeyBool(c, constant.ContextKeyTokenCrossGroupRetry) // startGroupIndex: 开始搜索的分组索引
if crossGroupRetry && retry > 0 { startGroupIndex := 0
logger.LogDebug(c, "Auto group retry cross group, retry: %d", retry) crossGroupRetry := common.GetContextKeyBool(param.Ctx, constant.ContextKeyTokenCrossGroupRetry)
if lastIndex, exists := common.GetContextKey(c, constant.ContextKeyAutoGroupIndex); exists {
if idx, ok := lastIndex.(int); ok { if lastGroupIndex, exists := common.GetContextKey(param.Ctx, constant.ContextKeyAutoGroupIndex); exists {
startIndex = idx + 1 if idx, ok := lastGroupIndex.(int); ok {
priorityRetry = 0 startGroupIndex = idx
}
} }
logger.LogDebug(c, "Auto group retry cross group, start index: %d", startIndex)
} }
for i := startIndex; i < len(autoGroups); i++ { for i := startGroupIndex; i < len(autoGroups); i++ {
autoGroup := autoGroups[i] autoGroup := autoGroups[i]
logger.LogDebug(c, "Auto selecting group: %s", autoGroup) // Calculate priorityRetry for current group
channel, _ = model.GetRandomSatisfiedChannel(autoGroup, modelName, priorityRetry) // 计算当前分组的 priorityRetry
if channel == nil { priorityRetry := param.GetRetry()
// If moved to a new group, reset priorityRetry and update startRetryIndex
// 如果切换到新分组,重置 priorityRetry 并更新 startRetryIndex
if i > startGroupIndex {
priorityRetry = 0 priorityRetry = 0
continue
} else {
c.Set("auto_group", autoGroup)
common.SetContextKey(c, constant.ContextKeyAutoGroupIndex, i)
selectGroup = autoGroup
logger.LogDebug(c, "Auto selected group: %s", autoGroup)
break
} }
logger.LogDebug(param.Ctx, "Auto selecting group: %s, priorityRetry: %d", autoGroup, priorityRetry)
channel, _ = model.GetRandomSatisfiedChannel(autoGroup, param.ModelName, priorityRetry)
if channel == nil {
// Current group has no available channel for this model, try next group
// 当前分组没有该模型的可用渠道,尝试下一个分组
logger.LogDebug(param.Ctx, "No available channel in group %s for model %s at priorityRetry %d, trying next group", autoGroup, param.ModelName, priorityRetry)
// 重置状态以尝试下一个分组
common.SetContextKey(param.Ctx, constant.ContextKeyAutoGroupIndex, i+1)
common.SetContextKey(param.Ctx, constant.ContextKeyAutoGroupRetryIndex, 0)
// Reset retry counter so outer loop can continue for next group
// 重置重试计数器,以便外层循环可以为下一个分组继续
param.SetRetry(0)
continue
}
common.SetContextKey(param.Ctx, constant.ContextKeyAutoGroup, autoGroup)
selectGroup = autoGroup
logger.LogDebug(param.Ctx, "Auto selected group: %s", autoGroup)
// Prepare state for next retry
// 为下一次重试准备状态
if crossGroupRetry && priorityRetry >= common.RetryTimes {
// Current group has exhausted all retries, prepare to switch to next group
// This request still uses current group, but next retry will use next group
// 当前分组已用完所有重试次数,准备切换到下一个分组
// 本次请求仍使用当前分组,但下次重试将使用下一个分组
logger.LogDebug(param.Ctx, "Current group %s retries exhausted (priorityRetry=%d >= RetryTimes=%d), preparing switch to next group for next retry", autoGroup, priorityRetry, common.RetryTimes)
common.SetContextKey(param.Ctx, constant.ContextKeyAutoGroupIndex, i+1)
// Reset retry counter so outer loop can continue for next group
// 重置重试计数器,以便外层循环可以为下一个分组继续
param.SetRetry(0)
param.ResetRetryNextTry()
} else {
// Stay in current group, save current state
// 保持在当前分组,保存当前状态
common.SetContextKey(param.Ctx, constant.ContextKeyAutoGroupIndex, i)
}
break
} }
} else { } else {
channel, err = model.GetRandomSatisfiedChannel(tokenGroup, modelName, retry) channel, err = model.GetRandomSatisfiedChannel(param.TokenGroup, param.ModelName, param.GetRetry())
if err != nil { if err != nil {
return nil, tokenGroup, err return nil, param.TokenGroup, err
} }
} }
return channel, selectGroup, nil return channel, selectGroup, nil

View File

@@ -389,25 +389,29 @@ func StreamResponseOpenAI2Claude(openAIResponse *dto.ChatCompletionsStreamRespon
} }
idx := blockIndex idx := blockIndex
claudeResponses = append(claudeResponses, &dto.ClaudeResponse{ if toolCall.Function.Name != "" {
Index: &idx, claudeResponses = append(claudeResponses, &dto.ClaudeResponse{
Type: "content_block_start", Index: &idx,
ContentBlock: &dto.ClaudeMediaMessage{ Type: "content_block_start",
Id: toolCall.ID, ContentBlock: &dto.ClaudeMediaMessage{
Type: "tool_use", Id: toolCall.ID,
Name: toolCall.Function.Name, Type: "tool_use",
Input: map[string]interface{}{}, Name: toolCall.Function.Name,
}, Input: map[string]interface{}{},
}) },
})
claudeResponses = append(claudeResponses, &dto.ClaudeResponse{ }
Index: &idx,
Type: "content_block_delta", if len(toolCall.Function.Arguments) > 0 {
Delta: &dto.ClaudeMediaMessage{ claudeResponses = append(claudeResponses, &dto.ClaudeResponse{
Type: "input_json_delta", Index: &idx,
PartialJson: &toolCall.Function.Arguments, Type: "content_block_delta",
}, Delta: &dto.ClaudeMediaMessage{
}) Type: "input_json_delta",
PartialJson: &toolCall.Function.Arguments,
},
})
}
info.ClaudeConvertInfo.Index = blockIndex info.ClaudeConvertInfo.Index = blockIndex
} }

View File

@@ -96,19 +96,21 @@ func RelayErrorHandler(ctx context.Context, resp *http.Response, showBodyWhenFai
if showBodyWhenFail { if showBodyWhenFail {
newApiErr.Err = fmt.Errorf("bad response status code %d, body: %s", resp.StatusCode, string(responseBody)) newApiErr.Err = fmt.Errorf("bad response status code %d, body: %s", resp.StatusCode, string(responseBody))
} else { } else {
if common.DebugEnabled { logger.LogError(ctx, fmt.Sprintf("bad response status code %d, body: %s", resp.StatusCode, string(responseBody)))
logger.LogInfo(ctx, fmt.Sprintf("bad response status code %d, body: %s", resp.StatusCode, string(responseBody)))
}
newApiErr.Err = fmt.Errorf("bad response status code %d", resp.StatusCode) newApiErr.Err = fmt.Errorf("bad response status code %d", resp.StatusCode)
} }
return return
} }
if errResponse.Error.Message != "" {
if common.GetJsonType(errResponse.Error) == "object" {
// General format error (OpenAI, Anthropic, Gemini, etc.) // General format error (OpenAI, Anthropic, Gemini, etc.)
newApiErr = types.WithOpenAIError(errResponse.Error, resp.StatusCode) oaiError := errResponse.TryToOpenAIError()
} else { if oaiError != nil {
newApiErr = types.NewOpenAIError(errors.New(errResponse.ToMessage()), types.ErrorCodeBadResponseStatusCode, resp.StatusCode) newApiErr = types.WithOpenAIError(*oaiError, resp.StatusCode)
return
}
} }
newApiErr = types.NewOpenAIError(errors.New(errResponse.ToMessage()), types.ErrorCodeBadResponseStatusCode, resp.StatusCode)
return return
} }

View File

@@ -108,7 +108,7 @@ func PreWssConsumeQuota(ctx *gin.Context, relayInfo *relaycommon.RelayInfo, usag
groupRatio := ratio_setting.GetGroupRatio(relayInfo.UsingGroup) groupRatio := ratio_setting.GetGroupRatio(relayInfo.UsingGroup)
modelRatio, _, _ := ratio_setting.GetModelRatio(modelName) modelRatio, _, _ := ratio_setting.GetModelRatio(modelName)
autoGroup, exists := ctx.Get("auto_group") autoGroup, exists := common.GetContextKey(ctx, constant.ContextKeyAutoGroup)
if exists { if exists {
groupRatio = ratio_setting.GetGroupRatio(autoGroup.(string)) groupRatio = ratio_setting.GetGroupRatio(autoGroup.(string))
log.Printf("final group ratio: %f", groupRatio) log.Printf("final group ratio: %f", groupRatio)

View File

@@ -4,7 +4,7 @@ import (
"github.com/QuantumNous/new-api/setting/config" "github.com/QuantumNous/new-api/setting/config"
) )
// GeminiSettings 定义Gemini模型的配置 // GeminiSettings defines Gemini model configuration. 注意bool要以enabled结尾才可以生效编辑
type GeminiSettings struct { type GeminiSettings struct {
SafetySettings map[string]string `json:"safety_settings"` SafetySettings map[string]string `json:"safety_settings"`
VersionSettings map[string]string `json:"version_settings"` VersionSettings map[string]string `json:"version_settings"`
@@ -12,6 +12,7 @@ type GeminiSettings struct {
ThinkingAdapterEnabled bool `json:"thinking_adapter_enabled"` ThinkingAdapterEnabled bool `json:"thinking_adapter_enabled"`
ThinkingAdapterBudgetTokensPercentage float64 `json:"thinking_adapter_budget_tokens_percentage"` ThinkingAdapterBudgetTokensPercentage float64 `json:"thinking_adapter_budget_tokens_percentage"`
FunctionCallThoughtSignatureEnabled bool `json:"function_call_thought_signature_enabled"` FunctionCallThoughtSignatureEnabled bool `json:"function_call_thought_signature_enabled"`
RemoveFunctionResponseIdEnabled bool `json:"remove_function_response_id_enabled"`
} }
// 默认配置 // 默认配置
@@ -30,6 +31,7 @@ var defaultGeminiSettings = GeminiSettings{
ThinkingAdapterEnabled: false, ThinkingAdapterEnabled: false,
ThinkingAdapterBudgetTokensPercentage: 0.6, ThinkingAdapterBudgetTokensPercentage: 0.6,
FunctionCallThoughtSignatureEnabled: true, FunctionCallThoughtSignatureEnabled: true,
RemoveFunctionResponseIdEnabled: true,
} }
// 全局实例 // 全局实例

View File

@@ -7,7 +7,6 @@ import (
"github.com/QuantumNous/new-api/common" "github.com/QuantumNous/new-api/common"
"github.com/QuantumNous/new-api/setting/operation_setting" "github.com/QuantumNous/new-api/setting/operation_setting"
"github.com/QuantumNous/new-api/setting/reasoning"
) )
// from songquanpeng/one-api // from songquanpeng/one-api
@@ -297,6 +296,7 @@ var defaultModelPrice = map[string]float64{
"mj_upload": 0.05, "mj_upload": 0.05,
"sora-2": 0.3, "sora-2": 0.3,
"sora-2-pro": 0.5, "sora-2-pro": 0.5,
"gpt-4o-mini-tts": 0.3,
} }
var defaultAudioRatio = map[string]float64{ var defaultAudioRatio = map[string]float64{
@@ -304,11 +304,13 @@ var defaultAudioRatio = map[string]float64{
"gpt-4o-mini-audio-preview": 66.67, "gpt-4o-mini-audio-preview": 66.67,
"gpt-4o-realtime-preview": 8, "gpt-4o-realtime-preview": 8,
"gpt-4o-mini-realtime-preview": 16.67, "gpt-4o-mini-realtime-preview": 16.67,
"gpt-4o-mini-tts": 25,
} }
var defaultAudioCompletionRatio = map[string]float64{ var defaultAudioCompletionRatio = map[string]float64{
"gpt-4o-realtime": 2, "gpt-4o-realtime": 2,
"gpt-4o-mini-realtime": 2, "gpt-4o-mini-realtime": 2,
"gpt-4o-mini-tts": 1,
} }
var ( var (
@@ -536,7 +538,10 @@ func getHardcodedCompletionModelRatio(name string) (float64, bool) {
if name == "gpt-4o-2024-05-13" { if name == "gpt-4o-2024-05-13" {
return 3, true return 3, true
} }
return 4, true if strings.HasPrefix(name, "gpt-4o-mini-tts") {
return 20, false
}
return 4, false
} }
// gpt-5 匹配 // gpt-5 匹配
if strings.HasPrefix(name, "gpt-5") { if strings.HasPrefix(name, "gpt-5") {
@@ -823,10 +828,6 @@ func FormatMatchingModelName(name string) string {
name = handleThinkingBudgetModel(name, "gemini-2.5-pro", "gemini-2.5-pro-thinking-*") name = handleThinkingBudgetModel(name, "gemini-2.5-pro", "gemini-2.5-pro-thinking-*")
} }
if base, _, ok := reasoning.TrimEffortSuffix(name); ok {
name = base
}
if strings.HasPrefix(name, "gpt-4-gizmo") { if strings.HasPrefix(name, "gpt-4-gizmo") {
name = "gpt-4-gizmo-*" name = "gpt-4-gizmo-*"
} }

View File

@@ -3,9 +3,9 @@ package system_setting
import "github.com/QuantumNous/new-api/setting/config" import "github.com/QuantumNous/new-api/setting/config"
type DiscordSettings struct { type DiscordSettings struct {
Enabled bool `json:"enabled"` Enabled bool `json:"enabled"`
ClientId string `json:"client_id"` ClientId string `json:"client_id"`
ClientSecret string `json:"client_secret"` ClientSecret string `json:"client_secret"`
} }
// 默认配置 // 默认配置

View File

@@ -94,6 +94,14 @@ type NewAPIError struct {
StatusCode int StatusCode int
} }
// Unwrap enables errors.Is / errors.As to work with NewAPIError by exposing the underlying error.
func (e *NewAPIError) Unwrap() error {
if e == nil {
return nil
}
return e.Err
}
func (e *NewAPIError) GetErrorCode() ErrorCode { func (e *NewAPIError) GetErrorCode() ErrorCode {
if e == nil { if e == nil {
return "" return ""

View File

@@ -48,6 +48,7 @@
"@so1ve/prettier-config": "^3.1.0", "@so1ve/prettier-config": "^3.1.0",
"@vitejs/plugin-react": "^4.2.1", "@vitejs/plugin-react": "^4.2.1",
"autoprefixer": "^10.4.21", "autoprefixer": "^10.4.21",
"code-inspector-plugin": "^1.3.3",
"eslint": "8.57.0", "eslint": "8.57.0",
"eslint-plugin-header": "^3.1.1", "eslint-plugin-header": "^3.1.1",
"eslint-plugin-react-hooks": "^5.2.0", "eslint-plugin-react-hooks": "^5.2.0",
@@ -139,6 +140,18 @@
"@chevrotain/utils": ["@chevrotain/utils@11.0.3", "", {}, "sha512-YslZMgtJUyuMbZ+aKvfF3x1f5liK4mWNxghFRv7jqRR9C3R3fAOGTTKvxXDa2Y1s9zSbcpuO0cAxDYsc9SrXoQ=="], "@chevrotain/utils": ["@chevrotain/utils@11.0.3", "", {}, "sha512-YslZMgtJUyuMbZ+aKvfF3x1f5liK4mWNxghFRv7jqRR9C3R3fAOGTTKvxXDa2Y1s9zSbcpuO0cAxDYsc9SrXoQ=="],
"@code-inspector/core": ["@code-inspector/core@1.3.3", "", { "dependencies": { "@vue/compiler-dom": "^3.5.13", "chalk": "^4.1.1", "dotenv": "^16.1.4", "launch-ide": "1.3.0", "portfinder": "^1.0.28" } }, "sha512-1SUCY/XiJ3LuA9TPfS9i7/cUcmdLsgB0chuDcP96ixB2tvYojzgCrglP7CHUGZa1dtWuRLuCiDzkclLetpV4ew=="],
"@code-inspector/esbuild": ["@code-inspector/esbuild@1.3.3", "", { "dependencies": { "@code-inspector/core": "1.3.3" } }, "sha512-GzX5LQbvh9DXINSUyWymG8Y7u5Tq4oJAnnrCoRiYxQvKBUuu2qVMzpZHIA2iDGxvazgZvr2OK+Sh/We4LutViA=="],
"@code-inspector/mako": ["@code-inspector/mako@1.3.3", "", { "dependencies": { "@code-inspector/core": "1.3.3" } }, "sha512-YPTHwpDtz9zn1vimMcJFCM6ELdBoivY7t2GzgY/iCTfgm6pu1H+oWZiBC35edqYAB7+xE8frspnNsmBhsrA36A=="],
"@code-inspector/turbopack": ["@code-inspector/turbopack@1.3.3", "", { "dependencies": { "@code-inspector/core": "1.3.3", "@code-inspector/webpack": "1.3.3" } }, "sha512-XhqsMtts/Int64LkpO00b4rlg1bw0otlRebX8dSVgZfsujj+Jdv2ngKmQ6RBN3vgj/zV7BfgBLeGgJn7D1kT3A=="],
"@code-inspector/vite": ["@code-inspector/vite@1.3.3", "", { "dependencies": { "@code-inspector/core": "1.3.3", "chalk": "4.1.1" } }, "sha512-phsHVYBsxAhfi6jJ+vpmxuF6jYMuVbozs5e8pkEJL2hQyGVkzP77vfCh1wzmQHcmKUKb2tlrFcvAsRb7oA1W7w=="],
"@code-inspector/webpack": ["@code-inspector/webpack@1.3.3", "", { "dependencies": { "@code-inspector/core": "1.3.3" } }, "sha512-qYih7syRXgM45KaWFNNk5Ed4WitVQHCI/2s/DZMFaF1Y2FA9qd1wPGiggNeqdcUsjf9TvVBQw/89gPQZIGwSqQ=="],
"@dnd-kit/accessibility": ["@dnd-kit/accessibility@3.1.1", "", { "dependencies": { "tslib": "^2.0.0" }, "peerDependencies": { "react": ">=16.8.0" } }, "sha512-2P+YgaXF+gRsIihwwY1gCsQSYnu9Zyj2py8kY5fFvUM1qm2WA2u639R6YNVfU4GWr+ZM5mqEsfHZZLoRONbemw=="], "@dnd-kit/accessibility": ["@dnd-kit/accessibility@3.1.1", "", { "dependencies": { "tslib": "^2.0.0" }, "peerDependencies": { "react": ">=16.8.0" } }, "sha512-2P+YgaXF+gRsIihwwY1gCsQSYnu9Zyj2py8kY5fFvUM1qm2WA2u639R6YNVfU4GWr+ZM5mqEsfHZZLoRONbemw=="],
"@dnd-kit/core": ["@dnd-kit/core@6.3.1", "", { "dependencies": { "@dnd-kit/accessibility": "^3.1.1", "@dnd-kit/utilities": "^3.2.2", "tslib": "^2.0.0" }, "peerDependencies": { "react": ">=16.8.0", "react-dom": ">=16.8.0" } }, "sha512-xkGBRQQab4RLwgXxoqETICr6S5JlogafbhNsidmrkVv2YRs5MLwpjoF2qpiGjQt8S9AoxtIV603s0GIUpY5eYQ=="], "@dnd-kit/core": ["@dnd-kit/core@6.3.1", "", { "dependencies": { "@dnd-kit/accessibility": "^3.1.1", "@dnd-kit/utilities": "^3.2.2", "tslib": "^2.0.0" }, "peerDependencies": { "react": ">=16.8.0", "react-dom": ">=16.8.0" } }, "sha512-xkGBRQQab4RLwgXxoqETICr6S5JlogafbhNsidmrkVv2YRs5MLwpjoF2qpiGjQt8S9AoxtIV603s0GIUpY5eYQ=="],
@@ -713,6 +726,12 @@
"@vitejs/plugin-react": ["@vitejs/plugin-react@4.3.4", "", { "dependencies": { "@babel/core": "^7.26.0", "@babel/plugin-transform-react-jsx-self": "^7.25.9", "@babel/plugin-transform-react-jsx-source": "^7.25.9", "@types/babel__core": "^7.20.5", "react-refresh": "^0.14.2" }, "peerDependencies": { "vite": "^4.2.0 || ^5.0.0 || ^6.0.0" } }, "sha512-SCCPBJtYLdE8PX/7ZQAs1QAZ8Jqwih+0VBLum1EGqmCCQal+MIUqLCzj3ZUy8ufbC0cAM4LRlSTm7IQJwWT4ug=="], "@vitejs/plugin-react": ["@vitejs/plugin-react@4.3.4", "", { "dependencies": { "@babel/core": "^7.26.0", "@babel/plugin-transform-react-jsx-self": "^7.25.9", "@babel/plugin-transform-react-jsx-source": "^7.25.9", "@types/babel__core": "^7.20.5", "react-refresh": "^0.14.2" }, "peerDependencies": { "vite": "^4.2.0 || ^5.0.0 || ^6.0.0" } }, "sha512-SCCPBJtYLdE8PX/7ZQAs1QAZ8Jqwih+0VBLum1EGqmCCQal+MIUqLCzj3ZUy8ufbC0cAM4LRlSTm7IQJwWT4ug=="],
"@vue/compiler-core": ["@vue/compiler-core@3.5.26", "", { "dependencies": { "@babel/parser": "^7.28.5", "@vue/shared": "3.5.26", "entities": "^7.0.0", "estree-walker": "^2.0.2", "source-map-js": "^1.2.1" } }, "sha512-vXyI5GMfuoBCnv5ucIT7jhHKl55Y477yxP6fc4eUswjP8FG3FFVFd41eNDArR+Uk3QKn2Z85NavjaxLxOC19/w=="],
"@vue/compiler-dom": ["@vue/compiler-dom@3.5.26", "", { "dependencies": { "@vue/compiler-core": "3.5.26", "@vue/shared": "3.5.26" } }, "sha512-y1Tcd3eXs834QjswshSilCBnKGeQjQXB6PqFn/1nxcQw4pmG42G8lwz+FZPAZAby6gZeHSt/8LMPfZ4Rb+Bd/A=="],
"@vue/shared": ["@vue/shared@3.5.26", "", {}, "sha512-7Z6/y3uFI5PRoKeorTOSXKcDj0MSasfNNltcslbFrPpcw6aXRUALq4IfJlaTRspiWIUOEZbrpM+iQGmCOiWe4A=="],
"abs-svg-path": ["abs-svg-path@0.1.1", "", {}, "sha512-d8XPSGjfyzlXC3Xx891DJRyZfqk5JU0BJrDQcsWomFIV1/BIzPW5HDH5iDdWpqWaav0YVIEzT1RHTwWr0FFshA=="], "abs-svg-path": ["abs-svg-path@0.1.1", "", {}, "sha512-d8XPSGjfyzlXC3Xx891DJRyZfqk5JU0BJrDQcsWomFIV1/BIzPW5HDH5iDdWpqWaav0YVIEzT1RHTwWr0FFshA=="],
"acorn": ["acorn@8.15.0", "", { "bin": { "acorn": "bin/acorn" } }, "sha512-NZyJarBfL7nWwIq+FDL6Zp/yHEhePMNnnJ0y3qfieCrmNvYct8uvtiV41UvlSe6apAfk0fY1FbWx+NwfmpvtTg=="], "acorn": ["acorn@8.15.0", "", { "bin": { "acorn": "bin/acorn" } }, "sha512-NZyJarBfL7nWwIq+FDL6Zp/yHEhePMNnnJ0y3qfieCrmNvYct8uvtiV41UvlSe6apAfk0fY1FbWx+NwfmpvtTg=="],
@@ -747,6 +766,8 @@
"astring": ["astring@1.9.0", "", { "bin": { "astring": "bin/astring" } }, "sha512-LElXdjswlqjWrPpJFg1Fx4wpkOCxj1TDHlSV4PlaRxHGWko024xICaa97ZkMfs6DRKlCguiAI+rbXv5GWwXIkg=="], "astring": ["astring@1.9.0", "", { "bin": { "astring": "bin/astring" } }, "sha512-LElXdjswlqjWrPpJFg1Fx4wpkOCxj1TDHlSV4PlaRxHGWko024xICaa97ZkMfs6DRKlCguiAI+rbXv5GWwXIkg=="],
"async": ["async@3.2.6", "", {}, "sha512-htCUDlxyyCLMgaM3xXg0C0LW2xqfuQ6p05pCEIsXuyQ+a1koYKTuBMzRNwmybfLgvJDMd0r1LTn4+E0Ti6C2AA=="],
"async-validator": ["async-validator@3.5.2", "", {}, "sha512-8eLCg00W9pIRZSB781UUX/H6Oskmm8xloZfr09lz5bikRpBVDlJ3hRVuxxP1SxcwsEYfJ4IU8Q19Y8/893r3rQ=="], "async-validator": ["async-validator@3.5.2", "", {}, "sha512-8eLCg00W9pIRZSB781UUX/H6Oskmm8xloZfr09lz5bikRpBVDlJ3hRVuxxP1SxcwsEYfJ4IU8Q19Y8/893r3rQ=="],
"asynckit": ["asynckit@0.4.0", "", {}, "sha512-Oei9OH4tRh0YqU3GxhX79dM/mwVgvbZJaSNaRk+bshkj0S5cfHcgYakreBjrHwatXKbz+IoIdYLxrKim2MjW0Q=="], "asynckit": ["asynckit@0.4.0", "", {}, "sha512-Oei9OH4tRh0YqU3GxhX79dM/mwVgvbZJaSNaRk+bshkj0S5cfHcgYakreBjrHwatXKbz+IoIdYLxrKim2MjW0Q=="],
@@ -793,7 +814,7 @@
"ccount": ["ccount@2.0.1", "", {}, "sha512-eyrF0jiFpY+3drT6383f1qhkbGsLSifNAjA61IUjZjmLCWjItY6LB9ft9YhoDgwfmclB2zhu51Lc7+95b8NRAg=="], "ccount": ["ccount@2.0.1", "", {}, "sha512-eyrF0jiFpY+3drT6383f1qhkbGsLSifNAjA61IUjZjmLCWjItY6LB9ft9YhoDgwfmclB2zhu51Lc7+95b8NRAg=="],
"chalk": ["chalk@4.1.2", "", { "dependencies": { "ansi-styles": "^4.1.0", "supports-color": "^7.1.0" } }, "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA=="], "chalk": ["chalk@4.1.1", "", { "dependencies": { "ansi-styles": "^4.1.0", "supports-color": "^7.1.0" } }, "sha512-diHzdDKxcU+bAsUboHLPEDQiw0qEe0qd7SYUn3HgcFlWgbDcfLGswOHYeGrHKzG9z6UYf01d9VFMfZxPM1xZSg=="],
"character-entities": ["character-entities@2.0.2", "", {}, "sha512-shx7oQ0Awen/BRIdkjkvz54PnEEI/EjwXDSIZp86/KKdbafHh1Df/RYGBhn4hbe2+uKC9FnT5UCEdyPz3ai9hQ=="], "character-entities": ["character-entities@2.0.2", "", {}, "sha512-shx7oQ0Awen/BRIdkjkvz54PnEEI/EjwXDSIZp86/KKdbafHh1Df/RYGBhn4hbe2+uKC9FnT5UCEdyPz3ai9hQ=="],
@@ -825,6 +846,8 @@
"clsx": ["clsx@2.1.1", "", {}, "sha512-eYm0QWBtUrBWZWG0d386OGAw16Z995PiOVo2B7bjWSbHedGl5e0ZWaq65kOGgUSNesEIDkB9ISbTg/JK9dhCZA=="], "clsx": ["clsx@2.1.1", "", {}, "sha512-eYm0QWBtUrBWZWG0d386OGAw16Z995PiOVo2B7bjWSbHedGl5e0ZWaq65kOGgUSNesEIDkB9ISbTg/JK9dhCZA=="],
"code-inspector-plugin": ["code-inspector-plugin@1.3.3", "", { "dependencies": { "@code-inspector/core": "1.3.3", "@code-inspector/esbuild": "1.3.3", "@code-inspector/mako": "1.3.3", "@code-inspector/turbopack": "1.3.3", "@code-inspector/vite": "1.3.3", "@code-inspector/webpack": "1.3.3", "chalk": "4.1.1" } }, "sha512-yDi84v5tgXFSZLLXqHl/Mc2qy9d2CxcYhIaP192NhqTG1zA5uVtiNIzvDAXh5Vaqy8QGYkvBfbG/i55b/sXaSQ=="],
"collapse-white-space": ["collapse-white-space@2.1.0", "", {}, "sha512-loKTxY1zCOuG4j9f6EPnuyyYkf58RnhhWTvRoZEokgB+WbdXehfjFviyOVYkqzEWz1Q5kRiZdBYS5SwxbQYwzw=="], "collapse-white-space": ["collapse-white-space@2.1.0", "", {}, "sha512-loKTxY1zCOuG4j9f6EPnuyyYkf58RnhhWTvRoZEokgB+WbdXehfjFviyOVYkqzEWz1Q5kRiZdBYS5SwxbQYwzw=="],
"color-convert": ["color-convert@2.0.1", "", { "dependencies": { "color-name": "~1.1.4" } }, "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ=="], "color-convert": ["color-convert@2.0.1", "", { "dependencies": { "color-name": "~1.1.4" } }, "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ=="],
@@ -975,6 +998,8 @@
"dompurify": ["dompurify@3.2.6", "", { "optionalDependencies": { "@types/trusted-types": "^2.0.7" } }, "sha512-/2GogDQlohXPZe6D6NOgQvXLPSYBqIWMnZ8zzOhn09REE4eyAzb+Hed3jhoM9OkuaJ8P6ZGTTVWQKAi8ieIzfQ=="], "dompurify": ["dompurify@3.2.6", "", { "optionalDependencies": { "@types/trusted-types": "^2.0.7" } }, "sha512-/2GogDQlohXPZe6D6NOgQvXLPSYBqIWMnZ8zzOhn09REE4eyAzb+Hed3jhoM9OkuaJ8P6ZGTTVWQKAi8ieIzfQ=="],
"dotenv": ["dotenv@16.6.1", "", {}, "sha512-uBq4egWHTcTt33a72vpSG0z3HnPuIl6NqYcTrKEg2azoEyl2hpW0zqlxysq2pK9HlDIHyHyakeYaYnSAwd8bow=="],
"dunder-proto": ["dunder-proto@1.0.1", "", { "dependencies": { "call-bind-apply-helpers": "^1.0.1", "es-errors": "^1.3.0", "gopd": "^1.2.0" } }, "sha512-KIN/nDJBQRcXw0MLVhZE9iQHmG68qAVIBg9CqmUYjmQIhgij9U5MFvrqkUL5FbtyyzZuOeOt0zdeRe4UY7ct+A=="], "dunder-proto": ["dunder-proto@1.0.1", "", { "dependencies": { "call-bind-apply-helpers": "^1.0.1", "es-errors": "^1.3.0", "gopd": "^1.2.0" } }, "sha512-KIN/nDJBQRcXw0MLVhZE9iQHmG68qAVIBg9CqmUYjmQIhgij9U5MFvrqkUL5FbtyyzZuOeOt0zdeRe4UY7ct+A=="],
"eastasianwidth": ["eastasianwidth@0.2.0", "", {}, "sha512-I88TYZWc9XiYHRQ4/3c5rjjfgkjhLyW2luGIheGERbNQ6OY7yTybanSpDXZa8y7VUP9YmDcYa+eyq4ca7iLqWA=="], "eastasianwidth": ["eastasianwidth@0.2.0", "", {}, "sha512-I88TYZWc9XiYHRQ4/3c5rjjfgkjhLyW2luGIheGERbNQ6OY7yTybanSpDXZa8y7VUP9YmDcYa+eyq4ca7iLqWA=="],
@@ -985,7 +1010,7 @@
"emoji-regex": ["emoji-regex@10.4.0", "", {}, "sha512-EC+0oUMY1Rqm4O6LLrgjtYDvcVYTy7chDnM4Q7030tP4Kwj3u/pR6gP9ygnp2CJMK5Gq+9Q2oqmrFJAz01DXjw=="], "emoji-regex": ["emoji-regex@10.4.0", "", {}, "sha512-EC+0oUMY1Rqm4O6LLrgjtYDvcVYTy7chDnM4Q7030tP4Kwj3u/pR6gP9ygnp2CJMK5Gq+9Q2oqmrFJAz01DXjw=="],
"entities": ["entities@6.0.0", "", {}, "sha512-aKstq2TDOndCn4diEyp9Uq/Flu2i1GlLkc6XIDQSDMuaFE3OPW5OphLCyQ5SpSJZTb4reN+kTcYru5yIfXoRPw=="], "entities": ["entities@7.0.0", "", {}, "sha512-FDWG5cmEYf2Z00IkYRhbFrwIwvdFKH07uV8dvNy0omp/Qb1xcyCWp2UDtcwJF4QZZvk0sLudP6/hAu42TaqVhQ=="],
"error-ex": ["error-ex@1.3.2", "", { "dependencies": { "is-arrayish": "^0.2.1" } }, "sha512-7dFHNmqeFSEt2ZBsCriorKnn3Z2pj+fd9kmI6QoWw4//DL+icEBfc0U7qJCisqrTsKTjw4fNFy2pW9OqStD84g=="], "error-ex": ["error-ex@1.3.2", "", { "dependencies": { "is-arrayish": "^0.2.1" } }, "sha512-7dFHNmqeFSEt2ZBsCriorKnn3Z2pj+fd9kmI6QoWw4//DL+icEBfc0U7qJCisqrTsKTjw4fNFy2pW9OqStD84g=="],
@@ -1305,6 +1330,8 @@
"langium": ["langium@3.3.1", "", { "dependencies": { "chevrotain": "~11.0.3", "chevrotain-allstar": "~0.3.0", "vscode-languageserver": "~9.0.1", "vscode-languageserver-textdocument": "~1.0.11", "vscode-uri": "~3.0.8" } }, "sha512-QJv/h939gDpvT+9SiLVlY7tZC3xB2qK57v0J04Sh9wpMb6MP1q8gB21L3WIo8T5P1MSMg3Ep14L7KkDCFG3y4w=="], "langium": ["langium@3.3.1", "", { "dependencies": { "chevrotain": "~11.0.3", "chevrotain-allstar": "~0.3.0", "vscode-languageserver": "~9.0.1", "vscode-languageserver-textdocument": "~1.0.11", "vscode-uri": "~3.0.8" } }, "sha512-QJv/h939gDpvT+9SiLVlY7tZC3xB2qK57v0J04Sh9wpMb6MP1q8gB21L3WIo8T5P1MSMg3Ep14L7KkDCFG3y4w=="],
"launch-ide": ["launch-ide@1.3.0", "", { "dependencies": { "chalk": "^4.1.1", "dotenv": "^16.1.4" } }, "sha512-pxiF+HVNMV0dDc6Z0q89RDmzMF9XmSGaOn4ueTegjMy3cUkezc3zrki5PCiz68zZIqAuhW7iwoWX7JO4Kn6B0A=="],
"layout-base": ["layout-base@1.0.2", "", {}, "sha512-8h2oVEZNktL4BH2JCOI90iD1yXwL6iNW7KcCKT2QZgQJR2vbqDsldCTPRU9NifTCqHZci57XvQQ15YTu+sTYPg=="], "layout-base": ["layout-base@1.0.2", "", {}, "sha512-8h2oVEZNktL4BH2JCOI90iD1yXwL6iNW7KcCKT2QZgQJR2vbqDsldCTPRU9NifTCqHZci57XvQQ15YTu+sTYPg=="],
"leva": ["leva@0.10.0", "", { "dependencies": { "@radix-ui/react-portal": "1.0.2", "@radix-ui/react-tooltip": "1.0.5", "@stitches/react": "^1.2.8", "@use-gesture/react": "^10.2.5", "colord": "^2.9.2", "dequal": "^2.0.2", "merge-value": "^1.0.0", "react-colorful": "^5.5.1", "react-dropzone": "^12.0.0", "v8n": "^1.3.3", "zustand": "^3.6.9" }, "peerDependencies": { "react": "^18.0.0 || ^19.0.0", "react-dom": "^18.0.0 || ^19.0.0" } }, "sha512-RiNJWmeqQdKIeHuVXgshmxIHu144a2AMYtLxKf8Nm1j93pisDPexuQDHKNdQlbo37wdyDQibLjY9JKGIiD7gaw=="], "leva": ["leva@0.10.0", "", { "dependencies": { "@radix-ui/react-portal": "1.0.2", "@radix-ui/react-tooltip": "1.0.5", "@stitches/react": "^1.2.8", "@use-gesture/react": "^10.2.5", "colord": "^2.9.2", "dequal": "^2.0.2", "merge-value": "^1.0.0", "react-colorful": "^5.5.1", "react-dropzone": "^12.0.0", "v8n": "^1.3.3", "zustand": "^3.6.9" }, "peerDependencies": { "react": "^18.0.0 || ^19.0.0", "react-dom": "^18.0.0 || ^19.0.0" } }, "sha512-RiNJWmeqQdKIeHuVXgshmxIHu144a2AMYtLxKf8Nm1j93pisDPexuQDHKNdQlbo37wdyDQibLjY9JKGIiD7gaw=="],
@@ -1595,6 +1622,8 @@
"polished": ["polished@4.3.1", "", { "dependencies": { "@babel/runtime": "^7.17.8" } }, "sha512-OBatVyC/N7SCW/FaDHrSd+vn0o5cS855TOmYi4OkdWUMSJCET/xip//ch8xGUvtr3i44X9LVyWwQlRMTN3pwSA=="], "polished": ["polished@4.3.1", "", { "dependencies": { "@babel/runtime": "^7.17.8" } }, "sha512-OBatVyC/N7SCW/FaDHrSd+vn0o5cS855TOmYi4OkdWUMSJCET/xip//ch8xGUvtr3i44X9LVyWwQlRMTN3pwSA=="],
"portfinder": ["portfinder@1.0.38", "", { "dependencies": { "async": "^3.2.6", "debug": "^4.3.6" } }, "sha512-rEwq/ZHlJIKw++XtLAO8PPuOQA/zaPJOZJ37BVuN97nLpMJeuDVLVGRwbFoBgLudgdTMP2hdRJP++H+8QOA3vg=="],
"postcss": ["postcss@8.5.3", "", { "dependencies": { "nanoid": "^3.3.8", "picocolors": "^1.1.1", "source-map-js": "^1.2.1" } }, "sha512-dle9A3yYxlBSrt8Fu+IpjGT8SY8hN0mlaA6GY8t0P5PjIOZemULz/E2Bnm/2dcUOena75OTNkHI76uZBNUUq3A=="], "postcss": ["postcss@8.5.3", "", { "dependencies": { "nanoid": "^3.3.8", "picocolors": "^1.1.1", "source-map-js": "^1.2.1" } }, "sha512-dle9A3yYxlBSrt8Fu+IpjGT8SY8hN0mlaA6GY8t0P5PjIOZemULz/E2Bnm/2dcUOena75OTNkHI76uZBNUUq3A=="],
"postcss-import": ["postcss-import@15.1.0", "", { "dependencies": { "postcss-value-parser": "^4.0.0", "read-cache": "^1.0.0", "resolve": "^1.1.7" }, "peerDependencies": { "postcss": "^8.0.0" } }, "sha512-hpr+J05B2FVYUAXHeK1YyI267J/dDDhMU6B6civm8hSY1jYJnBXxzKDKDswzJmtLHryrjhnDjqqp/49t8FALew=="], "postcss-import": ["postcss-import@15.1.0", "", { "dependencies": { "postcss-value-parser": "^4.0.0", "read-cache": "^1.0.0", "resolve": "^1.1.7" }, "peerDependencies": { "postcss": "^8.0.0" } }, "sha512-hpr+J05B2FVYUAXHeK1YyI267J/dDDhMU6B6civm8hSY1jYJnBXxzKDKDswzJmtLHryrjhnDjqqp/49t8FALew=="],
@@ -2081,6 +2110,8 @@
"@babel/traverse/globals": ["globals@11.12.0", "", {}, "sha512-WOBp/EEGUiIsJSp7wcv/y6MO+lV9UoncWqxuFfm8eBwzWNgyfBd6Gz+IeKQ9jCmyhoH99g15M3T+QaVHFjizVA=="], "@babel/traverse/globals": ["globals@11.12.0", "", {}, "sha512-WOBp/EEGUiIsJSp7wcv/y6MO+lV9UoncWqxuFfm8eBwzWNgyfBd6Gz+IeKQ9jCmyhoH99g15M3T+QaVHFjizVA=="],
"@code-inspector/core/chalk": ["chalk@4.1.2", "", { "dependencies": { "ansi-styles": "^4.1.0", "supports-color": "^7.1.0" } }, "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA=="],
"@douyinfe/semi-foundation/remark-gfm": ["remark-gfm@4.0.0", "", { "dependencies": { "@types/mdast": "^4.0.0", "mdast-util-gfm": "^3.0.0", "micromark-extension-gfm": "^3.0.0", "remark-parse": "^11.0.0", "remark-stringify": "^11.0.0", "unified": "^11.0.0" } }, "sha512-U92vJgBPkbw4Zfu/IiW2oTZLSL3Zpv+uI7My2eq8JxKgqraFdU8YUGicEJCEgSbeaG+QDFqIcwwfMTOEelPxuA=="], "@douyinfe/semi-foundation/remark-gfm": ["remark-gfm@4.0.0", "", { "dependencies": { "@types/mdast": "^4.0.0", "mdast-util-gfm": "^3.0.0", "micromark-extension-gfm": "^3.0.0", "remark-parse": "^11.0.0", "remark-stringify": "^11.0.0", "unified": "^11.0.0" } }, "sha512-U92vJgBPkbw4Zfu/IiW2oTZLSL3Zpv+uI7My2eq8JxKgqraFdU8YUGicEJCEgSbeaG+QDFqIcwwfMTOEelPxuA=="],
"@emotion/babel-plugin/@emotion/hash": ["@emotion/hash@0.9.2", "", {}, "sha512-MyqliTZGuOm3+5ZRSaaBGP3USLw6+EGykkwZns2EPC5g8jJ4z9OrdZY9apkl3+UP9+sdz76YYkwCKP5gh8iY3g=="], "@emotion/babel-plugin/@emotion/hash": ["@emotion/hash@0.9.2", "", {}, "sha512-MyqliTZGuOm3+5ZRSaaBGP3USLw6+EGykkwZns2EPC5g8jJ4z9OrdZY9apkl3+UP9+sdz76YYkwCKP5gh8iY3g=="],
@@ -2131,6 +2162,10 @@
"@visactor/vrender-kits/roughjs": ["roughjs@4.5.2", "", { "dependencies": { "path-data-parser": "^0.1.0", "points-on-curve": "^0.2.0", "points-on-path": "^0.2.1" } }, "sha512-2xSlLDKdsWyFxrveYWk9YQ/Y9UfK38EAMRNkYkMqYBJvPX8abCa9PN0x3w02H8Oa6/0bcZICJU+U95VumPqseg=="], "@visactor/vrender-kits/roughjs": ["roughjs@4.5.2", "", { "dependencies": { "path-data-parser": "^0.1.0", "points-on-curve": "^0.2.0", "points-on-path": "^0.2.1" } }, "sha512-2xSlLDKdsWyFxrveYWk9YQ/Y9UfK38EAMRNkYkMqYBJvPX8abCa9PN0x3w02H8Oa6/0bcZICJU+U95VumPqseg=="],
"@vue/compiler-core/@babel/parser": ["@babel/parser@7.28.5", "", { "dependencies": { "@babel/types": "^7.28.5" }, "bin": "./bin/babel-parser.js" }, "sha512-KKBU1VGYR7ORr3At5HAtUQ+TV3SzRCXmA/8OdDZiLDBIZxVyzXuztPjfLd3BV1PRAQGCMWWSHYhL0F8d5uHBDQ=="],
"@vue/compiler-core/estree-walker": ["estree-walker@2.0.2", "", {}, "sha512-Rfkk/Mp/DL7JVje3u18FxFujQlTNR2q6QfMSMB7AvCBx91NGj/ba3kCfza0f6dVDbw7YlRf/nDrn7pQrCCyQ/w=="],
"antd/rc-collapse": ["rc-collapse@3.9.0", "", { "dependencies": { "@babel/runtime": "^7.10.1", "classnames": "2.x", "rc-motion": "^2.3.4", "rc-util": "^5.27.0" }, "peerDependencies": { "react": ">=16.9.0", "react-dom": ">=16.9.0" } }, "sha512-swDdz4QZ4dFTo4RAUMLL50qP0EY62N2kvmk2We5xYdRwcRn8WcYtuetCJpwpaCbUfUt5+huLpVxhvmnK+PHrkA=="], "antd/rc-collapse": ["rc-collapse@3.9.0", "", { "dependencies": { "@babel/runtime": "^7.10.1", "classnames": "2.x", "rc-motion": "^2.3.4", "rc-util": "^5.27.0" }, "peerDependencies": { "react": ">=16.9.0", "react-dom": ">=16.9.0" } }, "sha512-swDdz4QZ4dFTo4RAUMLL50qP0EY62N2kvmk2We5xYdRwcRn8WcYtuetCJpwpaCbUfUt5+huLpVxhvmnK+PHrkA=="],
"antd/scroll-into-view-if-needed": ["scroll-into-view-if-needed@3.1.0", "", { "dependencies": { "compute-scroll-into-view": "^3.0.2" } }, "sha512-49oNpRjWRvnU8NyGVmUaYG4jtTkNonFZI86MmGRDqBphEK2EXT9gdEUoQPZhuBM8yWHxCWbobltqYO5M4XrUvQ=="], "antd/scroll-into-view-if-needed": ["scroll-into-view-if-needed@3.1.0", "", { "dependencies": { "compute-scroll-into-view": "^3.0.2" } }, "sha512-49oNpRjWRvnU8NyGVmUaYG4jtTkNonFZI86MmGRDqBphEK2EXT9gdEUoQPZhuBM8yWHxCWbobltqYO5M4XrUvQ=="],
@@ -2155,6 +2190,8 @@
"esast-util-from-js/acorn": ["acorn@8.14.0", "", { "bin": { "acorn": "bin/acorn" } }, "sha512-cl669nCJTZBsL97OF4kUQm5g5hC2uihk0NxY3WENAC0TYdILVkAyHymAntgxGkl7K+t0cXIrH5siy5S4XkFycA=="], "esast-util-from-js/acorn": ["acorn@8.14.0", "", { "bin": { "acorn": "bin/acorn" } }, "sha512-cl669nCJTZBsL97OF4kUQm5g5hC2uihk0NxY3WENAC0TYdILVkAyHymAntgxGkl7K+t0cXIrH5siy5S4XkFycA=="],
"eslint/chalk": ["chalk@4.1.2", "", { "dependencies": { "ansi-styles": "^4.1.0", "supports-color": "^7.1.0" } }, "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA=="],
"extend-shallow/is-extendable": ["is-extendable@0.1.1", "", {}, "sha512-5BMULNob1vgFX6EjQw5izWDxrecWK9AM72rugNr0TFldMOi0fj6Jk+zeKIt0xGj4cEfQIJth4w3OKWOJ4f+AFw=="], "extend-shallow/is-extendable": ["is-extendable@0.1.1", "", {}, "sha512-5BMULNob1vgFX6EjQw5izWDxrecWK9AM72rugNr0TFldMOi0fj6Jk+zeKIt0xGj4cEfQIJth4w3OKWOJ4f+AFw=="],
"fast-glob/glob-parent": ["glob-parent@5.1.2", "", { "dependencies": { "is-glob": "^4.0.1" } }, "sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow=="], "fast-glob/glob-parent": ["glob-parent@5.1.2", "", { "dependencies": { "is-glob": "^4.0.1" } }, "sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow=="],
@@ -2181,6 +2218,8 @@
"katex/commander": ["commander@8.3.0", "", {}, "sha512-OkTL9umf+He2DZkUq8f8J9of7yL6RJKI24dVITBmNfZBmri9zYZQrKkuXiKhyfPSu8tUhnVBB1iKXevvnlR4Ww=="], "katex/commander": ["commander@8.3.0", "", {}, "sha512-OkTL9umf+He2DZkUq8f8J9of7yL6RJKI24dVITBmNfZBmri9zYZQrKkuXiKhyfPSu8tUhnVBB1iKXevvnlR4Ww=="],
"launch-ide/chalk": ["chalk@4.1.2", "", { "dependencies": { "ansi-styles": "^4.1.0", "supports-color": "^7.1.0" } }, "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA=="],
"leva/react-dropzone": ["react-dropzone@12.1.0", "", { "dependencies": { "attr-accept": "^2.2.2", "file-selector": "^0.5.0", "prop-types": "^15.8.1" }, "peerDependencies": { "react": ">= 16.8" } }, "sha512-iBYHA1rbopIvtzokEX4QubO6qk5IF/x3BtKGu74rF2JkQDXnwC4uO/lHKpaw4PJIV6iIAYOlwLv2FpiGyqHNog=="], "leva/react-dropzone": ["react-dropzone@12.1.0", "", { "dependencies": { "attr-accept": "^2.2.2", "file-selector": "^0.5.0", "prop-types": "^15.8.1" }, "peerDependencies": { "react": ">= 16.8" } }, "sha512-iBYHA1rbopIvtzokEX4QubO6qk5IF/x3BtKGu74rF2JkQDXnwC4uO/lHKpaw4PJIV6iIAYOlwLv2FpiGyqHNog=="],
"mdast-util-find-and-replace/escape-string-regexp": ["escape-string-regexp@5.0.0", "", {}, "sha512-/veY75JbMK4j1yjvuUxuVsiS/hr/4iHs9FTT6cgTexxdE0Ly/glccBAkloH/DofkjRbZU3bnoj38mOmhkZ0lHw=="], "mdast-util-find-and-replace/escape-string-regexp": ["escape-string-regexp@5.0.0", "", {}, "sha512-/veY75JbMK4j1yjvuUxuVsiS/hr/4iHs9FTT6cgTexxdE0Ly/glccBAkloH/DofkjRbZU3bnoj38mOmhkZ0lHw=="],
@@ -2201,6 +2240,8 @@
"parse-entities/@types/unist": ["@types/unist@2.0.11", "", {}, "sha512-CmBKiL6NNo/OqgmMn95Fk9Whlp2mtvIv+KNpQKN2F4SjvrEesubTRWGYSg+BnWZOnlCaSTU1sMpsBOzgbYhnsA=="], "parse-entities/@types/unist": ["@types/unist@2.0.11", "", {}, "sha512-CmBKiL6NNo/OqgmMn95Fk9Whlp2mtvIv+KNpQKN2F4SjvrEesubTRWGYSg+BnWZOnlCaSTU1sMpsBOzgbYhnsA=="],
"parse5/entities": ["entities@6.0.0", "", {}, "sha512-aKstq2TDOndCn4diEyp9Uq/Flu2i1GlLkc6XIDQSDMuaFE3OPW5OphLCyQ5SpSJZTb4reN+kTcYru5yIfXoRPw=="],
"path-scurry/lru-cache": ["lru-cache@11.2.2", "", {}, "sha512-F9ODfyqML2coTIsQpSkRHnLSZMtkU8Q+mSfcaIyKwy58u+8k5nvAYeiNhsyMARvzNcXJ9QfWVrcPsC9e9rAxtg=="], "path-scurry/lru-cache": ["lru-cache@11.2.2", "", {}, "sha512-F9ODfyqML2coTIsQpSkRHnLSZMtkU8Q+mSfcaIyKwy58u+8k5nvAYeiNhsyMARvzNcXJ9QfWVrcPsC9e9rAxtg=="],
"prettier-package-json/commander": ["commander@4.1.1", "", {}, "sha512-NOKm8xhkzAjzFx8B2v5OAHT+u5pRQc2UCa2Vq9jYL/31o2wi9mxBA7LIFs3sV5VSC49z6pEhfbMULvShKj26WA=="], "prettier-package-json/commander": ["commander@4.1.1", "", {}, "sha512-NOKm8xhkzAjzFx8B2v5OAHT+u5pRQc2UCa2Vq9jYL/31o2wi9mxBA7LIFs3sV5VSC49z6pEhfbMULvShKj26WA=="],
@@ -2269,6 +2310,8 @@
"@radix-ui/react-primitive/@radix-ui/react-slot/@radix-ui/react-compose-refs": ["@radix-ui/react-compose-refs@1.0.0", "", { "dependencies": { "@babel/runtime": "^7.13.10" }, "peerDependencies": { "react": "^16.8 || ^17.0 || ^18.0" } }, "sha512-0KaSv6sx787/hK3eF53iOkiSLwAGlFMx5lotrqD2pTjB18KbybKoEIgkNZTKC60YECDQTKGTRcDBILwZVqVKvA=="], "@radix-ui/react-primitive/@radix-ui/react-slot/@radix-ui/react-compose-refs": ["@radix-ui/react-compose-refs@1.0.0", "", { "dependencies": { "@babel/runtime": "^7.13.10" }, "peerDependencies": { "react": "^16.8 || ^17.0 || ^18.0" } }, "sha512-0KaSv6sx787/hK3eF53iOkiSLwAGlFMx5lotrqD2pTjB18KbybKoEIgkNZTKC60YECDQTKGTRcDBILwZVqVKvA=="],
"@vue/compiler-core/@babel/parser/@babel/types": ["@babel/types@7.28.5", "", { "dependencies": { "@babel/helper-string-parser": "^7.27.1", "@babel/helper-validator-identifier": "^7.28.5" } }, "sha512-qQ5m48eI/MFLQ5PxQj4PFaprjyCTLI37ElWMmNs0K8Lk3dVeOdNpB3ks8jc7yM5CDmVC73eMVk/trk3fgmrUpA=="],
"antd/scroll-into-view-if-needed/compute-scroll-into-view": ["compute-scroll-into-view@3.1.1", "", {}, "sha512-VRhuHOLoKYOy4UbilLbUzbYg93XLjv2PncJC50EuTWPA3gaja1UjBsUP/D/9/juV3vQFr6XBEzn9KCAHdUvOHw=="], "antd/scroll-into-view-if-needed/compute-scroll-into-view": ["compute-scroll-into-view@3.1.1", "", {}, "sha512-VRhuHOLoKYOy4UbilLbUzbYg93XLjv2PncJC50EuTWPA3gaja1UjBsUP/D/9/juV3vQFr6XBEzn9KCAHdUvOHw=="],
"cytoscape-fcose/cose-base/layout-base": ["layout-base@2.0.1", "", {}, "sha512-dp3s92+uNI1hWIpPGH3jK2kxE2lMjdXdr+DH8ynZHpd6PUlH6x6cbuXnoMmiNumznqaNO31xu9e79F0uuZ0JFg=="], "cytoscape-fcose/cose-base/layout-base": ["layout-base@2.0.1", "", {}, "sha512-dp3s92+uNI1hWIpPGH3jK2kxE2lMjdXdr+DH8ynZHpd6PUlH6x6cbuXnoMmiNumznqaNO31xu9e79F0uuZ0JFg=="],
@@ -2325,6 +2368,10 @@
"@radix-ui/react-popper/@floating-ui/react-dom/@floating-ui/dom/@floating-ui/core": ["@floating-ui/core@0.7.3", "", {}, "sha512-buc8BXHmG9l82+OQXOFU3Kr2XQx9ys01U/Q9HMIrZ300iLc8HLMgh7dcCqgYzAzf4BkoQvDcXf5Y+CuEZ5JBYg=="], "@radix-ui/react-popper/@floating-ui/react-dom/@floating-ui/dom/@floating-ui/core": ["@floating-ui/core@0.7.3", "", {}, "sha512-buc8BXHmG9l82+OQXOFU3Kr2XQx9ys01U/Q9HMIrZ300iLc8HLMgh7dcCqgYzAzf4BkoQvDcXf5Y+CuEZ5JBYg=="],
"@vue/compiler-core/@babel/parser/@babel/types/@babel/helper-string-parser": ["@babel/helper-string-parser@7.27.1", "", {}, "sha512-qMlSxKbpRlAridDExk92nSobyDdpPijUq2DW6oDnUqd0iOGxmQjyqhMIihI9+zv4LPyZdRje2cavWPbCbWm3eA=="],
"@vue/compiler-core/@babel/parser/@babel/types/@babel/helper-validator-identifier": ["@babel/helper-validator-identifier@7.28.5", "", {}, "sha512-qSs4ifwzKJSV39ucNjsvc6WVHs6b7S03sOh2OcHF9UHfVPqWWALUsNUVzhSBiItjRZoLHx7nIarVjqKVusUZ1Q=="],
"simplify-geojson/concat-stream/readable-stream/string_decoder": ["string_decoder@0.10.31", "", {}, "sha512-ev2QzSzWPYmy9GuqfIVildA4OdcGLeFZQrq5ys6RtiuF+RQQiZWr8TZNyAcuVXyQRYfEO+MsoB/1BuQVhOJuoQ=="], "simplify-geojson/concat-stream/readable-stream/string_decoder": ["string_decoder@0.10.31", "", {}, "sha512-ev2QzSzWPYmy9GuqfIVildA4OdcGLeFZQrq5ys6RtiuF+RQQiZWr8TZNyAcuVXyQRYfEO+MsoB/1BuQVhOJuoQ=="],
"sucrase/glob/minimatch/brace-expansion": ["brace-expansion@2.0.1", "", { "dependencies": { "balanced-match": "^1.0.0" } }, "sha512-XnAIvQ8eM+kC6aULx6wuQiwVsnzsi9d3WxzV3FpWTGA19F621kwdbsAcFKXgKUHZWsy+mY6iL1sHTxWEFCytDA=="], "sucrase/glob/minimatch/brace-expansion": ["brace-expansion@2.0.1", "", { "dependencies": { "balanced-match": "^1.0.0" } }, "sha512-XnAIvQ8eM+kC6aULx6wuQiwVsnzsi9d3WxzV3FpWTGA19F621kwdbsAcFKXgKUHZWsy+mY6iL1sHTxWEFCytDA=="],

View File

@@ -78,15 +78,16 @@
"@so1ve/prettier-config": "^3.1.0", "@so1ve/prettier-config": "^3.1.0",
"@vitejs/plugin-react": "^4.2.1", "@vitejs/plugin-react": "^4.2.1",
"autoprefixer": "^10.4.21", "autoprefixer": "^10.4.21",
"code-inspector-plugin": "^1.3.3",
"eslint": "8.57.0", "eslint": "8.57.0",
"eslint-plugin-header": "^3.1.1", "eslint-plugin-header": "^3.1.1",
"eslint-plugin-react-hooks": "^5.2.0", "eslint-plugin-react-hooks": "^5.2.0",
"i18next-cli": "^1.10.3",
"postcss": "^8.5.3", "postcss": "^8.5.3",
"prettier": "^3.0.0", "prettier": "^3.0.0",
"tailwindcss": "^3", "tailwindcss": "^3",
"typescript": "4.4.2", "typescript": "4.4.2",
"vite": "^5.2.0", "vite": "^5.2.0"
"i18next-cli": "^1.10.3"
}, },
"prettier": { "prettier": {
"singleQuote": true, "singleQuote": true,

View File

@@ -32,6 +32,7 @@ const ModelSetting = () => {
'gemini.safety_settings': '', 'gemini.safety_settings': '',
'gemini.version_settings': '', 'gemini.version_settings': '',
'gemini.supported_imagine_models': '', 'gemini.supported_imagine_models': '',
'gemini.remove_function_response_id_enabled': true,
'claude.model_headers_settings': '', 'claude.model_headers_settings': '',
'claude.thinking_adapter_enabled': true, 'claude.thinking_adapter_enabled': true,
'claude.default_max_tokens': '', 'claude.default_max_tokens': '',
@@ -64,6 +65,7 @@ const ModelSetting = () => {
item.value = JSON.stringify(JSON.parse(item.value), null, 2); item.value = JSON.stringify(JSON.parse(item.value), null, 2);
} }
} }
// Keep boolean config keys ending with enabled/Enabled so UI parses correctly.
if (item.key.endsWith('Enabled') || item.key.endsWith('enabled')) { if (item.key.endsWith('Enabled') || item.key.endsWith('enabled')) {
newInputs[item.key] = toBoolean(item.value); newInputs[item.key] = toBoolean(item.value);
} else { } else {

View File

@@ -1604,7 +1604,7 @@ const EditChannelModal = (props) => {
> >
{() => ( {() => (
<Spin spinning={loading}> <Spin spinning={loading}>
<div className='p-2' ref={formContainerRef}> <div className='p-2 space-y-3' ref={formContainerRef}>
<div ref={(el) => (formSectionRefs.current.basicInfo = el)}> <div ref={(el) => (formSectionRefs.current.basicInfo = el)}>
<Card className='!rounded-2xl shadow-sm border-0 mb-6'> <Card className='!rounded-2xl shadow-sm border-0 mb-6'>
{/* Header: Basic Info */} {/* Header: Basic Info */}

View File

@@ -557,11 +557,11 @@ const EditTokenModal = (props) => {
<Col span={24}> <Col span={24}>
<Form.TextArea <Form.TextArea
field='allow_ips' field='allow_ips'
label={t('IP白名单')} label={t('IP白名单支持CIDR表达式')}
placeholder={t('允许的IP一行一个不填写则不限制')} placeholder={t('允许的IP一行一个不填写则不限制')}
autosize autosize
rows={1} rows={1}
extraText={t('请勿过度信任此功能IP可能被伪造')} extraText={t('请勿过度信任此功能IP可能被伪造请配合nginx和cdn等网关使用')}
showClear showClear
style={{ width: '100%' }} style={{ width: '100%' }}
/> />

View File

@@ -97,7 +97,7 @@
"Homepage URL 填": "Fill in the Homepage URL", "Homepage URL 填": "Fill in the Homepage URL",
"ID": "ID", "ID": "ID",
"IP": "IP", "IP": "IP",
"IP白名单": "IP whitelist", "IP白名单支持CIDR表达式": "IP whitelist (supports CIDR expressions)",
"IP限制": "IP restrictions", "IP限制": "IP restrictions",
"IP黑名单": "IP blacklist", "IP黑名单": "IP blacklist",
"JSON": "JSON", "JSON": "JSON",
@@ -153,6 +153,7 @@
"URL链接": "URL Link", "URL链接": "URL Link",
"USD (美元)": "USD (US Dollar)", "USD (美元)": "USD (US Dollar)",
"User Info Endpoint": "User Info Endpoint", "User Info Endpoint": "User Info Endpoint",
"Vertex AI 不支持 functionResponse.id 字段,开启后将自动移除该字段": "Vertex AI does not support the functionResponse.id field. When enabled, this field will be automatically removed",
"Webhook 密钥": "Webhook Secret", "Webhook 密钥": "Webhook Secret",
"Webhook 签名密钥": "Webhook Signature Key", "Webhook 签名密钥": "Webhook Signature Key",
"Webhook地址": "Webhook URL", "Webhook地址": "Webhook URL",
@@ -1510,6 +1511,7 @@
"私有IP访问详细说明": "⚠️ Security Warning: Enabling this allows access to internal network resources (localhost, private networks). Only enable if you need to access internal services and understand the security implications.", "私有IP访问详细说明": "⚠️ Security Warning: Enabling this allows access to internal network resources (localhost, private networks). Only enable if you need to access internal services and understand the security implications.",
"私有部署地址": "Private Deployment Address", "私有部署地址": "Private Deployment Address",
"秒": "Second", "秒": "Second",
"移除 functionResponse.id 字段": "Remove functionResponse.id Field",
"移除 One API 的版权标识必须首先获得授权,项目维护需要花费大量精力,如果本项目对你有意义,请主动支持本项目": "Removal of One API copyright mark must first be authorized. Project maintenance requires a lot of effort. If this project is meaningful to you, please actively support it.", "移除 One API 的版权标识必须首先获得授权,项目维护需要花费大量精力,如果本项目对你有意义,请主动支持本项目": "Removal of One API copyright mark must first be authorized. Project maintenance requires a lot of effort. If this project is meaningful to you, please actively support it.",
"窗口处理": "window handling", "窗口处理": "window handling",
"窗口等待": "window wait", "窗口等待": "window wait",
@@ -1752,7 +1754,7 @@
"请先阅读并同意用户协议和隐私政策": "Please read and agree to the user agreement and privacy policy first", "请先阅读并同意用户协议和隐私政策": "Please read and agree to the user agreement and privacy policy first",
"请再次输入新密码": "Please enter the new password again", "请再次输入新密码": "Please enter the new password again",
"请前往个人设置 → 安全设置进行配置。": "Please go to Personal Settings → Security Settings to configure.", "请前往个人设置 → 安全设置进行配置。": "Please go to Personal Settings → Security Settings to configure.",
"请勿过度信任此功能IP可能被伪造": "Do not over-trust this feature, IP can be spoofed", "请勿过度信任此功能IP可能被伪造请配合nginx和cdn等网关使用": "Do not over-trust this feature, IP can be spoofed, please use it in conjunction with gateways such as nginx and CDN",
"请在系统设置页面编辑分组倍率以添加新的分组:": "Please edit Group ratios in system settings to add new groups:", "请在系统设置页面编辑分组倍率以添加新的分组:": "Please edit Group ratios in system settings to add new groups:",
"请填写完整的产品信息": "Please fill in complete product information", "请填写完整的产品信息": "Please fill in complete product information",
"请填写完整的管理员账号信息": "Please fill in the complete administrator account information", "请填写完整的管理员账号信息": "Please fill in the complete administrator account information",

View File

@@ -99,7 +99,7 @@
"Homepage URL 填": "Remplir l'URL de la page d'accueil", "Homepage URL 填": "Remplir l'URL de la page d'accueil",
"ID": "ID", "ID": "ID",
"IP": "IP", "IP": "IP",
"IP白名单": "Liste blanche d'adresses IP", "IP白名单支持CIDR表达式": "Liste blanche d'adresses IP (prise en charge des expressions CIDR)",
"IP限制": "Restrictions d'IP", "IP限制": "Restrictions d'IP",
"IP黑名单": "Liste noire d'adresses IP", "IP黑名单": "Liste noire d'adresses IP",
"JSON": "JSON", "JSON": "JSON",
@@ -154,6 +154,7 @@
"URL链接": "Lien URL", "URL链接": "Lien URL",
"USD (美元)": "USD (Dollar US)", "USD (美元)": "USD (Dollar US)",
"User Info Endpoint": "Point de terminaison des informations utilisateur", "User Info Endpoint": "Point de terminaison des informations utilisateur",
"Vertex AI 不支持 functionResponse.id 字段,开启后将自动移除该字段": "Vertex AI ne prend pas en charge le champ functionResponse.id. Lorsqu'il est activé, ce champ sera automatiquement supprimé",
"Webhook 密钥": "Clé Webhook", "Webhook 密钥": "Clé Webhook",
"Webhook 签名密钥": "Clé de signature Webhook", "Webhook 签名密钥": "Clé de signature Webhook",
"Webhook地址": "URL du Webhook", "Webhook地址": "URL du Webhook",
@@ -1520,6 +1521,7 @@
"私有IP访问详细说明": "⚠️ Avertissement de sécurité : l'activation de cette option autorise l'accès aux ressources du réseau interne (localhost, réseaux privés). N'activez cette option que si vous devez accéder à des services internes et que vous comprenez les implications en matière de sécurité.", "私有IP访问详细说明": "⚠️ Avertissement de sécurité : l'activation de cette option autorise l'accès aux ressources du réseau interne (localhost, réseaux privés). N'activez cette option que si vous devez accéder à des services internes et que vous comprenez les implications en matière de sécurité.",
"私有部署地址": "Adresse de déploiement privée", "私有部署地址": "Adresse de déploiement privée",
"秒": "Seconde", "秒": "Seconde",
"移除 functionResponse.id 字段": "Supprimer le champ functionResponse.id",
"移除 One API 的版权标识必须首先获得授权,项目维护需要花费大量精力,如果本项目对你有意义,请主动支持本项目": "La suppression de la marque de copyright de One API doit d'abord être autorisée. La maintenance du projet demande beaucoup d'efforts. Si ce projet a du sens pour vous, veuillez le soutenir activement.", "移除 One API 的版权标识必须首先获得授权,项目维护需要花费大量精力,如果本项目对你有意义,请主动支持本项目": "La suppression de la marque de copyright de One API doit d'abord être autorisée. La maintenance du projet demande beaucoup d'efforts. Si ce projet a du sens pour vous, veuillez le soutenir activement.",
"窗口处理": "gestion des fenêtres", "窗口处理": "gestion des fenêtres",
"窗口等待": "attente de la fenêtre", "窗口等待": "attente de la fenêtre",
@@ -1762,7 +1764,7 @@
"请先阅读并同意用户协议和隐私政策": "Veuillez d'abord lire et accepter l'accord utilisateur et la politique de confidentialité", "请先阅读并同意用户协议和隐私政策": "Veuillez d'abord lire et accepter l'accord utilisateur et la politique de confidentialité",
"请再次输入新密码": "Veuillez saisir à nouveau le nouveau mot de passe", "请再次输入新密码": "Veuillez saisir à nouveau le nouveau mot de passe",
"请前往个人设置 → 安全设置进行配置。": "Veuillez aller dans Paramètres personnels → Paramètres de sécurité pour configurer.", "请前往个人设置 → 安全设置进行配置。": "Veuillez aller dans Paramètres personnels → Paramètres de sécurité pour configurer.",
"请勿过度信任此功能IP可能被伪造": "Ne faites pas trop confiance à cette fonctionnalité, l'IP peut être usurpée", "请勿过度信任此功能IP可能被伪造请配合nginx和cdn等网关使用": "Ne faites pas trop confiance à cette fonctionnalité, l'IP peut être usurpée, veuillez l'utiliser en conjonction avec des passerelles telles que nginx et cdn",
"请在系统设置页面编辑分组倍率以添加新的分组:": "Veuillez modifier les ratios de groupe dans les paramètres système pour ajouter de nouveaux groupes :", "请在系统设置页面编辑分组倍率以添加新的分组:": "Veuillez modifier les ratios de groupe dans les paramètres système pour ajouter de nouveaux groupes :",
"请填写完整的产品信息": "Veuillez renseigner l'ensemble des informations produit", "请填写完整的产品信息": "Veuillez renseigner l'ensemble des informations produit",
"请填写完整的管理员账号信息": "Veuillez remplir les informations complètes du compte administrateur", "请填写完整的管理员账号信息": "Veuillez remplir les informations complètes du compte administrateur",

View File

@@ -82,7 +82,7 @@
"Homepage URL 填": "ホームページURLを入力してください", "Homepage URL 填": "ホームページURLを入力してください",
"ID": "ID", "ID": "ID",
"IP": "IP", "IP": "IP",
"IP白名单": "IPホワイトリスト", "IP白名单支持CIDR表达式": "IPホワイトリストCIDR表記に対応",
"IP限制": "IP制限", "IP限制": "IP制限",
"IP黑名单": "IPブラックリスト", "IP黑名单": "IPブラックリスト",
"JSON": "JSON", "JSON": "JSON",
@@ -136,6 +136,7 @@
"Uptime Kuma监控分类管理可以配置多个监控分类用于服务状态展示最多20个": "Uptime Kumaの監視分類管理サービスステータス表示用に、複数の監視分類を設定できます最大20個", "Uptime Kuma监控分类管理可以配置多个监控分类用于服务状态展示最多20个": "Uptime Kumaの監視分類管理サービスステータス表示用に、複数の監視分類を設定できます最大20個",
"URL链接": "URL", "URL链接": "URL",
"User Info Endpoint": "User Info Endpoint", "User Info Endpoint": "User Info Endpoint",
"Vertex AI 不支持 functionResponse.id 字段,开启后将自动移除该字段": "Vertex AIはfunctionResponse.idフィールドをサポートしていません。有効にすると、このフィールドは自動的に削除されます",
"Webhook 签名密钥": "Webhook署名シークレット", "Webhook 签名密钥": "Webhook署名シークレット",
"Webhook地址": "Webhook URL", "Webhook地址": "Webhook URL",
"Webhook地址必须以https://开头": "Webhook URLは、https://で始まることが必須です", "Webhook地址必须以https://开头": "Webhook URLは、https://で始まることが必須です",
@@ -1440,6 +1441,7 @@
"私有IP访问详细说明": "プライベートIPアクセスの詳細説明", "私有IP访问详细说明": "プライベートIPアクセスの詳細説明",
"私有部署地址": "プライベートデプロイ先URL", "私有部署地址": "プライベートデプロイ先URL",
"秒": "秒", "秒": "秒",
"移除 functionResponse.id 字段": "functionResponse.idフィールドを削除",
"移除 One API 的版权标识必须首先获得授权,项目维护需要花费大量精力,如果本项目对你有意义,请主动支持本项目": "One APIの著作権表示を削除するには、事前の許可が必要です。プロジェクトの維持には多大な労力がかかります。もしこのプロジェクトがあなたにとって有意義でしたら、積極的なご支援をお願いいたします", "移除 One API 的版权标识必须首先获得授权,项目维护需要花费大量精力,如果本项目对你有意义,请主动支持本项目": "One APIの著作権表示を削除するには、事前の許可が必要です。プロジェクトの維持には多大な労力がかかります。もしこのプロジェクトがあなたにとって有意義でしたら、積極的なご支援をお願いいたします",
"窗口处理": "ウィンドウ処理", "窗口处理": "ウィンドウ処理",
"窗口等待": "ウィンドウ待機中", "窗口等待": "ウィンドウ待機中",
@@ -1669,7 +1671,7 @@
"请先阅读并同意用户协议和隐私政策": "まずユーザー利用規約とプライバシーポリシーをご確認の上、同意してください", "请先阅读并同意用户协议和隐私政策": "まずユーザー利用規約とプライバシーポリシーをご確認の上、同意してください",
"请再次输入新密码": "新しいパスワードを再入力してください", "请再次输入新密码": "新しいパスワードを再入力してください",
"请前往个人设置 → 安全设置进行配置。": "アカウント設定 → セキュリティ設定 にて設定してください。", "请前往个人设置 → 安全设置进行配置。": "アカウント設定 → セキュリティ設定 にて設定してください。",
"请勿过度信任此功能IP可能被伪造": "IPは偽装される可能性があるため、この機能を過信しないでください", "请勿过度信任此功能IP可能被伪造请配合nginx和cdn等网关使用": "IPは偽装される可能性があるため、この機能を過信しないでください。nginxやCDNなどのゲートウェイと組み合わせて使用してください。",
"请在系统设置页面编辑分组倍率以添加新的分组:": "新規グループを追加するには、システム設定ページでグループ倍率を編集してください:", "请在系统设置页面编辑分组倍率以添加新的分组:": "新規グループを追加するには、システム設定ページでグループ倍率を編集してください:",
"请填写完整的管理员账号信息": "管理者アカウント情報をすべて入力してください", "请填写完整的管理员账号信息": "管理者アカウント情報をすべて入力してください",
"请填写密钥": "APIキーを入力してください", "请填写密钥": "APIキーを入力してください",

View File

@@ -101,7 +101,7 @@
"Homepage URL 填": "URL домашней страницы:", "Homepage URL 填": "URL домашней страницы:",
"ID": "ID", "ID": "ID",
"IP": "IP", "IP": "IP",
"IP白名单": "Белый список IP", "IP白名单支持CIDR表达式": "Белый список IP (поддерживает выражения CIDR)",
"IP限制": "Ограничения IP", "IP限制": "Ограничения IP",
"IP黑名单": "Черный список IP", "IP黑名单": "Черный список IP",
"JSON": "JSON", "JSON": "JSON",
@@ -156,6 +156,7 @@
"URL链接": "URL ссылка", "URL链接": "URL ссылка",
"USD (美元)": "USD (доллар США)", "USD (美元)": "USD (доллар США)",
"User Info Endpoint": "Конечная точка информации о пользователе", "User Info Endpoint": "Конечная точка информации о пользователе",
"Vertex AI 不支持 functionResponse.id 字段,开启后将自动移除该字段": "Vertex AI не поддерживает поле functionResponse.id. При включении это поле будет автоматически удалено",
"Webhook 密钥": "Секрет вебхука", "Webhook 密钥": "Секрет вебхука",
"Webhook 签名密钥": "Ключ подписи Webhook", "Webhook 签名密钥": "Ключ подписи Webhook",
"Webhook地址": "Адрес Webhook", "Webhook地址": "Адрес Webhook",
@@ -1531,6 +1532,7 @@
"私有IP访问详细说明": "⚠️ Предупреждение безопасности: включение этой опции позволит доступ к ресурсам внутренней сети (localhost, частные сети). Включайте только при необходимости доступа к внутренним службам и понимании рисков безопасности.", "私有IP访问详细说明": "⚠️ Предупреждение безопасности: включение этой опции позволит доступ к ресурсам внутренней сети (localhost, частные сети). Включайте только при необходимости доступа к внутренним службам и понимании рисков безопасности.",
"私有部署地址": "Адрес частного развёртывания", "私有部署地址": "Адрес частного развёртывания",
"秒": "секунда", "秒": "секунда",
"移除 functionResponse.id 字段": "Удалить поле functionResponse.id",
"移除 One API 的版权标识必须首先获得授权,项目维护需要花费大量精力,如果本项目对你有意义,请主动支持本项目": "Удаление авторских знаков One API требует предварительного разрешения, поддержка проекта требует больших усилий, если этот проект важен для вас, пожалуйста, поддержите его", "移除 One API 的版权标识必须首先获得授权,项目维护需要花费大量精力,如果本项目对你有意义,请主动支持本项目": "Удаление авторских знаков One API требует предварительного разрешения, поддержка проекта требует больших усилий, если этот проект важен для вас, пожалуйста, поддержите его",
"窗口处理": "Обработка окна", "窗口处理": "Обработка окна",
"窗口等待": "Ожидание окна", "窗口等待": "Ожидание окна",
@@ -1773,7 +1775,7 @@
"请先阅读并同意用户协议和隐私政策": "Пожалуйста, сначала прочтите и согласитесь с пользовательским соглашением и политикой конфиденциальности", "请先阅读并同意用户协议和隐私政策": "Пожалуйста, сначала прочтите и согласитесь с пользовательским соглашением и политикой конфиденциальности",
"请再次输入新密码": "Пожалуйста, введите новый пароль ещё раз", "请再次输入新密码": "Пожалуйста, введите новый пароль ещё раз",
"请前往个人设置 → 安全设置进行配置。": "Пожалуйста, перейдите в Личные настройки → Настройки безопасности для конфигурации.", "请前往个人设置 → 安全设置进行配置。": "Пожалуйста, перейдите в Личные настройки → Настройки безопасности для конфигурации.",
"请勿过度信任此功能IP可能被伪造": "Не доверяйте этой функции чрезмерно, IP может быть подделан", "请勿过度信任此功能IP可能被伪造请配合nginx和cdn等网关使用": "Не доверяйте этой функции чрезмерно, IP может быть подделан, используйте её вместе с nginx и CDN и другими шлюзами",
"请在系统设置页面编辑分组倍率以添加新的分组:": "Пожалуйста, отредактируйте коэффициенты групп на странице системных настроек для добавления новой группы:", "请在系统设置页面编辑分组倍率以添加新的分组:": "Пожалуйста, отредактируйте коэффициенты групп на странице системных настроек для добавления новой группы:",
"请填写完整的产品信息": "Пожалуйста, заполните всю информацию о продукте", "请填写完整的产品信息": "Пожалуйста, заполните всю информацию о продукте",
"请填写完整的管理员账号信息": "Пожалуйста, заполните полную информацию об учётной записи администратора", "请填写完整的管理员账号信息": "Пожалуйста, заполните полную информацию об учётной записи администратора",

View File

@@ -82,7 +82,7 @@
"Homepage URL 填": "Điền URL trang chủ", "Homepage URL 填": "Điền URL trang chủ",
"ID": "ID", "ID": "ID",
"IP": "IP", "IP": "IP",
"IP白名单": "Danh sách trắng IP", "IP白名单支持CIDR表达式": "Danh sách trắng IP (hỗ trợ biểu thức CIDR)",
"IP限制": "Hạn chế IP", "IP限制": "Hạn chế IP",
"IP黑名单": "Danh sách đen IP", "IP黑名单": "Danh sách đen IP",
"JSON": "JSON", "JSON": "JSON",
@@ -136,6 +136,7 @@
"Uptime Kuma监控分类管理可以配置多个监控分类用于服务状态展示最多20个": "Quản lý danh mục giám sát Uptime Kuma, bạn có thể cấu hình nhiều danh mục giám sát để hiển thị trạng thái dịch vụ (tối đa 20)", "Uptime Kuma监控分类管理可以配置多个监控分类用于服务状态展示最多20个": "Quản lý danh mục giám sát Uptime Kuma, bạn có thể cấu hình nhiều danh mục giám sát để hiển thị trạng thái dịch vụ (tối đa 20)",
"URL链接": "Liên kết URL", "URL链接": "Liên kết URL",
"User Info Endpoint": "User Info Endpoint", "User Info Endpoint": "User Info Endpoint",
"Vertex AI 不支持 functionResponse.id 字段,开启后将自动移除该字段": "Vertex AI không hỗ trợ trường functionResponse.id. Khi bật, trường này sẽ tự động bị xóa",
"Webhook 签名密钥": "Khóa chữ ký Webhook", "Webhook 签名密钥": "Khóa chữ ký Webhook",
"Webhook地址": "URL Webhook", "Webhook地址": "URL Webhook",
"Webhook地址必须以https://开头": "URL Webhook phải bắt đầu bằng https://", "Webhook地址必须以https://开头": "URL Webhook phải bắt đầu bằng https://",
@@ -1987,7 +1988,7 @@
"请先阅读并同意用户协议和隐私政策": "Vui lòng đọc và đồng ý với thỏa thuận người dùng và chính sách bảo mật trước", "请先阅读并同意用户协议和隐私政策": "Vui lòng đọc và đồng ý với thỏa thuận người dùng và chính sách bảo mật trước",
"请再次输入新密码": "Vui lòng nhập lại mật khẩu mới", "请再次输入新密码": "Vui lòng nhập lại mật khẩu mới",
"请前往个人设置 → 安全设置进行配置。": "Vui lòng truy cập Cài đặt cá nhân → Cài đặt bảo mật để cấu hình.", "请前往个人设置 → 安全设置进行配置。": "Vui lòng truy cập Cài đặt cá nhân → Cài đặt bảo mật để cấu hình.",
"请勿过度信任此功能IP可能被伪造": "Đừng quá tin tưởng tính năng này, IP có thể bị giả mạo", "请勿过度信任此功能IP可能被伪造请配合nginx和cdn等网关使用": "Đừng quá tin tưởng tính năng này, IP có thể bị giả mạo, vui lòng sử dụng cùng với nginx và các cổng khác như cdn",
"请在系统设置页面编辑分组倍率以添加新的分组:": "Vui lòng chỉnh sửa tỷ lệ nhóm trên trang cài đặt hệ thống để thêm nhóm mới:", "请在系统设置页面编辑分组倍率以添加新的分组:": "Vui lòng chỉnh sửa tỷ lệ nhóm trên trang cài đặt hệ thống để thêm nhóm mới:",
"请填写完整的管理员账号信息": "Vui lòng điền đầy đủ thông tin tài khoản quản trị viên", "请填写完整的管理员账号信息": "Vui lòng điền đầy đủ thông tin tài khoản quản trị viên",
"请填写密钥": "Vui lòng điền khóa", "请填写密钥": "Vui lòng điền khóa",
@@ -2648,6 +2649,7 @@
"私有IP访问详细说明": "⚠️ Cảnh báo bảo mật: Bật tính năng này cho phép truy cập vào tài nguyên mạng nội bộ (localhost, mạng riêng). Chỉ bật nếu bạn cần truy cập các dịch vụ nội bộ và hiểu rõ các rủi ro bảo mật.", "私有IP访问详细说明": "⚠️ Cảnh báo bảo mật: Bật tính năng này cho phép truy cập vào tài nguyên mạng nội bộ (localhost, mạng riêng). Chỉ bật nếu bạn cần truy cập các dịch vụ nội bộ và hiểu rõ các rủi ro bảo mật.",
"私有部署地址": "Địa chỉ triển khai riêng", "私有部署地址": "Địa chỉ triển khai riêng",
"秒": "Giây", "秒": "Giây",
"移除 functionResponse.id 字段": "Xóa trường functionResponse.id",
"移除 One API 的版权标识必须首先获得授权,项目维护需要花费大量精力,如果本项目对你有意义,请主动支持本项目": "Việc xóa dấu bản quyền One API trước tiên phải được ủy quyền. Việc bảo trì dự án đòi hỏi rất nhiều nỗ lực. Nếu dự án này có ý nghĩa với bạn, vui lòng chủ động ủng hộ dự án này.", "移除 One API 的版权标识必须首先获得授权,项目维护需要花费大量精力,如果本项目对你有意义,请主动支持本项目": "Việc xóa dấu bản quyền One API trước tiên phải được ủy quyền. Việc bảo trì dự án đòi hỏi rất nhiều nỗ lực. Nếu dự án này có ý nghĩa với bạn, vui lòng chủ động ủng hộ dự án này.",
"窗口处理": "xử lý cửa sổ", "窗口处理": "xử lý cửa sổ",
"窗口等待": "chờ cửa sổ", "窗口等待": "chờ cửa sổ",

View File

@@ -95,7 +95,7 @@
"Homepage URL 填": "Homepage URL 填", "Homepage URL 填": "Homepage URL 填",
"ID": "ID", "ID": "ID",
"IP": "IP", "IP": "IP",
"IP白名单": "IP白名单", "IP白名单支持CIDR表达式": "IP白名单支持CIDR表达式",
"IP限制": "IP限制", "IP限制": "IP限制",
"IP黑名单": "IP黑名单", "IP黑名单": "IP黑名单",
"JSON": "JSON", "JSON": "JSON",
@@ -150,6 +150,7 @@
"URL链接": "URL链接", "URL链接": "URL链接",
"USD (美元)": "USD (美元)", "USD (美元)": "USD (美元)",
"User Info Endpoint": "User Info Endpoint", "User Info Endpoint": "User Info Endpoint",
"Vertex AI 不支持 functionResponse.id 字段,开启后将自动移除该字段": "Vertex AI 不支持 functionResponse.id 字段,开启后将自动移除该字段",
"Webhook 密钥": "Webhook 密钥", "Webhook 密钥": "Webhook 密钥",
"Webhook 签名密钥": "Webhook 签名密钥", "Webhook 签名密钥": "Webhook 签名密钥",
"Webhook地址": "Webhook地址", "Webhook地址": "Webhook地址",
@@ -1498,6 +1499,7 @@
"私有IP访问详细说明": "⚠️ 安全警告:启用此选项将允许访问内网资源(本地主机、私有网络)。仅在需要访问内部服务且了解安全风险的情况下启用。", "私有IP访问详细说明": "⚠️ 安全警告:启用此选项将允许访问内网资源(本地主机、私有网络)。仅在需要访问内部服务且了解安全风险的情况下启用。",
"私有部署地址": "私有部署地址", "私有部署地址": "私有部署地址",
"秒": "秒", "秒": "秒",
"移除 functionResponse.id 字段": "移除 functionResponse.id 字段",
"移除 One API 的版权标识必须首先获得授权,项目维护需要花费大量精力,如果本项目对你有意义,请主动支持本项目": "移除 One API 的版权标识必须首先获得授权,项目维护需要花费大量精力,如果本项目对你有意义,请主动支持本项目", "移除 One API 的版权标识必须首先获得授权,项目维护需要花费大量精力,如果本项目对你有意义,请主动支持本项目": "移除 One API 的版权标识必须首先获得授权,项目维护需要花费大量精力,如果本项目对你有意义,请主动支持本项目",
"窗口处理": "窗口处理", "窗口处理": "窗口处理",
"窗口等待": "窗口等待", "窗口等待": "窗口等待",
@@ -1740,7 +1742,7 @@
"请先阅读并同意用户协议和隐私政策": "请先阅读并同意用户协议和隐私政策", "请先阅读并同意用户协议和隐私政策": "请先阅读并同意用户协议和隐私政策",
"请再次输入新密码": "请再次输入新密码", "请再次输入新密码": "请再次输入新密码",
"请前往个人设置 → 安全设置进行配置。": "请前往个人设置 → 安全设置进行配置。", "请前往个人设置 → 安全设置进行配置。": "请前往个人设置 → 安全设置进行配置。",
"请勿过度信任此功能IP可能被伪造": "请勿过度信任此功能IP可能被伪造", "请勿过度信任此功能IP可能被伪造请配合nginx和cdn等网关使用": "请勿过度信任此功能IP可能被伪造请配合nginx和cdn等网关使用",
"请在系统设置页面编辑分组倍率以添加新的分组:": "请在系统设置页面编辑分组倍率以添加新的分组:", "请在系统设置页面编辑分组倍率以添加新的分组:": "请在系统设置页面编辑分组倍率以添加新的分组:",
"请填写完整的产品信息": "请填写完整的产品信息", "请填写完整的产品信息": "请填写完整的产品信息",
"请填写完整的管理员账号信息": "请填写完整的管理员账号信息", "请填写完整的管理员账号信息": "请填写完整的管理员账号信息",

View File

@@ -46,6 +46,7 @@ const DEFAULT_GEMINI_INPUTS = {
'gemini.thinking_adapter_enabled': false, 'gemini.thinking_adapter_enabled': false,
'gemini.thinking_adapter_budget_tokens_percentage': 0.6, 'gemini.thinking_adapter_budget_tokens_percentage': 0.6,
'gemini.function_call_thought_signature_enabled': true, 'gemini.function_call_thought_signature_enabled': true,
'gemini.remove_function_response_id_enabled': true,
}; };
export default function SettingGeminiModel(props) { export default function SettingGeminiModel(props) {
@@ -186,6 +187,23 @@ export default function SettingGeminiModel(props) {
/> />
</Col> </Col>
</Row> </Row>
<Row>
<Col span={16}>
<Form.Switch
label={t('移除 functionResponse.id 字段')}
field={'gemini.remove_function_response_id_enabled'}
extraText={t(
'Vertex AI 不支持 functionResponse.id 字段,开启后将自动移除该字段',
)}
onChange={(value) =>
setInputs({
...inputs,
'gemini.remove_function_response_id_enabled': value,
})
}
/>
</Col>
</Row>
<Row> <Row>
<Col xs={24} sm={12} md={8} lg={8} xl={8}> <Col xs={24} sm={12} md={8} lg={8} xl={8}>
<Form.TextArea <Form.TextArea

View File

@@ -21,6 +21,7 @@ import react from '@vitejs/plugin-react';
import { defineConfig, transformWithEsbuild } from 'vite'; import { defineConfig, transformWithEsbuild } from 'vite';
import pkg from '@douyinfe/vite-plugin-semi'; import pkg from '@douyinfe/vite-plugin-semi';
import path from 'path'; import path from 'path';
import { codeInspectorPlugin } from 'code-inspector-plugin';
const { vitePluginSemi } = pkg; const { vitePluginSemi } = pkg;
// https://vitejs.dev/config/ // https://vitejs.dev/config/
@@ -31,6 +32,9 @@ export default defineConfig({
}, },
}, },
plugins: [ plugins: [
codeInspectorPlugin({
bundler: 'vite',
}),
{ {
name: 'treat-js-files-as-jsx', name: 'treat-js-files-as-jsx',
async transform(code, id) { async transform(code, id) {