chore: sync model price data (84c8298bbd0e6e9b6f1812ee81b449a4e20c712094ae0a71f362364ecd8aa7a1)

This commit is contained in:
github-actions[bot]
2025-11-20 02:49:29 +00:00
parent d84eaf49a8
commit 7344fe0172
2 changed files with 517 additions and 1 deletion
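
For reference, the cost fields added below are per-token multipliers. A minimal sketch of how such an entry would typically be consumed, using the "azure/gpt-5.1" values from this diff; estimate_cost is a hypothetical helper, not a LiteLLM API:

    # Minimal sketch (not part of the synced file): estimating request cost
    # from the per-token fields added in this diff. Values are the
    # "azure/gpt-5.1" entry; estimate_cost is a hypothetical helper.
    pricing = {
        "input_cost_per_token": 1.25e-06,
        "output_cost_per_token": 1e-05,
        "cache_read_input_token_cost": 1.25e-07,
    }

    def estimate_cost(input_tokens: int, output_tokens: int,
                      cached_input_tokens: int = 0) -> float:
        """USD cost: uncached input + cache-read input + output, each priced per token."""
        uncached = input_tokens - cached_input_tokens
        return (
            uncached * pricing["input_cost_per_token"]
            + cached_input_tokens * pricing["cache_read_input_token_cost"]
            + output_tokens * pricing["output_cost_per_token"]
        )

    # Example: 10k input tokens (2k served from the prompt cache), 1k output tokens.
    print(f"${estimate_cost(10_000, 1_000, cached_input_tokens=2_000):.5f}")  # ~$0.02025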


@@ -1297,6 +1297,132 @@
"supports_tool_choice": true,
"supports_vision": true
},
"azure/eu/gpt-5.1": {
"cache_read_input_token_cost": 1.4e-07,
"input_cost_per_token": 1.38e-06,
"litellm_provider": "azure",
"max_input_tokens": 272000,
"max_output_tokens": 128000,
"max_tokens": 128000,
"mode": "chat",
"output_cost_per_token": 1.1e-05,
"supported_endpoints": [
"/v1/chat/completions",
"/v1/batch",
"/v1/responses"
],
"supported_modalities": [
"text",
"image"
],
"supported_output_modalities": [
"text",
"image"
],
"supports_function_calling": true,
"supports_native_streaming": true,
"supports_parallel_function_calling": true,
"supports_pdf_input": true,
"supports_prompt_caching": true,
"supports_reasoning": true,
"supports_response_schema": true,
"supports_system_messages": true,
"supports_tool_choice": true,
"supports_vision": true
},
"azure/eu/gpt-5.1-chat": {
"cache_read_input_token_cost": 1.4e-07,
"input_cost_per_token": 1.38e-06,
"litellm_provider": "azure",
"max_input_tokens": 272000,
"max_output_tokens": 128000,
"max_tokens": 128000,
"mode": "chat",
"output_cost_per_token": 1.1e-05,
"supported_endpoints": [
"/v1/chat/completions",
"/v1/batch",
"/v1/responses"
],
"supported_modalities": [
"text",
"image"
],
"supported_output_modalities": [
"text",
"image"
],
"supports_function_calling": true,
"supports_native_streaming": true,
"supports_parallel_function_calling": true,
"supports_pdf_input": true,
"supports_prompt_caching": true,
"supports_reasoning": true,
"supports_response_schema": true,
"supports_system_messages": true,
"supports_tool_choice": true,
"supports_vision": true
},
"azure/eu/gpt-5.1-codex": {
"cache_read_input_token_cost": 1.4e-07,
"input_cost_per_token": 1.38e-06,
"litellm_provider": "azure",
"max_input_tokens": 272000,
"max_output_tokens": 128000,
"max_tokens": 128000,
"mode": "responses",
"output_cost_per_token": 1.1e-05,
"supported_endpoints": [
"/v1/responses"
],
"supported_modalities": [
"text",
"image"
],
"supported_output_modalities": [
"text"
],
"supports_function_calling": true,
"supports_native_streaming": true,
"supports_parallel_function_calling": true,
"supports_pdf_input": true,
"supports_prompt_caching": true,
"supports_reasoning": true,
"supports_response_schema": true,
"supports_system_messages": false,
"supports_tool_choice": true,
"supports_vision": true
},
"azure/eu/gpt-5.1-codex-mini": {
"cache_read_input_token_cost": 2.8e-08,
"input_cost_per_token": 2.75e-07,
"litellm_provider": "azure",
"max_input_tokens": 272000,
"max_output_tokens": 128000,
"max_tokens": 128000,
"mode": "responses",
"output_cost_per_token": 2.2e-06,
"supported_endpoints": [
"/v1/responses"
],
"supported_modalities": [
"text",
"image"
],
"supported_output_modalities": [
"text"
],
"supports_function_calling": true,
"supports_native_streaming": true,
"supports_parallel_function_calling": true,
"supports_pdf_input": true,
"supports_prompt_caching": true,
"supports_reasoning": true,
"supports_response_schema": true,
"supports_system_messages": false,
"supports_tool_choice": true,
"supports_vision": true
},
"azure/eu/gpt-5-nano-2025-08-07": {
"cache_read_input_token_cost": 5.5e-09,
"input_cost_per_token": 5.5e-08,
@@ -1471,6 +1597,132 @@
"supports_tool_choice": true,
"supports_vision": true
},
"azure/global/gpt-5.1": {
"cache_read_input_token_cost": 1.25e-07,
"input_cost_per_token": 1.25e-06,
"litellm_provider": "azure",
"max_input_tokens": 272000,
"max_output_tokens": 128000,
"max_tokens": 128000,
"mode": "chat",
"output_cost_per_token": 1e-05,
"supported_endpoints": [
"/v1/chat/completions",
"/v1/batch",
"/v1/responses"
],
"supported_modalities": [
"text",
"image"
],
"supported_output_modalities": [
"text",
"image"
],
"supports_function_calling": true,
"supports_native_streaming": true,
"supports_parallel_function_calling": true,
"supports_pdf_input": true,
"supports_prompt_caching": true,
"supports_reasoning": true,
"supports_response_schema": true,
"supports_system_messages": true,
"supports_tool_choice": true,
"supports_vision": true
},
"azure/global/gpt-5.1-chat": {
"cache_read_input_token_cost": 1.25e-07,
"input_cost_per_token": 1.25e-06,
"litellm_provider": "azure",
"max_input_tokens": 272000,
"max_output_tokens": 128000,
"max_tokens": 128000,
"mode": "chat",
"output_cost_per_token": 1e-05,
"supported_endpoints": [
"/v1/chat/completions",
"/v1/batch",
"/v1/responses"
],
"supported_modalities": [
"text",
"image"
],
"supported_output_modalities": [
"text",
"image"
],
"supports_function_calling": true,
"supports_native_streaming": true,
"supports_parallel_function_calling": true,
"supports_pdf_input": true,
"supports_prompt_caching": true,
"supports_reasoning": true,
"supports_response_schema": true,
"supports_system_messages": true,
"supports_tool_choice": true,
"supports_vision": true
},
"azure/global/gpt-5.1-codex": {
"cache_read_input_token_cost": 1.25e-07,
"input_cost_per_token": 1.25e-06,
"litellm_provider": "azure",
"max_input_tokens": 272000,
"max_output_tokens": 128000,
"max_tokens": 128000,
"mode": "responses",
"output_cost_per_token": 1e-05,
"supported_endpoints": [
"/v1/responses"
],
"supported_modalities": [
"text",
"image"
],
"supported_output_modalities": [
"text"
],
"supports_function_calling": true,
"supports_native_streaming": true,
"supports_parallel_function_calling": true,
"supports_pdf_input": true,
"supports_prompt_caching": true,
"supports_reasoning": true,
"supports_response_schema": true,
"supports_system_messages": false,
"supports_tool_choice": true,
"supports_vision": true
},
"azure/global/gpt-5.1-codex-mini": {
"cache_read_input_token_cost": 2.5e-08,
"input_cost_per_token": 2.5e-07,
"litellm_provider": "azure",
"max_input_tokens": 272000,
"max_output_tokens": 128000,
"max_tokens": 128000,
"mode": "responses",
"output_cost_per_token": 2e-06,
"supported_endpoints": [
"/v1/responses"
],
"supported_modalities": [
"text",
"image"
],
"supported_output_modalities": [
"text"
],
"supports_function_calling": true,
"supports_native_streaming": true,
"supports_parallel_function_calling": true,
"supports_pdf_input": true,
"supports_prompt_caching": true,
"supports_reasoning": true,
"supports_response_schema": true,
"supports_system_messages": false,
"supports_tool_choice": true,
"supports_vision": true
},
"azure/gpt-3.5-turbo": {
"input_cost_per_token": 5e-07,
"litellm_provider": "azure",
@@ -2778,6 +3030,132 @@
"supports_tool_choice": true,
"supports_vision": true
},
"azure/gpt-5.1": {
"cache_read_input_token_cost": 1.25e-07,
"input_cost_per_token": 1.25e-06,
"litellm_provider": "azure",
"max_input_tokens": 272000,
"max_output_tokens": 128000,
"max_tokens": 128000,
"mode": "chat",
"output_cost_per_token": 1e-05,
"supported_endpoints": [
"/v1/chat/completions",
"/v1/batch",
"/v1/responses"
],
"supported_modalities": [
"text",
"image"
],
"supported_output_modalities": [
"text",
"image"
],
"supports_function_calling": true,
"supports_native_streaming": true,
"supports_parallel_function_calling": true,
"supports_pdf_input": true,
"supports_prompt_caching": true,
"supports_reasoning": true,
"supports_response_schema": true,
"supports_system_messages": true,
"supports_tool_choice": true,
"supports_vision": true
},
"azure/gpt-5.1-chat": {
"cache_read_input_token_cost": 1.25e-07,
"input_cost_per_token": 1.25e-06,
"litellm_provider": "azure",
"max_input_tokens": 272000,
"max_output_tokens": 128000,
"max_tokens": 128000,
"mode": "chat",
"output_cost_per_token": 1e-05,
"supported_endpoints": [
"/v1/chat/completions",
"/v1/batch",
"/v1/responses"
],
"supported_modalities": [
"text",
"image"
],
"supported_output_modalities": [
"text",
"image"
],
"supports_function_calling": true,
"supports_native_streaming": true,
"supports_parallel_function_calling": true,
"supports_pdf_input": true,
"supports_prompt_caching": true,
"supports_reasoning": true,
"supports_response_schema": true,
"supports_system_messages": true,
"supports_tool_choice": true,
"supports_vision": true
},
"azure/gpt-5.1-codex": {
"cache_read_input_token_cost": 1.25e-07,
"input_cost_per_token": 1.25e-06,
"litellm_provider": "azure",
"max_input_tokens": 272000,
"max_output_tokens": 128000,
"max_tokens": 128000,
"mode": "responses",
"output_cost_per_token": 1e-05,
"supported_endpoints": [
"/v1/responses"
],
"supported_modalities": [
"text",
"image"
],
"supported_output_modalities": [
"text"
],
"supports_function_calling": true,
"supports_native_streaming": true,
"supports_parallel_function_calling": true,
"supports_pdf_input": true,
"supports_prompt_caching": true,
"supports_reasoning": true,
"supports_response_schema": true,
"supports_system_messages": false,
"supports_tool_choice": true,
"supports_vision": true
},
"azure/gpt-5.1-codex-mini": {
"cache_read_input_token_cost": 2.5e-08,
"input_cost_per_token": 2.5e-07,
"litellm_provider": "azure",
"max_input_tokens": 272000,
"max_output_tokens": 128000,
"max_tokens": 128000,
"mode": "responses",
"output_cost_per_token": 2e-06,
"supported_endpoints": [
"/v1/responses"
],
"supported_modalities": [
"text",
"image"
],
"supported_output_modalities": [
"text"
],
"supports_function_calling": true,
"supports_native_streaming": true,
"supports_parallel_function_calling": true,
"supports_pdf_input": true,
"supports_prompt_caching": true,
"supports_reasoning": true,
"supports_response_schema": true,
"supports_system_messages": false,
"supports_tool_choice": true,
"supports_vision": true
},
"azure/gpt-image-1": {
"input_cost_per_pixel": 4.0054321e-08,
"litellm_provider": "azure",
@@ -3695,6 +4073,132 @@
"supports_tool_choice": true,
"supports_vision": true
},
"azure/us/gpt-5.1": {
"cache_read_input_token_cost": 1.4e-07,
"input_cost_per_token": 1.38e-06,
"litellm_provider": "azure",
"max_input_tokens": 272000,
"max_output_tokens": 128000,
"max_tokens": 128000,
"mode": "chat",
"output_cost_per_token": 1.1e-05,
"supported_endpoints": [
"/v1/chat/completions",
"/v1/batch",
"/v1/responses"
],
"supported_modalities": [
"text",
"image"
],
"supported_output_modalities": [
"text",
"image"
],
"supports_function_calling": true,
"supports_native_streaming": true,
"supports_parallel_function_calling": true,
"supports_pdf_input": true,
"supports_prompt_caching": true,
"supports_reasoning": true,
"supports_response_schema": true,
"supports_system_messages": true,
"supports_tool_choice": true,
"supports_vision": true
},
"azure/us/gpt-5.1-chat": {
"cache_read_input_token_cost": 1.4e-07,
"input_cost_per_token": 1.38e-06,
"litellm_provider": "azure",
"max_input_tokens": 272000,
"max_output_tokens": 128000,
"max_tokens": 128000,
"mode": "chat",
"output_cost_per_token": 1.1e-05,
"supported_endpoints": [
"/v1/chat/completions",
"/v1/batch",
"/v1/responses"
],
"supported_modalities": [
"text",
"image"
],
"supported_output_modalities": [
"text",
"image"
],
"supports_function_calling": true,
"supports_native_streaming": true,
"supports_parallel_function_calling": true,
"supports_pdf_input": true,
"supports_prompt_caching": true,
"supports_reasoning": true,
"supports_response_schema": true,
"supports_system_messages": true,
"supports_tool_choice": true,
"supports_vision": true
},
"azure/us/gpt-5.1-codex": {
"cache_read_input_token_cost": 1.4e-07,
"input_cost_per_token": 1.38e-06,
"litellm_provider": "azure",
"max_input_tokens": 272000,
"max_output_tokens": 128000,
"max_tokens": 128000,
"mode": "responses",
"output_cost_per_token": 1.1e-05,
"supported_endpoints": [
"/v1/responses"
],
"supported_modalities": [
"text",
"image"
],
"supported_output_modalities": [
"text"
],
"supports_function_calling": true,
"supports_native_streaming": true,
"supports_parallel_function_calling": true,
"supports_pdf_input": true,
"supports_prompt_caching": true,
"supports_reasoning": true,
"supports_response_schema": true,
"supports_system_messages": false,
"supports_tool_choice": true,
"supports_vision": true
},
"azure/us/gpt-5.1-codex-mini": {
"cache_read_input_token_cost": 2.8e-08,
"input_cost_per_token": 2.75e-07,
"litellm_provider": "azure",
"max_input_tokens": 272000,
"max_output_tokens": 128000,
"max_tokens": 128000,
"mode": "responses",
"output_cost_per_token": 2.2e-06,
"supported_endpoints": [
"/v1/responses"
],
"supported_modalities": [
"text",
"image"
],
"supported_output_modalities": [
"text"
],
"supports_function_calling": true,
"supports_native_streaming": true,
"supports_parallel_function_calling": true,
"supports_pdf_input": true,
"supports_prompt_caching": true,
"supports_reasoning": true,
"supports_response_schema": true,
"supports_system_messages": false,
"supports_tool_choice": true,
"supports_vision": true
},
"azure/us/o1-2024-12-17": {
"cache_read_input_token_cost": 8.25e-06,
"input_cost_per_token": 1.65e-05,
@@ -23930,6 +24434,12 @@
"supports_reasoning": true,
"supports_tool_choice": true
},
"vertex_ai/gemini-2.5-flash-image": {
"litellm_provider": "vertex_ai-language-models",
"mode": "image_generation",
"output_cost_per_image": 0.039,
"source": "https://cloud.google.com/vertex-ai/generative-ai/docs/multimodal/image-generation#edit-an-image"
},
"vertex_ai/imagegeneration@006": {
"litellm_provider": "vertex_ai-image-models",
"mode": "image_generation",
@@ -23954,6 +24464,12 @@
"output_cost_per_image": 0.04,
"source": "https://cloud.google.com/vertex-ai/generative-ai/pricing"
},
"vertex_ai/imagen-3.0-capability-001": {
"litellm_provider": "vertex_ai-image-models",
"mode": "image_generation",
"output_cost_per_image": 0.04,
"source": "https://cloud.google.com/vertex-ai/generative-ai/docs/image/edit-insert-objects"
},
"vertex_ai/imagen-4.0-fast-generate-001": {
"litellm_provider": "vertex_ai-image-models",
"mode": "image_generation",