feat: Provider/Mistral full support for Mistral on OpenClaw 🇫🇷 (#23845)

* Onboard: add Mistral auth choice and CLI flags

* Onboard/Auth: add Mistral provider config defaults

* Auth choice: wire Mistral API-key flow

* Onboard non-interactive: support --mistral-api-key

* Media understanding: add Mistral Voxtral audio provider

* Changelog: note Mistral onboarding and media support

* Docs: add Mistral provider and onboarding/media references

* Tests: cover Mistral media registry/defaults and auth mapping

* Memory: add Mistral embeddings provider support

* Onboarding: refresh Mistral model metadata

* Docs: document Mistral embeddings and endpoints

* Memory: persist Mistral embedding client state in managers

* Memory: add regression tests for Mistral provider wiring

* Gateway: add live tool probe retry helper

* Gateway: cover live tool probe retry helper

* Gateway: retry malformed live tool-read probe responses

* Memory: support plain-text batch error bodies

* Tests: add Mistral Voxtral live transcription smoke

* Docs: add Mistral live audio test command

* Revert: remove Mistral live voice test and docs entry

* Onboard: re-export Mistral default model ref from models

* Changelog: credit joeVenner for Mistral work

* fix: include Mistral in auto audio key fallback

* Update CHANGELOG.md

* Update CHANGELOG.md

---------

Co-authored-by: Shakker <shakkerdroid@gmail.com>
This commit is contained in:
Vincent Koc
2026-02-22 19:03:56 -05:00
committed by GitHub
parent a66b98a9da
commit d92ba4f8aa
55 changed files with 996 additions and 66 deletions

View File

@@ -661,7 +661,7 @@ export const FIELD_HELP: Record<string, string> = {
"agents.defaults.memorySearch.experimental.sessionMemory":
"Indexes session transcripts into memory search so responses can reference prior chat turns. Keep this off unless transcript recall is needed, because indexing cost and storage usage both increase.",
"agents.defaults.memorySearch.provider":
'Selects the embedding backend used to build/query memory vectors: "openai", "gemini", "voyage", or "local". Keep your most reliable provider here and configure fallback for resilience.',
'Selects the embedding backend used to build/query memory vectors: "openai", "gemini", "voyage", "mistral", or "local". Keep your most reliable provider here and configure fallback for resilience.',
"agents.defaults.memorySearch.model":
"Embedding model override used by the selected memory provider when a non-default model is required. Set this only when you need explicit recall quality/cost tuning beyond provider defaults.",
"agents.defaults.memorySearch.remote.baseUrl":
@@ -683,7 +683,7 @@ export const FIELD_HELP: Record<string, string> = {
"agents.defaults.memorySearch.local.modelPath":
"Specifies the local embedding model source for local memory search, such as a GGUF file path or `hf:` URI. Use this only when provider is `local`, and verify model compatibility before large index rebuilds.",
"agents.defaults.memorySearch.fallback":
'Backup provider used when primary embeddings fail: "openai", "gemini", "local", or "none". Set a real fallback for production reliability; use "none" only if you prefer explicit failures.',
'Backup provider used when primary embeddings fail: "openai", "gemini", "voyage", "mistral", "local", or "none". Set a real fallback for production reliability; use "none" only if you prefer explicit failures.',
"agents.defaults.memorySearch.store.path":
"Sets where the SQLite memory index is stored on disk for each agent. Keep the default `~/.openclaw/memory/{agentId}.sqlite` unless you need custom storage placement or backup policy alignment.",
"agents.defaults.memorySearch.store.vector.enabled":