feat: Provider/Mistral full support for Mistral on OpenClaw 🇫🇷 (#23845)

* Onboard: add Mistral auth choice and CLI flags

* Onboard/Auth: add Mistral provider config defaults

* Auth choice: wire Mistral API-key flow

* Onboard non-interactive: support --mistral-api-key

* Media understanding: add Mistral Voxtral audio provider

* Changelog: note Mistral onboarding and media support

* Docs: add Mistral provider and onboarding/media references

* Tests: cover Mistral media registry/defaults and auth mapping

* Memory: add Mistral embeddings provider support

* Onboarding: refresh Mistral model metadata

* Docs: document Mistral embeddings and endpoints

* Memory: persist Mistral embedding client state in managers

* Memory: add regressions for mistral provider wiring

* Gateway: add live tool probe retry helper

* Gateway: cover live tool probe retry helper

* Gateway: retry malformed live tool-read probe responses

* Memory: support plain-text batch error bodies

* Tests: add Mistral Voxtral live transcription smoke

* Docs: add Mistral live audio test command

* Revert: remove Mistral live voice test and docs entry

* Onboard: re-export Mistral default model ref from models

* Changelog: credit joeVenner for Mistral work

* fix: include Mistral in auto audio key fallback

* Update CHANGELOG.md

* Update CHANGELOG.md

---------

Co-authored-by: Shakker <shakkerdroid@gmail.com>
This commit is contained in:
Vincent Koc
2026-02-22 19:03:56 -05:00
committed by GitHub
parent a66b98a9da
commit d92ba4f8aa
55 changed files with 996 additions and 66 deletions

View File

@@ -28,6 +28,7 @@ import { DEFAULT_AGENT_ID } from "../routing/session-key.js";
import { GATEWAY_CLIENT_MODES, GATEWAY_CLIENT_NAMES } from "../utils/message-channel.js";
import { GatewayClient } from "./client.js";
import { renderCatNoncePngBase64 } from "./live-image-probe.js";
import { hasExpectedToolNonce, shouldRetryToolReadProbe } from "./live-tool-probe-utils.js";
import { startGatewayServer } from "./server.js";
import { extractPayloadText } from "./test-helpers.agent-results.js";
@@ -680,38 +681,75 @@ async function runGatewayModelSuite(params: GatewayModelSuiteParams) {
// Real tool invocation: force the agent to Read a local file and echo a nonce.
logProgress(`${progressLabel}: tool-read`);
const runIdTool = randomUUID();
const toolProbe = await client.request<AgentFinalPayload>(
"agent",
{
sessionKey,
idempotencyKey: `idem-${runIdTool}-tool`,
message:
"OpenClaw live tool probe (local, safe): " +
`use the tool named \`read\` (or \`Read\`) with JSON arguments {"path":"${toolProbePath}"}. ` +
"Then reply with the two nonce values you read (include both).",
thinking: params.thinkingLevel,
deliver: false,
},
{ expectFinal: true },
);
if (toolProbe?.status !== "ok") {
throw new Error(`tool probe failed: status=${String(toolProbe?.status)}`);
}
const toolText = extractPayloadText(toolProbe?.result);
if (
isEmptyStreamText(toolText) &&
(model.provider === "minimax" || model.provider === "openai-codex")
const maxToolReadAttempts = 3;
let toolText = "";
for (
let toolReadAttempt = 0;
toolReadAttempt < maxToolReadAttempts;
toolReadAttempt += 1
) {
logProgress(`${progressLabel}: skip (${model.provider} empty response)`);
break;
const strictReply = toolReadAttempt > 0;
const toolProbe = await client.request<AgentFinalPayload>(
"agent",
{
sessionKey,
idempotencyKey: `idem-${runIdTool}-tool-${toolReadAttempt + 1}`,
message: strictReply
? "OpenClaw live tool probe (local, safe): " +
`use the tool named \`read\` (or \`Read\`) with JSON arguments {"path":"${toolProbePath}"}. ` +
`Then reply with exactly: ${nonceA} ${nonceB}. No extra text.`
: "OpenClaw live tool probe (local, safe): " +
`use the tool named \`read\` (or \`Read\`) with JSON arguments {"path":"${toolProbePath}"}. ` +
"Then reply with the two nonce values you read (include both).",
thinking: params.thinkingLevel,
deliver: false,
},
{ expectFinal: true },
);
if (toolProbe?.status !== "ok") {
if (toolReadAttempt + 1 < maxToolReadAttempts) {
logProgress(
`${progressLabel}: tool-read retry (${toolReadAttempt + 2}/${maxToolReadAttempts}) status=${String(toolProbe?.status)}`,
);
continue;
}
throw new Error(`tool probe failed: status=${String(toolProbe?.status)}`);
}
toolText = extractPayloadText(toolProbe?.result);
if (
isEmptyStreamText(toolText) &&
(model.provider === "minimax" || model.provider === "openai-codex")
) {
logProgress(`${progressLabel}: skip (${model.provider} empty response)`);
break;
}
assertNoReasoningTags({
text: toolText,
model: modelKey,
phase: "tool-read",
label: params.label,
});
if (hasExpectedToolNonce(toolText, nonceA, nonceB)) {
break;
}
if (
shouldRetryToolReadProbe({
text: toolText,
nonceA,
nonceB,
provider: model.provider,
attempt: toolReadAttempt,
maxAttempts: maxToolReadAttempts,
})
) {
logProgress(
`${progressLabel}: tool-read retry (${toolReadAttempt + 2}/${maxToolReadAttempts}) malformed tool output`,
);
continue;
}
throw new Error(`tool probe missing nonce: ${toolText}`);
}
assertNoReasoningTags({
text: toolText,
model: modelKey,
phase: "tool-read",
label: params.label,
});
if (!toolText.includes(nonceA) || !toolText.includes(nonceB)) {
if (!hasExpectedToolNonce(toolText, nonceA, nonceB)) {
throw new Error(`tool probe missing nonce: ${toolText}`);
}

View File

@@ -0,0 +1,48 @@
import { describe, expect, it } from "vitest";
import { hasExpectedToolNonce, shouldRetryToolReadProbe } from "./live-tool-probe-utils.js";
describe("live tool probe utils", () => {
  // Factory for the retry-decision params; each case overrides only what it varies.
  const retryParams = (
    overrides: Partial<Parameters<typeof shouldRetryToolReadProbe>[0]> = {},
  ) => ({
    text: "read[object Object],[object Object]",
    nonceA: "nonce-a",
    nonceB: "nonce-b",
    provider: "mistral",
    attempt: 0,
    maxAttempts: 3,
    ...overrides,
  });

  it("matches nonce pair when both are present", () => {
    expect(hasExpectedToolNonce("value a-1 and b-2", "a-1", "b-2")).toBe(true);
    expect(hasExpectedToolNonce("value a-1 only", "a-1", "b-2")).toBe(false);
  });

  it("retries malformed tool output when attempts remain", () => {
    expect(shouldRetryToolReadProbe(retryParams())).toBe(true);
  });

  it("does not retry once max attempts are exhausted", () => {
    expect(shouldRetryToolReadProbe(retryParams({ attempt: 2 }))).toBe(false);
  });

  it("does not retry when nonce pair is already present", () => {
    expect(shouldRetryToolReadProbe(retryParams({ text: "nonce-a nonce-b" }))).toBe(false);
  });
});

View File

@@ -0,0 +1,34 @@
/**
 * True when the probe reply contains both expected nonce values.
 * Plain substring containment — the nonces may appear anywhere in the text.
 */
export function hasExpectedToolNonce(text: string, nonceA: string, nonceB: string): boolean {
  return [nonceA, nonceB].every((nonce) => text.includes(nonce));
}

/**
 * Decide whether a live tool-read probe reply looks malformed enough to retry.
 * Returns false once no attempts remain or when the reply already carries both
 * nonces; otherwise retries on empty replies and on replies that echo tool-call
 * machinery instead of the file contents.
 */
export function shouldRetryToolReadProbe(params: {
  text: string;
  nonceA: string;
  nonceB: string;
  provider: string;
  attempt: number;
  maxAttempts: number;
}): boolean {
  const { text, nonceA, nonceB, provider, attempt, maxAttempts } = params;

  // Never retry on the final attempt.
  if (attempt + 1 >= maxAttempts) {
    return false;
  }
  // A reply that already contains both nonces succeeded — nothing to retry.
  if (hasExpectedToolNonce(text, nonceA, nonceB)) {
    return false;
  }

  const trimmed = text.trim();
  // An empty reply is always retryable.
  if (trimmed.length === 0) {
    return true;
  }
  // Serialized-object residue: the model echoed tool output objects verbatim.
  if (trimmed.includes("[object Object]")) {
    return true;
  }

  const lower = trimmed.toLowerCase();
  // Replies that describe the tool invocation rather than its result.
  if (/\bread\s*\[/.test(lower) || /\btool\b/.test(lower) || /\bfunction\b/.test(lower)) {
    return true;
  }
  // Mistral sometimes echoes key=value argument text instead of the file body.
  if (provider === "mistral") {
    return lower.includes("noncea=") || lower.includes("nonceb=");
  }
  return false;
}