fix(logging): include model and provider in overload/error log (#41236)

Merged via squash.

Prepared head SHA: bb16fecbf7
Co-authored-by: jiarung <16461359+jiarung@users.noreply.github.com>
Co-authored-by: altaywtf <9790196+altaywtf@users.noreply.github.com>
Reviewed-by: @altaywtf
This commit is contained in:
jiarung
2026-03-11 01:32:14 +08:00
committed by GitHub
parent 3b582f1d54
commit bc9b35d6ce
3 changed files with 32 additions and 3 deletions

View File

@@ -67,6 +67,7 @@ Docs: https://docs.openclaw.ai
- Plugins/global hook runner: harden singleton state handling so shared global hook runner reuse does not leak or corrupt runner state across executions. (#40184) Thanks @vincentkoc.
- Agents/fallback: recognize Poe `402 You've used up your points!` billing errors so configured model fallbacks trigger instead of surfacing the raw provider error. (#42278) Thanks @CryUshio.
- Telegram/outbound HTML sends: chunk long HTML-mode messages, preserve plain-text fallback and silent-delivery params across retries, and cut over to plain text when HTML chunk planning cannot safely preserve the full message. (#42240) Thanks @obviyus.
- Agents/embedded overload logs: include the failing model and provider in error-path console output, with lifecycle regression coverage for the rendered and sanitized `consoleMessage`. (#41236) Thanks @jiarung.
## 2026.3.8

View File

@@ -70,7 +70,7 @@ describe("handleAgentEnd", () => {
});
});
it("attaches raw provider error metadata without changing the console message", () => {
it("attaches raw provider error metadata and includes model/provider in console output", () => {
const ctx = createContext({
role: "assistant",
stopReason: "error",
@@ -91,9 +91,35 @@ describe("handleAgentEnd", () => {
error: "The AI service is temporarily overloaded. Please try again in a moment.",
failoverReason: "overloaded",
providerErrorType: "overloaded_error",
consoleMessage:
"embedded run agent end: runId=run-1 isError=true model=claude-test provider=anthropic error=The AI service is temporarily overloaded. Please try again in a moment.",
});
});
it("sanitizes model and provider before writing consoleMessage", () => {
  // Feed model/provider strings laced with control characters (tab, newline,
  // and an ANSI OSC 8 hyperlink escape) through the error path.
  const context = createContext({
    role: "assistant",
    stopReason: "error",
    provider: "anthropic\u001b]8;;https://evil.test\u0007",
    model: "claude\tsonnet\n4",
    errorMessage: "connection refused",
    content: [{ type: "text", text: "" }],
  });
  handleAgentEnd(context);
  // The sanitized values must appear in the structured metadata handed to log.warn.
  const warnMock = vi.mocked(context.log.warn);
  const metadata = warnMock.mock.calls[0]?.[1];
  expect(metadata).toMatchObject({
    consoleMessage:
      "embedded run agent end: runId=run-1 isError=true model=claude sonnet 4 provider=anthropic]8;;https://evil.test error=connection refused",
  });
  // No raw control characters may survive sanitization.
  for (const forbidden of ["\n", "\r", "\t", "\u001b"]) {
    expect(metadata?.consoleMessage).not.toContain(forbidden);
  }
});
it("redacts logged error text before emitting lifecycle events", () => {
const onAgentEvent = vi.fn();
const ctx = createContext(

View File

@@ -48,6 +48,8 @@ export function handleAgentEnd(ctx: EmbeddedPiSubscribeContext) {
const safeErrorText =
buildTextObservationFields(errorText).textPreview ?? "LLM request failed.";
const safeRunId = sanitizeForConsole(ctx.params.runId) ?? "-";
const safeModel = sanitizeForConsole(lastAssistant.model) ?? "unknown";
const safeProvider = sanitizeForConsole(lastAssistant.provider) ?? "unknown";
ctx.log.warn("embedded run agent end", {
event: "embedded_run_agent_end",
tags: ["error_handling", "lifecycle", "agent_end", "assistant_error"],
@@ -55,10 +57,10 @@ export function handleAgentEnd(ctx: EmbeddedPiSubscribeContext) {
isError: true,
error: safeErrorText,
failoverReason,
provider: lastAssistant.provider,
model: lastAssistant.model,
provider: lastAssistant.provider,
...observedError,
consoleMessage: `embedded run agent end: runId=${safeRunId} isError=true error=${safeErrorText}`,
consoleMessage: `embedded run agent end: runId=${safeRunId} isError=true model=${safeModel} provider=${safeProvider} error=${safeErrorText}`,
});
emitAgentEvent({
runId: ctx.params.runId,