mirror of
https://github.com/openclaw/openclaw.git
synced 2026-05-10 13:44:58 +00:00
fix(usage): clamp negative input token counts to zero
Some OpenAI-format providers (via pi-ai) pre-subtract cached_tokens from prompt_tokens upstream. When cached_tokens exceeds prompt_tokens due to provider inconsistencies the subtraction produces a negative input value that flows through to the TUI status bar and /usage dashboard. Clamp rawInput to 0 in normalizeUsage() so downstream consumers never see nonsensical negative token counts. Closes #30765 Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
committed by
Peter Steinberger
parent
08c35eb13f
commit
20467d987d
@@ -90,9 +90,13 @@ export function normalizeUsage(raw?: UsageLike | null): NormalizedUsage | undefi
     return undefined;
   }

-  const input = asFiniteNumber(
+  // Some providers (pi-ai OpenAI-format) pre-subtract cached_tokens from
+  // prompt_tokens upstream. When cached_tokens > prompt_tokens the result is
+  // negative, which is nonsensical. Clamp to 0.
+  const rawInput = asFiniteNumber(
     raw.input ?? raw.inputTokens ?? raw.input_tokens ?? raw.promptTokens ?? raw.prompt_tokens,
   );
+  const input = rawInput !== undefined && rawInput < 0 ? 0 : rawInput;
   const output = asFiniteNumber(
     raw.output ??
       raw.outputTokens ??
Reference in New Issue
Block a user