Mirror of https://github.com/openclaw/openclaw.git, synced 2026-05-08 11:11:23 +00:00.
* initial commit
* feat: implement deriveSessionTotalTokens function and update usage tests
  - Added deriveSessionTotalTokens function to calculate total tokens based on usage and context tokens.
  - Updated usage tests to include cases for derived session total tokens.
  - Refactored session usage calculations in multiple files to use the new function for improved accuracy.
* fix: restore overflow truncation fallback + changelog/test hardening (#11551) (thanks @tyler6204)
78 lines
2.0 KiB
TypeScript
78 lines
2.0 KiB
TypeScript
import { describe, expect, it } from "vitest";
|
|
import { deriveSessionTotalTokens, hasNonzeroUsage, normalizeUsage } from "./usage.js";
|
|
|
|
describe("normalizeUsage", () => {
|
|
it("normalizes Anthropic-style snake_case usage", () => {
|
|
const usage = normalizeUsage({
|
|
input_tokens: 1200,
|
|
output_tokens: 340,
|
|
cache_creation_input_tokens: 200,
|
|
cache_read_input_tokens: 50,
|
|
total_tokens: 1790,
|
|
});
|
|
expect(usage).toEqual({
|
|
input: 1200,
|
|
output: 340,
|
|
cacheRead: 50,
|
|
cacheWrite: 200,
|
|
total: 1790,
|
|
});
|
|
});
|
|
|
|
it("normalizes OpenAI-style prompt/completion usage", () => {
|
|
const usage = normalizeUsage({
|
|
prompt_tokens: 987,
|
|
completion_tokens: 123,
|
|
total_tokens: 1110,
|
|
});
|
|
expect(usage).toEqual({
|
|
input: 987,
|
|
output: 123,
|
|
cacheRead: undefined,
|
|
cacheWrite: undefined,
|
|
total: 1110,
|
|
});
|
|
});
|
|
|
|
it("returns undefined for empty usage objects", () => {
|
|
expect(normalizeUsage({})).toBeUndefined();
|
|
});
|
|
|
|
it("guards against empty/zero usage overwrites", () => {
|
|
expect(hasNonzeroUsage(undefined)).toBe(false);
|
|
expect(hasNonzeroUsage(null)).toBe(false);
|
|
expect(hasNonzeroUsage({})).toBe(false);
|
|
expect(hasNonzeroUsage({ input: 0, output: 0 })).toBe(false);
|
|
expect(hasNonzeroUsage({ input: 1 })).toBe(true);
|
|
expect(hasNonzeroUsage({ total: 1 })).toBe(true);
|
|
});
|
|
|
|
it("caps derived session total tokens to the context window", () => {
|
|
expect(
|
|
deriveSessionTotalTokens({
|
|
usage: {
|
|
input: 27,
|
|
cacheRead: 2_400_000,
|
|
cacheWrite: 0,
|
|
total: 2_402_300,
|
|
},
|
|
contextTokens: 200_000,
|
|
}),
|
|
).toBe(200_000);
|
|
});
|
|
|
|
it("uses prompt tokens when within context window", () => {
|
|
expect(
|
|
deriveSessionTotalTokens({
|
|
usage: {
|
|
input: 1_200,
|
|
cacheRead: 300,
|
|
cacheWrite: 50,
|
|
total: 2_000,
|
|
},
|
|
contextTokens: 200_000,
|
|
}),
|
|
).toBe(1_550);
|
|
});
|
|
});
|