test: dedupe repeated test fixtures and assertions

Peter Steinberger
2026-02-22 18:37:01 +00:00
parent 0e4f3ccbdf
commit 53ed7a0f5c
10 changed files with 166 additions and 212 deletions


@@ -5,6 +5,23 @@ import "./test-helpers/fast-coding-tools.js";
 import { afterAll, beforeAll, describe, expect, it, vi } from "vitest";
 import type { OpenClawConfig } from "../config/config.js";
 
+function createMockUsage(input: number, output: number) {
+  return {
+    input,
+    output,
+    cacheRead: 0,
+    cacheWrite: 0,
+    totalTokens: input + output,
+    cost: {
+      input: 0,
+      output: 0,
+      cacheRead: 0,
+      cacheWrite: 0,
+      total: 0,
+    },
+  };
+}
+
 vi.mock("@mariozechner/pi-coding-agent", async () => {
   const actual = await vi.importActual<typeof import("@mariozechner/pi-coding-agent")>(
     "@mariozechner/pi-coding-agent",
@@ -40,20 +57,7 @@ vi.mock("@mariozechner/pi-ai", async () => {
       api: model.api,
       provider: model.provider,
       model: model.id,
-      usage: {
-        input: 1,
-        output: 1,
-        cacheRead: 0,
-        cacheWrite: 0,
-        totalTokens: 2,
-        cost: {
-          input: 0,
-          output: 0,
-          cacheRead: 0,
-          cacheWrite: 0,
-          total: 0,
-        },
-      },
+      usage: createMockUsage(1, 1),
       timestamp: Date.now(),
     });
@@ -65,20 +69,7 @@ vi.mock("@mariozechner/pi-ai", async () => {
       api: model.api,
       provider: model.provider,
       model: model.id,
-      usage: {
-        input: 0,
-        output: 0,
-        cacheRead: 0,
-        cacheWrite: 0,
-        totalTokens: 0,
-        cost: {
-          input: 0,
-          output: 0,
-          cacheRead: 0,
-          cacheWrite: 0,
-          total: 0,
-        },
-      },
+      usage: createMockUsage(0, 0),
       timestamp: Date.now(),
     });
@@ -314,20 +305,7 @@ describe.concurrent("runEmbeddedPiAgent", () => {
api: "openai-responses",
provider: "openai",
model: "mock-1",
usage: {
input: 1,
output: 1,
cacheRead: 0,
cacheWrite: 0,
totalTokens: 2,
cost: {
input: 0,
output: 0,
cacheRead: 0,
cacheWrite: 0,
total: 0,
},
},
usage: createMockUsage(1, 1),
timestamp: Date.now(),
});
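
The factory replaces each inlined usage fixture, so token totals are derived from the arguments rather than hand-computed per test. A minimal sketch of how a test can lean on that property (hypothetical test, assuming vitest and the `createMockUsage` helper above are in scope):

```ts
import { expect, it } from "vitest";

// Hypothetical check: totalTokens is derived from the arguments,
// so editing a fixture's inputs can no longer desync the total.
it("keeps totals consistent with inputs", () => {
  const usage = createMockUsage(3, 5);
  expect(usage.totalTokens).toBe(8); // input + output
  expect(usage.cost.total).toBe(0); // mock costs stay zeroed
});
```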


@@ -4,6 +4,32 @@ import { createAssistantMessageEventStream } from "@mariozechner/pi-ai";
 import { describe, expect, it } from "vitest";
 import { applyExtraParamsToAgent } from "./extra-params.js";
 
+type StreamPayload = {
+  messages: Array<{
+    role: string;
+    content: unknown;
+  }>;
+};
+
+function runOpenRouterPayload(payload: StreamPayload, modelId: string) {
+  const baseStreamFn: StreamFn = (_model, _context, options) => {
+    options?.onPayload?.(payload);
+    return createAssistantMessageEventStream();
+  };
+  const agent = { streamFn: baseStreamFn };
+  applyExtraParamsToAgent(agent, undefined, "openrouter", modelId);
+  const model = {
+    api: "openai-completions",
+    provider: "openrouter",
+    id: modelId,
+  } as Model<"openai-completions">;
+  const context: Context = { messages: [] };
+  void agent.streamFn?.(model, context, {});
+}
+
 describe("extra-params: OpenRouter Anthropic cache_control", () => {
   it("injects cache_control into system message for OpenRouter Anthropic models", () => {
     const payload = {
@@ -12,22 +38,8 @@ describe("extra-params: OpenRouter Anthropic cache_control", () => {
{ role: "user", content: "Hello" },
],
};
const baseStreamFn: StreamFn = (_model, _context, options) => {
options?.onPayload?.(payload);
return createAssistantMessageEventStream();
};
const agent = { streamFn: baseStreamFn };
applyExtraParamsToAgent(agent, undefined, "openrouter", "anthropic/claude-opus-4-6");
const model = {
api: "openai-completions",
provider: "openrouter",
id: "anthropic/claude-opus-4-6",
} as Model<"openai-completions">;
const context: Context = { messages: [] };
void agent.streamFn?.(model, context, {});
runOpenRouterPayload(payload, "anthropic/claude-opus-4-6");
expect(payload.messages[0].content).toEqual([
{ type: "text", text: "You are a helpful assistant.", cache_control: { type: "ephemeral" } },
@@ -47,22 +59,8 @@ describe("extra-params: OpenRouter Anthropic cache_control", () => {
         },
       ],
     };
-    const baseStreamFn: StreamFn = (_model, _context, options) => {
-      options?.onPayload?.(payload);
-      return createAssistantMessageEventStream();
-    };
-    const agent = { streamFn: baseStreamFn };
-    applyExtraParamsToAgent(agent, undefined, "openrouter", "anthropic/claude-opus-4-6");
-    const model = {
-      api: "openai-completions",
-      provider: "openrouter",
-      id: "anthropic/claude-opus-4-6",
-    } as Model<"openai-completions">;
-    const context: Context = { messages: [] };
-    void agent.streamFn?.(model, context, {});
+    runOpenRouterPayload(payload, "anthropic/claude-opus-4-6");
     const content = payload.messages[0].content as Array<Record<string, unknown>>;
     expect(content[0]).toEqual({ type: "text", text: "Part 1" });
@@ -77,23 +75,19 @@ describe("extra-params: OpenRouter Anthropic cache_control", () => {
     const payload = {
       messages: [{ role: "system", content: "You are a helpful assistant." }],
     };
-    const baseStreamFn: StreamFn = (_model, _context, options) => {
-      options?.onPayload?.(payload);
-      return createAssistantMessageEventStream();
-    };
-    const agent = { streamFn: baseStreamFn };
-    applyExtraParamsToAgent(agent, undefined, "openrouter", "google/gemini-3-pro");
-    const model = {
-      api: "openai-completions",
-      provider: "openrouter",
-      id: "google/gemini-3-pro",
-    } as Model<"openai-completions">;
-    const context: Context = { messages: [] };
-    void agent.streamFn?.(model, context, {});
+    runOpenRouterPayload(payload, "google/gemini-3-pro");
     expect(payload.messages[0].content).toBe("You are a helpful assistant.");
   });
+
+  it("leaves payload unchanged when no system message exists", () => {
+    const payload = {
+      messages: [{ role: "user", content: "Hello" }],
+    };
+    runOpenRouterPayload(payload, "anthropic/claude-opus-4-6");
+    expect(payload.messages[0].content).toBe("Hello");
+  });
 });
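
With the stream setup folded into `runOpenRouterPayload`, each test reduces to payload in, assertion out. If more providers need coverage, the same helper extends naturally to a table-driven form; a hypothetical sketch (model IDs and expected behavior mirror the tests above, the case table itself is illustrative):

```ts
import { expect, it } from "vitest";

// Hypothetical table-driven variant reusing runOpenRouterPayload.
const cases = [
  { modelId: "anthropic/claude-opus-4-6", expectCacheControl: true },
  { modelId: "google/gemini-3-pro", expectCacheControl: false },
];

for (const { modelId, expectCacheControl } of cases) {
  it(`cache_control handling for ${modelId}`, () => {
    const payload = {
      messages: [{ role: "system", content: "You are a helpful assistant." }],
    };
    runOpenRouterPayload(payload, modelId);
    if (expectCacheControl) {
      // Anthropic models get the system prompt wrapped in structured parts.
      expect(Array.isArray(payload.messages[0].content)).toBe(true);
    } else {
      // Other providers keep the plain string system prompt.
      expect(payload.messages[0].content).toBe("You are a helpful assistant.");
    }
  });
}
```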