mirror of
https://github.com/openclaw/openclaw.git
synced 2026-05-07 23:11:25 +00:00
What:
- Disable tool-call id sanitization for the OpenAI/OpenAI Codex transcript policy.
- Gate id sanitization in the image sanitizer to full mode only.
- Keep the orphan-reasoning downgrade scoped to the OpenAI model-switch replay path.
- Update transcript-policy, session-history, sanitizer, and reasoning-replay tests.
- Document the OpenAI model-switch orphan-reasoning cleanup behavior in the transcript hygiene reference.

Why:
- OpenAI Responses replay depends on canonical call_id|fc_id pairings for reasoning followers.
- Strict id rewriting in the OpenAI path breaks follower matching and triggers rs_* orphan 400s.
- Limiting the scope avoids behavior expansion while fixing the identified regression.

Tests:
- pnpm vitest run src/agents/transcript-policy.test.ts src/agents/pi-embedded-runner.sanitize-session-history.test.ts src/agents/openai-responses.reasoning-replay.test.ts
- pnpm vitest run --config vitest.e2e.config.ts src/agents/transcript-policy.e2e.test.ts src/agents/pi-embedded-runner.sanitize-session-history.e2e.test.ts src/agents/pi-embedded-helpers.sanitize-session-messages-images.removes-empty-assistant-text-blocks-but-preserves.e2e.test.ts src/agents/pi-embedded-helpers.sanitizeuserfacingtext.e2e.test.ts
- pnpm lint
- pnpm format:check
- pnpm check:docs
- pnpm test (fails in current macOS bash 3.2 env at test/git-hooks-pre-commit.integration.test.ts: mapfile not found)
51 lines · 1.7 KiB · TypeScript
import type { AgentMessage } from "@mariozechner/pi-agent-core";
|
|
import { describe, expect, it } from "vitest";
|
|
import {
|
|
makeInMemorySessionManager,
|
|
makeModelSnapshotEntry,
|
|
} from "./pi-embedded-runner.sanitize-session-history.test-harness.js";
|
|
import { sanitizeSessionHistory } from "./pi-embedded-runner/google.js";
|
|
|
|
describe("sanitizeSessionHistory openai tool id preservation", () => {
|
|
it("keeps canonical call_id|fc_id pairings for same-model openai replay", async () => {
|
|
const sessionEntries = [
|
|
makeModelSnapshotEntry({
|
|
provider: "openai",
|
|
modelApi: "openai-responses",
|
|
modelId: "gpt-5.2-codex",
|
|
}),
|
|
];
|
|
const sessionManager = makeInMemorySessionManager(sessionEntries);
|
|
|
|
const messages: AgentMessage[] = [
|
|
{
|
|
role: "assistant",
|
|
content: [{ type: "toolCall", id: "call_123|fc_123", name: "noop", arguments: {} }],
|
|
},
|
|
{
|
|
role: "toolResult",
|
|
toolCallId: "call_123|fc_123",
|
|
toolName: "noop",
|
|
content: [{ type: "text", text: "ok" }],
|
|
isError: false,
|
|
} as unknown as AgentMessage,
|
|
];
|
|
|
|
const result = await sanitizeSessionHistory({
|
|
messages,
|
|
modelApi: "openai-responses",
|
|
provider: "openai",
|
|
modelId: "gpt-5.2-codex",
|
|
sessionManager,
|
|
sessionId: "test-session",
|
|
});
|
|
|
|
const assistant = result[0] as { content?: Array<{ type?: string; id?: string }> };
|
|
const toolCall = assistant.content?.find((block) => block.type === "toolCall");
|
|
expect(toolCall?.id).toBe("call_123|fc_123");
|
|
|
|
const toolResult = result[1] as { toolCallId?: string };
|
|
expect(toolResult.toolCallId).toBe("call_123|fc_123");
|
|
});
|
|
});
|