feat(openai): add websocket warm-up with configurable toggle

This commit is contained in:
Peter Steinberger
2026-03-01 22:44:57 +00:00
parent bc9f357ad7
commit d1615eb35f
7 changed files with 296 additions and 5 deletions

View File

@@ -138,9 +138,9 @@ describe("resolveExtraParams", () => {
describe("applyExtraParamsToAgent", () => {
function createOptionsCaptureAgent() {
const calls: Array<SimpleStreamOptions | undefined> = [];
const calls: Array<(SimpleStreamOptions & { openaiWsWarmup?: boolean }) | undefined> = [];
const baseStreamFn: StreamFn = (_model, _context, options) => {
calls.push(options);
calls.push(options as (SimpleStreamOptions & { openaiWsWarmup?: boolean }) | undefined);
return {} as ReturnType<StreamFn>;
};
return {
@@ -559,6 +559,7 @@ describe("applyExtraParamsToAgent", () => {
expect(calls).toHaveLength(1);
expect(calls[0]?.transport).toBe("auto");
expect(calls[0]?.openaiWsWarmup).toBe(true);
});
it("lets runtime options override OpenAI default transport", () => {
@@ -578,6 +579,68 @@ describe("applyExtraParamsToAgent", () => {
expect(calls[0]?.transport).toBe("sse");
});
// Verifies that `openaiWsWarmup: false` in per-model config params
// propagates through applyExtraParamsToAgent into the stream options.
it("allows disabling OpenAI websocket warm-up via model params", () => {
  const { calls, agent } = createOptionsCaptureAgent();
  // Per-model override: disable the websocket warm-up for openai/gpt-5.
  const config = {
    agents: {
      defaults: {
        models: {
          "openai/gpt-5": { params: { openaiWsWarmup: false } },
        },
      },
    },
  };
  const model = {
    api: "openai-responses",
    provider: "openai",
    id: "gpt-5",
  } as Model<"openai-responses">;
  const emptyContext: Context = { messages: [] };
  applyExtraParamsToAgent(agent, config, "openai", "gpt-5");
  // Call with no runtime options so only the configured params apply.
  void agent.streamFn?.(model, emptyContext, {});
  expect(calls).toHaveLength(1);
  expect(calls[0]?.openaiWsWarmup).toBe(false);
});
// Runtime stream options take precedence over configured model params:
// config disables the warm-up, the call site re-enables it.
it("lets runtime options override configured OpenAI websocket warm-up", () => {
  const { calls, agent } = createOptionsCaptureAgent();
  const config = {
    agents: {
      defaults: {
        models: {
          "openai/gpt-5": { params: { openaiWsWarmup: false } },
        },
      },
    },
  };
  applyExtraParamsToAgent(agent, config, "openai", "gpt-5");
  const model = {
    api: "openai-responses",
    provider: "openai",
    id: "gpt-5",
  } as Model<"openai-responses">;
  const emptyContext: Context = { messages: [] };
  // Runtime options carry the extra warm-up flag, so widen the type
  // the same way the production call site would.
  const runtimeOptions = {
    openaiWsWarmup: true,
  } as unknown as SimpleStreamOptions;
  void agent.streamFn?.(model, emptyContext, runtimeOptions);
  expect(calls).toHaveLength(1);
  // The runtime `true` must win over the configured `false`.
  expect(calls[0]?.openaiWsWarmup).toBe(true);
});
it("allows forcing Codex transport to SSE", () => {
const { calls, agent } = createOptionsCaptureAgent();
const cfg = {