mirror of
https://github.com/openclaw/openclaw.git
synced 2026-05-09 06:27:39 +00:00
openai-codex: add gpt-5.3-codex-spark forward-compat model (#15174)
Merged via maintainer flow after rebase + local gates.
Prepared head SHA: 6cac87cbf9
Co-authored-by: loiie45e <15420100+loiie45e@users.noreply.github.com>
Co-authored-by: mbelinky <2406260+mbelinky@users.noreply.github.com>
This commit is contained in:
@@ -20,6 +20,7 @@ Docs: https://docs.openclaw.ai
|
|||||||
- Onboarding/CLI: restore terminal state without resuming paused `stdin`, so onboarding exits cleanly after choosing Web UI and the installer returns instead of appearing stuck.
|
- Onboarding/CLI: restore terminal state without resuming paused `stdin`, so onboarding exits cleanly after choosing Web UI and the installer returns instead of appearing stuck.
|
||||||
- Onboarding/Providers: add vLLM as an onboarding provider with model discovery, auth profile wiring, and non-interactive auth-choice validation. (#12577) Thanks @gejifeng.
|
- Onboarding/Providers: add vLLM as an onboarding provider with model discovery, auth profile wiring, and non-interactive auth-choice validation. (#12577) Thanks @gejifeng.
|
||||||
- Onboarding/Providers: preserve Hugging Face auth intent in auth-choice remapping (`tokenProvider=huggingface` with `authChoice=apiKey`) and skip env-override prompts when an explicit token is provided. (#13472) Thanks @Josephrp.
|
- Onboarding/Providers: preserve Hugging Face auth intent in auth-choice remapping (`tokenProvider=huggingface` with `authChoice=apiKey`) and skip env-override prompts when an explicit token is provided. (#13472) Thanks @Josephrp.
|
||||||
|
- Models/Codex: resolve configured `openai-codex/gpt-5.3-codex-spark` through forward-compat fallback during `models list`, so it is not incorrectly tagged as missing when runtime resolution succeeds. (#15174) Thanks @loiie45e.
|
||||||
- macOS Voice Wake: fix a crash in trigger trimming for CJK/Unicode transcripts by matching and slicing on original-string ranges instead of transformed-string indices. (#11052) Thanks @Flash-LHR.
|
- macOS Voice Wake: fix a crash in trigger trimming for CJK/Unicode transcripts by matching and slicing on original-string ranges instead of transformed-string indices. (#11052) Thanks @Flash-LHR.
|
||||||
- Heartbeat: prevent scheduler silent-death races during runner reloads, preserve retry cooldown backoff under wake bursts, and prioritize user/action wake causes over interval/retry reasons when coalescing. (#15108) Thanks @joeykrug.
|
- Heartbeat: prevent scheduler silent-death races during runner reloads, preserve retry cooldown backoff under wake bursts, and prioritize user/action wake causes over interval/retry reasons when coalescing. (#15108) Thanks @joeykrug.
|
||||||
- Outbound targets: fail closed for WhatsApp/Twitch/Google Chat fallback paths so invalid or missing targets are dropped instead of rerouted, and align resolver hints with strict target requirements. (#13578) Thanks @mcaxtr.
|
- Outbound targets: fail closed for WhatsApp/Twitch/Google Chat fallback paths so invalid or missing targets are dropped instead of rerouted, and align resolver hints with strict target requirements. (#13578) Thanks @mcaxtr.
|
||||||
|
|||||||
@@ -40,11 +40,11 @@ function resolveOpenAICodexGpt53FallbackModel(
|
|||||||
if (normalizedProvider !== "openai-codex") {
|
if (normalizedProvider !== "openai-codex") {
|
||||||
return undefined;
|
return undefined;
|
||||||
}
|
}
|
||||||
const loweredModelId = trimmedModelId.toLowerCase();
|
|
||||||
if (
|
const lower = trimmedModelId.toLowerCase();
|
||||||
loweredModelId !== OPENAI_CODEX_GPT_53_MODEL_ID &&
|
const isGpt53 = lower === OPENAI_CODEX_GPT_53_MODEL_ID;
|
||||||
loweredModelId !== OPENAI_CODEX_GPT_53_SPARK_MODEL_ID
|
const isSpark = lower === OPENAI_CODEX_GPT_53_SPARK_MODEL_ID;
|
||||||
) {
|
if (!isGpt53 && !isSpark) {
|
||||||
return undefined;
|
return undefined;
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -57,6 +57,8 @@ function resolveOpenAICodexGpt53FallbackModel(
|
|||||||
...template,
|
...template,
|
||||||
id: trimmedModelId,
|
id: trimmedModelId,
|
||||||
name: trimmedModelId,
|
name: trimmedModelId,
|
||||||
|
// Spark is a low-latency variant; keep api/baseUrl from template.
|
||||||
|
...(isSpark ? { reasoning: true } : {}),
|
||||||
} as Model<Api>);
|
} as Model<Api>);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
95
src/commands/models/list.list-command.forward-compat.test.ts
Normal file
95
src/commands/models/list.list-command.forward-compat.test.ts
Normal file
@@ -0,0 +1,95 @@
|
|||||||
|
// Regression test for #15174: a configured `openai-codex/gpt-5.3-codex-spark`
// model must not be tagged "missing" by `models list` when runtime resolution
// (resolveModel) can build it through the forward-compat fallback, even though
// it is absent from the static model registry.
import { describe, expect, it, vi } from "vitest";

// All mocks are created inside vi.hoisted so they exist before the hoisted
// vi.mock factories below run. NOTE(review): the relative order of vi.hoisted
// and the vi.mock calls is load-bearing — do not reorder.
const mocks = vi.hoisted(() => {
  const printModelTable = vi.fn();
  return {
    // Config declares spark as the primary model but defines no providers,
    // so the registry cannot know about it.
    loadConfig: vi.fn().mockReturnValue({
      agents: { defaults: { model: { primary: "openai-codex/gpt-5.3-codex-spark" } } },
      models: { providers: {} },
    }),
    ensureAuthProfileStore: vi.fn().mockReturnValue({ version: 1, profiles: {}, order: {} }),
    // Empty registry: spark is deliberately NOT a known model here.
    loadModelRegistry: vi.fn().mockResolvedValue({ models: [], availableKeys: new Set() }),
    // One configured entry pointing at the spark model.
    resolveConfiguredEntries: vi.fn().mockReturnValue({
      entries: [
        {
          key: "openai-codex/gpt-5.3-codex-spark",
          ref: { provider: "openai-codex", model: "gpt-5.3-codex-spark" },
          tags: new Set(["configured"]),
          aliases: [],
        },
      ],
    }),
    printModelTable,
    // Runtime resolution succeeds via forward-compat fallback and returns a
    // fully-populated model with no error.
    resolveModel: vi.fn().mockReturnValue({
      model: {
        provider: "openai-codex",
        id: "gpt-5.3-codex-spark",
        name: "GPT-5.3 Codex Spark",
        api: "openai-codex-responses",
        baseUrl: "https://chatgpt.com/backend-api",
        input: ["text"],
        contextWindow: 272000,
        maxTokens: 128000,
        cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
      },
      error: undefined,
      authStorage: {} as never,
      modelRegistry: {} as never,
    }),
  };
});

vi.mock("../../config/config.js", () => ({
  loadConfig: mocks.loadConfig,
}));

// Partial mock: keep the real module but stub the store/profile lookups the
// command touches, so no real auth state is read.
vi.mock("../../agents/auth-profiles.js", async (importOriginal) => {
  const actual = await importOriginal<typeof import("../../agents/auth-profiles.js")>();
  return {
    ...actual,
    ensureAuthProfileStore: mocks.ensureAuthProfileStore,
    listProfilesForProvider: vi.fn().mockReturnValue([]),
  };
});

// Partial mock: only the registry loader is replaced; toModelRow etc. stay real.
vi.mock("./list.registry.js", async (importOriginal) => {
  const actual = await importOriginal<typeof import("./list.registry.js")>();
  return {
    ...actual,
    loadModelRegistry: mocks.loadModelRegistry,
  };
});

vi.mock("./list.configured.js", () => ({
  resolveConfiguredEntries: mocks.resolveConfiguredEntries,
}));

vi.mock("./list.table.js", () => ({
  printModelTable: mocks.printModelTable,
}));

vi.mock("../../agents/pi-embedded-runner/model.js", () => ({
  resolveModel: mocks.resolveModel,
}));

// Imported after the vi.mock declarations so the command picks up the mocks.
import { modelsListCommand } from "./list.list-command.js";

describe("modelsListCommand forward-compat", () => {
  it("does not mark configured codex spark as missing when resolveModel can build a fallback", async () => {
    const runtime = { log: vi.fn(), error: vi.fn() };

    await modelsListCommand({ json: true }, runtime as never);

    // Inspect the rows handed to the table printer rather than parsing output.
    expect(mocks.printModelTable).toHaveBeenCalled();
    const rows = mocks.printModelTable.mock.calls[0]?.[0] as Array<{
      key: string;
      tags: string[];
      missing: boolean;
    }>;

    // The spark entry must be present and must NOT carry the "missing" marker.
    const spark = rows.find((r) => r.key === "openai-codex/gpt-5.3-codex-spark");
    expect(spark).toBeTruthy();
    expect(spark?.missing).toBe(false);
    expect(spark?.tags).not.toContain("missing");
  });
});
|
||||||
@@ -3,6 +3,7 @@ import type { RuntimeEnv } from "../../runtime.js";
|
|||||||
import type { ModelRow } from "./list.types.js";
|
import type { ModelRow } from "./list.types.js";
|
||||||
import { ensureAuthProfileStore } from "../../agents/auth-profiles.js";
|
import { ensureAuthProfileStore } from "../../agents/auth-profiles.js";
|
||||||
import { parseModelRef } from "../../agents/model-selection.js";
|
import { parseModelRef } from "../../agents/model-selection.js";
|
||||||
|
import { resolveModel } from "../../agents/pi-embedded-runner/model.js";
|
||||||
import { loadConfig } from "../../config/config.js";
|
import { loadConfig } from "../../config/config.js";
|
||||||
import { resolveConfiguredEntries } from "./list.configured.js";
|
import { resolveConfiguredEntries } from "./list.configured.js";
|
||||||
import { loadModelRegistry, toModelRow } from "./list.registry.js";
|
import { loadModelRegistry, toModelRow } from "./list.registry.js";
|
||||||
@@ -99,7 +100,13 @@ export async function modelsListCommand(
|
|||||||
if (providerFilter && entry.ref.provider.toLowerCase() !== providerFilter) {
|
if (providerFilter && entry.ref.provider.toLowerCase() !== providerFilter) {
|
||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
const model = modelByKey.get(entry.key);
|
let model = modelByKey.get(entry.key);
|
||||||
|
if (!model) {
|
||||||
|
const resolved = resolveModel(entry.ref.provider, entry.ref.model, undefined, cfg);
|
||||||
|
if (resolved.model && !resolved.error) {
|
||||||
|
model = resolved.model;
|
||||||
|
}
|
||||||
|
}
|
||||||
if (opts.local && model && !isLocalBaseUrl(model.baseUrl)) {
|
if (opts.local && model && !isLocalBaseUrl(model.baseUrl)) {
|
||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
|
|||||||
Reference in New Issue
Block a user