mirror of
https://github.com/openclaw/openclaw.git
synced 2026-04-19 04:57:26 +00:00
fix: normalize openai-codex gpt-5.4 transport overrides
This commit is contained in:
committed by
Peter Steinberger
parent
3da8882a02
commit
024857050a
@@ -33,6 +33,7 @@ Docs: https://docs.openclaw.ai
- Hooks/session-memory: keep `/new` and `/reset` memory artifacts in the bound agent workspace and align saved reset session keys with that workspace when stale main-agent keys leak into the hook path. (#39875) thanks @rbutera.
- Sessions/model switch: clear stale cached `contextTokens` when a session changes models so status and runtime paths recompute against the active model window. (#38044) thanks @yuweuii.
- ACP/session history: persist transcripts for successful ACP child runs, preserve exact transcript text, record ACP spawned-session lineage, and keep spawn-time transcript-path persistence best-effort so history storage failures do not block execution. (#40137) thanks @mbelinky.
- Agents/openai-codex: normalize `gpt-5.4` fallback transport back to `openai-codex-responses` on `chatgpt.com/backend-api` when config drifts to the generic OpenAI responses endpoint. (#38736) Thanks @0xsline.
- Browser/CDP: normalize loopback direct WebSocket CDP URLs back to HTTP(S) for `/json/*` tab operations so local `ws://` / `wss://` profiles can still list, focus, open, and close tabs after the new direct-WS support lands. (#31085) Thanks @shrey150.
- Browser/CDP: rewrite wildcard `ws://0.0.0.0` and `ws://[::]` debugger URLs from remote `/json/version` responses back to the external CDP host/port, fixing Browserless-style container endpoints. (#17760) Thanks @joeharouni.
- Browser/extension relay: wait briefly for a previously attached Chrome tab to reappear after transient relay drops before failing with `tab not found`, reducing noisy reconnect flakes. (#32461) Thanks @AaronWander.
@@ -664,6 +664,60 @@ describe("resolveModel", () => {
|
|||||||
});
|
});
|
||||||
});
|
});
|
||||||
|
|
||||||
|
it("normalizes openai-codex gpt-5.4 overrides away from /v1/responses", () => {
|
||||||
|
mockOpenAICodexTemplateModel();
|
||||||
|
|
||||||
|
const cfg: OpenClawConfig = {
|
||||||
|
models: {
|
||||||
|
providers: {
|
||||||
|
"openai-codex": {
|
||||||
|
baseUrl: "https://api.openai.com/v1",
|
||||||
|
api: "openai-responses",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
} as unknown as OpenClawConfig;
|
||||||
|
|
||||||
|
expectResolvedForwardCompatFallback({
|
||||||
|
provider: "openai-codex",
|
||||||
|
id: "gpt-5.4",
|
||||||
|
cfg,
|
||||||
|
expectedModel: {
|
||||||
|
api: "openai-codex-responses",
|
||||||
|
baseUrl: "https://chatgpt.com/backend-api",
|
||||||
|
id: "gpt-5.4",
|
||||||
|
provider: "openai-codex",
|
||||||
|
},
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
it("does not rewrite openai baseUrl when openai-codex api stays non-codex", () => {
|
||||||
|
mockOpenAICodexTemplateModel();
|
||||||
|
|
||||||
|
const cfg: OpenClawConfig = {
|
||||||
|
models: {
|
||||||
|
providers: {
|
||||||
|
"openai-codex": {
|
||||||
|
baseUrl: "https://api.openai.com/v1",
|
||||||
|
api: "openai-completions",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
} as unknown as OpenClawConfig;
|
||||||
|
|
||||||
|
expectResolvedForwardCompatFallback({
|
||||||
|
provider: "openai-codex",
|
||||||
|
id: "gpt-5.4",
|
||||||
|
cfg,
|
||||||
|
expectedModel: {
|
||||||
|
api: "openai-completions",
|
||||||
|
baseUrl: "https://api.openai.com/v1",
|
||||||
|
id: "gpt-5.4",
|
||||||
|
provider: "openai-codex",
|
||||||
|
},
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
it("includes auth hint for unknown ollama models (#17328)", () => {
|
it("includes auth hint for unknown ollama models (#17328)", () => {
|
||||||
// resetMockDiscoverModels() in beforeEach already sets find → null
|
// resetMockDiscoverModels() in beforeEach already sets find → null
|
||||||
const result = resolveModel("ollama", "gemma3:4b", "/tmp/agent");
|
const result = resolveModel("ollama", "gemma3:4b", "/tmp/agent");
|
||||||
|
|||||||
@@ -23,6 +23,8 @@ type InlineProviderConfig = {
|
|||||||
headers?: unknown;
|
headers?: unknown;
|
||||||
};
|
};
|
||||||
|
|
||||||
|
// Canonical base URL for the ChatGPT Codex backend transport; used when
// normalizing drifted openai-codex overrides back onto the codex endpoint.
const OPENAI_CODEX_BASE_URL = "https://chatgpt.com/backend-api";
|
||||||
|
|
||||||
function sanitizeModelHeaders(
|
function sanitizeModelHeaders(
|
||||||
headers: unknown,
|
headers: unknown,
|
||||||
opts?: { stripSecretRefMarkers?: boolean },
|
opts?: { stripSecretRefMarkers?: boolean },
|
||||||
@@ -43,6 +45,60 @@ function sanitizeModelHeaders(
|
|||||||
return Object.keys(next).length > 0 ? next : undefined;
|
return Object.keys(next).length > 0 ? next : undefined;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
function isOpenAIApiBaseUrl(baseUrl?: string): boolean {
|
||||||
|
const trimmed = baseUrl?.trim();
|
||||||
|
if (!trimmed) {
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
return /^https?:\/\/api\.openai\.com(?:\/v1)?\/?$/i.test(trimmed);
|
||||||
|
}
|
||||||
|
|
||||||
|
function isOpenAICodexBaseUrl(baseUrl?: string): boolean {
|
||||||
|
const trimmed = baseUrl?.trim();
|
||||||
|
if (!trimmed) {
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
return /^https?:\/\/chatgpt\.com\/backend-api\/?$/i.test(trimmed);
|
||||||
|
}
|
||||||
|
|
||||||
|
function normalizeOpenAICodexTransport(params: {
|
||||||
|
provider: string;
|
||||||
|
model: Model<Api>;
|
||||||
|
}): Model<Api> {
|
||||||
|
if (normalizeProviderId(params.provider) !== "openai-codex") {
|
||||||
|
return params.model;
|
||||||
|
}
|
||||||
|
|
||||||
|
const useCodexTransport =
|
||||||
|
!params.model.baseUrl ||
|
||||||
|
isOpenAIApiBaseUrl(params.model.baseUrl) ||
|
||||||
|
isOpenAICodexBaseUrl(params.model.baseUrl);
|
||||||
|
|
||||||
|
const nextApi =
|
||||||
|
useCodexTransport && params.model.api === "openai-responses"
|
||||||
|
? ("openai-codex-responses" as const)
|
||||||
|
: params.model.api;
|
||||||
|
const nextBaseUrl =
|
||||||
|
nextApi === "openai-codex-responses" &&
|
||||||
|
(!params.model.baseUrl || isOpenAIApiBaseUrl(params.model.baseUrl))
|
||||||
|
? OPENAI_CODEX_BASE_URL
|
||||||
|
: params.model.baseUrl;
|
||||||
|
|
||||||
|
if (nextApi === params.model.api && nextBaseUrl === params.model.baseUrl) {
|
||||||
|
return params.model;
|
||||||
|
}
|
||||||
|
|
||||||
|
return {
|
||||||
|
...params.model,
|
||||||
|
api: nextApi,
|
||||||
|
baseUrl: nextBaseUrl,
|
||||||
|
} as Model<Api>;
|
||||||
|
}
|
||||||
|
|
||||||
|
function normalizeResolvedModel(params: { provider: string; model: Model<Api> }): Model<Api> {
|
||||||
|
return normalizeModelCompat(normalizeOpenAICodexTransport(params));
|
||||||
|
}
|
||||||
|
|
||||||
export { buildModelAliasLines };
|
export { buildModelAliasLines };
|
||||||
|
|
||||||
function resolveConfiguredProviderConfig(
|
function resolveConfiguredProviderConfig(
|
||||||
@@ -145,13 +201,14 @@ export function resolveModelWithRegistry(params: {
|
|||||||
const model = modelRegistry.find(provider, modelId) as Model<Api> | null;
|
const model = modelRegistry.find(provider, modelId) as Model<Api> | null;
|
||||||
|
|
||||||
if (model) {
|
if (model) {
|
||||||
return normalizeModelCompat(
|
return normalizeResolvedModel({
|
||||||
applyConfiguredProviderOverrides({
|
provider,
|
||||||
|
model: applyConfiguredProviderOverrides({
|
||||||
discoveredModel: model,
|
discoveredModel: model,
|
||||||
providerConfig,
|
providerConfig,
|
||||||
modelId,
|
modelId,
|
||||||
}),
|
}),
|
||||||
);
|
});
|
||||||
}
|
}
|
||||||
|
|
||||||
const providers = cfg?.models?.providers ?? {};
|
const providers = cfg?.models?.providers ?? {};
|
||||||
@@ -161,64 +218,71 @@ export function resolveModelWithRegistry(params: {
|
|||||||
(entry) => normalizeProviderId(entry.provider) === normalizedProvider && entry.id === modelId,
|
(entry) => normalizeProviderId(entry.provider) === normalizedProvider && entry.id === modelId,
|
||||||
);
|
);
|
||||||
if (inlineMatch?.api) {
|
if (inlineMatch?.api) {
|
||||||
return normalizeModelCompat(inlineMatch as Model<Api>);
|
return normalizeResolvedModel({ provider, model: inlineMatch as Model<Api> });
|
||||||
}
|
}
|
||||||
|
|
||||||
// Forward-compat fallbacks must be checked BEFORE the generic providerCfg fallback.
|
// Forward-compat fallbacks must be checked BEFORE the generic providerCfg fallback.
|
||||||
// Otherwise, configured providers can default to a generic API and break specific transports.
|
// Otherwise, configured providers can default to a generic API and break specific transports.
|
||||||
const forwardCompat = resolveForwardCompatModel(provider, modelId, modelRegistry);
|
const forwardCompat = resolveForwardCompatModel(provider, modelId, modelRegistry);
|
||||||
if (forwardCompat) {
|
if (forwardCompat) {
|
||||||
return normalizeModelCompat(
|
return normalizeResolvedModel({
|
||||||
applyConfiguredProviderOverrides({
|
provider,
|
||||||
|
model: applyConfiguredProviderOverrides({
|
||||||
discoveredModel: forwardCompat,
|
discoveredModel: forwardCompat,
|
||||||
providerConfig,
|
providerConfig,
|
||||||
modelId,
|
modelId,
|
||||||
}),
|
}),
|
||||||
);
|
});
|
||||||
}
|
}
|
||||||
|
|
||||||
// OpenRouter is a pass-through proxy - any model ID available on OpenRouter
|
// OpenRouter is a pass-through proxy - any model ID available on OpenRouter
|
||||||
// should work without being pre-registered in the local catalog.
|
// should work without being pre-registered in the local catalog.
|
||||||
if (normalizedProvider === "openrouter") {
|
if (normalizedProvider === "openrouter") {
|
||||||
return normalizeModelCompat({
|
return normalizeResolvedModel({
|
||||||
id: modelId,
|
|
||||||
name: modelId,
|
|
||||||
api: "openai-completions",
|
|
||||||
provider,
|
provider,
|
||||||
baseUrl: "https://openrouter.ai/api/v1",
|
model: {
|
||||||
reasoning: false,
|
id: modelId,
|
||||||
input: ["text"],
|
name: modelId,
|
||||||
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
|
api: "openai-completions",
|
||||||
contextWindow: DEFAULT_CONTEXT_TOKENS,
|
provider,
|
||||||
// Align with OPENROUTER_DEFAULT_MAX_TOKENS in models-config.providers.ts
|
baseUrl: "https://openrouter.ai/api/v1",
|
||||||
maxTokens: 8192,
|
reasoning: false,
|
||||||
} as Model<Api>);
|
input: ["text"],
|
||||||
|
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
|
||||||
|
contextWindow: DEFAULT_CONTEXT_TOKENS,
|
||||||
|
// Align with OPENROUTER_DEFAULT_MAX_TOKENS in models-config.providers.ts
|
||||||
|
maxTokens: 8192,
|
||||||
|
} as Model<Api>,
|
||||||
|
});
|
||||||
}
|
}
|
||||||
|
|
||||||
const configuredModel = providerConfig?.models?.find((candidate) => candidate.id === modelId);
|
const configuredModel = providerConfig?.models?.find((candidate) => candidate.id === modelId);
|
||||||
const providerHeaders = sanitizeModelHeaders(providerConfig?.headers);
|
const providerHeaders = sanitizeModelHeaders(providerConfig?.headers);
|
||||||
const modelHeaders = sanitizeModelHeaders(configuredModel?.headers);
|
const modelHeaders = sanitizeModelHeaders(configuredModel?.headers);
|
||||||
if (providerConfig || modelId.startsWith("mock-")) {
|
if (providerConfig || modelId.startsWith("mock-")) {
|
||||||
return normalizeModelCompat({
|
return normalizeResolvedModel({
|
||||||
id: modelId,
|
|
||||||
name: modelId,
|
|
||||||
api: providerConfig?.api ?? "openai-responses",
|
|
||||||
provider,
|
provider,
|
||||||
baseUrl: providerConfig?.baseUrl,
|
model: {
|
||||||
reasoning: configuredModel?.reasoning ?? false,
|
id: modelId,
|
||||||
input: ["text"],
|
name: modelId,
|
||||||
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
|
api: providerConfig?.api ?? "openai-responses",
|
||||||
contextWindow:
|
provider,
|
||||||
configuredModel?.contextWindow ??
|
baseUrl: providerConfig?.baseUrl,
|
||||||
providerConfig?.models?.[0]?.contextWindow ??
|
reasoning: configuredModel?.reasoning ?? false,
|
||||||
DEFAULT_CONTEXT_TOKENS,
|
input: ["text"],
|
||||||
maxTokens:
|
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
|
||||||
configuredModel?.maxTokens ??
|
contextWindow:
|
||||||
providerConfig?.models?.[0]?.maxTokens ??
|
configuredModel?.contextWindow ??
|
||||||
DEFAULT_CONTEXT_TOKENS,
|
providerConfig?.models?.[0]?.contextWindow ??
|
||||||
headers:
|
DEFAULT_CONTEXT_TOKENS,
|
||||||
providerHeaders || modelHeaders ? { ...providerHeaders, ...modelHeaders } : undefined,
|
maxTokens:
|
||||||
} as Model<Api>);
|
configuredModel?.maxTokens ??
|
||||||
|
providerConfig?.models?.[0]?.maxTokens ??
|
||||||
|
DEFAULT_CONTEXT_TOKENS,
|
||||||
|
headers:
|
||||||
|
providerHeaders || modelHeaders ? { ...providerHeaders, ...modelHeaders } : undefined,
|
||||||
|
} as Model<Api>,
|
||||||
|
});
|
||||||
}
|
}
|
||||||
|
|
||||||
return undefined;
|
return undefined;
|
||||||
|
|||||||
Reference in New Issue
Block a user