Mirror of https://github.com/openclaw/openclaw.git
Synced 2026-05-11 01:04:32 +00:00
feat(openai): add gpt-5.4 support for API and Codex OAuth (#36590)
* feat(openai): add gpt-5.4 support and priority processing
* feat(openai-codex): add gpt-5.4 oauth support
* fix(openai): preserve provider overrides in gpt-5.4 fallback
* fix(openai-codex): keep xhigh for gpt-5.4 default
* fix(models): preserve configured overrides in list output
* fix(models): close gpt-5.4 integration gaps
* fix(openai): scope service tier to public api
* fix(openai): complete prep followups for gpt-5.4 support (#36590) (thanks @dorukardahan)

---------

Co-authored-by: Tyler Yust <TYTYYUST@YAHOO.COM>
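For orientation, the priority-processing part of this commit routes a service tier through to OpenAI Responses API requests. A minimal sketch of the end result, with the field name and allowed values taken from the wrapper added below and everything else illustrative:

// Illustrative payload only; the request shape around service_tier is not part of this diff.
const payload: Record<string, unknown> = {
  model: "gpt-5.4",
  input: "Hello",
};
// Effect of createOpenAIServiceTierWrapper: set the tier only when the caller has not.
if (payload.service_tier === undefined) {
  payload.service_tier = "priority"; // "auto" | "default" | "flex" | "priority"
}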
@@ -44,6 +44,7 @@ export function resolveExtraParams(params: {
}

type CacheRetention = "none" | "short" | "long";
type OpenAIServiceTier = "auto" | "default" | "flex" | "priority";
type CacheRetentionStreamOptions = Partial<SimpleStreamOptions> & {
  cacheRetention?: CacheRetention;
  openaiWsWarmup?: boolean;
@@ -208,6 +209,18 @@ function isDirectOpenAIBaseUrl(baseUrl: unknown): boolean {
  }
}

function isOpenAIPublicApiBaseUrl(baseUrl: unknown): boolean {
  if (typeof baseUrl !== "string" || !baseUrl.trim()) {
    return false;
  }

  try {
    return new URL(baseUrl).hostname.toLowerCase() === "api.openai.com";
  } catch {
    return baseUrl.toLowerCase().includes("api.openai.com");
  }
}

function shouldForceResponsesStore(model: {
  api?: unknown;
  provider?: unknown;
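As a quick illustration of the guard above (calls assume isOpenAIPublicApiBaseUrl is in scope; the proxy URL is made up):

// Only the public api.openai.com hostname qualifies, so gateways and proxies
// never receive the injected service_tier field.
isOpenAIPublicApiBaseUrl("https://api.openai.com/v1");    // true
isOpenAIPublicApiBaseUrl("https://proxy.example.com/v1"); // false
isOpenAIPublicApiBaseUrl("api.openai.com");               // true, via the catch fallback (not a parseable URL)
isOpenAIPublicApiBaseUrl("");                              // false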
@@ -314,6 +327,63 @@ function createOpenAIResponsesContextManagementWrapper(
  };
}

function normalizeOpenAIServiceTier(value: unknown): OpenAIServiceTier | undefined {
  if (typeof value !== "string") {
    return undefined;
  }
  const normalized = value.trim().toLowerCase();
  if (
    normalized === "auto" ||
    normalized === "default" ||
    normalized === "flex" ||
    normalized === "priority"
  ) {
    return normalized;
  }
  return undefined;
}

function resolveOpenAIServiceTier(
  extraParams: Record<string, unknown> | undefined,
): OpenAIServiceTier | undefined {
  const raw = extraParams?.serviceTier ?? extraParams?.service_tier;
  const normalized = normalizeOpenAIServiceTier(raw);
  if (raw !== undefined && normalized === undefined) {
    const rawSummary = typeof raw === "string" ? raw : typeof raw;
    log.warn(`ignoring invalid OpenAI service tier param: ${rawSummary}`);
  }
  return normalized;
}

function createOpenAIServiceTierWrapper(
  baseStreamFn: StreamFn | undefined,
  serviceTier: OpenAIServiceTier,
): StreamFn {
  const underlying = baseStreamFn ?? streamSimple;
  return (model, context, options) => {
    if (
      model.api !== "openai-responses" ||
      model.provider !== "openai" ||
      !isOpenAIPublicApiBaseUrl(model.baseUrl)
    ) {
      return underlying(model, context, options);
    }
    const originalOnPayload = options?.onPayload;
    return underlying(model, context, {
      ...options,
      onPayload: (payload) => {
        if (payload && typeof payload === "object") {
          const payloadObj = payload as Record<string, unknown>;
          if (payloadObj.service_tier === undefined) {
            payloadObj.service_tier = serviceTier;
          }
        }
        originalOnPayload?.(payload);
      },
    });
  };
}

function createCodexDefaultTransportWrapper(baseStreamFn: StreamFn | undefined): StreamFn {
  const underlying = baseStreamFn ?? streamSimple;
  return (model, context, options) =>
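Rough usage sketch of the helpers above (inputs are illustrative; the behavior follows directly from the code):

// Both serviceTier and service_tier spellings are accepted; casing and
// surrounding whitespace are normalized, and unknown tiers are dropped with a warning.
resolveOpenAIServiceTier({ serviceTier: " Priority " }); // "priority"
resolveOpenAIServiceTier({ service_tier: "flex" });      // "flex"
resolveOpenAIServiceTier({ serviceTier: "turbo" });      // undefined, logs a warning
resolveOpenAIServiceTier(undefined);                      // undefined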
@@ -1073,6 +1143,12 @@ export function applyExtraParamsToAgent(
  // upstream model-ID heuristics for Gemini 3.1 variants.
  agent.streamFn = createGoogleThinkingPayloadWrapper(agent.streamFn, thinkingLevel);

  const openAIServiceTier = resolveOpenAIServiceTier(merged);
  if (openAIServiceTier) {
    log.debug(`applying OpenAI service_tier=${openAIServiceTier} for ${provider}/${modelId}`);
    agent.streamFn = createOpenAIServiceTierWrapper(agent.streamFn, openAIServiceTier);
  }

  // Work around upstream pi-ai hardcoding `store: false` for Responses API.
  // Force `store=true` for direct OpenAI Responses models and auto-enable
  // server-side compaction for compatible OpenAI Responses payloads.
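The wiring follows the existing wrapper-stacking pattern: each helper takes the current agent.streamFn and returns a new one, so features compose by reassignment. A simplified sketch of that pattern (types reduced to a plain async function; this is not the project's actual StreamFn signature):

// Simplified composition model: the most recently assigned wrapper is the
// outermost call, and it only fills in service_tier when the payload lacks one.
type Stream = (payload: Record<string, unknown>) => Promise<unknown>;

const base: Stream = async (payload) => payload; // stand-in for streamSimple

function withServiceTier(underlying: Stream, tier: string): Stream {
  // Spreading payload last means a caller-supplied service_tier wins.
  return (payload) => underlying({ service_tier: tier, ...payload });
}

let streamFn: Stream = base;
streamFn = withServiceTier(streamFn, "flex");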
@@ -49,6 +49,14 @@ describe("pi embedded model e2e smoke", () => {
    expect(result.model).toMatchObject(buildOpenAICodexForwardCompatExpectation("gpt-5.3-codex"));
  });

  it("builds an openai-codex forward-compat fallback for gpt-5.4", () => {
    mockOpenAICodexTemplateModel();

    const result = resolveModel("openai-codex", "gpt-5.4", "/tmp/agent");
    expect(result.error).toBeUndefined();
    expect(result.model).toMatchObject(buildOpenAICodexForwardCompatExpectation("gpt-5.4"));
  });

  it("keeps unknown-model errors for non-forward-compat IDs", () => {
    const result = resolveModel("openai-codex", "gpt-4.1-mini", "/tmp/agent");
    expect(result.model).toBeUndefined();
@@ -23,7 +23,7 @@ function buildForwardCompatTemplate(params: {
  id: string;
  name: string;
  provider: string;
-  api: "anthropic-messages" | "google-gemini-cli" | "openai-completions";
+  api: "anthropic-messages" | "google-gemini-cli" | "openai-completions" | "openai-responses";
  baseUrl: string;
  input?: readonly ["text"] | readonly ["text", "image"];
  cost?: { input: number; output: number; cacheRead: number; cacheWrite: number };
@@ -399,6 +399,53 @@ describe("resolveModel", () => {
    expect(result.model).toMatchObject(buildOpenAICodexForwardCompatExpectation("gpt-5.3-codex"));
  });

  it("builds an openai-codex fallback for gpt-5.4", () => {
    mockOpenAICodexTemplateModel();

    const result = resolveModel("openai-codex", "gpt-5.4", "/tmp/agent");

    expect(result.error).toBeUndefined();
    expect(result.model).toMatchObject(buildOpenAICodexForwardCompatExpectation("gpt-5.4"));
  });

  it("applies provider overrides to openai gpt-5.4 forward-compat models", () => {
    mockDiscoveredModel({
      provider: "openai",
      modelId: "gpt-5.2",
      templateModel: buildForwardCompatTemplate({
        id: "gpt-5.2",
        name: "GPT-5.2",
        provider: "openai",
        api: "openai-responses",
        baseUrl: "https://api.openai.com/v1",
      }),
    });

    const cfg = {
      models: {
        providers: {
          openai: {
            baseUrl: "https://proxy.example.com/v1",
            headers: { "X-Proxy-Auth": "token-123" },
          },
        },
      },
    } as unknown as OpenClawConfig;

    const result = resolveModel("openai", "gpt-5.4", "/tmp/agent", cfg);

    expect(result.error).toBeUndefined();
    expect(result.model).toMatchObject({
      provider: "openai",
      id: "gpt-5.4",
      api: "openai-responses",
      baseUrl: "https://proxy.example.com/v1",
    });
    expect((result.model as unknown as { headers?: Record<string, string> }).headers).toEqual({
      "X-Proxy-Auth": "token-123",
    });
  });

  it("builds an anthropic forward-compat fallback for claude-opus-4-6", () => {
    mockDiscoveredModel({
      provider: "anthropic",
@@ -99,6 +99,96 @@ export function buildInlineProviderModels(
  });
}

export function resolveModelWithRegistry(params: {
  provider: string;
  modelId: string;
  modelRegistry: ModelRegistry;
  cfg?: OpenClawConfig;
}): Model<Api> | undefined {
  const { provider, modelId, modelRegistry, cfg } = params;
  const providerConfig = resolveConfiguredProviderConfig(cfg, provider);
  const model = modelRegistry.find(provider, modelId) as Model<Api> | null;

  if (model) {
    return normalizeModelCompat(
      applyConfiguredProviderOverrides({
        discoveredModel: model,
        providerConfig,
        modelId,
      }),
    );
  }

  const providers = cfg?.models?.providers ?? {};
  const inlineModels = buildInlineProviderModels(providers);
  const normalizedProvider = normalizeProviderId(provider);
  const inlineMatch = inlineModels.find(
    (entry) => normalizeProviderId(entry.provider) === normalizedProvider && entry.id === modelId,
  );
  if (inlineMatch) {
    return normalizeModelCompat(inlineMatch as Model<Api>);
  }

  // Forward-compat fallbacks must be checked BEFORE the generic providerCfg fallback.
  // Otherwise, configured providers can default to a generic API and break specific transports.
  const forwardCompat = resolveForwardCompatModel(provider, modelId, modelRegistry);
  if (forwardCompat) {
    return normalizeModelCompat(
      applyConfiguredProviderOverrides({
        discoveredModel: forwardCompat,
        providerConfig,
        modelId,
      }),
    );
  }

  // OpenRouter is a pass-through proxy - any model ID available on OpenRouter
  // should work without being pre-registered in the local catalog.
  if (normalizedProvider === "openrouter") {
    return normalizeModelCompat({
      id: modelId,
      name: modelId,
      api: "openai-completions",
      provider,
      baseUrl: "https://openrouter.ai/api/v1",
      reasoning: false,
      input: ["text"],
      cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
      contextWindow: DEFAULT_CONTEXT_TOKENS,
      // Align with OPENROUTER_DEFAULT_MAX_TOKENS in models-config.providers.ts
      maxTokens: 8192,
    } as Model<Api>);
  }

  const configuredModel = providerConfig?.models?.find((candidate) => candidate.id === modelId);
  if (providerConfig || modelId.startsWith("mock-")) {
    return normalizeModelCompat({
      id: modelId,
      name: modelId,
      api: providerConfig?.api ?? "openai-responses",
      provider,
      baseUrl: providerConfig?.baseUrl,
      reasoning: configuredModel?.reasoning ?? false,
      input: ["text"],
      cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
      contextWindow:
        configuredModel?.contextWindow ??
        providerConfig?.models?.[0]?.contextWindow ??
        DEFAULT_CONTEXT_TOKENS,
      maxTokens:
        configuredModel?.maxTokens ??
        providerConfig?.models?.[0]?.maxTokens ??
        DEFAULT_CONTEXT_TOKENS,
      headers:
        providerConfig?.headers || configuredModel?.headers
          ? { ...providerConfig?.headers, ...configuredModel?.headers }
          : undefined,
    } as Model<Api>);
  }

  return undefined;
}

export function resolveModel(
  provider: string,
  modelId: string,
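The new helper centralizes the lookup order that resolveModel previously inlined: registry match, then inline provider models, then the forward-compat fallback, then the OpenRouter pass-through, then the configured-provider default, and finally undefined. A rough call-site sketch (the declare lines stand in for values normally produced by discoverModels and the loaded config):

// Placeholders for values that would come from discovery and configuration.
declare const modelRegistry: ModelRegistry;
declare const cfg: OpenClawConfig;

const resolved = resolveModelWithRegistry({
  provider: "openai",
  modelId: "gpt-5.4",
  modelRegistry,
  cfg,
});
// resolved is undefined when no fallback applies; callers surface their own error.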
@@ -113,89 +203,13 @@ export function resolveModel(
  const resolvedAgentDir = agentDir ?? resolveOpenClawAgentDir();
  const authStorage = discoverAuthStorage(resolvedAgentDir);
  const modelRegistry = discoverModels(authStorage, resolvedAgentDir);
-  const providerConfig = resolveConfiguredProviderConfig(cfg, provider);
-  const model = modelRegistry.find(provider, modelId) as Model<Api> | null;
-
-  if (!model) {
-    const providers = cfg?.models?.providers ?? {};
-    const inlineModels = buildInlineProviderModels(providers);
-    const normalizedProvider = normalizeProviderId(provider);
-    const inlineMatch = inlineModels.find(
-      (entry) => normalizeProviderId(entry.provider) === normalizedProvider && entry.id === modelId,
-    );
-    if (inlineMatch) {
-      const normalized = normalizeModelCompat(inlineMatch as Model<Api>);
-      return {
-        model: normalized,
-        authStorage,
-        modelRegistry,
-      };
-    }
-    // Forward-compat fallbacks must be checked BEFORE the generic providerCfg fallback.
-    // Otherwise, configured providers can default to a generic API and break specific transports.
-    const forwardCompat = resolveForwardCompatModel(provider, modelId, modelRegistry);
-    if (forwardCompat) {
-      return { model: forwardCompat, authStorage, modelRegistry };
-    }
-    // OpenRouter is a pass-through proxy — any model ID available on OpenRouter
-    // should work without being pre-registered in the local catalog.
-    if (normalizedProvider === "openrouter") {
-      const fallbackModel: Model<Api> = normalizeModelCompat({
-        id: modelId,
-        name: modelId,
-        api: "openai-completions",
-        provider,
-        baseUrl: "https://openrouter.ai/api/v1",
-        reasoning: false,
-        input: ["text"],
-        cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
-        contextWindow: DEFAULT_CONTEXT_TOKENS,
-        // Align with OPENROUTER_DEFAULT_MAX_TOKENS in models-config.providers.ts
-        maxTokens: 8192,
-      } as Model<Api>);
-      return { model: fallbackModel, authStorage, modelRegistry };
-    }
-    const providerCfg = providerConfig;
-    if (providerCfg || modelId.startsWith("mock-")) {
-      const configuredModel = providerCfg?.models?.find((candidate) => candidate.id === modelId);
-      const fallbackModel: Model<Api> = normalizeModelCompat({
-        id: modelId,
-        name: modelId,
-        api: providerCfg?.api ?? "openai-responses",
-        provider,
-        baseUrl: providerCfg?.baseUrl,
-        reasoning: configuredModel?.reasoning ?? false,
-        input: ["text"],
-        cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
-        contextWindow:
-          configuredModel?.contextWindow ??
-          providerCfg?.models?.[0]?.contextWindow ??
-          DEFAULT_CONTEXT_TOKENS,
-        maxTokens:
-          configuredModel?.maxTokens ??
-          providerCfg?.models?.[0]?.maxTokens ??
-          DEFAULT_CONTEXT_TOKENS,
-        headers:
-          providerCfg?.headers || configuredModel?.headers
-            ? { ...providerCfg?.headers, ...configuredModel?.headers }
-            : undefined,
-      } as Model<Api>);
-      return { model: fallbackModel, authStorage, modelRegistry };
-    }
-    return {
-      error: buildUnknownModelError(provider, modelId),
-      authStorage,
-      modelRegistry,
-    };
-  }
-
-  return {
-    model: normalizeModelCompat(
-      applyConfiguredProviderOverrides({
-        discoveredModel: model,
-        providerConfig,
-        modelId,
-      }),
-    ),
-    authStorage,
-    modelRegistry,
-  };
+  const model = resolveModelWithRegistry({ provider, modelId, modelRegistry, cfg });
+  if (model) {
+    return { model, authStorage, modelRegistry };
+  }
+
+  return {
+    error: buildUnknownModelError(provider, modelId),
+    authStorage,
+    modelRegistry,
+  };
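Call sites keep the same contract after the refactor: they still receive either a model or an error, along with the auth storage and registry. A minimal sketch mirroring the tests above:

// Mirrors the test usage; the error handling shown here is illustrative only.
const result = resolveModel("openai-codex", "gpt-5.4", "/tmp/agent");
if (result.error) {
  // unknown model: result.model is undefined and result.error describes why
} else {
  console.log(result.model?.id); // "gpt-5.4"
}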