fix(embedded): classify model_context_window_exceeded as context overflow, trigger compaction (#35934)

Merged via squash.

Prepared head SHA: 20fa77289c
Co-authored-by: RealKai42 <44634134+RealKai42@users.noreply.github.com>
Co-authored-by: jalehman <550978+jalehman@users.noreply.github.com>
Reviewed-by: @jalehman
This commit is contained in:
Kai
2026-03-06 03:30:24 +08:00
committed by GitHub
parent 72cf9253fc
commit 60a6d11116
5 changed files with 199 additions and 17 deletions

View File

@@ -278,6 +278,118 @@ describe("resolveModel", () => {
expect(result.model?.reasoning).toBe(true);
});
it("prefers configured provider api metadata over discovered registry model", () => {
  // Registry carries a stale cached snapshot: old base URL, wrong api flavor,
  // tiny context window.
  mockDiscoveredModel({
    provider: "onehub",
    modelId: "glm-5",
    templateModel: {
      id: "glm-5",
      name: "GLM-5 (cached)",
      provider: "onehub",
      api: "anthropic-messages",
      baseUrl: "https://old-provider.example.com/v1",
      reasoning: false,
      input: ["text"],
      cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
      contextWindow: 8192,
      maxTokens: 2048,
    },
  });
  // User config supplies fresher provider-level and per-model metadata,
  // which should take precedence over the discovered entry.
  const config = {
    models: {
      providers: {
        onehub: {
          baseUrl: "http://new-provider.example.com/v1",
          api: "openai-completions",
          models: [
            {
              ...makeModel("glm-5"),
              api: "openai-completions",
              reasoning: true,
              contextWindow: 198000,
              maxTokens: 16000,
            },
          ],
        },
      },
    },
  } as OpenClawConfig;

  const resolution = resolveModel("onehub", "glm-5", "/tmp/agent", config);

  expect(resolution.error).toBeUndefined();
  // Every overridable field reflects the configured values, not the cache.
  expect(resolution.model).toMatchObject({
    provider: "onehub",
    id: "glm-5",
    api: "openai-completions",
    baseUrl: "http://new-provider.example.com/v1",
    reasoning: true,
    contextWindow: 198000,
    maxTokens: 16000,
  });
});
it("prefers exact provider config over normalized alias match when both keys exist", () => {
  // Discovered registry model for the bare "qwen" provider id.
  mockDiscoveredModel({
    provider: "qwen",
    modelId: "qwen3-coder-plus",
    templateModel: {
      id: "qwen3-coder-plus",
      name: "Qwen3 Coder Plus",
      provider: "qwen",
      api: "openai-completions",
      baseUrl: "https://default-provider.example.com/v1",
      reasoning: false,
      input: ["text"],
      cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
      contextWindow: 8192,
      maxTokens: 2048,
    },
  });
  // Two config keys could match "qwen": "qwen-portal" via normalization and
  // "qwen" exactly. The exact key must win.
  const config = {
    models: {
      providers: {
        "qwen-portal": {
          baseUrl: "https://canonical-provider.example.com/v1",
          api: "openai-completions",
          headers: { "X-Provider": "canonical" },
          models: [{ ...makeModel("qwen3-coder-plus"), reasoning: false }],
        },
        qwen: {
          baseUrl: "https://alias-provider.example.com/v1",
          api: "anthropic-messages",
          headers: { "X-Provider": "alias" },
          models: [
            {
              ...makeModel("qwen3-coder-plus"),
              api: "anthropic-messages",
              reasoning: true,
              contextWindow: 262144,
              maxTokens: 32768,
            },
          ],
        },
      },
    },
  } as OpenClawConfig;

  const resolution = resolveModel("qwen", "qwen3-coder-plus", "/tmp/agent", config);

  expect(resolution.error).toBeUndefined();
  // NOTE(review): the expected baseUrl drops the "/v1" suffix present in the
  // config value — presumably normalizeModelCompat strips it for
  // anthropic-messages endpoints; confirm against that helper.
  expect(resolution.model).toMatchObject({
    provider: "qwen",
    id: "qwen3-coder-plus",
    api: "anthropic-messages",
    baseUrl: "https://alias-provider.example.com",
    reasoning: true,
    contextWindow: 262144,
    maxTokens: 32768,
    headers: { "X-Provider": "alias" },
  });
});
it("builds an openai-codex fallback for gpt-5.3-codex", () => {
mockOpenAICodexTemplateModel();

View File

@@ -7,7 +7,7 @@ import { DEFAULT_CONTEXT_TOKENS } from "../defaults.js";
import { buildModelAliasLines } from "../model-alias-lines.js";
import { normalizeModelCompat } from "../model-compat.js";
import { resolveForwardCompatModel } from "../model-forward-compat.js";
import { normalizeProviderId } from "../model-selection.js";
import { findNormalizedProviderValue, normalizeProviderId } from "../model-selection.js";
import { discoverAuthStorage, discoverModels } from "../pi-model-discovery.js";
type InlineModelEntry = ModelDefinitionConfig & {
@@ -24,6 +24,60 @@ type InlineProviderConfig = {
export { buildModelAliasLines };
/**
 * Looks up the user-configured provider entry for `provider`.
 *
 * Resolution order: an exact key in `cfg.models.providers` wins; otherwise
 * fall back to a normalized-alias lookup (so e.g. differently-cased or
 * aliased provider ids still match). Returns `undefined` when no provider
 * config exists at all.
 */
function resolveConfiguredProviderConfig(
  cfg: OpenClawConfig | undefined,
  provider: string,
): InlineProviderConfig | undefined {
  const configuredProviders = cfg?.models?.providers;
  if (!configuredProviders) {
    return undefined;
  }
  // Exact key short-circuits; only then try the normalized match.
  return configuredProviders[provider] ?? findNormalizedProviderValue(configuredProviders, provider);
}
/**
 * Layers user-configured provider/model metadata on top of a discovered
 * registry model.
 *
 * Precedence per field: per-model config > provider-level config > discovered
 * value. Headers are merged (discovered, then provider-level, then per-model,
 * later entries winning). Returns the discovered model untouched when there is
 * nothing to override, so callers can rely on reference equality in that case.
 */
function applyConfiguredProviderOverrides(params: {
  discoveredModel: Model<Api>;
  providerConfig?: InlineProviderConfig;
  modelId: string;
}): Model<Api> {
  const { discoveredModel, providerConfig, modelId } = params;
  if (!providerConfig) {
    return discoveredModel;
  }
  const configuredModel = providerConfig.models?.find((entry) => entry.id === modelId);
  const hasProviderLevelOverride = Boolean(
    providerConfig.baseUrl || providerConfig.api || providerConfig.headers,
  );
  // Nothing configured for this model and no provider-wide override: no-op.
  if (!configuredModel && !hasProviderLevelOverride) {
    return discoveredModel;
  }
  // Merge headers only when an override actually supplies some; otherwise
  // keep the discovered headers object as-is (possibly undefined).
  const mergedHeaders =
    providerConfig.headers || configuredModel?.headers
      ? {
          ...discoveredModel.headers,
          ...providerConfig.headers,
          ...configuredModel?.headers,
        }
      : discoveredModel.headers;
  return {
    ...discoveredModel,
    api: configuredModel?.api ?? providerConfig.api ?? discoveredModel.api,
    baseUrl: providerConfig.baseUrl ?? discoveredModel.baseUrl,
    reasoning: configuredModel?.reasoning ?? discoveredModel.reasoning,
    input: configuredModel?.input ?? discoveredModel.input,
    cost: configuredModel?.cost ?? discoveredModel.cost,
    contextWindow: configuredModel?.contextWindow ?? discoveredModel.contextWindow,
    maxTokens: configuredModel?.maxTokens ?? discoveredModel.maxTokens,
    headers: mergedHeaders,
    compat: configuredModel?.compat ?? discoveredModel.compat,
  };
}
export function buildInlineProviderModels(
providers: Record<string, InlineProviderConfig>,
): InlineModelEntry[] {
@@ -59,6 +113,7 @@ export function resolveModel(
const resolvedAgentDir = agentDir ?? resolveOpenClawAgentDir();
const authStorage = discoverAuthStorage(resolvedAgentDir);
const modelRegistry = discoverModels(authStorage, resolvedAgentDir);
const providerConfig = resolveConfiguredProviderConfig(cfg, provider);
const model = modelRegistry.find(provider, modelId) as Model<Api> | null;
if (!model) {
@@ -100,7 +155,7 @@ export function resolveModel(
} as Model<Api>);
return { model: fallbackModel, authStorage, modelRegistry };
}
const providerCfg = providers[provider];
const providerCfg = providerConfig;
if (providerCfg || modelId.startsWith("mock-")) {
const configuredModel = providerCfg?.models?.find((candidate) => candidate.id === modelId);
const fallbackModel: Model<Api> = normalizeModelCompat({
@@ -133,21 +188,17 @@ export function resolveModel(
modelRegistry,
};
}
const providerOverride = cfg?.models?.providers?.[provider] as InlineProviderConfig | undefined;
if (providerOverride?.baseUrl || providerOverride?.headers) {
const overridden: Model<Api> & { headers?: Record<string, string> } = { ...model };
if (providerOverride.baseUrl) {
overridden.baseUrl = providerOverride.baseUrl;
}
if (providerOverride.headers) {
overridden.headers = {
...(model as Model<Api> & { headers?: Record<string, string> }).headers,
...providerOverride.headers,
};
}
return { model: normalizeModelCompat(overridden), authStorage, modelRegistry };
}
return { model: normalizeModelCompat(model), authStorage, modelRegistry };
return {
model: normalizeModelCompat(
applyConfiguredProviderOverrides({
discoveredModel: model,
providerConfig,
modelId,
}),
),
authStorage,
modelRegistry,
};
}
/**