Onboarding: enforce custom model context minimum

Vignesh Natarajan
2026-02-28 13:37:21 -08:00
parent e90429794a
commit a623c9c8d2
3 changed files with 104 additions and 2 deletions
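For context, the tests in this diff exercise a clamp on custom model context windows: a value below CONTEXT_WINDOW_HARD_MIN_TOKENS is raised to that minimum, while a larger value passes through unchanged. A minimal TypeScript sketch of that behavior follows; the import path and constant name come from the diff, but enforceContextWindowMin and the CustomModelEntry shape are hypothetical illustrations inferred from the test fixtures, not the actual implementation inside applyCustomApiConfig (which is not shown in this hunk).

// Sketch only: hypothetical helper illustrating the clamp the tests below verify.
import { CONTEXT_WINDOW_HARD_MIN_TOKENS } from "../agents/context-window-guard.js";

// Shape inferred from the test fixtures in this diff.
interface CustomModelEntry {
  id: string;
  name: string;
  contextWindow: number;
  maxTokens: number;
  input: string[];
  cost: { input: number; output: number; cacheRead: number; cacheWrite: number };
  reasoning: boolean;
}

// Raise a too-small context window to the hard minimum; leave larger values untouched.
function enforceContextWindowMin(model: CustomModelEntry): CustomModelEntry {
  return {
    ...model,
    contextWindow: Math.max(model.contextWindow, CONTEXT_WINDOW_HARD_MIN_TOKENS),
  };
}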

@@ -1,4 +1,5 @@
import { afterEach, describe, expect, it, vi } from "vitest";
import { CONTEXT_WINDOW_HARD_MIN_TOKENS } from "../agents/context-window-guard.js";
import { defaultRuntime } from "../runtime.js";
import {
  applyCustomApiConfig,
@@ -326,6 +327,91 @@ describe("promptCustomApiConfig", () => {
});
describe("applyCustomApiConfig", () => {
  it("uses hard-min context window for newly added custom models", () => {
    const result = applyCustomApiConfig({
      config: {},
      baseUrl: "https://llm.example.com/v1",
      modelId: "foo-large",
      compatibility: "openai",
      providerId: "custom",
    });
    const model = result.config.models?.providers?.custom?.models?.find(
      (entry) => entry.id === "foo-large",
    );
    expect(model?.contextWindow).toBe(CONTEXT_WINDOW_HARD_MIN_TOKENS);
  });
  it("upgrades existing custom model context window when below hard minimum", () => {
    const result = applyCustomApiConfig({
      config: {
        models: {
          providers: {
            custom: {
              api: "openai-completions",
              baseUrl: "https://llm.example.com/v1",
              models: [
                {
                  id: "foo-large",
                  name: "foo-large",
                  contextWindow: 4096,
                  maxTokens: 1024,
                  input: ["text"],
                  cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
                  reasoning: false,
                },
              ],
            },
          },
        },
      },
      baseUrl: "https://llm.example.com/v1",
      modelId: "foo-large",
      compatibility: "openai",
      providerId: "custom",
    });
    const model = result.config.models?.providers?.custom?.models?.find(
      (entry) => entry.id === "foo-large",
    );
    expect(model?.contextWindow).toBe(CONTEXT_WINDOW_HARD_MIN_TOKENS);
  });
  it("preserves existing custom model context window when already above minimum", () => {
    const result = applyCustomApiConfig({
      config: {
        models: {
          providers: {
            custom: {
              api: "openai-completions",
              baseUrl: "https://llm.example.com/v1",
              models: [
                {
                  id: "foo-large",
                  name: "foo-large",
                  contextWindow: 131072,
                  maxTokens: 4096,
                  input: ["text"],
                  cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
                  reasoning: false,
                },
              ],
            },
          },
        },
      },
      baseUrl: "https://llm.example.com/v1",
      modelId: "foo-large",
      compatibility: "openai",
      providerId: "custom",
    });
    const model = result.config.models?.providers?.custom?.models?.find(
      (entry) => entry.id === "foo-large",
    );
    expect(model?.contextWindow).toBe(131072);
  });
  it.each([
    {
      name: "invalid compatibility values at runtime",