Onboarding: add vLLM provider support

This commit is contained in:
gejifeng
2026-02-09 10:20:45 +00:00
committed by Peter Steinberger
parent 54bf5d0f41
commit e73d881c50
19 changed files with 555 additions and 3 deletions

View File

@@ -134,4 +134,14 @@ describe("buildAuthChoiceOptions", () => {
expect(options.some((opt) => opt.value === "xai-api-key")).toBe(true);
});
it("includes vLLM auth choice", () => {
  // An empty profile store must not hide the vLLM entry — it is always offered.
  const emptyStore: AuthProfileStore = { version: 1, profiles: {} };
  const choiceOptions = buildAuthChoiceOptions({
    store: emptyStore,
    includeSkip: false,
  });
  expect(choiceOptions.some((opt) => opt.value === "vllm")).toBe(true);
});
});

View File

@@ -10,6 +10,7 @@ export type AuthChoiceOption = {
export type AuthChoiceGroupId =
| "openai"
| "anthropic"
| "vllm"
| "google"
| "copilot"
| "openrouter"
@@ -54,6 +55,12 @@ const AUTH_CHOICE_GROUP_DEFS: {
hint: "setup-token + API key",
choices: ["token", "apiKey"],
},
{
value: "vllm",
label: "vLLM",
hint: "Local/self-hosted OpenAI-compatible",
choices: ["vllm"],
},
{
value: "minimax",
label: "MiniMax",
@@ -182,6 +189,11 @@ export function buildAuthChoiceOptions(params: {
label: "OpenAI Codex (ChatGPT OAuth)",
});
options.push({ value: "chutes", label: "Chutes (OAuth)" });
options.push({
value: "vllm",
label: "vLLM (custom URL + model)",
hint: "Local/self-hosted OpenAI-compatible server",
});
options.push({ value: "openai-api-key", label: "OpenAI API key" });
options.push({ value: "xai-api-key", label: "xAI (Grok) API key" });
options.push({

View File

@@ -12,6 +12,7 @@ import { applyAuthChoiceMiniMax } from "./auth-choice.apply.minimax.js";
import { applyAuthChoiceOAuth } from "./auth-choice.apply.oauth.js";
import { applyAuthChoiceOpenAI } from "./auth-choice.apply.openai.js";
import { applyAuthChoiceQwenPortal } from "./auth-choice.apply.qwen-portal.js";
import { applyAuthChoiceVllm } from "./auth-choice.apply.vllm.js";
import { applyAuthChoiceXAI } from "./auth-choice.apply.xai.js";
export type ApplyAuthChoiceParams = {
@@ -42,6 +43,7 @@ export async function applyAuthChoice(
): Promise<ApplyAuthChoiceResult> {
const handlers: Array<(p: ApplyAuthChoiceParams) => Promise<ApplyAuthChoiceResult | null>> = [
applyAuthChoiceAnthropic,
applyAuthChoiceVllm,
applyAuthChoiceOpenAI,
applyAuthChoiceOAuth,
applyAuthChoiceApiProviders,

View File

@@ -0,0 +1,107 @@
import type { OpenClawConfig } from "../config/config.js";
import type { ApplyAuthChoiceParams, ApplyAuthChoiceResult } from "./auth-choice.apply.js";
import { upsertAuthProfile } from "../agents/auth-profiles.js";
// Default endpoint of a locally running vLLM server (OpenAI-compatible /v1 API).
const VLLM_DEFAULT_BASE_URL = "http://127.0.0.1:8000/v1";
// vLLM does not report model limits, so we register conservative catalog
// defaults — assumes a 128k context window; TODO confirm per deployment.
const VLLM_DEFAULT_CONTEXT_WINDOW = 128000;
const VLLM_DEFAULT_MAX_TOKENS = 8192;
// Self-hosted inference has no metered per-token billing, so all costs are zero.
const VLLM_DEFAULT_COST = {
  input: 0,
  output: 0,
  cacheRead: 0,
  cacheWrite: 0,
};
/**
 * Return a copy of `cfg` whose default agent model is `modelRef`.
 *
 * If the previous default-model value was an object carrying a `fallbacks`
 * list, that list is preserved; every other field of the old value is dropped.
 */
function applyVllmDefaultModel(cfg: OpenClawConfig, modelRef: string): OpenClawConfig {
  const previousModel = cfg.agents?.defaults?.model;
  let keptFallbacks: string[] | undefined;
  if (previousModel && typeof previousModel === "object" && "fallbacks" in previousModel) {
    keptFallbacks = (previousModel as { fallbacks?: string[] }).fallbacks;
  }
  // Only emit a `fallbacks` key when there was a truthy list to keep.
  const model = keptFallbacks
    ? { fallbacks: keptFallbacks, primary: modelRef }
    : { primary: modelRef };
  return {
    ...cfg,
    agents: {
      ...cfg.agents,
      defaults: {
        ...cfg.agents?.defaults,
        model,
      },
    },
  };
}
/**
 * Handle the "vllm" auth choice: prompt for base URL, API key, and model id,
 * store the key as the `vllm:default` auth profile, and register a single
 * vLLM provider/model entry in the config.
 *
 * Returns null when the choice is not "vllm" so the next handler can run.
 */
export async function applyAuthChoiceVllm(
  params: ApplyAuthChoiceParams,
): Promise<ApplyAuthChoiceResult | null> {
  if (params.authChoice !== "vllm") {
    return null;
  }

  // Prompt order matters for the wizard UX (and for tests that feed answers
  // sequentially): base URL first, then API key, then model id.
  const baseUrlAnswer = await params.prompter.text({
    message: "vLLM base URL",
    initialValue: VLLM_DEFAULT_BASE_URL,
    placeholder: VLLM_DEFAULT_BASE_URL,
    validate: (value) => (value?.trim() ? undefined : "Required"),
  });
  const apiKeyAnswer = await params.prompter.text({
    message: "vLLM API key",
    placeholder: "sk-... (or any non-empty string)",
    validate: (value) => (value?.trim() ? undefined : "Required"),
  });
  const modelAnswer = await params.prompter.text({
    message: "vLLM model",
    placeholder: "meta-llama/Meta-Llama-3-8B-Instruct",
    validate: (value) => (value?.trim() ? undefined : "Required"),
  });

  // Normalize input: trim everything and strip trailing slashes from the
  // base URL so later path joins don't produce "//".
  const baseUrl = String(baseUrlAnswer ?? "")
    .trim()
    .replace(/\/+$/, "");
  const apiKey = String(apiKeyAnswer ?? "").trim();
  const modelId = String(modelAnswer ?? "").trim();
  const modelRef = `vllm/${modelId}`;

  // The secret lives in the auth-profile store under a fixed id; the config
  // below refers to it via the VLLM_API_KEY placeholder, never inline.
  upsertAuthProfile({
    profileId: "vllm:default",
    credential: { type: "api_key", provider: "vllm", key: apiKey },
    agentDir: params.agentDir,
  });

  const nextConfig: OpenClawConfig = {
    ...params.config,
    models: {
      ...params.config.models,
      // Preserve an explicitly configured mode; default to "merge".
      mode: params.config.models?.mode ?? "merge",
      providers: {
        ...params.config.models?.providers,
        vllm: {
          baseUrl,
          api: "openai-completions",
          apiKey: "VLLM_API_KEY",
          models: [
            {
              id: modelId,
              name: modelId,
              reasoning: false,
              input: ["text"],
              cost: VLLM_DEFAULT_COST,
              contextWindow: VLLM_DEFAULT_CONTEXT_WINDOW,
              maxTokens: VLLM_DEFAULT_MAX_TOKENS,
            },
          ],
        },
      },
    },
  };

  if (!params.setDefaultModel) {
    // Caller only wants a per-agent override, not a new global default.
    return { config: nextConfig, agentModelOverride: modelRef };
  }
  await params.prompter.note(`Default model set to ${modelRef}`, "Model configured");
  return { config: applyVllmDefaultModel(nextConfig, modelRef) };
}

View File

@@ -6,6 +6,7 @@ const PREFERRED_PROVIDER_BY_AUTH_CHOICE: Partial<Record<AuthChoice, string>> = {
"claude-cli": "anthropic",
token: "anthropic",
apiKey: "anthropic",
vllm: "vllm",
"openai-codex": "openai-codex",
"codex-cli": "openai-codex",
chutes: "chutes",

View File

@@ -77,6 +77,9 @@ export async function promptAuthConfig(
ignoreAllowlist: true,
preferredProvider: resolvePreferredProviderForAuthChoice(authChoice),
});
if (modelSelection.config) {
next = modelSelection.config;
}
if (modelSelection.model) {
next = applyPrimaryModel(next, modelSelection.model);
}

View File

@@ -20,9 +20,11 @@ const ensureAuthProfileStore = vi.hoisted(() =>
})),
);
const listProfilesForProvider = vi.hoisted(() => vi.fn(() => []));
const upsertAuthProfile = vi.hoisted(() => vi.fn());
vi.mock("../agents/auth-profiles.js", () => ({
ensureAuthProfileStore,
listProfilesForProvider,
upsertAuthProfile,
}));
const resolveEnvApiKey = vi.hoisted(() => vi.fn(() => undefined));
@@ -68,6 +70,53 @@ describe("promptDefaultModel", () => {
true,
);
});
it("supports configuring vLLM during onboarding", async () => {
  // Catalog only advertises an Anthropic model; vLLM must still be reachable
  // through the dedicated "__vllm__" picker entry.
  loadModelCatalog.mockResolvedValue([
    {
      provider: "anthropic",
      id: "claude-sonnet-4-5",
      name: "Claude Sonnet 4.5",
    },
  ]);
  // Text answers are consumed in prompt order: base URL, API key, model id.
  const text = vi
    .fn()
    .mockResolvedValueOnce("http://127.0.0.1:8000/v1")
    .mockResolvedValueOnce("sk-vllm-test")
    .mockResolvedValueOnce("meta-llama/Meta-Llama-3-8B-Instruct");
  // Always pick the vLLM option from whatever list the wizard presents.
  const select = vi.fn(async (params) => {
    const vllmOption = params.options.find((opt: { value: string }) => opt.value === "__vllm__");
    return (vllmOption?.value ?? "") as never;
  });
  const prompter = makePrompter({ select, text: text as never });
  const config = { agents: { defaults: {} } } as OpenClawConfig;
  const result = await promptDefaultModel({
    config,
    prompter,
    allowKeep: false,
    includeManual: false,
    includeVllm: true,
    ignoreAllowlist: true,
  });
  // The API key must land in the auth-profile store, not in the config.
  expect(upsertAuthProfile).toHaveBeenCalledWith(
    expect.objectContaining({
      profileId: "vllm:default",
      credential: expect.objectContaining({ provider: "vllm" }),
    }),
  );
  expect(result.model).toBe("vllm/meta-llama/Meta-Llama-3-8B-Instruct");
  expect(result.config?.models?.providers?.vllm).toMatchObject({
    baseUrl: "http://127.0.0.1:8000/v1",
    api: "openai-completions",
    apiKey: "VLLM_API_KEY",
    models: [
      { id: "meta-llama/Meta-Llama-3-8B-Instruct", name: "meta-llama/Meta-Llama-3-8B-Instruct" },
    ],
  });
});
});
describe("promptModelAllowlist", () => {

View File

@@ -1,6 +1,10 @@
import type { OpenClawConfig } from "../config/config.js";
import type { WizardPrompter, WizardSelectOption } from "../wizard/prompts.js";
import { ensureAuthProfileStore, listProfilesForProvider } from "../agents/auth-profiles.js";
import {
ensureAuthProfileStore,
listProfilesForProvider,
upsertAuthProfile,
} from "../agents/auth-profiles.js";
import { DEFAULT_MODEL, DEFAULT_PROVIDER } from "../agents/defaults.js";
import { getCustomProviderApiKey, resolveEnvApiKey } from "../agents/model-auth.js";
import { loadModelCatalog } from "../agents/model-catalog.js";
@@ -16,7 +20,17 @@ import { OPENAI_CODEX_DEFAULT_MODEL } from "./openai-codex-model-default.js";
// Sentinel option values for the model picker (never valid model refs).
const KEEP_VALUE = "__keep__";
const MANUAL_VALUE = "__manual__";
const VLLM_VALUE = "__vllm__";
// Above this many catalog entries, show a provider filter step first.
const PROVIDER_FILTER_THRESHOLD = 30;
// Default endpoint of a locally running vLLM server (OpenAI-compatible /v1 API).
// NOTE(review): these vLLM defaults duplicate the ones in the vllm auth-choice
// apply module — consider extracting a shared constant.
const VLLM_DEFAULT_BASE_URL = "http://127.0.0.1:8000/v1";
// Conservative catalog defaults; vLLM does not report model limits —
// assumes a 128k context window, TODO confirm per deployment.
const VLLM_DEFAULT_CONTEXT_WINDOW = 128000;
const VLLM_DEFAULT_MAX_TOKENS = 8192;
// Self-hosted inference has no metered per-token billing, so all costs are zero.
const VLLM_DEFAULT_COST = {
  input: 0,
  output: 0,
  cacheRead: 0,
  cacheWrite: 0,
};
// Models that are internal routing features and should not be shown in selection lists.
// These may be valid as defaults (e.g., set automatically during auth flow) but are not
@@ -28,13 +42,14 @@ type PromptDefaultModelParams = {
prompter: WizardPrompter;
allowKeep?: boolean;
includeManual?: boolean;
includeVllm?: boolean;
ignoreAllowlist?: boolean;
preferredProvider?: string;
agentDir?: string;
message?: string;
};
type PromptDefaultModelResult = { model?: string };
type PromptDefaultModelResult = { model?: string; config?: OpenClawConfig };
type PromptModelAllowlistResult = { models?: string[] };
function hasAuthForProvider(
@@ -107,6 +122,7 @@ export async function promptDefaultModel(
const cfg = params.config;
const allowKeep = params.allowKeep ?? true;
const includeManual = params.includeManual ?? true;
const includeVllm = params.includeVllm ?? false;
const ignoreAllowlist = params.ignoreAllowlist ?? false;
const preferredProviderRaw = params.preferredProvider?.trim();
const preferredProvider = preferredProviderRaw
@@ -212,6 +228,13 @@ export async function promptDefaultModel(
if (includeManual) {
options.push({ value: MANUAL_VALUE, label: "Enter model manually" });
}
if (includeVllm) {
options.push({
value: VLLM_VALUE,
label: "vLLM (custom)",
hint: "Enter vLLM URL + API key + model",
});
}
const seen = new Set<string>();
const addModelOption = (entry: {
@@ -295,6 +318,65 @@ export async function promptDefaultModel(
initialValue: configuredRaw || resolvedKey || undefined,
});
}
if (selection === VLLM_VALUE) {
const baseUrlRaw = await params.prompter.text({
message: "vLLM base URL",
initialValue: VLLM_DEFAULT_BASE_URL,
placeholder: VLLM_DEFAULT_BASE_URL,
validate: (value) => (value?.trim() ? undefined : "Required"),
});
const apiKeyRaw = await params.prompter.text({
message: "vLLM API key",
placeholder: "sk-... (or any non-empty string)",
validate: (value) => (value?.trim() ? undefined : "Required"),
});
const modelIdRaw = await params.prompter.text({
message: "vLLM model",
placeholder: "meta-llama/Meta-Llama-3-8B-Instruct",
validate: (value) => (value?.trim() ? undefined : "Required"),
});
const baseUrl = String(baseUrlRaw ?? "")
.trim()
.replace(/\/+$/, "");
const apiKey = String(apiKeyRaw ?? "").trim();
const modelId = String(modelIdRaw ?? "").trim();
upsertAuthProfile({
profileId: "vllm:default",
credential: { type: "api_key", provider: "vllm", key: apiKey },
agentDir: params.agentDir,
});
const nextConfig: OpenClawConfig = {
...cfg,
models: {
...cfg.models,
mode: cfg.models?.mode ?? "merge",
providers: {
...cfg.models?.providers,
vllm: {
baseUrl,
api: "openai-completions",
apiKey: "VLLM_API_KEY",
models: [
{
id: modelId,
name: modelId,
reasoning: false,
input: ["text"],
cost: VLLM_DEFAULT_COST,
contextWindow: VLLM_DEFAULT_CONTEXT_WINDOW,
maxTokens: VLLM_DEFAULT_MAX_TOKENS,
},
],
},
},
},
};
return { model: `vllm/${modelId}`, config: nextConfig };
}
return { model: String(selection) };
}

View File

@@ -330,6 +330,24 @@ describe("onboard (non-interactive): provider auth", () => {
});
}, 60_000);
it("rejects vLLM auth choice in non-interactive mode", async () => {
  await withOnboardEnv("openclaw-onboard-vllm-non-interactive-", async ({ runtime }) => {
    // vLLM setup needs interactive prompts (URL/key/model), so the
    // non-interactive path must fail loudly instead of half-configuring.
    const run = runNonInteractive(
      {
        nonInteractive: true,
        authChoice: "vllm",
        skipHealth: true,
        skipChannels: true,
        skipSkills: true,
        json: true,
      },
      runtime,
    );
    await expect(run).rejects.toThrow('Auth choice "vllm" requires interactive mode.');
  });
}, 60_000);
it("stores LiteLLM API key and sets default model", async () => {
await withOnboardEnv("openclaw-onboard-litellm-", async ({ configPath, runtime }) => {
await runNonInteractive(

View File

@@ -88,6 +88,17 @@ export async function applyNonInteractiveAuthChoice(params: {
return null;
}
if (authChoice === "vllm") {
runtime.error(
[
'Auth choice "vllm" requires interactive mode.',
"Use interactive onboard/configure to enter base URL, API key, and model ID.",
].join("\n"),
);
runtime.exit(1);
return null;
}
if (authChoice === "apiKey") {
const resolved = await resolveNonInteractiveApiKey({
provider: "anthropic",

View File

@@ -9,6 +9,7 @@ export type AuthChoice =
| "claude-cli"
| "token"
| "chutes"
| "vllm"
| "openai-codex"
| "openai-api-key"
| "openrouter-api-key"