fix(agents): skip Ollama discovery when explicit models configured (#28762)

This commit is contained in:
damaozi
2026-02-27 22:32:28 +08:00
committed by Vincent Koc
parent be8a5b9d64
commit deb9560a2b
3 changed files with 58 additions and 9 deletions

View File

@@ -194,4 +194,40 @@ describe("Ollama provider", () => {
// Native Ollama provider does not need streaming: false workaround
expect(mockOllamaModel).not.toHaveProperty("params");
});
// Regression test: when the user supplies an explicit `models` list for the
// ollama provider, resolveImplicitProviders must return that list verbatim
// (and the configured baseUrl) instead of probing the server for models.
it("should skip discovery when explicit models are configured", async () => {
// Isolated temp agent dir; setting an API key in the env makes the provider
// eligible even though no local Ollama instance is running.
const agentDir = mkdtempSync(join(tmpdir(), "openclaw-test-"));
process.env.OLLAMA_API_KEY = "test-key";
try {
// Minimal explicit model entry in the provider's model-metadata shape.
// NOTE(review): maxTokens (81920) exceeds contextWindow (8192) — looks like
// a typo (8192 vs 81920); harmless for this test but worth confirming.
const explicitModels = [
{
id: "gpt-oss:20b",
name: "GPT-OSS 20B",
reasoning: false,
input: ["text"] as const,
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
contextWindow: 8192,
maxTokens: 81920,
},
];
// A non-default (remote) baseUrl is used so that any accidental discovery
// attempt would target an unreachable host rather than a local daemon.
const providers = await resolveImplicitProviders({
agentDir,
explicitProviders: {
ollama: {
baseUrl: "http://remote-ollama:11434",
api: "ollama",
models: explicitModels,
},
},
});
// Should use explicit models, not run discovery
expect(providers?.ollama?.models).toEqual(explicitModels);
expect(providers?.ollama?.baseUrl).toBe("http://remote-ollama:11434");
} finally {
// Always clean up the env var so sibling tests see an unmodified env.
delete process.env.OLLAMA_API_KEY;
}
});
});

View File

@@ -1021,21 +1021,33 @@ export async function resolveImplicitProviders(params: {
// Ollama provider - auto-discover if running locally, or add if explicitly configured.
// Use the user's configured baseUrl (from explicit providers) for model
// discovery so that remote / non-default Ollama instances are reachable.
// Skip discovery when explicit models are already defined.
const ollamaKey =
resolveEnvApiKeyVarName("ollama") ??
resolveApiKeyFromProfiles({ provider: "ollama", store: authStore });
const ollamaBaseUrl = params.explicitProviders?.ollama?.baseUrl;
const hasExplicitOllamaConfig = Boolean(params.explicitProviders?.ollama);
// Only suppress warnings for implicit local probing when user has not
// explicitly configured Ollama.
const ollamaProvider = await buildOllamaProvider(ollamaBaseUrl, {
quiet: !ollamaKey && !hasExplicitOllamaConfig,
});
if (ollamaProvider.models.length > 0 || ollamaKey) {
const explicitOllama = params.explicitProviders?.ollama;
const hasExplicitModels =
Array.isArray(explicitOllama?.models) && explicitOllama.models.length > 0;
if (hasExplicitModels) {
providers.ollama = {
...ollamaProvider,
...explicitOllama,
api: explicitOllama.api ?? "ollama",
apiKey: ollamaKey ?? "ollama-local",
};
} else {
const ollamaBaseUrl = explicitOllama?.baseUrl;
const hasExplicitOllamaConfig = Boolean(explicitOllama);
// Only suppress warnings for implicit local probing when user has not
// explicitly configured Ollama.
const ollamaProvider = await buildOllamaProvider(ollamaBaseUrl, {
quiet: !ollamaKey && !hasExplicitOllamaConfig,
});
if (ollamaProvider.models.length > 0 || ollamaKey) {
providers.ollama = {
...ollamaProvider,
apiKey: ollamaKey ?? "ollama-local",
};
}
}
// vLLM provider - OpenAI-compatible local server (opt-in via env/profile).