fix(ollama): prioritize provider baseUrl for embedded runner (#30964)

* fix(ollama): honor provider baseUrl in embedded runner

* Embedded Ollama: clarify provider baseUrl precedence comment

* Changelog: note embedded Ollama baseUrl precedence fix

* Telegram: apply required formatter update in accounts config merge

* Revert "Telegram: apply required formatter update in accounts config merge"

This reverts commit d372b26975.

* Update CHANGELOG.md

---------

Co-authored-by: User <user@example.com>
Co-authored-by: Vincent Koc <vincentkoc@ieee.org>
This commit is contained in:
不做了睡大觉
2026-03-02 10:38:42 +08:00
committed by GitHub
parent fd341d0d3f
commit e482da6682
3 changed files with 47 additions and 4 deletions

View File

@@ -3,6 +3,7 @@ import type { OpenClawConfig } from "../../../config/config.js";
import {
isOllamaCompatProvider,
resolveAttemptFsWorkspaceOnly,
resolveOllamaBaseUrlForRun,
resolveOllamaCompatNumCtxEnabled,
resolvePromptBuildHookResult,
resolvePromptModeForSession,
@@ -285,6 +286,29 @@ describe("isOllamaCompatProvider", () => {
});
});
describe("resolveOllamaBaseUrlForRun", () => {
it("prefers provider baseUrl over model baseUrl", () => {
expect(
resolveOllamaBaseUrlForRun({
modelBaseUrl: "http://model-host:11434",
providerBaseUrl: "http://provider-host:11434",
}),
).toBe("http://provider-host:11434");
});
it("falls back to model baseUrl when provider baseUrl is missing", () => {
expect(
resolveOllamaBaseUrlForRun({
modelBaseUrl: "http://model-host:11434",
}),
).toBe("http://model-host:11434");
});
it("falls back to native default when neither baseUrl is configured", () => {
expect(resolveOllamaBaseUrlForRun({})).toBe("http://127.0.0.1:11434");
});
});
describe("wrapOllamaCompatNumCtx", () => {
it("injects num_ctx and preserves downstream onPayload hooks", () => {
let payloadSeen: Record<string, unknown> | undefined;

View File

@@ -258,6 +258,21 @@ function normalizeToolCallNameForDispatch(rawName: string, allowedToolNames?: Se
return caseInsensitiveMatch ?? trimmed;
}
/**
 * Resolve the Ollama base URL for an embedded run.
 *
 * Precedence: a non-blank provider-level baseUrl wins, then a non-blank
 * model-level baseUrl, then the native default (OLLAMA_NATIVE_BASE_URL).
 * Candidate values are trimmed; whitespace-only strings are treated as unset.
 *
 * @param params.modelBaseUrl    optional baseUrl configured on the model
 * @param params.providerBaseUrl optional baseUrl configured on the provider
 * @returns the trimmed URL to use for direct /api/chat calls
 */
export function resolveOllamaBaseUrlForRun(params: {
  modelBaseUrl?: string;
  providerBaseUrl?: string;
}): string {
  // Ordered by precedence: provider config first, then model config.
  const candidates = [params.providerBaseUrl, params.modelBaseUrl];
  for (const candidate of candidates) {
    const trimmed = candidate?.trim();
    if (trimmed) {
      return trimmed;
    }
  }
  return OLLAMA_NATIVE_BASE_URL;
}
function trimWhitespaceFromToolCallNamesInMessage(
message: unknown,
allowedToolNames?: Set<string>,
@@ -902,13 +917,16 @@ export async function runEmbeddedAttempt(
// Ollama native API: bypass SDK's streamSimple and use direct /api/chat calls
// for reliable streaming + tool calling support (#11828).
if (params.model.api === "ollama") {
// Use the resolved model baseUrl first so custom provider aliases work.
// Prioritize configured provider baseUrl so Docker/remote Ollama hosts work reliably.
const providerConfig = params.config?.models?.providers?.[params.model.provider];
const modelBaseUrl =
typeof params.model.baseUrl === "string" ? params.model.baseUrl.trim() : "";
typeof params.model.baseUrl === "string" ? params.model.baseUrl : undefined;
const providerBaseUrl =
typeof providerConfig?.baseUrl === "string" ? providerConfig.baseUrl.trim() : "";
const ollamaBaseUrl = modelBaseUrl || providerBaseUrl || OLLAMA_NATIVE_BASE_URL;
typeof providerConfig?.baseUrl === "string" ? providerConfig.baseUrl : undefined;
const ollamaBaseUrl = resolveOllamaBaseUrlForRun({
modelBaseUrl,
providerBaseUrl,
});
activeSession.agent.streamFn = createOllamaStreamFn(ollamaBaseUrl);
} else if (params.model.api === "openai-responses" && params.provider === "openai") {
const wsApiKey = await params.authStorage.getApiKey(params.provider);