Mirror of https://github.com/openclaw/openclaw.git, synced 2026-05-11 08:11:42 +00:00
fix(agents): skip Ollama discovery when explicit models configured (#28762)
@@ -103,6 +103,7 @@ Docs: https://docs.openclaw.ai
 ### Fixes
 
 - FS tools/workspaceOnly: honor `tools.fs.workspaceOnly=false` for host write and edit operations so FS tools can access paths outside the workspace when sandbox is off. (#28822) Thanks @lailoo. Fixes #28763. Thanks @cjscld for reporting.
+- Ollama/Model discovery: skip model discovery when explicit models are configured, preventing 5s timeout delays at startup for remote Ollama hosts. (#28762)
 - Telegram/DM allowlist runtime inheritance: enforce `dmPolicy: "allowlist"` `allowFrom` requirements using effective account-plus-parent config across account-capable channels (Telegram, Discord, Slack, Signal, iMessage, IRC, BlueBubbles, WhatsApp), and align `openclaw doctor` checks to the same inheritance logic so DM traffic is not silently dropped after upgrades. (#27936) Thanks @widingmarcus-cyber.
 - Delivery queue/recovery backoff: prevent retry starvation by persisting `lastAttemptAt` on failed sends and deferring recovery retries until each entry's `lastAttemptAt + backoff` window is eligible, while continuing to recover ready entries behind deferred ones. Landed from contributor PR #27710. Thanks @Jimmy-xuzimo.
 - Gemini OAuth/Auth flow: align OAuth project discovery metadata and endpoint fallback handling for Gemini CLI auth, including fallback coverage for environment-provided project IDs. (#16684) Thanks @vincentkoc.
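In practice, the new fast path triggers whenever the `ollama` entry under explicit providers carries a non-empty `models` list. A minimal sketch of such a call, with values borrowed from the test hunk below (the `agentDir` path is a placeholder and the model entry is trimmed; `resolveImplicitProviders` and the field names come from this commit's diff):

```ts
// Sketch: explicit models are present, so no HTTP probe of the remote host occurs.
const providers = await resolveImplicitProviders({
  agentDir: "/tmp/agent", // placeholder; the test uses a fresh temp directory
  explicitProviders: {
    ollama: {
      baseUrl: "http://remote-ollama:11434", // previously probed with a 5s timeout
      api: "ollama",
      models: [{ id: "gpt-oss:20b", name: "GPT-OSS 20B" }], // non-empty => discovery skipped
    },
  },
});
```

When `models` is absent or empty, the previous behavior is unchanged: `buildOllamaProvider` still probes the configured `baseUrl` for local discovery, as the implementation hunk further down shows.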
@@ -194,4 +194,40 @@ describe("Ollama provider", () => {
     // Native Ollama provider does not need streaming: false workaround
     expect(mockOllamaModel).not.toHaveProperty("params");
   });
+
+  it("should skip discovery when explicit models are configured", async () => {
+    const agentDir = mkdtempSync(join(tmpdir(), "openclaw-test-"));
+    process.env.OLLAMA_API_KEY = "test-key";
+
+    try {
+      const explicitModels = [
+        {
+          id: "gpt-oss:20b",
+          name: "GPT-OSS 20B",
+          reasoning: false,
+          input: ["text"] as const,
+          cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
+          contextWindow: 8192,
+          maxTokens: 81920,
+        },
+      ];
+
+      const providers = await resolveImplicitProviders({
+        agentDir,
+        explicitProviders: {
+          ollama: {
+            baseUrl: "http://remote-ollama:11434",
+            api: "ollama",
+            models: explicitModels,
+          },
+        },
+      });
+
+      // Should use explicit models, not run discovery
+      expect(providers?.ollama?.models).toEqual(explicitModels);
+      expect(providers?.ollama?.baseUrl).toBe("http://remote-ollama:11434");
+    } finally {
+      delete process.env.OLLAMA_API_KEY;
+    }
+  });
 });
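The test above relies on `mkdtempSync`, `tmpdir`, and `join` without the hunk showing where they come from; presumably the unchanged portion of the test file already imports them. For reference, the standard Node.js imports would be:

```ts
// Standard Node.js imports the test depends on (likely already present in the
// unchanged portion of the test file).
import { mkdtempSync } from "node:fs";
import { tmpdir } from "node:os";
import { join } from "node:path";
```

Note that the test never removes the temp directory; `mkdtempSync` leaves it behind unless the suite cleans up elsewhere.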
@@ -1021,11 +1021,22 @@ export async function resolveImplicitProviders(params: {
   // Ollama provider - auto-discover if running locally, or add if explicitly configured.
   // Use the user's configured baseUrl (from explicit providers) for model
   // discovery so that remote / non-default Ollama instances are reachable.
+  // Skip discovery when explicit models are already defined.
   const ollamaKey =
     resolveEnvApiKeyVarName("ollama") ??
     resolveApiKeyFromProfiles({ provider: "ollama", store: authStore });
-  const ollamaBaseUrl = params.explicitProviders?.ollama?.baseUrl;
-  const hasExplicitOllamaConfig = Boolean(params.explicitProviders?.ollama);
+  const explicitOllama = params.explicitProviders?.ollama;
+  const hasExplicitModels =
+    Array.isArray(explicitOllama?.models) && explicitOllama.models.length > 0;
+  if (hasExplicitModels) {
+    providers.ollama = {
+      ...explicitOllama,
+      api: explicitOllama.api ?? "ollama",
+      apiKey: ollamaKey ?? "ollama-local",
+    };
+  } else {
+    const ollamaBaseUrl = explicitOllama?.baseUrl;
+    const hasExplicitOllamaConfig = Boolean(explicitOllama);
     // Only suppress warnings for implicit local probing when user has not
     // explicitly configured Ollama.
     const ollamaProvider = await buildOllamaProvider(ollamaBaseUrl, {
@@ -1037,6 +1048,7 @@ export async function resolveImplicitProviders(params: {
       apiKey: ollamaKey ?? "ollama-local",
     };
   }
+  }
 
   // vLLM provider - OpenAI-compatible local server (opt-in via env/profile).
   // If explicitly configured, keep user-defined models/settings as-is.
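Two fallbacks in the new branch are worth noting: `api` defaults to `"ollama"` and `apiKey` falls back to `"ollama-local"` when neither an env var nor a profile key resolves, while the spread keeps every other user-supplied field intact. For the test's configuration, the branch resolves to roughly the following (a sketch; `ollamaKey` is whatever the env/profile lookup returned, and the test sets `OLLAMA_API_KEY` so a key is expected to resolve):

```ts
// Sketch of what the explicit-models branch produces for the test's config.
providers.ollama = {
  baseUrl: "http://remote-ollama:11434", // preserved via the spread
  api: "ollama",                         // user-supplied; "ollama" if omitted
  models: explicitModels,                // passed through, no discovery round-trip
  apiKey: ollamaKey ?? "ollama-local",   // falls back when no key resolves
};
```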