mirror of
https://github.com/openclaw/openclaw.git
synced 2026-05-09 04:37:40 +00:00
Pi Runner: gate parallel_tool_calls to compatible APIs (#39356)
* Pi Runner: gate parallel_tool_calls payload injection
* Pi Runner: cover parallel_tool_calls alias precedence
* Changelog: note parallel_tool_calls compatibility fix
* Update CHANGELOG.md
* Pi Runner: clarify null parallel_tool_calls override logging
This commit is contained in:
@@ -116,6 +116,39 @@ describe("resolveExtraParams", () => {
|
||||
});
|
||||
});
|
||||
|
||||
it("preserves higher-precedence agent parallelToolCalls override across alias styles", () => {
|
||||
const result = resolveExtraParams({
|
||||
cfg: {
|
||||
agents: {
|
||||
defaults: {
|
||||
models: {
|
||||
"openai/gpt-4.1": {
|
||||
params: {
|
||||
parallel_tool_calls: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
list: [
|
||||
{
|
||||
id: "main",
|
||||
params: {
|
||||
parallelToolCalls: false,
|
||||
},
|
||||
},
|
||||
],
|
||||
},
|
||||
},
|
||||
provider: "openai",
|
||||
modelId: "gpt-4.1",
|
||||
agentId: "main",
|
||||
});
|
||||
|
||||
expect(result).toEqual({
|
||||
parallel_tool_calls: false,
|
||||
});
|
||||
});
|
||||
|
||||
it("ignores per-agent params when agentId does not match", () => {
|
||||
const result = resolveExtraParams({
|
||||
cfg: {
|
||||
@@ -190,6 +223,32 @@ describe("applyExtraParamsToAgent", () => {
|
||||
return payload;
|
||||
}
|
||||
|
||||
function runParallelToolCallsPayloadMutationCase(params: {
|
||||
applyProvider: string;
|
||||
applyModelId: string;
|
||||
model: Model<"openai-completions"> | Model<"openai-responses"> | Model<"anthropic-messages">;
|
||||
cfg?: Record<string, unknown>;
|
||||
extraParamsOverride?: Record<string, unknown>;
|
||||
payload?: Record<string, unknown>;
|
||||
}) {
|
||||
const payload = params.payload ?? {};
|
||||
const baseStreamFn: StreamFn = (_model, _context, options) => {
|
||||
options?.onPayload?.(payload);
|
||||
return {} as ReturnType<StreamFn>;
|
||||
};
|
||||
const agent = { streamFn: baseStreamFn };
|
||||
applyExtraParamsToAgent(
|
||||
agent,
|
||||
params.cfg as Parameters<typeof applyExtraParamsToAgent>[1],
|
||||
params.applyProvider,
|
||||
params.applyModelId,
|
||||
params.extraParamsOverride,
|
||||
);
|
||||
const context: Context = { messages: [] };
|
||||
void agent.streamFn?.(params.model, context, {});
|
||||
return payload;
|
||||
}
|
||||
|
||||
function runAnthropicHeaderCase(params: {
|
||||
cfg: Record<string, unknown>;
|
||||
modelId: string;
|
||||
@@ -350,6 +409,181 @@ describe("applyExtraParamsToAgent", () => {
|
||||
expect(payloads[0]).not.toHaveProperty("reasoning_effort");
|
||||
});
|
||||
|
||||
it("injects parallel_tool_calls for openai-completions payloads when configured", () => {
|
||||
const payload = runParallelToolCallsPayloadMutationCase({
|
||||
applyProvider: "nvidia-nim",
|
||||
applyModelId: "moonshotai/kimi-k2.5",
|
||||
cfg: {
|
||||
agents: {
|
||||
defaults: {
|
||||
models: {
|
||||
"nvidia-nim/moonshotai/kimi-k2.5": {
|
||||
params: {
|
||||
parallel_tool_calls: false,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
model: {
|
||||
api: "openai-completions",
|
||||
provider: "nvidia-nim",
|
||||
id: "moonshotai/kimi-k2.5",
|
||||
} as Model<"openai-completions">,
|
||||
});
|
||||
|
||||
expect(payload.parallel_tool_calls).toBe(false);
|
||||
});
|
||||
|
||||
it("injects parallel_tool_calls for openai-responses payloads when configured", () => {
|
||||
const payload = runParallelToolCallsPayloadMutationCase({
|
||||
applyProvider: "openai",
|
||||
applyModelId: "gpt-5",
|
||||
cfg: {
|
||||
agents: {
|
||||
defaults: {
|
||||
models: {
|
||||
"openai/gpt-5": {
|
||||
params: {
|
||||
parallelToolCalls: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
model: {
|
||||
api: "openai-responses",
|
||||
provider: "openai",
|
||||
id: "gpt-5",
|
||||
baseUrl: "https://api.openai.com/v1",
|
||||
} as unknown as Model<"openai-responses">,
|
||||
});
|
||||
|
||||
expect(payload.parallel_tool_calls).toBe(true);
|
||||
});
|
||||
|
||||
it("does not inject parallel_tool_calls for unsupported APIs", () => {
|
||||
const payload = runParallelToolCallsPayloadMutationCase({
|
||||
applyProvider: "anthropic",
|
||||
applyModelId: "claude-sonnet-4-6",
|
||||
cfg: {
|
||||
agents: {
|
||||
defaults: {
|
||||
models: {
|
||||
"anthropic/claude-sonnet-4-6": {
|
||||
params: {
|
||||
parallel_tool_calls: false,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
model: {
|
||||
api: "anthropic-messages",
|
||||
provider: "anthropic",
|
||||
id: "claude-sonnet-4-6",
|
||||
} as Model<"anthropic-messages">,
|
||||
});
|
||||
|
||||
expect(payload).not.toHaveProperty("parallel_tool_calls");
|
||||
});
|
||||
|
||||
it("lets runtime override win across alias styles for parallel_tool_calls", () => {
|
||||
const payload = runParallelToolCallsPayloadMutationCase({
|
||||
applyProvider: "nvidia-nim",
|
||||
applyModelId: "moonshotai/kimi-k2.5",
|
||||
cfg: {
|
||||
agents: {
|
||||
defaults: {
|
||||
models: {
|
||||
"nvidia-nim/moonshotai/kimi-k2.5": {
|
||||
params: {
|
||||
parallel_tool_calls: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
extraParamsOverride: {
|
||||
parallelToolCalls: false,
|
||||
},
|
||||
model: {
|
||||
api: "openai-completions",
|
||||
provider: "nvidia-nim",
|
||||
id: "moonshotai/kimi-k2.5",
|
||||
} as Model<"openai-completions">,
|
||||
});
|
||||
|
||||
expect(payload.parallel_tool_calls).toBe(false);
|
||||
});
|
||||
|
||||
it("lets null runtime override suppress inherited parallel_tool_calls injection", () => {
|
||||
const payload = runParallelToolCallsPayloadMutationCase({
|
||||
applyProvider: "nvidia-nim",
|
||||
applyModelId: "moonshotai/kimi-k2.5",
|
||||
cfg: {
|
||||
agents: {
|
||||
defaults: {
|
||||
models: {
|
||||
"nvidia-nim/moonshotai/kimi-k2.5": {
|
||||
params: {
|
||||
parallel_tool_calls: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
extraParamsOverride: {
|
||||
parallelToolCalls: null,
|
||||
},
|
||||
model: {
|
||||
api: "openai-completions",
|
||||
provider: "nvidia-nim",
|
||||
id: "moonshotai/kimi-k2.5",
|
||||
} as Model<"openai-completions">,
|
||||
});
|
||||
|
||||
expect(payload).not.toHaveProperty("parallel_tool_calls");
|
||||
});
|
||||
|
||||
it("warns and skips invalid parallel_tool_calls values", () => {
|
||||
const warnSpy = vi.spyOn(log, "warn").mockImplementation(() => undefined);
|
||||
try {
|
||||
const payload = runParallelToolCallsPayloadMutationCase({
|
||||
applyProvider: "nvidia-nim",
|
||||
applyModelId: "moonshotai/kimi-k2.5",
|
||||
cfg: {
|
||||
agents: {
|
||||
defaults: {
|
||||
models: {
|
||||
"nvidia-nim/moonshotai/kimi-k2.5": {
|
||||
params: {
|
||||
parallelToolCalls: "false",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
model: {
|
||||
api: "openai-completions",
|
||||
provider: "nvidia-nim",
|
||||
id: "moonshotai/kimi-k2.5",
|
||||
} as Model<"openai-completions">,
|
||||
});
|
||||
|
||||
expect(payload).not.toHaveProperty("parallel_tool_calls");
|
||||
expect(warnSpy).toHaveBeenCalledWith("ignoring invalid parallel_tool_calls param: false");
|
||||
} finally {
|
||||
warnSpy.mockRestore();
|
||||
}
|
||||
});
|
||||
|
||||
it("normalizes thinking=off to null for SiliconFlow Pro models", () => {
|
||||
const payloads: Record<string, unknown>[] = [];
|
||||
const baseStreamFn: StreamFn = (_model, _context, options) => {
|
||||
|
||||
@@ -49,7 +49,18 @@ export function resolveExtraParams(params: {
|
||||
return undefined;
|
||||
}
|
||||
|
||||
return Object.assign({}, globalParams, agentParams);
|
||||
const merged = Object.assign({}, globalParams, agentParams);
|
||||
const resolvedParallelToolCalls = resolveAliasedParamValue(
|
||||
[globalParams, agentParams],
|
||||
"parallel_tool_calls",
|
||||
"parallelToolCalls",
|
||||
);
|
||||
if (resolvedParallelToolCalls !== undefined) {
|
||||
merged.parallel_tool_calls = resolvedParallelToolCalls;
|
||||
delete merged.parallelToolCalls;
|
||||
}
|
||||
|
||||
return merged;
|
||||
}
|
||||
|
||||
type CacheRetention = "none" | "short" | "long";
|
||||
@@ -1108,6 +1119,53 @@ function createZaiToolStreamWrapper(
|
||||
};
|
||||
}
|
||||
|
||||
function resolveAliasedParamValue(
|
||||
sources: Array<Record<string, unknown> | undefined>,
|
||||
snakeCaseKey: string,
|
||||
camelCaseKey: string,
|
||||
): unknown {
|
||||
let resolved: unknown = undefined;
|
||||
let seen = false;
|
||||
for (const source of sources) {
|
||||
if (!source) {
|
||||
continue;
|
||||
}
|
||||
const hasSnakeCaseKey = Object.hasOwn(source, snakeCaseKey);
|
||||
const hasCamelCaseKey = Object.hasOwn(source, camelCaseKey);
|
||||
if (!hasSnakeCaseKey && !hasCamelCaseKey) {
|
||||
continue;
|
||||
}
|
||||
resolved = hasSnakeCaseKey ? source[snakeCaseKey] : source[camelCaseKey];
|
||||
seen = true;
|
||||
}
|
||||
return seen ? resolved : undefined;
|
||||
}
|
||||
|
||||
function createParallelToolCallsWrapper(
|
||||
baseStreamFn: StreamFn | undefined,
|
||||
enabled: boolean,
|
||||
): StreamFn {
|
||||
const underlying = baseStreamFn ?? streamSimple;
|
||||
return (model, context, options) => {
|
||||
if (model.api !== "openai-completions" && model.api !== "openai-responses") {
|
||||
return underlying(model, context, options);
|
||||
}
|
||||
log.debug(
|
||||
`applying parallel_tool_calls=${enabled} for ${model.provider ?? "unknown"}/${model.id ?? "unknown"} api=${model.api}`,
|
||||
);
|
||||
const originalOnPayload = options?.onPayload;
|
||||
return underlying(model, context, {
|
||||
...options,
|
||||
onPayload: (payload) => {
|
||||
if (payload && typeof payload === "object") {
|
||||
(payload as Record<string, unknown>).parallel_tool_calls = enabled;
|
||||
}
|
||||
originalOnPayload?.(payload);
|
||||
},
|
||||
});
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Apply extra params (like temperature) to an agent's streamFn.
|
||||
* Also adds OpenRouter app attribution headers when using the OpenRouter provider.
|
||||
@@ -1123,7 +1181,7 @@ export function applyExtraParamsToAgent(
|
||||
thinkingLevel?: ThinkLevel,
|
||||
agentId?: string,
|
||||
): void {
|
||||
const extraParams = resolveExtraParams({
|
||||
const resolvedExtraParams = resolveExtraParams({
|
||||
cfg,
|
||||
provider,
|
||||
modelId,
|
||||
@@ -1142,7 +1200,7 @@ export function applyExtraParamsToAgent(
|
||||
Object.entries(extraParamsOverride).filter(([, value]) => value !== undefined),
|
||||
)
|
||||
: undefined;
|
||||
const merged = Object.assign({}, extraParams, override);
|
||||
const merged = Object.assign({}, resolvedExtraParams, override);
|
||||
const wrappedStreamFn = createStreamFnWithExtraParams(agent.streamFn, merged, provider);
|
||||
|
||||
if (wrappedStreamFn) {
|
||||
@@ -1238,4 +1296,23 @@ export function applyExtraParamsToAgent(
|
||||
// Force `store=true` for direct OpenAI Responses models and auto-enable
|
||||
// server-side compaction for compatible OpenAI Responses payloads.
|
||||
agent.streamFn = createOpenAIResponsesContextManagementWrapper(agent.streamFn, merged);
|
||||
|
||||
const rawParallelToolCalls = resolveAliasedParamValue(
|
||||
[resolvedExtraParams, override],
|
||||
"parallel_tool_calls",
|
||||
"parallelToolCalls",
|
||||
);
|
||||
if (rawParallelToolCalls !== undefined) {
|
||||
if (typeof rawParallelToolCalls === "boolean") {
|
||||
agent.streamFn = createParallelToolCallsWrapper(agent.streamFn, rawParallelToolCalls);
|
||||
} else if (rawParallelToolCalls === null) {
|
||||
log.debug("parallel_tool_calls suppressed by null override, skipping injection");
|
||||
} else {
|
||||
const summary =
|
||||
typeof rawParallelToolCalls === "string"
|
||||
? rawParallelToolCalls
|
||||
: typeof rawParallelToolCalls;
|
||||
log.warn(`ignoring invalid parallel_tool_calls param: ${summary}`);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Reference in New Issue
Block a user