mirror of
https://github.com/openclaw/openclaw.git
synced 2026-04-18 11:17:27 +00:00
* Agents: add subagent orchestration controls
* Agents: add subagent orchestration controls (WIP uncommitted changes)
* feat(subagents): add depth-based spawn gating for sub-sub-agents
* feat(subagents): tool policy, registry, and announce chain for nested agents
* feat(subagents): system prompt, docs, changelog for nested sub-agents
* fix(subagents): prevent model fallback override, show model during active runs, and block context overflow fallback
Bug 1: When a session has an explicit model override (e.g., gpt/openai-codex),
the fallback candidate logic in resolveFallbackCandidates silently appended the
global primary model (opus) as a backstop. On reinjection/steer with a transient
error, the session could fall back to opus which has a smaller context window
and crash. Fix: when storedModelOverride is set, pass fallbacksOverride ?? []
instead of undefined, preventing the implicit primary backstop.
Bug 2: Active subagents showed 'model n/a' in /subagents list because
resolveModelDisplay only read entry.model/modelProvider (populated after run
completes). Fix: fall back to modelOverride/providerOverride fields which are
populated at spawn time via sessions.patch.
Bug 3: Context overflow errors (prompt too long, context_length_exceeded) could
theoretically escape runEmbeddedPiAgent and be treated as failover candidates
in runWithModelFallback, causing a switch to a model with a smaller context
window. Fix: in runWithModelFallback, detect context overflow errors via
isLikelyContextOverflowError and rethrow them immediately instead of trying the
next model candidate.
* fix(subagents): track spawn depth in session store and fix announce routing for nested agents
* Fix compaction status tracking and dedupe overflow compaction triggers
* fix(subagents): enforce depth block via session store and implement cascade kill
* fix: inject group chat context into system prompt
* fix(subagents): always write model to session store at spawn time
* Preserve spawnDepth when agent handler rewrites session entry
* fix(subagents): suppress announce on steer-restart
* fix(subagents): fallback spawned session model to runtime default
* fix(subagents): enforce spawn depth when caller key resolves by sessionId
* feat(subagents): implement active-first ordering for numeric targets and enhance task display
- Added a test to verify that subagents with numeric targets follow an active-first list ordering.
- Updated `resolveSubagentTarget` to sort subagent runs based on active status and recent activity.
- Enhanced task display in command responses to prevent truncation of long task descriptions.
- Introduced new utility functions for compacting task text and managing subagent run states.
* fix(subagents): show model for active runs via run record fallback
When the spawned model matches the agent's default model, the session
store's override fields are intentionally cleared (isDefault: true).
The model/modelProvider fields are only populated after the run
completes. This left active subagents showing 'model n/a'.
Fix: store the resolved model on SubagentRunRecord at registration
time, and use it as a fallback in both display paths (subagents tool
and /subagents command) when the session store entry has no model info.
Changes:
- SubagentRunRecord: add optional model field
- registerSubagentRun: accept and persist model param
- sessions-spawn-tool: pass resolvedModel to registerSubagentRun
- subagents-tool: pass run record model as fallback to resolveModelDisplay
- commands-subagents: pass run record model as fallback to resolveModelDisplay
* feat(chat): implement session key resolution and reset on sidebar navigation
- Added functions to resolve the main session key and reset chat state when switching sessions from the sidebar.
- Updated the `renderTab` function to handle session key changes when navigating to the chat tab.
- Introduced a test to verify that the session resets to "main" when opening chat from the sidebar navigation.
* fix: subagent timeout=0 passthrough and fallback prompt duplication
Bug 1: runTimeoutSeconds=0 now means 'no timeout' instead of applying 600s default
- sessions-spawn-tool: default to undefined (not 0) when neither timeout param
is provided; use != null check so explicit 0 passes through to gateway
- agent.ts: accept 0 as valid timeout (resolveAgentTimeoutMs already handles
0 → MAX_SAFE_TIMEOUT_MS)
Bug 2: model fallback no longer re-injects the original prompt as a duplicate
- agent.ts: track fallback attempt index; on retries use a short continuation
message instead of the full original prompt since the session file already
contains it from the first attempt
- Also skip re-sending images on fallback retries (already in session)
* feat(subagents): truncate long task descriptions in subagents command output
- Introduced a new utility function to format task previews, limiting their length to improve readability.
- Updated the command handler to use the new formatting function, ensuring task descriptions are truncated appropriately.
- Adjusted related tests to verify that long task descriptions are now truncated in the output.
* refactor(subagents): update subagent registry path resolution and improve command output formatting
- Replaced direct import of STATE_DIR with a utility function to resolve the state directory dynamically.
- Enhanced the formatting of command output for active and recent subagents, adding separators for better readability.
- Updated related tests to reflect changes in command output structure.
* fix(subagent): default sessions_spawn to no timeout when runTimeoutSeconds omitted
The previous fix (75a791106) correctly handled the case where
runTimeoutSeconds was explicitly set to 0 ("no timeout"). However,
when models omit the parameter entirely (which is common since the
schema marks it as optional), runTimeoutSeconds resolved to undefined.
undefined flowed through the chain as:
sessions_spawn → timeout: undefined (since undefined != null is false)
→ gateway agent handler → agentCommand opts.timeout: undefined
→ resolveAgentTimeoutMs({ overrideSeconds: undefined })
→ DEFAULT_AGENT_TIMEOUT_SECONDS (600s = 10 minutes)
This caused subagents to be killed at exactly 10 minutes even though
the user's intent (via TOOLS.md) was for subagents to run without a
timeout.
Fix: default runTimeoutSeconds to 0 (no timeout) when neither
runTimeoutSeconds nor timeoutSeconds is provided by the caller.
Subagent spawns are long-running by design and should not inherit the
600s agent-command default timeout.
* fix(subagent): accept timeout=0 in agent-via-gateway path (second 600s default)
* fix: thread timeout override through getReplyFromConfig dispatch path
getReplyFromConfig called resolveAgentTimeoutMs({ cfg }) with no override,
always falling back to the config default (600s). Add timeoutOverrideSeconds
to GetReplyOptions and pass it through as overrideSeconds so callers of the
dispatch chain can specify a custom timeout (0 = no timeout).
This complements the existing timeout threading in agentCommand and the
cron isolated-agent runner, which already pass overrideSeconds correctly.
* feat(model-fallback): normalize OpenAI Codex model references and enhance fallback handling
- Added normalization for OpenAI Codex model references, specifically converting "gpt-5.3-codex" to "openai-codex" before execution.
- Updated the `resolveFallbackCandidates` function to utilize the new normalization logic.
- Enhanced tests to verify the correct behavior of model normalization and fallback mechanisms.
- Introduced a new test case to ensure that the normalization process works as expected for various input formats.
* feat(tests): add unit tests for steer failure behavior in openclaw-tools
- Introduced a new test file to validate the behavior of subagents when steer replacement dispatch fails.
- Implemented tests to ensure that the announce behavior is restored correctly and that the suppression reason is cleared as expected.
- Enhanced the subagent registry with a new function to clear steer restart suppression.
- Updated related components to support the new test scenarios.
* fix(subagents): replace stop command with kill in slash commands and documentation
- Updated the `/subagents` command to replace `stop` with `kill` for consistency in controlling sub-agent runs.
- Modified related documentation to reflect the change in command usage.
- Removed legacy timeoutSeconds references from the sessions-spawn-tool schema and tests to streamline timeout handling.
- Enhanced tests to ensure correct behavior of the updated commands and their interactions.
* feat(tests): add unit tests for readLatestAssistantReply function
- Introduced a new test file for the `readLatestAssistantReply` function to validate its behavior with various message scenarios.
- Implemented tests to ensure the function correctly retrieves the latest assistant message and handles cases where the latest message has no text.
- Mocked the gateway call to simulate different message histories for comprehensive testing.
* feat(tests): enhance subagent kill-all cascade tests and announce formatting
- Added a new test to verify that the `kill-all` command cascades through ended parents to active descendants in subagents.
- Updated the subagent announce formatting tests to reflect changes in message structure, including the replacement of "Findings:" with "Result:" and the addition of new expectations for message content.
- Improved the handling of long findings and stats in the announce formatting logic to ensure concise output.
- Refactored related functions to enhance clarity and maintainability in the subagent registry and tools.
* refactor(subagent): update announce formatting and remove unused constants
- Modified the subagent announce formatting to replace "Findings:" with "Result:" and adjusted related expectations in tests.
- Removed constants for maximum announce findings characters and summary words, simplifying the announcement logic.
- Updated the handling of findings to retain full content instead of truncating, ensuring more informative outputs.
- Cleaned up unused imports in the commands-subagents file to enhance code clarity.
* feat(tests): enhance billing error handling in user-facing text
- Added tests to ensure that normal text mentioning billing plans is not rewritten, preserving user context.
- Updated the `isBillingErrorMessage` and `sanitizeUserFacingText` functions to improve handling of billing-related messages.
- Introduced new test cases for various scenarios involving billing messages to ensure accurate processing and output.
- Enhanced the subagent announce flow to correctly manage active descendant runs, preventing premature announcements.
* feat(subagent): enhance workflow guidance and auto-announcement clarity
- Added a new guideline in the subagent system prompt to emphasize trust in push-based completion, discouraging busy polling for status updates.
- Updated documentation to clarify that sub-agents will automatically announce their results, improving user understanding of the workflow.
- Enhanced tests to verify the new guidance on avoiding polling loops and to ensure the accuracy of the updated prompts.
* fix(cron): avoid announcing interim subagent spawn acks
* chore: clean post-rebase imports
* fix(cron): fall back to child replies when parent stays interim
* fix(subagents): make active-run guidance advisory
* fix(subagents): update announce flow to handle active descendants and enhance test coverage
- Modified the announce flow to defer announcements when active descendant runs are present, ensuring accurate status reporting.
- Updated tests to verify the new behavior, including scenarios where no fallback requester is available and ensuring proper handling of finished subagents.
- Enhanced the announce formatting to include an `expectFinal` flag for better clarity in the announcement process.
* fix(subagents): enhance announce flow and formatting for user updates
- Updated the announce flow to provide clearer instructions for user updates based on active subagent runs and requester context.
- Refactored the announcement logic to improve clarity and ensure internal context remains private.
- Enhanced tests to verify the new message expectations and formatting, including updated prompts for user-facing updates.
- Introduced a new function to build reply instructions based on session context, improving the overall announcement process.
* fix: resolve prep blockers and changelog placement (#14447) (thanks @tyler6204)
* fix: restore cron delivery-plan import after rebase (#14447) (thanks @tyler6204)
* fix: resolve test failures from rebase conflicts (#14447) (thanks @tyler6204)
* fix: apply formatting after rebase (#14447) (thanks @tyler6204)
752 lines
26 KiB
TypeScript
752 lines
26 KiB
TypeScript
import fs from "node:fs/promises";
|
|
import os from "node:os";
|
|
import path from "node:path";
|
|
import { afterAll, beforeAll, describe, expect, it, vi } from "vitest";
|
|
import type { OpenClawConfig } from "../../config/config.js";
|
|
import type { MsgContext } from "../templating.js";
|
|
import {
|
|
addSubagentRunForTests,
|
|
listSubagentRunsForRequester,
|
|
resetSubagentRegistryForTests,
|
|
} from "../../agents/subagent-registry.js";
|
|
import { updateSessionStore } from "../../config/sessions.js";
|
|
import * as internalHooks from "../../hooks/internal-hooks.js";
|
|
import { clearPluginCommands, registerPluginCommand } from "../../plugins/commands.js";
|
|
import { resetBashChatCommandForTests } from "./bash-command.js";
|
|
import { parseInlineDirectives } from "./directive-handling.js";
|
|
|
|
// Shared mock for outbound gateway calls. Individual tests reset it and
// install per-method implementations via callGatewayMock.mockImplementation.
const callGatewayMock = vi.fn();
vi.mock("../../gateway/call.js", () => ({
  // Delegate through the shared vi.fn so call arguments can be asserted on
  // and responses stubbed per test.
  callGateway: (opts: unknown) => callGatewayMock(opts),
}));
|
|
|
|
import { buildCommandContext, handleCommands } from "./commands.js";
|
|
|
|
// Avoid expensive workspace scans during /context tests.
|
|
vi.mock("./commands-context-report.js", () => ({
|
|
buildContextReply: async (params: { command: { commandBodyNormalized: string } }) => {
|
|
const normalized = params.command.commandBodyNormalized;
|
|
if (normalized === "/context list") {
|
|
return { text: "Injected workspace files:\n- AGENTS.md" };
|
|
}
|
|
if (normalized === "/context detail") {
|
|
return { text: "Context breakdown (detailed)\nTop tools (schema size):" };
|
|
}
|
|
return { text: "/context\n- /context list\nInline shortcut" };
|
|
},
|
|
}));
|
|
|
|
// Temp workspace used by tests that need real files on disk.
// Starts at os.tmpdir() as a safe default; replaced with a fresh mkdtemp dir
// in beforeAll and removed in afterAll.
let testWorkspaceDir = os.tmpdir();

beforeAll(async () => {
  testWorkspaceDir = await fs.mkdtemp(path.join(os.tmpdir(), "openclaw-commands-"));
  // Seed one workspace file so context-related commands have content to list.
  await fs.writeFile(path.join(testWorkspaceDir, "AGENTS.md"), "# Agents\n", "utf-8");
});

afterAll(async () => {
  await fs.rm(testWorkspaceDir, { recursive: true, force: true });
});
|
|
|
|
function buildParams(commandBody: string, cfg: OpenClawConfig, ctxOverrides?: Partial<MsgContext>) {
|
|
const ctx = {
|
|
Body: commandBody,
|
|
CommandBody: commandBody,
|
|
CommandSource: "text",
|
|
CommandAuthorized: true,
|
|
Provider: "whatsapp",
|
|
Surface: "whatsapp",
|
|
...ctxOverrides,
|
|
} as MsgContext;
|
|
|
|
const command = buildCommandContext({
|
|
ctx,
|
|
cfg,
|
|
isGroup: false,
|
|
triggerBodyNormalized: commandBody.trim().toLowerCase(),
|
|
commandAuthorized: true,
|
|
});
|
|
|
|
return {
|
|
ctx,
|
|
cfg,
|
|
command,
|
|
directives: parseInlineDirectives(commandBody),
|
|
elevated: { enabled: true, allowed: true, failures: [] },
|
|
sessionKey: "agent:main:main",
|
|
workspaceDir: testWorkspaceDir,
|
|
defaultGroupActivation: () => "mention",
|
|
resolvedVerboseLevel: "off" as const,
|
|
resolvedReasoningLevel: "off" as const,
|
|
resolveDefaultThinkingLevel: async () => undefined,
|
|
provider: "whatsapp",
|
|
model: "test-model",
|
|
contextTokens: 0,
|
|
isGroup: false,
|
|
};
|
|
}
|
|
|
|
describe("handleCommands gating", () => {
|
|
it("blocks /bash when disabled", async () => {
|
|
resetBashChatCommandForTests();
|
|
const cfg = {
|
|
commands: { bash: false, text: true },
|
|
whatsapp: { allowFrom: ["*"] },
|
|
} as OpenClawConfig;
|
|
const params = buildParams("/bash echo hi", cfg);
|
|
const result = await handleCommands(params);
|
|
expect(result.shouldContinue).toBe(false);
|
|
expect(result.reply?.text).toContain("bash is disabled");
|
|
});
|
|
|
|
it("blocks /bash when elevated is not allowlisted", async () => {
|
|
resetBashChatCommandForTests();
|
|
const cfg = {
|
|
commands: { bash: true, text: true },
|
|
whatsapp: { allowFrom: ["*"] },
|
|
} as OpenClawConfig;
|
|
const params = buildParams("/bash echo hi", cfg);
|
|
params.elevated = {
|
|
enabled: true,
|
|
allowed: false,
|
|
failures: [{ gate: "allowFrom", key: "tools.elevated.allowFrom.whatsapp" }],
|
|
};
|
|
const result = await handleCommands(params);
|
|
expect(result.shouldContinue).toBe(false);
|
|
expect(result.reply?.text).toContain("elevated is not available");
|
|
});
|
|
|
|
it("blocks /config when disabled", async () => {
|
|
const cfg = {
|
|
commands: { config: false, debug: false, text: true },
|
|
channels: { whatsapp: { allowFrom: ["*"] } },
|
|
} as OpenClawConfig;
|
|
const params = buildParams("/config show", cfg);
|
|
const result = await handleCommands(params);
|
|
expect(result.shouldContinue).toBe(false);
|
|
expect(result.reply?.text).toContain("/config is disabled");
|
|
});
|
|
|
|
it("blocks /debug when disabled", async () => {
|
|
const cfg = {
|
|
commands: { config: false, debug: false, text: true },
|
|
channels: { whatsapp: { allowFrom: ["*"] } },
|
|
} as OpenClawConfig;
|
|
const params = buildParams("/debug show", cfg);
|
|
const result = await handleCommands(params);
|
|
expect(result.shouldContinue).toBe(false);
|
|
expect(result.reply?.text).toContain("/debug is disabled");
|
|
});
|
|
});
|
|
|
|
describe("handleCommands bash alias", () => {
|
|
it("routes !poll through the /bash handler", async () => {
|
|
resetBashChatCommandForTests();
|
|
const cfg = {
|
|
commands: { bash: true, text: true },
|
|
whatsapp: { allowFrom: ["*"] },
|
|
} as OpenClawConfig;
|
|
const params = buildParams("!poll", cfg);
|
|
const result = await handleCommands(params);
|
|
expect(result.shouldContinue).toBe(false);
|
|
expect(result.reply?.text).toContain("No active bash job");
|
|
});
|
|
|
|
it("routes !stop through the /bash handler", async () => {
|
|
resetBashChatCommandForTests();
|
|
const cfg = {
|
|
commands: { bash: true, text: true },
|
|
whatsapp: { allowFrom: ["*"] },
|
|
} as OpenClawConfig;
|
|
const params = buildParams("!stop", cfg);
|
|
const result = await handleCommands(params);
|
|
expect(result.shouldContinue).toBe(false);
|
|
expect(result.reply?.text).toContain("No active bash job");
|
|
});
|
|
});
|
|
|
|
describe("handleCommands plugin commands", () => {
|
|
it("dispatches registered plugin commands", async () => {
|
|
clearPluginCommands();
|
|
const result = registerPluginCommand("test-plugin", {
|
|
name: "card",
|
|
description: "Test card",
|
|
handler: async () => ({ text: "from plugin" }),
|
|
});
|
|
expect(result.ok).toBe(true);
|
|
|
|
const cfg = {
|
|
commands: { text: true },
|
|
channels: { whatsapp: { allowFrom: ["*"] } },
|
|
} as OpenClawConfig;
|
|
const params = buildParams("/card", cfg);
|
|
const commandResult = await handleCommands(params);
|
|
|
|
expect(commandResult.shouldContinue).toBe(false);
|
|
expect(commandResult.reply?.text).toBe("from plugin");
|
|
clearPluginCommands();
|
|
});
|
|
});
|
|
|
|
describe("handleCommands identity", () => {
|
|
it("returns sender details for /whoami", async () => {
|
|
const cfg = {
|
|
commands: { text: true },
|
|
channels: { whatsapp: { allowFrom: ["*"] } },
|
|
} as OpenClawConfig;
|
|
const params = buildParams("/whoami", cfg, {
|
|
SenderId: "12345",
|
|
SenderUsername: "TestUser",
|
|
ChatType: "direct",
|
|
});
|
|
const result = await handleCommands(params);
|
|
expect(result.shouldContinue).toBe(false);
|
|
expect(result.reply?.text).toContain("Channel: whatsapp");
|
|
expect(result.reply?.text).toContain("User id: 12345");
|
|
expect(result.reply?.text).toContain("Username: @TestUser");
|
|
expect(result.reply?.text).toContain("AllowFrom: 12345");
|
|
});
|
|
});
|
|
|
|
describe("handleCommands hooks", () => {
|
|
it("triggers hooks for /new with arguments", async () => {
|
|
const cfg = {
|
|
commands: { text: true },
|
|
channels: { whatsapp: { allowFrom: ["*"] } },
|
|
} as OpenClawConfig;
|
|
const params = buildParams("/new take notes", cfg);
|
|
const spy = vi.spyOn(internalHooks, "triggerInternalHook").mockResolvedValue();
|
|
|
|
await handleCommands(params);
|
|
|
|
expect(spy).toHaveBeenCalledWith(expect.objectContaining({ type: "command", action: "new" }));
|
|
spy.mockRestore();
|
|
});
|
|
});
|
|
|
|
describe("handleCommands context", () => {
|
|
it("returns context help for /context", async () => {
|
|
const cfg = {
|
|
commands: { text: true },
|
|
channels: { whatsapp: { allowFrom: ["*"] } },
|
|
} as OpenClawConfig;
|
|
const params = buildParams("/context", cfg);
|
|
const result = await handleCommands(params);
|
|
expect(result.shouldContinue).toBe(false);
|
|
expect(result.reply?.text).toContain("/context list");
|
|
expect(result.reply?.text).toContain("Inline shortcut");
|
|
});
|
|
|
|
it("returns a per-file breakdown for /context list", async () => {
|
|
const cfg = {
|
|
commands: { text: true },
|
|
channels: { whatsapp: { allowFrom: ["*"] } },
|
|
} as OpenClawConfig;
|
|
const params = buildParams("/context list", cfg);
|
|
const result = await handleCommands(params);
|
|
expect(result.shouldContinue).toBe(false);
|
|
expect(result.reply?.text).toContain("Injected workspace files:");
|
|
expect(result.reply?.text).toContain("AGENTS.md");
|
|
});
|
|
|
|
it("returns a detailed breakdown for /context detail", async () => {
|
|
const cfg = {
|
|
commands: { text: true },
|
|
channels: { whatsapp: { allowFrom: ["*"] } },
|
|
} as OpenClawConfig;
|
|
const params = buildParams("/context detail", cfg);
|
|
const result = await handleCommands(params);
|
|
expect(result.shouldContinue).toBe(false);
|
|
expect(result.reply?.text).toContain("Context breakdown (detailed)");
|
|
expect(result.reply?.text).toContain("Top tools (schema size):");
|
|
});
|
|
});
|
|
|
|
describe("handleCommands subagents", () => {
|
|
it("lists subagents when none exist", async () => {
  resetSubagentRegistryForTests();
  callGatewayMock.mockReset();
  const cfg = {
    commands: { text: true },
    channels: { whatsapp: { allowFrom: ["*"] } },
  } as OpenClawConfig;
  const params = buildParams("/subagents list", cfg);
  const result = await handleCommands(params);
  expect(result.shouldContinue).toBe(false);
  // Both section headers and their "-----" separators render even when the
  // registry is empty, with a blank line before the "recent" section.
  expect(result.reply?.text).toContain("active subagents:");
  expect(result.reply?.text).toContain("active subagents:\n-----\n");
  expect(result.reply?.text).toContain("recent subagents (last 30m):");
  expect(result.reply?.text).toContain("\n\nrecent subagents (last 30m):");
  expect(result.reply?.text).toContain("recent subagents (last 30m):\n-----\n");
});

it("truncates long subagent task text in /subagents list", async () => {
  resetSubagentRegistryForTests();
  callGatewayMock.mockReset();
  addSubagentRunForTests({
    runId: "run-long-task",
    childSessionKey: "agent:main:subagent:long-task",
    requesterSessionKey: "agent:main:main",
    requesterDisplayKey: "main",
    task: "This is a deliberately long task description used to verify that subagent list output keeps the full task text instead of appending ellipsis after a short hard cutoff.",
    cleanup: "keep",
    createdAt: 1000,
    startedAt: 1000,
  });
  const cfg = {
    commands: { text: true },
    channels: { whatsapp: { allowFrom: ["*"] } },
  } as OpenClawConfig;
  const params = buildParams("/subagents list", cfg);
  const result = await handleCommands(params);
  expect(result.shouldContinue).toBe(false);
  // Output keeps a long leading prefix of the task, appends "..." and drops
  // the tail — i.e. a soft truncation rather than a short hard cutoff.
  expect(result.reply?.text).toContain(
    "This is a deliberately long task description used to verify that subagent list output keeps the full task text",
  );
  expect(result.reply?.text).toContain("...");
  expect(result.reply?.text).not.toContain("after a short hard cutoff.");
});
|
|
|
|
it("lists subagents for the current command session over the target session", async () => {
  resetSubagentRegistryForTests();
  callGatewayMock.mockReset();
  // Two runs owned by the slack slash-command session, not by the command's
  // target session ("agent:main:main").
  addSubagentRunForTests({
    runId: "run-1",
    childSessionKey: "agent:main:subagent:abc",
    requesterSessionKey: "agent:main:slack:slash:u1",
    requesterDisplayKey: "agent:main:slack:slash:u1",
    task: "do thing",
    cleanup: "keep",
    createdAt: 1000,
    startedAt: 1000,
  });
  addSubagentRunForTests({
    runId: "run-2",
    childSessionKey: "agent:main:subagent:def",
    requesterSessionKey: "agent:main:slack:slash:u1",
    requesterDisplayKey: "agent:main:slack:slash:u1",
    task: "another thing",
    cleanup: "keep",
    createdAt: 2000,
    startedAt: 2000,
  });
  const cfg = {
    commands: { text: true },
    channels: { whatsapp: { allowFrom: ["*"] } },
  } as OpenClawConfig;
  // Native command source with an explicit target session key; the listing
  // should still resolve runs via params.sessionKey (the current session).
  const params = buildParams("/subagents list", cfg, {
    CommandSource: "native",
    CommandTargetSessionKey: "agent:main:main",
  });
  params.sessionKey = "agent:main:slack:slash:u1";
  const result = await handleCommands(params);
  expect(result.shouldContinue).toBe(false);
  expect(result.reply?.text).toContain("active subagents:");
  expect(result.reply?.text).toContain("do thing");
  // NOTE(review): asserts no second numbered entry follows a blank line —
  // presumably checking the list is not duplicated across sections.
  expect(result.reply?.text).not.toContain("\n\n2.");
});

it("formats subagent usage with io and prompt/cache breakdown", async () => {
  resetSubagentRegistryForTests();
  callGatewayMock.mockReset();
  addSubagentRunForTests({
    runId: "run-usage",
    childSessionKey: "agent:main:subagent:usage",
    requesterSessionKey: "agent:main:main",
    requesterDisplayKey: "main",
    task: "do thing",
    cleanup: "keep",
    createdAt: 1000,
    startedAt: 1000,
  });
  // Persist token usage for the child session so the list can report it.
  const storePath = path.join(testWorkspaceDir, "sessions-subagents-usage.json");
  await updateSessionStore(storePath, (store) => {
    store["agent:main:subagent:usage"] = {
      sessionId: "child-session-usage",
      updatedAt: Date.now(),
      inputTokens: 12,
      outputTokens: 1000,
      totalTokens: 197000,
      model: "opencode/claude-opus-4-6",
    };
  });
  const cfg = {
    commands: { text: true },
    channels: { whatsapp: { allowFrom: ["*"] } },
    session: { store: storePath },
  } as OpenClawConfig;
  const params = buildParams("/subagents list", cfg);
  const result = await handleCommands(params);
  expect(result.shouldContinue).toBe(false);
  // io tokens (in+out) and the prompt/cache total are formatted separately;
  // the older "1k io" form must not appear.
  expect(result.reply?.text).toContain("tokens 1k (in 12 / out 1k)");
  expect(result.reply?.text).toContain("prompt/cache 197k");
  expect(result.reply?.text).not.toContain("1k io");
});
|
|
|
|
it("omits subagent status line when none exist", async () => {
  resetSubagentRegistryForTests();
  const cfg = {
    commands: { text: true },
    channels: { whatsapp: { allowFrom: ["*"] } },
    session: { mainKey: "main", scope: "per-sender" },
  } as OpenClawConfig;
  const params = buildParams("/status", cfg);
  // Even in verbose mode, /status shows no Subagents line with an empty registry.
  params.resolvedVerboseLevel = "on";
  const result = await handleCommands(params);
  expect(result.shouldContinue).toBe(false);
  expect(result.reply?.text).not.toContain("Subagents:");
});

it("returns help for unknown subagents action", async () => {
  resetSubagentRegistryForTests();
  callGatewayMock.mockReset();
  const cfg = {
    commands: { text: true },
    channels: { whatsapp: { allowFrom: ["*"] } },
  } as OpenClawConfig;
  const params = buildParams("/subagents foo", cfg);
  const result = await handleCommands(params);
  expect(result.shouldContinue).toBe(false);
  // Unknown action falls back to the /subagents usage/help text.
  expect(result.reply?.text).toContain("/subagents");
});

it("returns usage for subagents info without target", async () => {
  resetSubagentRegistryForTests();
  callGatewayMock.mockReset();
  const cfg = {
    commands: { text: true },
    channels: { whatsapp: { allowFrom: ["*"] } },
  } as OpenClawConfig;
  const params = buildParams("/subagents info", cfg);
  const result = await handleCommands(params);
  expect(result.shouldContinue).toBe(false);
  // Missing target argument yields the info-specific usage line.
  expect(result.reply?.text).toContain("/subagents info");
});

it("includes subagent count in /status when active", async () => {
  resetSubagentRegistryForTests();
  callGatewayMock.mockReset();
  // One active run (no endedAt) so /status should show a count.
  addSubagentRunForTests({
    runId: "run-1",
    childSessionKey: "agent:main:subagent:abc",
    requesterSessionKey: "agent:main:main",
    requesterDisplayKey: "main",
    task: "do thing",
    cleanup: "keep",
    createdAt: 1000,
    startedAt: 1000,
  });
  const cfg = {
    commands: { text: true },
    channels: { whatsapp: { allowFrom: ["*"] } },
    session: { mainKey: "main", scope: "per-sender" },
  } as OpenClawConfig;
  const params = buildParams("/status", cfg);
  const result = await handleCommands(params);
  expect(result.shouldContinue).toBe(false);
  expect(result.reply?.text).toContain("🤖 Subagents: 1 active");
});
|
|
|
|
it("includes subagent details in /status when verbose", async () => {
  resetSubagentRegistryForTests();
  callGatewayMock.mockReset();
  // One still-active run...
  addSubagentRunForTests({
    runId: "run-1",
    childSessionKey: "agent:main:subagent:abc",
    requesterSessionKey: "agent:main:main",
    requesterDisplayKey: "main",
    task: "do thing",
    cleanup: "keep",
    createdAt: 1000,
    startedAt: 1000,
  });
  // ...and one finished run (endedAt + ok outcome) to exercise the "done" tally.
  addSubagentRunForTests({
    runId: "run-2",
    childSessionKey: "agent:main:subagent:def",
    requesterSessionKey: "agent:main:main",
    requesterDisplayKey: "main",
    task: "finished task",
    cleanup: "keep",
    createdAt: 900,
    startedAt: 900,
    endedAt: 1200,
    outcome: { status: "ok" },
  });
  const cfg = {
    commands: { text: true },
    channels: { whatsapp: { allowFrom: ["*"] } },
    session: { mainKey: "main", scope: "per-sender" },
  } as OpenClawConfig;
  const params = buildParams("/status", cfg);
  params.resolvedVerboseLevel = "on";
  const result = await handleCommands(params);
  expect(result.shouldContinue).toBe(false);
  expect(result.reply?.text).toContain("🤖 Subagents: 1 active");
  expect(result.reply?.text).toContain("· 1 done");
});

it("returns info for a subagent", async () => {
  resetSubagentRegistryForTests();
  callGatewayMock.mockReset();
  const now = Date.now();
  // Recently finished run; timestamps relative to now keep it inside the
  // "recent" window so the numeric alias "1" resolves.
  addSubagentRunForTests({
    runId: "run-1",
    childSessionKey: "agent:main:subagent:abc",
    requesterSessionKey: "agent:main:main",
    requesterDisplayKey: "main",
    task: "do thing",
    cleanup: "keep",
    createdAt: now - 20_000,
    startedAt: now - 20_000,
    endedAt: now - 1_000,
    outcome: { status: "ok" },
  });
  const cfg = {
    commands: { text: true },
    channels: { whatsapp: { allowFrom: ["*"] } },
    session: { mainKey: "main", scope: "per-sender" },
  } as OpenClawConfig;
  const params = buildParams("/subagents info 1", cfg);
  const result = await handleCommands(params);
  expect(result.shouldContinue).toBe(false);
  expect(result.reply?.text).toContain("Subagent info");
  expect(result.reply?.text).toContain("Run: run-1");
  expect(result.reply?.text).toContain("Status: done");
});
|
|
|
|
it("kills subagents via /kill alias without a confirmation reply", async () => {
  resetSubagentRegistryForTests();
  callGatewayMock.mockReset();
  addSubagentRunForTests({
    runId: "run-1",
    childSessionKey: "agent:main:subagent:abc",
    requesterSessionKey: "agent:main:main",
    requesterDisplayKey: "main",
    task: "do thing",
    cleanup: "keep",
    createdAt: 1000,
    startedAt: 1000,
  });
  const cfg = {
    commands: { text: true },
    channels: { whatsapp: { allowFrom: ["*"] } },
  } as OpenClawConfig;
  const params = buildParams("/kill 1", cfg);
  const result = await handleCommands(params);
  expect(result.shouldContinue).toBe(false);
  // Kill is silent: the command is consumed without an explicit reply.
  expect(result.reply).toBeUndefined();
});

it("resolves numeric aliases in active-first display order", async () => {
  resetSubagentRegistryForTests();
  callGatewayMock.mockReset();
  const now = Date.now();
  // Older but still-active run: should be listed (and targeted) first...
  addSubagentRunForTests({
    runId: "run-active",
    childSessionKey: "agent:main:subagent:active",
    requesterSessionKey: "agent:main:main",
    requesterDisplayKey: "main",
    task: "active task",
    cleanup: "keep",
    createdAt: now - 120_000,
    startedAt: now - 120_000,
  });
  // ...ahead of this newer run that has already ended.
  addSubagentRunForTests({
    runId: "run-recent",
    childSessionKey: "agent:main:subagent:recent",
    requesterSessionKey: "agent:main:main",
    requesterDisplayKey: "main",
    task: "recent task",
    cleanup: "keep",
    createdAt: now - 30_000,
    startedAt: now - 30_000,
    endedAt: now - 10_000,
    outcome: { status: "ok" },
  });
  const cfg = {
    commands: { text: true },
    channels: { whatsapp: { allowFrom: ["*"] } },
  } as OpenClawConfig;
  // "/kill 1" should resolve to the active run under active-first ordering;
  // a successful kill produces no reply (see the previous test).
  const params = buildParams("/kill 1", cfg);
  const result = await handleCommands(params);
  expect(result.shouldContinue).toBe(false);
  expect(result.reply).toBeUndefined();
});
|
|
|
|
it("sends follow-up messages to finished subagents", async () => {
|
|
resetSubagentRegistryForTests();
|
|
callGatewayMock.mockReset();
|
|
callGatewayMock.mockImplementation(async (opts: unknown) => {
|
|
const request = opts as { method?: string; params?: { runId?: string } };
|
|
if (request.method === "agent") {
|
|
return { runId: "run-followup-1" };
|
|
}
|
|
if (request.method === "agent.wait") {
|
|
return { status: "done" };
|
|
}
|
|
if (request.method === "chat.history") {
|
|
return { messages: [] };
|
|
}
|
|
return {};
|
|
});
|
|
const now = Date.now();
|
|
addSubagentRunForTests({
|
|
runId: "run-1",
|
|
childSessionKey: "agent:main:subagent:abc",
|
|
requesterSessionKey: "agent:main:main",
|
|
requesterDisplayKey: "main",
|
|
task: "do thing",
|
|
cleanup: "keep",
|
|
createdAt: now - 20_000,
|
|
startedAt: now - 20_000,
|
|
endedAt: now - 1_000,
|
|
outcome: { status: "ok" },
|
|
});
|
|
const cfg = {
|
|
commands: { text: true },
|
|
channels: { whatsapp: { allowFrom: ["*"] } },
|
|
} as OpenClawConfig;
|
|
const params = buildParams("/subagents send 1 continue with follow-up details", cfg);
|
|
const result = await handleCommands(params);
|
|
expect(result.shouldContinue).toBe(false);
|
|
expect(result.reply?.text).toContain("✅ Sent to");
|
|
|
|
const agentCall = callGatewayMock.mock.calls.find(
|
|
(call) => (call[0] as { method?: string }).method === "agent",
|
|
);
|
|
expect(agentCall?.[0]).toMatchObject({
|
|
method: "agent",
|
|
params: {
|
|
lane: "subagent",
|
|
sessionKey: "agent:main:subagent:abc",
|
|
timeout: 0,
|
|
},
|
|
});
|
|
|
|
const waitCall = callGatewayMock.mock.calls.find(
|
|
(call) =>
|
|
(call[0] as { method?: string; params?: { runId?: string } }).method === "agent.wait" &&
|
|
(call[0] as { method?: string; params?: { runId?: string } }).params?.runId ===
|
|
"run-followup-1",
|
|
);
|
|
expect(waitCall).toBeDefined();
|
|
});
|
|
|
|
it("steers subagents via /steer alias", async () => {
|
|
resetSubagentRegistryForTests();
|
|
callGatewayMock.mockReset();
|
|
callGatewayMock.mockImplementation(async (opts: unknown) => {
|
|
const request = opts as { method?: string };
|
|
if (request.method === "agent") {
|
|
return { runId: "run-steer-1" };
|
|
}
|
|
return {};
|
|
});
|
|
const storePath = path.join(testWorkspaceDir, "sessions-subagents-steer.json");
|
|
await updateSessionStore(storePath, (store) => {
|
|
store["agent:main:subagent:abc"] = {
|
|
sessionId: "child-session-steer",
|
|
updatedAt: Date.now(),
|
|
};
|
|
});
|
|
addSubagentRunForTests({
|
|
runId: "run-1",
|
|
childSessionKey: "agent:main:subagent:abc",
|
|
requesterSessionKey: "agent:main:main",
|
|
requesterDisplayKey: "main",
|
|
task: "do thing",
|
|
cleanup: "keep",
|
|
createdAt: 1000,
|
|
startedAt: 1000,
|
|
});
|
|
const cfg = {
|
|
commands: { text: true },
|
|
channels: { whatsapp: { allowFrom: ["*"] } },
|
|
session: { store: storePath },
|
|
} as OpenClawConfig;
|
|
const params = buildParams("/steer 1 check timer.ts instead", cfg);
|
|
const result = await handleCommands(params);
|
|
expect(result.shouldContinue).toBe(false);
|
|
expect(result.reply?.text).toContain("steered");
|
|
const steerWaitIndex = callGatewayMock.mock.calls.findIndex(
|
|
(call) =>
|
|
(call[0] as { method?: string; params?: { runId?: string } }).method === "agent.wait" &&
|
|
(call[0] as { method?: string; params?: { runId?: string } }).params?.runId === "run-1",
|
|
);
|
|
expect(steerWaitIndex).toBeGreaterThanOrEqual(0);
|
|
const steerRunIndex = callGatewayMock.mock.calls.findIndex(
|
|
(call) => (call[0] as { method?: string }).method === "agent",
|
|
);
|
|
expect(steerRunIndex).toBeGreaterThan(steerWaitIndex);
|
|
expect(callGatewayMock.mock.calls[steerWaitIndex]?.[0]).toMatchObject({
|
|
method: "agent.wait",
|
|
params: { runId: "run-1", timeoutMs: 5_000 },
|
|
timeoutMs: 7_000,
|
|
});
|
|
expect(callGatewayMock.mock.calls[steerRunIndex]?.[0]).toMatchObject({
|
|
method: "agent",
|
|
params: {
|
|
lane: "subagent",
|
|
sessionKey: "agent:main:subagent:abc",
|
|
sessionId: "child-session-steer",
|
|
timeout: 0,
|
|
},
|
|
});
|
|
const trackedRuns = listSubagentRunsForRequester("agent:main:main");
|
|
expect(trackedRuns).toHaveLength(1);
|
|
expect(trackedRuns[0].runId).toBe("run-steer-1");
|
|
expect(trackedRuns[0].endedAt).toBeUndefined();
|
|
});
|
|
|
|
it("restores announce behavior when /steer replacement dispatch fails", async () => {
|
|
resetSubagentRegistryForTests();
|
|
callGatewayMock.mockReset();
|
|
callGatewayMock.mockImplementation(async (opts: unknown) => {
|
|
const request = opts as { method?: string };
|
|
if (request.method === "agent.wait") {
|
|
return { status: "timeout" };
|
|
}
|
|
if (request.method === "agent") {
|
|
throw new Error("dispatch failed");
|
|
}
|
|
return {};
|
|
});
|
|
addSubagentRunForTests({
|
|
runId: "run-1",
|
|
childSessionKey: "agent:main:subagent:abc",
|
|
requesterSessionKey: "agent:main:main",
|
|
requesterDisplayKey: "main",
|
|
task: "do thing",
|
|
cleanup: "keep",
|
|
createdAt: 1000,
|
|
startedAt: 1000,
|
|
});
|
|
const cfg = {
|
|
commands: { text: true },
|
|
channels: { whatsapp: { allowFrom: ["*"] } },
|
|
} as OpenClawConfig;
|
|
const params = buildParams("/steer 1 check timer.ts instead", cfg);
|
|
const result = await handleCommands(params);
|
|
expect(result.shouldContinue).toBe(false);
|
|
expect(result.reply?.text).toContain("send failed: dispatch failed");
|
|
|
|
const trackedRuns = listSubagentRunsForRequester("agent:main:main");
|
|
expect(trackedRuns).toHaveLength(1);
|
|
expect(trackedRuns[0].runId).toBe("run-1");
|
|
expect(trackedRuns[0].suppressAnnounceReason).toBeUndefined();
|
|
});
|
|
});
|
|
|
|
describe("handleCommands /tts", () => {
|
|
it("returns status for bare /tts on text command surfaces", async () => {
|
|
const cfg = {
|
|
commands: { text: true },
|
|
channels: { whatsapp: { allowFrom: ["*"] } },
|
|
messages: { tts: { prefsPath: path.join(testWorkspaceDir, "tts.json") } },
|
|
} as OpenClawConfig;
|
|
const params = buildParams("/tts", cfg);
|
|
const result = await handleCommands(params);
|
|
expect(result.shouldContinue).toBe(false);
|
|
expect(result.reply?.text).toContain("TTS status");
|
|
});
|
|
});
|