fix(subagent): harden read-tool overflow guards and sticky reply threading (#19508)

* fix(gateway): avoid premature agent.wait completion on transient errors

* fix(agent): preemptively guard tool results against context overflow

* fix: harden tool-result context guard and add message_id metadata

* fix: use importOriginal in session-key mock to include DEFAULT_ACCOUNT_ID

The run.skill-filter test was mocking ../../routing/session-key.js with only
buildAgentMainSessionKey and normalizeAgentId, but the module also exports
DEFAULT_ACCOUNT_ID which is required transitively by src/web/auth-store.ts.

Switch to importOriginal pattern so all real exports are preserved alongside
the mocked functions.

* pi-runner: guard accumulated tool-result overflow in transformContext

* PI runner: compact overflowing tool-result context

* Subagent: harden tool-result context recovery

* Enhance tool-result context handling by adding support for legacy tool outputs and improving character estimation for message truncation. This includes a new function to create legacy tool results and updates to existing functions to better manage context overflow scenarios.

* Enhance iMessage handling by adding reply tag support in send functions and tests. This includes modifications to prepend or rewrite reply tags based on provided replyToId, ensuring proper message formatting for replies.

* Enhance message delivery across multiple channels by implementing sticky reply context for chunked messages. This includes preserving reply references in Discord, Telegram, and iMessage, ensuring that follow-up messages maintain their intended reply targets. Additionally, improve handling of reply tags in system prompts and tests to support consistent reply behavior.

* Enhance read tool functionality by implementing auto-paging across chunks when no explicit limit is provided, scaling output budget based on model context window. Additionally, add tests for adaptive reading behavior and capped continuation guidance for large outputs. Update related functions to support these features.

* Refine tool-result context management by stripping oversized read-tool details payloads during compaction, ensuring repeated read calls do not bypass context limits. Introduce new utility functions for handling truncation content and enhance character estimation for tool results. Add tests to validate the removal of excessive details in context overflow scenarios.

* Refine message delivery logic in Matrix and Telegram by introducing a flag to track if a text chunk was sent. This ensures that replies are only marked as delivered when a text chunk has been successfully sent, improving the accuracy of reply handling in both channels.

* fix: tighten reply threading coverage and prep fixes (#19508) (thanks @tyler6204)
This commit is contained in:
Tyler Yust
2026-02-17 15:32:52 -08:00
committed by GitHub
parent 75e11fed5d
commit 087dca8fa9
40 changed files with 2108 additions and 216 deletions

View File

@@ -13,6 +13,247 @@ type ToolContentBlock = AgentToolResult<unknown>["content"][number];
// Narrowed aliases for the content-block variants a tool result may carry.
type ImageContentBlock = Extract<ToolContentBlock, { type: "image" }>;
type TextContentBlock = Extract<ToolContentBlock, { type: "text" }>;
// Baseline per-call read byte budget when no model context info is available.
const DEFAULT_READ_PAGE_MAX_BYTES = 50 * 1024;
// Hard ceiling on the adaptive read budget regardless of context window size.
const MAX_ADAPTIVE_READ_MAX_BYTES = 512 * 1024;
// Fraction of the model's context window one read call is allowed to consume.
const ADAPTIVE_READ_CONTEXT_SHARE = 0.2;
// Rough chars-per-token heuristic used to convert a token count into bytes.
const CHARS_PER_TOKEN_ESTIMATE = 4;
// Upper bound on chained page fetches within a single adaptive read call.
const MAX_ADAPTIVE_READ_PAGES = 8;
/** Options for createOpenClawReadTool; the context window scales the read budget. */
type OpenClawReadToolOptions = {
  modelContextWindowTokens?: number;
};
/** Normalized view of the truncation metadata a read tool result may carry. */
type ReadTruncationDetails = {
  truncated: boolean;
  outputLines: number;
  firstLineExceedsLimit: boolean;
};
// Matches the trailing "[... Use offset=N to continue.]" notice appended to
// truncated read output, so it can be stripped before pages are concatenated.
const READ_CONTINUATION_NOTICE_RE =
  /\n\n\[(?:Showing lines [^\]]*?Use offset=\d+ to continue\.|\d+ more lines in file\. Use offset=\d+ to continue\.)\]\s*$/;
/** Restrict value to the inclusive range [min, max] (min wins if min > max). */
function clamp(value: number, min: number, max: number): number {
  const upperBounded = value > max ? max : value;
  return upperBounded < min ? min : upperBounded;
}
/**
 * Derive the per-call read byte budget from the model's context window.
 * Falls back to the default page size when no usable window is provided,
 * and clamps the derived value between the default and the hard maximum.
 */
function resolveAdaptiveReadMaxBytes(options?: OpenClawReadToolOptions): number {
  const windowTokens = options?.modelContextWindowTokens;
  if (typeof windowTokens !== "number" || !Number.isFinite(windowTokens) || windowTokens <= 0) {
    return DEFAULT_READ_PAGE_MAX_BYTES;
  }
  // Tokens -> chars -> share of the window the read call may occupy.
  const estimatedBytes = Math.floor(
    windowTokens * CHARS_PER_TOKEN_ESTIMATE * ADAPTIVE_READ_CONTEXT_SHARE,
  );
  return clamp(estimatedBytes, DEFAULT_READ_PAGE_MAX_BYTES, MAX_ADAPTIVE_READ_MAX_BYTES);
}
/** Render a byte count as a short human-readable string (B, KB, or MB). */
function formatBytes(bytes: number): string {
  const KB = 1024;
  const MB = KB * 1024;
  if (bytes >= MB) {
    return `${(bytes / MB).toFixed(1)}MB`;
  }
  if (bytes >= KB) {
    return `${Math.round(bytes / KB)}KB`;
  }
  return `${bytes}B`;
}
/**
 * Concatenate the text of every "text" content block in a tool result,
 * joined with newlines. Returns undefined when no text blocks exist.
 */
function getToolResultText(result: AgentToolResult<unknown>): string | undefined {
  const blocks = Array.isArray(result.content) ? result.content : [];
  const texts: string[] = [];
  for (const block of blocks) {
    if (!block || typeof block !== "object") {
      continue;
    }
    const candidate = block as { type?: unknown; text?: unknown };
    if (candidate.type === "text" && typeof candidate.text === "string") {
      texts.push(candidate.text);
    }
  }
  return texts.length > 0 ? texts.join("\n") : undefined;
}
/**
 * Return a copy of the tool result whose first "text" content block carries
 * the given text (other properties of that block preserved). When no text
 * block exists, the content is replaced with a single new text block.
 */
function withToolResultText(
  result: AgentToolResult<unknown>,
  text: string,
): AgentToolResult<unknown> {
  const blocks = Array.isArray(result.content) ? result.content : [];
  const firstTextIndex = blocks.findIndex(
    (block) =>
      Boolean(block) &&
      typeof block === "object" &&
      (block as { type?: unknown }).type === "text",
  );
  if (firstTextIndex === -1) {
    // No text block to rewrite: replace the content wholesale.
    const textBlock = { type: "text", text } as unknown as TextContentBlock;
    return {
      ...result,
      content: [textBlock] as unknown as AgentToolResult<unknown>["content"],
    };
  }
  const nextContent: ToolContentBlock[] = blocks.slice();
  nextContent[firstTextIndex] = {
    ...(blocks[firstTextIndex] as TextContentBlock),
    text,
  };
  return {
    ...result,
    content: nextContent as unknown as AgentToolResult<unknown>["content"],
  };
}
/**
 * Pull normalized truncation metadata off a read tool result's details.
 * Returns null unless details.truncation exists with truncated === true.
 * outputLines is floored to a non-negative integer (0 when absent/invalid).
 */
function extractReadTruncationDetails(
  result: AgentToolResult<unknown>,
): ReadTruncationDetails | null {
  const details = (result as { details?: unknown }).details;
  if (!details || typeof details !== "object") {
    return null;
  }
  const truncation = (details as { truncation?: unknown }).truncation;
  if (!truncation || typeof truncation !== "object") {
    return null;
  }
  const fields = truncation as Record<string, unknown>;
  if (fields.truncated !== true) {
    return null;
  }
  const rawLines = fields.outputLines;
  let outputLines = 0;
  if (typeof rawLines === "number" && Number.isFinite(rawLines)) {
    outputLines = Math.max(0, Math.floor(rawLines));
  }
  return {
    truncated: true,
    outputLines,
    firstLineExceedsLimit: fields.firstLineExceedsLimit === true,
  };
}
/** Remove the trailing "[... Use offset=N to continue.]" notice, if present. */
function stripReadContinuationNotice(text: string): string {
  const withoutNotice = text.replace(READ_CONTINUATION_NOTICE_RE, "");
  return withoutNotice;
}
/**
 * Drop the (potentially huge) `content` payload from details.truncation so a
 * compacted tool result cannot reintroduce the full read text into context.
 * Returns the result unchanged when no such payload exists.
 */
function stripReadTruncationContentDetails(
  result: AgentToolResult<unknown>,
): AgentToolResult<unknown> {
  const details = (result as { details?: unknown }).details;
  if (!details || typeof details !== "object") {
    return result;
  }
  const detailsRecord = details as Record<string, unknown>;
  const truncationRaw = detailsRecord.truncation;
  if (!truncationRaw || typeof truncationRaw !== "object") {
    return result;
  }
  const truncation = truncationRaw as Record<string, unknown>;
  if (!Object.prototype.hasOwnProperty.call(truncation, "content")) {
    return result;
  }
  // Rebuild truncation without the `content` key, keeping everything else.
  const prunedTruncation: Record<string, unknown> = {};
  for (const [key, value] of Object.entries(truncation)) {
    if (key !== "content") {
      prunedTruncation[key] = value;
    }
  }
  return {
    ...result,
    details: {
      ...detailsRecord,
      truncation: prunedTruncation,
    },
  };
}
/**
 * Execute the read tool, auto-fetching continuation pages when the caller
 * gave no explicit `limit`, and concatenating page text until the file is
 * exhausted, `maxBytes` is reached, or MAX_ADAPTIVE_READ_PAGES pages have
 * been fetched. When output is capped, a continuation notice with the next
 * offset is appended so the model can resume reading.
 */
async function executeReadWithAdaptivePaging(params: {
  base: AnyAgentTool;
  toolCallId: string;
  args: Record<string, unknown>;
  signal?: AbortSignal;
  maxBytes: number;
}): Promise<AgentToolResult<unknown>> {
  const userLimit = params.args.limit;
  const hasExplicitLimit =
    typeof userLimit === "number" && Number.isFinite(userLimit) && userLimit > 0;
  // An explicit limit means the caller controls paging: single pass-through call.
  if (hasExplicitLimit) {
    return await params.base.execute(params.toolCallId, params.args, params.signal);
  }
  const offsetRaw = params.args.offset;
  // Offsets are 1-based; fall back to the start of the file when absent/invalid.
  let nextOffset =
    typeof offsetRaw === "number" && Number.isFinite(offsetRaw) && offsetRaw > 0
      ? Math.floor(offsetRaw)
      : 1;
  let firstResult: AgentToolResult<unknown> | null = null;
  let aggregatedText = "";
  let aggregatedBytes = 0;
  let capped = false;
  let continuationOffset: number | undefined;
  for (let page = 0; page < MAX_ADAPTIVE_READ_PAGES; page += 1) {
    const pageArgs = { ...params.args, offset: nextOffset };
    const pageResult = await params.base.execute(params.toolCallId, pageArgs, params.signal);
    // The first page's result is kept as the carrier for metadata/details.
    firstResult ??= pageResult;
    const rawText = getToolResultText(pageResult);
    // Non-text results (e.g. image reads) cannot be concatenated; return as-is.
    if (typeof rawText !== "string") {
      return pageResult;
    }
    const truncation = extractReadTruncationDetails(pageResult);
    // Continue only if the page was truncated mid-file, made line progress,
    // and another fetch is still within the page budget.
    const canContinue =
      Boolean(truncation?.truncated) &&
      !truncation?.firstLineExceedsLimit &&
      (truncation?.outputLines ?? 0) > 0 &&
      page < MAX_ADAPTIVE_READ_PAGES - 1;
    // Strip the per-page continuation notice before joining page texts.
    const pageText = canContinue ? stripReadContinuationNotice(rawText) : rawText;
    const delimiter = aggregatedText ? "\n\n" : "";
    const nextBytes = Buffer.byteLength(`${delimiter}${pageText}`, "utf-8");
    // Stop before appending a page that would exceed the byte budget,
    // but always keep at least the first page.
    if (aggregatedText && aggregatedBytes + nextBytes > params.maxBytes) {
      capped = true;
      continuationOffset = nextOffset;
      break;
    }
    aggregatedText += `${delimiter}${pageText}`;
    aggregatedBytes += nextBytes;
    // A non-truncated page means the file has been read to the end.
    if (!canContinue || !truncation) {
      return withToolResultText(pageResult, aggregatedText);
    }
    nextOffset += truncation.outputLines;
    continuationOffset = nextOffset;
    if (aggregatedBytes >= params.maxBytes) {
      capped = true;
      break;
    }
  }
  // Defensive: the loop body runs at least once, but fall back to a plain
  // single call if no page result was captured.
  if (!firstResult) {
    return await params.base.execute(params.toolCallId, params.args, params.signal);
  }
  let finalText = aggregatedText;
  if (capped && continuationOffset) {
    finalText += `\n\n[Read output capped at ${formatBytes(params.maxBytes)} for this call. Use offset=${continuationOffset} to continue.]`;
  }
  return withToolResultText(firstResult, finalText);
}
function rewriteReadImageHeader(text: string, mimeType: string): string {
// pi-coding-agent uses: "Read image file [image/png]"
if (text.startsWith("Read image file [") && text.endsWith("]")) {
@@ -324,13 +565,16 @@ export function wrapToolWorkspaceRootGuard(tool: AnyAgentTool, root: string): An
/** Construction parameters shared by the sandboxed filesystem tools. */
type SandboxToolParams = {
  root: string;
  bridge: SandboxFsBridge;
  // Optional model context window used to scale the read tool's byte budget.
  modelContextWindowTokens?: number;
};
/**
 * Build the sandboxed read tool: the base read tool wired to sandbox fs
 * operations, wrapped with the OpenClaw read behavior (schema patching and
 * adaptive paging).
 *
 * Fix: the body previously contained two consecutive `return` statements —
 * a stale `return createOpenClawReadTool(base);` shadowed the intended call,
 * making the second return unreachable and silently dropping the
 * modelContextWindowTokens option. The stale return is removed.
 */
export function createSandboxedReadTool(params: SandboxToolParams) {
  const base = createReadTool(params.root, {
    operations: createSandboxReadOperations(params),
  }) as unknown as AnyAgentTool;
  // Pass the model context window through so the read byte budget can scale.
  return createOpenClawReadTool(base, {
    modelContextWindowTokens: params.modelContextWindowTokens,
  });
}
export function createSandboxedWriteTool(params: SandboxToolParams) {
@@ -347,7 +591,10 @@ export function createSandboxedEditTool(params: SandboxToolParams) {
return wrapToolParamNormalization(base, CLAUDE_PARAM_GROUPS.edit);
}
export function createOpenClawReadTool(base: AnyAgentTool): AnyAgentTool {
export function createOpenClawReadTool(
base: AnyAgentTool,
options?: OpenClawReadToolOptions,
): AnyAgentTool {
const patched = patchToolSchemaForClaudeCompatibility(base);
return {
...patched,
@@ -357,9 +604,16 @@ export function createOpenClawReadTool(base: AnyAgentTool): AnyAgentTool {
normalized ??
(params && typeof params === "object" ? (params as Record<string, unknown>) : undefined);
assertRequiredParams(record, CLAUDE_PARAM_GROUPS.read, base.name);
const result = await base.execute(toolCallId, normalized ?? params, signal);
const result = await executeReadWithAdaptivePaging({
base,
toolCallId,
args: (normalized ?? params ?? {}) as Record<string, unknown>,
signal,
maxBytes: resolveAdaptiveReadMaxBytes(options),
});
const filePath = typeof record?.path === "string" ? String(record.path) : "<unknown>";
const normalizedResult = await normalizeReadImageResult(result, filePath);
const strippedDetailsResult = stripReadTruncationContentDetails(result);
const normalizedResult = await normalizeReadImageResult(strippedDetailsResult, filePath);
return sanitizeToolResultImages(normalizedResult, `read:${filePath}`);
},
};