sessions_spawn: inline attachments with redaction, lifecycle cleanup, and docs (#16761)

Add inline file attachment support for sessions_spawn (subagent runtime only):

- Schema: attachments[] (name, content, encoding, mimeType) and attachAs.mountPath hint
- Materialization: files written to .openclaw/attachments/<uuid>/ with manifest.json
- Validation: strict base64 decode, filename checks, size limits, duplicate detection
- Transcript redaction: sanitizeToolCallInputs redacts attachment content from persisted transcripts
- Lifecycle cleanup: safeRemoveAttachmentsDir with symlink-safe path containment check
- Config: tools.sessions_spawn.attachments (enabled, maxFiles, maxFileBytes, maxTotalBytes, retainOnSessionKeep)
- Registry: attachmentsDir/attachmentsRootDir/retainAttachmentsOnKeep on SubagentRunRecord
- ACP rejection: attachments rejected for runtime=acp with clear error message
- Docs: updated tools/index.md, concepts/session-tool.md, configuration-reference.md
- Tests: 85 new/updated tests across 5 test files

Fixes:
- Guard fs.rm in materialization catch block with try/catch (review concern #1)
- Remove unreachable fallback in safeRemoveAttachmentsDir (review concern #7)
- Move attachment cleanup out of retry path to avoid timing issues with announce loop

Co-authored-by: Tyler Yust <TYTYYUST@YAHOO.COM>
Co-authored-by: napetrov <napetrov@users.noreply.github.com>
This commit is contained in: (branch list not captured in this extraction)
Authored by Nikolay Petrov on 2026-03-01 21:33:51 -08:00
committed by GitHub
parent 842deefe5d
commit a9f1188785
15 changed files with 1039 additions and 135 deletions

View File

@@ -922,7 +922,7 @@ describe("applyExtraParamsToAgent", () => {
provider: "openai",
id: "gpt-5",
baseUrl: "https://api.openai.com/v1",
} as Model<"openai-responses">,
} as unknown as Model<"openai-responses">,
});
expect(payload.store).toBe(true);
});
@@ -936,7 +936,7 @@ describe("applyExtraParamsToAgent", () => {
provider: "openai",
id: "gpt-5",
baseUrl: "https://proxy.example.com/v1",
} as Model<"openai-responses">,
} as unknown as Model<"openai-responses">,
});
expect(payload.store).toBe(false);
});
@@ -950,7 +950,7 @@ describe("applyExtraParamsToAgent", () => {
provider: "openai",
id: "gpt-5",
baseUrl: "",
} as Model<"openai-responses">,
} as unknown as Model<"openai-responses">,
});
expect(payload.store).toBe(false);
});
@@ -971,7 +971,7 @@ describe("applyExtraParamsToAgent", () => {
contextWindow: 128_000,
maxTokens: 16_384,
compat: { supportsStore: false },
} as Model<"openai-responses"> & { compat?: { supportsStore?: boolean } },
} as unknown as Model<"openai-responses">,
});
expect(payload.store).toBe(false);
});
@@ -986,7 +986,7 @@ describe("applyExtraParamsToAgent", () => {
id: "gpt-5",
baseUrl: "https://api.openai.com/v1",
contextWindow: 200_000,
} as Model<"openai-responses">,
} as unknown as Model<"openai-responses">,
});
expect(payload.context_management).toEqual([
{
@@ -1005,7 +1005,7 @@ describe("applyExtraParamsToAgent", () => {
provider: "azure-openai-responses",
id: "gpt-4o",
baseUrl: "https://example.openai.azure.com/openai/v1",
} as Model<"openai-responses">,
} as unknown as Model<"openai-responses">,
});
expect(payload).not.toHaveProperty("context_management");
});
@@ -1033,7 +1033,7 @@ describe("applyExtraParamsToAgent", () => {
provider: "azure-openai-responses",
id: "gpt-4o",
baseUrl: "https://example.openai.azure.com/openai/v1",
} as Model<"openai-responses">,
} as unknown as Model<"openai-responses">,
});
expect(payload.context_management).toEqual([
{
@@ -1052,7 +1052,7 @@ describe("applyExtraParamsToAgent", () => {
provider: "openai",
id: "gpt-5",
baseUrl: "https://api.openai.com/v1",
} as Model<"openai-responses">,
} as unknown as Model<"openai-responses">,
payload: {
store: false,
context_management: [{ type: "compaction", compact_threshold: 12_345 }],
@@ -1083,7 +1083,7 @@ describe("applyExtraParamsToAgent", () => {
provider: "openai",
id: "gpt-5",
baseUrl: "https://api.openai.com/v1",
} as Model<"openai-responses">,
} as unknown as Model<"openai-responses">,
});
expect(payload).not.toHaveProperty("context_management");
});