test: dedupe gateway browser discord and channel coverage

This commit is contained in:
Peter Steinberger
2026-02-22 17:11:42 +00:00
parent 34ea33f057
commit 296b19e413
29 changed files with 938 additions and 1041 deletions

View File

@@ -92,6 +92,21 @@ function createMediaDisabledConfig(): OpenClawConfig {
};
}
/**
 * Builds a media-disabled config whose gateway responses endpoint carries a
 * file MIME allowlist — shared by tests that exercise attachment filtering.
 */
function createMediaDisabledConfigWithAllowedMimes(allowedMimes: string[]): OpenClawConfig {
  const base = createMediaDisabledConfig();
  const responses = { files: { allowedMimes } };
  return {
    ...base,
    gateway: { http: { endpoints: { responses } } },
  };
}
async function createTempMediaFile(params: { fileName: string; content: Buffer | string }) {
const dir = await createTempMediaDir();
const mediaPath = path.join(dir, params.fileName);
@@ -135,6 +150,16 @@ async function applyWithDisabledMedia(params: {
return { ctx, result };
}
/**
 * Asserts that an attachment was skipped: the apply result reports no file,
 * the message body is untouched, and no `<file` tag was injected into it.
 */
function expectFileNotApplied(params: {
  ctx: MsgContext;
  result: { appliedFile: boolean };
  body: string;
}) {
  const { ctx, result, body } = params;
  expect(result.appliedFile).toBe(false);
  expect(ctx.Body).toBe(body);
  expect(ctx.Body).not.toContain("<file");
}
describe("applyMediaUnderstanding", () => {
const mockedResolveApiKey = vi.mocked(resolveApiKeyForProvider);
const mockedFetchRemoteMedia = vi.mocked(fetchRemoteMedia);
@@ -627,9 +652,7 @@ describe("applyMediaUnderstanding", () => {
mediaType: "audio/mpeg",
});
expect(result.appliedFile).toBe(false);
expect(ctx.Body).toBe("<media:audio>");
expect(ctx.Body).not.toContain("<file");
expectFileNotApplied({ ctx, result, body: "<media:audio>" });
});
it("does not reclassify PDF attachments as text/plain", async () => {
@@ -639,18 +662,7 @@ describe("applyMediaUnderstanding", () => {
content: pseudoPdf,
});
const cfg: OpenClawConfig = {
...createMediaDisabledConfig(),
gateway: {
http: {
endpoints: {
responses: {
files: { allowedMimes: ["text/plain"] },
},
},
},
},
};
const cfg = createMediaDisabledConfigWithAllowedMimes(["text/plain"]);
const { ctx, result } = await applyWithDisabledMedia({
body: "<media:file>",
@@ -659,9 +671,7 @@ describe("applyMediaUnderstanding", () => {
cfg,
});
expect(result.appliedFile).toBe(false);
expect(ctx.Body).toBe("<media:file>");
expect(ctx.Body).not.toContain("<file");
expectFileNotApplied({ ctx, result, body: "<media:file>" });
});
it("respects configured allowedMimes for text-like attachments", async () => {
@@ -671,27 +681,14 @@ describe("applyMediaUnderstanding", () => {
content: tsvText,
});
const cfg: OpenClawConfig = {
...createMediaDisabledConfig(),
gateway: {
http: {
endpoints: {
responses: {
files: { allowedMimes: ["text/plain"] },
},
},
},
},
};
const cfg = createMediaDisabledConfigWithAllowedMimes(["text/plain"]);
const { ctx, result } = await applyWithDisabledMedia({
body: "<media:file>",
mediaPath: tsvPath,
cfg,
});
expect(result.appliedFile).toBe(false);
expect(ctx.Body).toBe("<media:file>");
expect(ctx.Body).not.toContain("<file");
expectFileNotApplied({ ctx, result, body: "<media:file>" });
});
it("escapes XML special characters in filenames to prevent injection", async () => {
@@ -824,9 +821,7 @@ describe("applyMediaUnderstanding", () => {
mediaType: "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet",
});
expect(result.appliedFile).toBe(false);
expect(ctx.Body).toBe("<media:file>");
expect(ctx.Body).not.toContain("<file");
expectFileNotApplied({ ctx, result, body: "<media:file>" });
});
it("keeps vendor +json attachments eligible for text extraction", async () => {

View File

@@ -29,37 +29,50 @@ function createOpenAiAudioCfg(extra?: Partial<OpenClawConfig>): OpenClawConfig {
} as unknown as OpenClawConfig;
}
/**
 * Shared driver for the auto-audio tests: runs the "audio" capability inside
 * the audio fixture with the given transcription stub and optional config
 * overrides, then returns the capability result.
 *
 * Throws if the fixture callback never produced a result, so callers can
 * assert on `result` without undefined checks.
 */
async function runAutoAudioCase(params: {
  transcribeAudio: (req: { model?: string }) => Promise<{ text: string; model: string }>;
  cfgExtra?: Partial<OpenClawConfig>;
}) {
  let captured: Awaited<ReturnType<typeof runCapability>> | undefined;
  await withAudioFixture("openclaw-auto-audio", async ({ ctx, media, cache }) => {
    captured = await runCapability({
      capability: "audio",
      cfg: createOpenAiAudioCfg(params.cfgExtra),
      ctx,
      attachments: cache,
      media,
      providerRegistry: createOpenAiAudioProvider(params.transcribeAudio),
    });
  });
  if (captured === undefined) {
    throw new Error("Expected auto audio case result");
  }
  return captured;
}
describe("runCapability auto audio entries", () => {
it("uses provider keys to auto-enable audio transcription", async () => {
await withAudioFixture("openclaw-auto-audio", async ({ ctx, media, cache }) => {
let seenModel: string | undefined;
const providerRegistry = createOpenAiAudioProvider(async (req) => {
let seenModel: string | undefined;
const result = await runAutoAudioCase({
transcribeAudio: async (req) => {
seenModel = req.model;
return { text: "ok", model: req.model ?? "unknown" };
});
const cfg = createOpenAiAudioCfg();
const result = await runCapability({
capability: "audio",
cfg,
ctx,
attachments: cache,
media,
providerRegistry,
});
expect(result.outputs[0]?.text).toBe("ok");
expect(seenModel).toBe("gpt-4o-mini-transcribe");
expect(result.decision.outcome).toBe("success");
},
});
expect(result.outputs[0]?.text).toBe("ok");
expect(seenModel).toBe("gpt-4o-mini-transcribe");
expect(result.decision.outcome).toBe("success");
});
it("skips auto audio when disabled", async () => {
await withAudioFixture("openclaw-auto-audio", async ({ ctx, media, cache }) => {
const providerRegistry = createOpenAiAudioProvider(async () => ({
const result = await runAutoAudioCase({
transcribeAudio: async () => ({
text: "ok",
model: "whisper-1",
}));
const cfg = createOpenAiAudioCfg({
}),
cfgExtra: {
tools: {
media: {
audio: {
@@ -67,29 +80,20 @@ describe("runCapability auto audio entries", () => {
},
},
},
});
const result = await runCapability({
capability: "audio",
cfg,
ctx,
attachments: cache,
media,
providerRegistry,
});
expect(result.outputs).toHaveLength(0);
expect(result.decision.outcome).toBe("disabled");
},
});
expect(result.outputs).toHaveLength(0);
expect(result.decision.outcome).toBe("disabled");
});
it("prefers explicitly configured audio model entries", async () => {
await withAudioFixture("openclaw-auto-audio", async ({ ctx, media, cache }) => {
let seenModel: string | undefined;
const providerRegistry = createOpenAiAudioProvider(async (req) => {
let seenModel: string | undefined;
const result = await runAutoAudioCase({
transcribeAudio: async (req) => {
seenModel = req.model;
return { text: "ok", model: req.model ?? "unknown" };
});
const cfg = createOpenAiAudioCfg({
},
cfgExtra: {
tools: {
media: {
audio: {
@@ -97,19 +101,10 @@ describe("runCapability auto audio entries", () => {
},
},
},
});
const result = await runCapability({
capability: "audio",
cfg,
ctx,
attachments: cache,
media,
providerRegistry,
});
expect(result.outputs[0]?.text).toBe("ok");
expect(seenModel).toBe("whisper-1");
},
});
expect(result.outputs[0]?.text).toBe("ok");
expect(seenModel).toBe("whisper-1");
});
});