refactor(runtime): consolidate followup, gateway, and provider dedupe paths

This commit is contained in:
Peter Steinberger
2026-02-22 14:06:03 +00:00
parent 38752338dc
commit d116bcfb14
36 changed files with 848 additions and 908 deletions

View File

@@ -1,5 +1,10 @@
import type { AudioTranscriptionRequest, AudioTranscriptionResult } from "../../types.js";
import { assertOkOrThrowHttpError, fetchWithTimeoutGuarded, normalizeBaseUrl } from "../shared.js";
import {
assertOkOrThrowHttpError,
normalizeBaseUrl,
postTranscriptionRequest,
requireTranscriptionText,
} from "../shared.js";
export const DEFAULT_DEEPGRAM_AUDIO_BASE_URL = "https://api.deepgram.com/v1";
export const DEFAULT_DEEPGRAM_AUDIO_MODEL = "nova-3";
@@ -50,26 +55,23 @@ export async function transcribeDeepgramAudio(
}
const body = new Uint8Array(params.buffer);
const { response: res, release } = await fetchWithTimeoutGuarded(
url.toString(),
{
method: "POST",
headers,
body,
},
params.timeoutMs,
const { response: res, release } = await postTranscriptionRequest({
url: url.toString(),
headers,
body,
timeoutMs: params.timeoutMs,
fetchFn,
allowPrivate ? { ssrfPolicy: { allowPrivateNetwork: true } } : undefined,
);
allowPrivateNetwork: allowPrivate,
});
try {
await assertOkOrThrowHttpError(res, "Audio transcription failed");
const payload = (await res.json()) as DeepgramTranscriptResponse;
const transcript = payload.results?.channels?.[0]?.alternatives?.[0]?.transcript?.trim();
if (!transcript) {
throw new Error("Audio transcription response missing transcript");
}
const transcript = requireTranscriptionText(
payload.results?.channels?.[0]?.alternatives?.[0]?.transcript,
"Audio transcription response missing transcript",
);
return { text: transcript, model };
} finally {
await release();

View File

@@ -1,20 +1,11 @@
import { afterEach, beforeEach, describe, expect, it, vi } from "vitest";
import * as ssrf from "../../../infra/net/ssrf.js";
import { withFetchPreconnect } from "../../../test-utils/fetch-mock.js";
import { createRequestCaptureJsonFetch } from "../audio.test-helpers.js";
import { describeGeminiVideo } from "./video.js";
const TEST_NET_IP = "203.0.113.10";
/**
 * Extracts the URL string from any value accepted as a `fetch` input:
 * a plain string is returned as-is, a `URL` is serialized, and a
 * `Request` contributes its `url` property.
 */
const resolveRequestUrl = (input: RequestInfo | URL): string => {
  // Serialize URL objects first so the remaining cases are string vs Request.
  if (input instanceof URL) return input.toString();
  return typeof input === "string" ? input : input.url;
};
function stubPinnedHostname(hostname: string) {
const normalized = hostname.trim().toLowerCase().replace(/\.$/, "");
const addresses = [TEST_NET_IP];
@@ -73,23 +64,14 @@ describe("describeGeminiVideo", () => {
});
it("builds the expected request payload", async () => {
let seenUrl: string | null = null;
let seenInit: RequestInit | undefined;
const fetchFn = withFetchPreconnect(async (input: RequestInfo | URL, init?: RequestInit) => {
seenUrl = resolveRequestUrl(input);
seenInit = init;
return new Response(
JSON.stringify({
candidates: [
{
content: {
parts: [{ text: "first" }, { text: " second " }, { text: "" }],
},
},
],
}),
{ status: 200, headers: { "content-type": "application/json" } },
);
const { fetchFn, getRequest } = createRequestCaptureJsonFetch({
candidates: [
{
content: {
parts: [{ text: "first" }, { text: " second " }, { text: "" }],
},
},
],
});
const result = await describeGeminiVideo({
@@ -102,6 +84,7 @@ describe("describeGeminiVideo", () => {
headers: { "X-Other": "1" },
fetchFn,
});
const { url: seenUrl, init: seenInit } = getRequest();
expect(result.model).toBe("gemini-3-pro-preview");
expect(result.text).toBe("first\nsecond");

View File

@@ -1,6 +1,11 @@
import path from "node:path";
import type { AudioTranscriptionRequest, AudioTranscriptionResult } from "../../types.js";
import { assertOkOrThrowHttpError, fetchWithTimeoutGuarded, normalizeBaseUrl } from "../shared.js";
import {
assertOkOrThrowHttpError,
normalizeBaseUrl,
postTranscriptionRequest,
requireTranscriptionText,
} from "../shared.js";
export const DEFAULT_OPENAI_AUDIO_BASE_URL = "https://api.openai.com/v1";
const DEFAULT_OPENAI_AUDIO_MODEL = "gpt-4o-mini-transcribe";
@@ -39,26 +44,23 @@ export async function transcribeOpenAiCompatibleAudio(
headers.set("authorization", `Bearer ${params.apiKey}`);
}
const { response: res, release } = await fetchWithTimeoutGuarded(
const { response: res, release } = await postTranscriptionRequest({
url,
{
method: "POST",
headers,
body: form,
},
params.timeoutMs,
headers,
body: form,
timeoutMs: params.timeoutMs,
fetchFn,
allowPrivate ? { ssrfPolicy: { allowPrivateNetwork: true } } : undefined,
);
allowPrivateNetwork: allowPrivate,
});
try {
await assertOkOrThrowHttpError(res, "Audio transcription failed");
const payload = (await res.json()) as { text?: string };
const text = payload.text?.trim();
if (!text) {
throw new Error("Audio transcription response missing text");
}
const text = requireTranscriptionText(
payload.text,
"Audio transcription response missing text",
);
return { text, model };
} finally {
await release();

View File

@@ -32,6 +32,27 @@ export async function fetchWithTimeoutGuarded(
});
}
/**
 * Issues a POST request through `fetchWithTimeoutGuarded`, translating the
 * flat options object used by the transcription providers into that helper's
 * positional signature.
 *
 * When `allowPrivateNetwork` is set, an SSRF policy permitting private
 * network targets is forwarded; otherwise no policy override is passed.
 */
export async function postTranscriptionRequest(params: {
  url: string;
  headers: Headers;
  body: BodyInit;
  timeoutMs: number;
  fetchFn: typeof fetch;
  allowPrivateNetwork?: boolean;
}) {
  const { url, headers, body, timeoutMs, fetchFn, allowPrivateNetwork } = params;
  const requestInit = { method: "POST", headers, body };
  const guardOptions = allowPrivateNetwork
    ? { ssrfPolicy: { allowPrivateNetwork: true } }
    : undefined;
  return fetchWithTimeoutGuarded(url, requestInit, timeoutMs, fetchFn, guardOptions);
}
export async function readErrorResponse(res: Response): Promise<string | undefined> {
try {
const text = await res.text();
@@ -56,3 +77,14 @@ export async function assertOkOrThrowHttpError(res: Response, label: string): Pr
const suffix = detail ? `: ${detail}` : "";
throw new Error(`${label} (HTTP ${res.status})${suffix}`);
}
/**
 * Returns the trimmed transcription text, or throws when the provider
 * response carried no usable text.
 *
 * @param value - Raw text field from the provider payload; may be absent.
 * @param missingMessage - Error message used when the text is missing,
 *   empty, or whitespace-only.
 * @returns The trimmed, non-empty transcription text.
 * @throws Error with `missingMessage` when no non-blank text is present.
 */
export function requireTranscriptionText(
  value: string | undefined,
  missingMessage: string,
): string {
  const trimmed = (value ?? "").trim();
  if (trimmed.length === 0) {
    throw new Error(missingMessage);
  }
  return trimmed;
}