fix: finalize vLLM onboarding integration (#12577) (thanks @gejifeng)

This commit is contained in:
Peter Steinberger
2026-02-13 15:47:30 +01:00
parent 513fd835a1
commit 3bcde8df32
2 changed files with 7 additions and 13 deletions

View File

@@ -121,6 +121,12 @@ interface OllamaTagsResponse {
models: OllamaModel[];
}
/**
 * Minimal shape of a vLLM model-listing response.
 *
 * NOTE(review): presumably the payload of vLLM's OpenAI-compatible
 * `GET /v1/models` endpoint — confirm against the discovery call site.
 * Every field is optional because this models external input that must
 * be validated/narrowed before use.
 */
type VllmModelsResponse = {
  // List of available models; each entry's `id` is the model name.
  data?: Array<{
    id?: string;
  }>;
};
/**
* Derive the Ollama native API base URL from a configured base URL.
*
@@ -139,11 +145,6 @@ export function resolveOllamaApiBase(configuredBaseUrl?: string): string {
}
async function discoverOllamaModels(baseUrl?: string): Promise<ModelDefinitionConfig[]> {
type VllmModelsResponse = {
data?: Array<{
id?: string;
}>;
};
// Skip Ollama discovery in test environments
if (process.env.VITEST || process.env.NODE_ENV === "test") {
return [];
@@ -470,14 +471,6 @@ function buildMoonshotProvider(): ProviderConfig {
};
}
/**
 * Build the Together AI provider configuration.
 *
 * @returns A {@link ProviderConfig} pointing at the Together base URL,
 *   using the OpenAI-completions API style, with model definitions
 *   derived from the static Together model catalog.
 */
function buildTogetherProvider(): ProviderConfig {
  // Derive the model list up front from the static catalog entries.
  const models = TOGETHER_MODEL_CATALOG.map(buildTogetherModelDefinition);

  return {
    baseUrl: TOGETHER_BASE_URL,
    api: "openai-completions",
    models,
  };
}
function buildQwenPortalProvider(): ProviderConfig {
return {
baseUrl: QWEN_PORTAL_BASE_URL,