mirror of
https://github.com/openclaw/openclaw.git
synced 2026-05-08 13:21:25 +00:00
feat: add support for the Together AI provider (#10304)
This commit is contained in:
@@ -305,6 +305,7 @@ export function resolveEnvApiKey(provider: string): EnvApiKeyResult | null {
|
||||
venice: "VENICE_API_KEY",
|
||||
mistral: "MISTRAL_API_KEY",
|
||||
opencode: "OPENCODE_API_KEY",
|
||||
together: "TOGETHER_API_KEY",
|
||||
qianfan: "QIANFAN_API_KEY",
|
||||
ollama: "OLLAMA_API_KEY",
|
||||
};
|
||||
|
||||
@@ -16,6 +16,11 @@ import {
|
||||
SYNTHETIC_BASE_URL,
|
||||
SYNTHETIC_MODEL_CATALOG,
|
||||
} from "./synthetic-models.js";
|
||||
import {
|
||||
TOGETHER_BASE_URL,
|
||||
TOGETHER_MODEL_CATALOG,
|
||||
buildTogetherModelDefinition,
|
||||
} from "./together-models.js";
|
||||
import { discoverVeniceModels, VENICE_BASE_URL } from "./venice-models.js";
|
||||
|
||||
type ModelsConfig = NonNullable<OpenClawConfig["models"]>;
|
||||
@@ -414,6 +419,14 @@ async function buildOllamaProvider(): Promise<ProviderConfig> {
|
||||
};
|
||||
}
|
||||
|
||||
function buildTogetherProvider(): ProviderConfig {
|
||||
return {
|
||||
baseUrl: TOGETHER_BASE_URL,
|
||||
api: "openai-completions",
|
||||
models: TOGETHER_MODEL_CATALOG.map(buildTogetherModelDefinition),
|
||||
};
|
||||
}
|
||||
|
||||
export function buildQianfanProvider(): ProviderConfig {
|
||||
return {
|
||||
baseUrl: QIANFAN_BASE_URL,
|
||||
@@ -536,6 +549,16 @@ export async function resolveImplicitProviders(params: {
|
||||
providers.ollama = { ...(await buildOllamaProvider()), apiKey: ollamaKey };
|
||||
}
|
||||
|
||||
const togetherKey =
|
||||
resolveEnvApiKeyVarName("together") ??
|
||||
resolveApiKeyFromProfiles({ provider: "together", store: authStore });
|
||||
if (togetherKey) {
|
||||
providers.together = {
|
||||
...buildTogetherProvider(),
|
||||
apiKey: togetherKey,
|
||||
};
|
||||
}
|
||||
|
||||
const qianfanKey =
|
||||
resolveEnvApiKeyVarName("qianfan") ??
|
||||
resolveApiKeyFromProfiles({ provider: "qianfan", store: authStore });
|
||||
@@ -551,7 +574,9 @@ export async function resolveImplicitCopilotProvider(params: {
|
||||
env?: NodeJS.ProcessEnv;
|
||||
}): Promise<ProviderConfig | null> {
|
||||
const env = params.env ?? process.env;
|
||||
const authStore = ensureAuthProfileStore(params.agentDir, { allowKeychainPrompt: false });
|
||||
const authStore = ensureAuthProfileStore(params.agentDir, {
|
||||
allowKeychainPrompt: false,
|
||||
});
|
||||
const hasProfile = listProfilesForProvider(authStore, "github-copilot").length > 0;
|
||||
const envToken = env.COPILOT_GITHUB_TOKEN ?? env.GH_TOKEN ?? env.GITHUB_TOKEN;
|
||||
const githubToken = (envToken ?? "").trim();
|
||||
@@ -622,7 +647,10 @@ export async function resolveImplicitBedrockProvider(params: {
|
||||
}
|
||||
|
||||
const region = discoveryConfig?.region ?? env.AWS_REGION ?? env.AWS_DEFAULT_REGION ?? "us-east-1";
|
||||
const models = await discoverBedrockModels({ region, config: discoveryConfig });
|
||||
const models = await discoverBedrockModels({
|
||||
region,
|
||||
config: discoveryConfig,
|
||||
});
|
||||
if (models.length === 0) {
|
||||
return null;
|
||||
}
|
||||
|
||||
133
src/agents/together-models.ts
Normal file
133
src/agents/together-models.ts
Normal file
@@ -0,0 +1,133 @@
|
||||
import type { ModelDefinitionConfig } from "../config/types.models.js";
|
||||
|
||||
// Base endpoint for Together AI's OpenAI-compatible REST API.
export const TOGETHER_BASE_URL = "https://api.together.xyz/v1";
|
||||
|
||||
/**
 * Static catalog of Together AI models surfaced by the implicit provider.
 *
 * Cost fields follow the same convention as the other model catalogs in
 * this directory — presumably USD per 1M tokens; confirm against
 * Together's published pricing before relying on them.
 *
 * NOTE(review): contextWindow 20000000 for Llama 4 Maverick looks
 * suspicious (Meta advertises ~1M for Maverick and ~10M for Scout) —
 * verify before using it for prompt budgeting.
 * NOTE(review): cacheWrite mirrors `output` for some entries and `input`
 * for others — confirm which convention Together actually bills.
 */
export const TOGETHER_MODEL_CATALOG: ModelDefinitionConfig[] = [
  {
    id: "zai-org/GLM-4.7",
    name: "GLM 4.7 Fp8",
    reasoning: false,
    input: ["text"],
    contextWindow: 202752,
    maxTokens: 8192,
    cost: {
      input: 0.45,
      output: 2.0,
      cacheRead: 0.45,
      cacheWrite: 2.0,
    },
  },
  {
    // NOTE(review): `cost` precedes `contextWindow` here, unlike the
    // sibling entries — harmless, but inconsistent key ordering.
    id: "moonshotai/Kimi-K2.5",
    name: "Kimi K2.5",
    reasoning: true,
    input: ["text", "image"],
    cost: {
      input: 0.5,
      output: 2.8,
      cacheRead: 0.5,
      cacheWrite: 2.8,
    },
    contextWindow: 262144,
    maxTokens: 32768,
  },
  {
    id: "meta-llama/Llama-3.3-70B-Instruct-Turbo",
    name: "Llama 3.3 70B Instruct Turbo",
    reasoning: false,
    input: ["text"],
    contextWindow: 131072,
    maxTokens: 8192,
    cost: {
      input: 0.88,
      output: 0.88,
      cacheRead: 0.88,
      cacheWrite: 0.88,
    },
  },
  {
    id: "meta-llama/Llama-4-Scout-17B-16E-Instruct",
    name: "Llama 4 Scout 17B 16E Instruct",
    reasoning: false,
    input: ["text", "image"],
    contextWindow: 10000000,
    maxTokens: 32768,
    cost: {
      input: 0.18,
      output: 0.59,
      cacheRead: 0.18,
      cacheWrite: 0.18,
    },
  },
  {
    id: "meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8",
    name: "Llama 4 Maverick 17B 128E Instruct FP8",
    reasoning: false,
    input: ["text", "image"],
    // NOTE(review): 20M context — see the catalog-level note above.
    contextWindow: 20000000,
    maxTokens: 32768,
    cost: {
      input: 0.27,
      output: 0.85,
      cacheRead: 0.27,
      cacheWrite: 0.27,
    },
  },
  {
    id: "deepseek-ai/DeepSeek-V3.1",
    name: "DeepSeek V3.1",
    reasoning: false,
    input: ["text"],
    contextWindow: 131072,
    maxTokens: 8192,
    cost: {
      input: 0.6,
      output: 1.25,
      cacheRead: 0.6,
      cacheWrite: 0.6,
    },
  },
  {
    id: "deepseek-ai/DeepSeek-R1",
    name: "DeepSeek R1",
    reasoning: true,
    input: ["text"],
    contextWindow: 131072,
    maxTokens: 8192,
    cost: {
      input: 3.0,
      output: 7.0,
      cacheRead: 3.0,
      cacheWrite: 3.0,
    },
  },
  {
    id: "moonshotai/Kimi-K2-Instruct-0905",
    name: "Kimi K2-Instruct 0905",
    reasoning: false,
    input: ["text"],
    contextWindow: 262144,
    maxTokens: 8192,
    cost: {
      input: 1.0,
      output: 3.0,
      cacheRead: 1.0,
      cacheWrite: 3.0,
    },
  },
];
|
||||
|
||||
export function buildTogetherModelDefinition(
|
||||
model: (typeof TOGETHER_MODEL_CATALOG)[number],
|
||||
): ModelDefinitionConfig {
|
||||
return {
|
||||
id: model.id,
|
||||
name: model.name,
|
||||
api: "openai-completions",
|
||||
reasoning: model.reasoning,
|
||||
input: model.input,
|
||||
cost: model.cost,
|
||||
contextWindow: model.contextWindow,
|
||||
maxTokens: model.maxTokens,
|
||||
};
|
||||
}
|
||||
Reference in New Issue
Block a user