mirror of
https://github.com/openclaw/openclaw.git
synced 2026-03-27 09:21:35 +07:00
refactor: move provider catalogs into extensions
This commit is contained in:
@@ -1,10 +1,7 @@
|
||||
import { emptyPluginConfigSchema, type OpenClawPluginApi } from "openclaw/plugin-sdk/core";
|
||||
import {
|
||||
buildBytePlusCodingProvider,
|
||||
buildBytePlusProvider,
|
||||
} from "../../src/agents/models-config.providers.static.js";
|
||||
import { ensureModelAllowlistEntry } from "../../src/commands/model-allowlist.js";
|
||||
import { createProviderApiKeyAuthMethod } from "../../src/plugins/provider-api-key-auth.js";
|
||||
import { buildBytePlusCodingProvider, buildBytePlusProvider } from "./provider-catalog.js";
|
||||
|
||||
const PROVIDER_ID = "byteplus";
|
||||
const BYTEPLUS_DEFAULT_MODEL_REF = "byteplus-plan/ark-code-latest";
|
||||
|
||||
24
extensions/byteplus/provider-catalog.ts
Normal file
24
extensions/byteplus/provider-catalog.ts
Normal file
@@ -0,0 +1,24 @@
|
||||
import {
|
||||
buildBytePlusModelDefinition,
|
||||
BYTEPLUS_BASE_URL,
|
||||
BYTEPLUS_CODING_BASE_URL,
|
||||
BYTEPLUS_CODING_MODEL_CATALOG,
|
||||
BYTEPLUS_MODEL_CATALOG,
|
||||
} from "../../src/agents/byteplus-models.js";
|
||||
import type { ModelProviderConfig } from "../../src/config/types.models.js";
|
||||
|
||||
export function buildBytePlusProvider(): ModelProviderConfig {
|
||||
return {
|
||||
baseUrl: BYTEPLUS_BASE_URL,
|
||||
api: "openai-completions",
|
||||
models: BYTEPLUS_MODEL_CATALOG.map(buildBytePlusModelDefinition),
|
||||
};
|
||||
}
|
||||
|
||||
export function buildBytePlusCodingProvider(): ModelProviderConfig {
|
||||
return {
|
||||
baseUrl: BYTEPLUS_CODING_BASE_URL,
|
||||
api: "openai-completions",
|
||||
models: BYTEPLUS_CODING_MODEL_CATALOG.map(buildBytePlusModelDefinition),
|
||||
};
|
||||
}
|
||||
@@ -1,10 +1,10 @@
|
||||
import { emptyPluginConfigSchema, type OpenClawPluginApi } from "openclaw/plugin-sdk/core";
|
||||
import { buildHuggingfaceProvider } from "../../src/agents/models-config.providers.discovery.js";
|
||||
import {
|
||||
applyHuggingfaceConfig,
|
||||
HUGGINGFACE_DEFAULT_MODEL_REF,
|
||||
} from "../../src/commands/onboard-auth.js";
|
||||
import { createProviderApiKeyAuthMethod } from "../../src/plugins/provider-api-key-auth.js";
|
||||
import { buildHuggingfaceProvider } from "./provider-catalog.js";
|
||||
|
||||
const PROVIDER_ID = "huggingface";
|
||||
|
||||
|
||||
22
extensions/huggingface/provider-catalog.ts
Normal file
22
extensions/huggingface/provider-catalog.ts
Normal file
@@ -0,0 +1,22 @@
|
||||
import {
|
||||
buildHuggingfaceModelDefinition,
|
||||
discoverHuggingfaceModels,
|
||||
HUGGINGFACE_BASE_URL,
|
||||
HUGGINGFACE_MODEL_CATALOG,
|
||||
} from "../../src/agents/huggingface-models.js";
|
||||
import type { ModelProviderConfig } from "../../src/config/types.models.js";
|
||||
|
||||
export async function buildHuggingfaceProvider(
|
||||
discoveryApiKey?: string,
|
||||
): Promise<ModelProviderConfig> {
|
||||
const resolvedSecret = discoveryApiKey?.trim() ?? "";
|
||||
const models =
|
||||
resolvedSecret !== ""
|
||||
? await discoverHuggingfaceModels(resolvedSecret)
|
||||
: HUGGINGFACE_MODEL_CATALOG.map(buildHuggingfaceModelDefinition);
|
||||
return {
|
||||
baseUrl: HUGGINGFACE_BASE_URL,
|
||||
api: "openai-completions",
|
||||
models,
|
||||
};
|
||||
}
|
||||
@@ -1,5 +1,4 @@
|
||||
import { emptyPluginConfigSchema, type OpenClawPluginApi } from "openclaw/plugin-sdk/core";
|
||||
import { buildKilocodeProviderWithDiscovery } from "../../src/agents/models-config.providers.discovery.js";
|
||||
import {
|
||||
createKilocodeWrapper,
|
||||
isProxyReasoningUnsupported,
|
||||
@@ -9,6 +8,7 @@ import {
|
||||
KILOCODE_DEFAULT_MODEL_REF,
|
||||
} from "../../src/commands/onboard-auth.js";
|
||||
import { createProviderApiKeyAuthMethod } from "../../src/plugins/provider-api-key-auth.js";
|
||||
import { buildKilocodeProviderWithDiscovery } from "./provider-catalog.js";
|
||||
|
||||
const PROVIDER_ID = "kilocode";
|
||||
|
||||
|
||||
34
extensions/kilocode/provider-catalog.ts
Normal file
34
extensions/kilocode/provider-catalog.ts
Normal file
@@ -0,0 +1,34 @@
|
||||
import { discoverKilocodeModels } from "../../src/agents/kilocode-models.js";
|
||||
import type { ModelProviderConfig } from "../../src/config/types.models.js";
|
||||
import {
|
||||
KILOCODE_BASE_URL,
|
||||
KILOCODE_DEFAULT_CONTEXT_WINDOW,
|
||||
KILOCODE_DEFAULT_COST,
|
||||
KILOCODE_DEFAULT_MAX_TOKENS,
|
||||
KILOCODE_MODEL_CATALOG,
|
||||
} from "../../src/providers/kilocode-shared.js";
|
||||
|
||||
export function buildKilocodeProvider(): ModelProviderConfig {
|
||||
return {
|
||||
baseUrl: KILOCODE_BASE_URL,
|
||||
api: "openai-completions",
|
||||
models: KILOCODE_MODEL_CATALOG.map((model) => ({
|
||||
id: model.id,
|
||||
name: model.name,
|
||||
reasoning: model.reasoning,
|
||||
input: model.input,
|
||||
cost: KILOCODE_DEFAULT_COST,
|
||||
contextWindow: model.contextWindow ?? KILOCODE_DEFAULT_CONTEXT_WINDOW,
|
||||
maxTokens: model.maxTokens ?? KILOCODE_DEFAULT_MAX_TOKENS,
|
||||
})),
|
||||
};
|
||||
}
|
||||
|
||||
export async function buildKilocodeProviderWithDiscovery(): Promise<ModelProviderConfig> {
|
||||
const models = await discoverKilocodeModels();
|
||||
return {
|
||||
baseUrl: KILOCODE_BASE_URL,
|
||||
api: "openai-completions",
|
||||
models,
|
||||
};
|
||||
}
|
||||
@@ -1,8 +1,8 @@
|
||||
import { emptyPluginConfigSchema, type OpenClawPluginApi } from "openclaw/plugin-sdk/core";
|
||||
import { buildKimiCodingProvider } from "../../src/agents/models-config.providers.static.js";
|
||||
import { applyKimiCodeConfig, KIMI_CODING_MODEL_REF } from "../../src/commands/onboard-auth.js";
|
||||
import { createProviderApiKeyAuthMethod } from "../../src/plugins/provider-api-key-auth.js";
|
||||
import { isRecord } from "../../src/utils.js";
|
||||
import { buildKimiCodingProvider } from "./provider-catalog.js";
|
||||
|
||||
const PROVIDER_ID = "kimi-coding";
|
||||
|
||||
|
||||
34
extensions/kimi-coding/provider-catalog.ts
Normal file
34
extensions/kimi-coding/provider-catalog.ts
Normal file
@@ -0,0 +1,34 @@
|
||||
import type { ModelProviderConfig } from "../../src/config/types.models.js";
|
||||
|
||||
export const KIMI_CODING_BASE_URL = "https://api.kimi.com/coding/";
|
||||
const KIMI_CODING_USER_AGENT = "claude-code/0.1.0";
|
||||
export const KIMI_CODING_DEFAULT_MODEL_ID = "k2p5";
|
||||
const KIMI_CODING_DEFAULT_CONTEXT_WINDOW = 262144;
|
||||
const KIMI_CODING_DEFAULT_MAX_TOKENS = 32768;
|
||||
const KIMI_CODING_DEFAULT_COST = {
|
||||
input: 0,
|
||||
output: 0,
|
||||
cacheRead: 0,
|
||||
cacheWrite: 0,
|
||||
};
|
||||
|
||||
export function buildKimiCodingProvider(): ModelProviderConfig {
|
||||
return {
|
||||
baseUrl: KIMI_CODING_BASE_URL,
|
||||
api: "anthropic-messages",
|
||||
headers: {
|
||||
"User-Agent": KIMI_CODING_USER_AGENT,
|
||||
},
|
||||
models: [
|
||||
{
|
||||
id: KIMI_CODING_DEFAULT_MODEL_ID,
|
||||
name: "Kimi for Coding",
|
||||
reasoning: true,
|
||||
input: ["text", "image"],
|
||||
cost: KIMI_CODING_DEFAULT_COST,
|
||||
contextWindow: KIMI_CODING_DEFAULT_CONTEXT_WINDOW,
|
||||
maxTokens: KIMI_CODING_DEFAULT_MAX_TOKENS,
|
||||
},
|
||||
],
|
||||
};
|
||||
}
|
||||
@@ -8,10 +8,6 @@ import {
|
||||
} from "openclaw/plugin-sdk/minimax-portal-auth";
|
||||
import { ensureAuthProfileStore, listProfilesForProvider } from "../../src/agents/auth-profiles.js";
|
||||
import { MINIMAX_OAUTH_MARKER } from "../../src/agents/model-auth-markers.js";
|
||||
import {
|
||||
buildMinimaxPortalProvider,
|
||||
buildMinimaxProvider,
|
||||
} from "../../src/agents/models-config.providers.static.js";
|
||||
import {
|
||||
applyMinimaxApiConfig,
|
||||
applyMinimaxApiConfigCn,
|
||||
@@ -19,6 +15,7 @@ import {
|
||||
import { fetchMinimaxUsage } from "../../src/infra/provider-usage.fetch.js";
|
||||
import { createProviderApiKeyAuthMethod } from "../../src/plugins/provider-api-key-auth.js";
|
||||
import { loginMiniMaxPortalOAuth, type MiniMaxRegion } from "./oauth.js";
|
||||
import { buildMinimaxPortalProvider, buildMinimaxProvider } from "./provider-catalog.js";
|
||||
|
||||
const API_PROVIDER_ID = "minimax";
|
||||
const PORTAL_PROVIDER_ID = "minimax-portal";
|
||||
|
||||
77
extensions/minimax/provider-catalog.ts
Normal file
77
extensions/minimax/provider-catalog.ts
Normal file
@@ -0,0 +1,77 @@
|
||||
import type { ModelDefinitionConfig, ModelProviderConfig } from "../../src/config/types.models.js";
|
||||
|
||||
const MINIMAX_PORTAL_BASE_URL = "https://api.minimax.io/anthropic";
|
||||
export const MINIMAX_DEFAULT_MODEL_ID = "MiniMax-M2.5";
|
||||
const MINIMAX_DEFAULT_VISION_MODEL_ID = "MiniMax-VL-01";
|
||||
const MINIMAX_DEFAULT_CONTEXT_WINDOW = 200000;
|
||||
const MINIMAX_DEFAULT_MAX_TOKENS = 8192;
|
||||
const MINIMAX_API_COST = {
|
||||
input: 0.3,
|
||||
output: 1.2,
|
||||
cacheRead: 0.03,
|
||||
cacheWrite: 0.12,
|
||||
};
|
||||
|
||||
function buildMinimaxModel(params: {
|
||||
id: string;
|
||||
name: string;
|
||||
reasoning: boolean;
|
||||
input: ModelDefinitionConfig["input"];
|
||||
}): ModelDefinitionConfig {
|
||||
return {
|
||||
id: params.id,
|
||||
name: params.name,
|
||||
reasoning: params.reasoning,
|
||||
input: params.input,
|
||||
cost: MINIMAX_API_COST,
|
||||
contextWindow: MINIMAX_DEFAULT_CONTEXT_WINDOW,
|
||||
maxTokens: MINIMAX_DEFAULT_MAX_TOKENS,
|
||||
};
|
||||
}
|
||||
|
||||
function buildMinimaxTextModel(params: {
|
||||
id: string;
|
||||
name: string;
|
||||
reasoning: boolean;
|
||||
}): ModelDefinitionConfig {
|
||||
return buildMinimaxModel({ ...params, input: ["text"] });
|
||||
}
|
||||
|
||||
function buildMinimaxCatalog(): ModelDefinitionConfig[] {
|
||||
return [
|
||||
buildMinimaxModel({
|
||||
id: MINIMAX_DEFAULT_VISION_MODEL_ID,
|
||||
name: "MiniMax VL 01",
|
||||
reasoning: false,
|
||||
input: ["text", "image"],
|
||||
}),
|
||||
buildMinimaxTextModel({
|
||||
id: MINIMAX_DEFAULT_MODEL_ID,
|
||||
name: "MiniMax M2.5",
|
||||
reasoning: true,
|
||||
}),
|
||||
buildMinimaxTextModel({
|
||||
id: "MiniMax-M2.5-highspeed",
|
||||
name: "MiniMax M2.5 Highspeed",
|
||||
reasoning: true,
|
||||
}),
|
||||
];
|
||||
}
|
||||
|
||||
export function buildMinimaxProvider(): ModelProviderConfig {
|
||||
return {
|
||||
baseUrl: MINIMAX_PORTAL_BASE_URL,
|
||||
api: "anthropic-messages",
|
||||
authHeader: true,
|
||||
models: buildMinimaxCatalog(),
|
||||
};
|
||||
}
|
||||
|
||||
export function buildMinimaxPortalProvider(): ModelProviderConfig {
|
||||
return {
|
||||
baseUrl: MINIMAX_PORTAL_BASE_URL,
|
||||
api: "anthropic-messages",
|
||||
authHeader: true,
|
||||
models: buildMinimaxCatalog(),
|
||||
};
|
||||
}
|
||||
@@ -1,11 +1,11 @@
|
||||
import { emptyPluginConfigSchema, type OpenClawPluginApi } from "openclaw/plugin-sdk/core";
|
||||
import { buildModelStudioProvider } from "../../src/agents/models-config.providers.static.js";
|
||||
import {
|
||||
applyModelStudioConfig,
|
||||
applyModelStudioConfigCn,
|
||||
MODELSTUDIO_DEFAULT_MODEL_REF,
|
||||
} from "../../src/commands/onboard-auth.js";
|
||||
import { createProviderApiKeyAuthMethod } from "../../src/plugins/provider-api-key-auth.js";
|
||||
import { buildModelStudioProvider } from "./provider-catalog.js";
|
||||
|
||||
const PROVIDER_ID = "modelstudio";
|
||||
|
||||
|
||||
93
extensions/modelstudio/provider-catalog.ts
Normal file
93
extensions/modelstudio/provider-catalog.ts
Normal file
@@ -0,0 +1,93 @@
|
||||
import type { ModelDefinitionConfig, ModelProviderConfig } from "../../src/config/types.models.js";
|
||||
|
||||
export const MODELSTUDIO_BASE_URL = "https://coding-intl.dashscope.aliyuncs.com/v1";
|
||||
export const MODELSTUDIO_DEFAULT_MODEL_ID = "qwen3.5-plus";
|
||||
const MODELSTUDIO_DEFAULT_COST = {
|
||||
input: 0,
|
||||
output: 0,
|
||||
cacheRead: 0,
|
||||
cacheWrite: 0,
|
||||
};
|
||||
|
||||
const MODELSTUDIO_MODEL_CATALOG: ReadonlyArray<ModelDefinitionConfig> = [
|
||||
{
|
||||
id: "qwen3.5-plus",
|
||||
name: "qwen3.5-plus",
|
||||
reasoning: false,
|
||||
input: ["text", "image"],
|
||||
cost: MODELSTUDIO_DEFAULT_COST,
|
||||
contextWindow: 1_000_000,
|
||||
maxTokens: 65_536,
|
||||
},
|
||||
{
|
||||
id: "qwen3-max-2026-01-23",
|
||||
name: "qwen3-max-2026-01-23",
|
||||
reasoning: false,
|
||||
input: ["text"],
|
||||
cost: MODELSTUDIO_DEFAULT_COST,
|
||||
contextWindow: 262_144,
|
||||
maxTokens: 65_536,
|
||||
},
|
||||
{
|
||||
id: "qwen3-coder-next",
|
||||
name: "qwen3-coder-next",
|
||||
reasoning: false,
|
||||
input: ["text"],
|
||||
cost: MODELSTUDIO_DEFAULT_COST,
|
||||
contextWindow: 262_144,
|
||||
maxTokens: 65_536,
|
||||
},
|
||||
{
|
||||
id: "qwen3-coder-plus",
|
||||
name: "qwen3-coder-plus",
|
||||
reasoning: false,
|
||||
input: ["text"],
|
||||
cost: MODELSTUDIO_DEFAULT_COST,
|
||||
contextWindow: 1_000_000,
|
||||
maxTokens: 65_536,
|
||||
},
|
||||
{
|
||||
id: "MiniMax-M2.5",
|
||||
name: "MiniMax-M2.5",
|
||||
reasoning: true,
|
||||
input: ["text"],
|
||||
cost: MODELSTUDIO_DEFAULT_COST,
|
||||
contextWindow: 1_000_000,
|
||||
maxTokens: 65_536,
|
||||
},
|
||||
{
|
||||
id: "glm-5",
|
||||
name: "glm-5",
|
||||
reasoning: false,
|
||||
input: ["text"],
|
||||
cost: MODELSTUDIO_DEFAULT_COST,
|
||||
contextWindow: 202_752,
|
||||
maxTokens: 16_384,
|
||||
},
|
||||
{
|
||||
id: "glm-4.7",
|
||||
name: "glm-4.7",
|
||||
reasoning: false,
|
||||
input: ["text"],
|
||||
cost: MODELSTUDIO_DEFAULT_COST,
|
||||
contextWindow: 202_752,
|
||||
maxTokens: 16_384,
|
||||
},
|
||||
{
|
||||
id: "kimi-k2.5",
|
||||
name: "kimi-k2.5",
|
||||
reasoning: false,
|
||||
input: ["text", "image"],
|
||||
cost: MODELSTUDIO_DEFAULT_COST,
|
||||
contextWindow: 262_144,
|
||||
maxTokens: 32_768,
|
||||
},
|
||||
];
|
||||
|
||||
export function buildModelStudioProvider(): ModelProviderConfig {
|
||||
return {
|
||||
baseUrl: MODELSTUDIO_BASE_URL,
|
||||
api: "openai-completions",
|
||||
models: MODELSTUDIO_MODEL_CATALOG.map((model) => ({ ...model })),
|
||||
};
|
||||
}
|
||||
@@ -1,4 +1,3 @@
|
||||
import { buildMoonshotProvider } from "../../src/agents/models-config.providers.static.js";
|
||||
import {
|
||||
createMoonshotThinkingWrapper,
|
||||
resolveMoonshotThinkingType,
|
||||
@@ -16,6 +15,7 @@ import { MOONSHOT_DEFAULT_MODEL_REF } from "../../src/commands/onboard-auth.mode
|
||||
import { emptyPluginConfigSchema } from "../../src/plugins/config-schema.js";
|
||||
import { createProviderApiKeyAuthMethod } from "../../src/plugins/provider-api-key-auth.js";
|
||||
import type { OpenClawPluginApi } from "../../src/plugins/types.js";
|
||||
import { buildMoonshotProvider } from "./provider-catalog.js";
|
||||
|
||||
const PROVIDER_ID = "moonshot";
|
||||
|
||||
|
||||
30
extensions/moonshot/provider-catalog.ts
Normal file
30
extensions/moonshot/provider-catalog.ts
Normal file
@@ -0,0 +1,30 @@
|
||||
import type { ModelProviderConfig } from "../../src/config/types.models.js";
|
||||
|
||||
export const MOONSHOT_BASE_URL = "https://api.moonshot.ai/v1";
|
||||
export const MOONSHOT_DEFAULT_MODEL_ID = "kimi-k2.5";
|
||||
const MOONSHOT_DEFAULT_CONTEXT_WINDOW = 256000;
|
||||
const MOONSHOT_DEFAULT_MAX_TOKENS = 8192;
|
||||
const MOONSHOT_DEFAULT_COST = {
|
||||
input: 0,
|
||||
output: 0,
|
||||
cacheRead: 0,
|
||||
cacheWrite: 0,
|
||||
};
|
||||
|
||||
export function buildMoonshotProvider(): ModelProviderConfig {
|
||||
return {
|
||||
baseUrl: MOONSHOT_BASE_URL,
|
||||
api: "openai-completions",
|
||||
models: [
|
||||
{
|
||||
id: MOONSHOT_DEFAULT_MODEL_ID,
|
||||
name: "Kimi K2.5",
|
||||
reasoning: false,
|
||||
input: ["text", "image"],
|
||||
cost: MOONSHOT_DEFAULT_COST,
|
||||
contextWindow: MOONSHOT_DEFAULT_CONTEXT_WINDOW,
|
||||
maxTokens: MOONSHOT_DEFAULT_MAX_TOKENS,
|
||||
},
|
||||
],
|
||||
};
|
||||
}
|
||||
@@ -1,5 +1,5 @@
|
||||
import { emptyPluginConfigSchema, type OpenClawPluginApi } from "openclaw/plugin-sdk/core";
|
||||
import { buildNvidiaProvider } from "../../src/agents/models-config.providers.static.js";
|
||||
import { buildNvidiaProvider } from "./provider-catalog.js";
|
||||
|
||||
const PROVIDER_ID = "nvidia";
|
||||
|
||||
|
||||
48
extensions/nvidia/provider-catalog.ts
Normal file
48
extensions/nvidia/provider-catalog.ts
Normal file
@@ -0,0 +1,48 @@
|
||||
import type { ModelProviderConfig } from "../../src/config/types.models.js";
|
||||
|
||||
const NVIDIA_BASE_URL = "https://integrate.api.nvidia.com/v1";
|
||||
const NVIDIA_DEFAULT_MODEL_ID = "nvidia/llama-3.1-nemotron-70b-instruct";
|
||||
const NVIDIA_DEFAULT_CONTEXT_WINDOW = 131072;
|
||||
const NVIDIA_DEFAULT_MAX_TOKENS = 4096;
|
||||
const NVIDIA_DEFAULT_COST = {
|
||||
input: 0,
|
||||
output: 0,
|
||||
cacheRead: 0,
|
||||
cacheWrite: 0,
|
||||
};
|
||||
|
||||
export function buildNvidiaProvider(): ModelProviderConfig {
|
||||
return {
|
||||
baseUrl: NVIDIA_BASE_URL,
|
||||
api: "openai-completions",
|
||||
models: [
|
||||
{
|
||||
id: NVIDIA_DEFAULT_MODEL_ID,
|
||||
name: "NVIDIA Llama 3.1 Nemotron 70B Instruct",
|
||||
reasoning: false,
|
||||
input: ["text"],
|
||||
cost: NVIDIA_DEFAULT_COST,
|
||||
contextWindow: NVIDIA_DEFAULT_CONTEXT_WINDOW,
|
||||
maxTokens: NVIDIA_DEFAULT_MAX_TOKENS,
|
||||
},
|
||||
{
|
||||
id: "meta/llama-3.3-70b-instruct",
|
||||
name: "Meta Llama 3.3 70B Instruct",
|
||||
reasoning: false,
|
||||
input: ["text"],
|
||||
cost: NVIDIA_DEFAULT_COST,
|
||||
contextWindow: 131072,
|
||||
maxTokens: 4096,
|
||||
},
|
||||
{
|
||||
id: "nvidia/mistral-nemo-minitron-8b-8k-instruct",
|
||||
name: "NVIDIA Mistral NeMo Minitron 8B Instruct",
|
||||
reasoning: false,
|
||||
input: ["text"],
|
||||
cost: NVIDIA_DEFAULT_COST,
|
||||
contextWindow: 8192,
|
||||
maxTokens: 2048,
|
||||
},
|
||||
],
|
||||
};
|
||||
}
|
||||
@@ -6,8 +6,8 @@ import {
|
||||
type ProviderAuthResult,
|
||||
type ProviderDiscoveryContext,
|
||||
} from "openclaw/plugin-sdk/core";
|
||||
import { resolveOllamaApiBase } from "../../src/agents/models-config.providers.discovery.js";
|
||||
import { OLLAMA_DEFAULT_BASE_URL } from "../../src/agents/ollama-defaults.js";
|
||||
import { resolveOllamaApiBase } from "../../src/agents/ollama-models.js";
|
||||
|
||||
const PROVIDER_ID = "ollama";
|
||||
const DEFAULT_API_KEY = "ollama-local";
|
||||
|
||||
11
extensions/openai/openai-codex-catalog.ts
Normal file
11
extensions/openai/openai-codex-catalog.ts
Normal file
@@ -0,0 +1,11 @@
|
||||
import type { ModelProviderConfig } from "../../src/config/types.models.js";
|
||||
|
||||
export const OPENAI_CODEX_BASE_URL = "https://chatgpt.com/backend-api";
|
||||
|
||||
export function buildOpenAICodexProvider(): ModelProviderConfig {
|
||||
return {
|
||||
baseUrl: OPENAI_CODEX_BASE_URL,
|
||||
api: "openai-codex-responses",
|
||||
models: [],
|
||||
};
|
||||
}
|
||||
@@ -10,12 +10,12 @@ import { ensureAuthProfileStore } from "../../src/agents/auth-profiles/store.js"
|
||||
import type { OAuthCredential } from "../../src/agents/auth-profiles/types.js";
|
||||
import { DEFAULT_CONTEXT_TOKENS } from "../../src/agents/defaults.js";
|
||||
import { normalizeModelCompat } from "../../src/agents/model-compat.js";
|
||||
import { buildOpenAICodexProvider } from "../../src/agents/models-config.providers.static.js";
|
||||
import { normalizeProviderId } from "../../src/agents/provider-id.js";
|
||||
import { loginOpenAICodexOAuth } from "../../src/commands/openai-codex-oauth.js";
|
||||
import { fetchCodexUsage } from "../../src/infra/provider-usage.fetch.js";
|
||||
import { buildOauthProviderAuthResult } from "../../src/plugin-sdk/provider-auth-result.js";
|
||||
import type { ProviderPlugin } from "../../src/plugins/types.js";
|
||||
import { buildOpenAICodexProvider } from "./openai-codex-catalog.js";
|
||||
import {
|
||||
cloneFirstTemplateModel,
|
||||
findCatalogTemplate,
|
||||
|
||||
@@ -6,7 +6,6 @@ import {
|
||||
type ProviderRuntimeModel,
|
||||
} from "openclaw/plugin-sdk/core";
|
||||
import { DEFAULT_CONTEXT_TOKENS } from "../../src/agents/defaults.js";
|
||||
import { buildOpenrouterProvider } from "../../src/agents/models-config.providers.static.js";
|
||||
import {
|
||||
getOpenRouterModelCapabilities,
|
||||
loadOpenRouterModelCapabilities,
|
||||
@@ -21,6 +20,7 @@ import {
|
||||
OPENROUTER_DEFAULT_MODEL_REF,
|
||||
} from "../../src/commands/onboard-auth.js";
|
||||
import { createProviderApiKeyAuthMethod } from "../../src/plugins/provider-api-key-auth.js";
|
||||
import { buildOpenrouterProvider } from "./provider-catalog.js";
|
||||
|
||||
const PROVIDER_ID = "openrouter";
|
||||
const OPENROUTER_BASE_URL = "https://openrouter.ai/api/v1";
|
||||
|
||||
48
extensions/openrouter/provider-catalog.ts
Normal file
48
extensions/openrouter/provider-catalog.ts
Normal file
@@ -0,0 +1,48 @@
|
||||
import type { ModelProviderConfig } from "../../src/config/types.models.js";
|
||||
|
||||
const OPENROUTER_BASE_URL = "https://openrouter.ai/api/v1";
|
||||
const OPENROUTER_DEFAULT_MODEL_ID = "auto";
|
||||
const OPENROUTER_DEFAULT_CONTEXT_WINDOW = 200000;
|
||||
const OPENROUTER_DEFAULT_MAX_TOKENS = 8192;
|
||||
const OPENROUTER_DEFAULT_COST = {
|
||||
input: 0,
|
||||
output: 0,
|
||||
cacheRead: 0,
|
||||
cacheWrite: 0,
|
||||
};
|
||||
|
||||
export function buildOpenrouterProvider(): ModelProviderConfig {
|
||||
return {
|
||||
baseUrl: OPENROUTER_BASE_URL,
|
||||
api: "openai-completions",
|
||||
models: [
|
||||
{
|
||||
id: OPENROUTER_DEFAULT_MODEL_ID,
|
||||
name: "OpenRouter Auto",
|
||||
reasoning: false,
|
||||
input: ["text", "image"],
|
||||
cost: OPENROUTER_DEFAULT_COST,
|
||||
contextWindow: OPENROUTER_DEFAULT_CONTEXT_WINDOW,
|
||||
maxTokens: OPENROUTER_DEFAULT_MAX_TOKENS,
|
||||
},
|
||||
{
|
||||
id: "openrouter/hunter-alpha",
|
||||
name: "Hunter Alpha",
|
||||
reasoning: true,
|
||||
input: ["text"],
|
||||
cost: OPENROUTER_DEFAULT_COST,
|
||||
contextWindow: 1048576,
|
||||
maxTokens: 65536,
|
||||
},
|
||||
{
|
||||
id: "openrouter/healer-alpha",
|
||||
name: "Healer Alpha",
|
||||
reasoning: true,
|
||||
input: ["text", "image"],
|
||||
cost: OPENROUTER_DEFAULT_COST,
|
||||
contextWindow: 262144,
|
||||
maxTokens: 65536,
|
||||
},
|
||||
],
|
||||
};
|
||||
}
|
||||
@@ -1,7 +1,7 @@
|
||||
import { emptyPluginConfigSchema, type OpenClawPluginApi } from "openclaw/plugin-sdk/core";
|
||||
import { buildQianfanProvider } from "../../src/agents/models-config.providers.static.js";
|
||||
import { applyQianfanConfig, QIANFAN_DEFAULT_MODEL_REF } from "../../src/commands/onboard-auth.js";
|
||||
import { createProviderApiKeyAuthMethod } from "../../src/plugins/provider-api-key-auth.js";
|
||||
import { buildQianfanProvider } from "./provider-catalog.js";
|
||||
|
||||
const PROVIDER_ID = "qianfan";
|
||||
|
||||
|
||||
39
extensions/qianfan/provider-catalog.ts
Normal file
39
extensions/qianfan/provider-catalog.ts
Normal file
@@ -0,0 +1,39 @@
|
||||
import type { ModelProviderConfig } from "../../src/config/types.models.js";
|
||||
|
||||
export const QIANFAN_BASE_URL = "https://qianfan.baidubce.com/v2";
|
||||
export const QIANFAN_DEFAULT_MODEL_ID = "deepseek-v3.2";
|
||||
const QIANFAN_DEFAULT_CONTEXT_WINDOW = 98304;
|
||||
const QIANFAN_DEFAULT_MAX_TOKENS = 32768;
|
||||
const QIANFAN_DEFAULT_COST = {
|
||||
input: 0,
|
||||
output: 0,
|
||||
cacheRead: 0,
|
||||
cacheWrite: 0,
|
||||
};
|
||||
|
||||
export function buildQianfanProvider(): ModelProviderConfig {
|
||||
return {
|
||||
baseUrl: QIANFAN_BASE_URL,
|
||||
api: "openai-completions",
|
||||
models: [
|
||||
{
|
||||
id: QIANFAN_DEFAULT_MODEL_ID,
|
||||
name: "DEEPSEEK V3.2",
|
||||
reasoning: true,
|
||||
input: ["text"],
|
||||
cost: QIANFAN_DEFAULT_COST,
|
||||
contextWindow: QIANFAN_DEFAULT_CONTEXT_WINDOW,
|
||||
maxTokens: QIANFAN_DEFAULT_MAX_TOKENS,
|
||||
},
|
||||
{
|
||||
id: "ernie-5.0-thinking-preview",
|
||||
name: "ERNIE-5.0-Thinking-Preview",
|
||||
reasoning: true,
|
||||
input: ["text", "image"],
|
||||
cost: QIANFAN_DEFAULT_COST,
|
||||
contextWindow: 119000,
|
||||
maxTokens: 64000,
|
||||
},
|
||||
],
|
||||
};
|
||||
}
|
||||
@@ -9,13 +9,12 @@ import { ensureAuthProfileStore, listProfilesForProvider } from "../../src/agent
|
||||
import { QWEN_OAUTH_MARKER } from "../../src/agents/model-auth-markers.js";
|
||||
import { refreshQwenPortalCredentials } from "../../src/providers/qwen-portal-oauth.js";
|
||||
import { loginQwenPortalOAuth } from "./oauth.js";
|
||||
import { buildQwenPortalProvider, QWEN_PORTAL_BASE_URL } from "./provider-catalog.js";
|
||||
|
||||
const PROVIDER_ID = "qwen-portal";
|
||||
const PROVIDER_LABEL = "Qwen";
|
||||
const DEFAULT_MODEL = "qwen-portal/coder-model";
|
||||
const DEFAULT_BASE_URL = "https://portal.qwen.ai/v1";
|
||||
const DEFAULT_CONTEXT_WINDOW = 128000;
|
||||
const DEFAULT_MAX_TOKENS = 8192;
|
||||
const DEFAULT_BASE_URL = QWEN_PORTAL_BASE_URL;
|
||||
|
||||
function normalizeBaseUrl(value: string | undefined): string {
|
||||
const raw = value?.trim() || DEFAULT_BASE_URL;
|
||||
@@ -23,39 +22,11 @@ function normalizeBaseUrl(value: string | undefined): string {
|
||||
return withProtocol.endsWith("/v1") ? withProtocol : `${withProtocol.replace(/\/+$/, "")}/v1`;
|
||||
}
|
||||
|
||||
function buildModelDefinition(params: {
|
||||
id: string;
|
||||
name: string;
|
||||
input: Array<"text" | "image">;
|
||||
}) {
|
||||
return {
|
||||
id: params.id,
|
||||
name: params.name,
|
||||
reasoning: false,
|
||||
input: params.input,
|
||||
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
|
||||
contextWindow: DEFAULT_CONTEXT_WINDOW,
|
||||
maxTokens: DEFAULT_MAX_TOKENS,
|
||||
};
|
||||
}
|
||||
|
||||
function buildProviderCatalog(params: { baseUrl: string; apiKey: string }) {
|
||||
return {
|
||||
...buildQwenPortalProvider(),
|
||||
baseUrl: params.baseUrl,
|
||||
apiKey: params.apiKey,
|
||||
api: "openai-completions" as const,
|
||||
models: [
|
||||
buildModelDefinition({
|
||||
id: "coder-model",
|
||||
name: "Qwen Coder",
|
||||
input: ["text"],
|
||||
}),
|
||||
buildModelDefinition({
|
||||
id: "vision-model",
|
||||
name: "Qwen Vision",
|
||||
input: ["text", "image"],
|
||||
}),
|
||||
],
|
||||
};
|
||||
}
|
||||
|
||||
|
||||
46
extensions/qwen-portal-auth/provider-catalog.ts
Normal file
46
extensions/qwen-portal-auth/provider-catalog.ts
Normal file
@@ -0,0 +1,46 @@
|
||||
import type { ModelDefinitionConfig, ModelProviderConfig } from "../../src/config/types.models.js";
|
||||
|
||||
export const QWEN_PORTAL_BASE_URL = "https://portal.qwen.ai/v1";
|
||||
const QWEN_PORTAL_DEFAULT_CONTEXT_WINDOW = 128000;
|
||||
const QWEN_PORTAL_DEFAULT_MAX_TOKENS = 8192;
|
||||
const QWEN_PORTAL_DEFAULT_COST = {
|
||||
input: 0,
|
||||
output: 0,
|
||||
cacheRead: 0,
|
||||
cacheWrite: 0,
|
||||
};
|
||||
|
||||
function buildModelDefinition(params: {
|
||||
id: string;
|
||||
name: string;
|
||||
input: ModelDefinitionConfig["input"];
|
||||
}): ModelDefinitionConfig {
|
||||
return {
|
||||
id: params.id,
|
||||
name: params.name,
|
||||
reasoning: false,
|
||||
input: params.input,
|
||||
cost: QWEN_PORTAL_DEFAULT_COST,
|
||||
contextWindow: QWEN_PORTAL_DEFAULT_CONTEXT_WINDOW,
|
||||
maxTokens: QWEN_PORTAL_DEFAULT_MAX_TOKENS,
|
||||
};
|
||||
}
|
||||
|
||||
export function buildQwenPortalProvider(): ModelProviderConfig {
|
||||
return {
|
||||
baseUrl: QWEN_PORTAL_BASE_URL,
|
||||
api: "openai-completions",
|
||||
models: [
|
||||
buildModelDefinition({
|
||||
id: "coder-model",
|
||||
name: "Qwen Coder",
|
||||
input: ["text"],
|
||||
}),
|
||||
buildModelDefinition({
|
||||
id: "vision-model",
|
||||
name: "Qwen Vision",
|
||||
input: ["text", "image"],
|
||||
}),
|
||||
],
|
||||
};
|
||||
}
|
||||
@@ -1,10 +1,10 @@
|
||||
import { emptyPluginConfigSchema, type OpenClawPluginApi } from "openclaw/plugin-sdk/core";
|
||||
import { buildSyntheticProvider } from "../../src/agents/models-config.providers.static.js";
|
||||
import {
|
||||
applySyntheticConfig,
|
||||
SYNTHETIC_DEFAULT_MODEL_REF,
|
||||
} from "../../src/commands/onboard-auth.js";
|
||||
import { createProviderApiKeyAuthMethod } from "../../src/plugins/provider-api-key-auth.js";
|
||||
import { buildSyntheticProvider } from "./provider-catalog.js";
|
||||
|
||||
const PROVIDER_ID = "synthetic";
|
||||
|
||||
|
||||
14
extensions/synthetic/provider-catalog.ts
Normal file
14
extensions/synthetic/provider-catalog.ts
Normal file
@@ -0,0 +1,14 @@
|
||||
import {
|
||||
buildSyntheticModelDefinition,
|
||||
SYNTHETIC_BASE_URL,
|
||||
SYNTHETIC_MODEL_CATALOG,
|
||||
} from "../../src/agents/synthetic-models.js";
|
||||
import type { ModelProviderConfig } from "../../src/config/types.models.js";
|
||||
|
||||
export function buildSyntheticProvider(): ModelProviderConfig {
|
||||
return {
|
||||
baseUrl: SYNTHETIC_BASE_URL,
|
||||
api: "anthropic-messages",
|
||||
models: SYNTHETIC_MODEL_CATALOG.map(buildSyntheticModelDefinition),
|
||||
};
|
||||
}
|
||||
@@ -1,10 +1,10 @@
|
||||
import { emptyPluginConfigSchema, type OpenClawPluginApi } from "openclaw/plugin-sdk/core";
|
||||
import { buildTogetherProvider } from "../../src/agents/models-config.providers.static.js";
|
||||
import {
|
||||
applyTogetherConfig,
|
||||
TOGETHER_DEFAULT_MODEL_REF,
|
||||
} from "../../src/commands/onboard-auth.js";
|
||||
import { createProviderApiKeyAuthMethod } from "../../src/plugins/provider-api-key-auth.js";
|
||||
import { buildTogetherProvider } from "./provider-catalog.js";
|
||||
|
||||
const PROVIDER_ID = "together";
|
||||
|
||||
|
||||
14
extensions/together/provider-catalog.ts
Normal file
14
extensions/together/provider-catalog.ts
Normal file
@@ -0,0 +1,14 @@
|
||||
import {
|
||||
buildTogetherModelDefinition,
|
||||
TOGETHER_BASE_URL,
|
||||
TOGETHER_MODEL_CATALOG,
|
||||
} from "../../src/agents/together-models.js";
|
||||
import type { ModelProviderConfig } from "../../src/config/types.models.js";
|
||||
|
||||
export function buildTogetherProvider(): ModelProviderConfig {
|
||||
return {
|
||||
baseUrl: TOGETHER_BASE_URL,
|
||||
api: "openai-completions",
|
||||
models: TOGETHER_MODEL_CATALOG.map(buildTogetherModelDefinition),
|
||||
};
|
||||
}
|
||||
@@ -1,7 +1,7 @@
|
||||
import { emptyPluginConfigSchema, type OpenClawPluginApi } from "openclaw/plugin-sdk/core";
|
||||
import { buildVeniceProvider } from "../../src/agents/models-config.providers.discovery.js";
|
||||
import { applyVeniceConfig, VENICE_DEFAULT_MODEL_REF } from "../../src/commands/onboard-auth.js";
|
||||
import { createProviderApiKeyAuthMethod } from "../../src/plugins/provider-api-key-auth.js";
|
||||
import { buildVeniceProvider } from "./provider-catalog.js";
|
||||
|
||||
const PROVIDER_ID = "venice";
|
||||
|
||||
|
||||
11
extensions/venice/provider-catalog.ts
Normal file
11
extensions/venice/provider-catalog.ts
Normal file
@@ -0,0 +1,11 @@
|
||||
import { discoverVeniceModels, VENICE_BASE_URL } from "../../src/agents/venice-models.js";
|
||||
import type { ModelProviderConfig } from "../../src/config/types.models.js";
|
||||
|
||||
export async function buildVeniceProvider(): Promise<ModelProviderConfig> {
|
||||
const models = await discoverVeniceModels();
|
||||
return {
|
||||
baseUrl: VENICE_BASE_URL,
|
||||
api: "openai-completions",
|
||||
models,
|
||||
};
|
||||
}
|
||||
@@ -1,10 +1,10 @@
|
||||
import { emptyPluginConfigSchema, type OpenClawPluginApi } from "openclaw/plugin-sdk/core";
|
||||
import { buildVercelAiGatewayProvider } from "../../src/agents/models-config.providers.discovery.js";
|
||||
import {
|
||||
applyVercelAiGatewayConfig,
|
||||
VERCEL_AI_GATEWAY_DEFAULT_MODEL_REF,
|
||||
} from "../../src/commands/onboard-auth.js";
|
||||
import { createProviderApiKeyAuthMethod } from "../../src/plugins/provider-api-key-auth.js";
|
||||
import { buildVercelAiGatewayProvider } from "./provider-catalog.js";
|
||||
|
||||
const PROVIDER_ID = "vercel-ai-gateway";
|
||||
|
||||
|
||||
13
extensions/vercel-ai-gateway/provider-catalog.ts
Normal file
13
extensions/vercel-ai-gateway/provider-catalog.ts
Normal file
@@ -0,0 +1,13 @@
|
||||
import {
|
||||
discoverVercelAiGatewayModels,
|
||||
VERCEL_AI_GATEWAY_BASE_URL,
|
||||
} from "../../src/agents/vercel-ai-gateway.js";
|
||||
import type { ModelProviderConfig } from "../../src/config/types.models.js";
|
||||
|
||||
export async function buildVercelAiGatewayProvider(): Promise<ModelProviderConfig> {
|
||||
return {
|
||||
baseUrl: VERCEL_AI_GATEWAY_BASE_URL,
|
||||
api: "anthropic-messages",
|
||||
models: await discoverVercelAiGatewayModels(),
|
||||
};
|
||||
}
|
||||
@@ -1,10 +1,7 @@
|
||||
import { emptyPluginConfigSchema, type OpenClawPluginApi } from "openclaw/plugin-sdk/core";
|
||||
import {
|
||||
buildDoubaoCodingProvider,
|
||||
buildDoubaoProvider,
|
||||
} from "../../src/agents/models-config.providers.static.js";
|
||||
import { ensureModelAllowlistEntry } from "../../src/commands/model-allowlist.js";
|
||||
import { createProviderApiKeyAuthMethod } from "../../src/plugins/provider-api-key-auth.js";
|
||||
import { buildDoubaoCodingProvider, buildDoubaoProvider } from "./provider-catalog.js";
|
||||
|
||||
const PROVIDER_ID = "volcengine";
|
||||
const VOLCENGINE_DEFAULT_MODEL_REF = "volcengine-plan/ark-code-latest";
|
||||
|
||||
24
extensions/volcengine/provider-catalog.ts
Normal file
24
extensions/volcengine/provider-catalog.ts
Normal file
@@ -0,0 +1,24 @@
|
||||
import {
|
||||
buildDoubaoModelDefinition,
|
||||
DOUBAO_BASE_URL,
|
||||
DOUBAO_CODING_BASE_URL,
|
||||
DOUBAO_CODING_MODEL_CATALOG,
|
||||
DOUBAO_MODEL_CATALOG,
|
||||
} from "../../src/agents/doubao-models.js";
|
||||
import type { ModelProviderConfig } from "../../src/config/types.models.js";
|
||||
|
||||
export function buildDoubaoProvider(): ModelProviderConfig {
|
||||
return {
|
||||
baseUrl: DOUBAO_BASE_URL,
|
||||
api: "openai-completions",
|
||||
models: DOUBAO_MODEL_CATALOG.map(buildDoubaoModelDefinition),
|
||||
};
|
||||
}
|
||||
|
||||
export function buildDoubaoCodingProvider(): ModelProviderConfig {
|
||||
return {
|
||||
baseUrl: DOUBAO_CODING_BASE_URL,
|
||||
api: "openai-completions",
|
||||
models: DOUBAO_CODING_MODEL_CATALOG.map(buildDoubaoModelDefinition),
|
||||
};
|
||||
}
|
||||
@@ -1,8 +1,8 @@
|
||||
import { emptyPluginConfigSchema, type OpenClawPluginApi } from "openclaw/plugin-sdk/core";
|
||||
import { buildXiaomiProvider } from "../../src/agents/models-config.providers.static.js";
|
||||
import { applyXiaomiConfig, XIAOMI_DEFAULT_MODEL_REF } from "../../src/commands/onboard-auth.js";
|
||||
import { PROVIDER_LABELS } from "../../src/infra/provider-usage.shared.js";
|
||||
import { createProviderApiKeyAuthMethod } from "../../src/plugins/provider-api-key-auth.js";
|
||||
import { buildXiaomiProvider } from "./provider-catalog.js";
|
||||
|
||||
const PROVIDER_ID = "xiaomi";
|
||||
|
||||
|
||||
30
extensions/xiaomi/provider-catalog.ts
Normal file
30
extensions/xiaomi/provider-catalog.ts
Normal file
@@ -0,0 +1,30 @@
|
||||
import type { ModelProviderConfig } from "../../src/config/types.models.js";
|
||||
|
||||
// Xiaomi MiMo defaults (Anthropic-compatible endpoint).
const XIAOMI_BASE_URL = "https://api.xiaomimimo.com/anthropic";
// Default model exposed by this provider; exported for use by the plugin.
export const XIAOMI_DEFAULT_MODEL_ID = "mimo-v2-flash";
const XIAOMI_DEFAULT_CONTEXT_WINDOW = 262144;
const XIAOMI_DEFAULT_MAX_TOKENS = 8192;
// All-zero cost table — pricing is not tracked here; TODO confirm intent.
const XIAOMI_DEFAULT_COST = {
  input: 0,
  output: 0,
  cacheRead: 0,
  cacheWrite: 0,
};
|
||||
|
||||
export function buildXiaomiProvider(): ModelProviderConfig {
|
||||
return {
|
||||
baseUrl: XIAOMI_BASE_URL,
|
||||
api: "anthropic-messages",
|
||||
models: [
|
||||
{
|
||||
id: XIAOMI_DEFAULT_MODEL_ID,
|
||||
name: "Xiaomi MiMo V2 Flash",
|
||||
reasoning: false,
|
||||
input: ["text"],
|
||||
cost: XIAOMI_DEFAULT_COST,
|
||||
contextWindow: XIAOMI_DEFAULT_CONTEXT_WINDOW,
|
||||
maxTokens: XIAOMI_DEFAULT_MAX_TOKENS,
|
||||
},
|
||||
],
|
||||
};
|
||||
}
|
||||
@@ -1,14 +1,6 @@
|
||||
import type { OpenClawConfig } from "../config/config.js";
|
||||
import type { ModelDefinitionConfig } from "../config/types.models.js";
|
||||
import { createSubsystemLogger } from "../logging/subsystem.js";
|
||||
import { KILOCODE_BASE_URL } from "../providers/kilocode-shared.js";
|
||||
import {
|
||||
discoverHuggingfaceModels,
|
||||
HUGGINGFACE_BASE_URL,
|
||||
HUGGINGFACE_MODEL_CATALOG,
|
||||
buildHuggingfaceModelDefinition,
|
||||
} from "./huggingface-models.js";
|
||||
import { discoverKilocodeModels } from "./kilocode-models.js";
|
||||
import {
|
||||
enrichOllamaModelsWithContext,
|
||||
OLLAMA_DEFAULT_CONTEXT_WINDOW,
|
||||
@@ -24,9 +16,11 @@ import {
|
||||
SELF_HOSTED_DEFAULT_MAX_TOKENS,
|
||||
} from "./self-hosted-provider-defaults.js";
|
||||
import { SGLANG_DEFAULT_BASE_URL, SGLANG_PROVIDER_LABEL } from "./sglang-defaults.js";
|
||||
import { discoverVeniceModels, VENICE_BASE_URL } from "./venice-models.js";
|
||||
import { discoverVercelAiGatewayModels, VERCEL_AI_GATEWAY_BASE_URL } from "./vercel-ai-gateway.js";
|
||||
import { VLLM_DEFAULT_BASE_URL, VLLM_PROVIDER_LABEL } from "./vllm-defaults.js";
|
||||
export { buildHuggingfaceProvider } from "../../extensions/huggingface/provider-catalog.js";
|
||||
export { buildKilocodeProviderWithDiscovery } from "../../extensions/kilocode/provider-catalog.js";
|
||||
export { buildVeniceProvider } from "../../extensions/venice/provider-catalog.js";
|
||||
export { buildVercelAiGatewayProvider } from "../../extensions/vercel-ai-gateway/provider-catalog.js";
|
||||
|
||||
export { resolveOllamaApiBase } from "./ollama-models.js";
|
||||
|
||||
@@ -145,15 +139,6 @@ async function discoverOpenAICompatibleLocalModels(params: {
|
||||
}
|
||||
}
|
||||
|
||||
export async function buildVeniceProvider(): Promise<ProviderConfig> {
|
||||
const models = await discoverVeniceModels();
|
||||
return {
|
||||
baseUrl: VENICE_BASE_URL,
|
||||
api: "openai-completions",
|
||||
models,
|
||||
};
|
||||
}
|
||||
|
||||
export async function buildOllamaProvider(
|
||||
configuredBaseUrl?: string,
|
||||
opts?: { quiet?: boolean },
|
||||
@@ -166,27 +151,6 @@ export async function buildOllamaProvider(
|
||||
};
|
||||
}
|
||||
|
||||
export async function buildHuggingfaceProvider(discoveryApiKey?: string): Promise<ProviderConfig> {
|
||||
const resolvedSecret = discoveryApiKey?.trim() ?? "";
|
||||
const models =
|
||||
resolvedSecret !== ""
|
||||
? await discoverHuggingfaceModels(resolvedSecret)
|
||||
: HUGGINGFACE_MODEL_CATALOG.map(buildHuggingfaceModelDefinition);
|
||||
return {
|
||||
baseUrl: HUGGINGFACE_BASE_URL,
|
||||
api: "openai-completions",
|
||||
models,
|
||||
};
|
||||
}
|
||||
|
||||
export async function buildVercelAiGatewayProvider(): Promise<ProviderConfig> {
|
||||
return {
|
||||
baseUrl: VERCEL_AI_GATEWAY_BASE_URL,
|
||||
api: "anthropic-messages",
|
||||
models: await discoverVercelAiGatewayModels(),
|
||||
};
|
||||
}
|
||||
|
||||
export async function buildVllmProvider(params?: {
|
||||
baseUrl?: string;
|
||||
apiKey?: string;
|
||||
@@ -220,16 +184,3 @@ export async function buildSglangProvider(params?: {
|
||||
models,
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Build the Kilocode provider with dynamic model discovery from the gateway
|
||||
* API. Falls back to the static catalog on failure.
|
||||
*/
|
||||
export async function buildKilocodeProviderWithDiscovery(): Promise<ProviderConfig> {
|
||||
const models = await discoverKilocodeModels();
|
||||
return {
|
||||
baseUrl: KILOCODE_BASE_URL,
|
||||
api: "openai-completions",
|
||||
models,
|
||||
};
|
||||
}
|
||||
|
||||
@@ -1,551 +1,35 @@
|
||||
import type { OpenClawConfig } from "../config/config.js";
|
||||
import {
|
||||
KILOCODE_BASE_URL,
|
||||
KILOCODE_DEFAULT_CONTEXT_WINDOW,
|
||||
KILOCODE_DEFAULT_COST,
|
||||
KILOCODE_DEFAULT_MAX_TOKENS,
|
||||
KILOCODE_MODEL_CATALOG,
|
||||
} from "../providers/kilocode-shared.js";
|
||||
import {
|
||||
buildBytePlusModelDefinition,
|
||||
BYTEPLUS_BASE_URL,
|
||||
BYTEPLUS_MODEL_CATALOG,
|
||||
BYTEPLUS_CODING_BASE_URL,
|
||||
BYTEPLUS_CODING_MODEL_CATALOG,
|
||||
} from "./byteplus-models.js";
|
||||
import {
|
||||
buildDoubaoModelDefinition,
|
||||
DOUBAO_BASE_URL,
|
||||
DOUBAO_MODEL_CATALOG,
|
||||
DOUBAO_CODING_BASE_URL,
|
||||
DOUBAO_CODING_MODEL_CATALOG,
|
||||
} from "./doubao-models.js";
|
||||
import {
|
||||
buildSyntheticModelDefinition,
|
||||
SYNTHETIC_BASE_URL,
|
||||
SYNTHETIC_MODEL_CATALOG,
|
||||
} from "./synthetic-models.js";
|
||||
import {
|
||||
TOGETHER_BASE_URL,
|
||||
TOGETHER_MODEL_CATALOG,
|
||||
buildTogetherModelDefinition,
|
||||
} from "./together-models.js";
|
||||
|
||||
// Convenience aliases narrowing the app config down to: the models section,
// a single provider entry, and a single model entry within a provider.
type ModelsConfig = NonNullable<OpenClawConfig["models"]>;
type ProviderConfig = NonNullable<ModelsConfig["providers"]>[string];
type ProviderModelConfig = NonNullable<ProviderConfig["models"]>[number];
|
||||
// MiniMax portal endpoint (Anthropic-compatible).
const MINIMAX_PORTAL_BASE_URL = "https://api.minimax.io/anthropic";
const MINIMAX_DEFAULT_MODEL_ID = "MiniMax-M2.5";
const MINIMAX_DEFAULT_VISION_MODEL_ID = "MiniMax-VL-01";
const MINIMAX_DEFAULT_CONTEXT_WINDOW = 200000;
const MINIMAX_DEFAULT_MAX_TOKENS = 8192;
// Shared cost table applied to every MiniMax model — presumably per-token
// pricing in the same units as the other cost tables in this file; verify.
const MINIMAX_API_COST = {
  input: 0.3,
  output: 1.2,
  cacheRead: 0.03,
  cacheWrite: 0.12,
};
|
||||
|
||||
function buildMinimaxModel(params: {
|
||||
id: string;
|
||||
name: string;
|
||||
reasoning: boolean;
|
||||
input: ProviderModelConfig["input"];
|
||||
}): ProviderModelConfig {
|
||||
return {
|
||||
id: params.id,
|
||||
name: params.name,
|
||||
reasoning: params.reasoning,
|
||||
input: params.input,
|
||||
cost: MINIMAX_API_COST,
|
||||
contextWindow: MINIMAX_DEFAULT_CONTEXT_WINDOW,
|
||||
maxTokens: MINIMAX_DEFAULT_MAX_TOKENS,
|
||||
};
|
||||
}
|
||||
|
||||
function buildMinimaxTextModel(params: {
|
||||
id: string;
|
||||
name: string;
|
||||
reasoning: boolean;
|
||||
}): ProviderModelConfig {
|
||||
return buildMinimaxModel({ ...params, input: ["text"] });
|
||||
}
|
||||
|
||||
// Xiaomi MiMo defaults (Anthropic-compatible endpoint).
const XIAOMI_BASE_URL = "https://api.xiaomimimo.com/anthropic";
// Exported so callers elsewhere can reference the default model id.
export const XIAOMI_DEFAULT_MODEL_ID = "mimo-v2-flash";
const XIAOMI_DEFAULT_CONTEXT_WINDOW = 262144;
const XIAOMI_DEFAULT_MAX_TOKENS = 8192;
// All-zero cost table — pricing untracked for this provider; TODO confirm.
const XIAOMI_DEFAULT_COST = {
  input: 0,
  output: 0,
  cacheRead: 0,
  cacheWrite: 0,
};
|
||||
|
||||
// Moonshot (Kimi) OpenAI-compatible API defaults.
const MOONSHOT_BASE_URL = "https://api.moonshot.ai/v1";
const MOONSHOT_DEFAULT_MODEL_ID = "kimi-k2.5";
const MOONSHOT_DEFAULT_CONTEXT_WINDOW = 256000;
const MOONSHOT_DEFAULT_MAX_TOKENS = 8192;
// All-zero cost table — pricing untracked for this provider; TODO confirm.
const MOONSHOT_DEFAULT_COST = {
  input: 0,
  output: 0,
  cacheRead: 0,
  cacheWrite: 0,
};
|
||||
|
||||
// Kimi for Coding defaults (Anthropic-style endpoint).
const KIMI_CODING_BASE_URL = "https://api.kimi.com/coding/";
// Endpoint is called with a claude-code User-Agent — presumably required by
// the service for client identification; verify before changing.
const KIMI_CODING_USER_AGENT = "claude-code/0.1.0";
const KIMI_CODING_DEFAULT_MODEL_ID = "k2p5";
const KIMI_CODING_DEFAULT_CONTEXT_WINDOW = 262144;
const KIMI_CODING_DEFAULT_MAX_TOKENS = 32768;
// All-zero cost table — pricing untracked for this provider; TODO confirm.
const KIMI_CODING_DEFAULT_COST = {
  input: 0,
  output: 0,
  cacheRead: 0,
  cacheWrite: 0,
};
|
||||
|
||||
// Qwen portal (OpenAI-compatible) defaults.
const QWEN_PORTAL_BASE_URL = "https://portal.qwen.ai/v1";
const QWEN_PORTAL_DEFAULT_CONTEXT_WINDOW = 128000;
const QWEN_PORTAL_DEFAULT_MAX_TOKENS = 8192;
// All-zero cost table — pricing untracked for this provider; TODO confirm.
const QWEN_PORTAL_DEFAULT_COST = {
  input: 0,
  output: 0,
  cacheRead: 0,
  cacheWrite: 0,
};
|
||||
|
||||
// OpenRouter (OpenAI-compatible) defaults. "auto" lets the router pick.
const OPENROUTER_BASE_URL = "https://openrouter.ai/api/v1";
const OPENROUTER_DEFAULT_MODEL_ID = "auto";
const OPENROUTER_DEFAULT_CONTEXT_WINDOW = 200000;
const OPENROUTER_DEFAULT_MAX_TOKENS = 8192;
// All-zero cost table — pricing untracked for this provider; TODO confirm.
const OPENROUTER_DEFAULT_COST = {
  input: 0,
  output: 0,
  cacheRead: 0,
  cacheWrite: 0,
};
|
||||
|
||||
// Baidu Qianfan (OpenAI-compatible) defaults; base URL and default model id
// are exported for use by the onboarding/auth code.
export const QIANFAN_BASE_URL = "https://qianfan.baidubce.com/v2";
export const QIANFAN_DEFAULT_MODEL_ID = "deepseek-v3.2";
const QIANFAN_DEFAULT_CONTEXT_WINDOW = 98304;
const QIANFAN_DEFAULT_MAX_TOKENS = 32768;
// All-zero cost table — pricing untracked for this provider; TODO confirm.
const QIANFAN_DEFAULT_COST = {
  input: 0,
  output: 0,
  cacheRead: 0,
  cacheWrite: 0,
};
|
||||
|
||||
// Alibaba Model Studio (DashScope coding endpoint) defaults; base URL and
// default model id are exported for use by the onboarding/auth code.
export const MODELSTUDIO_BASE_URL = "https://coding-intl.dashscope.aliyuncs.com/v1";
export const MODELSTUDIO_DEFAULT_MODEL_ID = "qwen3.5-plus";
// All-zero cost table shared by every catalog entry below — pricing
// untracked for this provider; TODO confirm.
const MODELSTUDIO_DEFAULT_COST = {
  input: 0,
  output: 0,
  cacheRead: 0,
  cacheWrite: 0,
};

// Static catalog of models exposed through Model Studio. Each entry carries
// its own context window / max-token limits; costs are the shared zeros.
const MODELSTUDIO_MODEL_CATALOG: ReadonlyArray<ProviderModelConfig> = [
  {
    id: "qwen3.5-plus",
    name: "qwen3.5-plus",
    reasoning: false,
    input: ["text", "image"],
    cost: MODELSTUDIO_DEFAULT_COST,
    contextWindow: 1_000_000,
    maxTokens: 65_536,
  },
  {
    id: "qwen3-max-2026-01-23",
    name: "qwen3-max-2026-01-23",
    reasoning: false,
    input: ["text"],
    cost: MODELSTUDIO_DEFAULT_COST,
    contextWindow: 262_144,
    maxTokens: 65_536,
  },
  {
    id: "qwen3-coder-next",
    name: "qwen3-coder-next",
    reasoning: false,
    input: ["text"],
    cost: MODELSTUDIO_DEFAULT_COST,
    contextWindow: 262_144,
    maxTokens: 65_536,
  },
  {
    id: "qwen3-coder-plus",
    name: "qwen3-coder-plus",
    reasoning: false,
    input: ["text"],
    cost: MODELSTUDIO_DEFAULT_COST,
    contextWindow: 1_000_000,
    maxTokens: 65_536,
  },
  {
    id: "MiniMax-M2.5",
    name: "MiniMax-M2.5",
    reasoning: true,
    input: ["text"],
    cost: MODELSTUDIO_DEFAULT_COST,
    contextWindow: 1_000_000,
    maxTokens: 65_536,
  },
  {
    id: "glm-5",
    name: "glm-5",
    reasoning: false,
    input: ["text"],
    cost: MODELSTUDIO_DEFAULT_COST,
    contextWindow: 202_752,
    maxTokens: 16_384,
  },
  {
    id: "glm-4.7",
    name: "glm-4.7",
    reasoning: false,
    input: ["text"],
    cost: MODELSTUDIO_DEFAULT_COST,
    contextWindow: 202_752,
    maxTokens: 16_384,
  },
  {
    id: "kimi-k2.5",
    name: "kimi-k2.5",
    reasoning: false,
    input: ["text", "image"],
    cost: MODELSTUDIO_DEFAULT_COST,
    contextWindow: 262_144,
    maxTokens: 32_768,
  },
];
|
||||
|
||||
// NVIDIA API (integrate.api.nvidia.com, OpenAI-compatible) defaults.
const NVIDIA_BASE_URL = "https://integrate.api.nvidia.com/v1";
const NVIDIA_DEFAULT_MODEL_ID = "nvidia/llama-3.1-nemotron-70b-instruct";
const NVIDIA_DEFAULT_CONTEXT_WINDOW = 131072;
const NVIDIA_DEFAULT_MAX_TOKENS = 4096;
// All-zero cost table — pricing untracked for this provider; TODO confirm.
const NVIDIA_DEFAULT_COST = {
  input: 0,
  output: 0,
  cacheRead: 0,
  cacheWrite: 0,
};

// ChatGPT backend endpoint used by the OpenAI Codex provider.
const OPENAI_CODEX_BASE_URL = "https://chatgpt.com/backend-api";
|
||||
|
||||
export function buildMinimaxProvider(): ProviderConfig {
|
||||
return {
|
||||
baseUrl: MINIMAX_PORTAL_BASE_URL,
|
||||
api: "anthropic-messages",
|
||||
authHeader: true,
|
||||
models: [
|
||||
buildMinimaxModel({
|
||||
id: MINIMAX_DEFAULT_VISION_MODEL_ID,
|
||||
name: "MiniMax VL 01",
|
||||
reasoning: false,
|
||||
input: ["text", "image"],
|
||||
}),
|
||||
buildMinimaxTextModel({
|
||||
id: "MiniMax-M2.5",
|
||||
name: "MiniMax M2.5",
|
||||
reasoning: true,
|
||||
}),
|
||||
buildMinimaxTextModel({
|
||||
id: "MiniMax-M2.5-highspeed",
|
||||
name: "MiniMax M2.5 Highspeed",
|
||||
reasoning: true,
|
||||
}),
|
||||
],
|
||||
};
|
||||
}
|
||||
|
||||
export function buildMinimaxPortalProvider(): ProviderConfig {
|
||||
return {
|
||||
baseUrl: MINIMAX_PORTAL_BASE_URL,
|
||||
api: "anthropic-messages",
|
||||
authHeader: true,
|
||||
models: [
|
||||
buildMinimaxModel({
|
||||
id: MINIMAX_DEFAULT_VISION_MODEL_ID,
|
||||
name: "MiniMax VL 01",
|
||||
reasoning: false,
|
||||
input: ["text", "image"],
|
||||
}),
|
||||
buildMinimaxTextModel({
|
||||
id: MINIMAX_DEFAULT_MODEL_ID,
|
||||
name: "MiniMax M2.5",
|
||||
reasoning: true,
|
||||
}),
|
||||
buildMinimaxTextModel({
|
||||
id: "MiniMax-M2.5-highspeed",
|
||||
name: "MiniMax M2.5 Highspeed",
|
||||
reasoning: true,
|
||||
}),
|
||||
],
|
||||
};
|
||||
}
|
||||
|
||||
export function buildMoonshotProvider(): ProviderConfig {
|
||||
return {
|
||||
baseUrl: MOONSHOT_BASE_URL,
|
||||
api: "openai-completions",
|
||||
models: [
|
||||
{
|
||||
id: MOONSHOT_DEFAULT_MODEL_ID,
|
||||
name: "Kimi K2.5",
|
||||
reasoning: false,
|
||||
input: ["text", "image"],
|
||||
cost: MOONSHOT_DEFAULT_COST,
|
||||
contextWindow: MOONSHOT_DEFAULT_CONTEXT_WINDOW,
|
||||
maxTokens: MOONSHOT_DEFAULT_MAX_TOKENS,
|
||||
},
|
||||
],
|
||||
};
|
||||
}
|
||||
|
||||
export function buildKimiCodingProvider(): ProviderConfig {
|
||||
return {
|
||||
baseUrl: KIMI_CODING_BASE_URL,
|
||||
api: "anthropic-messages",
|
||||
headers: {
|
||||
"User-Agent": KIMI_CODING_USER_AGENT,
|
||||
},
|
||||
models: [
|
||||
{
|
||||
id: KIMI_CODING_DEFAULT_MODEL_ID,
|
||||
name: "Kimi for Coding",
|
||||
reasoning: true,
|
||||
input: ["text", "image"],
|
||||
cost: KIMI_CODING_DEFAULT_COST,
|
||||
contextWindow: KIMI_CODING_DEFAULT_CONTEXT_WINDOW,
|
||||
maxTokens: KIMI_CODING_DEFAULT_MAX_TOKENS,
|
||||
},
|
||||
],
|
||||
};
|
||||
}
|
||||
|
||||
export function buildQwenPortalProvider(): ProviderConfig {
|
||||
return {
|
||||
baseUrl: QWEN_PORTAL_BASE_URL,
|
||||
api: "openai-completions",
|
||||
models: [
|
||||
{
|
||||
id: "coder-model",
|
||||
name: "Qwen Coder",
|
||||
reasoning: false,
|
||||
input: ["text"],
|
||||
cost: QWEN_PORTAL_DEFAULT_COST,
|
||||
contextWindow: QWEN_PORTAL_DEFAULT_CONTEXT_WINDOW,
|
||||
maxTokens: QWEN_PORTAL_DEFAULT_MAX_TOKENS,
|
||||
},
|
||||
{
|
||||
id: "vision-model",
|
||||
name: "Qwen Vision",
|
||||
reasoning: false,
|
||||
input: ["text", "image"],
|
||||
cost: QWEN_PORTAL_DEFAULT_COST,
|
||||
contextWindow: QWEN_PORTAL_DEFAULT_CONTEXT_WINDOW,
|
||||
maxTokens: QWEN_PORTAL_DEFAULT_MAX_TOKENS,
|
||||
},
|
||||
],
|
||||
};
|
||||
}
|
||||
|
||||
export function buildSyntheticProvider(): ProviderConfig {
|
||||
return {
|
||||
baseUrl: SYNTHETIC_BASE_URL,
|
||||
api: "anthropic-messages",
|
||||
models: SYNTHETIC_MODEL_CATALOG.map(buildSyntheticModelDefinition),
|
||||
};
|
||||
}
|
||||
|
||||
export function buildDoubaoProvider(): ProviderConfig {
|
||||
return {
|
||||
baseUrl: DOUBAO_BASE_URL,
|
||||
api: "openai-completions",
|
||||
models: DOUBAO_MODEL_CATALOG.map(buildDoubaoModelDefinition),
|
||||
};
|
||||
}
|
||||
|
||||
export function buildDoubaoCodingProvider(): ProviderConfig {
|
||||
return {
|
||||
baseUrl: DOUBAO_CODING_BASE_URL,
|
||||
api: "openai-completions",
|
||||
models: DOUBAO_CODING_MODEL_CATALOG.map(buildDoubaoModelDefinition),
|
||||
};
|
||||
}
|
||||
|
||||
export function buildBytePlusProvider(): ProviderConfig {
|
||||
return {
|
||||
baseUrl: BYTEPLUS_BASE_URL,
|
||||
api: "openai-completions",
|
||||
models: BYTEPLUS_MODEL_CATALOG.map(buildBytePlusModelDefinition),
|
||||
};
|
||||
}
|
||||
|
||||
export function buildBytePlusCodingProvider(): ProviderConfig {
|
||||
return {
|
||||
baseUrl: BYTEPLUS_CODING_BASE_URL,
|
||||
api: "openai-completions",
|
||||
models: BYTEPLUS_CODING_MODEL_CATALOG.map(buildBytePlusModelDefinition),
|
||||
};
|
||||
}
|
||||
|
||||
export function buildXiaomiProvider(): ProviderConfig {
|
||||
return {
|
||||
baseUrl: XIAOMI_BASE_URL,
|
||||
api: "anthropic-messages",
|
||||
models: [
|
||||
{
|
||||
id: XIAOMI_DEFAULT_MODEL_ID,
|
||||
name: "Xiaomi MiMo V2 Flash",
|
||||
reasoning: false,
|
||||
input: ["text"],
|
||||
cost: XIAOMI_DEFAULT_COST,
|
||||
contextWindow: XIAOMI_DEFAULT_CONTEXT_WINDOW,
|
||||
maxTokens: XIAOMI_DEFAULT_MAX_TOKENS,
|
||||
},
|
||||
],
|
||||
};
|
||||
}
|
||||
|
||||
export function buildTogetherProvider(): ProviderConfig {
|
||||
return {
|
||||
baseUrl: TOGETHER_BASE_URL,
|
||||
api: "openai-completions",
|
||||
models: TOGETHER_MODEL_CATALOG.map(buildTogetherModelDefinition),
|
||||
};
|
||||
}
|
||||
|
||||
export function buildOpenrouterProvider(): ProviderConfig {
|
||||
return {
|
||||
baseUrl: OPENROUTER_BASE_URL,
|
||||
api: "openai-completions",
|
||||
models: [
|
||||
{
|
||||
id: OPENROUTER_DEFAULT_MODEL_ID,
|
||||
name: "OpenRouter Auto",
|
||||
reasoning: false,
|
||||
input: ["text", "image"],
|
||||
cost: OPENROUTER_DEFAULT_COST,
|
||||
contextWindow: OPENROUTER_DEFAULT_CONTEXT_WINDOW,
|
||||
maxTokens: OPENROUTER_DEFAULT_MAX_TOKENS,
|
||||
},
|
||||
{
|
||||
id: "openrouter/hunter-alpha",
|
||||
name: "Hunter Alpha",
|
||||
reasoning: true,
|
||||
input: ["text"],
|
||||
cost: OPENROUTER_DEFAULT_COST,
|
||||
contextWindow: 1048576,
|
||||
maxTokens: 65536,
|
||||
},
|
||||
{
|
||||
id: "openrouter/healer-alpha",
|
||||
name: "Healer Alpha",
|
||||
reasoning: true,
|
||||
input: ["text", "image"],
|
||||
cost: OPENROUTER_DEFAULT_COST,
|
||||
contextWindow: 262144,
|
||||
maxTokens: 65536,
|
||||
},
|
||||
],
|
||||
};
|
||||
}
|
||||
|
||||
export function buildOpenAICodexProvider(): ProviderConfig {
|
||||
return {
|
||||
baseUrl: OPENAI_CODEX_BASE_URL,
|
||||
api: "openai-codex-responses",
|
||||
models: [],
|
||||
};
|
||||
}
|
||||
|
||||
export function buildQianfanProvider(): ProviderConfig {
|
||||
return {
|
||||
baseUrl: QIANFAN_BASE_URL,
|
||||
api: "openai-completions",
|
||||
models: [
|
||||
{
|
||||
id: QIANFAN_DEFAULT_MODEL_ID,
|
||||
name: "DEEPSEEK V3.2",
|
||||
reasoning: true,
|
||||
input: ["text"],
|
||||
cost: QIANFAN_DEFAULT_COST,
|
||||
contextWindow: QIANFAN_DEFAULT_CONTEXT_WINDOW,
|
||||
maxTokens: QIANFAN_DEFAULT_MAX_TOKENS,
|
||||
},
|
||||
{
|
||||
id: "ernie-5.0-thinking-preview",
|
||||
name: "ERNIE-5.0-Thinking-Preview",
|
||||
reasoning: true,
|
||||
input: ["text", "image"],
|
||||
cost: QIANFAN_DEFAULT_COST,
|
||||
contextWindow: 119000,
|
||||
maxTokens: 64000,
|
||||
},
|
||||
],
|
||||
};
|
||||
}
|
||||
|
||||
export function buildModelStudioProvider(): ProviderConfig {
|
||||
return {
|
||||
baseUrl: MODELSTUDIO_BASE_URL,
|
||||
api: "openai-completions",
|
||||
models: MODELSTUDIO_MODEL_CATALOG.map((model) => ({ ...model })),
|
||||
};
|
||||
}
|
||||
|
||||
export function buildNvidiaProvider(): ProviderConfig {
|
||||
return {
|
||||
baseUrl: NVIDIA_BASE_URL,
|
||||
api: "openai-completions",
|
||||
models: [
|
||||
{
|
||||
id: NVIDIA_DEFAULT_MODEL_ID,
|
||||
name: "NVIDIA Llama 3.1 Nemotron 70B Instruct",
|
||||
reasoning: false,
|
||||
input: ["text"],
|
||||
cost: NVIDIA_DEFAULT_COST,
|
||||
contextWindow: NVIDIA_DEFAULT_CONTEXT_WINDOW,
|
||||
maxTokens: NVIDIA_DEFAULT_MAX_TOKENS,
|
||||
},
|
||||
{
|
||||
id: "meta/llama-3.3-70b-instruct",
|
||||
name: "Meta Llama 3.3 70B Instruct",
|
||||
reasoning: false,
|
||||
input: ["text"],
|
||||
cost: NVIDIA_DEFAULT_COST,
|
||||
contextWindow: 131072,
|
||||
maxTokens: 4096,
|
||||
},
|
||||
{
|
||||
id: "nvidia/mistral-nemo-minitron-8b-8k-instruct",
|
||||
name: "NVIDIA Mistral NeMo Minitron 8B Instruct",
|
||||
reasoning: false,
|
||||
input: ["text"],
|
||||
cost: NVIDIA_DEFAULT_COST,
|
||||
contextWindow: 8192,
|
||||
maxTokens: 2048,
|
||||
},
|
||||
],
|
||||
};
|
||||
}
|
||||
|
||||
export function buildKilocodeProvider(): ProviderConfig {
|
||||
return {
|
||||
baseUrl: KILOCODE_BASE_URL,
|
||||
api: "openai-completions",
|
||||
models: KILOCODE_MODEL_CATALOG.map((model) => ({
|
||||
id: model.id,
|
||||
name: model.name,
|
||||
reasoning: model.reasoning,
|
||||
input: model.input,
|
||||
cost: KILOCODE_DEFAULT_COST,
|
||||
contextWindow: model.contextWindow ?? KILOCODE_DEFAULT_CONTEXT_WINDOW,
|
||||
maxTokens: model.maxTokens ?? KILOCODE_DEFAULT_MAX_TOKENS,
|
||||
})),
|
||||
};
|
||||
}
|
||||
export {
|
||||
buildBytePlusCodingProvider,
|
||||
buildBytePlusProvider,
|
||||
} from "../../extensions/byteplus/provider-catalog.js";
|
||||
export { buildKimiCodingProvider } from "../../extensions/kimi-coding/provider-catalog.js";
|
||||
export { buildKilocodeProvider } from "../../extensions/kilocode/provider-catalog.js";
|
||||
export {
|
||||
buildMinimaxPortalProvider,
|
||||
buildMinimaxProvider,
|
||||
} from "../../extensions/minimax/provider-catalog.js";
|
||||
export {
|
||||
MODELSTUDIO_BASE_URL,
|
||||
MODELSTUDIO_DEFAULT_MODEL_ID,
|
||||
buildModelStudioProvider,
|
||||
} from "../../extensions/modelstudio/provider-catalog.js";
|
||||
export { buildMoonshotProvider } from "../../extensions/moonshot/provider-catalog.js";
|
||||
export { buildNvidiaProvider } from "../../extensions/nvidia/provider-catalog.js";
|
||||
export { buildOpenAICodexProvider } from "../../extensions/openai/openai-codex-catalog.js";
|
||||
export { buildOpenrouterProvider } from "../../extensions/openrouter/provider-catalog.js";
|
||||
export {
|
||||
QIANFAN_BASE_URL,
|
||||
QIANFAN_DEFAULT_MODEL_ID,
|
||||
buildQianfanProvider,
|
||||
} from "../../extensions/qianfan/provider-catalog.js";
|
||||
export { buildQwenPortalProvider } from "../../extensions/qwen-portal-auth/provider-catalog.js";
|
||||
export { buildSyntheticProvider } from "../../extensions/synthetic/provider-catalog.js";
|
||||
export { buildTogetherProvider } from "../../extensions/together/provider-catalog.js";
|
||||
export {
|
||||
buildDoubaoCodingProvider,
|
||||
buildDoubaoProvider,
|
||||
} from "../../extensions/volcengine/provider-catalog.js";
|
||||
export {
|
||||
XIAOMI_DEFAULT_MODEL_ID,
|
||||
buildXiaomiProvider,
|
||||
} from "../../extensions/xiaomi/provider-catalog.js";
|
||||
|
||||
Reference in New Issue
Block a user