diff --git a/.env.example b/.env.example
index d928be01f9..7d4fda347b 100644
--- a/.env.example
+++ b/.env.example
@@ -173,6 +173,11 @@ OPENAI_API_KEY=sk-xxxxxxxxx
# NEBIUS_API_KEY=sk-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
+### NewAPI Service ###
+
+# NEWAPI_API_KEY=sk-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
+# NEWAPI_PROXY_URL=https://your-newapi-server.com
+
########################################
############ Market Service ############
########################################
diff --git a/Dockerfile b/Dockerfile
index 6c25f9de4c..cd27942b8d 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -196,6 +196,8 @@ ENV \
MOONSHOT_API_KEY="" MOONSHOT_MODEL_LIST="" MOONSHOT_PROXY_URL="" \
# Nebius
NEBIUS_API_KEY="" NEBIUS_MODEL_LIST="" NEBIUS_PROXY_URL="" \
+ # NewAPI
+ NEWAPI_API_KEY="" NEWAPI_PROXY_URL="" \
# Novita
NOVITA_API_KEY="" NOVITA_MODEL_LIST="" \
# Nvidia NIM
diff --git a/Dockerfile.database b/Dockerfile.database
index 25f658d60a..1f4136de17 100644
--- a/Dockerfile.database
+++ b/Dockerfile.database
@@ -238,6 +238,8 @@ ENV \
MOONSHOT_API_KEY="" MOONSHOT_MODEL_LIST="" MOONSHOT_PROXY_URL="" \
# Nebius
NEBIUS_API_KEY="" NEBIUS_MODEL_LIST="" NEBIUS_PROXY_URL="" \
+ # NewAPI
+ NEWAPI_API_KEY="" NEWAPI_PROXY_URL="" \
# Novita
NOVITA_API_KEY="" NOVITA_MODEL_LIST="" \
# Nvidia NIM
diff --git a/Dockerfile.pglite b/Dockerfile.pglite
index 35a454ae7f..612ac8d45d 100644
--- a/Dockerfile.pglite
+++ b/Dockerfile.pglite
@@ -198,6 +198,8 @@ ENV \
MOONSHOT_API_KEY="" MOONSHOT_MODEL_LIST="" MOONSHOT_PROXY_URL="" \
# Nebius
NEBIUS_API_KEY="" NEBIUS_MODEL_LIST="" NEBIUS_PROXY_URL="" \
+ # NewAPI
+ NEWAPI_API_KEY="" NEWAPI_PROXY_URL="" \
# Novita
NOVITA_API_KEY="" NOVITA_MODEL_LIST="" \
# Nvidia NIM
diff --git a/docs/self-hosting/environment-variables/model-provider.mdx b/docs/self-hosting/environment-variables/model-provider.mdx
index b419c0869c..9c201218f4 100644
--- a/docs/self-hosting/environment-variables/model-provider.mdx
+++ b/docs/self-hosting/environment-variables/model-provider.mdx
@@ -675,4 +675,22 @@ The above example disables all models first, then enables `flux/schnell` and `fl
The above example disables all models first, then enables `flux-pro-1.1` and `flux-kontext-pro` (displayed as `FLUX.1 Kontext [pro]`).
+## NewAPI
+
+### `NEWAPI_API_KEY`
+
+- Type: Optional
+- Description: This is the API key for your NewAPI service instance. NewAPI is a multi-provider model aggregation service that provides unified access to various AI model APIs.
+- Default: -
+- Example: `sk-xxxxxx...xxxxxx`
+
+### `NEWAPI_PROXY_URL`
+
+- Type: Optional
+- Description: The base URL for your NewAPI server instance. This should point to your deployed NewAPI service endpoint, without a trailing API version path such as `/v1` (version suffixes are stripped automatically).
+- Default: -
+- Example: `https://your-newapi-server.com`
+
+NewAPI is a multi-provider model aggregation service that supports automatic model routing based on provider detection. It offers cost management features and provides a single endpoint for accessing models from multiple providers including OpenAI, Anthropic, Google, and more. Learn more about NewAPI at [https://github.com/Calcium-Ion/new-api](https://github.com/Calcium-Ion/new-api).
+
[model-list]: /docs/self-hosting/advanced/model-list
diff --git a/docs/self-hosting/environment-variables/model-provider.zh-CN.mdx b/docs/self-hosting/environment-variables/model-provider.zh-CN.mdx
index ddbdd6b638..7f53734c8c 100644
--- a/docs/self-hosting/environment-variables/model-provider.zh-CN.mdx
+++ b/docs/self-hosting/environment-variables/model-provider.zh-CN.mdx
@@ -674,4 +674,24 @@ LobeChat 在部署时提供了丰富的模型服务商相关的环境变量,
上述示例表示先禁用所有模型,再启用 `flux-pro-1.1` 和 `flux-kontext-pro`(显示名为 `FLUX.1 Kontext [pro]`)。
+## NewAPI
+
+### `NEWAPI_API_KEY`
+
+- 类型:可选
+- 描述:这是你的 NewAPI 服务实例的 API 密钥。NewAPI 是一个多供应商模型聚合服务,提供对各种 AI 模型 API 的统一访问。
+- 默认值:-
+- 示例:`sk-xxxxxx...xxxxxx`
+
+### `NEWAPI_PROXY_URL`
+
+- 类型:可选
+- 描述:你的 NewAPI 服务器实例的基础 URL。这应该指向你部署的 NewAPI 服务端点,无需带 `/v1` 等 API 版本路径(版本后缀会被自动移除)。
+- 默认值:-
+- 示例:`https://your-newapi-server.com`
+
+
+NewAPI 是一个多供应商模型聚合服务,支持基于供应商检测的自动模型路由。它提供成本管理功能,并为访问包括 OpenAI、Anthropic、Google 等多个供应商的模型提供单一端点。了解更多关于 NewAPI 的信息请访问 [https://github.com/Calcium-Ion/new-api](https://github.com/Calcium-Ion/new-api)。
+
+
[model-list]: /zh/docs/self-hosting/advanced/model-list
diff --git a/locales/zh-CN/modelProvider.json b/locales/zh-CN/modelProvider.json
index d5ebabd045..5be6cf04c1 100644
--- a/locales/zh-CN/modelProvider.json
+++ b/locales/zh-CN/modelProvider.json
@@ -161,7 +161,7 @@
"title": "API 密钥"
},
"apiUrl": {
- "desc": "New API 服务的 API 地址,大部分时候需要带 /v1",
+ "desc": "New API 服务的 API 地址,大部分时候不要带 /v1",
"title": "API 地址"
},
"enabled": {
diff --git a/packages/model-runtime/src/newapi/index.test.ts b/packages/model-runtime/src/newapi/index.test.ts
index c885e5e4c1..7d7193e176 100644
--- a/packages/model-runtime/src/newapi/index.test.ts
+++ b/packages/model-runtime/src/newapi/index.test.ts
@@ -62,7 +62,7 @@ describe('NewAPI Runtime - 100% Branch Coverage', () => {
describe('HandlePayload Function Branch Coverage - Direct Testing', () => {
// Create a mock Set for testing
let testResponsesAPIModels: Set;
-
+
const testHandlePayload = (payload: ChatStreamPayload) => {
// This replicates the exact handlePayload logic from the source
if (
@@ -85,7 +85,7 @@ describe('NewAPI Runtime - 100% Branch Coverage', () => {
};
const result = testHandlePayload(payload);
-
+
expect(result).toEqual({ ...payload, apiMode: 'responses' });
});
@@ -99,7 +99,7 @@ describe('NewAPI Runtime - 100% Branch Coverage', () => {
};
const result = testHandlePayload(payload);
-
+
expect(result).toEqual({ ...payload, apiMode: 'responses' });
});
@@ -113,7 +113,7 @@ describe('NewAPI Runtime - 100% Branch Coverage', () => {
};
const result = testHandlePayload(payload);
-
+
expect(result).toEqual({ ...payload, apiMode: 'responses' });
});
@@ -127,7 +127,7 @@ describe('NewAPI Runtime - 100% Branch Coverage', () => {
};
const result = testHandlePayload(payload);
-
+
expect(result).toEqual({ ...payload, apiMode: 'responses' });
});
@@ -141,7 +141,7 @@ describe('NewAPI Runtime - 100% Branch Coverage', () => {
};
const result = testHandlePayload(payload);
-
+
expect(result).toEqual(payload);
});
});
@@ -207,7 +207,7 @@ describe('NewAPI Runtime - 100% Branch Coverage', () => {
describe('Models Function Branch Coverage - Logical Testing', () => {
// Test the complex models function logic by replicating its branching behavior
-
+
describe('Data Handling Branches', () => {
it('should handle undefined data from models.list (Branch 3.1: data = undefined)', () => {
const data = undefined;
@@ -293,63 +293,63 @@ describe('NewAPI Runtime - 100% Branch Coverage', () => {
it('should use model_price when > 0 (Branch 3.8: model_price && model_price > 0 = true)', () => {
const pricing = { model_price: 15, model_ratio: 10 };
let inputPrice;
-
+
if (pricing.model_price && pricing.model_price > 0) {
inputPrice = pricing.model_price * 2;
} else if (pricing.model_ratio) {
inputPrice = pricing.model_ratio * 2;
}
-
+
expect(inputPrice).toBe(30); // model_price * 2
});
it('should fallback to model_ratio when model_price = 0 (Branch 3.8: model_price > 0 = false, Branch 3.9: model_ratio = true)', () => {
const pricing = { model_price: 0, model_ratio: 12 };
let inputPrice;
-
+
if (pricing.model_price && pricing.model_price > 0) {
inputPrice = pricing.model_price * 2;
} else if (pricing.model_ratio) {
inputPrice = pricing.model_ratio * 2;
}
-
+
expect(inputPrice).toBe(24); // model_ratio * 2
});
it('should handle missing model_ratio (Branch 3.9: model_ratio = undefined)', () => {
const pricing: Partial = { quota_type: 0 }; // No model_price and no model_ratio
let inputPrice: number | undefined;
-
+
if (pricing.model_price && pricing.model_price > 0) {
inputPrice = pricing.model_price * 2;
} else if (pricing.model_ratio) {
inputPrice = pricing.model_ratio * 2;
}
-
+
expect(inputPrice).toBeUndefined();
});
it('should calculate output price when inputPrice is defined (Branch 3.10: inputPrice !== undefined = true)', () => {
const inputPrice = 20;
const completionRatio = 1.5;
-
+
let outputPrice;
if (inputPrice !== undefined) {
outputPrice = inputPrice * (completionRatio || 1);
}
-
+
expect(outputPrice).toBe(30);
});
it('should use default completion_ratio when not provided', () => {
const inputPrice = 16;
const completionRatio = undefined;
-
+
let outputPrice;
if (inputPrice !== undefined) {
outputPrice = inputPrice * (completionRatio || 1);
}
-
+
expect(outputPrice).toBe(16); // input * 1 (default)
});
});
@@ -358,74 +358,77 @@ describe('NewAPI Runtime - 100% Branch Coverage', () => {
it('should use supported_endpoint_types with anthropic (Branch 3.11: length > 0 = true, Branch 3.12: includes anthropic = true)', () => {
const model = { supported_endpoint_types: ['anthropic'] };
let detectedProvider = 'openai';
-
+
if (model.supported_endpoint_types && model.supported_endpoint_types.length > 0) {
if (model.supported_endpoint_types.includes('anthropic')) {
detectedProvider = 'anthropic';
}
}
-
+
expect(detectedProvider).toBe('anthropic');
});
it('should use supported_endpoint_types with gemini (Branch 3.13: includes gemini = true)', () => {
const model = { supported_endpoint_types: ['gemini'] };
let detectedProvider = 'openai';
-
+
if (model.supported_endpoint_types && model.supported_endpoint_types.length > 0) {
if (model.supported_endpoint_types.includes('gemini')) {
detectedProvider = 'google';
}
}
-
+
expect(detectedProvider).toBe('google');
});
it('should use supported_endpoint_types with xai (Branch 3.14: includes xai = true)', () => {
const model = { supported_endpoint_types: ['xai'] };
let detectedProvider = 'openai';
-
+
if (model.supported_endpoint_types && model.supported_endpoint_types.length > 0) {
if (model.supported_endpoint_types.includes('xai')) {
detectedProvider = 'xai';
}
}
-
+
expect(detectedProvider).toBe('xai');
});
it('should fallback to owned_by when supported_endpoint_types is empty (Branch 3.11: length > 0 = false, Branch 3.15: owned_by = true)', () => {
- const model: Partial = { supported_endpoint_types: [], owned_by: 'anthropic' };
+ const model: Partial<NewAPIModelCard> = {
+ supported_endpoint_types: [],
+ owned_by: 'anthropic',
+ };
let detectedProvider = 'openai';
-
+
if (model.supported_endpoint_types && model.supported_endpoint_types.length > 0) {
// Skip - empty array
} else if (model.owned_by) {
detectedProvider = 'anthropic'; // Simplified for test
}
-
+
expect(detectedProvider).toBe('anthropic');
});
it('should fallback to owned_by when no supported_endpoint_types (Branch 3.15: owned_by = true)', () => {
const model: Partial = { owned_by: 'google' };
let detectedProvider = 'openai';
-
+
if (model.supported_endpoint_types && model.supported_endpoint_types.length > 0) {
// Skip - no supported_endpoint_types
} else if (model.owned_by) {
detectedProvider = 'google'; // Simplified for test
}
-
+
expect(detectedProvider).toBe('google');
});
it('should use detectModelProvider fallback when no owned_by (Branch 3.15: owned_by = false, Branch 3.17)', () => {
const model: Partial = { id: 'claude-3-sonnet', owned_by: '' };
mockDetectModelProvider.mockReturnValue('anthropic');
-
+
let detectedProvider = 'openai';
-
+
if (model.supported_endpoint_types && model.supported_endpoint_types.length > 0) {
// Skip - no supported_endpoint_types
} else if (model.owned_by) {
@@ -433,7 +436,7 @@ describe('NewAPI Runtime - 100% Branch Coverage', () => {
} else {
detectedProvider = mockDetectModelProvider(model.id || '');
}
-
+
expect(detectedProvider).toBe('anthropic');
expect(mockDetectModelProvider).toHaveBeenCalledWith('claude-3-sonnet');
});
@@ -444,11 +447,11 @@ describe('NewAPI Runtime - 100% Branch Coverage', () => {
displayName: 'Test Model',
_detectedProvider: 'openai',
};
-
+
if (model._detectedProvider) {
delete model._detectedProvider;
}
-
+
expect(model).not.toHaveProperty('_detectedProvider');
});
@@ -457,27 +460,31 @@ describe('NewAPI Runtime - 100% Branch Coverage', () => {
id: 'test-model',
displayName: 'Test Model',
};
-
+
const hadDetectedProvider = '_detectedProvider' in model;
-
+
if (model._detectedProvider) {
delete model._detectedProvider;
}
-
+
expect(hadDetectedProvider).toBe(false);
});
});
describe('URL Processing Branch Coverage', () => {
- it('should remove trailing /v1 from baseURL', () => {
+ it('should remove trailing API version paths from baseURL', () => {
const testURLs = [
{ input: 'https://api.newapi.com/v1', expected: 'https://api.newapi.com' },
{ input: 'https://api.newapi.com/v1/', expected: 'https://api.newapi.com' },
+ { input: 'https://api.newapi.com/v1beta', expected: 'https://api.newapi.com' },
+ { input: 'https://api.newapi.com/v1beta/', expected: 'https://api.newapi.com' },
+ { input: 'https://api.newapi.com/v2', expected: 'https://api.newapi.com' },
+ { input: 'https://api.newapi.com/v1alpha', expected: 'https://api.newapi.com' },
{ input: 'https://api.newapi.com', expected: 'https://api.newapi.com' },
];
testURLs.forEach(({ input, expected }) => {
- const result = input.replace(/\/v1\/?$/, '');
+ const result = input.replace(/\/v\d+[a-z]*\/?$/, '');
expect(result).toBe(expected);
});
});
@@ -538,7 +545,7 @@ describe('NewAPI Runtime - 100% Branch Coverage', () => {
{ model_name: 'openai-gpt4', quota_type: 1, model_price: 30 }, // Should be skipped
];
- const pricingMap = new Map(pricingData.map(p => [p.model_name, p]));
+ const pricingMap = new Map(pricingData.map((p) => [p.model_name, p]));
const enrichedModels = models.map((model) => {
let enhancedModel: any = { ...model };
@@ -601,7 +608,7 @@ describe('NewAPI Runtime - 100% Branch Coverage', () => {
// Test the dynamic routers configuration
const testOptions = {
apiKey: 'test-key',
- baseURL: 'https://yourapi.cn/v1'
+ baseURL: 'https://yourapi.cn/v1',
};
// Create instance to test dynamic routers
@@ -611,8 +618,8 @@ describe('NewAPI Runtime - 100% Branch Coverage', () => {
// The dynamic routers should be configured with user's baseURL
// This is tested indirectly through successful instantiation
// since the routers function processes the options.baseURL
- const expectedBaseURL = testOptions.baseURL.replace(/\/v1\/?$/, '');
+ const expectedBaseURL = testOptions.baseURL.replace(/\/v\d+[a-z]*\/?$/, '');
expect(expectedBaseURL).toBe('https://yourapi.cn');
});
});
-});
\ No newline at end of file
+});
diff --git a/packages/model-runtime/src/newapi/index.ts b/packages/model-runtime/src/newapi/index.ts
index e3beacb44c..fb3ce1d8b2 100644
--- a/packages/model-runtime/src/newapi/index.ts
+++ b/packages/model-runtime/src/newapi/index.ts
@@ -1,3 +1,4 @@
+import { LOBE_DEFAULT_MODEL_LIST } from 'model-bank';
import urlJoin from 'url-join';
import { createRouterRuntime } from '../RouterRuntime';
@@ -54,9 +55,6 @@ const getProviderFromOwnedBy = (ownedBy: string): string => {
return 'openai';
};
-// 全局的模型路由映射,在 models 函数执行后被填充
-let globalModelRouteMap: Map = new Map();
-
export const LobeNewAPIAI = createRouterRuntime({
debug: {
chatCompletion: () => process.env.DEBUG_NEWAPI_CHAT_COMPLETION === '1',
@@ -66,180 +64,163 @@ export const LobeNewAPIAI = createRouterRuntime({
},
id: ModelProvider.NewAPI,
models: async ({ client: openAIClient }) => {
- // 每次调用 models 时清空并重建路由映射
- globalModelRouteMap.clear();
+ // 获取基础 URL(移除末尾的 API 版本路径如 /v1、/v1beta 等)
+ const baseURL = openAIClient.baseURL.replace(/\/v\d+[a-z]*\/?$/, '');
- // 获取基础 URL(移除末尾的 /v1)
- const baseURL = openAIClient.baseURL.replace(/\/v1\/?$/, '');
+ const modelsPage = (await openAIClient.models.list()) as any;
+ const modelList: NewAPIModelCard[] = modelsPage.data || [];
- const modelsPage = (await openAIClient.models.list()) as any;
- const modelList: NewAPIModelCard[] = modelsPage.data || [];
+ // 尝试获取 pricing 信息以补充模型详细信息
+ let pricingMap: Map<string, NewAPIPricing> = new Map();
+ try {
+ // 使用保存的 baseURL
+ const pricingResponse = await fetch(`${baseURL}/api/pricing`, {
+ headers: {
+ Authorization: `Bearer ${openAIClient.apiKey}`,
+ },
+ });
- // 尝试获取 pricing 信息以补充模型详细信息
- let pricingMap: Map = new Map();
- try {
- // 使用保存的 baseURL
- const pricingResponse = await fetch(`${baseURL}/api/pricing`, {
- headers: {
- Authorization: `Bearer ${openAIClient.apiKey}`,
- },
- });
+ if (pricingResponse.ok) {
+ const pricingData = await pricingResponse.json();
+ if (pricingData.success && pricingData.data) {
+ (pricingData.data as NewAPIPricing[]).forEach((pricing) => {
+ pricingMap.set(pricing.model_name, pricing);
+ });
+ }
+ }
+ } catch (error) {
+ // If fetching pricing information fails, continue using the basic model information
+ console.debug('Failed to fetch NewAPI pricing info:', error);
+ }
- if (pricingResponse.ok) {
- const pricingData = await pricingResponse.json();
- if (pricingData.success && pricingData.data) {
- (pricingData.data as NewAPIPricing[]).forEach((pricing) => {
- pricingMap.set(pricing.model_name, pricing);
- });
+ // Process the model list: determine the provider for each model based on priority rules
+ const enrichedModelList = modelList.map((model) => {
+ let enhancedModel: any = { ...model };
+
+ // 1. 添加 pricing 信息
+ const pricing = pricingMap.get(model.id);
+ if (pricing) {
+ // NewAPI 的价格计算逻辑:
+ // - quota_type: 0 表示按量计费(按 token),1 表示按次计费
+ // - model_ratio: 相对于基础价格的倍率(基础价格 = $0.002/1K tokens)
+ // - model_price: 直接指定的价格(优先使用)
+ // - completion_ratio: 输出价格相对于输入价格的倍率
+ //
+ // LobeChat 需要的格式:美元/百万 token
+
+ let inputPrice: number | undefined;
+ let outputPrice: number | undefined;
+
+ if (pricing.quota_type === 0) {
+ // 按量计费
+ if (pricing.model_price && pricing.model_price > 0) {
+ // model_price is a direct price value whose unit NewAPI does not document.
+ // NOTE(review): the conversion below treats model_price like model_ratio,
+ // i.e. a multiple of the base price ($0.002/1K tokens = $2/1M tokens),
+ // so inputPrice = model_price * 2 yields $/1M tokens for LobeChat.
+ // TODO: confirm the actual unit of model_price against the NewAPI pricing API.
+ inputPrice = pricing.model_price * 2;
+ } else if (pricing.model_ratio) {
+ // model_ratio × $0.002/1K = model_ratio × $2/1M
+ inputPrice = pricing.model_ratio * 2; // 转换为 $/1M tokens
+ }
+
+ if (inputPrice !== undefined) {
+ // 计算输出价格
+ outputPrice = inputPrice * (pricing.completion_ratio || 1);
+
+ enhancedModel.pricing = {
+ input: inputPrice,
+ output: outputPrice,
+ };
}
}
- } catch (error) {
- // If fetching pricing information fails, continue using the basic model information
- console.debug('Failed to fetch NewAPI pricing info:', error);
+ // quota_type === 1 按次计费暂不支持
}
- // Process the model list: determine the provider for each model based on priority rules
- const enrichedModelList = modelList.map((model) => {
- let enhancedModel: any = { ...model };
+ // 2. 根据优先级处理 provider 信息并缓存路由
+ let detectedProvider = 'openai'; // 默认
- // 1. 添加 pricing 信息
- const pricing = pricingMap.get(model.id);
- if (pricing) {
- // NewAPI 的价格计算逻辑:
- // - quota_type: 0 表示按量计费(按 token),1 表示按次计费
- // - model_ratio: 相对于基础价格的倍率(基础价格 = $0.002/1K tokens)
- // - model_price: 直接指定的价格(优先使用)
- // - completion_ratio: 输出价格相对于输入价格的倍率
- //
- // LobeChat 需要的格式:美元/百万 token
-
- let inputPrice: number | undefined;
- let outputPrice: number | undefined;
-
- if (pricing.quota_type === 0) {
- // 按量计费
- if (pricing.model_price && pricing.model_price > 0) {
- // model_price is a direct price value; need to confirm its unit.
- // Assumption: model_price is the price per 1,000 tokens (i.e., $/1K tokens).
- // To convert to price per 1,000,000 tokens ($/1M tokens), multiply by 1,000,000 / 1,000 = 1,000.
- // Since the base price is $0.002/1K tokens, multiplying by 2 gives $2/1M tokens.
- // Therefore, inputPrice = model_price * 2 converts the price to $/1M tokens for LobeChat.
- inputPrice = pricing.model_price * 2;
- } else if (pricing.model_ratio) {
- // model_ratio × $0.002/1K = model_ratio × $2/1M
- inputPrice = pricing.model_ratio * 2; // 转换为 $/1M tokens
- }
-
- if (inputPrice !== undefined) {
- // 计算输出价格
- outputPrice = inputPrice * (pricing.completion_ratio || 1);
-
- enhancedModel.pricing = {
- input: inputPrice,
- output: outputPrice,
- };
- }
- }
- // quota_type === 1 按次计费暂不支持
+ // 优先级1:使用 supported_endpoint_types
+ if (model.supported_endpoint_types && model.supported_endpoint_types.length > 0) {
+ if (model.supported_endpoint_types.includes('anthropic')) {
+ detectedProvider = 'anthropic';
+ } else if (model.supported_endpoint_types.includes('gemini')) {
+ detectedProvider = 'google';
+ } else if (model.supported_endpoint_types.includes('xai')) {
+ detectedProvider = 'xai';
}
+ }
+ // 优先级2:使用 owned_by 字段
+ else if (model.owned_by) {
+ detectedProvider = getProviderFromOwnedBy(model.owned_by);
+ }
+ // 优先级3:基于模型名称检测
+ else {
+ detectedProvider = detectModelProvider(model.id);
+ }
- // 2. 根据优先级处理 provider 信息并缓存路由
- let detectedProvider = 'openai'; // 默认
+ // 将检测到的 provider 信息附加到模型上
+ enhancedModel._detectedProvider = detectedProvider;
- // 优先级1:使用 supported_endpoint_types
- if (model.supported_endpoint_types && model.supported_endpoint_types.length > 0) {
- if (model.supported_endpoint_types.includes('anthropic')) {
- detectedProvider = 'anthropic';
- } else if (model.supported_endpoint_types.includes('gemini')) {
- detectedProvider = 'google';
- } else if (model.supported_endpoint_types.includes('xai')) {
- detectedProvider = 'xai';
- }
- }
- // 优先级2:使用 owned_by 字段
- else if (model.owned_by) {
- detectedProvider = getProviderFromOwnedBy(model.owned_by);
- }
- // 优先级3:基于模型名称检测
- else {
- detectedProvider = detectModelProvider(model.id);
- }
+ return enhancedModel;
+ });
- // 将检测到的 provider 信息附加到模型上,供路由使用
- enhancedModel._detectedProvider = detectedProvider;
- // 同时更新全局路由映射表
- globalModelRouteMap.set(model.id, detectedProvider);
+ // 使用 processMultiProviderModelList 处理模型能力
+ const processedModels = await processMultiProviderModelList(enrichedModelList, 'newapi');
- return enhancedModel;
- });
+ // 清理临时字段
+ return processedModels.map((model: any) => {
+ if (model._detectedProvider) {
+ delete model._detectedProvider;
+ }
+ return model;
+ });
+ },
+ routers: (options) => {
+ const userBaseURL = options.baseURL?.replace(/\/v\d+[a-z]*\/?$/, '') || '';
- // 使用 processMultiProviderModelList 处理模型能力
- const processedModels = await processMultiProviderModelList(enrichedModelList, 'newapi');
-
- // 如果我们检测到了 provider,确保它被正确应用
- return processedModels.map((model: any) => {
- if (model._detectedProvider) {
- // Here you can adjust certain model properties as needed.
- // FIXME: The current data structure does not support storing provider information, and the official NewAPI does not provide a corresponding field. Consider extending the model schema if provider tracking is required in the future.
- delete model._detectedProvider; // Remove temporary field
- }
- return model;
- });
- },
- // 使用动态 routers 配置,在构造时获取用户的 baseURL
- routers: (options) => {
- // 使用全局的模型路由映射
- const userBaseURL = options.baseURL?.replace(/\/v1\/?$/, '') || '';
-
- return [
+ return [
{
apiType: 'anthropic',
- models: () =>
- Promise.resolve(
- Array.from(globalModelRouteMap.entries())
- .filter(([, provider]) => provider === 'anthropic')
- .map(([modelId]) => modelId),
- ),
+ models: LOBE_DEFAULT_MODEL_LIST.map((m) => m.id).filter(
+ (id) => detectModelProvider(id) === 'anthropic',
+ ),
options: {
- // Anthropic 在 NewAPI 中使用 /v1 路径,会自动转换为 /v1/messages
- baseURL: urlJoin(userBaseURL, '/v1'),
+ ...options,
+ baseURL: userBaseURL,
},
},
{
apiType: 'google',
- models: () =>
- Promise.resolve(
- Array.from(globalModelRouteMap.entries())
- .filter(([, provider]) => provider === 'google')
- .map(([modelId]) => modelId),
- ),
+ models: LOBE_DEFAULT_MODEL_LIST.map((m) => m.id).filter(
+ (id) => detectModelProvider(id) === 'google',
+ ),
options: {
- // Gemini 在 NewAPI 中使用 /v1beta 路径
- baseURL: urlJoin(userBaseURL, '/v1beta'),
+ ...options,
+ baseURL: userBaseURL,
},
},
{
apiType: 'xai',
- models: () =>
- Promise.resolve(
- Array.from(globalModelRouteMap.entries())
- .filter(([, provider]) => provider === 'xai')
- .map(([modelId]) => modelId),
- ),
+ models: LOBE_DEFAULT_MODEL_LIST.map((m) => m.id).filter(
+ (id) => detectModelProvider(id) === 'xai',
+ ),
options: {
- // xAI 使用标准 OpenAI 格式,走 /v1 路径
+ ...options,
baseURL: urlJoin(userBaseURL, '/v1'),
},
},
{
apiType: 'openai',
options: {
+ ...options,
baseURL: urlJoin(userBaseURL, '/v1'),
chatCompletion: {
handlePayload,
},
},
},
- ];
- },
+ ];
+ },
});
diff --git a/src/app/[variants]/(main)/settings/provider/(detail)/newapi/page.tsx b/src/app/[variants]/(main)/settings/provider/(detail)/newapi/page.tsx
index be1203a95f..ba3f5d0dcd 100644
--- a/src/app/[variants]/(main)/settings/provider/(detail)/newapi/page.tsx
+++ b/src/app/[variants]/(main)/settings/provider/(detail)/newapi/page.tsx
@@ -16,7 +16,7 @@ const Page = () => {
...NewAPIProviderCard.settings,
proxyUrl: {
desc: t('newapi.apiUrl.desc'),
- placeholder: 'https://any-newapi-provider.com/v1',
+ placeholder: 'https://any-newapi-provider.com',
title: t('newapi.apiUrl.title'),
},
}}
diff --git a/src/config/llm.ts b/src/config/llm.ts
index 441ac8a8ce..02f30173bb 100644
--- a/src/config/llm.ts
+++ b/src/config/llm.ts
@@ -186,6 +186,10 @@ export const getLLMConfig = () => {
ENABLED_AIHUBMIX: z.boolean(),
AIHUBMIX_API_KEY: z.string().optional(),
+
+ ENABLED_NEWAPI: z.boolean(),
+ NEWAPI_API_KEY: z.string().optional(),
+ NEWAPI_PROXY_URL: z.string().optional(),
},
runtimeEnv: {
API_KEY_SELECT_MODE: process.env.API_KEY_SELECT_MODE,
@@ -368,6 +372,10 @@ export const getLLMConfig = () => {
ENABLED_AIHUBMIX: !!process.env.AIHUBMIX_API_KEY,
AIHUBMIX_API_KEY: process.env.AIHUBMIX_API_KEY,
+ ENABLED_NEWAPI: !!process.env.NEWAPI_API_KEY,
+ NEWAPI_API_KEY: process.env.NEWAPI_API_KEY,
+ NEWAPI_PROXY_URL: process.env.NEWAPI_PROXY_URL,
+
ENABLED_NEBIUS: !!process.env.NEBIUS_API_KEY,
NEBIUS_API_KEY: process.env.NEBIUS_API_KEY,
},
diff --git a/src/locales/default/modelProvider.ts b/src/locales/default/modelProvider.ts
index e286194f7d..3e039f8f88 100644
--- a/src/locales/default/modelProvider.ts
+++ b/src/locales/default/modelProvider.ts
@@ -164,7 +164,7 @@ export default {
title: 'API 密钥',
},
apiUrl: {
- desc: 'New API 服务的 API 地址,大部分时候需要带 /v1',
+ desc: 'New API 服务的 API 地址,大部分时候不要带 /v1',
title: 'API 地址',
},
enabled: {