feat: Add new provider Xiaomi MiMo (#10834)

*  feat: 添加 Xiaomi MiMo 模型及其配置,更新相关接口和环境变量

*  feat: 添加 Xiaomi MiMo AI 模型及其导出到 package.json 和 index.ts

*  feat: 更新 Xiaomi MiMo 模型的配置,添加单元测试以验证功能

*  feat: 移除 Xiaomi MiMo 模型的 enabled 属性,优化设置配置

* Update index.ts

* Update llm.ts

* Update llm.ts

*  feat(model): add Xiaomi MiMo provider

* Update index.ts

* update Xiaomi MiMo descriptions to English
This commit is contained in:
sxjeru
2026-01-04 14:49:30 +08:00
committed by GitHub
parent f8be760115
commit 62f78586f7
12 changed files with 264 additions and 0 deletions

View File

@@ -71,6 +71,7 @@
"./volcengine": "./src/aiModels/volcengine.ts",
"./wenxin": "./src/aiModels/wenxin.ts",
"./xai": "./src/aiModels/xai.ts",
"./xiaomimimo": "./src/aiModels/xiaomimimo.ts",
"./xinference": "./src/aiModels/xinference.ts",
"./zenmux": "./src/aiModels/zenmux.ts",
"./zeroone": "./src/aiModels/zeroone.ts",

View File

@@ -66,6 +66,7 @@ import { default as vllm } from './vllm';
import { default as volcengine } from './volcengine';
import { default as wenxin } from './wenxin';
import { default as xai } from './xai';
import { default as xiaomimimo } from './xiaomimimo';
import { default as xinference } from './xinference';
import { default as zenmux } from './zenmux';
import { default as zeroone } from './zeroone';
@@ -156,6 +157,7 @@ export const LOBE_DEFAULT_MODEL_LIST = buildDefaultModelList({
volcengine,
wenxin,
xai,
xiaomimimo,
xinference,
zenmux,
zeroone,
@@ -227,6 +229,7 @@ export { default as vllm } from './vllm';
export { default as volcengine } from './volcengine';
export { default as wenxin } from './wenxin';
export { default as xai } from './xai';
export { default as xiaomimimo } from './xiaomimimo';
export { default as xinference } from './xinference';
export { default as zenmux } from './zenmux';
export { default as zeroone } from './zeroone';

View File

@@ -0,0 +1,24 @@
import { AIChatModelCard } from '../types/aiModel';
// Chat model cards exposed by the Xiaomi MiMo provider.
const xiaomimimoChatModels: AIChatModelCard[] = [
  {
    abilities: {
      // Supports tool / function calling.
      functionCall: true,
      // Supports extended reasoning ("thinking") output.
      reasoning: true,
    },
    // 256K-token context window (262,144 = 256 * 1024).
    contextWindowTokens: 262_144,
    description: 'MiMo-V2-Flash: An efficient model for reasoning, coding, and agent foundations.',
    displayName: 'MiMo-V2 Flash',
    // Enabled by default in the model picker.
    enabled: true,
    id: 'mimo-v2-flash',
    // Up to 128K output tokens (131,072 = 128 * 1024).
    maxOutput: 131_072,
    settings: {
      // Surfaces the reasoning on/off toggle in chat settings.
      extendParams: ['enableReasoning'],
    },
    type: 'chat',
  },
];

// Aggregated export; spread form allows appending further model groups later.
export const allModels = [...xiaomimimoChatModels];

export default allModels;

View File

@@ -64,6 +64,7 @@ export enum ModelProvider {
Volcengine = 'volcengine',
Wenxin = 'wenxin',
XAI = 'xai',
XiaomiMiMo = 'xiaomimimo',
Xinference = 'xinference',
ZenMux = 'zenmux',
ZeroOne = 'zeroone',

View File

@@ -67,6 +67,7 @@ import VLLMProvider from './vllm';
import VolcengineProvider from './volcengine';
import WenxinProvider from './wenxin';
import XAIProvider from './xai';
import XiaomiMiMoProvider from './xiaomimimo';
import XinferenceProvider from './xinference';
import ZenMuxProvider from './zenmux';
import ZeroOneProvider from './zeroone';
@@ -198,6 +199,7 @@ export const DEFAULT_MODEL_PROVIDER_LIST = [
VercelAIGatewayProvider,
CerebrasProvider,
ZenMuxProvider,
XiaomiMiMoProvider,
];
export const filterEnabledModels = (provider: ModelProviderCard) => {
@@ -274,6 +276,7 @@ export { default as VLLMProviderCard } from './vllm';
export { default as VolcengineProviderCard } from './volcengine';
export { default as WenxinProviderCard } from './wenxin';
export { default as XAIProviderCard } from './xai';
export { default as XiaomiMiMoProviderCard } from './xiaomimimo';
export { default as XinferenceProviderCard } from './xinference';
export { default as ZenMuxProviderCard } from './zenmux';
export { default as ZeroOneProviderCard } from './zeroone';

View File

@@ -0,0 +1,22 @@
import { type ModelProviderCard } from '@/types/llm';
// Provider card for Xiaomi MiMo (OpenAI-compatible chat API).
const XiaomiMiMo: ModelProviderCard = {
  // No built-in static model cards; the model list is fetched remotely
  // (see showModelFetcher below).
  chatModels: [],
  // Model used for the connectivity check in provider settings.
  checkModel: 'mimo-v2-flash',
  description:
    'Xiaomi MiMo provides a conversational model service with an OpenAI-compatible API. The mimo-v2-flash model supports deep reasoning, streaming output, function calling, a 256K context window, and a maximum output of 128K.',
  id: 'xiaomimimo',
  modelList: { showModelFetcher: true },
  name: 'Xiaomi MiMo',
  settings: {
    disableBrowserRequest: true, // CORS error
    proxyUrl: {
      // Default MiMo endpoint, shown as the proxy-URL placeholder.
      placeholder: 'https://api.xiaomimimo.com/v1',
    },
    // Routed through the generic OpenAI-compatible SDK runtime.
    sdkType: 'openai',
    showModelFetcher: true,
  },
  url: 'https://platform.xiaomimimo.com/',
};

export default XiaomiMiMo;

View File

@@ -32,6 +32,7 @@ export { LobeQwenAI } from './providers/qwen';
export { LobeStepfunAI } from './providers/stepfun';
export { LobeTogetherAI } from './providers/togetherai';
export { LobeVolcengineAI } from './providers/volcengine';
export { LobeXiaomiMiMoAI } from './providers/xiaomimimo';
export { LobeZenMuxAI } from './providers/zenmux';
export { LobeZeroOneAI } from './providers/zeroone';
export { LobeZhipuAI } from './providers/zhipu';

View File

@@ -0,0 +1,147 @@
// @vitest-environment node
import { ModelProvider } from 'model-bank';
import { describe, expect, it, vi } from 'vitest';
import { testProvider } from '../../providerTestUtils';
import { LobeXiaomiMiMoAI, params } from './index';
const provider = ModelProvider.XiaomiMiMo;
const defaultBaseURL = 'https://api.xiaomimimo.com/v1';

// Shared conformance suite: base URL, API-key handling, debug env flag.
testProvider({
  Runtime: LobeXiaomiMiMoAI,
  provider,
  defaultBaseURL,
  chatDebugEnv: 'DEBUG_XIAOMIMIMO_CHAT_COMPLETION',
  chatModel: 'gpt-4o',
  test: {
    skipAPICall: true,
  },
});

describe('LobeXiaomiMiMoAI - custom features', () => {
  // Shorthand: run the provider's payload transform on a raw payload object.
  const transform = (payload: Record<string, unknown>) =>
    params.chatCompletion!.handlePayload!(payload as any);

  describe('chatCompletion.handlePayload', () => {
    it('should map max_tokens to max_completion_tokens', () => {
      const result = transform({ max_tokens: 1000, model: 'gpt-4o' });

      expect(result.max_completion_tokens).toBe(1000);
      expect(result.max_tokens).toBeUndefined();
    });

    it('should set stream to true by default', () => {
      expect(transform({ model: 'gpt-4o' }).stream).toBe(true);
    });

    it('should preserve existing stream value', () => {
      expect(transform({ model: 'gpt-4o', stream: false }).stream).toBe(false);
    });

    it('should clamp temperature between 0 and 1.5', () => {
      expect(transform({ model: 'gpt-4o', temperature: -1 }).temperature).toBe(0);
      expect(transform({ model: 'gpt-4o', temperature: 2 }).temperature).toBe(1.5);
      expect(transform({ model: 'gpt-4o', temperature: 0.7 }).temperature).toBe(0.7);
    });

    it('should clamp top_p between 0.01 and 1', () => {
      expect(transform({ model: 'gpt-4o', top_p: 0 }).top_p).toBe(0.01);
      expect(transform({ model: 'gpt-4o', top_p: 1.5 }).top_p).toBe(1);
      expect(transform({ model: 'gpt-4o', top_p: 0.5 }).top_p).toBe(0.5);
    });

    it('should handle thinking type enabled/disabled', () => {
      const enabled = transform({ model: 'gpt-4o', thinking: { type: 'enabled' } });
      expect(enabled.thinking).toEqual({ type: 'enabled' });

      const disabled = transform({ model: 'gpt-4o', thinking: { type: 'disabled' } });
      expect(disabled.thinking).toEqual({ type: 'disabled' });

      // Unknown thinking types must be stripped from the payload.
      const other = transform({ model: 'gpt-4o', thinking: { type: 'other' } });
      expect(other.thinking).toBeUndefined();
    });
  });

  describe('models', () => {
    it('should fetch and process model list', async () => {
      const mockModels = [{ id: 'model-1' }, { id: 'model-2' }];
      const client = {
        models: {
          list: vi.fn().mockResolvedValue({ data: mockModels }),
        },
      };

      const result = await params.models!({ client: client as any });

      expect(client.models.list).toHaveBeenCalled();
      expect(result).toEqual(
        expect.arrayContaining([
          expect.objectContaining({ id: 'model-1' }),
          expect.objectContaining({ id: 'model-2' }),
        ]),
      );
    });
  });
});

View File

@@ -0,0 +1,48 @@
import { ModelProvider } from 'model-bank';
import {
type OpenAICompatibleFactoryOptions,
createOpenAICompatibleRuntime,
} from '../../core/openaiCompatibleFactory';
import { MODEL_LIST_CONFIGS, processModelList } from '../../utils/modelParse';
const clamp = (value: number, min: number, max: number) => Math.min(max, Math.max(min, value));
/** Minimal shape of a model entry returned by MiMo's OpenAI-compatible `/models` endpoint. */
export interface XiaomiMiMoModelCard {
  id: string;
}

/**
 * Factory options for the Xiaomi MiMo runtime (OpenAI-compatible API).
 *
 * Exported separately so unit tests can exercise `chatCompletion.handlePayload`
 * and `models` directly.
 */
export const params = {
  baseURL: 'https://api.xiaomimimo.com/v1',
  chatCompletion: {
    // Normalizes an outgoing chat payload to MiMo's expectations.
    handlePayload: (payload) => {
      const { thinking, temperature, top_p, max_tokens, stream, ...rest } = payload as any;
      const thinkingType = thinking?.type;
      return {
        ...rest,
        // MiMo uses the newer `max_completion_tokens` field in place of
        // `max_tokens`. Only emit it when the caller actually provided a
        // number, instead of always setting an explicit-undefined key
        // (consistent with the conditional spreads below).
        ...(typeof max_tokens === 'number'
          ? { max_completion_tokens: max_tokens }
          : undefined),
        // Default to streaming unless the caller explicitly opted out.
        stream: stream ?? true,
        // Clamp temperature to MiMo's accepted range [0, 1.5].
        ...(typeof temperature === 'number'
          ? { temperature: clamp(temperature, 0, 1.5) }
          : undefined),
        // Clamp top_p to [0.01, 1] (0 is rejected by the API).
        ...(typeof top_p === 'number' ? { top_p: clamp(top_p, 0.01, 1) } : undefined),
        // Pass through only the two thinking modes MiMo understands;
        // any other value is stripped from the payload.
        ...(thinkingType === 'enabled' || thinkingType === 'disabled'
          ? { thinking: { type: thinkingType } }
          : undefined),
      } as any;
    },
  },
  debug: {
    chatCompletion: () => process.env.DEBUG_XIAOMIMIMO_CHAT_COMPLETION === '1',
  },
  // Fetches the raw model list and enriches it with MiMo ability keywords
  // (function calling / reasoning) via the shared model-parse config.
  models: async ({ client }) => {
    const modelsPage = (await client.models.list()) as any;
    const modelList: XiaomiMiMoModelCard[] = modelsPage.data;

    return processModelList(modelList, MODEL_LIST_CONFIGS.xiaomimimo, 'xiaomimimo');
  },
  provider: ModelProvider.XiaomiMiMo,
} satisfies OpenAICompatibleFactoryOptions;

export const LobeXiaomiMiMoAI = createOpenAICompatibleRuntime(params);

View File

@@ -62,6 +62,7 @@ import { LobeVLLMAI } from './providers/vllm';
import { LobeVolcengineAI } from './providers/volcengine';
import { LobeWenxinAI } from './providers/wenxin';
import { LobeXAI } from './providers/xai';
import { LobeXiaomiMiMoAI } from './providers/xiaomimimo';
import { LobeXinferenceAI } from './providers/xinference';
import { LobeZenMuxAI } from './providers/zenmux';
import { LobeZeroOneAI } from './providers/zeroone';
@@ -133,6 +134,7 @@ export const providerRuntimeMap = {
volcengine: LobeVolcengineAI,
wenxin: LobeWenxinAI,
xai: LobeXAI,
xiaomimimo: LobeXiaomiMiMoAI,
xinference: LobeXinferenceAI,
zenmux: LobeZenMuxAI,
zeroone: LobeZeroOneAI,

View File

@@ -120,6 +120,11 @@ export const MODEL_LIST_CONFIGS = {
reasoningKeywords: ['mini', 'grok-4', 'grok-code-fast', '!non-reasoning'],
visionKeywords: ['vision', 'grok-4'],
},
xiaomimimo: {
functionCallKeywords: ['mimo'],
reasoningKeywords: ['mimo'],
visionKeywords: [],
},
zeroone: {
functionCallKeywords: ['fc'],
visionKeywords: ['vision'],
@@ -149,6 +154,7 @@ export const MODEL_OWNER_DETECTION_CONFIG = {
volcengine: ['doubao'],
wenxin: ['ernie', 'qianfan'],
xai: ['grok'],
xiaomimimo: ['mimo-'],
zeroone: ['yi-'],
zhipu: ['glm'],
} as const;

View File

@@ -216,6 +216,9 @@ export const getLLMConfig = () => {
ZENMUX_API_KEY: z.string().optional(),
ENABLED_LOBEHUB: z.boolean(),
ENABLED_XIAOMIMIMO: z.boolean(),
XIAOMIMIMO_API_KEY: z.string().optional(),
},
runtimeEnv: {
API_KEY_SELECT_MODE: process.env.API_KEY_SELECT_MODE,
@@ -430,6 +433,9 @@ export const getLLMConfig = () => {
ZENMUX_API_KEY: process.env.ZENMUX_API_KEY,
ENABLED_LOBEHUB: !!process.env.ENABLED_LOBEHUB,
ENABLED_XIAOMIMIMO: !!process.env.XIAOMIMIMO_API_KEY,
XIAOMIMIMO_API_KEY: process.env.XIAOMIMIMO_API_KEY,
},
});
};