feat: add nano banana Chinese prompt notify (#9038)

This commit is contained in:
YuTengjing
2025-09-03 16:29:13 +08:00
committed by GitHub
parent d1c5645517
commit 58e19f856c
22 changed files with 423 additions and 219 deletions

View File

@@ -19,12 +19,15 @@ lobe-chat/
│ ├── development/ # Development docs
│ ├── self-hosting/ # Self-hosting docs
│ └── usage/ # Usage guides
├── locales/ # Internationalization files
│ ├── en-US/ # English
│ └── zh-CN/ # Simplified Chinese
├── locales/ # Internationalization files (multiple locales)
│ ├── en-US/ # English (example)
│ └── zh-CN/ # Simplified Chinese (example)
├── packages/ # Monorepo packages directory
│ ├── const/ # Constants definition package
│ ├── database/ # Database related package
│ ├── electron-client-ipc/ # Electron renderer ↔ main IPC client
│ ├── electron-server-ipc/ # Electron main process IPC server
│ ├── model-bank/ # Built-in model presets/catalog exports
│ ├── model-runtime/ # AI model runtime package
│ ├── types/ # TypeScript type definitions
│ ├── utils/ # Utility functions package
@@ -37,7 +40,6 @@ lobe-chat/
│ └── screenshots/ # Application screenshots
├── scripts/ # Build and tool scripts
├── src/ # Main application source code (see below)
├── tests/ # Test configuration
├── .cursor/ # Cursor AI configuration
├── docker-compose/ # Docker configuration
├── package.json # Project dependencies
@@ -60,6 +62,7 @@ src/
│ │ ├── oidc/ # OpenID Connect endpoints
│ │ ├── trpc/ # tRPC API routes
│ │ │ ├── async/ # Async tRPC endpoints
│ │ │ ├── desktop/ # Desktop runtime endpoints
│ │ │ ├── edge/ # Edge runtime endpoints
│ │ │ ├── lambda/ # Lambda runtime endpoints
│ │ │ └── tools/ # Tools-specific endpoints
@@ -86,7 +89,7 @@ src/
│ ├── Error/ # Error handling components
│ └── Loading/ # Loading state components
├── config/ # Application configuration
│ ├── aiModels/ # AI model configurations
│ ├── featureFlags/ # Feature flags & experiments
│ └── modelProviders/ # Model provider configurations
├── features/ # Feature components (UI Layer)
│ ├── AgentSetting/ # Agent configuration and management
@@ -109,7 +112,10 @@ src/
│ ├── modules/ # Server modules
│ ├── routers/ # tRPC routers
│ └── services/ # Server services
├── services/ # Client service layer
├── services/ # Service layer (per-domain, client/server split)
│ ├── user/ # User services
│ │ ├── client.ts # Client DB (PGLite) implementation
│ │ └── server.ts # Server DB implementation (via tRPC)
│ ├── aiModel/ # AI model services
│ ├── session/ # Session services
│ └── message/ # Message services
@@ -162,43 +168,15 @@ packages/
└── web-crawler/ # Web crawling functionality
```
## Architecture Layers
## Architecture Map
### 1. **Presentation Layer**
- Business-specific feature components and reusable UI components
- Global layout providers and responsive design wrappers
### 2. **State Management Layer**
- Zustand-based client state with domain-specific slices
- Actions and selectors for predictable state updates
### 3. **Client Service Layer**
- Environment-adaptive services (local Model vs remote tRPC)
- Dual implementation pattern for multi-runtime compatibility
### 4. **API Interface Layer**
- Type-safe tRPC routers organized by runtime environment
- Request routing and validation
### 5. **Server Service Layer**
- Platform-agnostic business logic with implementation abstractions
- Reusable, testable service composition
### 6. **Data Access Layer**
- **Repository**: Complex queries, joins, and transaction management
- **Model**: Basic CRUD operations and single-table queries
- **Schema**: Drizzle ORM definitions and migration management
### 7. **Integration & Extensions**
- **External**: Third-party service integrations and library wrappers
- **Built-in**: AI runtime, tool system, file processing, and web crawling
- Presentation: `src/features`, `src/components`, `src/layout` — UI composition, global providers
- State: `src/store` — Zustand slices, selectors, middleware
- Client Services: `src/services/<domain>/{client|server}.ts` — client: PGLite; server: tRPC bridge
- API Routers: `src/app/(backend)/webapi` (REST), `src/app/(backend)/trpc/{edge|lambda|async|desktop|tools}`; Lambda router triggers Async router for long-running tasks (e.g., image)
- Server Services: `src/server/services` (business logic), `src/server/modules` (infra adapters)
- Data Access: `packages/database/src/{schemas,models,repositories}` — Schema (Drizzle), Model (CRUD), Repository (complex queries)
- Integrations: `src/libs` — analytics, auth, trpc, logging, runtime helpers
## Data Flow Architecture
@@ -225,3 +203,37 @@ _\*Depends on cloud sync configuration_
- **Type Safety**: End-to-end type safety via tRPC and Drizzle ORM
- **Local/Remote Dual Mode**: PGLite enables user data ownership and local control
## Quick Map
- App Routes: `src/app` — UI routes (App Router) and backend routes under `(backend)`
- Web API: `src/app/(backend)/webapi` — REST-like endpoints
- tRPC Routers: `src/server/routers` — typed RPC endpoints by runtime
- Client Services: `src/services` — environment-adaptive client-side business logic
- Server Services: `src/server/services` — platform-agnostic business logic
- Database: `packages/database` — schemas/models/repositories/migrations
- State: `src/store` — Zustand stores and slices
- Integrations: `src/libs` — analytics/auth/trpc/logging/runtime helpers
- Tools: `src/tools` — built-in tool system
## Common Tasks
- Add Web API route: `src/app/(backend)/webapi/<module>/route.ts`
- Add tRPC endpoint: `src/server/routers/{edge|lambda|desktop}/...`
- Add client/server service: `src/services/<domain>/{client|server}.ts` (client: PGLite; server: tRPC)
- Add server service: `src/server/services/<domain>`
- Add a new model/provider: `src/config/modelProviders/<provider>.ts` + `packages/model-bank/src/aiModels/<provider>.ts` + `packages/model-runtime/src/<provider>/index.ts`
- Add DB schema/model/repository: `packages/database/src/{schemas|models|repositories}`
- Add Zustand slice: `src/store/<domain>/slices`
## Env Modes
- `NEXT_PUBLIC_CLIENT_DB`: selects client DB mode (e.g., `pglite`) vs server-backed
- `NEXT_PUBLIC_IS_DESKTOP_APP`: enables desktop-specific routes and behavior
- `NEXT_PUBLIC_SERVICE_MODE`: controls service routing preference (client/server)
## Boundaries
- Keep client logic in `src/services`; server-only logic stays in `src/server/services`
- Don't mix Web API (`webapi/`) with tRPC (`src/server/routers/`)
- Place business UI under `src/features`, global reusable UI under `src/components`

View File

@@ -67,10 +67,19 @@ vitest test-file.test.ts
### 核心原则
1. **充分阅读测试代码**: 在修复测试之前,必须完整理解测试的意图和实现
2. **测试优先修复**: 如果是测试本身写错了,修改测试而不是实现代码
3. **专注单一问题**: 只修复指定的测试,不要添加额外测试或功能
4. **不自作主张**: 不要因为发现其他问题就直接修改,先提出再讨论
1. **收集足够的上下文**
在修复测试之前,务必做到:
- 完整理解测试的意图和实现
- 强烈建议阅读当前的 git diff 和 PR diff
2. **测试优先修复**
如果是测试本身写错了,应优先修改测试,而不是实现代码。
3. **专注单一问题**
只修复指定的测试,不要顺带添加额外测试。
4. **不自作主张**
发现其他问题时,不要直接修改,需先提出并讨论。
### 测试协作最佳实践
@@ -291,7 +300,7 @@ beforeEach(() => {
naturalWidth: 800,
}));
vi.stubGlobal('Image', mockImage);
// 现代方法2:使用 vi.spyOn 保留原功能,只 mock 特定方法
vi.spyOn(URL, 'createObjectURL').mockReturnValue('blob:mock-url');
vi.spyOn(URL, 'revokeObjectURL').mockImplementation(() => {});
@@ -312,8 +321,8 @@ global.Image = mockImage;
global.URL = { ...global.URL, createObjectURL: mockFn };
// ✅ 现代方法类型安全的vi API
vi.stubGlobal('Image', mockImage); // 完全替换全局对象
vi.spyOn(URL, 'createObjectURL'); // 部分mock保留其他功能
vi.stubGlobal('Image', mockImage); // 完全替换全局对象
vi.spyOn(URL, 'createObjectURL'); // 部分mock保留其他功能
```
### 测试覆盖率原则:代码分支优于用例数量
@@ -324,16 +333,16 @@ vi.spyOn(URL, 'createObjectURL'); // 部分mock保留其他功能
// ❌ 过度测试:29个测试用例都验证相同分支
describe('getImageDimensions', () => {
it('should reject .txt files');
it('should reject .pdf files');
it('should reject .pdf files');
// ... 25个类似测试都走相同的验证分支
});
// ✅ 精简测试:4个核心用例覆盖所有分支
describe('getImageDimensions', () => {
it('should return dimensions for valid File object'); // 成功路径 - File
it('should return dimensions for valid data URI'); // 成功路径 - String
it('should return undefined for invalid inputs'); // 输入验证分支
it('should return undefined when image fails to load'); // 错误处理分支
it('should return dimensions for valid File object'); // 成功路径 - File
it('should return dimensions for valid data URI'); // 成功路径 - String
it('should return undefined for invalid inputs'); // 输入验证分支
it('should return undefined when image fails to load'); // 错误处理分支
});
```
@@ -345,6 +354,7 @@ describe('getImageDimensions', () => {
4. **业务逻辑** - 覆盖所有if/else分支
**合理测试数量**
- 简单工具函数:2-5个测试
- 复杂业务逻辑:5-10个测试
- 核心安全功能:适当增加,但避免重复路径
@@ -358,10 +368,12 @@ describe('getImageDimensions', () => {
```typescript
// ✅ 测试错误类型和属性
expect(() => validateUser({})).toThrow(ValidationError);
expect(() => processPayment({})).toThrow(expect.objectContaining({
code: 'INVALID_PAYMENT_DATA',
statusCode: 400,
}));
expect(() => processPayment({})).toThrow(
expect.objectContaining({
code: 'INVALID_PAYMENT_DATA',
statusCode: 400,
}),
);
// ❌ 避免测试具体错误文本
expect(() => processUser({})).toThrow('用户数据不能为空,请检查输入参数');
@@ -461,7 +473,6 @@ await (instance as any).getFromCache('key'); // 避免as any
- **文档说明**: 对于使用 `any` 的复杂场景,添加注释说明原因
- **测试覆盖**: 确保即使使用了 `any`,测试仍能有效验证功能正确性
### 检查最近修改记录
**核心原则**:测试突然失败时,优先检查最近的代码修改。

View File

@@ -18,7 +18,6 @@
"javascriptreact",
"typescript",
"typescriptreact",
"markdown",
// support mdx
"mdx"
],

View File

@@ -58,12 +58,11 @@ Testing work follows the Rule-Aware Task Execution system above.
- use `bun run type-check` to check type errors.
### Internationalization
### i18n
- **Keys**: Add to `src/locales/default/namespace.ts`
- **Dev**: Translate at least `zh-CN` files for preview
- **Structure**: Hierarchical nested objects, not flat keys
- **Script**: DON'T run `pnpm i18n` (user/CI handles it)
- **Dev**: Translate `locales/zh-CN/namespace.json` locale file only for preview
- DON'T run `pnpm i18n`, let CI auto handle it
## Rules Index

View File

@@ -182,6 +182,13 @@
"title": "喜欢我们的产品?"
},
"fullscreen": "全屏模式",
"geminiImageChineseWarning": {
"content": "Nano Banana 使用中文有概率性生成图片失败。建议使用英文以获得更好的效果。",
"continueGenerate": "继续生成",
"continueSend": "继续发送",
"doNotShowAgain": "不再提示",
"title": "中文输入提示"
},
"historyRange": "历史范围",
"import": "导入",
"importData": "导入数据",

View File

@@ -208,7 +208,9 @@ export class AiInfraRepos {
const providerModels = modules[providerId];
// use the serverModelLists as the defined server model list
const presetList = this.providerConfigs[providerId]?.serverModelLists || providerModels;
// fallback to empty array for custom provider
const presetList = this.providerConfigs[providerId]?.serverModelLists || providerModels || [];
return (presetList as AIChatModelCard[]).map<AiProviderModelListItem>((m) => ({
...m,
enabled: m.enabled || false,

View File

@@ -96,7 +96,7 @@ describe('createRouterRuntime', () => {
});
const runtime = new Runtime();
const models = await runtime['getModels']({
const models = await runtime['getRouterMatchModels']({
id: 'test',
models: ['model-1', 'model-2'],
runtime: mockRuntime,
@@ -105,7 +105,7 @@ describe('createRouterRuntime', () => {
expect(models).toEqual(['model-1', 'model-2']);
});
it('should call and cache asynchronous models function', async () => {
it('should call asynchronous models function', async () => {
const mockRuntime = {
chat: vi.fn(),
} as unknown as LobeRuntimeAI;
@@ -131,14 +131,9 @@ describe('createRouterRuntime', () => {
runtime: mockRuntime,
};
// First call
const models1 = await runtime['getModels'](runtimeItem);
expect(models1).toEqual(['async-model-1', 'async-model-2']);
expect(mockModelsFunction).toHaveBeenCalledTimes(1);
// Second call should use cache
const models2 = await runtime['getModels'](runtimeItem);
expect(models2).toEqual(['async-model-1', 'async-model-2']);
// Call the function
const models = await runtime['getRouterMatchModels'](runtimeItem);
expect(models).toEqual(['async-model-1', 'async-model-2']);
expect(mockModelsFunction).toHaveBeenCalledTimes(1);
});
@@ -159,7 +154,7 @@ describe('createRouterRuntime', () => {
});
const runtime = new Runtime();
const models = await runtime['getModels']({
const models = await runtime['getRouterMatchModels']({
id: 'test',
runtime: mockRuntime,
});
@@ -455,84 +450,4 @@ describe('createRouterRuntime', () => {
expect(mockTextToSpeech).toHaveBeenCalledWith(payload, options);
});
});
describe('clearModelCache method', () => {
it('should clear specific runtime cache when runtimeId provided', async () => {
const mockModelsFunction = vi.fn().mockResolvedValue(['model-1']);
const Runtime = createRouterRuntime({
id: 'test-runtime',
routers: [
{
apiType: 'openai',
options: {},
runtime: vi.fn() as any,
models: mockModelsFunction,
},
],
});
const runtime = new Runtime();
const runtimeItem = {
id: 'test-id',
models: mockModelsFunction,
runtime: {} as any,
};
// Build cache
await runtime['getModels'](runtimeItem);
expect(mockModelsFunction).toHaveBeenCalledTimes(1);
// Clear specific cache
runtime.clearModelCache('test-id');
// Should call function again
await runtime['getModels'](runtimeItem);
expect(mockModelsFunction).toHaveBeenCalledTimes(2);
});
it('should clear all cache when no runtimeId provided', async () => {
const mockModelsFunction1 = vi.fn().mockResolvedValue(['model-1']);
const mockModelsFunction2 = vi.fn().mockResolvedValue(['model-2']);
const Runtime = createRouterRuntime({
id: 'test-runtime',
routers: [
{
apiType: 'openai',
options: {},
runtime: vi.fn() as any,
models: mockModelsFunction1,
},
],
});
const runtime = new Runtime();
const runtimeItem1 = {
id: 'test-id-1',
models: mockModelsFunction1,
runtime: {} as any,
};
const runtimeItem2 = {
id: 'test-id-2',
models: mockModelsFunction2,
runtime: {} as any,
};
// Build cache for both items
await runtime['getModels'](runtimeItem1);
await runtime['getModels'](runtimeItem2);
expect(mockModelsFunction1).toHaveBeenCalledTimes(1);
expect(mockModelsFunction2).toHaveBeenCalledTimes(1);
// Clear all cache
runtime.clearModelCache();
// Should call functions again
await runtime['getModels'](runtimeItem1);
await runtime['getModels'](runtimeItem2);
expect(mockModelsFunction1).toHaveBeenCalledTimes(2);
expect(mockModelsFunction2).toHaveBeenCalledTimes(2);
});
});
});

View File

@@ -117,7 +117,6 @@ export const createRouterRuntime = ({
return class UniformRuntime implements LobeRuntimeAI {
private _runtimes: RuntimeItem[];
private _options: ClientOptions & Record<string, any>;
private _modelCache = new Map<string, string[]>();
constructor(options: ClientOptions & Record<string, any> = {}) {
const _options = {
@@ -143,30 +142,21 @@ export const createRouterRuntime = ({
this._options = _options;
}
// Get runtime's models list, supporting both synchronous arrays and asynchronous functions with caching
private async getModels(runtimeItem: RuntimeItem): Promise<string[]> {
const cacheKey = runtimeItem.id;
// If it's a synchronous array, return directly without caching
// Get runtime's models list, supporting both synchronous arrays and asynchronous functions
private async getRouterMatchModels(runtimeItem: RuntimeItem): Promise<string[]> {
// If it's a synchronous array, return directly
if (typeof runtimeItem.models !== 'function') {
return runtimeItem.models || [];
}
// Check cache
if (this._modelCache.has(cacheKey)) {
return this._modelCache.get(cacheKey)!;
}
// Get model list and cache result
const models = await runtimeItem.models();
this._modelCache.set(cacheKey, models);
return models;
// Get model list
return await runtimeItem.models();
}
// Check if it can match a specific model, otherwise default to using the last runtime
async getRuntimeByModel(model: string) {
for (const runtimeItem of this._runtimes) {
const models = await this.getModels(runtimeItem);
const models = await this.getRouterMatchModels(runtimeItem);
if (models.includes(model)) {
return runtimeItem.runtime;
}
@@ -226,17 +216,5 @@ export const createRouterRuntime = ({
return runtime.textToSpeech!(payload, options);
}
/**
* Clear model list cache, forcing reload on next access
* @param runtimeId - Optional, specify to clear cache for a specific runtime, omit to clear all caches
*/
clearModelCache(runtimeId?: string) {
if (runtimeId) {
this._modelCache.delete(runtimeId);
} else {
this._modelCache.clear();
}
}
};
};

View File

@@ -71,29 +71,32 @@ export const LobeOpenRouterAI = createOpenAICompatibleRuntime({
// 处理前端获取的模型信息,转换为标准格式
const formattedModels = modelList.map((model) => {
const { endpoint } = model;
const endpointModel = endpoint?.model;
const displayName = model.slug?.toLowerCase().includes('deepseek')
? (model.name ?? model.slug)
: (model.short_name ?? model.name ?? model.slug);
const inputModalities = endpointModel?.input_modalities || model.input_modalities;
return {
contextWindowTokens: model.context_length,
description: model.description,
contextWindowTokens: endpoint?.context_length || model.context_length,
description: endpointModel?.description || model.description,
displayName,
functionCall: model.endpoint?.supports_tool_parameters || false,
id: model.slug,
functionCall: endpoint?.supports_tool_parameters || false,
id: endpoint?.model_variant_slug || model.slug,
maxOutput:
typeof model.endpoint?.max_completion_tokens === 'number'
? model.endpoint.max_completion_tokens
typeof endpoint?.max_completion_tokens === 'number'
? endpoint.max_completion_tokens
: undefined,
pricing: {
input: formatPrice(model.endpoint?.pricing?.prompt),
output: formatPrice(model.endpoint?.pricing?.completion),
input: formatPrice(endpoint?.pricing?.prompt),
output: formatPrice(endpoint?.pricing?.completion),
},
reasoning: model.endpoint?.supports_reasoning || false,
reasoning: endpoint?.supports_reasoning || false,
releasedAt: new Date(model.created_at).toISOString().split('T')[0],
vision:
(Array.isArray(model.input_modalities) && model.input_modalities.includes('image')) ||
false,
vision: Array.isArray(inputModalities) && inputModalities.includes('image'),
};
});

View File

@@ -19,11 +19,21 @@ export interface OpenRouterModelCard {
}
// Shape of the `endpoint` entry on OpenRouter's frontend model API response.
// Endpoint-level fields take precedence over the top-level model card when
// both are present (see the model list formatting logic in the runtime).
interface OpenRouterModelEndpoint {
  context_length?: number;
  max_completion_tokens: number | null;
  // Nested model metadata exposed on the endpoint entry; used as a fallback
  // source for description / input modalities when present
  model?: {
    description?: string;
    input_modalities?: string[];
    name?: string;
    short_name?: string;
    slug: string;
  };
  // Variant-specific slug (e.g. "model:free") — preferred over the top-level
  // model slug as the model id when present
  model_variant_slug?: string;
  pricing: ModelPricing;
  supported_parameters: string[];
  supports_reasoning?: boolean;
  supports_tool_parameters?: boolean;
  // NOTE(review): presumably distinguishes free vs paid endpoint variants — confirm upstream
  variant?: 'free' | 'standard' | 'unknown';
}
interface OpenRouterOpenAIReasoning {

View File

@@ -758,4 +758,70 @@ describe('modelParse', () => {
expect(modelConfigKeys.sort()).toEqual(providerDetectionKeys.sort());
});
});
  // Verifies the "Nano Banana" rebranding that processModelList applies to
  // displayName (via the processDisplayName helper): only the exact substring
  // "Gemini 2.5 Flash Image Preview" is replaced; surrounding text is kept.
  describe('displayName processing', () => {
    it('should replace "Gemini 2.5 Flash Image Preview" with "Nano Banana"', async () => {
      const modelList = [
        {
          id: 'gemini-2.5-flash-image-preview',
          displayName: 'Gemini 2.5 Flash Image Preview',
        },
        {
          id: 'some-other-model',
          displayName: 'Some Other Model',
        },
        {
          id: 'partial-gemini-model',
          displayName: 'Custom Gemini 2.5 Flash Image Preview Enhanced',
        },
        {
          id: 'gemini-free-model',
          displayName: 'Gemini 2.5 Flash Image Preview (free)',
        },
      ];

      const result = await processModelList(modelList, MODEL_LIST_CONFIGS.google);

      expect(result).toHaveLength(4);

      // First model should have "Nano Banana" as displayName
      const geminiModel = result.find((m) => m.id === 'gemini-2.5-flash-image-preview');
      expect(geminiModel?.displayName).toBe('Nano Banana');

      // Second model should keep original displayName
      const otherModel = result.find((m) => m.id === 'some-other-model');
      expect(otherModel?.displayName).toBe('Some Other Model');

      // Third model (partial match) should replace only the matching part
      const partialModel = result.find((m) => m.id === 'partial-gemini-model');
      expect(partialModel?.displayName).toBe('Custom Nano Banana Enhanced');

      // Fourth model should preserve the (free) suffix
      const freeModel = result.find((m) => m.id === 'gemini-free-model');
      expect(freeModel?.displayName).toBe('Nano Banana (free)');
    });

    it('should keep original displayName when not matching Gemini 2.5 Flash Image Preview', async () => {
      const modelList = [
        {
          id: 'gpt-4',
          displayName: 'GPT-4',
        },
        {
          id: 'gemini-pro',
          displayName: 'Gemini Pro',
        },
      ];

      const result = await processModelList(modelList, MODEL_LIST_CONFIGS.google);

      expect(result).toHaveLength(2);

      const gptModel = result.find((m) => m.id === 'gpt-4');
      expect(gptModel?.displayName).toBe('GPT-4');

      const geminiProModel = result.find((m) => m.id === 'gemini-pro');
      expect(geminiProModel?.displayName).toBe('Gemini Pro');
    });
  });
});

View File

@@ -264,6 +264,20 @@ const processReleasedAt = (model: any, knownModel?: any): string | undefined =>
return model.releasedAt ?? knownModel?.releasedAt ?? undefined;
};
/**
 * Normalize a model's display name.
 *
 * Rebrands "Gemini 2.5 Flash Image Preview" under its marketing name
 * "Nano Banana", preserving any surrounding text (e.g. a "(free)" suffix
 * or custom prefixes).
 *
 * @param displayName - the original display name
 * @returns the processed display name
 */
const processDisplayName = (displayName: string): string =>
  // String.replace is a no-op when the pattern is absent, so no includes() guard is needed
  displayName.replace('Gemini 2.5 Flash Image Preview', 'Nano Banana');
/**
* 处理模型卡片的通用逻辑
*/
@@ -331,9 +345,7 @@ const processModelCard = (
return {
contextWindowTokens: model.contextWindowTokens ?? knownModel?.contextWindowTokens ?? undefined,
description: model.description ?? knownModel?.description ?? '',
displayName: (model.displayName ?? knownModel?.displayName ?? model.id)
.replaceAll(/\s*[(][^)]*[)]\s*/g, '')
.trim(), // 去除括号内容
displayName: processDisplayName(model.displayName ?? knownModel?.displayName ?? model.id),
enabled: model?.enabled || false,
functionCall:
model.functionCall ??

View File

@@ -5,6 +5,7 @@ import type { ChatModelCard } from '@/types/llm';
// Whitelist for automatic image model generation
export const IMAGE_GENERATION_MODEL_WHITELIST = [
'gemini-2.5-flash-image-preview',
'gemini-2.5-flash-image-preview:free',
// More models can be added in the future
] as const;

View File

@@ -0,0 +1,37 @@
import { describe, expect, it } from 'vitest';
import { containsChinese } from './detectChinese';
describe('containsChinese', () => {
  it('should return true for text containing Chinese characters', () => {
    const positives = ['你好世界', 'Hello 世界', '测试 test', '这是一个测试'];
    for (const text of positives) {
      expect(containsChinese(text)).toBe(true);
    }
  });

  it('should return false for text without Chinese characters', () => {
    const negatives = ['Hello World', '123456', '!@#$%^&*()', '', 'English only text'];
    for (const text of negatives) {
      expect(containsChinese(text)).toBe(false);
    }
  });

  it('should handle mixed content correctly', () => {
    // Chinese mixed with other scripts is detected; kana/hangul alone are not
    expect(containsChinese('Hello 中国')).toBe(true);
    expect(containsChinese('English and 数字 123')).toBe(true);
    expect(containsChinese('Japanese こんにちは and English')).toBe(false);
    expect(containsChinese('Korean 안녕하세요 and English')).toBe(false);
  });

  it('should detect extended Chinese character ranges', () => {
    // CJK Unified Ideographs Extension A (U+3400-U+4DBF)
    expect(containsChinese('㐀㑇㒯')).toBe(true);
    // CJK Compatibility Ideographs (U+F900-U+FAFF)
    expect(containsChinese('豈更車')).toBe(true);
    // Traditional Chinese characters
    expect(containsChinese('繁體中文')).toBe(true);
    expect(containsChinese('學習語言')).toBe(true);
  });
});

View File

@@ -0,0 +1,12 @@
/**
 * Detect if text contains Chinese characters
 * @param text - The text to check
 * @returns true if text contains Chinese characters, false otherwise
 */
export const containsChinese = (text: string): boolean => {
  // Covered Chinese character ranges:
  // \u4E00-\u9FFF: CJK Unified Ideographs — the full block; the previous upper
  //   bound \u9FA5 missed ideographs appended since Unicode 4.1 (U+9FA6-U+9FFF)
  // \u3400-\u4DBF: CJK Unified Ideographs Extension A
  // \uF900-\uFAFF: CJK Compatibility Ideographs
  // NOTE: supplementary-plane extensions (B and later, U+20000+) are still not
  // covered — extend with \p{Script=Han} if those ever matter here.
  return /[\u3400-\u4DBF\u4E00-\u9FFF\uF900-\uFAFF]/.test(text);
};

View File

@@ -1,4 +1,5 @@
export * from './client/cookie';
export * from './detectChinese';
export * from './format';
export * from './imageToBase64';
export * from './parseModels';

View File

@@ -7,10 +7,27 @@ import { useUserStore } from '@/store/user';
import InputArea from './TextArea';
let sendMessageMock: () => Promise<void>;
// Mock the useSendMessage hook to return our mock function
vi.mock('@/features/ChatInput/useSend', () => ({
useSendMessage: () => ({
send: sendMessageMock,
canSend: true,
}),
}));
// Mock the Chinese warning hook to always allow sending
vi.mock('@/hooks/useGeminiChineseWarning', () => ({
useGeminiChineseWarning: () => () => Promise.resolve(true),
}));
let onSendMock: () => void;
beforeEach(() => {
onSendMock = vi.fn();
sendMessageMock = vi.fn().mockResolvedValue(undefined);
vi.clearAllMocks();
});
describe('<InputArea />', () => {
@@ -194,9 +211,8 @@ describe('<InputArea />', () => {
describe('message sending behavior', () => {
it('does not send message when loading or shift key is pressed', () => {
const sendMessageMock = vi.fn();
act(() => {
useChatStore.setState({ chatLoadingIds: ['123'], sendMessage: sendMessageMock });
useChatStore.setState({ chatLoadingIds: ['123'] });
});
render(<InputArea onSend={onSendMock} />);
@@ -206,13 +222,11 @@ describe('<InputArea />', () => {
expect(sendMessageMock).not.toHaveBeenCalled();
});
it('sends message on Enter press when not loading and no shift key', () => {
const sendMessageMock = vi.fn();
it('sends message on Enter press when not loading and no shift key', async () => {
act(() => {
useChatStore.setState({
chatLoadingIds: [],
inputMessage: 'abc',
sendMessage: sendMessageMock,
});
});
@@ -221,17 +235,18 @@ describe('<InputArea />', () => {
fireEvent.change(textArea, { target: { value: 'Test message' } });
fireEvent.keyDown(textArea, { code: 'Enter', key: 'Enter' });
expect(sendMessageMock).toHaveBeenCalled();
await vi.waitFor(() => {
expect(sendMessageMock).toHaveBeenCalled();
});
});
describe('metaKey behavior for sending messages', () => {
it('windows: sends message on ctrl + enter when useCmdEnterToSend is true', () => {
const sendMessageMock = vi.fn();
it('windows: sends message on ctrl + enter when useCmdEnterToSend is true', async () => {
act(() => {
useChatStore.setState({
chatLoadingIds: [],
inputMessage: '123',
sendMessage: sendMessageMock,
});
useUserStore.getState().updatePreference({ useCmdEnterToSend: true });
});
@@ -240,17 +255,18 @@ describe('<InputArea />', () => {
const textArea = screen.getByRole('textbox');
fireEvent.keyDown(textArea, { code: 'Enter', ctrlKey: true, key: 'Enter' });
expect(sendMessageMock).toHaveBeenCalled();
await vi.waitFor(() => {
expect(sendMessageMock).toHaveBeenCalled();
});
});
it('windows: inserts a new line on ctrl + enter when useCmdEnterToSend is false', () => {
const sendMessageMock = vi.fn();
const updateInputMessageMock = vi.fn();
act(() => {
useChatStore.setState({
chatLoadingIds: [],
inputMessage: 'Test',
sendMessage: sendMessageMock,
updateInputMessage: updateInputMessageMock,
});
useUserStore.getState().updatePreference({ useCmdEnterToSend: false });
@@ -264,17 +280,15 @@ describe('<InputArea />', () => {
expect(sendMessageMock).not.toHaveBeenCalled(); // sendMessage should not be called
});
it('macOS: sends message on cmd + enter when useCmdEnterToSend is true', () => {
it('macOS: sends message on cmd + enter when useCmdEnterToSend is true', async () => {
vi.stubGlobal('navigator', {
userAgent:
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36',
});
const sendMessageMock = vi.fn();
act(() => {
useChatStore.setState({
chatLoadingIds: [],
inputMessage: '123',
sendMessage: sendMessageMock,
});
useUserStore.getState().updatePreference({ useCmdEnterToSend: true });
});
@@ -283,7 +297,10 @@ describe('<InputArea />', () => {
const textArea = screen.getByRole('textbox');
fireEvent.keyDown(textArea, { code: 'Enter', key: 'Enter', metaKey: true });
expect(sendMessageMock).toHaveBeenCalled();
await vi.waitFor(() => {
expect(sendMessageMock).toHaveBeenCalled();
});
vi.restoreAllMocks();
});
@@ -292,13 +309,11 @@ describe('<InputArea />', () => {
userAgent:
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36',
});
const sendMessageMock = vi.fn();
const updateInputMessageMock = vi.fn();
act(() => {
useChatStore.setState({
chatLoadingIds: [],
inputMessage: 'Test',
sendMessage: sendMessageMock,
updateInputMessage: updateInputMessageMock,
});
useUserStore.getState().updatePreference({ useCmdEnterToSend: false });

View File

@@ -8,9 +8,11 @@ import { useTranslation } from 'react-i18next';
import { Flexbox } from 'react-layout-kit';
import { loginRequired } from '@/components/Error/loginRequiredNotification';
import { useGeminiChineseWarning } from '@/hooks/useGeminiChineseWarning';
import { useImageStore } from '@/store/image';
import { createImageSelectors } from '@/store/image/selectors';
import { useGenerationConfigParam } from '@/store/image/slices/generationConfig/hooks';
import { imageGenerationConfigSelectors } from '@/store/image/slices/generationConfig/selectors';
import { useUserStore } from '@/store/user';
import { authSelectors } from '@/store/user/slices/auth/selectors';
@@ -49,13 +51,23 @@ const PromptInput = ({ showTitle = false }: PromptInputProps) => {
const { value, setValue } = useGenerationConfigParam('prompt');
const isCreating = useImageStore(createImageSelectors.isCreating);
const createImage = useImageStore((s) => s.createImage);
const currentModel = useImageStore(imageGenerationConfigSelectors.model);
const isLogin = useUserStore(authSelectors.isLogin);
const checkGeminiChineseWarning = useGeminiChineseWarning();
const handleGenerate = async () => {
if (!isLogin) {
loginRequired.redirect({ timeout: 2000 });
return;
}
// Check for Chinese text warning with Gemini model
const shouldContinue = await checkGeminiChineseWarning({
model: currentModel,
prompt: value,
scenario: 'image',
});
if (!shouldContinue) return;
await createImage();
};

View File

@@ -1,6 +1,7 @@
import { useAnalytics } from '@lobehub/analytics/react';
import { useCallback, useMemo } from 'react';
import { useGeminiChineseWarning } from '@/hooks/useGeminiChineseWarning';
import { getAgentStoreState } from '@/store/agent';
import { agentSelectors } from '@/store/agent/selectors';
import { useChatStore } from '@/store/chat';
@@ -20,6 +21,7 @@ export const useSendMessage = () => {
s.updateInputMessage,
]);
const { analytics } = useAnalytics();
const checkGeminiChineseWarning = useGeminiChineseWarning();
const clearChatUploadFileList = useFileStore((s) => s.clearChatUploadFileList);
@@ -28,7 +30,7 @@ export const useSendMessage = () => {
const canSend = !isUploadingFiles && !isSendButtonDisabledByMessage;
const send = useCallback((params: UseSendMessageParams = {}) => {
const send = useCallback(async (params: UseSendMessageParams = {}) => {
const store = useChatStore.getState();
if (chatSelectors.isAIGenerating(store)) return;
@@ -45,6 +47,17 @@ export const useSendMessage = () => {
// if there is no message and no image, then we should not send the message
if (!store.inputMessage && fileList.length === 0) return;
// Check for Chinese text warning with Gemini model
const agentStore = getAgentStoreState();
const currentModel = agentSelectors.currentAgentModel(agentStore);
const shouldContinue = await checkGeminiChineseWarning({
model: currentModel,
prompt: store.inputMessage,
scenario: 'chat',
});
if (!shouldContinue) return;
sendMessage({
files: fileList,
message: store.inputMessage,
@@ -56,7 +69,6 @@ export const useSendMessage = () => {
// 获取分析数据
const userStore = getUserStoreState();
const agentStore = getAgentStoreState();
// 直接使用现有数据结构判断消息类型
const hasImages = fileList.some((file) => file.file?.type?.startsWith('image'));

View File

@@ -0,0 +1,91 @@
import { containsChinese } from '@lobechat/utils';
import { App, Checkbox } from 'antd';
import React, { useCallback } from 'react';
import { useTranslation } from 'react-i18next';
import { useGlobalStore } from '@/store/global';
import { systemStatusSelectors } from '@/store/global/selectors';
/**
 * Decide whether the Chinese-prompt warning modal applies.
 * It is only relevant for the Gemini 2.5 Flash Image Preview ("Nano Banana")
 * model, when the user has not dismissed it and the prompt contains Chinese.
 */
const shouldShowChineseWarning = (
  model: string,
  prompt: string,
  hasWarningBeenDismissed: boolean,
): boolean => {
  if (!model.includes('gemini-2.5-flash-image-preview')) return false;
  if (hasWarningBeenDismissed) return false;
  if (!prompt) return false;

  return containsChinese(prompt);
};
interface UseGeminiChineseWarningOptions {
  // Current model id, matched against 'gemini-2.5-flash-image-preview'
  model: string;
  // User prompt to scan for Chinese characters
  prompt: string;
  // Picks the confirm-button copy: 'continueSend' (chat) vs 'continueGenerate' (image)
  scenario?: 'chat' | 'image';
}

/**
 * Hook returning an async gate to run before sending a prompt.
 *
 * When the Gemini 2.5 Flash Image Preview ("Nano Banana") model receives a
 * Chinese prompt, a confirmation modal warns that image generation may fail
 * (see the `geminiImageChineseWarning` locale copy). The returned function
 * resolves `true` when sending may proceed (no warning needed, or the user
 * confirmed) and `false` when the user cancelled. Ticking "do not show again"
 * persists the dismissal in the global system status.
 */
export const useGeminiChineseWarning = () => {
  const { t } = useTranslation('common');
  const { modal } = App.useApp();

  const [hideGeminiChineseWarning, updateSystemStatus] = useGlobalStore((s) => [
    systemStatusSelectors.systemStatus(s).hideGemini2_5FlashImagePreviewChineseWarning ?? false,
    s.updateSystemStatus,
  ]);

  const checkWarning = useCallback(
    async ({
      model,
      prompt,
      scenario = 'chat',
    }: UseGeminiChineseWarningOptions): Promise<boolean> => {
      // Fast path: no warning applies — allow sending immediately
      if (!shouldShowChineseWarning(model, prompt, hideGeminiChineseWarning)) {
        return true;
      }

      // Wrap the imperative antd modal in a Promise so callers can simply await
      return new Promise<boolean>((resolve) => {
        // Captured by the Checkbox onChange closure; read once in onOk
        let doNotShowAgain = false;

        // Choose the confirm-button label for the current scenario
        const continueText =
          scenario === 'image'
            ? t('geminiImageChineseWarning.continueGenerate')
            : t('geminiImageChineseWarning.continueSend');

        modal.confirm({
          cancelText: t('cancel', { ns: 'common' }),
          centered: true,
          content: (
            <div>
              <p>{t('geminiImageChineseWarning.content')}</p>
              <div style={{ marginTop: 16 }}>
                <Checkbox
                  onChange={(e) => {
                    doNotShowAgain = e.target.checked;
                  }}
                >
                  {t('geminiImageChineseWarning.doNotShowAgain')}
                </Checkbox>
              </div>
            </div>
          ),
          okText: continueText,
          onCancel: () => {
            resolve(false);
          },
          onOk: () => {
            // Persist the opt-out so the modal is skipped on future sends
            if (doNotShowAgain) {
              updateSystemStatus({ hideGemini2_5FlashImagePreviewChineseWarning: true });
            }
            resolve(true);
          },
          title: t('geminiImageChineseWarning.title'),
        });
      });
    },
    [modal, t, hideGeminiChineseWarning, updateSystemStatus],
  );

  return checkWarning;
};

View File

@@ -185,6 +185,13 @@ export default {
title: '喜欢我们的产品?',
},
fullscreen: '全屏模式',
geminiImageChineseWarning: {
content: 'Nano Banana 使用中文有概率性生成图片失败。建议使用英文以获得更好的效果。',
continueGenerate: '继续生成',
continueSend: '继续发送',
doNotShowAgain: '不再提示',
title: '中文输入提示',
},
historyRange: '历史范围',
import: '导入',
importData: '导入数据',

View File

@@ -50,6 +50,7 @@ export interface SystemStatus {
// which sessionGroup should expand
expandSessionGroupKeys: string[];
filePanelWidth: number;
hideGemini2_5FlashImagePreviewChineseWarning?: boolean;
hidePWAInstaller?: boolean;
hideThreadLimitAlert?: boolean;
imagePanelWidth: number;
@@ -108,6 +109,7 @@ export interface GlobalState {
export const INITIAL_STATUS = {
expandSessionGroupKeys: [SessionDefaultGroup.Pinned, SessionDefaultGroup.Default],
filePanelWidth: 320,
hideGemini2_5FlashImagePreviewChineseWarning: false,
hidePWAInstaller: false,
hideThreadLimitAlert: false,
imagePanelWidth: 320,