mirror of
https://github.com/lobehub/lobehub.git
synced 2026-03-26 13:19:34 +07:00
🐛 fix(memory): respect agent-level memory toggle when injecting memories (#13265)
* 🐛 fix(memory): respect agent-level memory toggle when injecting memories

When the user disables the memory toggle in ChatInput (which writes to the agent-level chatConfig.memory.enabled), the actual message-sending path in chat/index.ts was only checking the user-level memoryEnabled setting, completely ignoring the agent-level override.

This aligns the injection logic with the useMemoryEnabled hook: the agent-level config takes priority and falls back to the user-level setting.

Also fixes the pre-commit hook to use bunx instead of npx, ensuring the correct ESLint version (v10) is used in the monorepo context.

Adds regression tests verifying all three priority scenarios.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>

* Update pre-commit

---------

Co-authored-by: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
@@ -9,8 +9,10 @@ import * as toolEngineeringModule from '@/helpers/toolEngineering';
|
||||
import { agentSelectors, chatConfigByIdSelectors } from '@/store/agent/selectors';
|
||||
import { aiModelSelectors } from '@/store/aiInfra';
|
||||
import { useToolStore } from '@/store/tool';
|
||||
import { settingsSelectors } from '@/store/user/selectors';
|
||||
|
||||
import { chatService } from './index';
|
||||
import * as mechaModule from './mecha';
|
||||
import { type ResolvedAgentConfig } from './mecha';
|
||||
|
||||
// Helper to compute expected date content from SystemDateProvider
|
||||
@@ -1357,6 +1359,74 @@ describe('ChatService', () => {
|
||||
);
|
||||
});
|
||||
});
|
||||
|
||||
describe('memory enablement priority', () => {
|
||||
it('should respect agent-level memory disabled even when user-level memory is enabled', async () => {
|
||||
const contextEngineeringSpy = vi
|
||||
.spyOn(mechaModule, 'contextEngineering')
|
||||
.mockResolvedValue([]);
|
||||
// user-level memory is enabled
|
||||
vi.spyOn(settingsSelectors, 'memoryEnabled').mockReturnValue(true);
|
||||
|
||||
const messages = [{ content: 'Hello', role: 'user' }] as UIChatMessage[];
|
||||
|
||||
await chatService.createAssistantMessage({
|
||||
messages,
|
||||
resolvedAgentConfig: createMockResolvedConfig({
|
||||
chatConfig: { memory: { enabled: false } },
|
||||
}),
|
||||
});
|
||||
|
||||
// agent-level off takes priority over user-level on
|
||||
expect(contextEngineeringSpy).toHaveBeenCalledWith(
|
||||
expect.objectContaining({ enableUserMemories: false }),
|
||||
);
|
||||
});
|
||||
|
||||
it('should enable memory when agent-level is on even if user-level memory is disabled', async () => {
|
||||
const contextEngineeringSpy = vi
|
||||
.spyOn(mechaModule, 'contextEngineering')
|
||||
.mockResolvedValue([]);
|
||||
// user-level memory is disabled
|
||||
vi.spyOn(settingsSelectors, 'memoryEnabled').mockReturnValue(false);
|
||||
|
||||
const messages = [{ content: 'Hello', role: 'user' }] as UIChatMessage[];
|
||||
|
||||
await chatService.createAssistantMessage({
|
||||
messages,
|
||||
resolvedAgentConfig: createMockResolvedConfig({
|
||||
chatConfig: { memory: { enabled: true } },
|
||||
}),
|
||||
});
|
||||
|
||||
// agent-level on takes priority over user-level off
|
||||
expect(contextEngineeringSpy).toHaveBeenCalledWith(
|
||||
expect.objectContaining({ enableUserMemories: true }),
|
||||
);
|
||||
});
|
||||
|
||||
it('should fall back to user-level setting when agent-level memory is not configured', async () => {
|
||||
const contextEngineeringSpy = vi
|
||||
.spyOn(mechaModule, 'contextEngineering')
|
||||
.mockResolvedValue([]);
|
||||
// user-level memory is disabled
|
||||
vi.spyOn(settingsSelectors, 'memoryEnabled').mockReturnValue(false);
|
||||
|
||||
const messages = [{ content: 'Hello', role: 'user' }] as UIChatMessage[];
|
||||
|
||||
await chatService.createAssistantMessage({
|
||||
messages,
|
||||
resolvedAgentConfig: createMockResolvedConfig({
|
||||
chatConfig: {},
|
||||
}),
|
||||
});
|
||||
|
||||
// no agent-level config, fallback to user-level off
|
||||
expect(contextEngineeringSpy).toHaveBeenCalledWith(
|
||||
expect.objectContaining({ enableUserMemories: false }),
|
||||
);
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
describe('getChatCompletion', () => {
|
||||
|
||||
@@ -141,7 +141,10 @@ class ChatService {
|
||||
|
||||
// =================== 1.1 process user memories =================== //
|
||||
|
||||
const enableUserMemories = settingsSelectors.memoryEnabled(getUserStoreState());
|
||||
const userLevelMemoryEnabled = settingsSelectors.memoryEnabled(getUserStoreState());
|
||||
// Agent-level memory toggle takes priority over user-level setting,
|
||||
// matching the logic in useMemoryEnabled hook
|
||||
const enableUserMemories = chatConfig.memory?.enabled ?? userLevelMemoryEnabled;
|
||||
const userMemorySettings = settingsSelectors.currentMemorySettings(getUserStoreState());
|
||||
const effectiveMemoryEffort =
|
||||
chatConfig.memory?.effort ?? userMemorySettings.effort ?? 'medium';
|
||||
|
||||
Reference in New Issue
Block a user