🐛 fix: fix sub-task issue (#11777)

* fix sub-task issue

* fix tests
This commit is contained in:
Arvin Xu
2026-01-24 18:00:30 +08:00
committed by GitHub
parent e8526a9574
commit 8ae345647e
4 changed files with 113 additions and 36 deletions

View File

@@ -469,10 +469,22 @@ export class GeneralChatAgent implements Agent {
// Async tasks batch completed, continue to call LLM with results
const { parentMessageId } = context.payload as TasksBatchResultPayload;
// Inject a virtual user message to force the model to summarize or continue
// This fixes an issue where some models (e.g., Kimi K2) return empty content
// when the last message is a task result, thinking the task is already done
const messagesWithPrompt = [
...state.messages,
{
content:
'All tasks above have been completed. Please summarize the results or continue with your response following user query language.',
role: 'user' as const,
},
];
// Continue to call LLM with updated messages (task messages are already in state)
return {
payload: {
messages: state.messages,
messages: messagesWithPrompt,
model: this.config.modelRuntimeConfig?.model,
parentMessageId,
provider: this.config.modelRuntimeConfig?.provider,

View File

@@ -1213,7 +1213,14 @@ describe('GeneralChatAgent', () => {
expect(result).toEqual({
type: 'call_llm',
payload: {
messages: state.messages,
messages: [
...state.messages,
{
content:
'All tasks above have been completed. Please summarize the results or continue with your response following user query language.',
role: 'user',
},
],
model: 'gpt-4o-mini',
parentMessageId: 'task-parent-msg',
provider: 'openai',
@@ -1256,7 +1263,14 @@ describe('GeneralChatAgent', () => {
expect(result).toEqual({
type: 'call_llm',
payload: {
messages: state.messages,
messages: [
...state.messages,
{
content:
'All tasks above have been completed. Please summarize the results or continue with your response following user query language.',
role: 'user',
},
],
model: 'gpt-4o-mini',
parentMessageId: 'task-parent-msg',
provider: 'openai',

View File

@@ -3,50 +3,85 @@
import { AccordionItem, Block, Text } from '@lobehub/ui';
import { memo, useMemo, useState } from 'react';
import { useChatStore } from '@/store/chat';
import { displayMessageSelectors } from '@/store/chat/selectors';
import { messageMapKey } from '@/store/chat/utils/messageMapKey';
import { ThreadStatus } from '@/types/index';
import type { UIChatMessage } from '@/types/index';
import {
CompletedState,
ErrorState,
InitializingState,
ProcessingState,
isProcessingStatus,
} from '../shared';
import ClientTaskDetailCompletedState from '../../Task/ClientTaskDetail/CompletedState';
import ClientTaskDetailProcessingState from '../../Task/ClientTaskDetail/ProcessingState';
import { ErrorState, InitializingState, isProcessingStatus } from '../shared';
import TaskTitle, { type TaskMetrics } from './TaskTitle';
import { useClientTaskStats } from './useClientTaskStats';
interface ClientTaskItemProps {
item: UIChatMessage;
}
const ClientTaskItem = memo<ClientTaskItemProps>(({ item }) => {
const { id, content, metadata, taskDetail } = item;
const { id, metadata, taskDetail } = item;
const [expanded, setExpanded] = useState(false);
const title = taskDetail?.title || metadata?.taskTitle;
const instruction = metadata?.instruction;
const status = taskDetail?.status;
const threadId = taskDetail?.threadId;
const isProcessing = isProcessingStatus(status);
const isCompleted = status === ThreadStatus.Completed;
const isError = status === ThreadStatus.Failed || status === ThreadStatus.Cancel;
const isInitializing = !taskDetail || !status;
// Fetch client task stats when processing
const clientStats = useClientTaskStats({
enabled: isProcessing,
threadId: taskDetail?.threadId,
});
// Fetch thread messages for client mode (like Task/ClientTaskDetail)
const [activeAgentId, activeTopicId, useFetchMessages] = useChatStore((s) => [
s.activeAgentId,
s.activeTopicId,
s.useFetchMessages,
]);
// Build metrics for TaskTitle
const threadContext = useMemo(
() => ({
agentId: activeAgentId,
scope: 'thread' as const,
threadId,
topicId: activeTopicId,
}),
[activeAgentId, activeTopicId, threadId],
);
const threadMessageKey = useMemo(
() => (threadId ? messageMapKey(threadContext) : null),
[threadId, threadContext],
);
// Fetch thread messages (skip when executing - messages come from real-time updates)
useFetchMessages(threadContext, isProcessing);
// Get thread messages from store using selector
const threadMessages = useChatStore((s) =>
threadMessageKey
? displayMessageSelectors.getDisplayMessagesByKey(threadMessageKey)(s)
: undefined,
);
// Find the assistantGroup message which contains the children blocks
const assistantGroupMessage = threadMessages?.find((item) => item.role === 'assistantGroup');
const blocks = assistantGroupMessage?.children;
const childrenCount = blocks?.length ?? 0;
// Get model/provider from assistantGroup message
const model = assistantGroupMessage?.model;
const provider = assistantGroupMessage?.provider;
// Build metrics for TaskTitle based on blocks data
const metrics: TaskMetrics | undefined = useMemo(() => {
if (isProcessing) {
if (isProcessing && blocks) {
const toolCalls = blocks.reduce((sum, block) => sum + (block.tools?.length || 0), 0);
return {
isLoading: clientStats.isLoading,
startTime: clientStats.startTime,
steps: clientStats.steps,
toolCalls: clientStats.toolCalls,
isLoading: false,
startTime: assistantGroupMessage?.createdAt,
steps: blocks.length,
toolCalls,
};
}
if (isCompleted || isError) {
@@ -61,12 +96,16 @@ const ClientTaskItem = memo<ClientTaskItemProps>(({ item }) => {
isProcessing,
isCompleted,
isError,
clientStats,
blocks,
assistantGroupMessage?.createdAt,
taskDetail?.duration,
taskDetail?.totalSteps,
taskDetail?.totalToolCalls,
]);
// Check if we have blocks to show (for Processing and Completed states)
const hasBlocks = blocks && childrenCount > 0;
return (
<AccordionItem
expand={expanded}
@@ -85,24 +124,34 @@ const ClientTaskItem = memo<ClientTaskItemProps>(({ item }) => {
</Block>
)}
{/* Initializing State - no taskDetail yet */}
{isInitializing && <InitializingState />}
{/* Initializing State - no taskDetail yet or no blocks */}
{(isInitializing || (isProcessing && !hasBlocks)) && <InitializingState />}
{/* Processing State */}
{!isInitializing && isProcessing && taskDetail && (
<ProcessingState messageId={id} taskDetail={taskDetail} variant="compact" />
{/* Processing State - show streaming blocks */}
{!isInitializing && isProcessing && hasBlocks && (
<ClientTaskDetailProcessingState
assistantId={assistantGroupMessage!.id}
blocks={blocks!}
model={model ?? undefined}
provider={provider ?? undefined}
startTime={assistantGroupMessage?.createdAt}
/>
)}
{/* Error State */}
{!isInitializing && isError && taskDetail && <ErrorState taskDetail={taskDetail} />}
{/* Completed State */}
{!isInitializing && isCompleted && taskDetail && (
<CompletedState
content={content}
expanded={expanded}
taskDetail={taskDetail}
variant="compact"
{/* Completed State - show blocks with final result */}
{!isInitializing && isCompleted && taskDetail && hasBlocks && (
<ClientTaskDetailCompletedState
assistantId={assistantGroupMessage!.id}
blocks={blocks!}
duration={taskDetail.duration}
model={model ?? undefined}
provider={provider ?? undefined}
totalCost={taskDetail.totalCost}
totalTokens={taskDetail.totalTokens}
totalToolCalls={taskDetail.totalToolCalls}
/>
)}
</Block>

View File

@@ -322,6 +322,7 @@ export const createAgentExecutors = (context: {
context: {
agentId: opContext.agentId!,
topicId: opContext.topicId,
threadId: opContext.threadId,
},
parentOperationId: context.operationId,
metadata: {
@@ -367,6 +368,7 @@ export const createAgentExecutors = (context: {
context: {
agentId: opContext.agentId!,
topicId: opContext.topicId,
threadId: opContext.threadId,
},
parentOperationId: toolOperationId,
metadata: {