🐛 fix(model-runtime): handle Qwen tool_calls without initial arguments (#11211)

* 🐛 fix(model-runtime): handle Qwen tool_calls without initial arguments

Qwen models (e.g., qwen3-vl-235b-a22b-thinking) send tool_calls in
two separate chunks:
1. First chunk: {id, name} without arguments
2. Second chunk: {id, arguments} without name

Previously, the code directly passed `value.function`, which caused
undefined values for arguments/name in respective chunks.

Changes:
- Add default values for function.arguments (empty string) and
  function.name (null) in Qwen stream transformer
- Align behavior with OpenAI/vLLM stream handling
- Add test cases for split tool_call chunks scenario

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>

* 🐛 fix: fix openai parallel tools calling in chat completion

* 💄 style: improve style

---------

Co-authored-by: Claude Opus 4.5 <noreply@anthropic.com>
This commit is contained in:
Arvin Xu
2026-01-05 01:48:07 +08:00
committed by GitHub
parent 0205cf73bd
commit 5321d9112d
7 changed files with 343 additions and 13 deletions

View File

@@ -47,7 +47,7 @@ export const SearchInspector = memo<BuiltinInspectorProps<SearchQuery, UniformSe
)}
>
<span>{t('builtins.lobe-web-browsing.apiName.search')}: </span>
{query && <span className={highlightTextStyles.gold}>{query}</span>}
{query && <span className={highlightTextStyles.primary}>{query}</span>}
{!isLoading &&
!isArgumentsStreaming &&
pluginState?.results &&

View File

@@ -28,7 +28,6 @@ const lobehubChatModels: AIChatModelCard[] = [
releasedAt: '2025-12-11',
settings: {
extendParams: ['gpt5_2ReasoningEffort', 'textVerbosity'],
searchImpl: 'params',
},
type: 'chat',
},
@@ -56,7 +55,6 @@ const lobehubChatModels: AIChatModelCard[] = [
releasedAt: '2025-11-13',
settings: {
extendParams: ['gpt5_1ReasoningEffort', 'textVerbosity'],
searchImpl: 'params',
},
type: 'chat',
},
@@ -84,7 +82,6 @@ const lobehubChatModels: AIChatModelCard[] = [
releasedAt: '2025-08-07',
settings: {
extendParams: ['reasoningEffort'],
searchImpl: 'params',
},
type: 'chat',
},

View File

@@ -1203,6 +1203,173 @@ describe('OpenAIStream', () => {
'thoughtSignature',
);
});
it('should handle GPT-5.2 parallel tool calls with correct id mapping', async () => {
// GPT-5.2 returns multiple tool calls in parallel with different indices
// Each tool call starts with id+name, followed by arguments-only chunks
// The key issue is that subsequent chunks without id should use the correct id
// based on their index, not the first tool's id
const streamData = [
// Tool 0: first chunk with id
{
id: 'chatcmpl-test',
choices: [
{
index: 0,
delta: {
tool_calls: [
{
id: 'call_tool0',
type: 'function',
function: { name: 'search', arguments: '' },
index: 0,
},
],
},
},
],
},
// Tool 0: arguments chunk
{
id: 'chatcmpl-test',
choices: [
{
index: 0,
delta: {
tool_calls: [{ function: { arguments: '{"query":' }, index: 0 }],
},
},
],
},
// Tool 1: first chunk with id (parallel tool call starts)
{
id: 'chatcmpl-test',
choices: [
{
index: 0,
delta: {
tool_calls: [
{
id: 'call_tool1',
type: 'function',
function: { name: 'search', arguments: '' },
index: 1,
},
],
},
},
],
},
// Tool 0: more arguments (continuing tool 0)
{
id: 'chatcmpl-test',
choices: [
{
index: 0,
delta: {
tool_calls: [{ function: { arguments: ' "test0"}' }, index: 0 }],
},
},
],
},
// Tool 1: arguments chunk
{
id: 'chatcmpl-test',
choices: [
{
index: 0,
delta: {
tool_calls: [{ function: { arguments: '{"query": "test1"}' }, index: 1 }],
},
},
],
},
// Tool 2: first chunk with id
{
id: 'chatcmpl-test',
choices: [
{
index: 0,
delta: {
tool_calls: [
{
id: 'call_tool2',
type: 'function',
function: { name: 'search', arguments: '' },
index: 2,
},
],
},
},
],
},
// Tool 2: arguments chunk
{
id: 'chatcmpl-test',
choices: [
{
index: 0,
delta: {
tool_calls: [{ function: { arguments: '{"query": "test2"}' }, index: 2 }],
},
},
],
},
// Finish
{
id: 'chatcmpl-test',
choices: [{ index: 0, delta: {}, finish_reason: 'tool_calls' }],
},
];
const mockOpenAIStream = new ReadableStream({
start(controller) {
streamData.forEach((data) => {
controller.enqueue(data);
});
controller.close();
},
});
const protocolStream = OpenAIStream(mockOpenAIStream);
const decoder = new TextDecoder();
const chunks: string[] = [];
// @ts-ignore
for await (const chunk of protocolStream) {
chunks.push(decoder.decode(chunk, { stream: true }));
}
// Verify the exact output - each tool call chunk should have the correct id based on index
expect(chunks).toEqual(
[
'id: chatcmpl-test',
'event: tool_calls',
`data: [{"function":{"arguments":"","name":"search"},"id":"call_tool0","index":0,"type":"function"}]\n`,
'id: chatcmpl-test',
'event: tool_calls',
`data: [{"function":{"arguments":"{\\"query\\":","name":null},"id":"call_tool0","index":0,"type":"function"}]\n`,
'id: chatcmpl-test',
'event: tool_calls',
`data: [{"function":{"arguments":"","name":"search"},"id":"call_tool1","index":1,"type":"function"}]\n`,
'id: chatcmpl-test',
'event: tool_calls',
`data: [{"function":{"arguments":" \\"test0\\"}","name":null},"id":"call_tool0","index":0,"type":"function"}]\n`,
'id: chatcmpl-test',
'event: tool_calls',
`data: [{"function":{"arguments":"{\\"query\\": \\"test1\\"}","name":null},"id":"call_tool1","index":1,"type":"function"}]\n`,
'id: chatcmpl-test',
'event: tool_calls',
`data: [{"function":{"arguments":"","name":"search"},"id":"call_tool2","index":2,"type":"function"}]\n`,
'id: chatcmpl-test',
'event: tool_calls',
`data: [{"function":{"arguments":"{\\"query\\": \\"test2\\"}","name":null},"id":"call_tool2","index":2,"type":"function"}]\n`,
'id: chatcmpl-test',
'event: stop',
`data: "tool_calls"\n`,
].map((i) => `${i}\n`),
);
});
});
describe('Reasoning', () => {

View File

@@ -157,13 +157,35 @@ const transformOpenAIStream = (
);
if (tool_calls.length > 0) {
// Validate tool calls - function must exist for valid tool calls
// This ensures proper error handling for malformed chunks
const hasInvalidToolCall = item.delta.tool_calls.some((tc) => tc.function === null);
if (hasInvalidToolCall) {
throw new Error('Invalid tool call: function is null');
}
return {
data: item.delta.tool_calls.map((value, index): StreamToolCallChunkData => {
if (streamContext && !streamContext.tool) {
data: item.delta.tool_calls.map((value, mapIndex): StreamToolCallChunkData => {
// Determine the actual tool index
const toolIndex = typeof value.index !== 'undefined' ? value.index : mapIndex;
// Store tool info by index for parallel tool calls (e.g., GPT-5.2)
// When a chunk has id and name, it's the start of a new tool call
if (streamContext && value.id && value.function?.name) {
if (!streamContext.tools) streamContext.tools = {};
streamContext.tools[toolIndex] = {
id: value.id,
index: toolIndex,
name: value.function.name,
};
}
// Also maintain backward compatibility with single tool context
if (streamContext && !streamContext.tool && value.id) {
streamContext.tool = {
id: value.id!,
index: value.index,
name: value.function!.name!,
index: toolIndex,
name: value.function?.name ?? '',
};
}
@@ -172,10 +194,12 @@ const transformOpenAIStream = (
arguments: value.function?.arguments ?? '',
name: value.function?.name ?? null,
},
// Priority: explicit id > tools map by index > single tool fallback > generated id
id:
value.id ||
streamContext?.tools?.[toolIndex]?.id ||
streamContext?.tool?.id ||
generateToolCallId(index, value.function?.name),
generateToolCallId(mapIndex, value.function?.name),
// mistral's tool calling don't have index and function field, it's data like:
// [{"id":"xbhnmTtY7","function":{"name":"lobe-image-designer____text2image____builtin","arguments":"{\"prompts\": [\"A photo of a small, fluffy dog with a playful expression and wagging tail.\", \"A watercolor painting of a small, energetic dog with a glossy coat and bright eyes.\", \"A vector illustration of a small, adorable dog with a short snout and perky ears.\", \"A drawing of a small, scruffy dog with a mischievous grin and a wagging tail.\"], \"quality\": \"standard\", \"seeds\": [123456, 654321, 111222, 333444], \"size\": \"1024x1024\", \"style\": \"vivid\"}"}}]
@@ -184,7 +208,7 @@ const transformOpenAIStream = (
// [{"id":"call_function_4752059746","type":"function","function":{"name":"lobe-image-designer____text2image____builtin","arguments":"{\"prompts\": [\"一个流浪的地球,背景是浩瀚"}}]
// so we need to add these default values
index: typeof value.index !== 'undefined' ? value.index : index,
index: toolIndex,
type: value.type || 'function',
};

View File

@@ -59,6 +59,11 @@ export interface StreamContext {
name: string;
};
toolIndex?: number;
/**
* Map of tool information by index for parallel tool calls
* Used when multiple tools are called in parallel (e.g., GPT-5.2 parallel search)
*/
tools?: Record<number, { id: string; index: number; name: string }>;
usage?: ModelUsage;
}

View File

@@ -131,7 +131,7 @@ describe('QwenAIStream', () => {
expect(chunks).toEqual([
'id: 2\n',
'event: tool_calls\n',
`data: [{"function":{"name":"tool1","arguments":"{}"},"id":"call_1","index":0,"type":"function"},{"function":{"name":"tool2","arguments":"{}"},"id":"call_2","index":1,"type":"function"}]\n\n`,
`data: [{"function":{"arguments":"{}","name":"tool1"},"id":"call_1","index":0,"type":"function"},{"function":{"arguments":"{}","name":"tool2"},"id":"call_2","index":1,"type":"function"}]\n\n`,
]);
expect(onToolCallMock).toHaveBeenCalledTimes(1);
@@ -347,7 +347,136 @@ describe('QwenAIStream', () => {
expect(chunks).toEqual([
'id: 5\n',
'event: tool_calls\n',
`data: [{"function":{"name":"tool1","arguments":"{}"},"id":"call_1","index":0,"type":"function"},{"function":{"name":"tool2","arguments":"{}"},"id":"call_2","index":1,"type":"function"}]\n\n`,
`data: [{"function":{"arguments":"{}","name":"tool1"},"id":"call_1","index":0,"type":"function"},{"function":{"arguments":"{}","name":"tool2"},"id":"call_2","index":1,"type":"function"}]\n\n`,
]);
});
// Test case for Qwen models sending tool_calls in two separate chunks:
// 1. First chunk: {id, name} without arguments
// 2. Second chunk: {id, arguments} without name
// This behavior is observed in qwen3-vl-235b-a22b-thinking model
it('should handle tool calls with name in first chunk and arguments in second chunk (Qwen behavior)', async () => {
const mockOpenAIStream = new ReadableStream({
start(controller) {
// First chunk: has id and name, but no arguments
controller.enqueue({
choices: [
{
delta: {
content: null,
tool_calls: [
{
index: 0,
id: 'call_4bde23783e314f219c6d65',
type: 'function',
function: { name: 'time____get_current_time____mcp' },
},
],
},
finish_reason: null,
index: 0,
},
],
id: 'chatcmpl-f574998f-e5b0-9b80-aac5-14b58e6978b5',
});
// Second chunk: same id, has arguments but no name
controller.enqueue({
choices: [
{
delta: {
content: null,
tool_calls: [
{
index: 0,
id: 'call_4bde23783e314f219c6d65',
type: 'function',
function: { arguments: '{"timezone": "Asia/Shanghai"}' },
},
],
},
finish_reason: null,
index: 0,
},
],
id: 'chatcmpl-f574998f-e5b0-9b80-aac5-14b58e6978b5',
});
controller.close();
},
});
const onToolCallMock = vi.fn();
const protocolStream = QwenAIStream(mockOpenAIStream, {
callbacks: {
onToolsCalling: onToolCallMock,
},
});
const decoder = new TextDecoder();
const chunks = [];
// @ts-ignore
for await (const chunk of protocolStream) {
chunks.push(decoder.decode(chunk, { stream: true }));
}
// First chunk should have name with empty arguments
// Second chunk should have arguments with null name (same as OpenAI/vLLM behavior)
expect(chunks).toEqual([
'id: chatcmpl-f574998f-e5b0-9b80-aac5-14b58e6978b5\n',
'event: tool_calls\n',
`data: [{"function":{"arguments":"","name":"time____get_current_time____mcp"},"id":"call_4bde23783e314f219c6d65","index":0,"type":"function"}]\n\n`,
'id: chatcmpl-f574998f-e5b0-9b80-aac5-14b58e6978b5\n',
'event: tool_calls\n',
`data: [{"function":{"arguments":"{\\"timezone\\": \\"Asia/Shanghai\\"}","name":null},"id":"call_4bde23783e314f219c6d65","index":0,"type":"function"}]\n\n`,
]);
expect(onToolCallMock).toHaveBeenCalledTimes(2);
});
it('should handle tool calls with only name (no arguments field)', async () => {
const mockOpenAIStream = new ReadableStream({
start(controller) {
controller.enqueue({
choices: [
{
delta: {
tool_calls: [
{
index: 0,
id: 'call_123',
type: 'function',
function: { name: 'get_weather' },
},
],
},
index: 0,
},
],
id: '6',
});
controller.close();
},
});
const protocolStream = QwenAIStream(mockOpenAIStream);
const decoder = new TextDecoder();
const chunks = [];
// @ts-ignore
for await (const chunk of protocolStream) {
chunks.push(decoder.decode(chunk, { stream: true }));
}
// Should have empty string for arguments, not undefined
expect(chunks).toEqual([
'id: 6\n',
'event: tool_calls\n',
`data: [{"function":{"arguments":"","name":"get_weather"},"id":"call_123","index":0,"type":"function"}]\n\n`,
]);
});
});

View File

@@ -72,7 +72,15 @@ export const transformQwenStream = (
return {
data: item.delta.tool_calls.map(
(value, index): StreamToolCallChunkData => ({
function: value.function,
// Qwen models may send tool_calls in two separate chunks:
// 1. First chunk: {id, name} without arguments
// 2. Second chunk: {id, arguments} without name
// We need to provide default values to handle both cases
// Use null for missing name (same as OpenAI stream behavior)
function: {
arguments: value.function?.arguments ?? '',
name: value.function?.name ?? null,
},
id: value.id || generateToolCallId(index, value.function?.name),
index: typeof value.index !== 'undefined' ? value.index : index,
type: value.type || 'function',