test: add websocket replay planning coverage

This commit is contained in:
Peter Steinberger
2026-03-27 02:14:43 +00:00
parent 86bac4ee2a
commit b49accc273
2 changed files with 118 additions and 1 deletion

View File

@@ -292,7 +292,9 @@ export function planTurnInput(params: {
}): PlannedTurnInput {
if (params.previousResponseId && params.lastContextLength > 0) {
const newMessages = params.context.messages.slice(params.lastContextLength);
const toolResults = newMessages.filter((message) => (message as AnyMessage).role === "toolResult");
const toolResults = newMessages.filter(
(message) => (message as AnyMessage).role === "toolResult",
);
if (toolResults.length > 0) {
return {
mode: "incremental_tool_results",

View File

@@ -18,6 +18,7 @@ import {
convertTools,
createOpenAIWebSocketStreamFn,
hasWsSession,
planTurnInput,
releaseWsSession,
} from "./openai-ws-stream.js";
@@ -901,6 +902,120 @@ describe("buildAssistantMessageFromResponse", () => {
// ─────────────────────────────────────────────────────────────────────────────
describe("planTurnInput", () => {
  // Minimal model stub — replay planning only looks at the input modalities.
  const replayModel = { input: ["text"] };

  it("uses incremental tool result replay when a previous response id and new tool results exist", () => {
    // One completed tool round-trip: user → assistant tool call → tool result.
    const messages = [
      userMsg("Run ls"),
      assistantMsg([], [{ id: "call_1|fc_1", name: "exec", args: { cmd: "ls" } }]),
      toolResultMsg("call_1|fc_1", "file.txt"),
    ] as Parameters<typeof convertMessagesToInputItems>[0];

    const planned = planTurnInput({
      context: { systemPrompt: "You are helpful.", messages, tools: [] },
      model: replayModel,
      // lastContextLength of 2 means only the tool result is "new" this turn.
      previousResponseId: "resp_prev",
      lastContextLength: 2,
    });

    expect(planned.mode).toBe("incremental_tool_results");
    expect(planned.previousResponseId).toBe("resp_prev");
    // Only the bare call id (before the "|") is sent back to the API.
    expect(planned.inputItems).toEqual([
      {
        type: "function_call_output",
        call_id: "call_1",
        output: "file.txt",
      },
    ]);
  });

  it("restarts with full context when follow-up turns have no new tool results", () => {
    // A prior-turn response carrying both a reasoning item and a function call,
    // so the restart must replay every output item — not just messages.
    const reasoningItem = {
      type: "reasoning",
      id: "rs_turn1",
      content: "Thinking before tool call",
    };
    const functionCallItem = {
      type: "function_call",
      id: "fc_turn1",
      call_id: "call_turn1",
      name: "exec",
      arguments: '{"cmd":"ls"}',
    };
    const turn1Response = {
      id: "resp_turn1_reasoning",
      object: "response",
      created_at: Date.now(),
      status: "completed",
      model: "gpt-5.2",
      output: [reasoningItem, functionCallItem],
      usage: { input_tokens: 12, output_tokens: 8, total_tokens: 20 },
    } as ResponseObject;

    const messages = [
      userMsg("Run ls"),
      buildAssistantMessageFromResponse(turn1Response, {
        api: "openai-responses",
        provider: "openai",
        id: "gpt-5.2",
      }),
    ] as Parameters<typeof convertMessagesToInputItems>[0];
    const context = { systemPrompt: "You are helpful.", messages, tools: [] };

    const planned = planTurnInput({
      context,
      model: replayModel,
      previousResponseId: "resp_turn1_reasoning",
      // No messages beyond lastContextLength → nothing incremental to send.
      lastContextLength: context.messages.length,
    });

    expect(planned.mode).toBe("full_context_restart");
    expect(planned.previousResponseId).toBeUndefined();
    // Full restart replays the user message plus both prior output items, in order.
    expect(planned.inputItems.map((item) => item.type)).toEqual([
      "message",
      "reasoning",
      "function_call",
    ]);
    expect(planned.inputItems[1]).toMatchObject({ type: "reasoning", id: "rs_turn1" });
    expect(planned.inputItems[2]).toMatchObject({
      type: "function_call",
      call_id: "call_turn1",
      id: "fc_turn1",
    });
  });

  it("uses full context on the initial turn", () => {
    // No previous response id and zero prior context → initial-turn planning.
    const messages = [userMsg("Hello!")] as Parameters<typeof convertMessagesToInputItems>[0];

    const planned = planTurnInput({
      context: { systemPrompt: "You are helpful.", messages, tools: [] },
      model: replayModel,
      previousResponseId: null,
      lastContextLength: 0,
    });

    expect(planned).toMatchObject({
      mode: "full_context_initial",
      inputItems: [{ type: "message", role: "user", content: "Hello!" }],
    });
  });
});
// ─────────────────────────────────────────────────────────────────────────────
describe("createOpenAIWebSocketStreamFn", () => {
const modelStub = {
api: "openai-responses",