From 9c1789ca7198d43020d18c3325f27236303e3f42 Mon Sep 17 00:00:00 2001
From: Aaron Reisman
Date: Thu, 25 Dec 2025 00:10:14 +0700
Subject: [PATCH] fix(session): prevent GPT-5.2 resume reasoning crash

---
 packages/opencode/src/session/message-v2.ts  | 12 ++-
 packages/opencode/src/session/prompt.ts      |  5 +-
 .../opencode/test/session/message-v2.test.ts | 93 +++++++++++++++++++
 3 files changed, 108 insertions(+), 2 deletions(-)
 create mode 100644 packages/opencode/test/session/message-v2.test.ts

diff --git a/packages/opencode/src/session/message-v2.ts b/packages/opencode/src/session/message-v2.ts
index 95bc3812e7a..8916098465c 100644
--- a/packages/opencode/src/session/message-v2.ts
+++ b/packages/opencode/src/session/message-v2.ts
@@ -419,7 +419,13 @@ export namespace MessageV2 {
   })
   export type WithParts = z.infer<typeof WithParts>
 
-  export function toModelMessage(input: WithParts[]): ModelMessage[] {
+  export function toModelMessage(
+    input: WithParts[],
+    options?: {
+      dropReasoningOnlyAssistantMessages?: boolean
+    },
+  ): ModelMessage[] {
+    const drop = options?.dropReasoningOnlyAssistantMessages === true
     const result: UIMessage[] = []
 
     for (const msg of input) {
@@ -536,6 +542,10 @@
             })
           }
         }
+
+        if (drop && assistantMessage.parts.every((part) => part.type === "step-start" || part.type === "reasoning")) {
+          if (result[result.length - 1] === assistantMessage) result.pop()
+        }
       }
     }
 
diff --git a/packages/opencode/src/session/prompt.ts b/packages/opencode/src/session/prompt.ts
index fabe3fa5128..532a74966a4 100644
--- a/packages/opencode/src/session/prompt.ts
+++ b/packages/opencode/src/session/prompt.ts
@@ -527,6 +527,7 @@
 
     await Plugin.trigger("experimental.chat.messages.transform", {}, { messages: sessionMessages })
 
+    const drop = model.api.id.startsWith("gpt-5.2")
     const result = await processor.process({
       user: lastUser,
       agent,
@@ -534,7 +535,9 @@
       sessionID,
       system: [...(await SystemPrompt.environment()), ...(await SystemPrompt.custom())],
       messages: [
-        ...MessageV2.toModelMessage(sessionMessages),
+        ...MessageV2.toModelMessage(sessionMessages, {
+          dropReasoningOnlyAssistantMessages: drop,
+        }),
         ...(isLastStep
           ? [
               {
diff --git a/packages/opencode/test/session/message-v2.test.ts b/packages/opencode/test/session/message-v2.test.ts
new file mode 100644
index 00000000000..c3bdefafcfb
--- /dev/null
+++ b/packages/opencode/test/session/message-v2.test.ts
@@ -0,0 +1,93 @@
+import { describe, expect, test } from "bun:test"
+import { MessageV2 } from "../../src/session/message-v2"
+
+describe("MessageV2.toModelMessage", () => {
+  const sid = "session_1"
+  const mid = "message_1"
+  const pid = "message_0"
+
+  function assistant(parts: MessageV2.Part[]): MessageV2.WithParts {
+    return {
+      info: {
+        id: mid,
+        sessionID: sid,
+        role: "assistant",
+        time: {
+          created: 0,
+        },
+        parentID: pid,
+        modelID: "gpt-5.2",
+        providerID: "openai",
+        mode: "chat",
+        agent: "agent",
+        path: {
+          cwd: "/",
+          root: "/",
+        },
+        cost: 0,
+        tokens: {
+          input: 0,
+          output: 0,
+          reasoning: 0,
+          cache: {
+            read: 0,
+            write: 0,
+          },
+        },
+      },
+      parts,
+    }
+  }
+
+  function step(id: string): MessageV2.StepStartPart {
+    return {
+      id,
+      sessionID: sid,
+      messageID: mid,
+      type: "step-start",
+    }
+  }
+
+  function think(id: string): MessageV2.ReasoningPart {
+    return {
+      id,
+      sessionID: sid,
+      messageID: mid,
+      type: "reasoning",
+      text: "Let me think...",
+      time: {
+        start: 0,
+      },
+    }
+  }
+
+  function say(id: string): MessageV2.TextPart {
+    return {
+      id,
+      sessionID: sid,
+      messageID: mid,
+      type: "text",
+      text: "Answer",
+    }
+  }
+
+  test("drops assistant messages with only reasoning/step-start parts when enabled", () => {
+    const messages = [assistant([step("part_step"), think("part_reasoning")])]
+
+    expect(MessageV2.toModelMessage(messages, { dropReasoningOnlyAssistantMessages: true })).toEqual([])
+  })
+
+  test("keeps assistant messages with only reasoning by default", () => {
+    const messages = [assistant([think("part_reasoning")])]
+
+    expect(MessageV2.toModelMessage(messages)).toHaveLength(1)
+  })
+
+  test("keeps assistant messages when non-reasoning content exists", () => {
+    const messages = [assistant([think("part_reasoning"), say("part_text")])]
+
+    const result = MessageV2.toModelMessage(messages)
+    expect(result).toHaveLength(1)
+    expect(result[0].role).toBe("assistant")
+  })
+})