From db061661cc4e034c515790bb1509814d644d79a3 Mon Sep 17 00:00:00 2001 From: Roo Code Date: Sat, 10 Jan 2026 05:40:55 +0000 Subject: [PATCH 1/7] feat: add strict tool message ordering setting for OpenAI Compatible providers Adds a new setting `openAiStrictToolMessageOrdering` to OpenAI Compatible providers that enables merging text content after tool results into the last tool message. This fixes "Unexpected role user after role tool" errors with NVIDIA NIM, OpenRouter, and other strict OpenAI-compatible APIs. When enabled, environment_details and other text content that follows tool_result messages are merged into the last tool message instead of being sent as a separate user message. Fixes #10540 --- packages/types/src/provider-settings.ts | 1 + src/api/providers/openai.ts | 24 +++++++++++++++---- .../settings/providers/OpenAICompatible.tsx | 10 ++++++++ webview-ui/src/i18n/locales/en/settings.json | 4 +++- 4 files changed, 34 insertions(+), 5 deletions(-) diff --git a/packages/types/src/provider-settings.ts b/packages/types/src/provider-settings.ts index 3b3717ad45..8cad064732 100644 --- a/packages/types/src/provider-settings.ts +++ b/packages/types/src/provider-settings.ts @@ -250,6 +250,7 @@ const openAiSchema = baseProviderSettingsSchema.extend({ openAiStreamingEnabled: z.boolean().optional(), openAiHostHeader: z.string().optional(), // Keep temporarily for backward compatibility during migration. openAiHeaders: z.record(z.string(), z.string()).optional(), + openAiStrictToolMessageOrdering: z.boolean().optional(), // Merge text after tool results to avoid "user after tool" errors. }) const ollamaSchema = baseProviderSettingsSchema.extend({ diff --git a/src/api/providers/openai.ts b/src/api/providers/openai.ts index 43a4fe7ae3..0906e4fd8a 100644 --- a/src/api/providers/openai.ts +++ b/src/api/providers/openai.ts @@ -122,7 +122,11 @@ export class OpenAiHandler extends BaseProvider implements SingleCompletionHandl } } - convertedMessages = [systemMessage, ...convertToOpenAiMessages(messages)] + // When openAiStrictToolMessageOrdering is enabled, merge text content after tool results + // into the last tool message. This fixes "user after tool" errors with NVIDIA NIM, + // OpenRouter, and other strict OpenAI-compatible APIs. + const mergeToolResultText = this.options.openAiStrictToolMessageOrdering ?? false + convertedMessages = [systemMessage, ...convertToOpenAiMessages(messages, { mergeToolResultText })] if (modelInfo.supportsPromptCache) { // Note: the following logic is copied from openrouter: @@ -224,11 +228,15 @@ export class OpenAiHandler extends BaseProvider implements SingleCompletionHandl yield this.processUsageMetrics(lastUsage, modelInfo) } } else { + // When openAiStrictToolMessageOrdering is enabled, merge text content after tool results + // into the last tool message. This fixes "user after tool" errors with NVIDIA NIM, + // OpenRouter, and other strict OpenAI-compatible APIs. + const mergeToolResultText = this.options.openAiStrictToolMessageOrdering ?? false const requestOptions: OpenAI.Chat.Completions.ChatCompletionCreateParamsNonStreaming = { model: modelId, messages: deepseekReasoner ? 
convertToR1Format([{ role: "user", content: systemPrompt }, ...messages]) - : [systemMessage, ...convertToOpenAiMessages(messages)], + : [systemMessage, ...convertToOpenAiMessages(messages, { mergeToolResultText })], ...(metadata?.tools && { tools: this.convertToolsForOpenAI(metadata.tools) }), ...(metadata?.tool_choice && { tool_choice: metadata.tool_choice }), ...(metadata?.toolProtocol === "native" && { @@ -342,6 +350,10 @@ export class OpenAiHandler extends BaseProvider implements SingleCompletionHandl if (this.options.openAiStreamingEnabled ?? true) { const isGrokXAI = this._isGrokXAI(this.options.openAiBaseUrl) + // When openAiStrictToolMessageOrdering is enabled, merge text content after tool results + // into the last tool message. This fixes "user after tool" errors with NVIDIA NIM, + // OpenRouter, and other strict OpenAI-compatible APIs. + const mergeToolResultText = this.options.openAiStrictToolMessageOrdering ?? false const requestOptions: OpenAI.Chat.Completions.ChatCompletionCreateParamsStreaming = { model: modelId, messages: [ @@ -349,7 +361,7 @@ export class OpenAiHandler extends BaseProvider implements SingleCompletionHandl role: "developer", content: `Formatting re-enabled\n${systemPrompt}`, }, - ...convertToOpenAiMessages(messages), + ...convertToOpenAiMessages(messages, { mergeToolResultText }), ], stream: true, ...(isGrokXAI ? {} : { stream_options: { include_usage: true } }), @@ -379,6 +391,10 @@ export class OpenAiHandler extends BaseProvider implements SingleCompletionHandl yield* this.handleStreamResponse(stream) } else { + // When openAiStrictToolMessageOrdering is enabled, merge text content after tool results + // into the last tool message. This fixes "user after tool" errors with NVIDIA NIM, + // OpenRouter, and other strict OpenAI-compatible APIs. + const mergeToolResultText = this.options.openAiStrictToolMessageOrdering ?? false const requestOptions: OpenAI.Chat.Completions.ChatCompletionCreateParamsNonStreaming = { model: modelId, messages: [ @@ -386,7 +402,7 @@ export class OpenAiHandler extends BaseProvider implements SingleCompletionHandl role: "developer", content: `Formatting re-enabled\n${systemPrompt}`, }, - ...convertToOpenAiMessages(messages), + ...convertToOpenAiMessages(messages, { mergeToolResultText }), ], reasoning_effort: modelInfo.reasoningEffort as "low" | "medium" | "high" | undefined, temperature: undefined, diff --git a/webview-ui/src/components/settings/providers/OpenAICompatible.tsx b/webview-ui/src/components/settings/providers/OpenAICompatible.tsx index 4eea6f09f1..25fefd2639 100644 --- a/webview-ui/src/components/settings/providers/OpenAICompatible.tsx +++ b/webview-ui/src/components/settings/providers/OpenAICompatible.tsx @@ -158,6 +158,16 @@ export const OpenAICompatible = ({ onChange={handleInputChange("openAiStreamingEnabled", noTransform)}> {t("settings:modelInfo.enableStreaming")} +
+ + {t("settings:providers.openAiStrictToolMessageOrdering")} + +
+ {t("settings:providers.openAiStrictToolMessageOrderingDescription")} +
+
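For reference, a minimal sketch (not part of the patch) of how the new flag would be set on an OpenAI Compatible profile once this schema change lands. Only openAiBaseUrl, openAiStreamingEnabled, and openAiStrictToolMessageOrdering come from the patched schema; the URL and surrounding object shape are illustrative placeholders.

// Illustrative provider settings sketch — values are placeholders.
const providerSettings = {
	openAiBaseUrl: "https://nim.example.com/v1", // placeholder for any strict OpenAI-compatible endpoint
	openAiStreamingEnabled: true,
	openAiStrictToolMessageOrdering: true, // merge text after tool results into the last tool message
}
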
Date: Sat, 10 Jan 2026 05:44:58 +0000 Subject: [PATCH 2/7] feat: add strict tool message ordering setting for OpenAI-compatible providers Adds a new setting `openAiStrictToolMessageOrdering` for OpenAI-compatible providers that, when enabled, merges text content after tool results into the last tool message instead of creating a separate user message. This fixes the "Unexpected role user after role tool" error that occurs with providers like NVIDIA NIM and Devstral that enforce strict message ordering rules. Fixes #10540 --- packages/types/src/provider-settings.ts | 2 +- src/api/providers/openai.ts | 36 +++++++++---------- .../settings/providers/OpenAICompatible.tsx | 4 +-- webview-ui/src/i18n/locales/en/settings.json | 4 +++ 4 files changed, 23 insertions(+), 23 deletions(-) diff --git a/packages/types/src/provider-settings.ts b/packages/types/src/provider-settings.ts index 8cad064732..cc74f9f43d 100644 --- a/packages/types/src/provider-settings.ts +++ b/packages/types/src/provider-settings.ts @@ -250,7 +250,7 @@ const openAiSchema = baseProviderSettingsSchema.extend({ openAiStreamingEnabled: z.boolean().optional(), openAiHostHeader: z.string().optional(), // Keep temporarily for backward compatibility during migration. openAiHeaders: z.record(z.string(), z.string()).optional(), - openAiStrictToolMessageOrdering: z.boolean().optional(), // Merge text after tool results to avoid "user after tool" errors. + openAiStrictToolMessageOrdering: z.boolean().optional(), // For providers that don't allow user messages after tool messages. }) const ollamaSchema = baseProviderSettingsSchema.extend({ diff --git a/src/api/providers/openai.ts b/src/api/providers/openai.ts index 0906e4fd8a..01792b99fb 100644 --- a/src/api/providers/openai.ts +++ b/src/api/providers/openai.ts @@ -102,6 +102,11 @@ export class OpenAiHandler extends BaseProvider implements SingleCompletionHandl content: systemPrompt, } + // When strict tool message ordering is enabled, merge text content after tool_results + // into the last tool message instead of creating a separate user message. + // This is required for providers like NVIDIA NIM that don't allow user messages after tool messages. + const strictToolMessageOrdering = this.options.openAiStrictToolMessageOrdering ?? false + if (this.options.openAiStreamingEnabled ?? true) { let convertedMessages @@ -122,11 +127,10 @@ export class OpenAiHandler extends BaseProvider implements SingleCompletionHandl } } - // When openAiStrictToolMessageOrdering is enabled, merge text content after tool results - // into the last tool message. This fixes "user after tool" errors with NVIDIA NIM, - // OpenRouter, and other strict OpenAI-compatible APIs. - const mergeToolResultText = this.options.openAiStrictToolMessageOrdering ?? false - convertedMessages = [systemMessage, ...convertToOpenAiMessages(messages, { mergeToolResultText })] + convertedMessages = [ + systemMessage, + ...convertToOpenAiMessages(messages, { mergeToolResultText: strictToolMessageOrdering }), + ] if (modelInfo.supportsPromptCache) { // Note: the following logic is copied from openrouter: @@ -228,15 +232,14 @@ export class OpenAiHandler extends BaseProvider implements SingleCompletionHandl yield this.processUsageMetrics(lastUsage, modelInfo) } } else { - // When openAiStrictToolMessageOrdering is enabled, merge text content after tool results - // into the last tool message. This fixes "user after tool" errors with NVIDIA NIM, - // OpenRouter, and other strict OpenAI-compatible APIs. 
- const mergeToolResultText = this.options.openAiStrictToolMessageOrdering ?? false const requestOptions: OpenAI.Chat.Completions.ChatCompletionCreateParamsNonStreaming = { model: modelId, messages: deepseekReasoner ? convertToR1Format([{ role: "user", content: systemPrompt }, ...messages]) - : [systemMessage, ...convertToOpenAiMessages(messages, { mergeToolResultText })], + : [ + systemMessage, + ...convertToOpenAiMessages(messages, { mergeToolResultText: strictToolMessageOrdering }), + ], ...(metadata?.tools && { tools: this.convertToolsForOpenAI(metadata.tools) }), ...(metadata?.tool_choice && { tool_choice: metadata.tool_choice }), ...(metadata?.toolProtocol === "native" && { @@ -346,14 +349,11 @@ export class OpenAiHandler extends BaseProvider implements SingleCompletionHandl ): ApiStream { const modelInfo = this.getModel().info const methodIsAzureAiInference = this._isAzureAiInference(this.options.openAiBaseUrl) + const strictToolMessageOrdering = this.options.openAiStrictToolMessageOrdering ?? false if (this.options.openAiStreamingEnabled ?? true) { const isGrokXAI = this._isGrokXAI(this.options.openAiBaseUrl) - // When openAiStrictToolMessageOrdering is enabled, merge text content after tool results - // into the last tool message. This fixes "user after tool" errors with NVIDIA NIM, - // OpenRouter, and other strict OpenAI-compatible APIs. - const mergeToolResultText = this.options.openAiStrictToolMessageOrdering ?? false const requestOptions: OpenAI.Chat.Completions.ChatCompletionCreateParamsStreaming = { model: modelId, messages: [ @@ -361,7 +361,7 @@ export class OpenAiHandler extends BaseProvider implements SingleCompletionHandl role: "developer", content: `Formatting re-enabled\n${systemPrompt}`, }, - ...convertToOpenAiMessages(messages, { mergeToolResultText }), + ...convertToOpenAiMessages(messages, { mergeToolResultText: strictToolMessageOrdering }), ], stream: true, ...(isGrokXAI ? {} : { stream_options: { include_usage: true } }), @@ -391,10 +391,6 @@ export class OpenAiHandler extends BaseProvider implements SingleCompletionHandl yield* this.handleStreamResponse(stream) } else { - // When openAiStrictToolMessageOrdering is enabled, merge text content after tool results - // into the last tool message. This fixes "user after tool" errors with NVIDIA NIM, - // OpenRouter, and other strict OpenAI-compatible APIs. - const mergeToolResultText = this.options.openAiStrictToolMessageOrdering ?? false const requestOptions: OpenAI.Chat.Completions.ChatCompletionCreateParamsNonStreaming = { model: modelId, messages: [ @@ -402,7 +398,7 @@ export class OpenAiHandler extends BaseProvider implements SingleCompletionHandl role: "developer", content: `Formatting re-enabled\n${systemPrompt}`, }, - ...convertToOpenAiMessages(messages, { mergeToolResultText }), + ...convertToOpenAiMessages(messages, { mergeToolResultText: strictToolMessageOrdering }), ], reasoning_effort: modelInfo.reasoningEffort as "low" | "medium" | "high" | undefined, temperature: undefined, diff --git a/webview-ui/src/components/settings/providers/OpenAICompatible.tsx b/webview-ui/src/components/settings/providers/OpenAICompatible.tsx index 25fefd2639..cc1777ca76 100644 --- a/webview-ui/src/components/settings/providers/OpenAICompatible.tsx +++ b/webview-ui/src/components/settings/providers/OpenAICompatible.tsx @@ -162,10 +162,10 @@ export const OpenAICompatible = ({ - {t("settings:providers.openAiStrictToolMessageOrdering")} + {t("settings:providers.openAiStrictToolMessageOrdering.label")}
- {t("settings:providers.openAiStrictToolMessageOrderingDescription")} + {t("settings:providers.openAiStrictToolMessageOrdering.description")}
diff --git a/webview-ui/src/i18n/locales/en/settings.json b/webview-ui/src/i18n/locales/en/settings.json index 0b78ee2196..fd87ca2fd1 100644 --- a/webview-ui/src/i18n/locales/en/settings.json +++ b/webview-ui/src/i18n/locales/en/settings.json @@ -498,6 +498,10 @@ }, "resetDefaults": "Reset to Defaults" }, + "openAiStrictToolMessageOrdering": { + "label": "Strict tool message ordering", + "description": "Enable for providers like NVIDIA NIM and Devstral that require strict message ordering. When enabled, text content after tool results is merged into the last tool message instead of creating a separate user message." + }, "rateLimitSeconds": { "label": "Rate limit", "description": "Minimum time between API requests." From 809602f2b876ab70581eb0981e56acd0f18c5903 Mon Sep 17 00:00:00 2001 From: Roo Code Date: Sat, 10 Jan 2026 07:04:07 +0000 Subject: [PATCH 3/7] fix: pass mergeToolResultText option to convertToR1Format when R1 mode is enabled When both "Enable R1 model parameters" and "Strict tool message ordering" are enabled, the mergeToolResultText option was not being passed to convertToR1Format, causing the 400 error to still occur. This fix ensures the strictToolMessageOrdering setting is respected in both the streaming and non-streaming code paths when using R1 format. --- src/api/providers/openai.ts | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/src/api/providers/openai.ts b/src/api/providers/openai.ts index 01792b99fb..b6b5147739 100644 --- a/src/api/providers/openai.ts +++ b/src/api/providers/openai.ts @@ -111,7 +111,9 @@ export class OpenAiHandler extends BaseProvider implements SingleCompletionHandl let convertedMessages if (deepseekReasoner) { - convertedMessages = convertToR1Format([{ role: "user", content: systemPrompt }, ...messages]) + convertedMessages = convertToR1Format([{ role: "user", content: systemPrompt }, ...messages], { + mergeToolResultText: strictToolMessageOrdering, + }) } else { if (modelInfo.supportsPromptCache) { systemMessage = { @@ -235,7 +237,9 @@ export class OpenAiHandler extends BaseProvider implements SingleCompletionHandl const requestOptions: OpenAI.Chat.Completions.ChatCompletionCreateParamsNonStreaming = { model: modelId, messages: deepseekReasoner - ? convertToR1Format([{ role: "user", content: systemPrompt }, ...messages]) + ? convertToR1Format([{ role: "user", content: systemPrompt }, ...messages], { + mergeToolResultText: strictToolMessageOrdering, + }) : [ systemMessage, ...convertToOpenAiMessages(messages, { mergeToolResultText: strictToolMessageOrdering }), From c13260f9355c67b570ed6bc3e05ef5d9f5bd74d0 Mon Sep 17 00:00:00 2001 From: Roo Code Date: Sat, 10 Jan 2026 07:31:31 +0000 Subject: [PATCH 4/7] fix: remove duplicate translation keys in settings.json --- webview-ui/src/i18n/locales/en/settings.json | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/webview-ui/src/i18n/locales/en/settings.json b/webview-ui/src/i18n/locales/en/settings.json index fd87ca2fd1..337a31b7aa 100644 --- a/webview-ui/src/i18n/locales/en/settings.json +++ b/webview-ui/src/i18n/locales/en/settings.json @@ -535,9 +535,7 @@ "placeholder": "Default: claude", "maxTokensLabel": "Max Output Tokens", "maxTokensDescription": "Maximum number of output tokens for Claude Code responses. Default is 8000." - }, - "openAiStrictToolMessageOrdering": "Strict tool message ordering", - "openAiStrictToolMessageOrderingDescription": "Enable this if your API returns errors like \"Unexpected role 'user' after role 'tool'\". 
This merges text content into tool messages to fix compatibility with NVIDIA NIM, OpenRouter, and other strict OpenAI-compatible APIs." + } }, "browser": { "enable": { From 8065da46f6331b87538470f73b606ee485186732 Mon Sep 17 00:00:00 2001 From: Roo Code Date: Sat, 10 Jan 2026 08:14:22 +0000 Subject: [PATCH 5/7] fix: merge text content into tool message even when images are present When mergeToolResultText is enabled and both text and images are present after tool results, the text content is now merged into the last tool message while images are sent as a separate user message. This fixes the issue where strict tool message ordering would not work when images were added to the chat, causing "Unexpected role user after role tool" errors on providers like NVIDIA NIM. --- .../transform/__tests__/openai-format.spec.ts | 56 ++++++++++++++++++- src/api/transform/openai-format.ts | 46 ++++++++++----- 2 files changed, 85 insertions(+), 17 deletions(-) diff --git a/src/api/transform/__tests__/openai-format.spec.ts b/src/api/transform/__tests__/openai-format.spec.ts index d5d4404837..111f79101f 100644 --- a/src/api/transform/__tests__/openai-format.spec.ts +++ b/src/api/transform/__tests__/openai-format.spec.ts @@ -319,7 +319,55 @@ describe("convertToOpenAiMessages", () => { ) }) - it("should NOT merge text when images are present (fall back to user message)", () => { + it("should merge text and send images separately when mergeToolResultText is true", () => { + const anthropicMessages: Anthropic.Messages.MessageParam[] = [ + { + role: "user", + content: [ + { + type: "tool_result", + tool_use_id: "tool-123", + content: "Tool result content", + }, + { + type: "text", + text: "Context info", + }, + { + type: "image", + source: { + type: "base64", + media_type: "image/png", + data: "base64data", + }, + }, + ], + }, + ] + + const openAiMessages = convertToOpenAiMessages(anthropicMessages, { mergeToolResultText: true }) + + // Should produce a tool message with merged text AND a user message for the image + expect(openAiMessages).toHaveLength(2) + + // First message: tool message with merged text + const toolMessage = openAiMessages[0] as OpenAI.Chat.ChatCompletionToolMessageParam + expect(toolMessage.role).toBe("tool") + expect(toolMessage.tool_call_id).toBe("tool-123") + expect(toolMessage.content).toBe( + "Tool result content\n\nContext info", + ) + + // Second message: user message with only the image + expect(openAiMessages[1].role).toBe("user") + const userContent = openAiMessages[1].content as Array<{ type: string; image_url?: { url: string } }> + expect(Array.isArray(userContent)).toBe(true) + expect(userContent).toHaveLength(1) + expect(userContent[0].type).toBe("image_url") + expect(userContent[0].image_url?.url).toBe("data:image/png;base64,base64data") + }) + + it("should send only images as user message when no text content exists with mergeToolResultText", () => { const anthropicMessages: Anthropic.Messages.MessageParam[] = [ { role: "user", @@ -343,9 +391,13 @@ describe("convertToOpenAiMessages", () => { const openAiMessages = convertToOpenAiMessages(anthropicMessages, { mergeToolResultText: true }) - // Should produce a tool message AND a user message (because image is present) + // Should produce a tool message AND a user message (only image, no text to merge) expect(openAiMessages).toHaveLength(2) expect((openAiMessages[0] as OpenAI.Chat.ChatCompletionToolMessageParam).role).toBe("tool") + // Tool message content should NOT be modified since there's no text to merge + 
expect((openAiMessages[0] as OpenAI.Chat.ChatCompletionToolMessageParam).content).toBe( + "Tool result content", + ) expect(openAiMessages[1].role).toBe("user") }) diff --git a/src/api/transform/openai-format.ts b/src/api/transform/openai-format.ts index ad02be5541..f182a2aac1 100644 --- a/src/api/transform/openai-format.ts +++ b/src/api/transform/openai-format.ts @@ -138,24 +138,40 @@ export function convertToOpenAiMessages( // Process non-tool messages if (nonToolMessages.length > 0) { - // Check if we should merge text into the last tool message - // This is critical for reasoning/thinking models where a user message - // after tool results causes the model to drop all previous reasoning_content - const hasOnlyTextContent = nonToolMessages.every((part) => part.type === "text") const hasToolMessages = toolMessages.length > 0 - const shouldMergeIntoToolMessage = - options?.mergeToolResultText && hasToolMessages && hasOnlyTextContent - if (shouldMergeIntoToolMessage) { + if (options?.mergeToolResultText && hasToolMessages) { + // When mergeToolResultText is enabled, separate text and images + // Merge text into the last tool message, and send images separately + // This is critical for providers like NVIDIA NIM that don't allow user messages after tool messages + const textMessages = nonToolMessages.filter( + (part) => part.type === "text", + ) as Anthropic.TextBlockParam[] + const imageMessages = nonToolMessages.filter( + (part) => part.type === "image", + ) as Anthropic.ImageBlockParam[] + // Merge text content into the last tool message - const lastToolMessage = openAiMessages[ - openAiMessages.length - 1 - ] as OpenAI.Chat.ChatCompletionToolMessageParam - if (lastToolMessage?.role === "tool") { - const additionalText = nonToolMessages - .map((part) => (part as Anthropic.TextBlockParam).text) - .join("\n") - lastToolMessage.content = `${lastToolMessage.content}\n\n${additionalText}` + if (textMessages.length > 0) { + const lastToolMessage = openAiMessages[ + openAiMessages.length - 1 + ] as OpenAI.Chat.ChatCompletionToolMessageParam + if (lastToolMessage?.role === "tool") { + const additionalText = textMessages.map((part) => part.text).join("\n") + lastToolMessage.content = `${lastToolMessage.content}\n\n${additionalText}` + } + } + + // Send images as a separate user message if any + // Note: Images must still be sent as user messages since tool messages don't support images + if (imageMessages.length > 0) { + openAiMessages.push({ + role: "user", + content: imageMessages.map((part) => ({ + type: "image_url", + image_url: { url: `data:${part.source.media_type};base64,${part.source.data}` }, + })), + }) } } else { // Standard behavior: add user message with text/image content From 3661e5432788a1f38448f852f5b05416c7442ee7 Mon Sep 17 00:00:00 2001 From: Roo Code Date: Sat, 10 Jan 2026 08:48:27 +0000 Subject: [PATCH 6/7] test: add multi-turn conversation tests for mergeToolResultText option Add regression tests to verify that the mergeToolResultText option works correctly for ALL tool_result messages in a conversation, not just the first one. Tests added: - Multi-turn conversation with multiple tool calls - Verification that no user messages appear after tool messages when mergeToolResultText is enabled All 27 tests pass. 
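To make the shape being asserted concrete, a simplified sketch (not the test code itself) of the converted tail of a two-turn tool conversation when mergeToolResultText is enabled — no user message ever follows a tool message, and the trailing text is joined onto the tool result with a blank line, as in the implementation:

// Simplified output of convertToOpenAiMessages(..., { mergeToolResultText: true }); arguments abbreviated.
const convertedTail = [
	{ role: "assistant", tool_calls: [{ id: "call_1", type: "function", function: { name: "write_file", arguments: "{}" } }] },
	{ role: "tool", tool_call_id: "call_1", content: "File created successfully\n\nContext 2" },
	{ role: "assistant", tool_calls: [{ id: "call_2", type: "function", function: { name: "read_file", arguments: "{}" } }] },
	{ role: "tool", tool_call_id: "call_2", content: "File content: hello\n\nContext 3" },
]
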
--- .../transform/__tests__/openai-format.spec.ts | 116 ++++++++++++++++++ 1 file changed, 116 insertions(+) diff --git a/src/api/transform/__tests__/openai-format.spec.ts b/src/api/transform/__tests__/openai-format.spec.ts index 111f79101f..33996763ef 100644 --- a/src/api/transform/__tests__/openai-format.spec.ts +++ b/src/api/transform/__tests__/openai-format.spec.ts @@ -482,6 +482,122 @@ describe("convertToOpenAiMessages", () => { expect(openAiMessages).toHaveLength(1) expect(openAiMessages[0].role).toBe("user") }) + + it("should merge text into tool messages for multiple tool calls across conversation turns", () => { + // This test simulates a full conversation with multiple tool_result + environment_details messages + // to ensure mergeToolResultText works correctly for ALL tool_result messages, not just the first one + // Regression test for: "The fix works for the first message but after the first response the text content is NOT merged" + const anthropicMessages: Anthropic.Messages.MessageParam[] = [ + // Initial user message (no tool_results) + { + role: "user", + content: [ + { type: "text", text: "Create a file for me" }, + { type: "text", text: "Context 1" }, + ], + }, + // Assistant uses first tool + { + role: "assistant", + content: [ + { type: "text", text: "I'll create the file for you." }, + { + type: "tool_use", + id: "call_1", + name: "write_file", + input: { path: "test.txt", content: "hello" }, + }, + ], + }, + // First tool result + environment_details + { + role: "user", + content: [ + { type: "tool_result", tool_use_id: "call_1", content: "File created successfully" }, + { type: "text", text: "Context 2" }, + ], + }, + // Assistant uses second tool + { + role: "assistant", + content: [ + { type: "text", text: "Now I'll read the file to verify." }, + { type: "tool_use", id: "call_2", name: "read_file", input: { path: "test.txt" } }, + ], + }, + // Second tool result + environment_details (this is where the bug was reported) + { + role: "user", + content: [ + { type: "tool_result", tool_use_id: "call_2", content: "File content: hello" }, + { type: "text", text: "Context 3" }, + ], + }, + ] + + const openAiMessages = convertToOpenAiMessages(anthropicMessages, { mergeToolResultText: true }) + + // Expected structure: + // 1. User message (initial, no tool_results - text should remain as user message) + // 2. Assistant message with tool_calls + // 3. Tool message with merged text (first tool_result) + // 4. Assistant message with tool_calls + // 5. 
Tool message with merged text (second tool_result) + expect(openAiMessages).toHaveLength(5) + + // First message should be a user message (no tool_results to merge into) + expect(openAiMessages[0].role).toBe("user") + + // Second message should be assistant with tool_calls + expect(openAiMessages[1].role).toBe("assistant") + expect((openAiMessages[1] as OpenAI.Chat.ChatCompletionAssistantMessageParam).tool_calls).toHaveLength(1) + + // Third message should be tool message with merged environment_details + const firstToolMsg = openAiMessages[2] as OpenAI.Chat.ChatCompletionToolMessageParam + expect(firstToolMsg.role).toBe("tool") + expect(firstToolMsg.tool_call_id).toBe("call_1") + expect(firstToolMsg.content).toContain("File created successfully") + expect(firstToolMsg.content).toContain("Context 2") + + // Fourth message should be assistant with tool_calls + expect(openAiMessages[3].role).toBe("assistant") + expect((openAiMessages[3] as OpenAI.Chat.ChatCompletionAssistantMessageParam).tool_calls).toHaveLength(1) + + // Fifth message should be tool message with merged environment_details (THE BUG FIX) + const secondToolMsg = openAiMessages[4] as OpenAI.Chat.ChatCompletionToolMessageParam + expect(secondToolMsg.role).toBe("tool") + expect(secondToolMsg.tool_call_id).toBe("call_2") + expect(secondToolMsg.content).toContain("File content: hello") + expect(secondToolMsg.content).toContain("Context 3") + }) + + it("should NOT create user messages after tool messages when mergeToolResultText is true", () => { + // This test specifically verifies that the "user after tool" error is avoided + const anthropicMessages: Anthropic.Messages.MessageParam[] = [ + { + role: "assistant", + content: [{ type: "tool_use", id: "tool_1", name: "read_file", input: { path: "test.ts" } }], + }, + { + role: "user", + content: [ + { type: "tool_result", tool_use_id: "tool_1", content: "File contents" }, + { type: "text", text: "Some context" }, + ], + }, + ] + + const openAiMessages = convertToOpenAiMessages(anthropicMessages, { mergeToolResultText: true }) + + // Should produce assistant + tool (no user message) + expect(openAiMessages).toHaveLength(2) + expect(openAiMessages[0].role).toBe("assistant") + expect(openAiMessages[1].role).toBe("tool") + // The text should be merged into the tool message, NOT as a separate user message + expect((openAiMessages[1] as OpenAI.Chat.ChatCompletionToolMessageParam).content).toContain( + "Some context", + ) + }) }) describe("reasoning_details transformation", () => { From 50c0ab97895a83057c0d39133cc6a4610bb57ec6 Mon Sep 17 00:00:00 2001 From: Roo Code Date: Mon, 12 Jan 2026 21:25:14 +0000 Subject: [PATCH 7/7] i18n: add openAiStrictToolMessageOrdering translations for all locales --- webview-ui/src/i18n/locales/ca/settings.json | 4 ++++ webview-ui/src/i18n/locales/de/settings.json | 4 ++++ webview-ui/src/i18n/locales/es/settings.json | 4 ++++ webview-ui/src/i18n/locales/fr/settings.json | 4 ++++ webview-ui/src/i18n/locales/hi/settings.json | 4 ++++ webview-ui/src/i18n/locales/id/settings.json | 4 ++++ webview-ui/src/i18n/locales/it/settings.json | 4 ++++ webview-ui/src/i18n/locales/ja/settings.json | 4 ++++ webview-ui/src/i18n/locales/ko/settings.json | 4 ++++ webview-ui/src/i18n/locales/nl/settings.json | 4 ++++ webview-ui/src/i18n/locales/pl/settings.json | 4 ++++ webview-ui/src/i18n/locales/pt-BR/settings.json | 4 ++++ webview-ui/src/i18n/locales/ru/settings.json | 4 ++++ webview-ui/src/i18n/locales/tr/settings.json | 4 ++++ webview-ui/src/i18n/locales/vi/settings.json | 4 
++++ webview-ui/src/i18n/locales/zh-CN/settings.json | 4 ++++ webview-ui/src/i18n/locales/zh-TW/settings.json | 4 ++++ 17 files changed, 68 insertions(+) diff --git a/webview-ui/src/i18n/locales/ca/settings.json b/webview-ui/src/i18n/locales/ca/settings.json index 4f92caf7f7..7788ae51fa 100644 --- a/webview-ui/src/i18n/locales/ca/settings.json +++ b/webview-ui/src/i18n/locales/ca/settings.json @@ -522,6 +522,10 @@ "placeholder": "Per defecte: claude", "maxTokensLabel": "Tokens màxims de sortida", "maxTokensDescription": "Nombre màxim de tokens de sortida per a les respostes de Claude Code. El valor per defecte és 8000." + }, + "openAiStrictToolMessageOrdering": { + "label": "Ordre estricte de missatges d'eines", + "description": "Activa per a proveïdors com NVIDIA NIM i Devstral que requereixen un ordre estricte de missatges. Quan està activat, el contingut de text després dels resultats d'eines es fusiona en l'últim missatge d'eina en lloc de crear un missatge d'usuari separat." } }, "browser": { diff --git a/webview-ui/src/i18n/locales/de/settings.json b/webview-ui/src/i18n/locales/de/settings.json index 30e482c820..926d2c63f8 100644 --- a/webview-ui/src/i18n/locales/de/settings.json +++ b/webview-ui/src/i18n/locales/de/settings.json @@ -489,6 +489,10 @@ }, "resetDefaults": "Auf Standardwerte zurücksetzen" }, + "openAiStrictToolMessageOrdering": { + "label": "Strikte Tool-Nachrichtenreihenfolge", + "description": "Aktivieren für Anbieter wie NVIDIA NIM und Devstral, die eine strikte Nachrichtenreihenfolge erfordern. Wenn aktiviert, wird Textinhalt nach Tool-Ergebnissen in die letzte Tool-Nachricht integriert, anstatt eine separate Benutzernachricht zu erstellen." + }, "rateLimitSeconds": { "label": "Ratenbegrenzung", "description": "Minimale Zeit zwischen API-Anfragen." diff --git a/webview-ui/src/i18n/locales/es/settings.json b/webview-ui/src/i18n/locales/es/settings.json index f514ba12fc..c7c14284ce 100644 --- a/webview-ui/src/i18n/locales/es/settings.json +++ b/webview-ui/src/i18n/locales/es/settings.json @@ -489,6 +489,10 @@ }, "resetDefaults": "Restablecer valores predeterminados" }, + "openAiStrictToolMessageOrdering": { + "label": "Orden estricto de mensajes de herramientas", + "description": "Habilitar para proveedores como NVIDIA NIM y Devstral que requieren un orden estricto de mensajes. Cuando está habilitado, el contenido de texto después de los resultados de herramientas se fusiona en el último mensaje de herramienta en lugar de crear un mensaje de usuario separado." + }, "rateLimitSeconds": { "label": "Límite de tasa", "description": "Tiempo mínimo entre solicitudes de API." diff --git a/webview-ui/src/i18n/locales/fr/settings.json b/webview-ui/src/i18n/locales/fr/settings.json index 415bfa5220..0d2632ae8d 100644 --- a/webview-ui/src/i18n/locales/fr/settings.json +++ b/webview-ui/src/i18n/locales/fr/settings.json @@ -489,6 +489,10 @@ }, "resetDefaults": "Réinitialiser les valeurs par défaut" }, + "openAiStrictToolMessageOrdering": { + "label": "Ordre strict des messages d'outils", + "description": "Activer pour les fournisseurs comme NVIDIA NIM et Devstral qui nécessitent un ordre strict des messages. Lorsqu'activé, le contenu texte après les résultats d'outils est fusionné dans le dernier message d'outil au lieu de créer un message utilisateur séparé." + }, "rateLimitSeconds": { "label": "Limite de débit", "description": "Temps minimum entre les requêtes API." 
diff --git a/webview-ui/src/i18n/locales/hi/settings.json b/webview-ui/src/i18n/locales/hi/settings.json index 96a33fafee..66cf06df35 100644 --- a/webview-ui/src/i18n/locales/hi/settings.json +++ b/webview-ui/src/i18n/locales/hi/settings.json @@ -522,6 +522,10 @@ "placeholder": "डिफ़ॉल्ट: claude", "maxTokensLabel": "अधिकतम आउटपुट टोकन", "maxTokensDescription": "Claude Code प्रतिक्रियाओं के लिए आउटपुट टोकन की अधिकतम संख्या। डिफ़ॉल्ट 8000 है।" + }, + "openAiStrictToolMessageOrdering": { + "label": "सख्त टूल संदेश क्रम", + "description": "NVIDIA NIM और Devstral जैसे प्रदाताओं के लिए सक्षम करें जिन्हें सख्त संदेश क्रम की आवश्यकता होती है। सक्षम होने पर, टूल परिणामों के बाद टेक्स्ट सामग्री को अलग उपयोगकर्ता संदेश बनाने के बजाय अंतिम टूल संदेश में मर्ज किया जाता है।" } }, "browser": { diff --git a/webview-ui/src/i18n/locales/id/settings.json b/webview-ui/src/i18n/locales/id/settings.json index a184c5c092..e14d242ca9 100644 --- a/webview-ui/src/i18n/locales/id/settings.json +++ b/webview-ui/src/i18n/locales/id/settings.json @@ -526,6 +526,10 @@ "placeholder": "Default: claude", "maxTokensLabel": "Token Output Maks", "maxTokensDescription": "Jumlah maksimum token output untuk respons Claude Code. Default adalah 8000." + }, + "openAiStrictToolMessageOrdering": { + "label": "Urutan pesan alat yang ketat", + "description": "Aktifkan untuk penyedia seperti NVIDIA NIM dan Devstral yang memerlukan urutan pesan yang ketat. Ketika diaktifkan, konten teks setelah hasil alat digabungkan ke dalam pesan alat terakhir alih-alih membuat pesan pengguna terpisah." } }, "browser": { diff --git a/webview-ui/src/i18n/locales/it/settings.json b/webview-ui/src/i18n/locales/it/settings.json index 69fd4e40c1..4f16fbb15a 100644 --- a/webview-ui/src/i18n/locales/it/settings.json +++ b/webview-ui/src/i18n/locales/it/settings.json @@ -522,6 +522,10 @@ "placeholder": "Predefinito: claude", "maxTokensLabel": "Token di output massimi", "maxTokensDescription": "Numero massimo di token di output per le risposte di Claude Code. Il valore predefinito è 8000." + }, + "openAiStrictToolMessageOrdering": { + "label": "Ordinamento rigoroso dei messaggi degli strumenti", + "description": "Abilita per provider come NVIDIA NIM e Devstral che richiedono un ordinamento rigoroso dei messaggi. Quando abilitato, il contenuto testuale dopo i risultati degli strumenti viene unito nell'ultimo messaggio dello strumento invece di creare un messaggio utente separato." } }, "browser": { diff --git a/webview-ui/src/i18n/locales/ja/settings.json b/webview-ui/src/i18n/locales/ja/settings.json index 66d1b6579e..fc64b1d42c 100644 --- a/webview-ui/src/i18n/locales/ja/settings.json +++ b/webview-ui/src/i18n/locales/ja/settings.json @@ -522,6 +522,10 @@ "placeholder": "デフォルト:claude", "maxTokensLabel": "最大出力トークン", "maxTokensDescription": "Claude Codeレスポンスの最大出力トークン数。デフォルトは8000です。" + }, + "openAiStrictToolMessageOrdering": { + "label": "厳格なツールメッセージの順序", + "description": "NVIDIA NIMやDevstralなど、厳格なメッセージ順序を必要とするプロバイダーで有効にします。有効にすると、ツール結果の後のテキストコンテンツは、別のユーザーメッセージを作成する代わりに、最後のツールメッセージにマージされます。" } }, "browser": { diff --git a/webview-ui/src/i18n/locales/ko/settings.json b/webview-ui/src/i18n/locales/ko/settings.json index 691d756fed..f3893c9c86 100644 --- a/webview-ui/src/i18n/locales/ko/settings.json +++ b/webview-ui/src/i18n/locales/ko/settings.json @@ -522,6 +522,10 @@ "placeholder": "기본값: claude", "maxTokensLabel": "최대 출력 토큰", "maxTokensDescription": "Claude Code 응답의 최대 출력 토큰 수. 기본값은 8000입니다." 
+ }, + "openAiStrictToolMessageOrdering": { + "label": "엄격한 도구 메시지 순서", + "description": "NVIDIA NIM 및 Devstral과 같이 엄격한 메시지 순서가 필요한 공급자에 대해 활성화하세요. 활성화하면 도구 결과 이후의 텍스트 콘텐츠가 별도의 사용자 메시지를 생성하는 대신 마지막 도구 메시지에 병합됩니다." } }, "browser": { diff --git a/webview-ui/src/i18n/locales/nl/settings.json b/webview-ui/src/i18n/locales/nl/settings.json index d0c3a78216..0ae80cbb7c 100644 --- a/webview-ui/src/i18n/locales/nl/settings.json +++ b/webview-ui/src/i18n/locales/nl/settings.json @@ -522,6 +522,10 @@ "placeholder": "Standaard: claude", "maxTokensLabel": "Max Output Tokens", "maxTokensDescription": "Maximaal aantal output-tokens voor Claude Code-reacties. Standaard is 8000." + }, + "openAiStrictToolMessageOrdering": { + "label": "Strikte volgorde van toolberichten", + "description": "Schakel in voor providers zoals NVIDIA NIM en Devstral die een strikte berichtvolgorde vereisen. Indien ingeschakeld, wordt tekstinhoud na toolresultaten samengevoegd in het laatste toolbericht in plaats van een apart gebruikersbericht te maken." } }, "browser": { diff --git a/webview-ui/src/i18n/locales/pl/settings.json b/webview-ui/src/i18n/locales/pl/settings.json index 218cdf60e8..36aa03c9fa 100644 --- a/webview-ui/src/i18n/locales/pl/settings.json +++ b/webview-ui/src/i18n/locales/pl/settings.json @@ -522,6 +522,10 @@ "placeholder": "Domyślnie: claude", "maxTokensLabel": "Maksymalna liczba tokenów wyjściowych", "maxTokensDescription": "Maksymalna liczba tokenów wyjściowych dla odpowiedzi Claude Code. Domyślnie 8000." + }, + "openAiStrictToolMessageOrdering": { + "label": "Ścisła kolejność wiadomości narzędzi", + "description": "Włącz dla dostawców takich jak NVIDIA NIM i Devstral, którzy wymagają ścisłej kolejności wiadomości. Po włączeniu treść tekstowa po wynikach narzędzi jest scalana z ostatnią wiadomością narzędzia zamiast tworzenia oddzielnej wiadomości użytkownika." } }, "browser": { diff --git a/webview-ui/src/i18n/locales/pt-BR/settings.json b/webview-ui/src/i18n/locales/pt-BR/settings.json index e96dc4c345..da9a7763f7 100644 --- a/webview-ui/src/i18n/locales/pt-BR/settings.json +++ b/webview-ui/src/i18n/locales/pt-BR/settings.json @@ -522,6 +522,10 @@ "placeholder": "Padrão: claude", "maxTokensLabel": "Tokens de saída máximos", "maxTokensDescription": "Número máximo de tokens de saída para respostas do Claude Code. O padrão é 8000." + }, + "openAiStrictToolMessageOrdering": { + "label": "Ordenação estrita de mensagens de ferramentas", + "description": "Ativar para provedores como NVIDIA NIM e Devstral que requerem ordenação estrita de mensagens. Quando ativado, o conteúdo de texto após os resultados das ferramentas é mesclado na última mensagem da ferramenta em vez de criar uma mensagem de usuário separada." } }, "browser": { diff --git a/webview-ui/src/i18n/locales/ru/settings.json b/webview-ui/src/i18n/locales/ru/settings.json index aaae476d9c..73f9461e3b 100644 --- a/webview-ui/src/i18n/locales/ru/settings.json +++ b/webview-ui/src/i18n/locales/ru/settings.json @@ -522,6 +522,10 @@ "placeholder": "По умолчанию: claude", "maxTokensLabel": "Макс. выходных токенов", "maxTokensDescription": "Максимальное количество выходных токенов для ответов Claude Code. По умолчанию 8000." + }, + "openAiStrictToolMessageOrdering": { + "label": "Строгий порядок сообщений инструментов", + "description": "Включите для провайдеров, таких как NVIDIA NIM и Devstral, которым требуется строгий порядок сообщений. 
При включении текстовое содержимое после результатов инструментов объединяется с последним сообщением инструмента вместо создания отдельного сообщения пользователя." } }, "browser": { diff --git a/webview-ui/src/i18n/locales/tr/settings.json b/webview-ui/src/i18n/locales/tr/settings.json index 0eabf2782f..fa001c7cfc 100644 --- a/webview-ui/src/i18n/locales/tr/settings.json +++ b/webview-ui/src/i18n/locales/tr/settings.json @@ -522,6 +522,10 @@ "placeholder": "Varsayılan: claude", "maxTokensLabel": "Maksimum Çıktı Token sayısı", "maxTokensDescription": "Claude Code yanıtları için maksimum çıktı token sayısı. Varsayılan 8000'dir." + }, + "openAiStrictToolMessageOrdering": { + "label": "Sıkı araç mesajı sıralaması", + "description": "NVIDIA NIM ve Devstral gibi sıkı mesaj sıralaması gerektiren sağlayıcılar için etkinleştirin. Etkinleştirildiğinde, araç sonuçlarından sonraki metin içeriği ayrı bir kullanıcı mesajı oluşturmak yerine son araç mesajıyla birleştirilir." } }, "browser": { diff --git a/webview-ui/src/i18n/locales/vi/settings.json b/webview-ui/src/i18n/locales/vi/settings.json index debb9fe860..63b1a4dd9e 100644 --- a/webview-ui/src/i18n/locales/vi/settings.json +++ b/webview-ui/src/i18n/locales/vi/settings.json @@ -522,6 +522,10 @@ "placeholder": "Mặc định: claude", "maxTokensLabel": "Số token đầu ra tối đa", "maxTokensDescription": "Số lượng token đầu ra tối đa cho các phản hồi của Claude Code. Mặc định là 8000." + }, + "openAiStrictToolMessageOrdering": { + "label": "Thứ tự thông báo công cụ nghiêm ngặt", + "description": "Bật cho các nhà cung cấp như NVIDIA NIM và Devstral yêu cầu thứ tự thông báo nghiêm ngặt. Khi được bật, nội dung văn bản sau kết quả công cụ được hợp nhất vào thông báo công cụ cuối cùng thay vì tạo thông báo người dùng riêng biệt." } }, "browser": { diff --git a/webview-ui/src/i18n/locales/zh-CN/settings.json b/webview-ui/src/i18n/locales/zh-CN/settings.json index 6137b091e7..c87506df32 100644 --- a/webview-ui/src/i18n/locales/zh-CN/settings.json +++ b/webview-ui/src/i18n/locales/zh-CN/settings.json @@ -522,6 +522,10 @@ "placeholder": "默认:claude", "maxTokensLabel": "最大输出 Token", "maxTokensDescription": "Claude Code 响应的最大输出 Token 数量。默认为 8000。" + }, + "openAiStrictToolMessageOrdering": { + "label": "严格的工具消息顺序", + "description": "为NVIDIA NIM和Devstral等需要严格消息顺序的提供商启用。启用后,工具结果后的文本内容会合并到最后一个工具消息中,而不是创建单独的用户消息。" } }, "browser": { diff --git a/webview-ui/src/i18n/locales/zh-TW/settings.json b/webview-ui/src/i18n/locales/zh-TW/settings.json index 84dfdb7527..4ada264c7a 100644 --- a/webview-ui/src/i18n/locales/zh-TW/settings.json +++ b/webview-ui/src/i18n/locales/zh-TW/settings.json @@ -522,6 +522,10 @@ "placeholder": "預設:claude", "maxTokensLabel": "最大輸出 Token", "maxTokensDescription": "Claude Code 回應的最大輸出 Token 數量。預設為 8000。" + }, + "openAiStrictToolMessageOrdering": { + "label": "嚴格的工具訊息順序", + "description": "為NVIDIA NIM和Devstral等需要嚴格訊息順序的供應商啟用。啟用後,工具結果後的文字內容會合併到最後一個工具訊息中,而不是建立單獨的使用者訊息。" } }, "browser": {
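Taken together, patches 1, 2 and 5 converge on the merge step sketched below. This is a self-contained illustration with simplified stand-in types, not the repository's actual convertToOpenAiMessages implementation; it mirrors the behaviour in the openai-format.ts hunk: trailing text is folded into the last tool message, while images (which tool messages cannot carry) still go out as a separate user message.

// Sketch only: simplified stand-ins for the OpenAI SDK chat message types.
type ImagePart = { type: "image_url"; image_url: { url: string } }
type ToolMsg = { role: "tool"; tool_call_id: string; content: string }
type Msg = ToolMsg | { role: "assistant"; content: string } | { role: "user"; content: string | ImagePart[] }

// Fold trailing text into the last tool message; emit images (if any) as their own user message.
function applyStrictToolOrdering(converted: Msg[], trailingText: string[], trailingImages: ImagePart[]): Msg[] {
	const last = converted[converted.length - 1]
	if (trailingText.length > 0 && last && last.role === "tool") {
		last.content = `${last.content}\n\n${trailingText.join("\n")}`
	}
	if (trailingImages.length > 0) {
		converted.push({ role: "user", content: trailingImages })
	}
	return converted
}

With the option disabled, the converter keeps the existing behaviour and emits the trailing text as a separate user message, which is exactly the sequence that strict providers such as NVIDIA NIM reject.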