Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
87 changes: 87 additions & 0 deletions packages/types/src/providers/openai-codex.ts
Original file line number Diff line number Diff line change
Expand Up @@ -41,6 +41,23 @@ export const openAiCodexModels = {
supportsTemperature: false,
description: "GPT-5.1 Codex Max: Maximum capability coding model via ChatGPT subscription",
},
"gpt-5.1-codex": {
maxTokens: 128000,
contextWindow: 400000,
supportsNativeTools: true,
defaultToolProtocol: "native",
includedTools: ["apply_patch"],
excludedTools: ["apply_diff", "write_to_file"],
supportsImages: true,
supportsPromptCache: true,
supportsReasoningEffort: ["low", "medium", "high"],
reasoningEffort: "medium",
// Subscription-based: no per-token costs
inputPrice: 0,
outputPrice: 0,
supportsTemperature: false,
description: "GPT-5.1 Codex: GPT-5.1 optimized for agentic coding via ChatGPT subscription",
},
"gpt-5.2-codex": {
maxTokens: 128000,
contextWindow: 400000,
Expand All @@ -57,6 +74,76 @@ export const openAiCodexModels = {
supportsTemperature: false,
description: "GPT-5.2 Codex: OpenAI's flagship coding model via ChatGPT subscription",
},
"gpt-5.1": {
maxTokens: 128000,
contextWindow: 400000,
supportsNativeTools: true,
defaultToolProtocol: "native",
includedTools: ["apply_patch"],
excludedTools: ["apply_diff", "write_to_file"],
supportsImages: true,
supportsPromptCache: true,
supportsReasoningEffort: ["none", "low", "medium", "high"],
reasoningEffort: "medium",
// Subscription-based: no per-token costs
inputPrice: 0,
outputPrice: 0,
supportsVerbosity: true,
supportsTemperature: false,
description: "GPT-5.1: General GPT-5.1 model via ChatGPT subscription",
},
"gpt-5": {
maxTokens: 128000,
contextWindow: 400000,
supportsNativeTools: true,
defaultToolProtocol: "native",
includedTools: ["apply_patch"],
excludedTools: ["apply_diff", "write_to_file"],
supportsImages: true,
supportsPromptCache: true,
supportsReasoningEffort: ["minimal", "low", "medium", "high"],
reasoningEffort: "medium",
// Subscription-based: no per-token costs
inputPrice: 0,
outputPrice: 0,
supportsVerbosity: true,
supportsTemperature: false,
description: "GPT-5: General GPT-5 model via ChatGPT subscription",
},
"gpt-5-codex": {
maxTokens: 128000,
contextWindow: 400000,
supportsNativeTools: true,
defaultToolProtocol: "native",
includedTools: ["apply_patch"],
excludedTools: ["apply_diff", "write_to_file"],
supportsImages: true,
supportsPromptCache: true,
supportsReasoningEffort: ["low", "medium", "high"],
reasoningEffort: "medium",
// Subscription-based: no per-token costs
inputPrice: 0,
outputPrice: 0,
supportsTemperature: false,
description: "GPT-5 Codex: GPT-5 optimized for agentic coding via ChatGPT subscription",
},
"gpt-5-codex-mini": {
maxTokens: 128000,
contextWindow: 400000,
supportsNativeTools: true,
defaultToolProtocol: "native",
includedTools: ["apply_patch"],
excludedTools: ["apply_diff", "write_to_file"],
supportsImages: true,
supportsPromptCache: true,
supportsReasoningEffort: ["low", "medium", "high"],
reasoningEffort: "medium",
// Subscription-based: no per-token costs
inputPrice: 0,
outputPrice: 0,
supportsTemperature: false,
description: "GPT-5 Codex Mini: Faster coding model via ChatGPT subscription",
},
"gpt-5.1-codex-mini": {
maxTokens: 128000,
contextWindow: 400000,
Expand Down
26 changes: 26 additions & 0 deletions src/api/providers/__tests__/openai-codex.spec.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,26 @@
// npx vitest run api/providers/__tests__/openai-codex.spec.ts

import { OpenAiCodexHandler } from "../openai-codex"

describe("OpenAiCodexHandler.getModel", () => {
	// Every GPT-5-family model id exposed by the openai-codex provider that
	// should resolve verbatim (no fallback) and carry "medium" reasoning effort.
	const validModelIds = ["gpt-5.1", "gpt-5", "gpt-5.1-codex", "gpt-5-codex", "gpt-5-codex-mini"]

	for (const modelId of validModelIds) {
		it(`should return specified model when a valid model id is provided: ${modelId}`, () => {
			const handler = new OpenAiCodexHandler({ apiModelId: modelId })
			const resolved = handler.getModel()

			expect(resolved.id).toBe(modelId)
			expect(resolved.info).toBeDefined()
			// Default reasoning effort for GPT-5 family
			expect(resolved.info.reasoningEffort).toBe("medium")
		})
	}

	it("should fall back to default model when an invalid model id is provided", () => {
		const handler = new OpenAiCodexHandler({ apiModelId: "not-a-real-model" })
		const resolved = handler.getModel()

		// Unknown ids must resolve to the provider's default model.
		expect(resolved.id).toBe("gpt-5.2-codex")
		expect(resolved.info).toBeDefined()
	})
})
33 changes: 28 additions & 5 deletions webview-ui/src/components/settings/ApiOptions.tsx
Original file line number Diff line number Diff line change
Expand Up @@ -311,18 +311,40 @@ const ApiOptions = ({
// To address that we set the modelId to the default value for the
// provider if it's not already set.
const validateAndResetModel = (
provider: ProviderName,
modelId: string | undefined,
field: keyof ProviderSettings,
defaultValue?: string,
) => {
// in case we haven't set a default value for a provider
if (!defaultValue) return

// only set default if no model is set, but don't reset invalid models
// let users see and decide what to do with invalid model selections
const shouldSetDefault = !modelId
// 1) If nothing is set, initialize to the provider default.
if (!modelId) {
setApiConfigurationField(field, defaultValue, false)
return
}

// 2) If something *is* set, ensure it's valid for the newly selected provider.
//
// Without this, switching providers can leave the UI showing a model from the
// previously selected provider (including model IDs that don't exist for the
// newly selected provider).
//
// Note: We only validate providers with static model lists.
const staticModels = MODELS_BY_PROVIDER[provider]
if (!staticModels) {
return
}

// Bedrock has a special “custom-arn” pseudo-model that isn't part of MODELS_BY_PROVIDER.
if (provider === "bedrock" && modelId === "custom-arn") {
return
}

if (shouldSetDefault) {
const filteredModels = filterModels(staticModels, provider, organizationAllowList)
const isValidModel = !!filteredModels && Object.prototype.hasOwnProperty.call(filteredModels, modelId)
if (!isValidModel) {
setApiConfigurationField(field, defaultValue, false)
}
}
Expand Down Expand Up @@ -381,13 +403,14 @@ const ApiOptions = ({
const config = PROVIDER_MODEL_CONFIG[value]
if (config) {
validateAndResetModel(
value,
apiConfiguration[config.field] as string | undefined,
config.field,
config.default,
)
}
},
[setApiConfigurationField, apiConfiguration],
[setApiConfigurationField, apiConfiguration, organizationAllowList],
)

const modelValidationError = useMemo(() => {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,7 @@ import { render, screen, fireEvent } from "@/utils/test-utils"
import { QueryClient, QueryClientProvider } from "@tanstack/react-query"

import { type ModelInfo, type ProviderSettings, openAiModelInfoSaneDefaults } from "@roo-code/types"
import { openAiCodexDefaultModelId } from "@roo-code/types"

import * as ExtensionStateContext from "@src/context/ExtensionStateContext"
const { ExtensionStateContextProvider } = ExtensionStateContext
Expand Down Expand Up @@ -297,6 +298,31 @@ const renderApiOptions = (props: Partial<ApiOptionsProps> = {}) => {
}

describe("ApiOptions", () => {
it("resets model to provider default when switching to openai-codex with an invalid prior apiModelId", () => {
const mockSetApiConfigurationField = vi.fn()

renderApiOptions({
apiConfiguration: {
apiProvider: "anthropic",
// Simulate a previously-selected model ID from another provider.
// When switching to OpenAI - ChatGPT Plus/Pro, this is invalid and should be reset.
apiModelId: "claude-3-5-sonnet-20241022",
},
setApiConfigurationField: mockSetApiConfigurationField,
})

const providerSelectContainer = screen.getByTestId("provider-select")
const providerSelect = providerSelectContainer.querySelector("select") as HTMLSelectElement
expect(providerSelect).toBeInTheDocument()

fireEvent.change(providerSelect, { target: { value: "openai-codex" } })

// Provider is updated
expect(mockSetApiConfigurationField).toHaveBeenCalledWith("apiProvider", "openai-codex")
// Model is reset to the provider default since the previous value is invalid for this provider
expect(mockSetApiConfigurationField).toHaveBeenCalledWith("apiModelId", openAiCodexDefaultModelId, false)
})

it("shows diff settings, temperature and rate limit controls by default", () => {
renderApiOptions({
apiConfiguration: {
Expand Down
Loading