From 8201322cd09e6b3e5d6e128c6b70670b85537dd3 Mon Sep 17 00:00:00 2001
From: Ammar
Date: Mon, 24 Nov 2025 14:01:46 -0600
Subject: [PATCH] =?UTF-8?q?=F0=9F=A4=96=20feat:=20upgrade=20Claude=20Opus?=
 =?UTF-8?q?=20to=204-5?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 mobile/app/workspace-settings.tsx | 13 ++-----------
 src/browser/utils/slashCommands/compact.test.ts | 2 +-
 src/common/constants/knownModels.ts | 10 +++++++++-
 src/common/utils/tokens/models-extra.ts | 17 +++++++++++++++++
 .../services/mock/scenarios/slashCommands.ts | 8 ++++----
 tests/e2e/scenarios/slashCommands.spec.ts | 8 ++++----
 tests/models/knownModels.test.ts | 11 +++++++----
 7 files changed, 44 insertions(+), 25 deletions(-)

diff --git a/mobile/app/workspace-settings.tsx b/mobile/app/workspace-settings.tsx
index 4a50776856..e6c3fff824 100644
--- a/mobile/app/workspace-settings.tsx
+++ b/mobile/app/workspace-settings.tsx
@@ -10,20 +10,11 @@ import { ThemedText } from "../src/components/ThemedText";
 import { useWorkspaceDefaults } from "../src/hooks/useWorkspaceDefaults";
 import type { ThinkingLevel, WorkspaceMode } from "../src/types/settings";
 import { supports1MContext } from "@/common/utils/ai/models";
+import { KNOWN_MODEL_OPTIONS } from "@/common/constants/knownModels";
 
 const MODE_TABS: WorkspaceMode[] = ["plan", "exec"];
 const THINKING_LEVELS: ThinkingLevel[] = ["off", "low", "medium", "high"];
 
-// Common models from MODEL_ABBREVIATIONS
-const AVAILABLE_MODELS = [
-  { label: "Claude Sonnet 4.5", value: "anthropic:claude-sonnet-4-5" },
-  { label: "Claude Haiku 4.5", value: "anthropic:claude-haiku-4-5" },
-  { label: "Claude Opus 4.1", value: "anthropic:claude-opus-4-1" },
-  { label: "GPT-5", value: "openai:gpt-5" },
-  { label: "GPT-5 Pro", value: "openai:gpt-5-pro" },
-  { label: "GPT-5 Codex", value: "openai:gpt-5-codex" },
-];
-
 function thinkingLevelToValue(level: ThinkingLevel): number {
   const index = THINKING_LEVELS.indexOf(level);
   return index >= 0 ? index : 0;
@@ -110,7 +101,7 @@ export default function WorkspaceSettings(): JSX.Element {
             }}
             dropdownIconColor={theme.colors.foregroundPrimary}
           >
-            {AVAILABLE_MODELS.map((model) => (
+            {KNOWN_MODEL_OPTIONS.map((model) => (
diff --git a/src/browser/utils/slashCommands/compact.test.ts b/src/browser/utils/slashCommands/compact.test.ts
--- a/src/browser/utils/slashCommands/compact.test.ts
+++ b/src/browser/utils/slashCommands/compact.test.ts
   });
 
   it("parses -m flag with full model string", () => {
-    const result = parseCommand("/compact -m anthropic:claude-opus-4-1");
+    const result = parseCommand(`/compact -m ${KNOWN_MODELS.OPUS.id}`);
     expect(result).toEqual({
       type: "compact",
       maxOutputTokens: undefined,
diff --git a/src/common/constants/knownModels.ts b/src/common/constants/knownModels.ts
index 13e72e314b..c4c4d1a5b5 100644
--- a/src/common/constants/knownModels.ts
+++ b/src/common/constants/knownModels.ts
@@ -2,6 +2,8 @@
  * Centralized model metadata. Update model versions here and everywhere else will follow.
  */
+import { formatModelDisplayName } from "../utils/ai/modelDisplay";
+
 type ModelProvider = "anthropic" | "openai" | "google" | "xai";
 
 interface KnownModelDefinition {
@@ -43,7 +45,7 @@
   },
   OPUS: {
     provider: "anthropic",
-    providerModelId: "claude-opus-4-1",
+    providerModelId: "claude-opus-4-5",
     aliases: ["opus"],
   },
   GPT: {
@@ -151,3 +153,9 @@ export const MODEL_NAMES: Record> = Object
   },
   {} as Record>
 );
+
+/** Picker-friendly list: { label, value } for each known model */
+export const KNOWN_MODEL_OPTIONS = Object.values(KNOWN_MODELS).map((model) => ({
+  label: formatModelDisplayName(model.providerModelId),
+  value: model.id,
+}));
diff --git a/src/common/utils/tokens/models-extra.ts b/src/common/utils/tokens/models-extra.ts
index 949c56b101..edefced357 100644
--- a/src/common/utils/tokens/models-extra.ts
+++ b/src/common/utils/tokens/models-extra.ts
@@ -22,6 +22,23 @@ interface ModelData {
 }
 
 export const modelsExtra: Record<string, ModelData> = {
+  // Claude Opus 4.5 - Released November 2025
+  // $5/M input, $25/M output (cheaper than Opus 4.1's $15/$75)
+  "claude-opus-4-5": {
+    max_input_tokens: 200000,
+    max_output_tokens: 32000,
+    input_cost_per_token: 0.000005, // $5 per million input tokens
+    output_cost_per_token: 0.000025, // $25 per million output tokens
+    cache_creation_input_token_cost: 0.00000625, // $6.25 per million tokens
+    cache_read_input_token_cost: 0.0000005, // $0.50 per million tokens
+    litellm_provider: "anthropic",
+    mode: "chat",
+    supports_function_calling: true,
+    supports_vision: true,
+    supports_reasoning: true,
+    supports_response_schema: true,
+  },
+
   // GPT-5 Pro - Released October 6, 2025 at DevDay
   // $15/M input, $120/M output
   // Only available via OpenAI's Responses API
diff --git a/src/node/services/mock/scenarios/slashCommands.ts b/src/node/services/mock/scenarios/slashCommands.ts
index 922abbd96e..674958fe1e 100644
--- a/src/node/services/mock/scenarios/slashCommands.ts
+++ b/src/node/services/mock/scenarios/slashCommands.ts
@@ -65,18 +65,18 @@ const modelStatusTurn: ScenarioTurn = {
       kind: "stream-start",
       delay: 0,
       messageId: "msg-slash-model-status",
-      model: "anthropic:claude-opus-4-1",
+      model: "anthropic:claude-opus-4-5",
     },
     {
       kind: "stream-delta",
       delay: STREAM_BASE_DELAY,
-      text: "Claude Opus 4.1 is now responding with enhanced reasoning capacity.",
+      text: "Claude Opus 4.5 is now responding with enhanced reasoning capacity.",
     },
     {
       kind: "stream-end",
       delay: STREAM_BASE_DELAY * 2,
       metadata: {
-        model: "anthropic:claude-opus-4-1",
+        model: "anthropic:claude-opus-4-5",
         inputTokens: 70,
         outputTokens: 54,
         systemMessageTokens: 12,
@@ -84,7 +84,7 @@
       parts: [
         {
           type: "text",
-          text: "I'm responding as Claude Opus 4.1, which you selected via /model opus. Let me know how to proceed.",
+          text: "I'm responding as Claude Opus 4.5, which you selected via /model opus. Let me know how to proceed.",
         },
       ],
     },
diff --git a/tests/e2e/scenarios/slashCommands.spec.ts b/tests/e2e/scenarios/slashCommands.spec.ts
index e751a747a6..32b3ae106c 100644
--- a/tests/e2e/scenarios/slashCommands.spec.ts
+++ b/tests/e2e/scenarios/slashCommands.spec.ts
@@ -109,17 +109,17 @@ test.describe("slash command flows", () => {
     ).toBeVisible();
 
     await ui.chat.sendMessage("/model opus");
-    await ui.chat.expectStatusMessageContains("Model changed to anthropic:claude-opus-4-1");
-    await expect(modeToggles.getByText("anthropic:claude-opus-4-1", { exact: true })).toBeVisible();
+    await ui.chat.expectStatusMessageContains("Model changed to anthropic:claude-opus-4-5");
+    await expect(modeToggles.getByText("anthropic:claude-opus-4-5", { exact: true })).toBeVisible();
 
     const timeline = await ui.chat.captureStreamTimeline(async () => {
       await ui.chat.sendMessage(SLASH_COMMAND_PROMPTS.MODEL_STATUS);
     });
 
     const streamStart = timeline.events.find((event) => event.type === "stream-start");
-    expect(streamStart?.model).toBe("anthropic:claude-opus-4-1");
+    expect(streamStart?.model).toBe("anthropic:claude-opus-4-5");
 
     await ui.chat.expectTranscriptContains(
-      "Claude Opus 4.1 is now responding with enhanced reasoning capacity."
+      "Claude Opus 4.5 is now responding with enhanced reasoning capacity."
     );
   });
diff --git a/tests/models/knownModels.test.ts b/tests/models/knownModels.test.ts
index dcb1166fb8..c880e7a784 100644
--- a/tests/models/knownModels.test.ts
+++ b/tests/models/knownModels.test.ts
@@ -8,6 +8,7 @@
 import { describe, test, expect } from "@jest/globals";
 import { KNOWN_MODELS } from "@/common/constants/knownModels";
 import modelsJson from "@/common/utils/tokens/models.json";
+import { modelsExtra } from "@/common/utils/tokens/models-extra";
 
 describe("Known Models Integration", () => {
   test("all known models exist in models.json", () => {
@@ -16,10 +17,10 @@
     for (const [key, model] of Object.entries(KNOWN_MODELS)) {
       const modelId = model.providerModelId;
 
-      // Check if model exists in models.json
+      // Check if model exists in models.json or models-extra
       // xAI models are prefixed with "xai/" in models.json
       const lookupKey = model.provider === "xai" ? `xai/${modelId}` : modelId;
-      if (!(lookupKey in modelsJson)) {
+      if (!(lookupKey in modelsJson) && !(modelId in modelsExtra)) {
         missingModels.push(`${key}: ${model.provider}:${modelId}`);
       }
     }
@@ -34,11 +35,13 @@
   });
 
   test("all known models have required metadata", () => {
-    for (const [key, model] of Object.entries(KNOWN_MODELS)) {
+    for (const [, model] of Object.entries(KNOWN_MODELS)) {
       const modelId = model.providerModelId;
       // xAI models are prefixed with "xai/" in models.json
       const lookupKey = model.provider === "xai" ? `xai/${modelId}` : modelId;
-      const modelData = modelsJson[lookupKey as keyof typeof modelsJson] as Record<string, unknown>;
+      const modelData =
+        (modelsJson[lookupKey as keyof typeof modelsJson] as Record<string, unknown>) ??
+        (modelsExtra[modelId as keyof typeof modelsExtra] as unknown as Record<string, unknown>);
       expect(modelData).toBeDefined();
 
       // Check that basic metadata fields exist (not all models have all fields)
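
For reference, the per-token cost fields written to models-extra.ts are simply dollars-per-million divided by 1,000,000. The minimal TypeScript sketch below shows that conversion; the helper name and the sample calculation are illustrative only (the 70-input / 54-output token counts mirror the mocked /model status turn above) and are not code from this patch.

// Illustrative only: converts a $/million-token price into the per-token
// values stored in modelsExtra (e.g. $5/M -> 0.000005).
function perMillionToPerToken(usdPerMillion: number): number {
  return usdPerMillion / 1_000_000;
}

// Rough cost of the mocked /model status turn at $5/M input, $25/M output:
const inputCostPerToken = perMillionToPerToken(5);   // 0.000005
const outputCostPerToken = perMillionToPerToken(25); // 0.000025
const turnCostUsd = 70 * inputCostPerToken + 54 * outputCostPerToken; // ~= 0.0017 USD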