Skip to content

Commit bc841f9

Browse files
ThomasK33 authored and ammar-agent committed
fix: correct gpt-5.1-codex-max pricing to match codex
Same pricing as gpt-5.1-codex: $1.25/M input, $10/M output. Also aligned max token limits with the codex model.

Change-Id: I030014df05a5ccae62f5c93d7435ec2363d23317
Signed-off-by: Thomas Kosiewski <tk@coder.com>
1 parent 5d30466 commit bc841f9

File tree

3 files changed

+7
-8
lines changed

3 files changed

+7
-8
lines changed

src/common/orpc/schemas/telemetry.ts

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -29,7 +29,7 @@ const FrontendPlatformInfoSchema = z.object({
2929
});
3030

3131
// Thinking level enum (matches payload.ts TelemetryThinkingLevel)
32-
const TelemetryThinkingLevelSchema = z.enum(["off", "low", "medium", "high"]);
32+
const TelemetryThinkingLevelSchema = z.enum(["off", "low", "medium", "high", "xhigh"]);
3333

3434
// Command type enum (matches payload.ts TelemetryCommandType)
3535
const TelemetryCommandTypeSchema = z.enum([

src/common/telemetry/payload.ts

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -86,7 +86,7 @@ export interface WorkspaceSwitchedPayload {
8686
/**
8787
* Thinking level for extended thinking feature
8888
*/
89-
export type TelemetryThinkingLevel = "off" | "low" | "medium" | "high";
89+
export type TelemetryThinkingLevel = "off" | "low" | "medium" | "high" | "xhigh";
9090

9191
/**
9292
* Chat/AI interaction events

src/common/utils/tokens/models-extra.ts

Lines changed: 5 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -90,20 +90,19 @@ export const modelsExtra: Record<string, ModelData> = {
9090
},
9191

9292
// GPT-5.1-Codex-Max - Extended reasoning model with xhigh support
93-
// Pricing TBD - using estimated values based on Codex pricing pattern
93+
// Same pricing as gpt-5.1-codex: $1.25/M input, $10/M output
9494
// Supports 5 reasoning levels: off, low, medium, high, xhigh
9595
"gpt-5.1-codex-max": {
96-
max_input_tokens: 400000, // Estimated based on compaction capability
97-
max_output_tokens: 272000, // Same as gpt-5-pro
98-
input_cost_per_token: 0.00002, // $20/M - placeholder estimate
99-
output_cost_per_token: 0.00008, // $80/M - placeholder estimate
96+
max_input_tokens: 272000, // Same as gpt-5.1-codex
97+
max_output_tokens: 128000, // Same as gpt-5.1-codex
98+
input_cost_per_token: 0.00000125, // $1.25 per million input tokens
99+
output_cost_per_token: 0.00001, // $10 per million output tokens
100100
litellm_provider: "openai",
101101
mode: "chat",
102102
supports_function_calling: true,
103103
supports_vision: true,
104104
supports_reasoning: true,
105105
supports_response_schema: true,
106-
knowledge_cutoff: "2025-06-30", // Estimated
107106
supported_endpoints: ["/v1/responses"],
108107
},
109108
};

0 commit comments

Comments (0)