Commit 20c0858

fix: correct gpt-5.1-codex-max pricing to match codex
Same pricing as gpt-5.1-codex: $1.25/M input, $10/M output.
Also aligned max token limits with the codex model.

Change-Id: I030014df05a5ccae62f5c93d7435ec2363d23317
Signed-off-by: Thomas Kosiewski <tk@coder.com>
1 parent ecc4440 commit 20c0858

1 file changed (+5, -6 lines)

src/common/utils/tokens/models-extra.ts

Lines changed: 5 additions & 6 deletions

@@ -90,20 +90,19 @@ export const modelsExtra: Record<string, ModelData> = {
   },

   // GPT-5.1-Codex-Max - Extended reasoning model with xhigh support
-  // Pricing TBD - using estimated values based on Codex pricing pattern
+  // Same pricing as gpt-5.1-codex: $1.25/M input, $10/M output
   // Supports 5 reasoning levels: off, low, medium, high, xhigh
   "gpt-5.1-codex-max": {
-    max_input_tokens: 400000, // Estimated based on compaction capability
-    max_output_tokens: 272000, // Same as gpt-5-pro
-    input_cost_per_token: 0.00002, // $20/M - placeholder estimate
-    output_cost_per_token: 0.00008, // $80/M - placeholder estimate
+    max_input_tokens: 272000, // Same as gpt-5.1-codex
+    max_output_tokens: 128000, // Same as gpt-5.1-codex
+    input_cost_per_token: 0.00000125, // $1.25 per million input tokens
+    output_cost_per_token: 0.00001, // $10 per million output tokens
     litellm_provider: "openai",
     mode: "chat",
     supports_function_calling: true,
     supports_vision: true,
     supports_reasoning: true,
     supports_response_schema: true,
-    knowledge_cutoff: "2025-06-30", // Estimated
     supported_endpoints: ["/v1/responses"],
   },
 };
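
For context, the per-token values in the diff are simply the advertised dollar-per-million rates divided by 1,000,000 ($1.25/M -> 0.00000125, $10/M -> 0.00001). The TypeScript sketch below illustrates that conversion and a request-cost estimate; the trimmed-down ModelData shape and the estimateCostUsd helper are hypothetical, for illustration only, and are not part of this commit or of models-extra.ts.

// Minimal sketch: only the pricing-relevant fields are modeled here.
interface ModelData {
  max_input_tokens: number;
  max_output_tokens: number;
  input_cost_per_token: number;  // USD per single token
  output_cost_per_token: number; // USD per single token
}

// Values from the updated gpt-5.1-codex-max entry.
const gpt51CodexMax: ModelData = {
  max_input_tokens: 272_000,
  max_output_tokens: 128_000,
  input_cost_per_token: 1.25 / 1_000_000, // $1.25/M input  -> 0.00000125 per token
  output_cost_per_token: 10 / 1_000_000,  // $10/M output   -> 0.00001 per token
};

// Hypothetical helper: estimate the USD cost of a single request.
function estimateCostUsd(model: ModelData, inputTokens: number, outputTokens: number): number {
  return (
    inputTokens * model.input_cost_per_token +
    outputTokens * model.output_cost_per_token
  );
}

// Example: 100k input tokens + 10k output tokens
// 100_000 * 0.00000125 + 10_000 * 0.00001 = 0.125 + 0.10, i.e. about $0.225
console.log(estimateCostUsd(gpt51CodexMax, 100_000, 10_000));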
