
Commit 2f4c92e

ericyangpan and claude committed
feat(manifests): add new model manifests for latest AI models
Add manifests for Claude Opus 4.5, Gemini 3 Flash, GLM-4-7, GPT-5-2, Kat Coder Pro v1, Kimi K2 Thinking, and MiniMax M2-1. Co-Authored-By: Claude Sonnet 4.5 <noreply@anthropic.com>
1 parent e5a9b63 commit 2f4c92e

7 files changed: 504 additions & 0 deletions
manifests/models/claude-opus-4-5.json

Lines changed: 72 additions & 0 deletions
@@ -0,0 +1,72 @@
{
  "$schema": "../$schemas/model.schema.json",
  "id": "claude-opus-4-5",
  "name": "Claude Opus 4.5",
  "description": "Anthropic's flagship Claude Opus 4.5: strong reasoning, 200K context, multimodal (text, image, PDF).",
  "translations": {
    "zh-Hans": {
      "description": "Anthropic 旗舰 Claude Opus 4.5:更强推理、200K 上下文、多模态(文本/图像/PDF)。"
    },
    "de": {
      "description": "Anthropics Flaggschiff Claude Opus 4.5: starkes Reasoning, 200K Kontext, multimodal (Text, Bild, PDF)."
    },
    "ko": {
      "description": "Anthropic의 플래그십 Claude Opus 4.5: 강력한 추론, 200K 컨텍스트, 멀티모달(텍스트·이미지·PDF)."
    },
    "es": {
      "description": "Claude Opus 4.5, modelo insignia de Anthropic: razonamiento avanzado, 200K de contexto, multimodal (texto, imagen, PDF)."
    },
    "fr": {
      "description": "Claude Opus 4.5, modele phare d'Anthropic : raisonnement avance, contexte 200K, multimodal (texte, image, PDF)."
    },
    "id": {
      "description": "Model unggulan Anthropic Claude Opus 4.5: penalaran kuat, konteks 200K, multimodal (teks, gambar, PDF)."
    },
    "ja": {
      "description": "Anthropicの旗艦Claude Opus 4.5:強力な推論、200Kコンテキスト、マルチモーダル(テキスト・画像・PDF)。"
    },
    "pt": {
      "description": "Claude Opus 4.5, modelo flagship da Anthropic: raciocinio forte, contexto 200K, multimodal (texto, imagem, PDF)."
    },
    "ru": {
      "description": "Флагман Anthropic Claude Opus 4.5: сильное рассуждение, контекст 200K, мультимодальный ввод (текст, изображение, PDF)."
    },
    "tr": {
      "description": "Anthropic'in amiral gemisi Claude Opus 4.5: guclu muhakeme, 200K baglam, cok modlu (metin, resim, PDF)."
    },
    "zh-Hant": {
      "description": "Anthropic 旗艦 Claude Opus 4.5:更強推理、200K 上下文、多模態(文字/圖像/PDF)。"
    }
  },
  "verified": true,
  "websiteUrl": "https://www.anthropic.com",
  "docsUrl": "https://docs.anthropic.com/claude/docs/models-overview",
  "vendor": "Anthropic",
  "size": "Unknown",
  "contextWindow": 200000,
  "maxOutput": 64000,
  "tokenPricing": {
    "input": 5,
    "output": 25,
    "cache": 0.5
  },
  "releaseDate": "2025-11-24",
  "lifecycle": "latest",
  "knowledgeCutoff": "2025-05",
  "inputModalities": ["text", "image", "pdf"],
  "capabilities": ["function-calling", "tool-choice", "structured-outputs", "reasoning"],
  "benchmarks": {
    "sweBench": null,
    "terminalBench": null,
    "sciCode": null,
    "liveCodeBench": null,
    "mmmu": null,
    "mmmuPro": null,
    "webDevArena": null
  },
  "platformUrls": {
    "huggingface": null,
    "artificialAnalysis": "https://artificialanalysis.ai/models/claude-opus-4-5",
    "openrouter": "https://openrouter.ai/anthropic/claude-opus-4.5"
  }
}
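
All of the manifests in this commit follow the same shape. For orientation, here is a rough TypeScript sketch of that shape, inferred purely from the JSON in this diff — the field names mirror the files above, but the type names, the nullability, and the per-million-token pricing unit are assumptions, not something read out of model.schema.json.

// Hypothetical sketch of the manifest shape, inferred from the files in this commit.
// model.schema.json is the source of truth; this is only a reading aid.
interface ModelManifest {
  $schema: string;
  id: string;
  name: string;
  description: string;
  translations: Record<string, { description: string }>; // keyed by locale, e.g. "zh-Hans"
  verified: boolean;
  websiteUrl: string;
  docsUrl: string;
  vendor: string;
  size: string;                  // e.g. "358B" or "Unknown"
  contextWindow: number;         // tokens
  maxOutput: number;             // tokens
  tokenPricing: {
    input: number;               // assumed USD per million tokens
    output: number;
    cache: number | null;        // null in kat-coder-pro-v1
  };
  releaseDate: string;           // "YYYY-MM-DD"
  lifecycle: string;             // "latest" in every file here
  knowledgeCutoff: string;       // "YYYY-MM" or "YYYY-MM-DD"
  inputModalities: string[];     // e.g. ["text", "image", "pdf"]
  capabilities: string[];        // e.g. ["function-calling", "reasoning"]
  benchmarks: Record<string, number | null>; // sweBench, terminalBench, ...
  platformUrls: {
    huggingface: string | null;
    artificialAnalysis: string | null;
    openrouter: string | null;
  };
}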

manifests/models/gemini-3-flash.json

Lines changed: 72 additions & 0 deletions
@@ -0,0 +1,72 @@
{
  "$schema": "../$schemas/model.schema.json",
  "id": "gemini-3-flash",
  "name": "Gemini 3 Flash",
  "description": "Google's fast reasoning model for efficient advanced reasoning, coding, math, and science. Built-in thinking ensures greater accuracy with optimized performance.",
  "translations": {
    "zh-Hans": {
      "description": "Google 的下一代快速推理模型,专为高效的高级推理、编码、数学和科学任务设计。内置\"思考\"能力能够在优化性能的同时提供更高的准确性。"
    },
    "de": {
      "description": "Googles schnelles Reasoning-Modell für effizientes fortgeschrittenes Reasoning, Codierung, Mathematik und Wissenschaft."
    },
    "ko": {
      "description": "고급 추론, 코딩, 수학 및 과학 작업을 효율적으로 수행하기 위해 설계된 Google의 차세대 빠른 추론 모델입니다. 통합 \"생각\" 능력은 최적화된 성능으로 더 높은 정확도를 가능하게 합니다."
    },
    "es": {
      "description": "Modelo de razonamiento rapido de Google para razonamiento avanzado eficiente, codificacion, matematicas y ciencia."
    },
    "fr": {
      "description": "Modele de raisonnement rapide de Google pour raisonnement avance efficace, codage, mathematiques et science."
    },
    "id": {
      "description": "Model penalaran cepat Google untuk penalaran tingkat lanjut efisien, coding, matematika, dan sains."
    },
    "ja": {
      "description": "Googleの次世代高速推論モデルで、効率的な高度な推論、コーディング、数学、科学タスクのために設計されています。統合された\"思考\"能力により、最適化されたパフォーマンスでより高い精度を実現します。"
    },
    "pt": {
      "description": "Modelo de raciocinio rapido do Google para raciocinio avancado eficiente, codificacao, matematica e ciencia."
    },
    "ru": {
      "description": "Быстрая модель рассуждения Google для эффективного рассуждения, программирования, математики и науки."
    },
    "tr": {
      "description": "Google'in hizli muhakeme modeli, gelismis muhakeme, kodlama, matematik ve bilim icin tasarlandi."
    },
    "zh-Hant": {
      "description": "Google 的下一代快速推理模型,專為高效的高級推理、編碼、數學和科學任務設計。內置\"思考\"能力能夠在優化性能的同時提供更高的準確性。"
    }
  },
  "verified": true,
  "websiteUrl": "https://deepmind.google",
  "docsUrl": "https://ai.google.dev/gemini-api/docs/models#gemini-3-flash",
  "vendor": "Google",
  "size": "Unknown",
  "contextWindow": 1048576,
  "maxOutput": 65536,
  "tokenPricing": {
    "input": 0.5,
    "cache": 0.05,
    "output": 3
  },
  "releaseDate": "2025-12-17",
  "lifecycle": "latest",
  "knowledgeCutoff": "2025-01",
  "inputModalities": ["text", "image", "pdf", "audio", "video"],
  "capabilities": ["function-calling", "tool-choice", "structured-outputs", "reasoning"],
  "benchmarks": {
    "sweBench": null,
    "terminalBench": null,
    "sciCode": null,
    "liveCodeBench": null,
    "mmmu": null,
    "mmmuPro": null,
    "webDevArena": null
  },
  "platformUrls": {
    "huggingface": null,
    "artificialAnalysis": "https://artificialanalysis.ai/models/gemini-3-flash",
    "openrouter": "https://openrouter.ai/google/gemini-3-flash"
  }
}

manifests/models/glm-4-7.json

Lines changed: 72 additions & 0 deletions
@@ -0,0 +1,72 @@
{
  "$schema": "../$schemas/model.schema.json",
  "id": "glm-4-7",
  "name": "GLM 4.7",
  "description": "GLM 4.7 is an advanced language model from Z.ai with 128K context window, optimized for enhanced reasoning and multilingual understanding.",
  "translations": {
    "zh-Hans": {
      "description": "GLM 4.7 是 Z.ai 推出的先进语言模型,具备 128K 上下文窗口,专为增强推理和多语言理解优化。"
    },
    "de": {
      "description": "GLM 4.7 ist ein fortschrittliches Sprachmodell von Z.ai mit 128K-Kontextfenster, optimiert für verbessertes Schließen und mehrsprachiges Verständnis."
    },
    "ko": {
      "description": "GLM 4.7은 Z.ai의 고급 언어 모델로, 128K 컨텍스트 윈도를 갖추고 있으며 향상된 추론 및 다국어 이해를 위해 최적화되었습니다."
    },
    "es": {
      "description": "GLM 4.7 es un modelo de lenguaje avanzado de Z.ai con ventana de contexto de 128K, optimizado para razonamiento mejorado y comprensión multilingüe."
    },
    "fr": {
      "description": "GLM 4.7 est un modele de langage avance de Z.ai avec une fenetre de contexte de 128K, optimise pour le raisonnement ameliore et la comprehension multilingue."
    },
    "id": {
      "description": "GLM 4.7 adalah model bahasa canggih dari Z.ai dengan jendela konteks 128K, dioptimalkan untuk penalaran yang ditingkatkan dan pemahaman multibahasa."
    },
    "ja": {
      "description": "Z.aiの高度な言語モデルGLM 4.7は、128Kコンテキストウィンドウを持ち、強化された推論と多言語理解のために最適化されています。"
    },
    "pt": {
      "description": "GLM 4.7 é um modelo de linguagem avançado de Z.ai com janela de contexto de 128K, otimizado para raciocínio aprimorado e compreensão multilíngue."
    },
    "ru": {
      "description": "GLM 4.7 - это продвинутая языковая модель от Z.ai с окном контекста 128K, оптимизированная для улучшенного рассуждения и многоязычного понимания."
    },
    "tr": {
      "description": "GLM 4.7, Z.ai'den gelen gelismis bir dil modelidir ve 128K baglam penceresi ile gelismis muhakeme ve cok dilli anlas icin optimize edilmistir."
    },
    "zh-Hant": {
      "description": "GLM 4.7 是 Z.ai 推出的先進語言模型,具備 128K 上下文視窗,專為增強推理和多語言理解優化。"
    }
  },
  "verified": true,
  "websiteUrl": "https://z.ai",
  "docsUrl": "https://huggingface.co/zai-org/GLM-4.7",
  "vendor": "Z.ai",
  "size": "358B",
  "contextWindow": 204800,
  "maxOutput": 131072,
  "tokenPricing": {
    "input": 0.6,
    "output": 2.2,
    "cache": 0.11
  },
  "releaseDate": "2025-12-22",
  "lifecycle": "latest",
  "knowledgeCutoff": "2025-12",
  "inputModalities": ["text"],
  "capabilities": ["function-calling", "tool-choice", "structured-outputs", "reasoning"],
  "benchmarks": {
    "sweBench": null,
    "terminalBench": null,
    "sciCode": null,
    "liveCodeBench": null,
    "mmmu": null,
    "mmmuPro": null,
    "webDevArena": null
  },
  "platformUrls": {
    "huggingface": "https://huggingface.co/zai-org/GLM-4.7",
    "artificialAnalysis": "https://artificialanalysis.ai/models/glm-4-7",
    "openrouter": "https://openrouter.ai/z-ai/glm-4-7"
  }
}

manifests/models/gpt-5-2.json

Lines changed: 72 additions & 0 deletions
@@ -0,0 +1,72 @@
{
  "$schema": "../$schemas/model.schema.json",
  "id": "gpt-5-2",
  "name": "GPT-5.2",
  "description": "OpenAI's new generation high-performance model, building upon GPT-5.1 with enhanced reasoning capabilities, larger context understanding, and improved coding performance features.",
  "translations": {
    "zh-Hans": {
      "description": "OpenAI 的新一代高性能模型,在 GPT-5.1 的基础上进行了增强,具有更强的推理能力、更大的上下文理解和改进的编码性能功能。"
    },
    "de": {
      "description": "OpenAIs neuartiges Hochleistungsmodell, das auf GPT-5.1 aufbaut mit verbessertem Reasoning, erweitertem Kontextverstandnis und verbesserter Coding-Leistung."
    },
    "ko": {
      "description": "OpenAI의 차세대 고성능 모델로, GPT-5.1을 기반으로 향상된 추론 능력, 더 큰 컨텍스트 이해 및 개선된 코딩 성능 기능을 갖추고 있습니다."
    },
    "es": {
      "description": "Modelo de alto rendimiento de nueva generacion de OpenAI, mejorando sobre GPT-5.1 con razonamiento mejorado y mejor codificacion."
    },
    "fr": {
      "description": "Modele haute performance de nouvelle generation d'OpenAI, sur GPT-5.1 avec raisonnement ameliore et meilleur codage."
    },
    "id": {
      "description": "Model kinerja tinggi generasi baru dari OpenAI, membangun di atas GPT-5.1 dengan kemampuan penalaran yang ditingkatkan, pemahaman konteks yang lebih besar, dan fitur performa coding yang lebih baik."
    },
    "ja": {
      "description": "OpenAIの次世代高性能モデルで、GPT-5.1をベースに強化された推論能力、より大きなコンテキスト理解、そして改善されたコーディングパフォーマンス機能を備えています。"
    },
    "pt": {
      "description": "Modelo alto desempenho nova geracao OpenAI, baseado no GPT-5.1 com raciocinio aprimorado e melhor codificacao."
    },
    "ru": {
      "description": "Модель новой генерации высокой производительности от OpenAI, на базе GPT-5.1 с улучшенным рассуждением и кодированием."
    },
    "tr": {
      "description": "OpenAI'nin yeni nesil yuksek performansli modeli, GPT-5.1 uzerine insa edilmis gelismis muhakeme yetenekleri, daha genis baglam anlayisi ve gelismis kodlama performans ozellikleri ile."
    },
    "zh-Hant": {
      "description": "OpenAI 的新一代高性能模型,在 GPT-5.1 的基礎上進行了增強,具有更強的推理能力、更大的上下文理解和改進的編碼性能功能。"
    }
  },
  "verified": true,
  "websiteUrl": "https://openai.com",
  "docsUrl": "https://platform.openai.com/docs/models/gpt-5.2",
  "vendor": "OpenAI",
  "size": "Unknown",
  "contextWindow": 400000,
  "maxOutput": 128000,
  "tokenPricing": {
    "input": 1.75,
    "cache": 0.175,
    "output": 14
  },
  "releaseDate": "2025-12-11",
  "lifecycle": "latest",
  "knowledgeCutoff": "2025-08-31",
  "inputModalities": ["text", "image"],
  "capabilities": ["function-calling", "tool-choice", "structured-outputs", "reasoning"],
  "benchmarks": {
    "sweBench": null,
    "terminalBench": null,
    "sciCode": null,
    "liveCodeBench": null,
    "mmmu": null,
    "mmmuPro": null,
    "webDevArena": null
  },
  "platformUrls": {
    "huggingface": null,
    "artificialAnalysis": "https://artificialanalysis.ai/models/gpt-5-2",
    "openrouter": "https://openrouter.ai/openai/gpt-5.2"
  }
}
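
The tokenPricing block reads most naturally as a per-million-token rate in USD (e.g. 1.75 in / 14 out for GPT-5.2 above), though the unit is not stated in the manifests themselves. Under that assumption, a quick sketch of how a consumer of these manifests might turn the numbers into a request cost — the helper name and the treatment of cached tokens are illustrative, not part of this repository:

// Hypothetical helper: estimate request cost from a manifest's tokenPricing.
// Assumes rates are USD per million tokens and that cached input is billed at
// the "cache" rate; both are assumptions, not facts from the manifests.
function estimateCostUSD(
  pricing: { input: number; output: number; cache: number | null },
  usage: { inputTokens: number; outputTokens: number; cachedTokens?: number }
): number {
  const perToken = (rate: number) => rate / 1_000_000;
  const cached = usage.cachedTokens ?? 0;
  const freshInput = usage.inputTokens - cached;
  return (
    freshInput * perToken(pricing.input) +
    cached * perToken(pricing.cache ?? pricing.input) +
    usage.outputTokens * perToken(pricing.output)
  );
}

// Example with the GPT-5.2 numbers from this diff: 1.75 in / 14 out / 0.175 cache.
const cost = estimateCostUSD(
  { input: 1.75, output: 14, cache: 0.175 },
  { inputTokens: 120_000, outputTokens: 4_000, cachedTokens: 80_000 }
);
console.log(cost.toFixed(4)); // 0.1400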

manifests/models/kat-coder-pro-v1.json

Lines changed: 72 additions & 0 deletions
@@ -0,0 +1,72 @@
{
  "$schema": "../$schemas/model.schema.json",
  "id": "kat-coder-pro-v1",
  "name": "KAT-Coder-Pro V1",
  "description": "KwaiKAT's most advanced agentic coding model in the KAT-Coder series. Designed for autonomous programming with tool calling, multi-turn interaction, and instruction following.",
  "translations": {
    "zh-Hans": {
      "description": "KwaiKAT 最先进的代理编码模型,属于 KAT-Coder 系列。专为自主编程设计,具备工具调用、多轮交互和指令遵循能力。"
    },
    "de": {
      "description": "KwaiKATs modernstes agentic Coding-Modell aus der KAT-Coder-Serie. Speziell fur autonomes Programmierung entwickelt mit Tool-Calling, Multi-Turn-Interaktion und Instruction-Following-Fahigkeiten."
    },
    "ko": {
      "description": "KwaiKAT의 KAT-Coder 시리즈에서 가장 진보된 에이전트 코딩 모델입니다. 도구 호출, 다중 턴 상호작용 및 명령 따르기 기능을 갖춘 자율 프로그래밍을 위해 설계되었습니다."
    },
    "es": {
      "description": "Modelo de codificacion agentico mas avanzado de KwaiKAT en la serie KAT-Coder. Diseñado para programacion autonoma con llamadas a herramientas, interaccion multi-turno y seguimiento de instrucciones."
    },
    "fr": {
      "description": "Le modele de codage agentique le plus avance de KwaiKAT dans la serie KAT-Coder. Conçu pour la programmation autonome avec appel d'outils, interaction multi-tours et suivi d'instructions."
    },
    "id": {
      "description": "Model coding agentik paling canggih dari KwaiKAT dalam seri KAT-Coder. Dirancang khusus untuk pemrograman otonom dengan kemampuan pemanggilan alat, interaksi multi-turn, dan pengikutan instruksi."
    },
    "ja": {
      "description": "KwaiKATのKAT-Coderシリーズで最も高度なエージェント型コーディングモデル。ツール呼び出し、マルチターン対話、命令追従機能を備えた自律プログラミング向けに設計されています。"
    },
    "pt": {
      "description": "Modelo de codificacao agentico mais avancado da KwaiKAT na serie KAT-Coder. Projetado para programacao autonoma com chamada de ferramentas, interacao multi-turno e seguimento de instrucoes."
    },
    "ru": {
      "description": "Самая продвинутая агентная модель кодирования KwaiKAT в серии KAT-Coder. Разработана для автономного программирования с вызовом инструментов, многоходового взаимодействия и выполнения инструкций."
    },
    "tr": {
      "description": "KwaiKAT'in KAT-Coder serisindeki en gelismis agentik kodlama modeli. Ara cagirma, cok turlu etkilesim ve talimat takip etme yetenekleri ile otonom programlama icin tasarlanmistir."
    },
    "zh-Hant": {
      "description": "KwaiKAT 最先進的代理編碼模型,屬於 KAT-Coder 系列。專為自主編程設計,具備工具調用、多輪交互和指令遵循能力。"
    }
  },
  "verified": false,
  "websiteUrl": "https://openrouter.ai/kwaipilot",
  "docsUrl": "https://openrouter.ai/kwaipilot/kat-coder-pro:free",
  "vendor": "KwaiKAT",
  "size": "Unknown",
  "contextWindow": 256000,
  "maxOutput": 128000,
  "tokenPricing": {
    "input": 0,
    "output": 0,
    "cache": null
  },
  "releaseDate": "2025-11-11",
  "lifecycle": "latest",
  "knowledgeCutoff": "2025-11",
  "inputModalities": ["text"],
  "capabilities": ["function-calling", "tool-choice", "structured-outputs", "reasoning"],
  "benchmarks": {
    "sweBench": null,
    "terminalBench": 8.5,
    "sciCode": 36.6,
    "liveCodeBench": 74.7,
    "mmmu": null,
    "mmmuPro": null,
    "webDevArena": null
  },
  "platformUrls": {
    "huggingface": null,
    "artificialAnalysis": "https://artificialanalysis.ai/models/kat-coder-pro-v1",
    "openrouter": "https://openrouter.ai/kwaipilot/kat-coder-pro:free"
  }
}
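
Every manifest points at ../$schemas/model.schema.json, so a contributor could sanity-check a new file locally before committing it. A minimal sketch using Ajv — the validator choice, the npm packages, and this script are assumptions about tooling, not something this repository is known to ship:

// Minimal validation sketch using Ajv; the repo's actual tooling may differ.
import Ajv from "ajv";
import addFormats from "ajv-formats"; // only needed if the schema uses string formats
import { readFileSync } from "node:fs";

const readJson = (path: string) => JSON.parse(readFileSync(path, "utf8"));

// Paths inferred from the "$schema": "../$schemas/model.schema.json" references above.
const schema = readJson("manifests/$schemas/model.schema.json");
const manifest = readJson("manifests/models/claude-opus-4-5.json");

const ajv = new Ajv({ allErrors: true });
addFormats(ajv);

const validate = ajv.compile(schema);
if (validate(manifest)) {
  console.log("manifest is valid");
} else {
  console.error(validate.errors);
  process.exitCode = 1;
}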
