@@ -8,33 +8,27 @@
"default_small_model_id": "zai-org/GLM-4.7-Flash",
"models": [
{
- "id": "zai-org/GLM-4.6",
- "name": "GLM 4.6",
- "cost_per_1m_in": 0.35,
- "cost_per_1m_out": 1.5,
- "cost_per_1m_in_cached": 0.175,
- "cost_per_1m_out_cached": 0.7,
- "context_window": 200000,
- "default_max_tokens": 20000,
- "can_reason": true,
- "reasoning_levels": [
- "low",
- "medium",
- "high"
- ],
- "default_reasoning_effort": "medium",
+ "id": "Intel/Qwen3-Coder-480B-A35B-Instruct-int4-mixed-ar",
+ "name": "Intel: Qwen3 Coder 480B A35B Instruct INT4 Mixed AR",
+ "cost_per_1m_in": 0.22,
+ "cost_per_1m_out": 0.95,
+ "cost_per_1m_in_cached": 0.11,
+ "cost_per_1m_out_cached": 0.44,
+ "context_window": 106000,
+ "default_max_tokens": 10600,
+ "can_reason": false,
"supports_attachments": false,
"options": {}
},
{
- "id": "zai-org/GLM-4.7",
- "name": "GLM 4.7",
- "cost_per_1m_in": 0.3,
- "cost_per_1m_out": 1.4,
- "cost_per_1m_in_cached": 0.15,
- "cost_per_1m_out_cached": 0.6,
- "context_window": 200000,
- "default_max_tokens": 20000,
+ "id": "meta-llama/Llama-3.3-70B-Instruct",
+ "name": "Meta: Llama 3.3 70B Instruct",
+ "cost_per_1m_in": 0.1,
+ "cost_per_1m_out": 0.32,
+ "cost_per_1m_in_cached": 0.05,
+ "cost_per_1m_out_cached": 0.2,
+ "context_window": 128000,
+ "default_max_tokens": 12800,
"can_reason": true,
"reasoning_levels": [
"low",
@@ -46,27 +40,21 @@
"options": {}
},
{
- "id": "zai-org/GLM-4.7-Flash",
- "name": "GLM 4.7 Flash",
- "cost_per_1m_in": 0.07,
- "cost_per_1m_out": 0.39999999999999997,
- "cost_per_1m_in_cached": 0.035,
- "cost_per_1m_out_cached": 0.14,
- "context_window": 200000,
- "default_max_tokens": 20000,
- "can_reason": true,
- "reasoning_levels": [
- "low",
- "medium",
- "high"
- ],
- "default_reasoning_effort": "medium",
- "supports_attachments": false,
+ "id": "mistralai/Mistral-Large-Instruct-2411",
+ "name": "Mistral: Mistral Large Instruct 2411",
+ "cost_per_1m_in": 2,
+ "cost_per_1m_out": 6,
+ "cost_per_1m_in_cached": 1,
+ "cost_per_1m_out_cached": 4,
+ "context_window": 128000,
+ "default_max_tokens": 12800,
+ "can_reason": false,
+ "supports_attachments": true,
"options": {}
},
{
"id": "moonshotai/Kimi-K2-Instruct-0905",
- "name": "Kimi K2 Instruct 0905",
+ "name": "MoonshotAI: Kimi K2 Instruct 0905",
"cost_per_1m_in": 0.39,
"cost_per_1m_out": 1.9,
"cost_per_1m_in_cached": 0.195,
@@ -79,7 +67,7 @@
},
{
"id": "moonshotai/Kimi-K2-Thinking",
- "name": "Kimi K2 Thinking",
+ "name": "MoonshotAI: Kimi K2 Thinking",
"cost_per_1m_in": 0.32,
"cost_per_1m_out": 0.48,
"cost_per_1m_in_cached": 0.16,
@@ -97,14 +85,27 @@
"options": {}
},
{
- "id": "meta-llama/Llama-3.3-70B-Instruct",
- "name": "Llama 3.3 70B Instruct",
- "cost_per_1m_in": 0.09999999999999999,
- "cost_per_1m_out": 0.32,
- "cost_per_1m_in_cached": 0.049999999999999996,
- "cost_per_1m_out_cached": 0.19999999999999998,
- "context_window": 128000,
- "default_max_tokens": 12800,
+ "id": "Qwen/Qwen3-Next-80B-A3B-Instruct",
+ "name": "Qwen: Qwen3 Next 80B A3B Instruct",
+ "cost_per_1m_in": 0.06,
+ "cost_per_1m_out": 0.6,
+ "cost_per_1m_in_cached": 0.03,
+ "cost_per_1m_out_cached": 0.12,
+ "context_window": 262144,
+ "default_max_tokens": 26214,
+ "can_reason": false,
+ "supports_attachments": false,
+ "options": {}
+ },
+ {
+ "id": "zai-org/GLM-4.6",
+ "name": "Z.ai: GLM 4.6",
+ "cost_per_1m_in": 0.35,
+ "cost_per_1m_out": 1.5,
+ "cost_per_1m_in_cached": 0.175,
+ "cost_per_1m_out_cached": 0.7,
+ "context_window": 200000,
+ "default_max_tokens": 20000,
"can_reason": true,
"reasoning_levels": [
"low",
@@ -116,41 +117,40 @@
"options": {}
},
{
- "id": "mistralai/Mistral-Large-Instruct-2411",
- "name": "Mistral Large Instruct 2411",
- "cost_per_1m_in": 2,
- "cost_per_1m_out": 6,
- "cost_per_1m_in_cached": 1,
- "cost_per_1m_out_cached": 4,
- "context_window": 128000,
- "default_max_tokens": 12800,
- "can_reason": false,
- "supports_attachments": true,
- "options": {}
- },
- {
- "id": "Intel/Qwen3-Coder-480B-A35B-Instruct-int4-mixed-ar",
- "name": "Qwen3 Coder 480B A35B Instruct int4 mixed ar",
- "cost_per_1m_in": 0.22,
- "cost_per_1m_out": 0.95,
- "cost_per_1m_in_cached": 0.11,
- "cost_per_1m_out_cached": 0.44,
- "context_window": 106000,
- "default_max_tokens": 10600,
- "can_reason": false,
+ "id": "zai-org/GLM-4.7",
+ "name": "Z.ai: GLM 4.7",
+ "cost_per_1m_in": 0.3,
+ "cost_per_1m_out": 1.4,
+ "cost_per_1m_in_cached": 0.15,
+ "cost_per_1m_out_cached": 0.6,
+ "context_window": 200000,
+ "default_max_tokens": 20000,
+ "can_reason": true,
+ "reasoning_levels": [
+ "low",
+ "medium",
+ "high"
+ ],
+ "default_reasoning_effort": "medium",
"supports_attachments": false,
"options": {}
},
{
- "id": "Qwen/Qwen3-Next-80B-A3B-Instruct",
- "name": "Qwen3 Next 80B A3B Instruct",
- "cost_per_1m_in": 0.06,
- "cost_per_1m_out": 0.6,
- "cost_per_1m_in_cached": 0.03,
- "cost_per_1m_out_cached": 0.12,
- "context_window": 262144,
- "default_max_tokens": 26214,
- "can_reason": false,
+ "id": "zai-org/GLM-4.7-Flash",
+ "name": "Z.ai: GLM 4.7 Flash",
+ "cost_per_1m_in": 0.07,
+ "cost_per_1m_out": 0.4,
+ "cost_per_1m_in_cached": 0.035,
+ "cost_per_1m_out_cached": 0.14,
+ "context_window": 200000,
+ "default_max_tokens": 20000,
+ "can_reason": true,
+ "reasoning_levels": [
+ "low",
+ "medium",
+ "high"
+ ],
+ "default_reasoning_effort": "medium",
"supports_attachments": false,
"options": {}
}