diff --git a/internal/providers/configs/cerebras.json b/internal/providers/configs/cerebras.json
index c73ca67401ba4f31fd3d6d05305dd01a8a70e367..30a79396a0705fa6ad064d4484fa07d582694141 100644
--- a/internal/providers/configs/cerebras.json
+++ b/internal/providers/configs/cerebras.json
@@ -4,14 +4,12 @@
   "type": "openai",
   "api_key": "$CEREBRAS_API_KEY",
   "api_endpoint": "https://api.cerebras.ai/v1",
-  "default_large_model_id": "qwen-3-235b-a22b-instruct-2507",
+  "default_large_model_id": "qwen-3-coder-480b",
   "default_small_model_id": "qwen-3-32b",
   "models": [
     {
       "id": "llama-4-scout-17b-16e-instruct",
       "name": "Llama 4 Scout",
-      "parameters": "109B",
-      "speed_tokens_per_s": 2600,
       "cost_per_1m_in": 0.65,
       "cost_per_1m_out": 0.85,
       "context_window": 32768,
@@ -22,10 +20,8 @@
     {
       "id": "llama3.1-8b",
       "name": "Llama 3.1 8B",
-      "parameters": "8B",
-      "speed_tokens_per_s": 2200,
-      "cost_per_1m_in": 0.10,
-      "cost_per_1m_out": 0.10,
+      "cost_per_1m_in": 0.1,
+      "cost_per_1m_out": 0.1,
       "context_window": 32768,
       "default_max_tokens": 4000,
       "can_reason": true,
@@ -34,10 +30,8 @@
     {
       "id": "llama-3.3-70b",
       "name": "Llama 3.3 70B",
-      "parameters": "70B",
-      "speed_tokens_per_s": 2100,
       "cost_per_1m_in": 0.85,
-      "cost_per_1m_out": 1.20,
+      "cost_per_1m_out": 1.2,
       "context_window": 128000,
       "default_max_tokens": 4000,
       "can_reason": true,
@@ -46,10 +40,8 @@
     {
       "id": "qwen-3-32b",
       "name": "Qwen 3 32B",
-      "parameters": "32B",
-      "speed_tokens_per_s": 2600,
-      "cost_per_1m_in": 0.40,
-      "cost_per_1m_out": 0.80,
+      "cost_per_1m_in": 0.4,
+      "cost_per_1m_out": 0.8,
       "context_window": 128000,
       "default_max_tokens": 32768,
       "can_reason": true,
@@ -58,10 +50,8 @@
     {
       "id": "llama-4-maverick-17b-128e-instruct",
       "name": "Llama 4 Maverick",
-      "parameters": "17B",
-      "speed_tokens_per_s": 1500,
-      "cost_per_1m_in": 0.20,
-      "cost_per_1m_out": 0.60,
+      "cost_per_1m_in": 0.2,
+      "cost_per_1m_out": 0.6,
       "context_window": 32768,
       "default_max_tokens": 4000,
       "can_reason": true,
@@ -70,10 +60,8 @@
     {
       "id": "qwen-3-235b-a22b-instruct-2507",
       "name": "Qwen 3 235B Instruct",
-      "parameters": "235B",
-      "speed_tokens_per_s": 1400,
-      "cost_per_1m_in": 0.60,
-      "cost_per_1m_out": 1.20,
+      "cost_per_1m_in": 0.6,
+      "cost_per_1m_out": 1.2,
       "context_window": 131072,
       "default_max_tokens": 16384,
       "can_reason": true,
@@ -82,10 +70,8 @@
     {
       "id": "qwen-3-235b-a22b-thinking-2507",
       "name": "Qwen 3 235B Thinking",
-      "parameters": "235B",
-      "speed_tokens_per_s": 1700,
-      "cost_per_1m_in": 0.60,
-      "cost_per_1m_out": 1.20,
+      "cost_per_1m_in": 0.6,
+      "cost_per_1m_out": 1.2,
       "context_window": 128000,
       "default_max_tokens": 32768,
       "can_reason": true,
@@ -94,17 +80,12 @@
     {
       "id": "qwen-3-coder-480b",
       "name": "Qwen 3 480B Coder",
-      "parameters": "480B",
-      "speed_tokens_per_s": 2000,
-      "cost_per_1m_in": 2.00,
-      "cost_per_1m_out": 2.00,
+      "cost_per_1m_in": 2.0,
+      "cost_per_1m_out": 2.0,
       "context_window": 131072,
       "default_max_tokens": 65536,
       "can_reason": true,
       "supports_attachments": false
     }
-  ],
-  "default_headers": {
-    "User-Agent": "Crush-Client/1.0"
-  }
-}
\ No newline at end of file
+  ]
+}