{
    "name": "Cerebras",
    "id": "cerebras",
    "type": "openai-compat",
    "api_key": "$CEREBRAS_API_KEY",
    "api_endpoint": "https://api.cerebras.ai/v1",
    "default_large_model_id": "qwen-3-coder-480b",
    "default_small_model_id": "qwen-3-32b",
    "models": [
        {
            "id": "llama-4-scout-17b-16e-instruct",
            "name": "Llama 4 Scout",
            "cost_per_1m_in": 0.65,
            "cost_per_1m_out": 0.85,
            "context_window": 32768,
            "default_max_tokens": 4000,
            "can_reason": false,
            "supports_attachments": false
        },
        {
            "id": "llama3.1-8b",
            "name": "Llama 3.1 8B",
            "cost_per_1m_in": 0.1,
            "cost_per_1m_out": 0.1,
            "context_window": 32768,
            "default_max_tokens": 4000,
            "can_reason": false,
            "supports_attachments": false
        },
        {
            "id": "llama-3.3-70b",
            "name": "Llama 3.3 70B",
            "cost_per_1m_in": 0.85,
            "cost_per_1m_out": 1.2,
            "context_window": 128000,
            "default_max_tokens": 4000,
            "can_reason": false,
            "supports_attachments": false
        },
        {
            "id": "gpt-oss-120b",
            "name": "gpt-oss-120b",
            "cost_per_1m_in": 0.4,
            "cost_per_1m_out": 0.8,
            "context_window": 128000,
            "default_max_tokens": 65536,
            "can_reason": true,
            "reasoning_levels": [
                "low",
                "medium",
                "high"
            ],
            "default_reasoning_efforts": "medium",
            "supports_attachments": false
        },
        {
            "id": "qwen-3-32b",
            "name": "Qwen 3 32B",
            "cost_per_1m_in": 0.4,
            "cost_per_1m_out": 0.8,
            "context_window": 128000,
            "default_max_tokens": 32768,
            "can_reason": false,
            "supports_attachments": false
        },
        {
            "id": "llama-4-maverick-17b-128e-instruct",
            "name": "Llama 4 Maverick",
            "cost_per_1m_in": 0.2,
            "cost_per_1m_out": 0.6,
            "context_window": 32768,
            "default_max_tokens": 4000,
            "can_reason": false,
            "supports_attachments": false
        },
        {
            "id": "qwen-3-235b-a22b-instruct-2507",
            "name": "Qwen 3 235B Instruct",
            "cost_per_1m_in": 0.6,
            "cost_per_1m_out": 1.2,
            "context_window": 131072,
            "default_max_tokens": 16384,
            "can_reason": true,
            "reasoning_levels": [
                "low",
                "medium",
                "high"
            ],
            "default_reasoning_efforts": "medium",
            "supports_attachments": false
        },
        {
            "id": "qwen-3-235b-a22b-thinking-2507",
            "name": "Qwen 3 235B Thinking",
            "cost_per_1m_in": 0.6,
            "cost_per_1m_out": 1.2,
            "context_window": 128000,
            "default_max_tokens": 32768,
            "can_reason": true,
            "reasoning_levels": [
                "low",
                "medium",
                "high"
            ],
            "default_reasoning_efforts": "medium",
            "supports_attachments": false
        },
        {
            "id": "qwen-3-coder-480b",
            "name": "Qwen 3 480B Coder",
            "cost_per_1m_in": 2.0,
            "cost_per_1m_out": 2.0,
            "context_window": 131072,
            "default_max_tokens": 65536,
            "can_reason": true,
            "reasoning_levels": [
                "low",
                "medium",
                "high"
            ],
            "default_reasoning_efforts": "medium",
            "supports_attachments": false
        }
    ]
}