{
    "name": "Cerebras",
    "id": "cerebras",
    "type": "openai",
    "api_key": "$CEREBRAS_API_KEY",
    "api_endpoint": "https://api.cerebras.ai/v1",
    "default_large_model_id": "qwen-3-coder-480b",
    "default_small_model_id": "qwen-3-32b",
    "models": [
        {
            "id": "llama-4-scout-17b-16e-instruct",
            "name": "Llama 4 Scout",
            "cost_per_1m_in": 0.65,
            "cost_per_1m_out": 0.85,
            "context_window": 32768,
            "default_max_tokens": 4000,
            "can_reason": true,
            "supports_attachments": false
        },
        {
            "id": "llama3.1-8b",
            "name": "Llama 3.1 8B",
            "cost_per_1m_in": 0.1,
            "cost_per_1m_out": 0.1,
            "context_window": 32768,
            "default_max_tokens": 4000,
            "can_reason": true,
            "supports_attachments": false
        },
        {
            "id": "llama-3.3-70b",
            "name": "Llama 3.3 70B",
            "cost_per_1m_in": 0.85,
            "cost_per_1m_out": 1.2,
            "context_window": 128000,
            "default_max_tokens": 4000,
            "can_reason": true,
            "supports_attachments": false
        },
        {
            "id": "gpt-oss-120b",
            "name": "gpt-oss-120b",
            "cost_per_1m_in": 0.4,
            "cost_per_1m_out": 0.8,
            "context_window": 128000,
            "default_max_tokens": 65536,
            "can_reason": true,
            "supports_attachments": false
        },
        {
            "id": "qwen-3-32b",
            "name": "Qwen 3 32B",
            "cost_per_1m_in": 0.4,
            "cost_per_1m_out": 0.8,
            "context_window": 128000,
            "default_max_tokens": 32768,
            "can_reason": true,
            "supports_attachments": false
        },
        {
            "id": "llama-4-maverick-17b-128e-instruct",
            "name": "Llama 4 Maverick",
            "cost_per_1m_in": 0.2,
            "cost_per_1m_out": 0.6,
            "context_window": 32768,
            "default_max_tokens": 4000,
            "can_reason": true,
            "supports_attachments": false
        },
        {
            "id": "qwen-3-235b-a22b-instruct-2507",
            "name": "Qwen 3 235B Instruct",
            "cost_per_1m_in": 0.6,
            "cost_per_1m_out": 1.2,
            "context_window": 131072,
            "default_max_tokens": 16384,
            "can_reason": true,
            "supports_attachments": false
        },
        {
            "id": "qwen-3-235b-a22b-thinking-2507",
            "name": "Qwen 3 235B Thinking",
            "cost_per_1m_in": 0.6,
            "cost_per_1m_out": 1.2,
            "context_window": 128000,
            "default_max_tokens": 32768,
            "can_reason": true,
            "supports_attachments": false
        },
        {
            "id": "qwen-3-coder-480b",
            "name": "Qwen 3 480B Coder",
            "cost_per_1m_in": 2.0,
            "cost_per_1m_out": 2.0,
            "context_window": 131072,
            "default_max_tokens": 65536,
            "can_reason": true,
            "supports_attachments": false
        }
    ]
}