synthetic.json

  1{
  2  "name": "Synthetic",
  3  "id": "synthetic",
  4  "api_key": "$SYNTHETIC_API_KEY",
  5  "api_endpoint": "https://api.synthetic.new/openai/v1",
  6  "type": "openai-compat",
  7  "default_large_model_id": "hf:moonshotai/Kimi-K2.6",
  8  "default_small_model_id": "hf:deepseek-ai/DeepSeek-V3.2",
  9  "models": [
 10    {
 11      "id": "hf:deepseek-ai/DeepSeek-R1-0528",
 12      "name": "DeepSeek R1 0528",
 13      "cost_per_1m_in": 3,
 14      "cost_per_1m_out": 8,
 15      "cost_per_1m_in_cached": 3,
 16      "cost_per_1m_out_cached": 3,
 17      "context_window": 131072,
 18      "default_max_tokens": 13107,
 19      "can_reason": true,
 20      "reasoning_levels": [
 21        "low",
 22        "medium",
 23        "high"
 24      ],
 25      "default_reasoning_effort": "medium",
 26      "supports_attachments": false
 27    },
 28    {
 29      "id": "hf:deepseek-ai/DeepSeek-V3",
 30      "name": "DeepSeek V3",
 31      "cost_per_1m_in": 1.25,
 32      "cost_per_1m_out": 1.25,
 33      "cost_per_1m_in_cached": 1.25,
 34      "cost_per_1m_out_cached": 1.25,
 35      "context_window": 131072,
 36      "default_max_tokens": 13107,
 37      "can_reason": false,
 38      "supports_attachments": false
 39    },
 40    {
 41      "id": "hf:deepseek-ai/DeepSeek-V3.2",
 42      "name": "DeepSeek V3.2",
 43      "cost_per_1m_in": 0.56,
 44      "cost_per_1m_out": 1.68,
 45      "cost_per_1m_in_cached": 0.56,
 46      "cost_per_1m_out_cached": 0.56,
 47      "context_window": 162816,
 48      "default_max_tokens": 16281,
 49      "can_reason": true,
 50      "reasoning_levels": [
 51        "low",
 52        "medium",
 53        "high"
 54      ],
 55      "default_reasoning_effort": "medium",
 56      "supports_attachments": false
 57    },
 58    {
 59      "id": "hf:zai-org/GLM-4.7",
 60      "name": "GLM 4.7",
 61      "cost_per_1m_in": 0.45,
 62      "cost_per_1m_out": 2.19,
 63      "cost_per_1m_in_cached": 0.45,
 64      "cost_per_1m_out_cached": 0.45,
 65      "context_window": 202752,
 66      "default_max_tokens": 20275,
 67      "can_reason": true,
 68      "reasoning_levels": [
 69        "low",
 70        "medium",
 71        "high"
 72      ],
 73      "default_reasoning_effort": "medium",
 74      "supports_attachments": false
 75    },
 76    {
 77      "id": "hf:zai-org/GLM-4.7-Flash",
 78      "name": "GLM 4.7 Flash",
 79      "cost_per_1m_in": 0.1,
 80      "cost_per_1m_out": 0.5,
 81      "cost_per_1m_in_cached": 0.1,
 82      "cost_per_1m_out_cached": 0.1,
 83      "context_window": 196608,
 84      "default_max_tokens": 19660,
 85      "can_reason": true,
 86      "reasoning_levels": [
 87        "low",
 88        "medium",
 89        "high"
 90      ],
 91      "default_reasoning_effort": "medium",
 92      "supports_attachments": false
 93    },
 94    {
 95      "id": "hf:zai-org/GLM-5",
 96      "name": "GLM 5",
 97      "cost_per_1m_in": 1,
 98      "cost_per_1m_out": 3,
 99      "cost_per_1m_in_cached": 1,
100      "cost_per_1m_out_cached": 1,
101      "context_window": 196608,
102      "default_max_tokens": 19660,
103      "can_reason": true,
104      "reasoning_levels": [
105        "low",
106        "medium",
107        "high"
108      ],
109      "default_reasoning_effort": "medium",
110      "supports_attachments": false
111    },
112    {
113      "id": "hf:zai-org/GLM-5.1",
114      "name": "GLM 5.1",
115      "cost_per_1m_in": 1,
116      "cost_per_1m_out": 3,
117      "cost_per_1m_in_cached": 1,
118      "cost_per_1m_out_cached": 1,
119      "context_window": 196608,
120      "default_max_tokens": 19660,
121      "can_reason": true,
122      "reasoning_levels": [
123        "low",
124        "medium",
125        "high"
126      ],
127      "default_reasoning_effort": "medium",
128      "supports_attachments": false
129    },
130    {
131      "id": "hf:moonshotai/Kimi-K2.6",
132      "name": "Kimi K2.6",
133      "cost_per_1m_in": 0.95,
134      "cost_per_1m_out": 4,
135      "cost_per_1m_in_cached": 0.95,
136      "cost_per_1m_out_cached": 0.95,
137      "context_window": 262144,
138      "default_max_tokens": 32768,
139      "can_reason": true,
140      "reasoning_levels": [
141        "low",
142        "medium",
143        "high"
144      ],
145      "default_reasoning_effort": "medium",
146      "supports_attachments": true
147    },
148    {
149      "id": "hf:meta-llama/Llama-3.3-70B-Instruct",
150      "name": "Llama 3.3 70B Instruct",
151      "cost_per_1m_in": 0.88,
152      "cost_per_1m_out": 0.88,
153      "cost_per_1m_in_cached": 0.88,
154      "cost_per_1m_out_cached": 0.88,
155      "context_window": 131072,
156      "default_max_tokens": 13107,
157      "can_reason": false,
158      "supports_attachments": false
159    },
160    {
161      "id": "hf:MiniMaxAI/MiniMax-M2.5",
162      "name": "MiniMax M2.5",
163      "cost_per_1m_in": 0.4,
164      "cost_per_1m_out": 2,
165      "cost_per_1m_in_cached": 0.4,
166      "cost_per_1m_out_cached": 0.4,
167      "context_window": 191488,
168      "default_max_tokens": 19148,
169      "can_reason": true,
170      "reasoning_levels": [
171        "low",
172        "medium",
173        "high"
174      ],
175      "default_reasoning_effort": "medium",
176      "supports_attachments": false
177    },
178    {
179      "id": "hf:nvidia/NVIDIA-Nemotron-3-Super-120B-A12B-NVFP4",
180      "name": "NVIDIA Nemotron 3 Super 120B A12B NVFP4",
181      "cost_per_1m_in": 0.3,
182      "cost_per_1m_out": 1,
183      "cost_per_1m_in_cached": 0.3,
184      "cost_per_1m_out_cached": 0.3,
185      "context_window": 262144,
186      "default_max_tokens": 32768,
187      "can_reason": true,
188      "reasoning_levels": [
189        "low",
190        "medium",
191        "high"
192      ],
193      "default_reasoning_effort": "medium",
194      "supports_attachments": false
195    },
196    {
197      "id": "hf:Qwen/Qwen3-235B-A22B-Thinking-2507",
198      "name": "Qwen3 235B A22B Thinking 2507",
199      "cost_per_1m_in": 0.65,
200      "cost_per_1m_out": 3,
201      "cost_per_1m_in_cached": 0.65,
202      "cost_per_1m_out_cached": 0.65,
203      "context_window": 262144,
204      "default_max_tokens": 26214,
205      "can_reason": true,
206      "reasoning_levels": [
207        "low",
208        "medium",
209        "high"
210      ],
211      "default_reasoning_effort": "medium",
212      "supports_attachments": false
213    },
214    {
215      "id": "hf:Qwen/Qwen3-Coder-480B-A35B-Instruct",
216      "name": "Qwen3 Coder 480B A35B Instruct",
217      "cost_per_1m_in": 2,
218      "cost_per_1m_out": 2,
219      "cost_per_1m_in_cached": 2,
220      "cost_per_1m_out_cached": 2,
221      "context_window": 262144,
222      "default_max_tokens": 26214,
223      "can_reason": false,
224      "supports_attachments": false
225    },
226    {
227      "id": "hf:Qwen/Qwen3.5-397B-A17B",
228      "name": "Qwen3.5 397B A17B",
229      "cost_per_1m_in": 0.6,
230      "cost_per_1m_out": 3.6,
231      "cost_per_1m_in_cached": 0.6,
232      "cost_per_1m_out_cached": 0.6,
233      "context_window": 262144,
234      "default_max_tokens": 26214,
235      "can_reason": false,
236      "supports_attachments": true
237    },
238    {
239      "id": "hf:openai/gpt-oss-120b",
  240      "name": "GPT OSS 120B",
241      "cost_per_1m_in": 0.1,
242      "cost_per_1m_out": 0.1,
243      "cost_per_1m_in_cached": 0.1,
244      "cost_per_1m_out_cached": 0.1,
245      "context_window": 131072,
246      "default_max_tokens": 13107,
247      "can_reason": true,
248      "reasoning_levels": [
249        "low",
250        "medium",
251        "high"
252      ],
253      "default_reasoning_effort": "medium",
254      "supports_attachments": false
255    }
256  ]
257}