{
  "name": "Synthetic",
  "id": "synthetic",
  "api_key": "$SYNTHETIC_API_KEY",
  "api_endpoint": "https://api.synthetic.new/openai/v1",
  "type": "openai-compat",
  "default_large_model_id": "hf:zai-org/GLM-4.7",
  "default_small_model_id": "hf:deepseek-ai/DeepSeek-V3.2",
  "models": [
    {
      "id": "hf:deepseek-ai/DeepSeek-R1-0528",
      "name": "DeepSeek R1 0528",
      "cost_per_1m_in": 3,
      "cost_per_1m_out": 8,
      "cost_per_1m_in_cached": 3,
      "cost_per_1m_out_cached": 3,
      "context_window": 131072,
      "default_max_tokens": 13107,
      "can_reason": true,
      "reasoning_levels": [
        "low",
        "medium",
        "high"
      ],
      "default_reasoning_effort": "medium",
      "supports_attachments": false,
      "options": {}
    },
    {
      "id": "hf:deepseek-ai/DeepSeek-V3",
      "name": "DeepSeek V3",
      "cost_per_1m_in": 1.25,
      "cost_per_1m_out": 1.25,
      "cost_per_1m_in_cached": 1.25,
      "cost_per_1m_out_cached": 1.25,
      "context_window": 131072,
      "default_max_tokens": 13107,
      "can_reason": false,
      "supports_attachments": false,
      "options": {}
    },
    {
      "id": "hf:deepseek-ai/DeepSeek-V3.2",
      "name": "DeepSeek V3.2",
      "cost_per_1m_in": 0.56,
      "cost_per_1m_out": 1.68,
      "cost_per_1m_in_cached": 0.56,
      "cost_per_1m_out_cached": 0.56,
      "context_window": 162816,
      "default_max_tokens": 16281,
      "can_reason": true,
      "reasoning_levels": [
        "low",
        "medium",
        "high"
      ],
      "default_reasoning_effort": "medium",
      "supports_attachments": false,
      "options": {}
    },
    {
      "id": "hf:zai-org/GLM-4.7",
      "name": "GLM 4.7",
      "cost_per_1m_in": 0.55,
      "cost_per_1m_out": 2.19,
      "cost_per_1m_in_cached": 0.55,
      "cost_per_1m_out_cached": 0.55,
      "context_window": 202752,
      "default_max_tokens": 20275,
      "can_reason": true,
      "reasoning_levels": [
        "low",
        "medium",
        "high"
      ],
      "default_reasoning_effort": "medium",
      "supports_attachments": false,
      "options": {}
    },
    {
      "id": "hf:zai-org/GLM-4.7-Flash",
      "name": "GLM 4.7 Flash",
      "cost_per_1m_in": 0.06,
      "cost_per_1m_out": 0.4,
      "cost_per_1m_in_cached": 0.06,
      "cost_per_1m_out_cached": 0.06,
      "context_window": 196608,
      "default_max_tokens": 19660,
      "can_reason": true,
      "reasoning_levels": [
        "low",
        "medium",
        "high"
      ],
      "default_reasoning_effort": "medium",
      "supports_attachments": false,
      "options": {}
    },
    {
      "id": "hf:moonshotai/Kimi-K2-Instruct-0905",
      "name": "Kimi K2 Instruct 0905",
      "cost_per_1m_in": 1.2,
      "cost_per_1m_out": 1.2,
      "cost_per_1m_in_cached": 1.2,
      "cost_per_1m_out_cached": 1.2,
      "context_window": 262144,
      "default_max_tokens": 26214,
      "can_reason": false,
      "supports_attachments": false,
      "options": {}
    },
    {
      "id": "hf:moonshotai/Kimi-K2-Thinking",
      "name": "Kimi K2 Thinking",
      "cost_per_1m_in": 0.6,
      "cost_per_1m_out": 2.5,
      "cost_per_1m_in_cached": 0.6,
      "cost_per_1m_out_cached": 0.6,
      "context_window": 262144,
      "default_max_tokens": 26214,
      "can_reason": true,
      "reasoning_levels": [
        "low",
        "medium",
        "high"
      ],
      "default_reasoning_effort": "medium",
      "supports_attachments": false,
      "options": {}
    },
    {
      "id": "hf:moonshotai/Kimi-K2.5",
      "name": "Kimi K2.5",
      "cost_per_1m_in": 0.6,
      "cost_per_1m_out": 3,
      "cost_per_1m_in_cached": 0.6,
      "cost_per_1m_out_cached": 0.6,
      "context_window": 262144,
      "default_max_tokens": 32768,
      "can_reason": true,
      "reasoning_levels": [
        "low",
        "medium",
        "high"
      ],
      "default_reasoning_effort": "medium",
      "supports_attachments": true,
      "options": {}
    },
    {
      "id": "hf:nvidia/Kimi-K2.5-NVFP4",
      "name": "Kimi K2.5 NVFP4",
      "cost_per_1m_in": 0.6,
      "cost_per_1m_out": 3,
      "cost_per_1m_in_cached": 0.6,
      "cost_per_1m_out_cached": 0.6,
      "context_window": 262144,
      "default_max_tokens": 32768,
      "can_reason": true,
      "reasoning_levels": [
        "low",
        "medium",
        "high"
      ],
      "default_reasoning_effort": "medium",
      "supports_attachments": true,
      "options": {}
    },
    {
      "id": "hf:meta-llama/Llama-3.3-70B-Instruct",
      "name": "Llama 3.3 70B Instruct",
      "cost_per_1m_in": 0.88,
      "cost_per_1m_out": 0.88,
      "cost_per_1m_in_cached": 0.88,
      "cost_per_1m_out_cached": 0.88,
      "context_window": 131072,
      "default_max_tokens": 13107,
      "can_reason": false,
      "supports_attachments": false,
      "options": {}
    },
    {
      "id": "hf:MiniMaxAI/MiniMax-M2.1",
      "name": "MiniMax M2.1",
      "cost_per_1m_in": 0.3,
      "cost_per_1m_out": 1.2,
      "cost_per_1m_in_cached": 0.3,
      "cost_per_1m_out_cached": 0.3,
      "context_window": 196608,
      "default_max_tokens": 19660,
      "can_reason": true,
      "reasoning_levels": [
        "low",
        "medium",
        "high"
      ],
      "default_reasoning_effort": "medium",
      "supports_attachments": false,
      "options": {}
    },
    {
      "id": "hf:MiniMaxAI/MiniMax-M2.5",
      "name": "MiniMax M2.5",
      "cost_per_1m_in": 0.6,
      "cost_per_1m_out": 3,
      "cost_per_1m_in_cached": 0.6,
      "cost_per_1m_out_cached": 0.6,
      "context_window": 191488,
      "default_max_tokens": 19148,
      "can_reason": true,
      "reasoning_levels": [
        "low",
        "medium",
        "high"
      ],
      "default_reasoning_effort": "medium",
      "supports_attachments": false,
      "options": {}
    },
    {
      "id": "hf:nvidia/NVIDIA-Nemotron-3-Super-120B-A12B-NVFP4",
      "name": "NVIDIA Nemotron 3 Super 120B A12B NVFP4",
      "cost_per_1m_in": 0.6,
      "cost_per_1m_out": 3,
      "cost_per_1m_in_cached": 0.6,
      "cost_per_1m_out_cached": 0.6,
      "context_window": 262144,
      "default_max_tokens": 32768,
      "can_reason": true,
      "reasoning_levels": [
        "low",
        "medium",
        "high"
      ],
      "default_reasoning_effort": "medium",
      "supports_attachments": false,
      "options": {}
    },
    {
      "id": "hf:Qwen/Qwen3-235B-A22B-Thinking-2507",
      "name": "Qwen3 235B A22B Thinking 2507",
      "cost_per_1m_in": 0.65,
      "cost_per_1m_out": 3,
      "cost_per_1m_in_cached": 0.65,
      "cost_per_1m_out_cached": 0.65,
      "context_window": 262144,
      "default_max_tokens": 26214,
      "can_reason": true,
      "reasoning_levels": [
        "low",
        "medium",
        "high"
      ],
      "default_reasoning_effort": "medium",
      "supports_attachments": false,
      "options": {}
    },
    {
      "id": "hf:Qwen/Qwen3-Coder-480B-A35B-Instruct",
      "name": "Qwen3 Coder 480B A35B Instruct",
      "cost_per_1m_in": 2,
      "cost_per_1m_out": 2,
      "cost_per_1m_in_cached": 2,
      "cost_per_1m_out_cached": 2,
      "context_window": 262144,
      "default_max_tokens": 26214,
      "can_reason": false,
      "supports_attachments": false,
      "options": {}
    },
    {
      "id": "hf:Qwen/Qwen3.5-397B-A17B",
      "name": "Qwen3.5 397B A17B",
      "cost_per_1m_in": 0.6,
      "cost_per_1m_out": 3.6,
      "cost_per_1m_in_cached": 0.6,
      "cost_per_1m_out_cached": 0.6,
      "context_window": 262144,
      "default_max_tokens": 26214,
      "can_reason": false,
      "supports_attachments": true,
      "options": {}
    },
    {
      "id": "hf:openai/gpt-oss-120b",
      "name": "gpt oss 120b",
      "cost_per_1m_in": 0.1,
      "cost_per_1m_out": 0.1,
      "cost_per_1m_in_cached": 0.1,
      "cost_per_1m_out_cached": 0.1,
      "context_window": 131072,
      "default_max_tokens": 13107,
      "can_reason": true,
      "reasoning_levels": [
        "low",
        "medium",
        "high"
      ],
      "default_reasoning_effort": "medium",
      "supports_attachments": false,
      "options": {}
    }
  ]
}