nebius.json

  1{
  2  "name": "Nebius Token Factory",
  3  "id": "nebius",
  4  "api_key": "$NEBIUS_API_KEY",
  5  "api_endpoint": "https://api.tokenfactory.nebius.com/v1",
  6  "type": "openai-compat",
  7  "default_large_model_id": "moonshotai/Kimi-K2.5",
  8  "default_small_model_id": "nvidia/NVIDIA-Nemotron-3-Nano-30B-A3B",
  9  "models": [
 10    {
 11      "id": "deepseek-ai/DeepSeek-V3.2",
 12      "name": "DeepSeek-V3.2",
 13      "cost_per_1m_in": 0.3,
 14      "cost_per_1m_out": 0.45,
 15      "cost_per_1m_in_cached": 0,
 16      "cost_per_1m_out_cached": 0,
 17      "context_window": 163000,
 18      "default_max_tokens": 16300,
 19      "can_reason": true,
 20      "reasoning_levels": [
 21        "low",
 22        "medium",
 23        "high"
 24      ],
 25      "default_reasoning_effort": "medium",
 26      "supports_attachments": false
 27    },
 28    {
 29      "id": "deepseek-ai/DeepSeek-V3.2-fast",
 30      "name": "DeepSeek-V3.2 (fast)",
 31      "cost_per_1m_in": 0.4,
 32      "cost_per_1m_out": 2,
 33      "cost_per_1m_in_cached": 0,
 34      "cost_per_1m_out_cached": 0,
 35      "context_window": 8000,
 36      "default_max_tokens": 800,
 37      "can_reason": true,
 38      "reasoning_levels": [
 39        "low",
 40        "medium",
 41        "high"
 42      ],
 43      "default_reasoning_effort": "medium",
 44      "supports_attachments": false
 45    },
 46    {
 47      "id": "zai-org/GLM-5",
 48      "name": "GLM-5",
 49      "cost_per_1m_in": 1,
 50      "cost_per_1m_out": 3.2,
 51      "cost_per_1m_in_cached": 0,
 52      "cost_per_1m_out_cached": 0,
 53      "context_window": 202752,
 54      "default_max_tokens": 20275,
 55      "can_reason": true,
 56      "reasoning_levels": [
 57        "low",
 58        "medium",
 59        "high"
 60      ],
 61      "default_reasoning_effort": "medium",
 62      "supports_attachments": false
 63    },
 64    {
 65      "id": "NousResearch/Hermes-4-405B",
 66      "name": "Hermes-4-405B",
 67      "cost_per_1m_in": 1,
 68      "cost_per_1m_out": 3,
 69      "cost_per_1m_in_cached": 0,
 70      "cost_per_1m_out_cached": 0,
 71      "context_window": 131072,
 72      "default_max_tokens": 13107,
 73      "can_reason": true,
 74      "reasoning_levels": [
 75        "low",
 76        "medium",
 77        "high"
 78      ],
 79      "default_reasoning_effort": "medium",
 80      "supports_attachments": false
 81    },
 82    {
 83      "id": "NousResearch/Hermes-4-70B",
 84      "name": "Hermes-4-70B",
 85      "cost_per_1m_in": 0.13,
 86      "cost_per_1m_out": 0.4,
 87      "cost_per_1m_in_cached": 0,
 88      "cost_per_1m_out_cached": 0,
 89      "context_window": 131072,
 90      "default_max_tokens": 13107,
 91      "can_reason": true,
 92      "reasoning_levels": [
 93        "low",
 94        "medium",
 95        "high"
 96      ],
 97      "default_reasoning_effort": "medium",
 98      "supports_attachments": false
 99    },
100    {
101      "id": "PrimeIntellect/INTELLECT-3",
102      "name": "INTELLECT-3",
103      "cost_per_1m_in": 0.2,
104      "cost_per_1m_out": 1.1,
105      "cost_per_1m_in_cached": 0,
106      "cost_per_1m_out_cached": 0,
107      "context_window": 131072,
108      "default_max_tokens": 13107,
109      "can_reason": true,
110      "reasoning_levels": [
111        "low",
112        "medium",
113        "high"
114      ],
115      "default_reasoning_effort": "medium",
116      "supports_attachments": false
117    },
118    {
119      "id": "moonshotai/Kimi-K2.5",
120      "name": "Kimi-K2.5",
121      "cost_per_1m_in": 0.5,
122      "cost_per_1m_out": 2.5,
123      "cost_per_1m_in_cached": 0,
124      "cost_per_1m_out_cached": 0,
125      "context_window": 262144,
126      "default_max_tokens": 26214,
127      "can_reason": true,
128      "reasoning_levels": [
129        "low",
130        "medium",
131        "high"
132      ],
133      "default_reasoning_effort": "medium",
134      "supports_attachments": false
135    },
136    {
137      "id": "moonshotai/Kimi-K2.5-fast",
138      "name": "Kimi-K2.5 (fast)",
139      "cost_per_1m_in": 0.5,
140      "cost_per_1m_out": 2.5,
141      "cost_per_1m_in_cached": 0,
142      "cost_per_1m_out_cached": 0,
143      "context_window": 8000,
144      "default_max_tokens": 800,
145      "can_reason": true,
146      "reasoning_levels": [
147        "low",
148        "medium",
149        "high"
150      ],
151      "default_reasoning_effort": "medium",
152      "supports_attachments": false
153    },
154    {
155      "id": "meta-llama/Llama-3.3-70B-Instruct",
156      "name": "Llama-3.3-70B-Instruct",
157      "cost_per_1m_in": 0.13,
158      "cost_per_1m_out": 0.4,
159      "cost_per_1m_in_cached": 0,
160      "cost_per_1m_out_cached": 0,
161      "context_window": 131072,
162      "default_max_tokens": 13107,
163      "can_reason": false,
164      "supports_attachments": false
165    },
166    {
167      "id": "meta-llama/Meta-Llama-3.1-8B-Instruct",
168      "name": "Meta-Llama-3.1-8B-Instruct",
169      "cost_per_1m_in": 0.02,
170      "cost_per_1m_out": 0.06,
171      "cost_per_1m_in_cached": 0,
172      "cost_per_1m_out_cached": 0,
173      "context_window": 131072,
174      "default_max_tokens": 13107,
175      "can_reason": false,
176      "supports_attachments": false
177    },
178    {
179      "id": "MiniMaxAI/MiniMax-M2.5",
180      "name": "MiniMax-M2.5",
181      "cost_per_1m_in": 0.3,
182      "cost_per_1m_out": 1.2,
183      "cost_per_1m_in_cached": 0,
184      "cost_per_1m_out_cached": 0,
185      "context_window": 196608,
186      "default_max_tokens": 19660,
187      "can_reason": true,
188      "reasoning_levels": [
189        "low",
190        "medium",
191        "high"
192      ],
193      "default_reasoning_effort": "medium",
194      "supports_attachments": false
195    },
196    {
197      "id": "MiniMaxAI/MiniMax-M2.5-fast",
198      "name": "MiniMax-M2.5 (fast)",
199      "cost_per_1m_in": 0.3,
200      "cost_per_1m_out": 1.2,
201      "cost_per_1m_in_cached": 0,
202      "cost_per_1m_out_cached": 0,
203      "context_window": 8000,
204      "default_max_tokens": 800,
205      "can_reason": true,
206      "reasoning_levels": [
207        "low",
208        "medium",
209        "high"
210      ],
211      "default_reasoning_effort": "medium",
212      "supports_attachments": false
213    },
214    {
215      "id": "nvidia/NVIDIA-Nemotron-3-Nano-30B-A3B",
216      "name": "Nemotron-3-Nano-30B-A3B",
217      "cost_per_1m_in": 0.06,
218      "cost_per_1m_out": 0.24,
219      "cost_per_1m_in_cached": 0,
220      "cost_per_1m_out_cached": 0,
221      "context_window": 262144,
222      "default_max_tokens": 26214,
223      "can_reason": true,
224      "reasoning_levels": [
225        "low",
226        "medium",
227        "high"
228      ],
229      "default_reasoning_effort": "medium",
230      "supports_attachments": false
231    },
232    {
233      "id": "nvidia/nemotron-3-super-120b-a12b",
234      "name": "Nemotron-3-Super-120B-A12B",
235      "cost_per_1m_in": 0.3,
236      "cost_per_1m_out": 0.9,
237      "cost_per_1m_in_cached": 0,
238      "cost_per_1m_out_cached": 0,
239      "context_window": 262144,
240      "default_max_tokens": 26214,
241      "can_reason": true,
242      "reasoning_levels": [
243        "low",
244        "medium",
245        "high"
246      ],
247      "default_reasoning_effort": "medium",
248      "supports_attachments": false
249    },
250    {
251      "id": "Qwen/Qwen3-235B-A22B-Instruct-2507",
252      "name": "Qwen3-235B-A22B-Instruct-2507",
253      "cost_per_1m_in": 0.2,
254      "cost_per_1m_out": 0.6,
255      "cost_per_1m_in_cached": 0,
256      "cost_per_1m_out_cached": 0,
257      "context_window": 262144,
258      "default_max_tokens": 26214,
259      "can_reason": false,
260      "supports_attachments": false
261    },
262    {
263      "id": "Qwen/Qwen3-235B-A22B-Thinking-2507-fast",
264      "name": "Qwen3-235B-A22B-Thinking-2507 (fast)",
265      "cost_per_1m_in": 0.5,
266      "cost_per_1m_out": 2,
267      "cost_per_1m_in_cached": 0,
268      "cost_per_1m_out_cached": 0,
269      "context_window": 8000,
270      "default_max_tokens": 800,
271      "can_reason": true,
272      "reasoning_levels": [
273        "low",
274        "medium",
275        "high"
276      ],
277      "default_reasoning_effort": "medium",
278      "supports_attachments": false
279    },
280    {
281      "id": "Qwen/Qwen3-30B-A3B-Instruct-2507",
282      "name": "Qwen3-30B-A3B-Instruct-2507",
283      "cost_per_1m_in": 0.1,
284      "cost_per_1m_out": 0.3,
285      "cost_per_1m_in_cached": 0,
286      "cost_per_1m_out_cached": 0,
287      "context_window": 262144,
288      "default_max_tokens": 26214,
289      "can_reason": false,
290      "supports_attachments": false
291    },
292    {
293      "id": "Qwen/Qwen3-32B",
294      "name": "Qwen3-32B",
295      "cost_per_1m_in": 0.1,
296      "cost_per_1m_out": 0.3,
297      "cost_per_1m_in_cached": 0,
298      "cost_per_1m_out_cached": 0,
299      "context_window": 40960,
300      "default_max_tokens": 4096,
301      "can_reason": true,
302      "reasoning_levels": [
303        "low",
304        "medium",
305        "high"
306      ],
307      "default_reasoning_effort": "medium",
308      "supports_attachments": false
309    },
310    {
311      "id": "Qwen/Qwen3-Next-80B-A3B-Thinking",
312      "name": "Qwen3-Next-80B-A3B-Thinking",
313      "cost_per_1m_in": 0.15,
314      "cost_per_1m_out": 1.2,
315      "cost_per_1m_in_cached": 0,
316      "cost_per_1m_out_cached": 0,
317      "context_window": 128000,
318      "default_max_tokens": 12800,
319      "can_reason": true,
320      "reasoning_levels": [
321        "low",
322        "medium",
323        "high"
324      ],
325      "default_reasoning_effort": "medium",
326      "supports_attachments": false
327    },
328    {
329      "id": "Qwen/Qwen3-Next-80B-A3B-Thinking-fast",
330      "name": "Qwen3-Next-80B-A3B-Thinking (fast)",
331      "cost_per_1m_in": 0.15,
332      "cost_per_1m_out": 1.2,
333      "cost_per_1m_in_cached": 0,
334      "cost_per_1m_out_cached": 0,
335      "context_window": 8000,
336      "default_max_tokens": 800,
337      "can_reason": true,
338      "reasoning_levels": [
339        "low",
340        "medium",
341        "high"
342      ],
343      "default_reasoning_effort": "medium",
344      "supports_attachments": false
345    },
346    {
347      "id": "Qwen/Qwen3.5-397B-A17B",
348      "name": "Qwen3.5-397B-A17B",
349      "cost_per_1m_in": 0.6,
350      "cost_per_1m_out": 3.6,
351      "cost_per_1m_in_cached": 0,
352      "cost_per_1m_out_cached": 0,
353      "context_window": 262144,
354      "default_max_tokens": 26214,
355      "can_reason": true,
356      "reasoning_levels": [
357        "low",
358        "medium",
359        "high"
360      ],
361      "default_reasoning_effort": "medium",
362      "supports_attachments": false
363    },
364    {
365      "id": "Qwen/Qwen3.5-397B-A17B-fast",
366      "name": "Qwen3.5-397B-A17B (fast)",
367      "cost_per_1m_in": 0.6,
368      "cost_per_1m_out": 3.6,
369      "cost_per_1m_in_cached": 0,
370      "cost_per_1m_out_cached": 0,
371      "context_window": 8000,
372      "default_max_tokens": 800,
373      "can_reason": true,
374      "reasoning_levels": [
375        "low",
376        "medium",
377        "high"
378      ],
379      "default_reasoning_effort": "medium",
380      "supports_attachments": false
381    },
382    {
383      "id": "openai/gpt-oss-120b",
384      "name": "gpt-oss-120b",
385      "cost_per_1m_in": 0.15,
386      "cost_per_1m_out": 0.6,
387      "cost_per_1m_in_cached": 0,
388      "cost_per_1m_out_cached": 0,
389      "context_window": 131072,
390      "default_max_tokens": 13107,
391      "can_reason": true,
392      "reasoning_levels": [
393        "low",
394        "medium",
395        "high"
396      ],
397      "default_reasoning_effort": "medium",
398      "supports_attachments": false
399    },
400    {
401      "id": "openai/gpt-oss-120b-fast",
402      "name": "gpt-oss-120b (fast)",
403      "cost_per_1m_in": 0.1,
404      "cost_per_1m_out": 0.5,
405      "cost_per_1m_in_cached": 0,
406      "cost_per_1m_out_cached": 0,
407      "context_window": 8000,
408      "default_max_tokens": 800,
409      "can_reason": true,
410      "reasoning_levels": [
411        "low",
412        "medium",
413        "high"
414      ],
415      "default_reasoning_effort": "medium",
416      "supports_attachments": false
417    }
418  ]
419}