1{
2 "name": "Synthetic",
3 "id": "synthetic",
4 "api_key": "$SYNTHETIC_API_KEY",
5 "api_endpoint": "https://api.synthetic.new/openai/v1",
6 "type": "openai-compat",
7 "default_large_model_id": "hf:zai-org/GLM-4.7",
8 "default_small_model_id": "hf:deepseek-ai/DeepSeek-V3.2",
9 "models": [
10 {
11 "id": "hf:deepseek-ai/DeepSeek-R1-0528",
12 "name": "DeepSeek R1 0528",
13 "cost_per_1m_in": 3,
14 "cost_per_1m_out": 8,
15 "cost_per_1m_in_cached": 3,
16 "cost_per_1m_out_cached": 3,
17 "context_window": 131072,
18 "default_max_tokens": 13107,
19 "can_reason": true,
20 "reasoning_levels": [
21 "low",
22 "medium",
23 "high"
24 ],
25 "default_reasoning_effort": "medium",
26 "supports_attachments": false,
27 "options": {}
28 },
29 {
30 "id": "hf:deepseek-ai/DeepSeek-V3",
31 "name": "DeepSeek V3",
32 "cost_per_1m_in": 1.25,
33 "cost_per_1m_out": 1.25,
34 "cost_per_1m_in_cached": 1.25,
35 "cost_per_1m_out_cached": 1.25,
36 "context_window": 131072,
37 "default_max_tokens": 13107,
38 "can_reason": false,
39 "supports_attachments": false,
40 "options": {}
41 },
42 {
43 "id": "hf:deepseek-ai/DeepSeek-V3.2",
44 "name": "DeepSeek V3.2",
45 "cost_per_1m_in": 0.56,
46 "cost_per_1m_out": 1.68,
47 "cost_per_1m_in_cached": 0.56,
48 "cost_per_1m_out_cached": 0.56,
49 "context_window": 162816,
50 "default_max_tokens": 16281,
51 "can_reason": true,
52 "reasoning_levels": [
53 "low",
54 "medium",
55 "high"
56 ],
57 "default_reasoning_effort": "medium",
58 "supports_attachments": false,
59 "options": {}
60 },
61 {
62 "id": "hf:zai-org/GLM-4.7",
63 "name": "GLM 4.7",
64 "cost_per_1m_in": 0.55,
      "cost_per_1m_out": 2.19,
66 "cost_per_1m_in_cached": 0.55,
67 "cost_per_1m_out_cached": 0.55,
68 "context_window": 202752,
69 "default_max_tokens": 20275,
70 "can_reason": true,
71 "reasoning_levels": [
72 "low",
73 "medium",
74 "high"
75 ],
76 "default_reasoning_effort": "medium",
77 "supports_attachments": false,
78 "options": {}
79 },
80 {
81 "id": "hf:moonshotai/Kimi-K2-Instruct-0905",
82 "name": "Kimi K2 Instruct 0905",
83 "cost_per_1m_in": 1.2,
84 "cost_per_1m_out": 1.2,
85 "cost_per_1m_in_cached": 1.2,
86 "cost_per_1m_out_cached": 1.2,
87 "context_window": 262144,
88 "default_max_tokens": 26214,
89 "can_reason": false,
90 "supports_attachments": false,
91 "options": {}
92 },
93 {
94 "id": "hf:moonshotai/Kimi-K2-Thinking",
95 "name": "Kimi K2 Thinking",
96 "cost_per_1m_in": 0.6,
97 "cost_per_1m_out": 2.5,
98 "cost_per_1m_in_cached": 0.6,
99 "cost_per_1m_out_cached": 0.6,
100 "context_window": 262144,
101 "default_max_tokens": 26214,
102 "can_reason": true,
103 "reasoning_levels": [
104 "low",
105 "medium",
106 "high"
107 ],
108 "default_reasoning_effort": "medium",
109 "supports_attachments": false,
110 "options": {}
111 },
112 {
113 "id": "hf:moonshotai/Kimi-K2.5",
114 "name": "Kimi K2.5",
115 "cost_per_1m_in": 0.6,
116 "cost_per_1m_out": 3,
117 "cost_per_1m_in_cached": 0.6,
118 "cost_per_1m_out_cached": 0.6,
119 "context_window": 262144,
120 "default_max_tokens": 32768,
121 "can_reason": true,
122 "reasoning_levels": [
123 "low",
124 "medium",
125 "high"
126 ],
127 "default_reasoning_effort": "medium",
128 "supports_attachments": true,
129 "options": {}
130 },
131 {
132 "id": "hf:nvidia/Kimi-K2.5-NVFP4",
133 "name": "Kimi K2.5 NVFP4",
134 "cost_per_1m_in": 0.6,
135 "cost_per_1m_out": 3,
136 "cost_per_1m_in_cached": 0.6,
137 "cost_per_1m_out_cached": 0.6,
138 "context_window": 262144,
139 "default_max_tokens": 32768,
140 "can_reason": true,
141 "reasoning_levels": [
142 "low",
143 "medium",
144 "high"
145 ],
146 "default_reasoning_effort": "medium",
147 "supports_attachments": true,
148 "options": {}
149 },
150 {
151 "id": "hf:meta-llama/Llama-3.3-70B-Instruct",
152 "name": "Llama 3.3 70B Instruct",
153 "cost_per_1m_in": 0.88,
154 "cost_per_1m_out": 0.88,
155 "cost_per_1m_in_cached": 0.88,
156 "cost_per_1m_out_cached": 0.88,
157 "context_window": 131072,
158 "default_max_tokens": 13107,
159 "can_reason": false,
160 "supports_attachments": false,
161 "options": {}
162 },
163 {
164 "id": "hf:MiniMaxAI/MiniMax-M2.1",
165 "name": "MiniMax M2.1",
166 "cost_per_1m_in": 0.3,
167 "cost_per_1m_out": 1.2,
168 "cost_per_1m_in_cached": 0.3,
169 "cost_per_1m_out_cached": 0.3,
170 "context_window": 196608,
171 "default_max_tokens": 19660,
172 "can_reason": true,
173 "reasoning_levels": [
174 "low",
175 "medium",
176 "high"
177 ],
178 "default_reasoning_effort": "medium",
179 "supports_attachments": false,
180 "options": {}
181 },
182 {
183 "id": "hf:MiniMaxAI/MiniMax-M2.5",
184 "name": "MiniMax M2.5",
185 "cost_per_1m_in": 0.6,
186 "cost_per_1m_out": 3,
187 "cost_per_1m_in_cached": 0.6,
188 "cost_per_1m_out_cached": 0.6,
189 "context_window": 191488,
190 "default_max_tokens": 19148,
191 "can_reason": true,
192 "reasoning_levels": [
193 "low",
194 "medium",
195 "high"
196 ],
197 "default_reasoning_effort": "medium",
198 "supports_attachments": false,
199 "options": {}
200 },
201 {
202 "id": "hf:Qwen/Qwen3-235B-A22B-Thinking-2507",
203 "name": "Qwen3 235B A22B Thinking 2507",
204 "cost_per_1m_in": 0.65,
205 "cost_per_1m_out": 3,
206 "cost_per_1m_in_cached": 0.65,
207 "cost_per_1m_out_cached": 0.65,
208 "context_window": 262144,
209 "default_max_tokens": 26214,
210 "can_reason": true,
211 "reasoning_levels": [
212 "low",
213 "medium",
214 "high"
215 ],
216 "default_reasoning_effort": "medium",
217 "supports_attachments": false,
218 "options": {}
219 },
220 {
221 "id": "hf:Qwen/Qwen3-Coder-480B-A35B-Instruct",
222 "name": "Qwen3 Coder 480B A35B Instruct",
223 "cost_per_1m_in": 2,
224 "cost_per_1m_out": 2,
225 "cost_per_1m_in_cached": 2,
226 "cost_per_1m_out_cached": 2,
227 "context_window": 262144,
228 "default_max_tokens": 26214,
229 "can_reason": false,
230 "supports_attachments": false,
231 "options": {}
232 },
233 {
234 "id": "hf:Qwen/Qwen3.5-397B-A17B",
235 "name": "Qwen3.5 397B A17B",
236 "cost_per_1m_in": 0.6,
237 "cost_per_1m_out": 3,
238 "cost_per_1m_in_cached": 0.6,
239 "cost_per_1m_out_cached": 0.6,
240 "context_window": 262144,
241 "default_max_tokens": 32768,
242 "can_reason": false,
243 "supports_attachments": true,
244 "options": {}
245 },
246 {
247 "id": "hf:openai/gpt-oss-120b",
248 "name": "gpt oss 120b",
      "cost_per_1m_in": 0.1,
      "cost_per_1m_out": 0.1,
      "cost_per_1m_in_cached": 0.1,
      "cost_per_1m_out_cached": 0.1,
253 "context_window": 131072,
254 "default_max_tokens": 13107,
255 "can_reason": true,
256 "reasoning_levels": [
257 "low",
258 "medium",
259 "high"
260 ],
261 "default_reasoning_effort": "medium",
262 "supports_attachments": false,
263 "options": {}
264 }
265 ]
266}