1{
2 "name": "io.net",
3 "id": "ionet",
4 "api_key": "$IONET_API_KEY",
5 "api_endpoint": "https://api.intelligence.io.solutions/api/v1",
6 "type": "openai-compat",
7 "default_large_model_id": "moonshotai/Kimi-K2.5",
8 "default_small_model_id": "zai-org/GLM-4.7-Flash",
9 "models": [
10 {
11 "id": "Intel/Qwen3-Coder-480B-A35B-Instruct-int4-mixed-ar",
12 "name": "Intel: Qwen3 Coder 480B A35B Instruct INT4 Mixed AR",
13 "cost_per_1m_in": 0.22,
14 "cost_per_1m_out": 0.95,
15 "cost_per_1m_in_cached": 0.11,
16 "cost_per_1m_out_cached": 0.44,
17 "context_window": 106000,
18 "default_max_tokens": 10600,
19 "can_reason": false,
20 "supports_attachments": false,
21 "options": {}
22 },
23 {
24 "id": "meta-llama/Llama-3.3-70B-Instruct",
25 "name": "Meta: Llama 3.3 70B Instruct",
26 "cost_per_1m_in": 0.1,
27 "cost_per_1m_out": 0.32,
28 "cost_per_1m_in_cached": 0.05,
29 "cost_per_1m_out_cached": 0.2,
30 "context_window": 128000,
31 "default_max_tokens": 12800,
32 "can_reason": true,
33 "reasoning_levels": [
34 "low",
35 "medium",
36 "high"
37 ],
38 "default_reasoning_effort": "medium",
39 "supports_attachments": false,
40 "options": {}
41 },
42 {
43 "id": "mistralai/Mistral-Large-Instruct-2411",
44 "name": "Mistral: Mistral Large Instruct 2411",
45 "cost_per_1m_in": 2,
46 "cost_per_1m_out": 6,
47 "cost_per_1m_in_cached": 1,
48 "cost_per_1m_out_cached": 4,
49 "context_window": 128000,
50 "default_max_tokens": 12800,
51 "can_reason": false,
52 "supports_attachments": true,
53 "options": {}
54 },
55 {
56 "id": "moonshotai/Kimi-K2-Instruct-0905",
57 "name": "MoonshotAI: Kimi K2 Instruct 0905",
58 "cost_per_1m_in": 0.39,
59 "cost_per_1m_out": 1.9,
60 "cost_per_1m_in_cached": 0.195,
61 "cost_per_1m_out_cached": 0.78,
62 "context_window": 262144,
63 "default_max_tokens": 26214,
64 "can_reason": false,
65 "supports_attachments": false,
66 "options": {}
67 },
68 {
69 "id": "moonshotai/Kimi-K2-Thinking",
70 "name": "MoonshotAI: Kimi K2 Thinking",
71 "cost_per_1m_in": 0.32,
72 "cost_per_1m_out": 0.48,
73 "cost_per_1m_in_cached": 0.16,
74 "cost_per_1m_out_cached": 0.64,
75 "context_window": 262144,
76 "default_max_tokens": 26214,
77 "can_reason": true,
78 "reasoning_levels": [
79 "low",
80 "medium",
81 "high"
82 ],
83 "default_reasoning_effort": "medium",
84 "supports_attachments": false,
85 "options": {}
86 },
87 {
88 "id": "moonshotai/Kimi-K2.5",
89 "name": "MoonshotAI: Kimi K2.5",
90 "cost_per_1m_in": 0.55,
91 "cost_per_1m_out": 3,
92 "cost_per_1m_in_cached": 0.275,
93 "cost_per_1m_out_cached": 1.1,
94 "context_window": 262144,
95 "default_max_tokens": 26214,
96 "can_reason": false,
97 "supports_attachments": false,
98 "options": {}
99 },
100 {
101 "id": "openai/gpt-oss-120b",
102 "name": "OpenAI: gpt-oss-120b",
103 "cost_per_1m_in": 0.02,
104 "cost_per_1m_out": 0.1,
105 "cost_per_1m_in_cached": 0.01,
106 "cost_per_1m_out_cached": 0.04,
107 "context_window": 131072,
108 "default_max_tokens": 13107,
109 "can_reason": true,
110 "reasoning_levels": [
111 "low",
112 "medium",
113 "high"
114 ],
115 "default_reasoning_effort": "medium",
116 "supports_attachments": false,
117 "options": {}
118 },
119 {
120 "id": "openai/gpt-oss-20b",
121 "name": "OpenAI: gpt-oss-20b",
122 "cost_per_1m_in": 0.016,
123 "cost_per_1m_out": 0.06,
124 "cost_per_1m_in_cached": 0.008,
125 "cost_per_1m_out_cached": 0.032,
126 "context_window": 64000,
127 "default_max_tokens": 6400,
128 "can_reason": true,
129 "reasoning_levels": [
130 "low",
131 "medium",
132 "high"
133 ],
134 "default_reasoning_effort": "medium",
135 "supports_attachments": false,
136 "options": {}
137 },
138 {
139 "id": "Qwen/Qwen3-Next-80B-A3B-Instruct",
140 "name": "Qwen: Qwen3 Next 80B A3B Instruct",
141 "cost_per_1m_in": 0.06,
142 "cost_per_1m_out": 0.6,
143 "cost_per_1m_in_cached": 0.03,
144 "cost_per_1m_out_cached": 0.12,
145 "context_window": 262144,
146 "default_max_tokens": 26214,
147 "can_reason": false,
148 "supports_attachments": false,
149 "options": {}
150 },
151 {
152 "id": "zai-org/GLM-4.6",
153 "name": "Z.ai: GLM 4.6",
154 "cost_per_1m_in": 0.35,
155 "cost_per_1m_out": 1.5,
156 "cost_per_1m_in_cached": 0.175,
157 "cost_per_1m_out_cached": 0.7,
158 "context_window": 200000,
159 "default_max_tokens": 20000,
160 "can_reason": true,
161 "reasoning_levels": [
162 "low",
163 "medium",
164 "high"
165 ],
166 "default_reasoning_effort": "medium",
167 "supports_attachments": false,
168 "options": {}
169 },
170 {
171 "id": "zai-org/GLM-4.7",
172 "name": "Z.ai: GLM 4.7",
173 "cost_per_1m_in": 0.3,
174 "cost_per_1m_out": 1.4,
175 "cost_per_1m_in_cached": 0.15,
176 "cost_per_1m_out_cached": 0.6,
177 "context_window": 202752,
178 "default_max_tokens": 20275,
179 "can_reason": true,
180 "reasoning_levels": [
181 "low",
182 "medium",
183 "high"
184 ],
185 "default_reasoning_effort": "medium",
186 "supports_attachments": false,
187 "options": {}
188 },
189 {
190 "id": "zai-org/GLM-4.7-Flash",
191 "name": "Z.ai: GLM 4.7 Flash",
192 "cost_per_1m_in": 0.07,
193 "cost_per_1m_out": 0.4,
194 "cost_per_1m_in_cached": 0.035,
195 "cost_per_1m_out_cached": 0.14,
196 "context_window": 200000,
197 "default_max_tokens": 20000,
198 "can_reason": true,
199 "reasoning_levels": [
200 "low",
201 "medium",
202 "high"
203 ],
204 "default_reasoning_effort": "medium",
205 "supports_attachments": false,
206 "options": {}
207 },
208 {
209 "id": "zai-org/GLM-5",
210 "name": "Z.ai: GLM 5",
211 "cost_per_1m_in": 1,
212 "cost_per_1m_out": 3,
213 "cost_per_1m_in_cached": 0.5,
214 "cost_per_1m_out_cached": 2,
215 "context_window": 202752,
216 "default_max_tokens": 20275,
217 "can_reason": true,
218 "reasoning_levels": [
219 "low",
220 "medium",
221 "high"
222 ],
223 "default_reasoning_effort": "medium",
224 "supports_attachments": false,
225 "options": {}
226 }
227 ]
228}