chore: auto-update generated files

Charm created

Change summary

internal/providers/configs/cortecs.json     | 428 +++++++++++-----------
internal/providers/configs/huggingface.json |  48 --
internal/providers/configs/neuralwatt.json  |  18 
internal/providers/configs/opencode-go.json |   2 
internal/providers/configs/openrouter.json  | 158 ++++----
internal/providers/configs/synthetic.json   |  18 
internal/providers/configs/venice.json      |  14 
internal/providers/configs/vercel.json      |  72 +-
8 files changed, 352 insertions(+), 406 deletions(-)

Detailed changes

internal/providers/configs/cortecs.json 🔗

@@ -7,11 +7,29 @@
   "default_large_model_id": "qwen3-coder-30b-a3b-instruct",
   "default_small_model_id": "glm-4.7-flash",
   "models": [
+    {
+      "id": "claude-opus4-7",
+      "name": "Claude Opus 4.7",
+      "cost_per_1m_in": 4.7685,
+      "cost_per_1m_out": 23.8425,
+      "cost_per_1m_in_cached": 0,
+      "cost_per_1m_out_cached": 0,
+      "context_window": 1000000,
+      "default_max_tokens": 100000,
+      "can_reason": true,
+      "reasoning_levels": [
+        "low",
+        "medium",
+        "high"
+      ],
+      "default_reasoning_effort": "medium",
+      "supports_attachments": true
+    },
     {
       "id": "minimax-m2.7",
       "name": "MiniMax M2.7",
-      "cost_per_1m_in": 0.28,
-      "cost_per_1m_out": 1.118,
+      "cost_per_1m_in": 0.26622,
+      "cost_per_1m_out": 1.06488,
       "cost_per_1m_in_cached": 0,
       "cost_per_1m_out_cached": 0,
       "context_window": 196608,
@@ -28,8 +46,8 @@
     {
       "id": "glm-5.1",
       "name": "GLM 5.1",
-      "cost_per_1m_in": 1.304,
-      "cost_per_1m_out": 4.099,
+      "cost_per_1m_in": 1.24236,
+      "cost_per_1m_out": 3.90336,
       "cost_per_1m_in_cached": 0,
       "cost_per_1m_out_cached": 0,
       "context_window": 202752,
@@ -46,8 +64,8 @@
     {
       "id": "qwen3.5-122b-a10b",
       "name": "Qwen3.5 122B A10B",
-      "cost_per_1m_in": 0.466,
-      "cost_per_1m_out": 3.261,
+      "cost_per_1m_in": 0.4437,
+      "cost_per_1m_out": 3.1059,
       "cost_per_1m_in_cached": 0,
       "cost_per_1m_out_cached": 0,
       "context_window": 262144,
@@ -64,8 +82,8 @@
     {
       "id": "qwen3.5-9b",
       "name": "Qwen3.5 9B",
-      "cost_per_1m_in": 0.14,
-      "cost_per_1m_out": 0.186,
+      "cost_per_1m_in": 0.13311,
+      "cost_per_1m_out": 0.17748,
       "cost_per_1m_in_cached": 0,
       "cost_per_1m_out_cached": 0,
       "context_window": 262144,
@@ -82,11 +100,11 @@
     {
       "id": "nemotron-3-super-120b-a12b",
       "name": "Nemotron 3 Super 120B A12B",
-      "cost_per_1m_in": 0.28,
-      "cost_per_1m_out": 0.839,
+      "cost_per_1m_in": 0.15606,
+      "cost_per_1m_out": 0.67626,
       "cost_per_1m_in_cached": 0,
       "cost_per_1m_out_cached": 0,
-      "context_window": 262144,
+      "context_window": 262000,
       "default_max_tokens": 26214,
       "can_reason": true,
       "reasoning_levels": [
@@ -100,8 +118,8 @@
     {
       "id": "qwen3-coder-next",
       "name": "Qwen3 Coder Next",
-      "cost_per_1m_in": 0.158,
-      "cost_per_1m_out": 0.84,
+      "cost_per_1m_in": 0.15,
+      "cost_per_1m_out": 0.8,
       "cost_per_1m_in_cached": 0,
       "cost_per_1m_out_cached": 0,
       "context_window": 256000,
@@ -118,8 +136,8 @@
     {
       "id": "glm-5",
       "name": "GLM 5",
-      "cost_per_1m_in": 0.932,
-      "cost_per_1m_out": 2.982,
+      "cost_per_1m_in": 0.8874,
+      "cost_per_1m_out": 2.83968,
       "cost_per_1m_in_cached": 0,
       "cost_per_1m_out_cached": 0,
       "context_window": 202752,
@@ -136,8 +154,8 @@
     {
       "id": "glm-4.6",
       "name": "GLM 4.6",
-      "cost_per_1m_in": 0.373,
-      "cost_per_1m_out": 1.631,
+      "cost_per_1m_in": 0.35496,
+      "cost_per_1m_out": 1.55295,
       "cost_per_1m_in_cached": 0,
       "cost_per_1m_out_cached": 0,
       "context_window": 203000,
@@ -154,12 +172,12 @@
     {
       "id": "deepseek-chat-v3.1",
       "name": "DeepSeek Chat V3.1",
-      "cost_per_1m_in": 0.186,
-      "cost_per_1m_out": 0.745,
+      "cost_per_1m_in": 0.17748,
+      "cost_per_1m_out": 0.70992,
       "cost_per_1m_in_cached": 0,
       "cost_per_1m_out_cached": 0,
       "context_window": 164000,
-      "default_max_tokens": 12800,
+      "default_max_tokens": 16400,
       "can_reason": true,
       "reasoning_levels": [
         "low",
@@ -172,8 +190,8 @@
     {
       "id": "qwen-2.5-72b-instruct",
       "name": "Qwen2.5 72B Instruct",
-      "cost_per_1m_in": 0.065,
-      "cost_per_1m_out": 0.242,
+      "cost_per_1m_in": 0.062118,
+      "cost_per_1m_out": 0.230724,
       "cost_per_1m_in_cached": 0,
       "cost_per_1m_out_cached": 0,
       "context_window": 33000,
@@ -184,8 +202,8 @@
     {
       "id": "qwen3.5-397b-a17b",
       "name": "Qwen3.5 397B A17B ",
-      "cost_per_1m_in": 0.559,
-      "cost_per_1m_out": 3.354,
+      "cost_per_1m_in": 0.53244,
+      "cost_per_1m_out": 3.19464,
       "cost_per_1m_in_cached": 0,
       "cost_per_1m_out_cached": 0,
       "context_window": 262000,
@@ -202,8 +220,8 @@
     {
       "id": "deepseek-v3.2",
       "name": "DeepSeek V3.2",
-      "cost_per_1m_in": 0.28,
-      "cost_per_1m_out": 0.466,
+      "cost_per_1m_in": 0.26622,
+      "cost_per_1m_out": 0.4437,
       "cost_per_1m_in_cached": 0,
       "cost_per_1m_out_cached": 0,
       "context_window": 163840,
@@ -220,8 +238,8 @@
     {
       "id": "mistral-small-2603",
       "name": "Mistral Small 4 2603",
-      "cost_per_1m_in": 0.134,
-      "cost_per_1m_out": 0.536,
+      "cost_per_1m_in": 0.1275,
+      "cost_per_1m_out": 0.51,
       "cost_per_1m_in_cached": 0,
       "cost_per_1m_out_cached": 0,
       "context_window": 256000,
@@ -238,12 +256,12 @@
     {
       "id": "minimax-m2.5",
       "name": "MiniMax M2.5",
-      "cost_per_1m_in": 0.28,
-      "cost_per_1m_out": 1.025,
+      "cost_per_1m_in": 0.26622,
+      "cost_per_1m_out": 0.97614,
       "cost_per_1m_in_cached": 0,
       "cost_per_1m_out_cached": 0,
       "context_window": 196608,
-      "default_max_tokens": 6553,
+      "default_max_tokens": 19660,
       "can_reason": true,
       "reasoning_levels": [
         "low",
@@ -256,8 +274,8 @@
     {
       "id": "claude-4-6-sonnet",
       "name": "Claude Sonnet 4.6",
-      "cost_per_1m_in": 3.099,
-      "cost_per_1m_out": 15.495,
+      "cost_per_1m_in": 2.8691,
+      "cost_per_1m_out": 14.3095,
       "cost_per_1m_in_cached": 0,
       "cost_per_1m_out_cached": 0,
       "context_window": 1000000,
@@ -274,8 +292,8 @@
     {
       "id": "glm-4.7-flash",
       "name": "GLM 4.7 Flash",
-      "cost_per_1m_in": 0.075,
-      "cost_per_1m_out": 0.451,
+      "cost_per_1m_in": 0.0716,
+      "cost_per_1m_out": 0.4293,
       "cost_per_1m_in_cached": 0,
       "cost_per_1m_out_cached": 0,
       "context_window": 203000,
@@ -286,8 +304,8 @@
     {
       "id": "kimi-k2.5",
       "name": "Kimi K2.5",
-      "cost_per_1m_in": 0.466,
-      "cost_per_1m_out": 2.236,
+      "cost_per_1m_in": 0.4437,
+      "cost_per_1m_out": 2.12976,
       "cost_per_1m_in_cached": 0,
       "cost_per_1m_out_cached": 0,
       "context_window": 256000,
@@ -304,8 +322,8 @@
     {
       "id": "claude-opus4-6",
       "name": "Claude Opus 4.6",
-      "cost_per_1m_in": 5.165,
-      "cost_per_1m_out": 25.826,
+      "cost_per_1m_in": 4.7685,
+      "cost_per_1m_out": 23.8425,
       "cost_per_1m_in_cached": 0,
       "cost_per_1m_out_cached": 0,
       "context_window": 1000000,
@@ -322,8 +340,8 @@
     {
       "id": "minimax-m2",
       "name": "MiniMax M2",
-      "cost_per_1m_in": 0.233,
-      "cost_per_1m_out": 0.932,
+      "cost_per_1m_in": 0.22185,
+      "cost_per_1m_out": 0.8874,
       "cost_per_1m_in_cached": 0,
       "cost_per_1m_out_cached": 0,
       "context_window": 196608,
@@ -340,8 +358,8 @@
     {
       "id": "glm-4.7",
       "name": "GLM 4.7",
-      "cost_per_1m_in": 0.559,
-      "cost_per_1m_out": 2.05,
+      "cost_per_1m_in": 0.53244,
+      "cost_per_1m_out": 1.95228,
       "cost_per_1m_in_cached": 0,
       "cost_per_1m_out_cached": 0,
       "context_window": 202752,
@@ -358,8 +376,8 @@
     {
       "id": "minimax-m2.1",
       "name": "MiniMax M2.1",
-      "cost_per_1m_in": 0.338,
-      "cost_per_1m_out": 1.352,
+      "cost_per_1m_in": 0.322,
+      "cost_per_1m_out": 1.2879,
       "cost_per_1m_in_cached": 0,
       "cost_per_1m_out_cached": 0,
       "context_window": 196000,
@@ -376,8 +394,8 @@
     {
       "id": "qwen3-vl-235b-a22b",
       "name": "Qwen3 VL 235B A22B",
-      "cost_per_1m_in": 0.196,
-      "cost_per_1m_out": 1.77,
+      "cost_per_1m_in": 0.186354,
+      "cost_per_1m_out": 1.68606,
       "cost_per_1m_in_cached": 0,
       "cost_per_1m_out_cached": 0,
       "context_window": 131000,
@@ -394,8 +412,8 @@
     {
       "id": "mistral-small-creative",
       "name": "Mistral Small Creative",
-      "cost_per_1m_in": 0.105,
-      "cost_per_1m_out": 0.315,
+      "cost_per_1m_in": 0.1,
+      "cost_per_1m_out": 0.3,
       "cost_per_1m_in_cached": 0,
       "cost_per_1m_out_cached": 0,
       "context_window": 32000,
@@ -406,8 +424,8 @@
     {
       "id": "nvidia-nemotron-3-nano-30b-a3b",
       "name": "Nemotron 3 Nano 30B A3B",
-      "cost_per_1m_in": 0.056,
-      "cost_per_1m_out": 0.226,
+      "cost_per_1m_in": 0.0537,
+      "cost_per_1m_out": 0.215,
       "cost_per_1m_in_cached": 0,
       "cost_per_1m_out_cached": 0,
       "context_window": 128000,
@@ -424,8 +442,8 @@
     {
       "id": "claude-opus4-5",
       "name": "Claude Opus 4.5",
-      "cost_per_1m_in": 5.165,
-      "cost_per_1m_out": 25.826,
+      "cost_per_1m_in": 4.7695,
+      "cost_per_1m_out": 23.8485,
       "cost_per_1m_in_cached": 0,
       "cost_per_1m_out_cached": 0,
       "context_window": 200000,
@@ -442,8 +460,8 @@
     {
       "id": "qwen3-next-80b-a3b-thinking",
       "name": "Qwen3 Next 80B A3B Thinking",
-      "cost_per_1m_in": 0.14,
-      "cost_per_1m_out": 1.118,
+      "cost_per_1m_in": 0.13311,
+      "cost_per_1m_out": 1.06488,
       "cost_per_1m_in_cached": 0,
       "cost_per_1m_out_cached": 0,
       "context_window": 262000,
@@ -460,8 +478,8 @@
     {
       "id": "holo2-30b-a3b",
       "name": "Holo2 30B A3B",
-      "cost_per_1m_in": 0.315,
-      "cost_per_1m_out": 0.735,
+      "cost_per_1m_in": 0.3,
+      "cost_per_1m_out": 0.7,
       "cost_per_1m_in_cached": 0,
       "cost_per_1m_out_cached": 0,
       "context_window": 22000,
@@ -478,20 +496,20 @@
     {
       "id": "devstral-2512",
       "name": "Devstral 2 2512",
-      "cost_per_1m_in": 0.42,
-      "cost_per_1m_out": 2.1,
+      "cost_per_1m_in": 0.4,
+      "cost_per_1m_out": 2,
       "cost_per_1m_in_cached": 0,
       "cost_per_1m_out_cached": 0,
       "context_window": 262000,
-      "default_max_tokens": 20000,
+      "default_max_tokens": 26200,
       "can_reason": false,
       "supports_attachments": false
     },
     {
       "id": "nova-2-lite",
       "name": "Nova 2 Lite",
-      "cost_per_1m_in": 0.352,
-      "cost_per_1m_out": 2.963,
+      "cost_per_1m_in": 0.335,
+      "cost_per_1m_out": 2.822,
       "cost_per_1m_in_cached": 0,
       "cost_per_1m_out_cached": 0,
       "context_window": 1000000,
@@ -508,8 +526,8 @@
     {
       "id": "gpt-oss-safeguard-120b",
       "name": "GPT OSS Safeguard 120B",
-      "cost_per_1m_in": 0.169,
-      "cost_per_1m_out": 0.657,
+      "cost_per_1m_in": 0.161,
+      "cost_per_1m_out": 0.626,
       "cost_per_1m_in_cached": 0,
       "cost_per_1m_out_cached": 0,
       "context_window": 128000,
@@ -526,8 +544,8 @@
     {
       "id": "mistral-large-2512",
       "name": "Mistral Large 3 2512",
-      "cost_per_1m_in": 0.525,
-      "cost_per_1m_out": 1.575,
+      "cost_per_1m_in": 0.5,
+      "cost_per_1m_out": 1.5,
       "cost_per_1m_in_cached": 0,
       "cost_per_1m_out_cached": 0,
       "context_window": 256000,
@@ -538,8 +556,8 @@
     {
       "id": "ministral-8b-2512",
       "name": "Ministral 3 8b 2512",
-      "cost_per_1m_in": 0.158,
-      "cost_per_1m_out": 0.158,
+      "cost_per_1m_in": 0.15,
+      "cost_per_1m_out": 0.15,
       "cost_per_1m_in_cached": 0,
       "cost_per_1m_out_cached": 0,
       "context_window": 256000,
@@ -550,8 +568,8 @@
     {
       "id": "ministral-3b-2512",
       "name": "Ministral 3 3b 2512",
-      "cost_per_1m_in": 0.105,
-      "cost_per_1m_out": 0.105,
+      "cost_per_1m_in": 0.1,
+      "cost_per_1m_out": 0.1,
       "cost_per_1m_in_cached": 0,
       "cost_per_1m_out_cached": 0,
       "context_window": 256000,
@@ -562,8 +580,8 @@
     {
       "id": "ministral-14b-2512",
       "name": "Ministral 3 14b 2512",
-      "cost_per_1m_in": 0.21,
-      "cost_per_1m_out": 0.21,
+      "cost_per_1m_in": 0.2,
+      "cost_per_1m_out": 0.2,
       "cost_per_1m_in_cached": 0,
       "cost_per_1m_out_cached": 0,
       "context_window": 256000,
@@ -574,8 +592,8 @@
     {
       "id": "intellect-3",
       "name": "INTELLECT-3",
-      "cost_per_1m_in": 0.188,
-      "cost_per_1m_out": 1.033,
+      "cost_per_1m_in": 0.179,
+      "cost_per_1m_out": 0.984,
       "cost_per_1m_in_cached": 0,
       "cost_per_1m_out_cached": 0,
       "context_window": 128000,
@@ -592,8 +610,8 @@
     {
       "id": "gpt-5.1",
       "name": "GPT 5.1",
-      "cost_per_1m_in": 1.296,
-      "cost_per_1m_out": 10.33,
+      "cost_per_1m_in": 1.234,
+      "cost_per_1m_out": 9.838,
       "cost_per_1m_in_cached": 0,
       "cost_per_1m_out_cached": 0,
       "context_window": 400000,
@@ -610,8 +628,8 @@
     {
       "id": "nemotron-nano-v2-12b",
       "name": "Nemotron Nano V2 12b",
-      "cost_per_1m_in": 0.226,
-      "cost_per_1m_out": 0.667,
+      "cost_per_1m_in": 0.215,
+      "cost_per_1m_out": 0.635,
       "cost_per_1m_in_cached": 0,
       "cost_per_1m_out_cached": 0,
       "context_window": 128000,
@@ -628,8 +646,8 @@
     {
       "id": "claude-haiku-4-5",
       "name": "Claude Haiku 4.5",
-      "cost_per_1m_in": 0.939,
-      "cost_per_1m_out": 4.696,
+      "cost_per_1m_in": 0.894,
+      "cost_per_1m_out": 4.472,
       "cost_per_1m_in_cached": 0,
       "cost_per_1m_out_cached": 0,
       "context_window": 200000,
@@ -646,8 +664,8 @@
     {
       "id": "claude-4-5-sonnet",
       "name": "Claude 4.5 Sonnet",
-      "cost_per_1m_in": 2.817,
-      "cost_per_1m_out": 14.087,
+      "cost_per_1m_in": 2.683,
+      "cost_per_1m_out": 13.416,
       "cost_per_1m_in_cached": 0,
       "cost_per_1m_out_cached": 0,
       "context_window": 200000,
@@ -664,8 +682,8 @@
     {
       "id": "magistral-medium-2509",
       "name": "Magistral Medium 2509",
-      "cost_per_1m_in": 2.1,
-      "cost_per_1m_out": 5.25,
+      "cost_per_1m_in": 2,
+      "cost_per_1m_out": 5,
       "cost_per_1m_in_cached": 0,
       "cost_per_1m_out_cached": 0,
       "context_window": 128000,
@@ -682,8 +700,8 @@
     {
       "id": "magistral-small-2509",
       "name": "Magistral Small 2509",
-      "cost_per_1m_in": 0.525,
-      "cost_per_1m_out": 1.575,
+      "cost_per_1m_in": 0.5,
+      "cost_per_1m_out": 1.5,
       "cost_per_1m_in_cached": 0,
       "cost_per_1m_out_cached": 0,
       "context_window": 128000,
@@ -700,8 +718,8 @@
     {
       "id": "hermes-4-70b",
       "name": "Hermes 4 70B",
-      "cost_per_1m_in": 0.122,
-      "cost_per_1m_out": 0.376,
+      "cost_per_1m_in": 0.116,
+      "cost_per_1m_out": 0.358,
       "cost_per_1m_in_cached": 0,
       "cost_per_1m_out_cached": 0,
       "context_window": 128000,
@@ -712,8 +730,8 @@
     {
       "id": "gpt-5",
       "name": "GPT 5",
-      "cost_per_1m_in": 1.296,
-      "cost_per_1m_out": 10.33,
+      "cost_per_1m_in": 1.234,
+      "cost_per_1m_out": 9.838,
       "cost_per_1m_in_cached": 0,
       "cost_per_1m_out_cached": 0,
       "context_window": 400000,
@@ -730,12 +748,12 @@
     {
       "id": "gpt-oss-120b",
       "name": "GPT Oss 120b",
-      "cost_per_1m_in": 0.037,
-      "cost_per_1m_out": 0.186,
+      "cost_per_1m_in": 0.035496,
+      "cost_per_1m_out": 0.17748,
       "cost_per_1m_in_cached": 0,
       "cost_per_1m_out_cached": 0,
       "context_window": 131000,
-      "default_max_tokens": 12800,
+      "default_max_tokens": 13100,
       "can_reason": true,
       "reasoning_levels": [
         "low",
@@ -748,8 +766,8 @@
     {
       "id": "qwen3-30b-a3b-instruct-2507",
       "name": "Qwen3 30B A3B Instruct 2507",
-      "cost_per_1m_in": 0.093,
-      "cost_per_1m_out": 0.281,
+      "cost_per_1m_in": 0.089,
+      "cost_per_1m_out": 0.268,
       "cost_per_1m_in_cached": 0,
       "cost_per_1m_out_cached": 0,
       "context_window": 262000,
@@ -766,12 +784,12 @@
     {
       "id": "gpt-oss-20b",
       "name": "GPT Oss 20b",
-      "cost_per_1m_in": 0.028,
-      "cost_per_1m_out": 0.13,
+      "cost_per_1m_in": 0.026622,
+      "cost_per_1m_out": 0.124236,
       "cost_per_1m_in_cached": 0,
       "cost_per_1m_out_cached": 0,
       "context_window": 131000,
-      "default_max_tokens": 12800,
+      "default_max_tokens": 13100,
       "can_reason": true,
       "reasoning_levels": [
         "low",
@@ -784,8 +802,8 @@
     {
       "id": "mistral-7b-instruct-v0.3",
       "name": "Mistral 7B Instruct v0.3",
-      "cost_per_1m_in": 0.105,
-      "cost_per_1m_out": 0.105,
+      "cost_per_1m_in": 0.1,
+      "cost_per_1m_out": 0.1,
       "cost_per_1m_in_cached": 0,
       "cost_per_1m_out_cached": 0,
       "context_window": 127000,
@@ -796,8 +814,8 @@
     {
       "id": "mistral-large-2402",
       "name": "Mistral Large 2402",
-      "cost_per_1m_in": 4.038,
-      "cost_per_1m_out": 12.208,
+      "cost_per_1m_in": 3.846,
+      "cost_per_1m_out": 11.627,
       "cost_per_1m_in_cached": 0,
       "cost_per_1m_out_cached": 0,
       "context_window": 32000,
@@ -814,8 +832,8 @@
     {
       "id": "pixtral-large-2502",
       "name": "Pixtral Large 25.02",
-      "cost_per_1m_in": 1.878,
-      "cost_per_1m_out": 5.634,
+      "cost_per_1m_in": 1.789,
+      "cost_per_1m_out": 5.366,
       "cost_per_1m_in_cached": 0,
       "cost_per_1m_out_cached": 0,
       "context_window": 128000,
@@ -832,8 +850,8 @@
     {
       "id": "mistral-small-3.2-24b-instruct-2506",
       "name": "Mistral Small 3.2 24B Instruct 2506",
-      "cost_per_1m_in": 0.095,
-      "cost_per_1m_out": 0.294,
+      "cost_per_1m_in": 0.09,
+      "cost_per_1m_out": 0.28,
       "cost_per_1m_in_cached": 0,
       "cost_per_1m_out_cached": 0,
       "context_window": 128000,
@@ -844,12 +862,12 @@
     {
       "id": "qwen3-32b",
       "name": "Qwen3 32B",
-      "cost_per_1m_in": 0.093,
-      "cost_per_1m_out": 0.281,
+      "cost_per_1m_in": 0.089,
+      "cost_per_1m_out": 0.268,
       "cost_per_1m_in_cached": 0,
       "cost_per_1m_out_cached": 0,
       "context_window": 40000,
-      "default_max_tokens": 1638,
+      "default_max_tokens": 4000,
       "can_reason": true,
       "reasoning_levels": [
         "low",
@@ -862,8 +880,8 @@
     {
       "id": "qwen3-235b-a22b-instruct-2507",
       "name": "Qwen3 235B A22B Instruct 2507",
-      "cost_per_1m_in": 0.065,
-      "cost_per_1m_out": 0.429,
+      "cost_per_1m_in": 0.062118,
+      "cost_per_1m_out": 0.408204,
       "cost_per_1m_in_cached": 0,
       "cost_per_1m_out_cached": 0,
       "context_window": 131000,
@@ -880,12 +898,12 @@
     {
       "id": "qwen3-coder-30b-a3b-instruct",
       "name": "Qwen3 Coder 30b a3b Instruct",
-      "cost_per_1m_in": 0.056,
-      "cost_per_1m_out": 0.233,
+      "cost_per_1m_in": 0.053244,
+      "cost_per_1m_out": 0.22185,
       "cost_per_1m_in_cached": 0,
       "cost_per_1m_out_cached": 0,
       "context_window": 262000,
-      "default_max_tokens": 3200,
+      "default_max_tokens": 26200,
       "can_reason": true,
       "reasoning_levels": [
         "low",
@@ -896,10 +914,10 @@
       "supports_attachments": false
     },
     {
-      "id": "gpt-4.1-mini",
-      "name": "GPT 4.1 mini",
-      "cost_per_1m_in": 0.41,
-      "cost_per_1m_out": 1.607,
+      "id": "gpt-4.1",
+      "name": "GPT 4.1",
+      "cost_per_1m_in": 1.968,
+      "cost_per_1m_out": 7.872,
       "cost_per_1m_in_cached": 0,
       "cost_per_1m_out_cached": 0,
       "context_window": 1047576,
@@ -914,10 +932,10 @@
       "supports_attachments": true
     },
     {
-      "id": "gpt-4.1-nano",
-      "name": "GPT 4.1 nano",
-      "cost_per_1m_in": 0.105,
-      "cost_per_1m_out": 0.41,
+      "id": "gpt-4.1-mini",
+      "name": "GPT 4.1 mini",
+      "cost_per_1m_in": 0.39,
+      "cost_per_1m_out": 1.53,
       "cost_per_1m_in_cached": 0,
       "cost_per_1m_out_cached": 0,
       "context_window": 1047576,
@@ -932,14 +950,14 @@
       "supports_attachments": true
     },
     {
-      "id": "nova-lite-v1",
-      "name": "Nova Lite 1.0",
-      "cost_per_1m_in": 0.065,
-      "cost_per_1m_out": 0.259,
+      "id": "gpt-4.1-nano",
+      "name": "GPT 4.1 nano",
+      "cost_per_1m_in": 0.1,
+      "cost_per_1m_out": 0.39,
       "cost_per_1m_in_cached": 0,
       "cost_per_1m_out_cached": 0,
-      "context_window": 300000,
-      "default_max_tokens": 30000,
+      "context_window": 1047576,
+      "default_max_tokens": 104757,
       "can_reason": true,
       "reasoning_levels": [
         "low",
@@ -952,8 +970,8 @@
     {
       "id": "nova-micro-v1",
       "name": "Nova Micro 1.0",
-      "cost_per_1m_in": 0.038,
-      "cost_per_1m_out": 0.15,
+      "cost_per_1m_in": 0.036,
+      "cost_per_1m_out": 0.143,
       "cost_per_1m_in_cached": 0,
       "cost_per_1m_out_cached": 0,
       "context_window": 128000,
@@ -968,14 +986,14 @@
       "supports_attachments": true
     },
     {
-      "id": "gpt-4.1",
-      "name": "GPT 4.1",
-      "cost_per_1m_in": 2.066,
-      "cost_per_1m_out": 8.266,
+      "id": "nova-lite-v1",
+      "name": "Nova Lite 1.0",
+      "cost_per_1m_in": 0.062,
+      "cost_per_1m_out": 0.247,
       "cost_per_1m_in_cached": 0,
       "cost_per_1m_out_cached": 0,
-      "context_window": 1047576,
-      "default_max_tokens": 104757,
+      "context_window": 300000,
+      "default_max_tokens": 30000,
       "can_reason": true,
       "reasoning_levels": [
         "low",
@@ -988,8 +1006,8 @@
     {
       "id": "nova-pro-v1",
       "name": "Nova Pro 1.0",
-      "cost_per_1m_in": 0.865,
-      "cost_per_1m_out": 3.46,
+      "cost_per_1m_in": 0.824,
+      "cost_per_1m_out": 3.295,
       "cost_per_1m_in_cached": 0,
       "cost_per_1m_out_cached": 0,
       "context_window": 300000,
@@ -1006,8 +1024,8 @@
     {
       "id": "claude-sonnet-4",
       "name": "Claude Sonnet 4",
-      "cost_per_1m_in": 2.817,
-      "cost_per_1m_out": 14.087,
+      "cost_per_1m_in": 2.601,
+      "cost_per_1m_out": 13.01,
       "cost_per_1m_in_cached": 0,
       "cost_per_1m_out_cached": 0,
       "context_window": 200000,
@@ -1024,8 +1042,8 @@
     {
       "id": "llama-3.1-nemotron-ultra-253b-v1",
       "name": "Llama 3.1 Nemotron Ultra 253B v1",
-      "cost_per_1m_in": 0.564,
-      "cost_per_1m_out": 1.691,
+      "cost_per_1m_in": 0.537,
+      "cost_per_1m_out": 1.61,
       "cost_per_1m_in_cached": 0,
       "cost_per_1m_out_cached": 0,
       "context_window": 128000,
@@ -1042,8 +1060,8 @@
     {
       "id": "llama-4-maverick",
       "name": "Llama 4 Maverick",
-      "cost_per_1m_in": 0.13,
-      "cost_per_1m_out": 0.633,
+      "cost_per_1m_in": 0.124236,
+      "cost_per_1m_out": 0.602832,
       "cost_per_1m_in_cached": 0,
       "cost_per_1m_out_cached": 0,
       "context_window": 1050000,
@@ -1054,8 +1072,8 @@
     {
       "id": "deepseek-v3-0324",
       "name": "DeepSeek V3 0324",
-      "cost_per_1m_in": 0.28,
-      "cost_per_1m_out": 0.932,
+      "cost_per_1m_in": 0.26622,
+      "cost_per_1m_out": 0.8874,
       "cost_per_1m_in_cached": 0,
       "cost_per_1m_out_cached": 0,
       "context_window": 163840,
@@ -1072,8 +1090,8 @@
     {
       "id": "mistral-small-2503",
       "name": "Mistral Small 2503",
-      "cost_per_1m_in": 0.105,
-      "cost_per_1m_out": 0.315,
+      "cost_per_1m_in": 0.1,
+      "cost_per_1m_out": 0.3,
       "cost_per_1m_in_cached": 0,
       "cost_per_1m_out_cached": 0,
       "context_window": 128000,
@@ -1084,8 +1102,8 @@
     {
       "id": "mistral-small-2506",
       "name": "Mistral Small 2506",
-      "cost_per_1m_in": 0.105,
-      "cost_per_1m_out": 0.315,
+      "cost_per_1m_in": 0.1,
+      "cost_per_1m_out": 0.3,
       "cost_per_1m_in_cached": 0,
       "cost_per_1m_out_cached": 0,
       "context_window": 131072,
@@ -1096,8 +1114,8 @@
     {
       "id": "gemini-2.0-flash-001",
       "name": "Gemini 2.0 Flash",
-      "cost_per_1m_in": 0.141,
-      "cost_per_1m_out": 0.563,
+      "cost_per_1m_in": 0.13416,
+      "cost_per_1m_out": 0.53664,
       "cost_per_1m_in_cached": 0,
       "cost_per_1m_out_cached": 0,
       "context_window": 1048576,
@@ -1108,8 +1126,8 @@
     {
       "id": "gemini-2.0-flash-lite-001",
       "name": "Gemini 2.0 Flash Lite",
-      "cost_per_1m_in": 0.07,
-      "cost_per_1m_out": 0.282,
+      "cost_per_1m_in": 0.06708,
+      "cost_per_1m_out": 0.26832,
       "cost_per_1m_in_cached": 0,
       "cost_per_1m_out_cached": 0,
       "context_window": 1048576,
@@ -1120,8 +1138,8 @@
     {
       "id": "gemini-2.5-flash",
       "name": "Gemini 2.5 Flash",
-      "cost_per_1m_in": 0.282,
-      "cost_per_1m_out": 2.348,
+      "cost_per_1m_in": 0.26832,
+      "cost_per_1m_out": 2.236,
       "cost_per_1m_in_cached": 0,
       "cost_per_1m_out_cached": 0,
       "context_window": 1048576,
@@ -1138,8 +1156,8 @@
     {
       "id": "gemini-2.5-pro",
       "name": "Gemini 2.5 Pro",
-      "cost_per_1m_in": 1.409,
-      "cost_per_1m_out": 9.391,
+      "cost_per_1m_in": 1.3416,
+      "cost_per_1m_out": 8.944,
       "cost_per_1m_in_cached": 0,
       "cost_per_1m_out_cached": 0,
       "context_window": 1048576,
@@ -1156,12 +1174,12 @@
     {
       "id": "gemma-3-27b-it",
       "name": "Gemma 3 27b it",
-      "cost_per_1m_in": 0.093,
-      "cost_per_1m_out": 0.281,
+      "cost_per_1m_in": 0.089,
+      "cost_per_1m_out": 0.268,
       "cost_per_1m_in_cached": 0,
       "cost_per_1m_out_cached": 0,
       "context_window": 131000,
-      "default_max_tokens": 4000,
+      "default_max_tokens": 13100,
       "can_reason": true,
       "reasoning_levels": [
         "low",
@@ -1171,29 +1189,11 @@
       "default_reasoning_effort": "medium",
       "supports_attachments": true
     },
-    {
-      "id": "deepseek-r1-distill-llama-70b",
-      "name": "Deepseek R1 Distill LLama 70B",
-      "cost_per_1m_in": 0.945,
-      "cost_per_1m_out": 0.945,
-      "cost_per_1m_in_cached": 0,
-      "cost_per_1m_out_cached": 0,
-      "context_window": 16000,
-      "default_max_tokens": 1600,
-      "can_reason": true,
-      "reasoning_levels": [
-        "low",
-        "medium",
-        "high"
-      ],
-      "default_reasoning_effort": "medium",
-      "supports_attachments": false
-    },
     {
       "id": "deepseek-r1-0528",
       "name": "DeepSeek R1 0528",
-      "cost_per_1m_in": 0.614,
-      "cost_per_1m_out": 2.423,
+      "cost_per_1m_in": 0.585084,
+      "cost_per_1m_out": 2.30724,
       "cost_per_1m_in_cached": 0,
       "cost_per_1m_out_cached": 0,
       "context_window": 164000,
@@ -1210,8 +1210,8 @@
     {
       "id": "codestral-2508",
       "name": "Codestral 25.08",
-      "cost_per_1m_in": 0.315,
-      "cost_per_1m_out": 0.945,
+      "cost_per_1m_in": 0.3,
+      "cost_per_1m_out": 0.9,
       "cost_per_1m_in_cached": 0,
       "cost_per_1m_out_cached": 0,
       "context_window": 256000,
@@ -1222,12 +1222,12 @@
     {
       "id": "llama-3.3-70b-instruct",
       "name": "Llama 3.3 70B Instruct",
-      "cost_per_1m_in": 0.093,
-      "cost_per_1m_out": 0.289,
+      "cost_per_1m_in": 0.08874,
+      "cost_per_1m_out": 0.274994,
       "cost_per_1m_in_cached": 0,
       "cost_per_1m_out_cached": 0,
       "context_window": 131000,
-      "default_max_tokens": 12800,
+      "default_max_tokens": 13100,
       "can_reason": true,
       "reasoning_levels": [
         "low",
@@ -1240,8 +1240,8 @@
     {
       "id": "gpt-4o",
       "name": "GPT 4o",
-      "cost_per_1m_in": 2.506,
-      "cost_per_1m_out": 10.024,
+      "cost_per_1m_in": 2.38664,
+      "cost_per_1m_out": 9.5466,
       "cost_per_1m_in_cached": 0,
       "cost_per_1m_out_cached": 0,
       "context_window": 128000,
@@ -1256,10 +1256,10 @@
       "supports_attachments": true
     },
     {
-      "id": "gpt-5-nano",
-      "name": "GPT 5 nano",
-      "cost_per_1m_in": 0.057,
-      "cost_per_1m_out": 0.414,
+      "id": "gpt-5-mini",
+      "name": "GPT 5 mini",
+      "cost_per_1m_in": 0.25,
+      "cost_per_1m_out": 1.968,
       "cost_per_1m_in_cached": 0,
       "cost_per_1m_out_cached": 0,
       "context_window": 400000,
@@ -1274,10 +1274,10 @@
       "supports_attachments": true
     },
     {
-      "id": "gpt-5-mini",
-      "name": "GPT 5 mini",
-      "cost_per_1m_in": 0.263,
-      "cost_per_1m_out": 2.066,
+      "id": "gpt-5-nano",
+      "name": "GPT 5 nano",
+      "cost_per_1m_in": 0.054,
+      "cost_per_1m_out": 0.394,
       "cost_per_1m_in_cached": 0,
       "cost_per_1m_out_cached": 0,
       "context_window": 400000,
@@ -1294,8 +1294,8 @@
     {
       "id": "mistral-large-2411",
       "name": "Mistral Large 2411",
-      "cost_per_1m_in": 1.89,
-      "cost_per_1m_out": 5.67,
+      "cost_per_1m_in": 1.8,
+      "cost_per_1m_out": 5.4,
       "cost_per_1m_in_cached": 0,
       "cost_per_1m_out_cached": 0,
       "context_window": 131072,
@@ -1312,8 +1312,8 @@
     {
       "id": "hermes-4-405b",
       "name": "Hermes 4 405B",
-      "cost_per_1m_in": 0.939,
-      "cost_per_1m_out": 2.817,
+      "cost_per_1m_in": 0.894,
+      "cost_per_1m_out": 2.683,
       "cost_per_1m_in_cached": 0,
       "cost_per_1m_out_cached": 0,
       "context_window": 128000,
@@ -1324,38 +1324,20 @@
     {
       "id": "mistral-nemo-instruct-2407",
       "name": "Mistral Nemo 2407",
-      "cost_per_1m_in": 0.137,
-      "cost_per_1m_out": 0.137,
+      "cost_per_1m_in": 0.13,
+      "cost_per_1m_out": 0.13,
       "cost_per_1m_in_cached": 0,
       "cost_per_1m_out_cached": 0,
       "context_window": 131072,
-      "default_max_tokens": 11800,
+      "default_max_tokens": 13107,
       "can_reason": false,
       "supports_attachments": false
     },
-    {
-      "id": "mistral-medium-2508",
-      "name": "Mistral Medium 2508",
-      "cost_per_1m_in": 0.42,
-      "cost_per_1m_out": 2.1,
-      "cost_per_1m_in_cached": 0,
-      "cost_per_1m_out_cached": 0,
-      "context_window": 128000,
-      "default_max_tokens": 12800,
-      "can_reason": true,
-      "reasoning_levels": [
-        "low",
-        "medium",
-        "high"
-      ],
-      "default_reasoning_effort": "medium",
-      "supports_attachments": true
-    },
     {
       "id": "devstral-medium-2507",
       "name": "Devstral Medium 2507",
-      "cost_per_1m_in": 0.42,
-      "cost_per_1m_out": 2.1,
+      "cost_per_1m_in": 0.4,
+      "cost_per_1m_out": 2,
       "cost_per_1m_in_cached": 0,
       "cost_per_1m_out_cached": 0,
       "context_window": 131072,

internal/providers/configs/huggingface.json 🔗

@@ -31,18 +31,6 @@
       "can_reason": false,
       "supports_attachments": false
     },
-    {
-      "id": "Qwen/Qwen3-235B-A22B-Instruct-2507:cerebras",
-      "name": "Qwen/Qwen3-235B-A22B-Instruct-2507 (cerebras)",
-      "cost_per_1m_in": 0.6,
-      "cost_per_1m_out": 1.2,
-      "cost_per_1m_in_cached": 0,
-      "cost_per_1m_out_cached": 0,
-      "context_window": 131072,
-      "default_max_tokens": 8192,
-      "can_reason": false,
-      "supports_attachments": false
-    },
     {
       "id": "Qwen/Qwen3-32B:groq",
       "name": "Qwen/Qwen3-32B (groq)",
@@ -115,30 +103,6 @@
       "can_reason": false,
       "supports_attachments": false
     },
-    {
-      "id": "moonshotai/Kimi-K2-Thinking:fireworks-ai",
-      "name": "moonshotai/Kimi-K2-Thinking (fireworks-ai)",
-      "cost_per_1m_in": 0,
-      "cost_per_1m_out": 0,
-      "cost_per_1m_in_cached": 0,
-      "cost_per_1m_out_cached": 0,
-      "context_window": 262144,
-      "default_max_tokens": 8192,
-      "can_reason": false,
-      "supports_attachments": false
-    },
-    {
-      "id": "moonshotai/Kimi-K2.5:fireworks-ai",
-      "name": "moonshotai/Kimi-K2.5 (fireworks-ai)",
-      "cost_per_1m_in": 0,
-      "cost_per_1m_out": 0,
-      "cost_per_1m_in_cached": 0,
-      "cost_per_1m_out_cached": 0,
-      "context_window": 262144,
-      "default_max_tokens": 8192,
-      "can_reason": false,
-      "supports_attachments": false
-    },
     {
       "id": "openai/gpt-oss-120b:cerebras",
       "name": "openai/gpt-oss-120b (cerebras)",
@@ -235,18 +199,6 @@
       "can_reason": false,
       "supports_attachments": false
     },
-    {
-      "id": "zai-org/GLM-5.1:fireworks-ai",
-      "name": "zai-org/GLM-5.1 (fireworks-ai)",
-      "cost_per_1m_in": 0,
-      "cost_per_1m_out": 0,
-      "cost_per_1m_in_cached": 0,
-      "cost_per_1m_out_cached": 0,
-      "context_window": 202752,
-      "default_max_tokens": 8192,
-      "can_reason": false,
-      "supports_attachments": false
-    },
     {
       "id": "zai-org/GLM-5.1-FP8:fireworks-ai",
       "name": "zai-org/GLM-5.1-FP8 (fireworks-ai)",

internal/providers/configs/neuralwatt.json 🔗

@@ -91,24 +91,6 @@
       "default_reasoning_effort": "medium",
       "supports_attachments": false
     },
-    {
-      "id": "Qwen/Qwen3.5-35B-A3B",
-      "name": "Qwen3.5 35B A3B",
-      "cost_per_1m_in": 0.3,
-      "cost_per_1m_out": 1.1,
-      "cost_per_1m_in_cached": 0,
-      "cost_per_1m_out_cached": 0,
-      "context_window": 131072,
-      "default_max_tokens": 13107,
-      "can_reason": true,
-      "reasoning_levels": [
-        "low",
-        "medium",
-        "high"
-      ],
-      "default_reasoning_effort": "medium",
-      "supports_attachments": false
-    },
     {
       "id": "Qwen/Qwen3.5-397B-A17B-FP8",
       "name": "Qwen3.5 397B A17B FP8",

internal/providers/configs/opencode-go.json 🔗

@@ -105,7 +105,7 @@
       "cost_per_1m_in_cached": 0.03,
       "cost_per_1m_out_cached": 0,
       "context_window": 204800,
-      "default_max_tokens": 131072,
+      "default_max_tokens": 65536,
       "can_reason": true,
       "reasoning_levels": [
         "low",

internal/providers/configs/openrouter.json 🔗

@@ -586,12 +586,12 @@
     {
       "id": "deepseek/deepseek-v3.2",
       "name": "DeepSeek: DeepSeek V3.2",
-      "cost_per_1m_in": 0.26,
-      "cost_per_1m_out": 0.38,
+      "cost_per_1m_in": 0.5,
+      "cost_per_1m_out": 1.5,
       "cost_per_1m_in_cached": 0,
-      "cost_per_1m_out_cached": 0.13,
+      "cost_per_1m_out_cached": 0.25,
       "context_window": 163840,
-      "default_max_tokens": 16384,
+      "default_max_tokens": 81920,
       "can_reason": true,
       "reasoning_levels": [
         "low",
@@ -886,12 +886,12 @@
     {
       "id": "google/gemma-4-26b-a4b-it",
       "name": "Google: Gemma 4 26B A4B ",
-      "cost_per_1m_in": 0.08,
-      "cost_per_1m_out": 0.35,
+      "cost_per_1m_in": 0.09,
+      "cost_per_1m_out": 0.4,
       "cost_per_1m_in_cached": 0,
-      "cost_per_1m_out_cached": 0.01,
+      "cost_per_1m_out_cached": 0.06,
       "context_window": 262144,
-      "default_max_tokens": 26214,
+      "default_max_tokens": 131072,
       "can_reason": true,
       "reasoning_levels": [
         "low",
@@ -922,12 +922,12 @@
     {
       "id": "google/gemma-4-31b-it",
       "name": "Google: Gemma 4 31B",
-      "cost_per_1m_in": 0.13,
-      "cost_per_1m_out": 0.38,
+      "cost_per_1m_in": 0.14,
+      "cost_per_1m_out": 0.4,
       "cost_per_1m_in_cached": 0,
-      "cost_per_1m_out_cached": 0.02,
+      "cost_per_1m_out_cached": 0,
       "context_window": 262144,
-      "default_max_tokens": 26214,
+      "default_max_tokens": 65536,
       "can_reason": true,
       "reasoning_levels": [
         "low",
@@ -980,8 +980,8 @@
       "cost_per_1m_out": 1.2,
       "cost_per_1m_in_cached": 0,
       "cost_per_1m_out_cached": 0.06,
-      "context_window": 262144,
-      "default_max_tokens": 72000,
+      "context_window": 256000,
+      "default_max_tokens": 40000,
       "can_reason": false,
       "supports_attachments": false
     },
@@ -1000,12 +1000,12 @@
     {
       "id": "meta-llama/llama-3.3-70b-instruct",
       "name": "Meta: Llama 3.3 70B Instruct",
-      "cost_per_1m_in": 0.6,
-      "cost_per_1m_out": 0.6,
+      "cost_per_1m_in": 0.135,
+      "cost_per_1m_out": 0.4,
       "cost_per_1m_in_cached": 0,
-      "cost_per_1m_out_cached": 0.6,
+      "cost_per_1m_out_cached": 0,
       "context_window": 131072,
-      "default_max_tokens": 65536,
+      "default_max_tokens": 60000,
       "can_reason": false,
       "supports_attachments": false
     },
@@ -1072,12 +1072,12 @@
     {
       "id": "minimax/minimax-m2.5",
       "name": "MiniMax: MiniMax M2.5",
-      "cost_per_1m_in": 0.3,
+      "cost_per_1m_in": 0.15,
       "cost_per_1m_out": 1.2,
       "cost_per_1m_in_cached": 0,
       "cost_per_1m_out_cached": 0.03,
-      "context_window": 204800,
-      "default_max_tokens": 65550,
+      "context_window": 196608,
+      "default_max_tokens": 98304,
       "can_reason": true,
       "reasoning_levels": [
         "low",
@@ -1210,36 +1210,36 @@
     {
       "id": "mistralai/ministral-14b-2512",
       "name": "Mistral: Ministral 3 14B 2512",
-      "cost_per_1m_in": 0.2,
-      "cost_per_1m_out": 0.2,
+      "cost_per_1m_in": 0.35,
+      "cost_per_1m_out": 0.35,
       "cost_per_1m_in_cached": 0,
-      "cost_per_1m_out_cached": 0.02,
+      "cost_per_1m_out_cached": 0,
       "context_window": 262144,
-      "default_max_tokens": 26214,
+      "default_max_tokens": 131072,
       "can_reason": false,
       "supports_attachments": true
     },
     {
       "id": "mistralai/ministral-3b-2512",
       "name": "Mistral: Ministral 3 3B 2512",
-      "cost_per_1m_in": 0.1,
-      "cost_per_1m_out": 0.1,
+      "cost_per_1m_in": 0.35,
+      "cost_per_1m_out": 0.35,
       "cost_per_1m_in_cached": 0,
-      "cost_per_1m_out_cached": 0.01,
+      "cost_per_1m_out_cached": 0,
       "context_window": 131072,
-      "default_max_tokens": 13107,
+      "default_max_tokens": 65536,
       "can_reason": false,
       "supports_attachments": true
     },
     {
       "id": "mistralai/ministral-8b-2512",
       "name": "Mistral: Ministral 3 8B 2512",
-      "cost_per_1m_in": 0.3,
-      "cost_per_1m_out": 0.3,
+      "cost_per_1m_in": 0.15,
+      "cost_per_1m_out": 0.15,
       "cost_per_1m_in_cached": 0,
-      "cost_per_1m_out_cached": 0,
+      "cost_per_1m_out_cached": 0.015,
       "context_window": 262144,
-      "default_max_tokens": 131072,
+      "default_max_tokens": 26214,
       "can_reason": false,
       "supports_attachments": true
     },
@@ -1408,12 +1408,12 @@
     {
       "id": "moonshotai/kimi-k2-0905",
       "name": "MoonshotAI: Kimi K2 0905",
-      "cost_per_1m_in": 1,
-      "cost_per_1m_out": 3,
+      "cost_per_1m_in": 0.6,
+      "cost_per_1m_out": 2.5,
       "cost_per_1m_in_cached": 0,
-      "cost_per_1m_out_cached": 0.5,
+      "cost_per_1m_out_cached": 0,
       "context_window": 262144,
-      "default_max_tokens": 8192,
+      "default_max_tokens": 131072,
       "can_reason": false,
       "supports_attachments": false
     },
@@ -1423,7 +1423,7 @@
       "cost_per_1m_in": 0.6,
       "cost_per_1m_out": 2.5,
       "cost_per_1m_in_cached": 0,
-      "cost_per_1m_out_cached": 0,
+      "cost_per_1m_out_cached": 0.6,
       "context_window": 262144,
       "default_max_tokens": 131072,
       "can_reason": true,
@@ -1438,10 +1438,10 @@
     {
       "id": "moonshotai/kimi-k2.5",
       "name": "MoonshotAI: Kimi K2.5",
-      "cost_per_1m_in": 0.55,
-      "cost_per_1m_out": 3.25,
+      "cost_per_1m_in": 0.445,
+      "cost_per_1m_out": 2,
       "cost_per_1m_in_cached": 0,
-      "cost_per_1m_out_cached": 0.15,
+      "cost_per_1m_out_cached": 0.225,
       "context_window": 262144,
       "default_max_tokens": 131072,
       "can_reason": true,
@@ -1689,7 +1689,7 @@
       "cost_per_1m_in_cached": 0,
       "cost_per_1m_out_cached": 0.5,
       "context_window": 1047576,
-      "default_max_tokens": 16384,
+      "default_max_tokens": 104757,
       "can_reason": false,
       "supports_attachments": true
     },
@@ -1723,7 +1723,7 @@
       "cost_per_1m_in": 2.5,
       "cost_per_1m_out": 10,
       "cost_per_1m_in_cached": 0,
-      "cost_per_1m_out_cached": 1.25,
+      "cost_per_1m_out_cached": 0,
       "context_window": 128000,
       "default_max_tokens": 8192,
       "can_reason": false,
@@ -1909,9 +1909,9 @@
       "cost_per_1m_in": 0.05,
       "cost_per_1m_out": 0.4,
       "cost_per_1m_in_cached": 0,
-      "cost_per_1m_out_cached": 0.005,
+      "cost_per_1m_out_cached": 0.01,
       "context_window": 400000,
-      "default_max_tokens": 64000,
+      "default_max_tokens": 40000,
       "can_reason": true,
       "reasoning_levels": [
         "low",
@@ -2230,12 +2230,12 @@
     {
       "id": "openai/gpt-oss-20b",
       "name": "OpenAI: gpt-oss-20b",
-      "cost_per_1m_in": 0.03,
-      "cost_per_1m_out": 0.14,
+      "cost_per_1m_in": 0.045,
+      "cost_per_1m_out": 0.18,
       "cost_per_1m_in_cached": 0,
       "cost_per_1m_out_cached": 0,
       "context_window": 131072,
-      "default_max_tokens": 13107,
+      "default_max_tokens": 16384,
       "can_reason": true,
       "reasoning_levels": [
         "low",
@@ -2729,11 +2729,11 @@
       "id": "qwen/qwen3-coder",
       "name": "Qwen: Qwen3 Coder 480B A35B",
       "cost_per_1m_in": 0.22,
-      "cost_per_1m_out": 1,
+      "cost_per_1m_out": 1.8,
       "cost_per_1m_in_cached": 0,
-      "cost_per_1m_out_cached": 0.022,
+      "cost_per_1m_out_cached": 0,
       "context_window": 262144,
-      "default_max_tokens": 26214,
+      "default_max_tokens": 32768,
       "can_reason": false,
       "supports_attachments": false
     },
@@ -2764,10 +2764,10 @@
     {
       "id": "qwen/qwen3-coder-next",
       "name": "Qwen: Qwen3 Coder Next",
-      "cost_per_1m_in": 0.15,
-      "cost_per_1m_out": 0.8,
+      "cost_per_1m_in": 0.18,
+      "cost_per_1m_out": 1.35,
       "cost_per_1m_in_cached": 0,
-      "cost_per_1m_out_cached": 0.11,
+      "cost_per_1m_out_cached": 0.18,
       "context_window": 262144,
       "default_max_tokens": 131072,
       "can_reason": false,
@@ -2890,20 +2890,20 @@
     {
       "id": "qwen/qwen3-vl-30b-a3b-instruct",
       "name": "Qwen: Qwen3 VL 30B A3B Instruct",
-      "cost_per_1m_in": 0.29,
-      "cost_per_1m_out": 1,
+      "cost_per_1m_in": 0.13,
+      "cost_per_1m_out": 0.52,
       "cost_per_1m_in_cached": 0,
       "cost_per_1m_out_cached": 0,
-      "context_window": 262144,
-      "default_max_tokens": 131072,
+      "context_window": 131072,
+      "default_max_tokens": 16384,
       "can_reason": false,
       "supports_attachments": true
     },
     {
       "id": "qwen/qwen3-vl-30b-a3b-thinking",
       "name": "Qwen: Qwen3 VL 30B A3B Thinking",
-      "cost_per_1m_in": 0.2,
-      "cost_per_1m_out": 1,
+      "cost_per_1m_in": 0.13,
+      "cost_per_1m_out": 1.56,
       "cost_per_1m_in_cached": 0,
       "cost_per_1m_out_cached": 0,
       "context_window": 131072,
@@ -3016,10 +3016,10 @@
     {
       "id": "qwen/qwen3.5-27b",
       "name": "Qwen: Qwen3.5-27B",
-      "cost_per_1m_in": 0.3,
-      "cost_per_1m_out": 2.4,
+      "cost_per_1m_in": 0.27,
+      "cost_per_1m_out": 2.16,
       "cost_per_1m_in_cached": 0,
-      "cost_per_1m_out_cached": 0,
+      "cost_per_1m_out_cached": 0.27,
       "context_window": 262144,
       "default_max_tokens": 32768,
       "can_reason": true,
@@ -3217,9 +3217,9 @@
       "cost_per_1m_in": 0.1,
       "cost_per_1m_out": 0.3,
       "cost_per_1m_in_cached": 0,
-      "cost_per_1m_out_cached": 0.02,
+      "cost_per_1m_out_cached": 0.01,
       "context_window": 262144,
-      "default_max_tokens": 16000,
+      "default_max_tokens": 32768,
       "can_reason": true,
       "reasoning_levels": [
         "low",
@@ -3285,7 +3285,7 @@
       "cost_per_1m_in_cached": 0,
       "cost_per_1m_out_cached": 0.11,
       "context_window": 131072,
-      "default_max_tokens": 48000,
+      "default_max_tokens": 49152,
       "can_reason": true,
       "reasoning_levels": [
         "low",
@@ -3352,12 +3352,12 @@
     {
       "id": "z-ai/glm-4.6",
       "name": "Z.ai: GLM 4.6",
-      "cost_per_1m_in": 0.39,
-      "cost_per_1m_out": 1.9,
+      "cost_per_1m_in": 0.55,
+      "cost_per_1m_out": 2.2,
       "cost_per_1m_in_cached": 0,
-      "cost_per_1m_out_cached": 0,
+      "cost_per_1m_out_cached": 0.11,
       "context_window": 204800,
-      "default_max_tokens": 102400,
+      "default_max_tokens": 65536,
       "can_reason": true,
       "reasoning_levels": [
         "low",
@@ -3373,9 +3373,9 @@
       "cost_per_1m_in": 0.3,
       "cost_per_1m_out": 0.9,
       "cost_per_1m_in_cached": 0,
-      "cost_per_1m_out_cached": 0.055,
+      "cost_per_1m_out_cached": 0,
       "context_window": 131072,
-      "default_max_tokens": 16384,
+      "default_max_tokens": 65536,
       "can_reason": true,
       "reasoning_levels": [
         "low",
@@ -3446,7 +3446,7 @@
       "cost_per_1m_out": 4,
       "cost_per_1m_in_cached": 0,
       "cost_per_1m_out_cached": 0.24,
-      "context_window": 202752,
+      "context_window": 262144,
       "default_max_tokens": 65536,
       "can_reason": true,
       "reasoning_levels": [
@@ -3520,10 +3520,10 @@
     {
       "id": "x-ai/grok-3-mini",
       "name": "xAI: Grok 3 Mini",
-      "cost_per_1m_in": 0.3,
-      "cost_per_1m_out": 0.5,
+      "cost_per_1m_in": 0.6,
+      "cost_per_1m_out": 4,
       "cost_per_1m_in_cached": 0,
-      "cost_per_1m_out_cached": 0.075,
+      "cost_per_1m_out_cached": 0.15,
       "context_window": 131072,
       "default_max_tokens": 13107,
       "can_reason": true,
@@ -3538,10 +3538,10 @@
     {
       "id": "x-ai/grok-3-mini-beta",
       "name": "xAI: Grok 3 Mini Beta",
-      "cost_per_1m_in": 0.3,
-      "cost_per_1m_out": 0.5,
+      "cost_per_1m_in": 0.6,
+      "cost_per_1m_out": 4,
       "cost_per_1m_in_cached": 0,
-      "cost_per_1m_out_cached": 0.075,
+      "cost_per_1m_out_cached": 0.15,
       "context_window": 131072,
       "default_max_tokens": 13107,
       "can_reason": true,

internal/providers/configs/synthetic.json 🔗

@@ -55,6 +55,24 @@
       "default_reasoning_effort": "medium",
       "supports_attachments": false
     },
+    {
+      "id": "hf:zai-org/GLM-4.7",
+      "name": "GLM 4.7",
+      "cost_per_1m_in": 0.45,
+      "cost_per_1m_out": 2.19,
+      "cost_per_1m_in_cached": 0.45,
+      "cost_per_1m_out_cached": 0.45,
+      "context_window": 202752,
+      "default_max_tokens": 20275,
+      "can_reason": true,
+      "reasoning_levels": [
+        "low",
+        "medium",
+        "high"
+      ],
+      "default_reasoning_effort": "medium",
+      "supports_attachments": false
+    },
     {
       "id": "hf:zai-org/GLM-4.7-Flash",
       "name": "GLM 4.7 Flash",

internal/providers/configs/venice.json 🔗

@@ -409,6 +409,18 @@
       "default_reasoning_effort": "medium",
       "supports_attachments": true
     },
+    {
+      "id": "gemma-4-uncensored",
+      "name": "Gemma 4 Uncensored",
+      "cost_per_1m_in": 0.1625,
+      "cost_per_1m_out": 0.5,
+      "cost_per_1m_in_cached": 0,
+      "cost_per_1m_out_cached": 0,
+      "context_window": 256000,
+      "default_max_tokens": 8192,
+      "can_reason": false,
+      "supports_attachments": true
+    },
     {
       "id": "google-gemma-3-27b-it",
       "name": "Google Gemma 3 27B Instruct",
@@ -746,7 +758,7 @@
     {
       "id": "qwen3-5-9b",
       "name": "Qwen 3.5 9B",
-      "cost_per_1m_in": 0.05,
+      "cost_per_1m_in": 0.1,
       "cost_per_1m_out": 0.15,
       "cost_per_1m_in_cached": 0,
       "cost_per_1m_out_cached": 0,

internal/providers/configs/vercel.json 🔗

@@ -847,6 +847,42 @@
       "default_reasoning_effort": "medium",
       "supports_attachments": true
     },
+    {
+      "id": "openai/gpt-oss-20b",
+      "name": "GPT OSS 20B",
+      "cost_per_1m_in": 0.05,
+      "cost_per_1m_out": 0.2,
+      "cost_per_1m_in_cached": 0,
+      "cost_per_1m_out_cached": 0,
+      "context_window": 131072,
+      "default_max_tokens": 8000,
+      "can_reason": true,
+      "reasoning_levels": [
+        "low",
+        "medium",
+        "high"
+      ],
+      "default_reasoning_effort": "medium",
+      "supports_attachments": false
+    },
+    {
+      "id": "openai/gpt-oss-safeguard-20b",
+      "name": "GPT OSS Safeguard 20B",
+      "cost_per_1m_in": 0.075,
+      "cost_per_1m_out": 0.3,
+      "cost_per_1m_in_cached": 0.037,
+      "cost_per_1m_out_cached": 0,
+      "context_window": 131072,
+      "default_max_tokens": 8000,
+      "can_reason": true,
+      "reasoning_levels": [
+        "low",
+        "medium",
+        "high"
+      ],
+      "default_reasoning_effort": "medium",
+      "supports_attachments": false
+    },
     {
       "id": "openai/gpt-4-turbo",
       "name": "GPT-4 Turbo",
@@ -2335,42 +2371,6 @@
       "default_reasoning_effort": "medium",
       "supports_attachments": false
     },
-    {
-      "id": "openai/gpt-oss-20b",
-      "name": "gpt-oss-20b",
-      "cost_per_1m_in": 0.05,
-      "cost_per_1m_out": 0.2,
-      "cost_per_1m_in_cached": 0,
-      "cost_per_1m_out_cached": 0,
-      "context_window": 131072,
-      "default_max_tokens": 8000,
-      "can_reason": true,
-      "reasoning_levels": [
-        "low",
-        "medium",
-        "high"
-      ],
-      "default_reasoning_effort": "medium",
-      "supports_attachments": false
-    },
-    {
-      "id": "openai/gpt-oss-safeguard-20b",
-      "name": "gpt-oss-safeguard-20b",
-      "cost_per_1m_in": 0.075,
-      "cost_per_1m_out": 0.3,
-      "cost_per_1m_in_cached": 0.037,
-      "cost_per_1m_out_cached": 0,
-      "context_window": 131072,
-      "default_max_tokens": 8000,
-      "can_reason": true,
-      "reasoning_levels": [
-        "low",
-        "medium",
-        "high"
-      ],
-      "default_reasoning_effort": "medium",
-      "supports_attachments": false
-    },
     {
       "id": "openai/o1",
       "name": "o1",