chore: auto-update generated files

Change summary

internal/providers/configs/aihubmix.json    |  76 -----------
internal/providers/configs/huggingface.json |  39 ++----
internal/providers/configs/openrouter.json  | 146 +++++++++++-----------
internal/providers/configs/vercel.json      |  61 ++++++---
4 files changed, 126 insertions(+), 196 deletions(-)
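
For context, each entry in these generated configs describes one model's pricing and capability metadata. Below is a minimal Go sketch of how such an entry could be decoded and its per-request cost computed; the struct fields simply mirror the JSON keys seen in the diffs, while the type name, package layout, and sample token counts are illustrative assumptions rather than this repository's actual code.

package main

import (
	"encoding/json"
	"fmt"
)

// Model mirrors the per-model fields that appear in the config diffs below.
// Illustrative assumption only; the repository's real types may differ.
type Model struct {
	ID                     string   `json:"id"`
	Name                   string   `json:"name"`
	CostPer1MIn            float64  `json:"cost_per_1m_in"`
	CostPer1MOut           float64  `json:"cost_per_1m_out"`
	CostPer1MInCached      float64  `json:"cost_per_1m_in_cached"`
	CostPer1MOutCached     float64  `json:"cost_per_1m_out_cached"`
	ContextWindow          int      `json:"context_window"`
	DefaultMaxTokens       int      `json:"default_max_tokens"`
	CanReason              bool     `json:"can_reason"`
	ReasoningLevels        []string `json:"reasoning_levels,omitempty"`
	DefaultReasoningEffort string   `json:"default_reasoning_effort,omitempty"`
	SupportsAttachments    bool     `json:"supports_attachments"`
}

func main() {
	// Values taken from the updated openai/gpt-oss-120b entry in openrouter.json.
	raw := []byte(`{
		"id": "openai/gpt-oss-120b",
		"name": "OpenAI: gpt-oss-120b",
		"cost_per_1m_in": 0.05,
		"cost_per_1m_out": 0.25,
		"context_window": 131072,
		"default_max_tokens": 16384,
		"can_reason": true
	}`)

	var m Model
	if err := json.Unmarshal(raw, &m); err != nil {
		panic(err)
	}

	// Prices are per one million tokens, so a request costs tokens/1e6 * rate.
	inTokens, outTokens := 12_000.0, 3_000.0 // hypothetical request size
	cost := inTokens/1e6*m.CostPer1MIn + outTokens/1e6*m.CostPer1MOut
	fmt.Printf("%s: $%.6f for %.0f input / %.0f output tokens\n", m.Name, cost, inTokens, outTokens)
}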

Detailed changes

internal/providers/configs/aihubmix.json

@@ -1683,25 +1683,6 @@
       "supports_attachments": true,
       "options": {}
     },
-    {
-      "id": "gpt-5-pro",
-      "name": "GPT 5 Pro",
-      "cost_per_1m_in": 15,
-      "cost_per_1m_out": 120,
-      "cost_per_1m_in_cached": 0,
-      "cost_per_1m_out_cached": 0,
-      "context_window": 400000,
-      "default_max_tokens": 128000,
-      "can_reason": true,
-      "reasoning_levels": [
-        "low",
-        "medium",
-        "high"
-      ],
-      "default_reasoning_effort": "medium",
-      "supports_attachments": true,
-      "options": {}
-    },
     {
       "id": "gpt-5.1",
       "name": "GPT 5.1",
@@ -1880,25 +1861,6 @@
       "supports_attachments": true,
       "options": {}
     },
-    {
-      "id": "gpt-5.2-pro",
-      "name": "GPT 5.2 Pro",
-      "cost_per_1m_in": 21,
-      "cost_per_1m_out": 168,
-      "cost_per_1m_in_cached": 0,
-      "cost_per_1m_out_cached": 2.1,
-      "context_window": 400000,
-      "default_max_tokens": 128000,
-      "can_reason": true,
-      "reasoning_levels": [
-        "low",
-        "medium",
-        "high"
-      ],
-      "default_reasoning_effort": "medium",
-      "supports_attachments": true,
-      "options": {}
-    },
     {
       "id": "gpt-5.3-chat-latest",
       "name": "GPT 5.3 Chat",
@@ -1950,25 +1912,6 @@
       "supports_attachments": true,
       "options": {}
     },
-    {
-      "id": "gpt-5.4-pro",
-      "name": "GPT 5.4 Pro",
-      "cost_per_1m_in": 30,
-      "cost_per_1m_out": 180,
-      "cost_per_1m_in_cached": 0,
-      "cost_per_1m_out_cached": 30,
-      "context_window": 1050000,
-      "default_max_tokens": 128000,
-      "can_reason": true,
-      "reasoning_levels": [
-        "low",
-        "medium",
-        "high"
-      ],
-      "default_reasoning_effort": "medium",
-      "supports_attachments": true,
-      "options": {}
-    },
     {
       "id": "gpt-oss-120b",
       "name": "gpt-oss-120b",
@@ -2390,25 +2333,6 @@
       "supports_attachments": true,
       "options": {}
     },
-    {
-      "id": "o3-pro",
-      "name": "O3 Pro",
-      "cost_per_1m_in": 20,
-      "cost_per_1m_out": 80,
-      "cost_per_1m_in_cached": 0,
-      "cost_per_1m_out_cached": 20,
-      "context_window": 200000,
-      "default_max_tokens": 100000,
-      "can_reason": true,
-      "reasoning_levels": [
-        "low",
-        "medium",
-        "high"
-      ],
-      "default_reasoning_effort": "medium",
-      "supports_attachments": true,
-      "options": {}
-    },
     {
       "id": "o4-mini",
       "name": "O4 Mini",

internal/providers/configs/huggingface.json

@@ -33,6 +33,19 @@
       "supports_attachments": false,
       "options": {}
     },
+    {
+      "id": "Qwen/Qwen3-32B:groq",
+      "name": "Qwen/Qwen3-32B (groq)",
+      "cost_per_1m_in": 0.29,
+      "cost_per_1m_out": 0.59,
+      "cost_per_1m_in_cached": 0,
+      "cost_per_1m_out_cached": 0,
+      "context_window": 131072,
+      "default_max_tokens": 8192,
+      "can_reason": false,
+      "supports_attachments": false,
+      "options": {}
+    },
     {
       "id": "Qwen/Qwen3-8B:fireworks-ai",
       "name": "Qwen/Qwen3-8B (fireworks-ai)",
@@ -163,19 +176,6 @@
       "supports_attachments": false,
       "options": {}
     },
-    {
-      "id": "moonshotai/Kimi-K2-Instruct-0905:groq",
-      "name": "moonshotai/Kimi-K2-Instruct-0905 (groq)",
-      "cost_per_1m_in": 0,
-      "cost_per_1m_out": 0,
-      "cost_per_1m_in_cached": 0,
-      "cost_per_1m_out_cached": 0,
-      "context_window": 262144,
-      "default_max_tokens": 8192,
-      "can_reason": false,
-      "supports_attachments": false,
-      "options": {}
-    },
     {
       "id": "moonshotai/Kimi-K2-Thinking:fireworks-ai",
       "name": "moonshotai/Kimi-K2-Thinking (fireworks-ai)",
@@ -280,19 +280,6 @@
       "supports_attachments": false,
       "options": {}
     },
-    {
-      "id": "zai-org/GLM-4.7:fireworks-ai",
-      "name": "zai-org/GLM-4.7 (fireworks-ai)",
-      "cost_per_1m_in": 0,
-      "cost_per_1m_out": 0,
-      "cost_per_1m_in_cached": 0,
-      "cost_per_1m_out_cached": 0,
-      "context_window": 202752,
-      "default_max_tokens": 8192,
-      "can_reason": false,
-      "supports_attachments": false,
-      "options": {}
-    },
     {
       "id": "zai-org/GLM-5:fireworks-ai",
       "name": "zai-org/GLM-5 (fireworks-ai)",

internal/providers/configs/openrouter.json

@@ -541,12 +541,12 @@
     {
       "id": "deepseek/deepseek-chat-v3.1",
       "name": "DeepSeek: DeepSeek V3.1",
-      "cost_per_1m_in": 0.21,
-      "cost_per_1m_out": 0.7899999999999999,
+      "cost_per_1m_in": 0.19999999999999998,
+      "cost_per_1m_out": 0.7999999999999999,
       "cost_per_1m_in_cached": 0,
-      "cost_per_1m_out_cached": 0.1300000002,
+      "cost_per_1m_out_cached": 0.09999999999999999,
       "context_window": 163840,
-      "default_max_tokens": 16384,
+      "default_max_tokens": 32768,
       "can_reason": true,
       "reasoning_levels": [
         "low",
@@ -620,9 +620,9 @@
       "cost_per_1m_in": 0.27,
       "cost_per_1m_out": 0.41,
       "cost_per_1m_in_cached": 0,
-      "cost_per_1m_out_cached": 0,
+      "cost_per_1m_out_cached": 0.27,
       "context_window": 163840,
-      "default_max_tokens": 32768,
+      "default_max_tokens": 81920,
       "can_reason": true,
       "reasoning_levels": [
         "low",
@@ -1045,12 +1045,12 @@
     {
       "id": "meta-llama/llama-3.3-70b-instruct",
       "name": "Meta: Llama 3.3 70B Instruct",
-      "cost_per_1m_in": 0.6,
-      "cost_per_1m_out": 0.6,
+      "cost_per_1m_in": 0.25,
+      "cost_per_1m_out": 0.75,
       "cost_per_1m_in_cached": 0,
-      "cost_per_1m_out_cached": 0.6,
+      "cost_per_1m_out_cached": 0,
       "context_window": 131072,
-      "default_max_tokens": 65536,
+      "default_max_tokens": 13107,
       "can_reason": false,
       "supports_attachments": false,
       "options": {}
@@ -1062,8 +1062,8 @@
       "cost_per_1m_out": 0,
       "cost_per_1m_in_cached": 0,
       "cost_per_1m_out_cached": 0,
-      "context_window": 65536,
-      "default_max_tokens": 6553,
+      "context_window": 128000,
+      "default_max_tokens": 64000,
       "can_reason": false,
       "supports_attachments": false,
       "options": {}
@@ -1117,11 +1117,11 @@
       "id": "minimax/minimax-m2.1",
       "name": "MiniMax: MiniMax M2.1",
       "cost_per_1m_in": 0.3,
-      "cost_per_1m_out": 1.2,
+      "cost_per_1m_out": 2.4,
       "cost_per_1m_in_cached": 0,
-      "cost_per_1m_out_cached": 0.029,
+      "cost_per_1m_out_cached": 0.03,
       "context_window": 204800,
-      "default_max_tokens": 20480,
+      "default_max_tokens": 65536,
       "can_reason": true,
       "reasoning_levels": [
         "low",
@@ -1466,10 +1466,10 @@
     {
       "id": "moonshotai/kimi-k2-0905:exacto",
       "name": "MoonshotAI: Kimi K2 0905 (exacto)",
-      "cost_per_1m_in": 1.15,
-      "cost_per_1m_out": 8,
+      "cost_per_1m_in": 0.6,
+      "cost_per_1m_out": 2.5,
       "cost_per_1m_in_cached": 0,
-      "cost_per_1m_out_cached": 0.15,
+      "cost_per_1m_out_cached": 0,
       "context_window": 262144,
       "default_max_tokens": 26214,
       "can_reason": false,
@@ -1479,10 +1479,10 @@
     {
       "id": "moonshotai/kimi-k2-thinking",
       "name": "MoonshotAI: Kimi K2 Thinking",
-      "cost_per_1m_in": 0.55,
-      "cost_per_1m_out": 2.5,
+      "cost_per_1m_in": 1.15,
+      "cost_per_1m_out": 8,
       "cost_per_1m_in_cached": 0,
-      "cost_per_1m_out_cached": 0,
+      "cost_per_1m_out_cached": 0.15,
       "context_window": 262144,
       "default_max_tokens": 131072,
       "can_reason": true,
@@ -1725,9 +1725,9 @@
       "cost_per_1m_in": 0.09999999999999999,
       "cost_per_1m_out": 0.39999999999999997,
       "cost_per_1m_in_cached": 0,
-      "cost_per_1m_out_cached": 0.03,
+      "cost_per_1m_out_cached": 0.024999999999999998,
       "context_window": 1047576,
-      "default_max_tokens": 104757,
+      "default_max_tokens": 16384,
       "can_reason": false,
       "supports_attachments": true,
       "options": {}
@@ -1738,7 +1738,7 @@
       "cost_per_1m_in": 2.5,
       "cost_per_1m_out": 10,
       "cost_per_1m_in_cached": 0,
-      "cost_per_1m_out_cached": 1.25,
+      "cost_per_1m_out_cached": 0,
       "context_window": 128000,
       "default_max_tokens": 8192,
       "can_reason": false,
@@ -2202,11 +2202,11 @@
       "id": "openai/gpt-oss-120b",
       "name": "OpenAI: gpt-oss-120b",
       "cost_per_1m_in": 0.049999999999999996,
-      "cost_per_1m_out": 0.44999999999999996,
+      "cost_per_1m_out": 0.25,
       "cost_per_1m_in_cached": 0,
-      "cost_per_1m_out_cached": 0.024999999999999998,
+      "cost_per_1m_out_cached": 0,
       "context_window": 131072,
-      "default_max_tokens": 32768,
+      "default_max_tokens": 16384,
       "can_reason": true,
       "reasoning_levels": [
         "low",
@@ -2258,12 +2258,12 @@
     {
       "id": "openai/gpt-oss-20b",
       "name": "OpenAI: gpt-oss-20b",
-      "cost_per_1m_in": 0.075,
-      "cost_per_1m_out": 0.3,
+      "cost_per_1m_in": 0.03,
+      "cost_per_1m_out": 0.14,
       "cost_per_1m_in_cached": 0,
-      "cost_per_1m_out_cached": 0.0375,
+      "cost_per_1m_out_cached": 0,
       "context_window": 131072,
-      "default_max_tokens": 32768,
+      "default_max_tokens": 13107,
       "can_reason": true,
       "reasoning_levels": [
         "low",
@@ -2597,12 +2597,12 @@
     {
       "id": "qwen/qwen3-14b",
       "name": "Qwen: Qwen3 14B",
-      "cost_per_1m_in": 0.12,
-      "cost_per_1m_out": 0.24,
+      "cost_per_1m_in": 0.22749999999999998,
+      "cost_per_1m_out": 0.9099999999999999,
       "cost_per_1m_in_cached": 0,
       "cost_per_1m_out_cached": 0,
-      "context_window": 40960,
-      "default_max_tokens": 20480,
+      "context_window": 131072,
+      "default_max_tokens": 4096,
       "can_reason": true,
       "reasoning_levels": [
         "low",
@@ -2648,12 +2648,12 @@
     {
       "id": "qwen/qwen3-235b-a22b-thinking-2507",
       "name": "Qwen: Qwen3 235B A22B Thinking 2507",
-      "cost_per_1m_in": 0.6,
-      "cost_per_1m_out": 2.4,
+      "cost_per_1m_in": 0.65,
+      "cost_per_1m_out": 3,
       "cost_per_1m_in_cached": 0,
-      "cost_per_1m_out_cached": 0.6,
+      "cost_per_1m_out_cached": 0,
       "context_window": 262144,
-      "default_max_tokens": 131072,
+      "default_max_tokens": 26214,
       "can_reason": true,
       "reasoning_levels": [
         "low",
@@ -2667,12 +2667,12 @@
     {
       "id": "qwen/qwen3-30b-a3b",
       "name": "Qwen: Qwen3 30B A3B",
-      "cost_per_1m_in": 0.15,
-      "cost_per_1m_out": 0.6,
+      "cost_per_1m_in": 0.13,
+      "cost_per_1m_out": 0.52,
       "cost_per_1m_in_cached": 0,
-      "cost_per_1m_out_cached": 0.15,
+      "cost_per_1m_out_cached": 0,
       "context_window": 131072,
-      "default_max_tokens": 65536,
+      "default_max_tokens": 4096,
       "can_reason": true,
       "reasoning_levels": [
         "low",
@@ -2699,12 +2699,12 @@
     {
       "id": "qwen/qwen3-30b-a3b-thinking-2507",
       "name": "Qwen: Qwen3 30B A3B Thinking 2507",
-      "cost_per_1m_in": 0.09999999999999999,
-      "cost_per_1m_out": 0.3,
+      "cost_per_1m_in": 0.08,
+      "cost_per_1m_out": 0.39999999999999997,
       "cost_per_1m_in_cached": 0,
-      "cost_per_1m_out_cached": 0,
-      "context_window": 262144,
-      "default_max_tokens": 26214,
+      "cost_per_1m_out_cached": 0.08,
+      "context_window": 131072,
+      "default_max_tokens": 65536,
       "can_reason": true,
       "reasoning_levels": [
         "low",
@@ -2956,12 +2956,12 @@
     {
       "id": "qwen/qwen3-vl-235b-a22b-thinking",
       "name": "Qwen: Qwen3 VL 235B A22B Thinking",
-      "cost_per_1m_in": 0.44999999999999996,
-      "cost_per_1m_out": 3.5,
+      "cost_per_1m_in": 0,
+      "cost_per_1m_out": 0,
       "cost_per_1m_in_cached": 0,
       "cost_per_1m_out_cached": 0,
-      "context_window": 262144,
-      "default_max_tokens": 131072,
+      "context_window": 131072,
+      "default_max_tokens": 16384,
       "can_reason": true,
       "reasoning_levels": [
         "low",
@@ -2988,12 +2988,12 @@
     {
       "id": "qwen/qwen3-vl-30b-a3b-thinking",
       "name": "Qwen: Qwen3 VL 30B A3B Thinking",
-      "cost_per_1m_in": 0.29,
+      "cost_per_1m_in": 0.19999999999999998,
       "cost_per_1m_out": 1,
       "cost_per_1m_in_cached": 0,
       "cost_per_1m_out_cached": 0,
-      "context_window": 262144,
-      "default_max_tokens": 131072,
+      "context_window": 131072,
+      "default_max_tokens": 16384,
       "can_reason": true,
       "reasoning_levels": [
         "low",
@@ -3090,10 +3090,10 @@
     {
       "id": "qwen/qwen3.5-122b-a10b",
       "name": "Qwen: Qwen3.5-122B-A10B",
-      "cost_per_1m_in": 0.3,
-      "cost_per_1m_out": 2.4,
+      "cost_per_1m_in": 0.39999999999999997,
+      "cost_per_1m_out": 3.1999999999999997,
       "cost_per_1m_in_cached": 0,
-      "cost_per_1m_out_cached": 0.3,
+      "cost_per_1m_out_cached": 0,
       "context_window": 262144,
       "default_max_tokens": 32768,
       "can_reason": true,
@@ -3109,8 +3109,8 @@
     {
       "id": "qwen/qwen3.5-27b",
       "name": "Qwen: Qwen3.5-27B",
-      "cost_per_1m_in": 0.195,
-      "cost_per_1m_out": 1.56,
+      "cost_per_1m_in": 0.3,
+      "cost_per_1m_out": 2.4,
       "cost_per_1m_in_cached": 0,
       "cost_per_1m_out_cached": 0,
       "context_window": 262144,
@@ -3337,7 +3337,7 @@
       "cost_per_1m_in_cached": 0,
       "cost_per_1m_out_cached": 0.11,
       "context_window": 131072,
-      "default_max_tokens": 48000,
+      "default_max_tokens": 49152,
       "can_reason": true,
       "reasoning_levels": [
         "low",
@@ -3522,10 +3522,10 @@
     {
       "id": "x-ai/grok-3",
       "name": "xAI: Grok 3",
-      "cost_per_1m_in": 5,
-      "cost_per_1m_out": 25,
+      "cost_per_1m_in": 3,
+      "cost_per_1m_out": 15,
       "cost_per_1m_in_cached": 0,
-      "cost_per_1m_out_cached": 1.25,
+      "cost_per_1m_out_cached": 0.75,
       "context_window": 131072,
       "default_max_tokens": 13107,
       "can_reason": false,
@@ -3535,10 +3535,10 @@
     {
       "id": "x-ai/grok-3-beta",
       "name": "xAI: Grok 3 Beta",
-      "cost_per_1m_in": 5,
-      "cost_per_1m_out": 25,
+      "cost_per_1m_in": 3,
+      "cost_per_1m_out": 15,
       "cost_per_1m_in_cached": 0,
-      "cost_per_1m_out_cached": 1.25,
+      "cost_per_1m_out_cached": 0.75,
       "context_window": 131072,
       "default_max_tokens": 13107,
       "can_reason": false,
@@ -3548,10 +3548,10 @@
     {
       "id": "x-ai/grok-3-mini",
       "name": "xAI: Grok 3 Mini",
-      "cost_per_1m_in": 0.3,
-      "cost_per_1m_out": 0.5,
+      "cost_per_1m_in": 0.6,
+      "cost_per_1m_out": 4,
       "cost_per_1m_in_cached": 0,
-      "cost_per_1m_out_cached": 0.075,
+      "cost_per_1m_out_cached": 0.15,
       "context_window": 131072,
       "default_max_tokens": 13107,
       "can_reason": true,
@@ -3567,10 +3567,10 @@
     {
       "id": "x-ai/grok-3-mini-beta",
       "name": "xAI: Grok 3 Mini Beta",
-      "cost_per_1m_in": 0.3,
-      "cost_per_1m_out": 0.5,
+      "cost_per_1m_in": 0.6,
+      "cost_per_1m_out": 4,
       "cost_per_1m_in_cached": 0,
-      "cost_per_1m_out_cached": 0.075,
+      "cost_per_1m_out_cached": 0.15,
       "context_window": 131072,
       "default_max_tokens": 13107,
       "can_reason": true,

internal/providers/configs/vercel.json

@@ -487,6 +487,25 @@
       "supports_attachments": false,
       "options": {}
     },
+    {
+      "id": "zai/glm-4.7-flash",
+      "name": "GLM 4.7 Flash",
+      "cost_per_1m_in": 0.07,
+      "cost_per_1m_out": 0.39999999999999997,
+      "cost_per_1m_in_cached": 0,
+      "cost_per_1m_out_cached": 0,
+      "context_window": 200000,
+      "default_max_tokens": 8000,
+      "can_reason": true,
+      "reasoning_levels": [
+        "low",
+        "medium",
+        "high"
+      ],
+      "default_reasoning_effort": "medium",
+      "supports_attachments": false,
+      "options": {}
+    },
     {
       "id": "zai/glm-4.7-flashx",
       "name": "GLM 4.7 FlashX",
@@ -506,6 +525,25 @@
       "supports_attachments": false,
       "options": {}
     },
+    {
+      "id": "zai/glm-5",
+      "name": "GLM 5",
+      "cost_per_1m_in": 0.7999999999999999,
+      "cost_per_1m_out": 2.56,
+      "cost_per_1m_in_cached": 0.16,
+      "cost_per_1m_out_cached": 0,
+      "context_window": 202800,
+      "default_max_tokens": 8000,
+      "can_reason": true,
+      "reasoning_levels": [
+        "low",
+        "medium",
+        "high"
+      ],
+      "default_reasoning_effort": "medium",
+      "supports_attachments": false,
+      "options": {}
+    },
     {
       "id": "zai/glm-4.5",
       "name": "GLM-4.5",
@@ -563,25 +601,6 @@
       "supports_attachments": true,
       "options": {}
     },
-    {
-      "id": "zai/glm-5",
-      "name": "GLM-5",
-      "cost_per_1m_in": 1,
-      "cost_per_1m_out": 3.1999999999999997,
-      "cost_per_1m_in_cached": 0.19999999999999998,
-      "cost_per_1m_out_cached": 0,
-      "context_window": 202800,
-      "default_max_tokens": 8000,
-      "can_reason": true,
-      "reasoning_levels": [
-        "low",
-        "medium",
-        "high"
-      ],
-      "default_reasoning_effort": "medium",
-      "supports_attachments": false,
-      "options": {}
-    },
     {
       "id": "openai/gpt-5-chat",
       "name": "GPT 5 Chat",
@@ -760,7 +779,7 @@
       "cost_per_1m_out": 15,
       "cost_per_1m_in_cached": 0.25,
       "cost_per_1m_out_cached": 0,
-      "context_window": 1050000,
+      "context_window": 200000,
       "default_max_tokens": 8000,
       "can_reason": true,
       "reasoning_levels": [
@@ -779,7 +798,7 @@
       "cost_per_1m_out": 180,
       "cost_per_1m_in_cached": 0,
       "cost_per_1m_out_cached": 0,
-      "context_window": 1050000,
+      "context_window": 200000,
       "default_max_tokens": 8000,
       "can_reason": true,
       "reasoning_levels": [