chore: run task generate for misc+MiniMax M2.1 (#138)

Amolith created this pull request

- Synthetic
  - Upgrade MiniMax from M2 to M2.1
- OpenRouter
  - Add minimax/minimax-m2.1
  - Add bytedance-seed/seed-1.6 and bytedance-seed/seed-1.6-flash
  - Add deepseek/deepseek-chat-v3.1
  - Add qwen/qwen3-vl-235b-a22b-thinking
  - Remove meta-llama/llama-3.1-405b-instruct
  - Remove openai/gpt-oss-20b:free
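Every model entry in these provider configs shares one shape, so the hunks below all touch the same handful of fields. As a reference, here is a minimal Go sketch of that schema. The struct and field names are hypothetical stand-ins (the real definitions live elsewhere in internal/providers); only the JSON keys are taken from the diffs. Cost fields are USD per million tokens; context_window and default_max_tokens are token counts.

package main

import (
	"encoding/json"
	"fmt"
)

// Model mirrors one entry in internal/providers/configs/*.json.
// Hypothetical names; only the JSON keys come from the diffs below.
type Model struct {
	ID                     string         `json:"id"`
	Name                   string         `json:"name"`
	CostPer1MIn            float64        `json:"cost_per_1m_in"`
	CostPer1MOut           float64        `json:"cost_per_1m_out"`
	CostPer1MInCached      float64        `json:"cost_per_1m_in_cached"`
	CostPer1MOutCached     float64        `json:"cost_per_1m_out_cached"`
	ContextWindow          int64          `json:"context_window"`
	DefaultMaxTokens       int64          `json:"default_max_tokens"`
	CanReason              bool           `json:"can_reason"`
	ReasoningLevels        []string       `json:"reasoning_levels,omitempty"`
	DefaultReasoningEffort string         `json:"default_reasoning_effort,omitempty"`
	SupportsAttachments    bool           `json:"supports_attachments"`
	Options                map[string]any `json:"options"`
}

func main() {
	// The new minimax/minimax-m2.1 entry, verbatim from the
	// openrouter.json diff in this PR.
	raw := `{
		"id": "minimax/minimax-m2.1",
		"name": "MiniMax: MiniMax M2.1",
		"cost_per_1m_in": 0.3,
		"cost_per_1m_out": 1.2,
		"cost_per_1m_in_cached": 0.375,
		"cost_per_1m_out_cached": 0.03,
		"context_window": 204800,
		"default_max_tokens": 65536,
		"can_reason": true,
		"reasoning_levels": ["low", "medium", "high"],
		"default_reasoning_effort": "medium",
		"supports_attachments": false,
		"options": {}
	}`
	var m Model
	if err := json.Unmarshal([]byte(raw), &m); err != nil {
		panic(err)
	}
	fmt.Printf("%s: $%.2f in / $%.2f out per 1M tokens, %d-token window\n",
		m.ID, m.CostPer1MIn, m.CostPer1MOut, m.ContextWindow)
}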

Change summary

internal/providers/configs/openrouter.json | 283 ++++++++++++++---------
internal/providers/configs/synthetic.json  |   4 
2 files changed, 175 insertions(+), 112 deletions(-)
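Most of the openrouter.json churn below is pricing and default_max_tokens updates. For orientation, here is a hedged sketch of how these per-million rates could turn into a dollar cost per request, reusing the Model struct above. It assumes uncached tokens bill at the base rates and cached input/output tokens at the corresponding *_cached rates; that is a reading of the field names, not something the diff confirms.

// RequestCost estimates the dollar cost of one request. Hypothetical
// helper: actual provider billing (e.g., cache writes vs. cache reads)
// may differ from this uniform cached-rate reading.
func RequestCost(m Model, in, cachedIn, out, cachedOut int64) float64 {
	return float64(in)/1e6*m.CostPer1MIn +
		float64(cachedIn)/1e6*m.CostPer1MInCached +
		float64(out)/1e6*m.CostPer1MOut +
		float64(cachedOut)/1e6*m.CostPer1MOutCached
}

Under that reading, a 10,000-token uncached prompt with a 1,000-token reply on the new minimax/minimax-m2.1 rates (0.3 in, 1.2 out) comes to 0.01*0.3 + 0.001*1.2 = $0.0042.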

Detailed changes

internal/providers/configs/openrouter.json

@@ -174,8 +174,8 @@
       "name": "Anthropic: Claude 3.5 Sonnet",
       "cost_per_1m_in": 6,
       "cost_per_1m_out": 30,
-      "cost_per_1m_in_cached": 0,
-      "cost_per_1m_out_cached": 0,
+      "cost_per_1m_in_cached": 7.5,
+      "cost_per_1m_out_cached": 0.6,
       "context_window": 200000,
       "default_max_tokens": 4096,
       "can_reason": false,
@@ -417,6 +417,44 @@
       "supports_attachments": true,
       "options": {}
     },
+    {
+      "id": "bytedance-seed/seed-1.6",
+      "name": "ByteDance Seed: Seed 1.6",
+      "cost_per_1m_in": 0.25,
+      "cost_per_1m_out": 2,
+      "cost_per_1m_in_cached": 0,
+      "cost_per_1m_out_cached": 0,
+      "context_window": 262144,
+      "default_max_tokens": 16384,
+      "can_reason": true,
+      "reasoning_levels": [
+        "low",
+        "medium",
+        "high"
+      ],
+      "default_reasoning_effort": "medium",
+      "supports_attachments": true,
+      "options": {}
+    },
+    {
+      "id": "bytedance-seed/seed-1.6-flash",
+      "name": "ByteDance Seed: Seed 1.6 Flash",
+      "cost_per_1m_in": 0.075,
+      "cost_per_1m_out": 0.3,
+      "cost_per_1m_in_cached": 0,
+      "cost_per_1m_out_cached": 0,
+      "context_window": 262144,
+      "default_max_tokens": 8192,
+      "can_reason": true,
+      "reasoning_levels": [
+        "low",
+        "medium",
+        "high"
+      ],
+      "default_reasoning_effort": "medium",
+      "supports_attachments": true,
+      "options": {}
+    },
     {
       "id": "deepcogito/cogito-v2-preview-llama-109b-moe",
       "name": "Cogito V2 Preview Llama 109B",
@@ -532,6 +570,25 @@
       "supports_attachments": false,
       "options": {}
     },
+    {
+      "id": "deepseek/deepseek-chat-v3.1",
+      "name": "DeepSeek: DeepSeek V3.1",
+      "cost_per_1m_in": 0.56,
+      "cost_per_1m_out": 1.68,
+      "cost_per_1m_in_cached": 0,
+      "cost_per_1m_out_cached": 0,
+      "context_window": 163840,
+      "default_max_tokens": 16384,
+      "can_reason": true,
+      "reasoning_levels": [
+        "low",
+        "medium",
+        "high"
+      ],
+      "default_reasoning_effort": "medium",
+      "supports_attachments": false,
+      "options": {}
+    },
     {
       "id": "deepseek/deepseek-v3.1-terminus",
       "name": "DeepSeek: DeepSeek V3.1 Terminus",
@@ -574,11 +631,11 @@
       "id": "deepseek/deepseek-v3.2",
       "name": "DeepSeek: DeepSeek V3.2",
       "cost_per_1m_in": 0.26,
-      "cost_per_1m_out": 0.39,
+      "cost_per_1m_out": 0.38,
       "cost_per_1m_in_cached": 0,
-      "cost_per_1m_out_cached": 0.13,
+      "cost_per_1m_out_cached": 0,
       "context_window": 163840,
-      "default_max_tokens": 16384,
+      "default_max_tokens": 32768,
       "can_reason": true,
       "reasoning_levels": [
         "low",
@@ -611,12 +668,12 @@
     {
       "id": "deepseek/deepseek-r1",
       "name": "DeepSeek: R1",
-      "cost_per_1m_in": 0.7,
-      "cost_per_1m_out": 2.4,
+      "cost_per_1m_in": 0.56,
+      "cost_per_1m_out": 2,
       "cost_per_1m_in_cached": 0,
       "cost_per_1m_out_cached": 0,
-      "context_window": 163840,
-      "default_max_tokens": 81920,
+      "context_window": 64000,
+      "default_max_tokens": 8000,
       "can_reason": true,
       "reasoning_levels": [
         "low",
@@ -630,12 +687,12 @@
     {
       "id": "deepseek/deepseek-r1-0528",
       "name": "DeepSeek: R1 0528",
-      "cost_per_1m_in": 0.39999999999999997,
-      "cost_per_1m_out": 1.75,
+      "cost_per_1m_in": 0.7999999999999999,
+      "cost_per_1m_out": 2.4,
       "cost_per_1m_in_cached": 0,
       "cost_per_1m_out_cached": 0,
       "context_window": 163840,
-      "default_max_tokens": 81920,
+      "default_max_tokens": 16384,
       "can_reason": true,
       "reasoning_levels": [
         "low",
@@ -750,7 +807,7 @@
       "cost_per_1m_in_cached": 0,
       "cost_per_1m_out_cached": 0,
       "context_window": 1048576,
-      "default_max_tokens": 32768,
+      "default_max_tokens": 32767,
       "can_reason": true,
       "reasoning_levels": [
         "low",
@@ -769,7 +826,7 @@
       "cost_per_1m_in_cached": 0.3833,
       "cost_per_1m_out_cached": 0.075,
       "context_window": 1048576,
-      "default_max_tokens": 32768,
+      "default_max_tokens": 32767,
       "can_reason": true,
       "reasoning_levels": [
         "low",
@@ -845,7 +902,7 @@
       "cost_per_1m_in_cached": 0,
       "cost_per_1m_out_cached": 0.049999999999999996,
       "context_window": 1048576,
-      "default_max_tokens": 32767,
+      "default_max_tokens": 32768,
       "can_reason": true,
       "reasoning_levels": [
         "low",
@@ -927,19 +984,6 @@
       "supports_attachments": false,
       "options": {}
     },
-    {
-      "id": "meta-llama/llama-3.1-405b-instruct",
-      "name": "Meta: Llama 3.1 405B Instruct",
-      "cost_per_1m_in": 5,
-      "cost_per_1m_out": 16,
-      "cost_per_1m_in_cached": 0,
-      "cost_per_1m_out_cached": 0,
-      "context_window": 128000,
-      "default_max_tokens": 4096,
-      "can_reason": false,
-      "supports_attachments": false,
-      "options": {}
-    },
     {
       "id": "meta-llama/llama-3.1-70b-instruct",
       "name": "Meta: Llama 3.1 70B Instruct",
@@ -948,7 +992,7 @@
       "cost_per_1m_in_cached": 0,
       "cost_per_1m_out_cached": 0,
       "context_window": 131072,
-      "default_max_tokens": 8192,
+      "default_max_tokens": 13107,
       "can_reason": false,
       "supports_attachments": false,
       "options": {}
@@ -956,12 +1000,12 @@
     {
       "id": "meta-llama/llama-3.1-8b-instruct",
       "name": "Meta: Llama 3.1 8B Instruct",
-      "cost_per_1m_in": 0.03,
-      "cost_per_1m_out": 0.09,
+      "cost_per_1m_in": 0.049999999999999996,
+      "cost_per_1m_out": 0.08,
       "cost_per_1m_in_cached": 0,
       "cost_per_1m_out_cached": 0,
       "context_window": 131072,
-      "default_max_tokens": 13107,
+      "default_max_tokens": 65536,
       "can_reason": false,
       "supports_attachments": false,
       "options": {}
@@ -982,12 +1026,12 @@
     {
       "id": "meta-llama/llama-3.3-70b-instruct",
       "name": "Meta: Llama 3.3 70B Instruct",
-      "cost_per_1m_in": 0.09999999999999999,
-      "cost_per_1m_out": 0.32,
+      "cost_per_1m_in": 0.25,
+      "cost_per_1m_out": 0.75,
       "cost_per_1m_in_cached": 0,
       "cost_per_1m_out_cached": 0,
       "context_window": 131072,
-      "default_max_tokens": 8192,
+      "default_max_tokens": 13107,
       "can_reason": false,
       "supports_attachments": false,
       "options": {}
@@ -1089,6 +1133,25 @@
       "supports_attachments": false,
       "options": {}
     },
+    {
+      "id": "minimax/minimax-m2.1",
+      "name": "MiniMax: MiniMax M2.1",
+      "cost_per_1m_in": 0.3,
+      "cost_per_1m_out": 1.2,
+      "cost_per_1m_in_cached": 0.375,
+      "cost_per_1m_out_cached": 0.03,
+      "context_window": 204800,
+      "default_max_tokens": 65536,
+      "can_reason": true,
+      "reasoning_levels": [
+        "low",
+        "medium",
+        "high"
+      ],
+      "default_reasoning_effort": "medium",
+      "supports_attachments": false,
+      "options": {}
+    },
     {
       "id": "mistralai/mistral-large",
       "name": "Mistral Large",
@@ -1391,12 +1454,12 @@
     {
       "id": "mistralai/mistral-small-3.2-24b-instruct",
       "name": "Mistral: Mistral Small 3.2 24B",
-      "cost_per_1m_in": 0.06,
-      "cost_per_1m_out": 0.18,
+      "cost_per_1m_in": 0.09999999999999999,
+      "cost_per_1m_out": 0.3,
       "cost_per_1m_in_cached": 0,
       "cost_per_1m_out_cached": 0,
       "context_window": 131072,
-      "default_max_tokens": 65536,
+      "default_max_tokens": 13107,
       "can_reason": false,
       "supports_attachments": true,
       "options": {}
@@ -1495,12 +1558,12 @@
     {
       "id": "moonshotai/kimi-k2-0905",
       "name": "MoonshotAI: Kimi K2 0905",
-      "cost_per_1m_in": 0.6,
-      "cost_per_1m_out": 2.5,
+      "cost_per_1m_in": 0.39,
+      "cost_per_1m_out": 1.9,
       "cost_per_1m_in_cached": 0,
       "cost_per_1m_out_cached": 0,
       "context_window": 262144,
-      "default_max_tokens": 26214,
+      "default_max_tokens": 131072,
       "can_reason": false,
       "supports_attachments": false,
       "options": {}
@@ -1508,12 +1571,12 @@
     {
       "id": "moonshotai/kimi-k2-0905:exacto",
       "name": "MoonshotAI: Kimi K2 0905 (exacto)",
-      "cost_per_1m_in": 1,
-      "cost_per_1m_out": 3,
+      "cost_per_1m_in": 1.15,
+      "cost_per_1m_out": 8,
       "cost_per_1m_in_cached": 0,
-      "cost_per_1m_out_cached": 0,
+      "cost_per_1m_out_cached": 0.15,
       "context_window": 262144,
-      "default_max_tokens": 8192,
+      "default_max_tokens": 26214,
       "can_reason": false,
       "supports_attachments": false,
       "options": {}
@@ -1521,12 +1584,12 @@
     {
       "id": "moonshotai/kimi-k2-thinking",
       "name": "MoonshotAI: Kimi K2 Thinking",
-      "cost_per_1m_in": 0.39999999999999997,
-      "cost_per_1m_out": 1.75,
+      "cost_per_1m_in": 0.5,
+      "cost_per_1m_out": 2.5,
       "cost_per_1m_in_cached": 0,
       "cost_per_1m_out_cached": 0,
       "context_window": 262144,
-      "default_max_tokens": 32767,
+      "default_max_tokens": 131072,
       "can_reason": true,
       "reasoning_levels": [
         "low",
@@ -1577,7 +1640,7 @@
       "cost_per_1m_in_cached": 0,
       "cost_per_1m_out_cached": 0,
       "context_window": 262144,
-      "default_max_tokens": 26214,
+      "default_max_tokens": 131072,
       "can_reason": true,
       "reasoning_levels": [
         "low",
@@ -1794,7 +1857,7 @@
       "cost_per_1m_in_cached": 0,
       "cost_per_1m_out_cached": 0.09999999999999999,
       "context_window": 1047576,
-      "default_max_tokens": 16384,
+      "default_max_tokens": 104757,
       "can_reason": false,
       "supports_attachments": true,
       "options": {}
@@ -1818,7 +1881,7 @@
       "cost_per_1m_in": 2.5,
       "cost_per_1m_out": 10,
       "cost_per_1m_in_cached": 0,
-      "cost_per_1m_out_cached": 0,
+      "cost_per_1m_out_cached": 1.25,
       "context_window": 128000,
       "default_max_tokens": 8192,
       "can_reason": false,
@@ -2192,12 +2255,12 @@
     {
       "id": "openai/gpt-oss-120b",
       "name": "OpenAI: gpt-oss-120b",
-      "cost_per_1m_in": 0.04,
-      "cost_per_1m_out": 0.25,
+      "cost_per_1m_in": 0.039,
+      "cost_per_1m_out": 0.19,
       "cost_per_1m_in_cached": 0,
       "cost_per_1m_out_cached": 0,
       "context_window": 131072,
-      "default_max_tokens": 32768,
+      "default_max_tokens": 13107,
       "can_reason": true,
       "reasoning_levels": [
         "low",
@@ -2211,12 +2274,12 @@
     {
       "id": "openai/gpt-oss-120b:exacto",
       "name": "OpenAI: gpt-oss-120b (exacto)",
-      "cost_per_1m_in": 0.15,
-      "cost_per_1m_out": 0.6,
+      "cost_per_1m_in": 0.04,
+      "cost_per_1m_out": 0.19999999999999998,
       "cost_per_1m_in_cached": 0,
       "cost_per_1m_out_cached": 0,
       "context_window": 131072,
-      "default_max_tokens": 32768,
+      "default_max_tokens": 16384,
       "can_reason": true,
       "reasoning_levels": [
         "low",
@@ -2249,8 +2312,8 @@
     {
       "id": "openai/gpt-oss-20b",
       "name": "OpenAI: gpt-oss-20b",
-      "cost_per_1m_in": 0.04,
-      "cost_per_1m_out": 0.15,
+      "cost_per_1m_in": 0.03,
+      "cost_per_1m_out": 0.14,
       "cost_per_1m_in_cached": 0,
       "cost_per_1m_out_cached": 0,
       "context_window": 131072,
@@ -2265,25 +2328,6 @@
       "supports_attachments": false,
       "options": {}
     },
-    {
-      "id": "openai/gpt-oss-20b:free",
-      "name": "OpenAI: gpt-oss-20b (free)",
-      "cost_per_1m_in": 0,
-      "cost_per_1m_out": 0,
-      "cost_per_1m_in_cached": 0,
-      "cost_per_1m_out_cached": 0,
-      "context_window": 131072,
-      "default_max_tokens": 65536,
-      "can_reason": true,
-      "reasoning_levels": [
-        "low",
-        "medium",
-        "high"
-      ],
-      "default_reasoning_effort": "medium",
-      "supports_attachments": false,
-      "options": {}
-    },
     {
       "id": "openai/gpt-oss-safeguard-20b",
       "name": "OpenAI: gpt-oss-safeguard-20b",
@@ -2478,12 +2522,12 @@
     {
       "id": "qwen/qwen-2.5-72b-instruct",
       "name": "Qwen2.5 72B Instruct",
-      "cost_per_1m_in": 0.13,
-      "cost_per_1m_out": 0.52,
+      "cost_per_1m_in": 0.12,
+      "cost_per_1m_out": 0.39,
       "cost_per_1m_in_cached": 0,
       "cost_per_1m_out_cached": 0,
       "context_window": 32768,
-      "default_max_tokens": 16384,
+      "default_max_tokens": 8192,
       "can_reason": false,
       "supports_attachments": false,
       "options": {}
@@ -2613,12 +2657,12 @@
     {
       "id": "qwen/qwen3-235b-a22b-2507",
       "name": "Qwen: Qwen3 235B A22B Instruct 2507",
-      "cost_per_1m_in": 0.22,
-      "cost_per_1m_out": 0.88,
+      "cost_per_1m_in": 0.08,
+      "cost_per_1m_out": 0.55,
       "cost_per_1m_in_cached": 0,
       "cost_per_1m_out_cached": 0,
       "context_window": 262144,
-      "default_max_tokens": 8192,
+      "default_max_tokens": 131072,
       "can_reason": false,
       "supports_attachments": false,
       "options": {}
@@ -2645,8 +2689,8 @@
     {
       "id": "qwen/qwen3-30b-a3b",
       "name": "Qwen: Qwen3 30B A3B",
-      "cost_per_1m_in": 0.09,
-      "cost_per_1m_out": 0.44999999999999996,
+      "cost_per_1m_in": 0.08,
+      "cost_per_1m_out": 0.28,
       "cost_per_1m_in_cached": 0,
       "cost_per_1m_out_cached": 0,
       "context_window": 131072,
@@ -2753,12 +2797,12 @@
     {
       "id": "qwen/qwen3-coder-30b-a3b-instruct",
       "name": "Qwen: Qwen3 Coder 30B A3B Instruct",
-      "cost_per_1m_in": 0.07,
-      "cost_per_1m_out": 0.28,
+      "cost_per_1m_in": 0.09999999999999999,
+      "cost_per_1m_out": 0.3,
       "cost_per_1m_in_cached": 0,
       "cost_per_1m_out_cached": 0,
       "context_window": 262144,
-      "default_max_tokens": 131072,
+      "default_max_tokens": 26214,
       "can_reason": false,
       "supports_attachments": false,
       "options": {}
@@ -2886,6 +2930,25 @@
       "supports_attachments": true,
       "options": {}
     },
+    {
+      "id": "qwen/qwen3-vl-235b-a22b-thinking",
+      "name": "Qwen: Qwen3 VL 235B A22B Thinking",
+      "cost_per_1m_in": 0.3,
+      "cost_per_1m_out": 1.2,
+      "cost_per_1m_in_cached": 0,
+      "cost_per_1m_out_cached": 0,
+      "context_window": 262144,
+      "default_max_tokens": 131072,
+      "can_reason": true,
+      "reasoning_levels": [
+        "low",
+        "medium",
+        "high"
+      ],
+      "default_reasoning_effort": "medium",
+      "supports_attachments": true,
+      "options": {}
+    },
     {
       "id": "qwen/qwen3-vl-30b-a3b-instruct",
       "name": "Qwen: Qwen3 VL 30B A3B Instruct",
@@ -3069,7 +3132,7 @@
       "id": "alibaba/tongyi-deepresearch-30b-a3b",
       "name": "Tongyi DeepResearch 30B A3B",
       "cost_per_1m_in": 0.09,
-      "cost_per_1m_out": 0.39999999999999997,
+      "cost_per_1m_out": 0.44999999999999996,
       "cost_per_1m_in_cached": 0,
       "cost_per_1m_out_cached": 0,
       "context_window": 131072,
@@ -3138,12 +3201,12 @@
     {
       "id": "z-ai/glm-4.5",
       "name": "Z.AI: GLM 4.5",
-      "cost_per_1m_in": 0.48,
-      "cost_per_1m_out": 1.76,
+      "cost_per_1m_in": 0.35,
+      "cost_per_1m_out": 1.55,
       "cost_per_1m_in_cached": 0,
-      "cost_per_1m_out_cached": 0.088,
+      "cost_per_1m_out_cached": 0,
       "context_window": 131072,
-      "default_max_tokens": 49152,
+      "default_max_tokens": 32768,
       "can_reason": true,
       "reasoning_levels": [
         "low",
@@ -3157,12 +3220,12 @@
     {
       "id": "z-ai/glm-4.5-air",
       "name": "Z.AI: GLM 4.5 Air",
-      "cost_per_1m_in": 0.10400000000000001,
-      "cost_per_1m_out": 0.6799999999999999,
+      "cost_per_1m_in": 0.19999999999999998,
+      "cost_per_1m_out": 1.1,
       "cost_per_1m_in_cached": 0,
-      "cost_per_1m_out_cached": 0,
+      "cost_per_1m_out_cached": 0.03,
       "context_window": 131072,
-      "default_max_tokens": 49152,
+      "default_max_tokens": 48000,
       "can_reason": true,
       "reasoning_levels": [
         "low",
@@ -3214,12 +3277,12 @@
     {
       "id": "z-ai/glm-4.6",
       "name": "Z.AI: GLM 4.6",
-      "cost_per_1m_in": 0.39,
-      "cost_per_1m_out": 1.9,
+      "cost_per_1m_in": 0.44,
+      "cost_per_1m_out": 1.76,
       "cost_per_1m_in_cached": 0,
-      "cost_per_1m_out_cached": 0,
+      "cost_per_1m_out_cached": 0.088,
       "context_window": 204800,
-      "default_max_tokens": 102400,
+      "default_max_tokens": 65536,
       "can_reason": true,
       "reasoning_levels": [
         "low",
@@ -3273,9 +3336,9 @@
       "name": "Z.AI: GLM 4.7",
       "cost_per_1m_in": 0.6,
       "cost_per_1m_out": 2.2,
-      "cost_per_1m_in_cached": 0,
+      "cost_per_1m_in_cached": 0.11,
       "cost_per_1m_out_cached": 0.11,
-      "context_window": 200000,
+      "context_window": 204800,
       "default_max_tokens": 65536,
       "can_reason": true,
       "reasoning_levels": [
@@ -3316,10 +3379,10 @@
     {
       "id": "x-ai/grok-3-mini",
       "name": "xAI: Grok 3 Mini",
-      "cost_per_1m_in": 0.6,
-      "cost_per_1m_out": 4,
+      "cost_per_1m_in": 0.3,
+      "cost_per_1m_out": 0.5,
       "cost_per_1m_in_cached": 0,
-      "cost_per_1m_out_cached": 0.15,
+      "cost_per_1m_out_cached": 0.075,
       "context_window": 131072,
       "default_max_tokens": 13107,
       "can_reason": true,
@@ -3335,10 +3398,10 @@
     {
       "id": "x-ai/grok-3-mini-beta",
       "name": "xAI: Grok 3 Mini Beta",
-      "cost_per_1m_in": 0.6,
-      "cost_per_1m_out": 4,
+      "cost_per_1m_in": 0.3,
+      "cost_per_1m_out": 0.5,
       "cost_per_1m_in_cached": 0,
-      "cost_per_1m_out_cached": 0.15,
+      "cost_per_1m_out_cached": 0.075,
       "context_window": 131072,
       "default_max_tokens": 13107,
       "can_reason": true,

internal/providers/configs/synthetic.json

@@ -194,8 +194,8 @@
       "options": {}
     },
     {
-      "id": "hf:MiniMaxAI/MiniMax-M2",
-      "name": "MiniMax M2",
+      "id": "hf:MiniMaxAI/MiniMax-M2.1",
+      "name": "MiniMax M2.1",
       "cost_per_1m_in": 0,
       "cost_per_1m_out": 0,
       "cost_per_1m_in_cached": 0,