From 60171ca4cb1282127240763322222665ea9b2bf8 Mon Sep 17 00:00:00 2001
From: Charm <124303983+charmcli@users.noreply.github.com>
Date: Sat, 4 Apr 2026 03:06:49 +0000
Subject: [PATCH] chore: auto-update generated files

---
 internal/providers/configs/aihubmix.json    |  12 --
 internal/providers/configs/huggingface.json |  36 ++--
 internal/providers/configs/ionet.json       |   4 +-
 internal/providers/configs/openrouter.json  | 192 +++++++++++---------
 internal/providers/configs/synthetic.json   |   2 +-
 internal/providers/configs/venice.json      |  20 +-
 6 files changed, 154 insertions(+), 112 deletions(-)

diff --git a/internal/providers/configs/aihubmix.json b/internal/providers/configs/aihubmix.json
index 84ad67492b048a6a1e775aba123be7452d5a690b..78e8d5ed7c271c959d57855f6bb7ce5635eda1ac 100644
--- a/internal/providers/configs/aihubmix.json
+++ b/internal/providers/configs/aihubmix.json
@@ -1021,18 +1021,6 @@
       "can_reason": false,
       "supports_attachments": true
     },
-    {
-      "id": "gemini-2.0-flash-free",
-      "name": "Gemini 2.0 Flash (free)",
-      "cost_per_1m_in": 0,
-      "cost_per_1m_out": 0,
-      "cost_per_1m_in_cached": 0,
-      "cost_per_1m_out_cached": 0,
-      "context_window": 1048576,
-      "default_max_tokens": 8192,
-      "can_reason": false,
-      "supports_attachments": true
-    },
     {
       "id": "gemini-2.5-flash",
       "name": "Gemini 2.5 Flash",
diff --git a/internal/providers/configs/huggingface.json b/internal/providers/configs/huggingface.json
index da1d969601146f9de3f8b3b6f9d7e0cecbe6d53d..b5a346367e12805ce5972a15d10ed96890dec097 100644
--- a/internal/providers/configs/huggingface.json
+++ b/internal/providers/configs/huggingface.json
@@ -31,6 +31,18 @@
       "can_reason": false,
       "supports_attachments": false
     },
+    {
+      "id": "Qwen/Qwen3-235B-A22B-Instruct-2507:cerebras",
+      "name": "Qwen/Qwen3-235B-A22B-Instruct-2507 (cerebras)",
+      "cost_per_1m_in": 0.6,
+      "cost_per_1m_out": 1.2,
+      "cost_per_1m_in_cached": 0,
+      "cost_per_1m_out_cached": 0,
+      "context_window": 131072,
+      "default_max_tokens": 8192,
+      "can_reason": false,
+      "supports_attachments": false
+    },
     {
       "id": "Qwen/Qwen3-32B:groq",
       "name": "Qwen/Qwen3-32B (groq)",
@@ -79,18 +91,6 @@
       "can_reason": false,
       "supports_attachments": false
     },
-    {
-      "id": "deepcogito/cogito-671b-v2.1:fireworks-ai",
-      "name": "deepcogito/cogito-671b-v2.1 (fireworks-ai)",
-      "cost_per_1m_in": 0,
-      "cost_per_1m_out": 0,
-      "cost_per_1m_in_cached": 0,
-      "cost_per_1m_out_cached": 0,
-      "context_window": 163840,
-      "default_max_tokens": 8192,
-      "can_reason": false,
-      "supports_attachments": false
-    },
     {
       "id": "meta-llama/Llama-3.3-70B-Instruct:groq",
       "name": "meta-llama/Llama-3.3-70B-Instruct (groq)",
@@ -163,6 +163,18 @@
       "can_reason": false,
       "supports_attachments": false
     },
+    {
+      "id": "openai/gpt-oss-120b:cerebras",
+      "name": "openai/gpt-oss-120b (cerebras)",
+      "cost_per_1m_in": 0.25,
+      "cost_per_1m_out": 0.69,
+      "cost_per_1m_in_cached": 0,
+      "cost_per_1m_out_cached": 0,
+      "context_window": 131072,
+      "default_max_tokens": 8192,
+      "can_reason": false,
+      "supports_attachments": false
+    },
     {
       "id": "openai/gpt-oss-120b:fireworks-ai",
       "name": "openai/gpt-oss-120b (fireworks-ai)",
diff --git a/internal/providers/configs/ionet.json b/internal/providers/configs/ionet.json
index 5d1c1087db08b8e0b3c17b83eca99d7d1aa6960f..90bbfc795dcff49d3d854d23f22b3c4a8bbca06f 100644
--- a/internal/providers/configs/ionet.json
+++ b/internal/providers/configs/ionet.json
@@ -41,7 +41,7 @@
       "id": "MiniMaxAI/MiniMax-M2.5",
       "name": "MiniMaxAI/MiniMax-M2.5",
       "cost_per_1m_in": 0.118,
-      "cost_per_1m_out": 1.25,
+      "cost_per_1m_out": 0.99,
       "cost_per_1m_in_cached": 0.1,
       "cost_per_1m_out_cached": 0,
       "context_window": 196600,
@@ -95,7 +95,7 @@
       "id": "moonshotai/Kimi-K2.5",
       "name": "MoonshotAI: Kimi K2.5",
       "cost_per_1m_in": 0.445,
-      "cost_per_1m_out": 2.22,
+      "cost_per_1m_out": 2,
       "cost_per_1m_in_cached": 0.225,
       "cost_per_1m_out_cached": 1.1,
       "context_window": 262144,
diff --git a/internal/providers/configs/openrouter.json b/internal/providers/configs/openrouter.json
index 6d78ab11ccd8f01782754b7f40521a2469bb3ecd..1a5d47830f12e20269e9e8e0c5c0d70aad2cc0e7 100644
--- a/internal/providers/configs/openrouter.json
+++ b/internal/providers/configs/openrouter.json
@@ -328,12 +328,12 @@
     {
       "id": "arcee-ai/trinity-large-thinking",
       "name": "Arcee AI: Trinity Large Thinking",
-      "cost_per_1m_in": 0.25,
-      "cost_per_1m_out": 0.9,
+      "cost_per_1m_in": 0.22,
+      "cost_per_1m_out": 0.85,
       "cost_per_1m_in_cached": 0,
-      "cost_per_1m_out_cached": 0.06,
+      "cost_per_1m_out_cached": 0,
       "context_window": 262144,
-      "default_max_tokens": 40000,
+      "default_max_tokens": 131072,
       "can_reason": true,
       "reasoning_levels": [
         "low",
@@ -544,10 +544,10 @@
     {
       "id": "deepseek/deepseek-chat-v3.1",
       "name": "DeepSeek: DeepSeek V3.1",
-      "cost_per_1m_in": 0.56,
-      "cost_per_1m_out": 1.68,
+      "cost_per_1m_in": 0.21,
+      "cost_per_1m_out": 0.79,
       "cost_per_1m_in_cached": 0,
-      "cost_per_1m_out_cached": 0.28,
+      "cost_per_1m_out_cached": 0.13,
       "context_window": 163840,
       "default_max_tokens": 16384,
       "can_reason": true,
@@ -562,12 +562,12 @@
     {
       "id": "deepseek/deepseek-v3.1-terminus",
       "name": "DeepSeek: DeepSeek V3.1 Terminus",
-      "cost_per_1m_in": 0.27,
-      "cost_per_1m_out": 1,
+      "cost_per_1m_in": 0.21,
+      "cost_per_1m_out": 0.79,
       "cost_per_1m_in_cached": 0,
-      "cost_per_1m_out_cached": 0.135,
+      "cost_per_1m_out_cached": 0.13,
       "context_window": 163840,
-      "default_max_tokens": 32768,
+      "default_max_tokens": 16384,
       "can_reason": true,
       "reasoning_levels": [
         "low",
@@ -601,9 +601,9 @@
       "cost_per_1m_in": 0.27,
       "cost_per_1m_out": 0.41,
       "cost_per_1m_in_cached": 0,
-      "cost_per_1m_out_cached": 0.27,
+      "cost_per_1m_out_cached": 0,
       "context_window": 163840,
-      "default_max_tokens": 81920,
+      "default_max_tokens": 32768,
       "can_reason": true,
       "reasoning_levels": [
         "low",
@@ -865,6 +865,42 @@
       "default_reasoning_effort": "medium",
       "supports_attachments": true
     },
+    {
+      "id": "google/gemma-4-26b-a4b-it",
+      "name": "Google: Gemma 4 26B A4B ",
+      "cost_per_1m_in": 0.13,
+      "cost_per_1m_out": 0.4,
+      "cost_per_1m_in_cached": 0,
+      "cost_per_1m_out_cached": 0,
+      "context_window": 262144,
+      "default_max_tokens": 131072,
+      "can_reason": true,
+      "reasoning_levels": [
+        "low",
+        "medium",
+        "high"
+      ],
+      "default_reasoning_effort": "medium",
+      "supports_attachments": true
+    },
+    {
+      "id": "google/gemma-4-31b-it",
+      "name": "Google: Gemma 4 31B",
+      "cost_per_1m_in": 0.14,
+      "cost_per_1m_out": 0.4,
+      "cost_per_1m_in_cached": 0,
+      "cost_per_1m_out_cached": 0,
+      "context_window": 262144,
+      "default_max_tokens": 131072,
+      "can_reason": true,
+      "reasoning_levels": [
+        "low",
+        "medium",
+        "high"
+      ],
+      "default_reasoning_effort": "medium",
+      "supports_attachments": true
+    },
     {
       "id": "inception/mercury",
       "name": "Inception: Mercury",
@@ -939,19 +975,19 @@
       "cost_per_1m_in_cached": 0,
       "cost_per_1m_out_cached": 0,
       "context_window": 131072,
-      "default_max_tokens": 13107,
+      "default_max_tokens": 8192,
       "can_reason": false,
       "supports_attachments": false
     },
     {
       "id": "meta-llama/llama-3.3-70b-instruct",
       "name": "Meta: Llama 3.3 70B Instruct",
-      "cost_per_1m_in": 0.59,
-      "cost_per_1m_out": 0.79,
+      "cost_per_1m_in": 0.88,
+      "cost_per_1m_out": 0.88,
       "cost_per_1m_in_cached": 0,
       "cost_per_1m_out_cached": 0,
       "context_window": 131072,
-      "default_max_tokens": 16384,
+      "default_max_tokens": 1024,
       "can_reason": false,
       "supports_attachments": false
     },
@@ -1000,12 +1036,12 @@
     {
       "id": "minimax/minimax-m2.1",
       "name": "MiniMax: MiniMax M2.1",
-      "cost_per_1m_in": 0.27,
-      "cost_per_1m_out": 0.95,
+      "cost_per_1m_in": 0.3,
+      "cost_per_1m_out": 1.2,
       "cost_per_1m_in_cached": 0,
-      "cost_per_1m_out_cached": 0.029,
-      "context_window": 196608,
-      "default_max_tokens": 19660,
+      "cost_per_1m_out_cached": 0.03,
+      "context_window": 204800,
+      "default_max_tokens": 65536,
       "can_reason": true,
       "reasoning_levels": [
         "low",
@@ -1354,24 +1390,24 @@
     {
       "id": "moonshotai/kimi-k2-0905",
       "name": "MoonshotAI: Kimi K2 0905",
-      "cost_per_1m_in": 1,
-      "cost_per_1m_out": 3,
+      "cost_per_1m_in": 0.6,
+      "cost_per_1m_out": 2.5,
       "cost_per_1m_in_cached": 0,
-      "cost_per_1m_out_cached": 0.5,
+      "cost_per_1m_out_cached": 0.3,
       "context_window": 262144,
-      "default_max_tokens": 8192,
+      "default_max_tokens": 26214,
       "can_reason": false,
       "supports_attachments": false
     },
     {
       "id": "moonshotai/kimi-k2-thinking",
       "name": "MoonshotAI: Kimi K2 Thinking",
-      "cost_per_1m_in": 0.47,
-      "cost_per_1m_out": 2,
+      "cost_per_1m_in": 0.6,
+      "cost_per_1m_out": 2.5,
       "cost_per_1m_in_cached": 0,
-      "cost_per_1m_out_cached": 0.141,
-      "context_window": 131072,
-      "default_max_tokens": 13107,
+      "cost_per_1m_out_cached": 0,
+      "context_window": 262144,
+      "default_max_tokens": 131072,
       "can_reason": true,
       "reasoning_levels": [
         "low",
@@ -1384,12 +1420,12 @@
     {
       "id": "moonshotai/kimi-k2.5",
       "name": "MoonshotAI: Kimi K2.5",
-      "cost_per_1m_in": 0.6,
-      "cost_per_1m_out": 3,
+      "cost_per_1m_in": 0.45,
+      "cost_per_1m_out": 2.25,
       "cost_per_1m_in_cached": 0,
-      "cost_per_1m_out_cached": 0.1,
+      "cost_per_1m_out_cached": 0.07,
       "context_window": 262144,
-      "default_max_tokens": 26214,
+      "default_max_tokens": 131072,
       "can_reason": true,
       "reasoning_levels": [
         "low",
@@ -1657,9 +1693,9 @@
       "cost_per_1m_in": 0.1,
       "cost_per_1m_out": 0.4,
       "cost_per_1m_in_cached": 0,
-      "cost_per_1m_out_cached": 0.025,
+      "cost_per_1m_out_cached": 0.03,
       "context_window": 1047576,
-      "default_max_tokens": 16384,
+      "default_max_tokens": 104757,
       "can_reason": false,
       "supports_attachments": true
     },
@@ -1891,7 +1927,7 @@
       "cost_per_1m_in": 1.25,
       "cost_per_1m_out": 10,
       "cost_per_1m_in_cached": 0,
-      "cost_per_1m_out_cached": 0.125,
+      "cost_per_1m_out_cached": 0.13,
       "context_window": 400000,
       "default_max_tokens": 64000,
       "can_reason": true,
@@ -1909,9 +1945,9 @@
       "cost_per_1m_in": 1.25,
       "cost_per_1m_out": 10,
       "cost_per_1m_in_cached": 0,
-      "cost_per_1m_out_cached": 0.13,
+      "cost_per_1m_out_cached": 0.125,
       "context_window": 128000,
-      "default_max_tokens": 16000,
+      "default_max_tokens": 8192,
       "can_reason": false,
       "supports_attachments": true
     },
@@ -2140,12 +2176,12 @@
     {
       "id": "openai/gpt-oss-120b",
       "name": "OpenAI: gpt-oss-120b",
-      "cost_per_1m_in": 0.15,
-      "cost_per_1m_out": 0.6,
+      "cost_per_1m_in": 0.09,
+      "cost_per_1m_out": 0.36,
       "cost_per_1m_in_cached": 0,
-      "cost_per_1m_out_cached": 0,
+      "cost_per_1m_out_cached": 0.045,
       "context_window": 131072,
-      "default_max_tokens": 13107,
+      "default_max_tokens": 32768,
       "can_reason": true,
       "reasoning_levels": [
         "low",
@@ -2617,9 +2653,9 @@
       "cost_per_1m_in": 0.1,
       "cost_per_1m_out": 0.3,
       "cost_per_1m_in_cached": 0,
-      "cost_per_1m_out_cached": 0.1,
+      "cost_per_1m_out_cached": 0,
       "context_window": 262144,
-      "default_max_tokens": 131072,
+      "default_max_tokens": 26214,
       "can_reason": false,
       "supports_attachments": false
     },
@@ -2644,11 +2680,11 @@
     {
       "id": "qwen/qwen3-32b",
       "name": "Qwen: Qwen3 32B",
-      "cost_per_1m_in": 0.104,
-      "cost_per_1m_out": 0.416,
+      "cost_per_1m_in": 0.08,
+      "cost_per_1m_out": 0.28,
       "cost_per_1m_in_cached": 0,
       "cost_per_1m_out_cached": 0,
-      "context_window": 131072,
+      "context_window": 40960,
       "default_max_tokens": 4096,
       "can_reason": true,
       "reasoning_levels": [
@@ -2680,12 +2716,12 @@
     {
       "id": "qwen/qwen3-coder-30b-a3b-instruct",
       "name": "Qwen: Qwen3 Coder 30B A3B Instruct",
-      "cost_per_1m_in": 0.2925,
-      "cost_per_1m_out": 1.4625,
+      "cost_per_1m_in": 0.07,
+      "cost_per_1m_out": 0.27,
       "cost_per_1m_in_cached": 0,
       "cost_per_1m_out_cached": 0,
-      "context_window": 262144,
-      "default_max_tokens": 32768,
+      "context_window": 160000,
+      "default_max_tokens": 16384,
       "can_reason": false,
       "supports_attachments": false
     },
@@ -2824,8 +2860,8 @@
     {
       "id": "qwen/qwen3-vl-235b-a22b-instruct",
       "name": "Qwen: Qwen3 VL 235B A22B Instruct",
-      "cost_per_1m_in": 0.26,
-      "cost_per_1m_out": 1.04,
+      "cost_per_1m_in": 0.21,
+      "cost_per_1m_out": 1.9,
       "cost_per_1m_in_cached": 0,
       "cost_per_1m_out_cached": 0,
       "context_window": 131072,
@@ -2854,12 +2890,12 @@
     {
       "id": "qwen/qwen3-vl-30b-a3b-instruct",
       "name": "Qwen: Qwen3 VL 30B A3B Instruct",
-      "cost_per_1m_in": 0.29,
-      "cost_per_1m_out": 1,
+      "cost_per_1m_in": 0.13,
+      "cost_per_1m_out": 0.52,
       "cost_per_1m_in_cached": 0,
       "cost_per_1m_out_cached": 0,
-      "context_window": 262144,
-      "default_max_tokens": 131072,
+      "context_window": 131072,
+      "default_max_tokens": 16384,
       "can_reason": false,
       "supports_attachments": true
     },
@@ -2931,7 +2967,7 @@
       "cost_per_1m_in_cached": 0,
       "cost_per_1m_out_cached": 0,
       "context_window": 262144,
-      "default_max_tokens": 131072,
+      "default_max_tokens": 26214,
       "can_reason": true,
       "reasoning_levels": [
         "low",
@@ -2980,10 +3016,10 @@
     {
       "id": "qwen/qwen3.5-27b",
       "name": "Qwen: Qwen3.5-27B",
-      "cost_per_1m_in": 0.3,
-      "cost_per_1m_out": 2.4,
+      "cost_per_1m_in": 0.27,
+      "cost_per_1m_out": 2.16,
       "cost_per_1m_in_cached": 0,
-      "cost_per_1m_out_cached": 0,
+      "cost_per_1m_out_cached": 0.27,
       "context_window": 262144,
       "default_max_tokens": 32768,
       "can_reason": true,
@@ -2998,12 +3034,12 @@
     {
       "id": "qwen/qwen3.5-35b-a3b",
       "name": "Qwen: Qwen3.5-35B-A3B",
-      "cost_per_1m_in": 0.225,
+      "cost_per_1m_in": 0.23,
       "cost_per_1m_out": 1.8,
       "cost_per_1m_in_cached": 0,
-      "cost_per_1m_out_cached": 0.225,
+      "cost_per_1m_out_cached": 0,
       "context_window": 262144,
-      "default_max_tokens": 32768,
+      "default_max_tokens": 131072,
       "can_reason": true,
       "reasoning_levels": [
         "low",
@@ -3016,12 +3052,12 @@
     {
       "id": "qwen/qwen3.5-9b",
       "name": "Qwen: Qwen3.5-9B",
-      "cost_per_1m_in": 0.05,
+      "cost_per_1m_in": 0.1,
       "cost_per_1m_out": 0.15,
       "cost_per_1m_in_cached": 0,
       "cost_per_1m_out_cached": 0,
-      "context_window": 256000,
-      "default_max_tokens": 16384,
+      "context_window": 262144,
+      "default_max_tokens": 26214,
       "can_reason": true,
       "reasoning_levels": [
         "low",
@@ -3079,27 +3115,15 @@
       "can_reason": false,
       "supports_attachments": false
     },
-    {
-      "id": "sao10k/l3.1-euryale-70b",
-      "name": "Sao10K: Llama 3.1 Euryale 70B v2.2",
-      "cost_per_1m_in": 1.48,
-      "cost_per_1m_out": 1.48,
-      "cost_per_1m_in_cached": 0,
-      "cost_per_1m_out_cached": 0,
-      "context_window": 8192,
-      "default_max_tokens": 4096,
-      "can_reason": false,
-      "supports_attachments": false
-    },
     {
       "id": "stepfun/step-3.5-flash",
       "name": "StepFun: Step 3.5 Flash",
       "cost_per_1m_in": 0.1,
       "cost_per_1m_out": 0.3,
       "cost_per_1m_in_cached": 0,
-      "cost_per_1m_out_cached": 0,
-      "context_window": 262144,
-      "default_max_tokens": 32768,
+      "cost_per_1m_out_cached": 0.02,
+      "context_window": 256000,
+      "default_max_tokens": 128000,
       "can_reason": true,
       "reasoning_levels": [
         "low",
diff --git a/internal/providers/configs/synthetic.json b/internal/providers/configs/synthetic.json
index 1ab1f45ce09912f1b944626dfa6262f6dedae18a..8e6dbeb9fa2b2f496bfe2deca524497291333bfa 100644
--- a/internal/providers/configs/synthetic.json
+++ b/internal/providers/configs/synthetic.json
@@ -95,7 +95,7 @@
       "id": "hf:zai-org/GLM-5",
       "name": "GLM 5",
       "cost_per_1m_in": 1,
-      "cost_per_1m_out": 6,
+      "cost_per_1m_out": 3,
       "cost_per_1m_in_cached": 1,
       "cost_per_1m_out_cached": 1,
       "context_window": 196608,
diff --git a/internal/providers/configs/venice.json b/internal/providers/configs/venice.json
index 34cc41492b42e70ecced76c7632461322d9724f5..4237f61c13c72f75e581134bc48e98c55de3d816 100644
--- a/internal/providers/configs/venice.json
+++ b/internal/providers/configs/venice.json
@@ -5,7 +5,7 @@
   "api_endpoint": "https://api.venice.ai/api/v1",
   "type": "openai-compat",
   "default_large_model_id": "claude-opus-4-6",
-  "default_small_model_id": "minimax-m25",
+  "default_small_model_id": "arcee-trinity-large-thinking",
   "models": [
     {
       "id": "claude-opus-4-5",
@@ -701,6 +701,24 @@
       "can_reason": true,
       "supports_attachments": true
     },
+    {
+      "id": "arcee-trinity-large-thinking",
+      "name": "Trinity Large Thinking",
+      "cost_per_1m_in": 0.3125,
+      "cost_per_1m_out": 1.125,
+      "cost_per_1m_in_cached": 0,
+      "cost_per_1m_out_cached": 0,
+      "context_window": 256000,
+      "default_max_tokens": 65536,
+      "can_reason": true,
+      "reasoning_levels": [
+        "low",
+        "medium",
+        "high"
+      ],
+      "default_reasoning_effort": "medium",
+      "supports_attachments": false
+    },
     {
       "id": "venice-uncensored-role-play",
       "name": "Venice Role Play Uncensored",