feat: add minimax china provider (#191)

Authored by AN Long

Change summary

internal/providers/configs/minimax-china.json | 61 +++++++++++++++++++++
internal/providers/providers.go               |  8 ++
pkg/catwalk/provider.go                       | 44 +++++++-------
3 files changed, 92 insertions(+), 21 deletions(-)

Detailed changes

internal/providers/configs/minimax-china.json

@@ -0,0 +1,61 @@
+{
+  "name": "MiniMax China",
+  "id": "minimax-china",
+  "type": "anthropic",
+  "api_key": "$MINIMAX_API_KEY",
+  "api_endpoint": "https://api.minimaxi.com/anthropic",
+  "default_large_model_id": "MiniMax-M2.1",
+  "default_small_model_id": "MiniMax-M2.1",
+  "models": [
+    {
+      "id": "MiniMax-M2.5-highspeed",
+      "name": "MiniMax-M2.5-highspeed",
+      "cost_per_1m_in": 0.6,
+      "cost_per_1m_out": 2.4,
+      "cost_per_1m_in_cached": 0.03,
+      "cost_per_1m_out_cached": 0.375,
+      "context_window": 204800,
+      "default_max_tokens": 131072
+    },
+    {
+      "id": "MiniMax-M2.5",
+      "name": "MiniMax-M2.5",
+      "cost_per_1m_in": 0.3,
+      "cost_per_1m_out": 1.2,
+      "cost_per_1m_in_cached": 0.03,
+      "cost_per_1m_out_cached": 0.375,
+      "context_window": 204800,
+      "default_max_tokens": 131072
+    },
+    {
+      "id": "MiniMax-M2.1-highspeed",
+      "name": "MiniMax-M2.1-highspeed",
+      "cost_per_1m_in": 0.6,
+      "cost_per_1m_out": 2.4,
+      "cost_per_1m_in_cached": 0.03,
+      "cost_per_1m_out_cached": 0.375,
+      "context_window": 204800,
+      "default_max_tokens": 131072
+    },
+    {
+      "id": "MiniMax-M2.1",
+      "name": "MiniMax-M2.1",
+      "cost_per_1m_in": 0.3,
+      "cost_per_1m_out": 1.2,
+      "cost_per_1m_in_cached": 0.03,
+      "cost_per_1m_out_cached": 0.375,
+      "context_window": 204800,
+      "default_max_tokens": 131072
+    },
+    {
+      "id": "MiniMax-M2",
+      "name": "MiniMax-M2",
+      "cost_per_1m_in": 0.3,
+      "cost_per_1m_out": 1.2,
+      "cost_per_1m_in_cached": 0.03,
+      "cost_per_1m_out_cached": 0.375,
+      "context_window": 196608,
+      "default_max_tokens": 128000
+    }
+  ]
+}
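
The cost_* fields are per-million-token rates (presumably USD, matching the other provider configs in this directory). For the non-highspeed models, the cached rates sit at 0.1x and 1.25x the input rate, consistent with Anthropic-style cache reads and writes, though the schema doesn't name them as such. Below is a minimal sketch of turning these rates into a per-request cost; the type and function names are illustrative, not part of catwalk's API.

package main

import "fmt"

// ModelPricing mirrors the cost fields above, in dollars per 1M tokens.
type ModelPricing struct {
	In, Out, InCached float64
}

// requestCost converts token counts into a dollar amount under the
// per-million-token rates.
func requestCost(p ModelPricing, in, out, cachedIn int64) float64 {
	return float64(in)*p.In/1e6 +
		float64(out)*p.Out/1e6 +
		float64(cachedIn)*p.InCached/1e6
}

func main() {
	m21 := ModelPricing{In: 0.3, Out: 1.2, InCached: 0.03}
	// 10k fresh input tokens + 2k output tokens on MiniMax-M2.1 ≈ $0.0054.
	fmt.Printf("$%.4f\n", requestCost(m21, 10_000, 2_000, 0))
}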

internal/providers/providers.go

@@ -72,6 +72,9 @@ var vercelConfig []byte
 //go:embed configs/minimax.json
 var miniMaxConfig []byte
 
+//go:embed configs/minimax-china.json
+var miniMaxChinaConfig []byte
+
 //go:embed configs/ionet.json
 var ioNetConfig []byte
 
@@ -100,6 +103,7 @@ var providerRegistry = []ProviderFunc{
 	copilotProvider,
 	vercelProvider,
 	miniMaxProvider,
+	miniMaxChinaProvider,
 	ioNetProvider,
 }
 
@@ -205,6 +209,10 @@ func miniMaxProvider() catwalk.Provider {
 	return loadProviderFromConfig(miniMaxConfig)
 }
 
+func miniMaxChinaProvider() catwalk.Provider {
+	return loadProviderFromConfig(miniMaxChinaConfig)
+}
+
 func ioNetProvider() catwalk.Provider {
 	return loadProviderFromConfig(ioNetConfig)
 }
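
The pattern here is one embedded JSON blob plus one ProviderFunc per provider, with registry order controlling listing order. loadProviderFromConfig is defined elsewhere in this package; the following is a sketch of what it plausibly does, assuming it simply unmarshals the embedded bytes into a catwalk.Provider and assuming the module path github.com/charmbracelet/catwalk. The real error handling may differ.

package providers

import (
	_ "embed"
	"encoding/json"
	"fmt"

	"github.com/charmbracelet/catwalk/pkg/catwalk"
)

//go:embed configs/minimax-china.json
var miniMaxChinaConfig []byte

// loadProviderFromConfig decodes an embedded config into a catwalk.Provider.
func loadProviderFromConfig(data []byte) catwalk.Provider {
	var p catwalk.Provider
	if err := json.Unmarshal(data, &p); err != nil {
		// Embedded configs are compiled in, so a decode failure is a bug.
		panic(fmt.Sprintf("invalid embedded provider config: %v", err))
	}
	return p
}

func miniMaxChinaProvider() catwalk.Provider {
	return loadProviderFromConfig(miniMaxChinaConfig)
}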

pkg/catwalk/provider.go

@@ -21,27 +21,28 @@ type InferenceProvider string
 
 // All the inference providers supported by the system.
 const (
-	InferenceProviderOpenAI      InferenceProvider = "openai"
-	InferenceProviderAnthropic   InferenceProvider = "anthropic"
-	InferenceProviderSynthetic   InferenceProvider = "synthetic"
-	InferenceProviderGemini      InferenceProvider = "gemini"
-	InferenceProviderAzure       InferenceProvider = "azure"
-	InferenceProviderBedrock     InferenceProvider = "bedrock"
-	InferenceProviderVertexAI    InferenceProvider = "vertexai"
-	InferenceProviderXAI         InferenceProvider = "xai"
-	InferenceProviderZAI         InferenceProvider = "zai"
-	InferenceProviderGROQ        InferenceProvider = "groq"
-	InferenceProviderOpenRouter  InferenceProvider = "openrouter"
-	InferenceProviderCerebras    InferenceProvider = "cerebras"
-	InferenceProviderVenice      InferenceProvider = "venice"
-	InferenceProviderChutes      InferenceProvider = "chutes"
-	InferenceProviderHuggingFace InferenceProvider = "huggingface"
-	InferenceAIHubMix            InferenceProvider = "aihubmix"
-	InferenceKimiCoding          InferenceProvider = "kimi-coding"
-	InferenceProviderCopilot     InferenceProvider = "copilot"
-	InferenceProviderVercel      InferenceProvider = "vercel"
-	InferenceProviderMiniMax     InferenceProvider = "minimax"
-	InferenceProviderIoNet       InferenceProvider = "ionet"
+	InferenceProviderOpenAI       InferenceProvider = "openai"
+	InferenceProviderAnthropic    InferenceProvider = "anthropic"
+	InferenceProviderSynthetic    InferenceProvider = "synthetic"
+	InferenceProviderGemini       InferenceProvider = "gemini"
+	InferenceProviderAzure        InferenceProvider = "azure"
+	InferenceProviderBedrock      InferenceProvider = "bedrock"
+	InferenceProviderVertexAI     InferenceProvider = "vertexai"
+	InferenceProviderXAI          InferenceProvider = "xai"
+	InferenceProviderZAI          InferenceProvider = "zai"
+	InferenceProviderGROQ         InferenceProvider = "groq"
+	InferenceProviderOpenRouter   InferenceProvider = "openrouter"
+	InferenceProviderCerebras     InferenceProvider = "cerebras"
+	InferenceProviderVenice       InferenceProvider = "venice"
+	InferenceProviderChutes       InferenceProvider = "chutes"
+	InferenceProviderHuggingFace  InferenceProvider = "huggingface"
+	InferenceAIHubMix             InferenceProvider = "aihubmix"
+	InferenceKimiCoding           InferenceProvider = "kimi-coding"
+	InferenceProviderCopilot      InferenceProvider = "copilot"
+	InferenceProviderVercel       InferenceProvider = "vercel"
+	InferenceProviderMiniMax      InferenceProvider = "minimax"
+	InferenceProviderMiniMaxChina InferenceProvider = "minimax-china"
+	InferenceProviderIoNet        InferenceProvider = "ionet"
 )
 
 // Provider represents an AI provider configuration.
@@ -107,6 +108,7 @@ func KnownProviders() []InferenceProvider {
 		InferenceProviderCopilot,
 		InferenceProviderVercel,
 		InferenceProviderMiniMax,
+		InferenceProviderMiniMaxChina,
 	}
 }
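
With the constant and the KnownProviders entry in place, downstream code can validate a provider id against the known set. A hedged usage sketch: isKnownProvider is illustrative, not part of the package API, and the module path is assumed to be github.com/charmbracelet/catwalk; the KnownProviders signature itself is taken from the diff above.

package main

import (
	"fmt"

	"github.com/charmbracelet/catwalk/pkg/catwalk"
)

// isKnownProvider reports whether id matches one of the providers
// returned by catwalk.KnownProviders.
func isKnownProvider(id string) bool {
	for _, p := range catwalk.KnownProviders() {
		if p == catwalk.InferenceProvider(id) {
			return true
		}
	}
	return false
}

func main() {
	fmt.Println(isKnownProvider("minimax-china")) // true after this change
}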