chore: add Synthetic provider with updated model configs (#100)

Diogenesoftoronto created this pull request

Change summary

internal/providers/configs/synthetic.json | 371 +++++++++++++++++++++++++
internal/providers/providers.go           |   8 
pkg/catwalk/provider.go                   |   2 
3 files changed, 381 insertions(+)

Detailed changes

internal/providers/configs/synthetic.json

@@ -0,0 +1,371 @@
+{
+  "name": "Synthetic",
+  "id": "synthetic",
+  "type": "openai-compat",
+  "api_key": "$SYNTHETIC_API_KEY",
+  "api_endpoint": "https://api.synthetic.new/openai/v1",
+  "default_large_model_id": "hf:zai-org/GLM-4.6",
+  "default_small_model_id": "hf:openai/gpt-oss-120b",
+  "models": [
+    {
+      "id": "hf:deepseek-ai/DeepSeek-R1",
+      "name": "DeepSeek R1",
+      "cost_per_1m_in": 0.55,
+      "cost_per_1m_out": 2.19,
+      "cost_per_1m_in_cached": 0.07,
+      "cost_per_1m_out_cached": 0.14,
+      "context_window": 128000,
+      "default_max_tokens": 65536,
+      "can_reason": true,
+      "reasoning_levels": [
+        "low",
+        "medium",
+        "high"
+      ],
+      "default_reasoning_effort": "medium",
+      "supports_attachments": false
+    },
+    {
+      "id": "hf:deepseek-ai/DeepSeek-R1-0528",
+      "name": "DeepSeek R1 0528",
+      "cost_per_1m_in": 3.0,
+      "cost_per_1m_out": 8.0,
+      "cost_per_1m_in_cached": 0.07,
+      "cost_per_1m_out_cached": 0.14,
+      "context_window": 128000,
+      "default_max_tokens": 65536,
+      "can_reason": true,
+      "reasoning_levels": [
+        "low",
+        "medium",
+        "high"
+      ],
+      "default_reasoning_effort": "medium",
+      "supports_attachments": false
+    },
+    {
+      "id": "hf:deepseek-ai/DeepSeek-V3",
+      "name": "DeepSeek V3",
+      "cost_per_1m_in": 1.25,
+      "cost_per_1m_out": 1.25,
+      "cost_per_1m_in_cached": 0.07,
+      "cost_per_1m_out_cached": 0.14,
+      "context_window": 128000,
+      "default_max_tokens": 8192,
+      "can_reason": false,
+      "supports_attachments": false
+    },
+    {
+      "id": "hf:deepseek-ai/DeepSeek-V3-0324",
+      "name": "DeepSeek V3 0324",
+      "cost_per_1m_in": 1.2,
+      "cost_per_1m_out": 1.2,
+      "cost_per_1m_in_cached": 0.07,
+      "cost_per_1m_out_cached": 0.14,
+      "context_window": 128000,
+      "default_max_tokens": 8192,
+      "can_reason": false,
+      "supports_attachments": false
+    },
+    {
+      "id": "hf:deepseek-ai/DeepSeek-V3.1",
+      "name": "DeepSeek V3.1",
+      "cost_per_1m_in": 0.56,
+      "cost_per_1m_out": 1.68,
+      "cost_per_1m_in_cached": 0.07,
+      "cost_per_1m_out_cached": 0.14,
+      "context_window": 131072,
+      "context_window": 128000,
+      "default_max_tokens": 8192,
+      "can_reason": true,
+      "reasoning_levels": [
+        "low",
+        "medium",
+        "high"
+      ],
+      "default_reasoning_effort": "medium",
+      "supports_attachments": false
+    },
+    {
+      "id": "hf:deepseek-ai/DeepSeek-V3.1-Terminus",
+      "name": "DeepSeek V3.1 Terminus",
+      "cost_per_1m_in": 1.2,
+      "cost_per_1m_out": 1.2,
+      "cost_per_1m_in_cached": 0.07,
+      "cost_per_1m_out_cached": 0.14,
+      "context_window": 128000,
+      "default_max_tokens": 8192,
+      "can_reason": true,
+      "reasoning_levels": [
+        "low",
+        "medium",
+        "high"
+      ],
+      "default_reasoning_effort": "medium",
+      "supports_attachments": false
+    },
+    {
+      "id": "hf:meta-llama/Llama-3.1-405B-Instruct",
+      "name": "Llama 3.1 405B Instruct",
+      "cost_per_1m_in": 3.0,
+      "cost_per_1m_out": 3.0,
+      "cost_per_1m_in_cached": 0.27,
+      "cost_per_1m_out_cached": 0.55,
+      "context_window": 131072,
+      "default_max_tokens": 4096,
+      "can_reason": true,
+      "reasoning_levels": [
+        "low",
+        "medium",
+        "high"
+      ],
+      "default_reasoning_effort": "medium",
+      "supports_attachments": false
+    },
+    {
+      "id": "hf:meta-llama/Llama-3.1-70B-Instruct",
+      "name": "Llama 3.1 70B Instruct",
+      "cost_per_1m_in": 0.9,
+      "cost_per_1m_out": 0.9,
+      "cost_per_1m_in_cached": 0.59,
+      "cost_per_1m_out_cached": 1.1,
+      "context_window": 131072,
+      "context_window": 128000,
+      "default_max_tokens": 4096,
+      "can_reason": false,
+      "supports_attachments": false
+    },
+    {
+      "id": "hf:meta-llama/Llama-3.1-8B-Instruct",
+      "name": "Llama 3.1 8B Instruct",
+      "cost_per_1m_in": 0.2,
+      "cost_per_1m_out": 0.2,
+      "cost_per_1m_in_cached": 0.07,
+      "cost_per_1m_out_cached": 0.2,
+      "context_window": 128000,
+      "default_max_tokens": 4096,
+      "can_reason": false,
+      "supports_attachments": false
+    },
+    {
+      "id": "hf:meta-llama/Llama-3.3-70B-Instruct",
+      "name": "Llama 3.3 70B Instruct",
+      "cost_per_1m_in": 0.9,
+      "cost_per_1m_out": 0.9,
+      "cost_per_1m_in_cached": 0.59,
+      "cost_per_1m_out_cached": 1.1,
+      "context_window": 128000,
+      "default_max_tokens": 4096,
+      "can_reason": false,
+      "supports_attachments": false
+    },
+    {
+      "id": "hf:meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8",
+      "name": "Llama 4 Maverick 17B 128E Instruct FP8",
+      "cost_per_1m_in": 0.22,
+      "cost_per_1m_out": 0.88,
+      "cost_per_1m_in_cached": 0.14,
+      "cost_per_1m_out_cached": 0.55,
+      "context_window": 524000,
+      "default_max_tokens": 4096,
+      "can_reason": false,
+      "supports_attachments": true
+    },
+    {
+      "id": "hf:meta-llama/Llama-4-Scout-17B-16E-Instruct",
+      "name": "Llama 4 Scout 17B 16E Instruct",
+      "cost_per_1m_in": 0.15,
+      "cost_per_1m_out": 0.6,
+      "cost_per_1m_in_cached": 0.14,
+      "cost_per_1m_out_cached": 0.55,
+      "context_window": 328000,
+      "default_max_tokens": 8192,
+      "can_reason": false,
+      "supports_attachments": true
+    },
+    {
+      "id": "hf:MiniMaxAI/MiniMax-M2",
+      "name": "MiniMax M2",
+      "cost_per_1m_in": 0.55,
+      "cost_per_1m_out": 2.19,
+      "cost_per_1m_in_cached": 0.27,
+      "cost_per_1m_out_cached": 0.55,
+      "context_window": 192000,
+      "default_max_tokens": 65536,
+      "can_reason": true,
+      "reasoning_levels": [
+        "low",
+        "medium",
+        "high"
+      ],
+      "default_reasoning_effort": "medium",
+      "supports_attachments": false
+    },
+    {
+      "id": "hf:moonshotai/Kimi-K2-Instruct",
+      "name": "Kimi K2 Instruct",
+      "cost_per_1m_in": 0.6,
+      "cost_per_1m_out": 2.5,
+      "cost_per_1m_in_cached": 0.27,
+      "cost_per_1m_out_cached": 0.55,
+      "context_window": 128000,
+      "default_max_tokens": 131072,
+      "can_reason": false,
+      "supports_attachments": false
+    },
+    {
+      "id": "hf:moonshotai/Kimi-K2-Thinking",
+      "name": "Kimi K2 Instruct",
+      "cost_per_1m_in": 0.55,
+      "cost_per_1m_out": 2.19,
+      "cost_per_1m_in_cached": 0.55,
+      "cost_per_1m_out_cached": 2.19,
+      "context_window": 128000,
+      "default_max_tokens": 131072,
+      "can_reason": true,
+      "reasoning_levels": [
+        "low",
+        "medium",
+        "high"
+      ],
+      "default_reasoning_effort": "medium",
+      "supports_attachments": false
+    },
+    {
+      "id": "hf:moonshotai/Kimi-K2-Instruct-0905",
+      "name": "Kimi K2 Instruct 0905",
+      "cost_per_1m_in": 1.2,
+      "cost_per_1m_out": 1.2,
+      "cost_per_1m_in_cached": 0.55,
+      "cost_per_1m_out_cached": 1.1,
+      "context_window": 262144,
+      "context_window": 256000,
+      "default_max_tokens": 262144,
+      "can_reason": false,
+      "supports_attachments": false
+    },
+    {
+      "id": "hf:openai/gpt-oss-120b",
+      "name": "GPT-OSS 120B",
+      "cost_per_1m_in": 0.1,
+      "cost_per_1m_out": 0.1,
+      "cost_per_1m_in_cached": 0.55,
+      "cost_per_1m_out_cached": 1.1,
+      "context_window": 128000,
+      "default_max_tokens": 65536,
+      "can_reason": true,
+      "reasoning_levels": [
+        "low",
+        "medium",
+        "high"
+      ],
+      "default_reasoning_effort": "medium",
+      "supports_attachments": false
+    },
+    {
+      "id": "hf:Qwen/Qwen2.5-Coder-32B-Instruct",
+      "name": "Qwen2.5 Coder 32B Instruct",
+      "cost_per_1m_in": 0.14,
+      "cost_per_1m_out": 0.55,
+      "cost_per_1m_in_cached": 0.14,
+      "cost_per_1m_out_cached": 0.55,
+      "context_window": 32768,
+      "default_max_tokens": 32768,
+      "can_reason": true,
+      "reasoning_levels": [
+        "low",
+        "medium",
+        "high"
+      ],
+      "default_reasoning_effort": "medium",
+      "supports_attachments": false
+    },
+    {
+      "id": "hf:Qwen/Qwen3-235B-A22B-Instruct-2507",
+      "name": "Qwen3 235B A22B Instruct 2507",
+      "cost_per_1m_in": 0.22,
+      "cost_per_1m_out": 0.88,
+      "cost_per_1m_in_cached": 0.55,
+      "cost_per_1m_out_cached": 1.1,
+      "context_window": 262144,
+      "context_window": 256000,
+      "default_max_tokens": 6912,
+      "can_reason": true,
+      "reasoning_levels": [
+        "low",
+        "medium",
+        "high"
+      ],
+      "default_reasoning_effort": "medium",
+      "supports_attachments": false
+    },
+    {
+      "id": "hf:Qwen/Qwen3-235B-A22B-Thinking-2507",
+      "name": "Qwen3 235B A22B Thinking 2507",
+      "cost_per_1m_in": 0.65,
+      "cost_per_1m_out": 3.0,
+      "cost_per_1m_in_cached": 0.55,
+      "cost_per_1m_out_cached": 1.1,
+      "context_window": 256000,
+      "default_max_tokens": 81920,
+      "can_reason": true,
+      "reasoning_levels": [
+        "low",
+        "medium",
+        "high"
+      ],
+      "default_reasoning_effort": "medium",
+      "supports_attachments": false
+    },
+    {
+      "id": "hf:Qwen/Qwen3-Coder-480B-A35B-Instruct",
+      "name": "Qwen3 Coder 480B A35B Instruct",
+      "cost_per_1m_in": 0.45,
+      "cost_per_1m_out": 1.8,
+      "cost_per_1m_in_cached": 0.82,
+      "cost_per_1m_out_cached": 1.65,
+      "context_window": 256000,
+      "default_max_tokens": 262144,
+      "can_reason": true,
+      "reasoning_levels": [
+        "low",
+        "medium",
+        "high"
+      ],
+      "default_reasoning_effort": "medium",
+      "supports_attachments": false
+    },
+    {
+      "id": "hf:zai-org/GLM-4.5",
+      "name": "GLM-4.5",
+      "cost_per_1m_in": 0.55,
+      "cost_per_1m_out": 2.19,
+      "cost_per_1m_in_cached": 0.14,
+      "cost_per_1m_out_cached": 0.55,
+      "context_window": 128000,
+      "default_max_tokens": 98304,
+      "can_reason": true,
+      "reasoning_levels": [
+        "low",
+        "medium",
+        "high"
+      ],
+      "default_reasoning_effort": "medium",
+      "supports_attachments": false
+    },
+    {
+      "id": "hf:zai-org/GLM-4.6",
+      "name": "GLM-4.6",
+      "cost_per_1m_in": 0.55,
+      "cost_per_1m_out": 0.55,
+      "cost_per_1m_in_cached": 0.27,
+      "cost_per_1m_out_cached": 0.55,
+      "context_window": 198000,
+      "default_max_tokens": 65536,
+      "can_reason": true,
+      "reasoning_levels": [
+        "low",
+        "medium",
+        "high"
+      ],
+      "default_reasoning_effort": "medium",
+      "supports_attachments": false
+    }
+  ]
+}
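
Because the provider type is "openai-compat", any OpenAI-compatible client can talk to the endpoint once SYNTHETIC_API_KEY is set. Below is a minimal Go sketch: the endpoint, environment variable, and model ID come from the config above, while the /chat/completions path and request shape assume the standard OpenAI wire format, which this diff does not itself exercise.

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"io"
	"net/http"
	"os"
)

func main() {
	// Request body follows the OpenAI chat-completions shape (assumed from
	// the "openai-compat" provider type; not verified against Synthetic).
	body, _ := json.Marshal(map[string]any{
		"model": "hf:zai-org/GLM-4.6", // default_large_model_id above
		"messages": []map[string]string{
			{"role": "user", "content": "Hello"},
		},
	})

	req, err := http.NewRequest(http.MethodPost,
		"https://api.synthetic.new/openai/v1/chat/completions",
		bytes.NewReader(body))
	if err != nil {
		panic(err)
	}
	req.Header.Set("Authorization", "Bearer "+os.Getenv("SYNTHETIC_API_KEY"))
	req.Header.Set("Content-Type", "application/json")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	out, _ := io.ReadAll(resp.Body)
	fmt.Println(resp.Status, string(out))
}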

internal/providers/providers.go

@@ -15,6 +15,9 @@ var openAIConfig []byte
 //go:embed configs/anthropic.json
 var anthropicConfig []byte
 
+//go:embed configs/synthetic.json
+var syntheticConfig []byte
+
 //go:embed configs/gemini.json
 var geminiConfig []byte
 
@@ -77,6 +80,7 @@ var providerRegistry = []ProviderFunc{
 	deepSeekProvider,
 	huggingFaceProvider,
 	aiHubMixProvider,
+	syntheticProvider,
 }
 
 // GetAll returns all registered providers.
@@ -101,6 +105,10 @@ func openAIProvider() catwalk.Provider {
 	return loadProviderFromConfig(openAIConfig)
 }
 
+func syntheticProvider() catwalk.Provider {
+	return loadProviderFromConfig(syntheticConfig)
+}
+
 func anthropicProvider() catwalk.Provider {
 	return loadProviderFromConfig(anthropicConfig)
 }

pkg/catwalk/provider.go

@@ -22,6 +22,7 @@ type InferenceProvider string
 const (
 	InferenceProviderOpenAI      InferenceProvider = "openai"
 	InferenceProviderAnthropic   InferenceProvider = "anthropic"
+	InferenceProviderSynthetic   InferenceProvider = "synthetic"
 	InferenceProviderGemini      InferenceProvider = "gemini"
 	InferenceProviderAzure       InferenceProvider = "azure"
 	InferenceProviderBedrock     InferenceProvider = "bedrock"
@@ -81,6 +82,7 @@ type Model struct {
 func KnownProviders() []InferenceProvider {
 	return []InferenceProvider{
 		InferenceProviderOpenAI,
+		InferenceProviderSynthetic,
 		InferenceProviderAnthropic,
 		InferenceProviderGemini,
 		InferenceProviderAzure,
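
With the constant added and the config registered, the new provider resolves like any other entry. A usage sketch follows; GetAll appears in this diff, but the ID, Name, and Models fields on catwalk.Provider are inferred from the JSON keys above rather than shown here.

// findSynthetic looks up the Synthetic provider in the registry. Usable
// only inside this module, since internal/providers cannot be imported
// from outside it.
func findSynthetic() (catwalk.Provider, bool) {
	for _, p := range providers.GetAll() {
		// "synthetic" matches the "id" field of the embedded config.
		if p.ID == "synthetic" {
			return p, true
		}
	}
	return catwalk.Provider{}, false
}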