feat(synthetic): add pricing (#128)

Created by Amolith

* feat(synthetic): add pricing and Pro/Max provider

Synthetic's API doesn't include pricing, so we add manual overrides from
their pricing page. Also generates synthetic-promax.json for Pro/Max
subscribers with zero pricing.

- Set default large model to GLM 4.7
- Add priceOverrides map with per-model pricing
- Set cached rates equal to regular rates (they do not appear to have
  separate cached rates)
- Add DeepSeek V3.2 metadata override (tools + reasoning)
- Generate separate config for Pro/Max subscription

Assisted-by: Claude Opus 4.5 via Crush

* refactor(synthetic): parse pricing from API

Assisted-by: Claude Opus 4.5 via Crush

* refactor(synthetic): remove Pro/Max provider generation

Assisted-by: Claude Opus 4.5 via Crush <crush@charm.land>

Change summary

cmd/synthetic/main.go                     |  38 ++---
internal/providers/configs/synthetic.json | 154 ++++++++++++------------
2 files changed, 97 insertions(+), 95 deletions(-)

Detailed changes

cmd/synthetic/main.go 🔗

@@ -54,30 +54,23 @@ type ModelPricing struct {
 	CostPer1MOutCached float64 `json:"cost_per_1m_out_cached"`
 }
 
-func getPricing(model Model) ModelPricing {
-	pricing := ModelPricing{}
-	costPrompt, err := strconv.ParseFloat(model.Pricing.Prompt, 64)
-	if err != nil {
-		costPrompt = 0.0
-	}
-	pricing.CostPer1MIn = costPrompt * 1_000_000
-	costCompletion, err := strconv.ParseFloat(model.Pricing.Completion, 64)
+// parsePrice extracts a float from Synthetic's price format (e.g. "$0.00000055").
+func parsePrice(s string) float64 {
+	s = strings.TrimPrefix(s, "$")
+	v, err := strconv.ParseFloat(s, 64)
 	if err != nil {
-		costCompletion = 0.0
+		return 0.0
 	}
-	pricing.CostPer1MOut = costCompletion * 1_000_000
+	return v
+}
 
-	costPromptCached, err := strconv.ParseFloat(model.Pricing.InputCacheWrites, 64)
-	if err != nil {
-		costPromptCached = 0.0
-	}
-	pricing.CostPer1MInCached = costPromptCached * 1_000_000
-	costCompletionCached, err := strconv.ParseFloat(model.Pricing.InputCacheReads, 64)
-	if err != nil {
-		costCompletionCached = 0.0
+func getPricing(model Model) ModelPricing {
+	return ModelPricing{
+		CostPer1MIn:        parsePrice(model.Pricing.Prompt) * 1_000_000,
+		CostPer1MOut:       parsePrice(model.Pricing.Completion) * 1_000_000,
+		CostPer1MInCached:  parsePrice(model.Pricing.InputCacheReads) * 1_000_000,
+		CostPer1MOutCached: parsePrice(model.Pricing.InputCacheReads) * 1_000_000,
 	}
-	pricing.CostPer1MOutCached = costCompletionCached * 1_000_000
-	return pricing
 }
 
 // applyModelOverrides sets supported_features for models where Synthetic
@@ -95,6 +88,9 @@ func applyModelOverrides(model *Model) {
 	case strings.HasPrefix(model.ID, "hf:deepseek-ai/DeepSeek-V3.1"):
 		model.SupportedFeatures = []string{"tools", "reasoning"}
 
+	case strings.HasPrefix(model.ID, "hf:deepseek-ai/DeepSeek-V3.2"):
+		model.SupportedFeatures = []string{"tools", "reasoning"}
+
 	case strings.HasPrefix(model.ID, "hf:deepseek-ai/DeepSeek-V3"):
 		model.SupportedFeatures = []string{"tools"}
 
@@ -152,7 +148,7 @@ func main() {
 		APIKey:              "$SYNTHETIC_API_KEY",
 		APIEndpoint:         "https://api.synthetic.new/openai/v1",
 		Type:                catwalk.TypeOpenAICompat,
-		DefaultLargeModelID: "hf:zai-org/GLM-4.6",
+		DefaultLargeModelID: "hf:zai-org/GLM-4.7",
 		DefaultSmallModelID: "hf:deepseek-ai/DeepSeek-V3.1-Terminus",
 		Models:              []catwalk.Model{},
 	}

internal/providers/configs/synthetic.json 🔗

@@ -4,16 +4,16 @@
   "api_key": "$SYNTHETIC_API_KEY",
   "api_endpoint": "https://api.synthetic.new/openai/v1",
   "type": "openai-compat",
-  "default_large_model_id": "hf:zai-org/GLM-4.6",
+  "default_large_model_id": "hf:zai-org/GLM-4.7",
   "default_small_model_id": "hf:deepseek-ai/DeepSeek-V3.1-Terminus",
   "models": [
     {
       "id": "hf:deepseek-ai/DeepSeek-R1-0528",
       "name": "DeepSeek R1 0528",
-      "cost_per_1m_in": 0,
-      "cost_per_1m_out": 0,
-      "cost_per_1m_in_cached": 0,
-      "cost_per_1m_out_cached": 0,
+      "cost_per_1m_in": 3,
+      "cost_per_1m_out": 8,
+      "cost_per_1m_in_cached": 3,
+      "cost_per_1m_out_cached": 3,
       "context_window": 131072,
       "default_max_tokens": 13107,
       "can_reason": true,
@@ -29,10 +29,10 @@
     {
       "id": "hf:deepseek-ai/DeepSeek-V3",
       "name": "DeepSeek V3",
-      "cost_per_1m_in": 0,
-      "cost_per_1m_out": 0,
-      "cost_per_1m_in_cached": 0,
-      "cost_per_1m_out_cached": 0,
+      "cost_per_1m_in": 1.25,
+      "cost_per_1m_out": 1.25,
+      "cost_per_1m_in_cached": 1.25,
+      "cost_per_1m_out_cached": 1.25,
       "context_window": 131072,
       "default_max_tokens": 13107,
       "can_reason": false,
@@ -42,10 +42,10 @@
     {
       "id": "hf:deepseek-ai/DeepSeek-V3-0324",
       "name": "DeepSeek V3 0324",
-      "cost_per_1m_in": 0,
-      "cost_per_1m_out": 0,
-      "cost_per_1m_in_cached": 0,
-      "cost_per_1m_out_cached": 0,
+      "cost_per_1m_in": 1.2,
+      "cost_per_1m_out": 1.2,
+      "cost_per_1m_in_cached": 1.2,
+      "cost_per_1m_out_cached": 1.2,
       "context_window": 131072,
       "default_max_tokens": 13107,
       "can_reason": false,
@@ -55,10 +55,10 @@
     {
       "id": "hf:deepseek-ai/DeepSeek-V3.1",
       "name": "DeepSeek V3.1",
-      "cost_per_1m_in": 0,
-      "cost_per_1m_out": 0,
-      "cost_per_1m_in_cached": 0,
-      "cost_per_1m_out_cached": 0,
+      "cost_per_1m_in": 0.56,
+      "cost_per_1m_out": 1.68,
+      "cost_per_1m_in_cached": 0.56,
+      "cost_per_1m_out_cached": 0.56,
       "context_window": 131072,
       "default_max_tokens": 13107,
       "can_reason": true,
@@ -74,10 +74,10 @@
     {
       "id": "hf:deepseek-ai/DeepSeek-V3.1-Terminus",
       "name": "DeepSeek V3.1 Terminus",
-      "cost_per_1m_in": 0,
-      "cost_per_1m_out": 0,
-      "cost_per_1m_in_cached": 0,
-      "cost_per_1m_out_cached": 0,
+      "cost_per_1m_in": 1.2,
+      "cost_per_1m_out": 1.2,
+      "cost_per_1m_in_cached": 1.2,
+      "cost_per_1m_out_cached": 1.2,
       "context_window": 131072,
       "default_max_tokens": 13107,
       "can_reason": true,
@@ -93,23 +93,29 @@
     {
       "id": "hf:deepseek-ai/DeepSeek-V3.2",
       "name": "DeepSeek V3.2",
-      "cost_per_1m_in": 0,
-      "cost_per_1m_out": 0,
-      "cost_per_1m_in_cached": 0,
-      "cost_per_1m_out_cached": 0,
+      "cost_per_1m_in": 0.56,
+      "cost_per_1m_out": 1.68,
+      "cost_per_1m_in_cached": 0.56,
+      "cost_per_1m_out_cached": 0.56,
       "context_window": 162816,
       "default_max_tokens": 16281,
-      "can_reason": false,
+      "can_reason": true,
+      "reasoning_levels": [
+        "low",
+        "medium",
+        "high"
+      ],
+      "default_reasoning_effort": "medium",
       "supports_attachments": false,
       "options": {}
     },
     {
       "id": "hf:zai-org/GLM-4.5",
       "name": "GLM 4.5",
-      "cost_per_1m_in": 0,
-      "cost_per_1m_out": 0,
-      "cost_per_1m_in_cached": 0,
-      "cost_per_1m_out_cached": 0,
+      "cost_per_1m_in": 0.55,
+      "cost_per_1m_out": 2.1900000000000004,
+      "cost_per_1m_in_cached": 0.55,
+      "cost_per_1m_out_cached": 0.55,
       "context_window": 131072,
       "default_max_tokens": 13107,
       "can_reason": false,
@@ -119,10 +125,10 @@
     {
       "id": "hf:zai-org/GLM-4.7",
       "name": "GLM 4.7",
-      "cost_per_1m_in": 0,
-      "cost_per_1m_out": 0,
-      "cost_per_1m_in_cached": 0,
-      "cost_per_1m_out_cached": 0,
+      "cost_per_1m_in": 0.55,
+      "cost_per_1m_out": 2.1900000000000004,
+      "cost_per_1m_in_cached": 0.55,
+      "cost_per_1m_out_cached": 0.55,
       "context_window": 202752,
       "default_max_tokens": 20275,
       "can_reason": true,
@@ -138,10 +144,10 @@
     {
       "id": "hf:moonshotai/Kimi-K2-Instruct-0905",
       "name": "Kimi K2 Instruct 0905",
-      "cost_per_1m_in": 0,
-      "cost_per_1m_out": 0,
-      "cost_per_1m_in_cached": 0,
-      "cost_per_1m_out_cached": 0,
+      "cost_per_1m_in": 1.2,
+      "cost_per_1m_out": 1.2,
+      "cost_per_1m_in_cached": 1.2,
+      "cost_per_1m_out_cached": 1.2,
       "context_window": 262144,
       "default_max_tokens": 26214,
       "can_reason": false,
@@ -151,10 +157,10 @@
     {
       "id": "hf:moonshotai/Kimi-K2-Thinking",
       "name": "Kimi K2 Thinking",
-      "cost_per_1m_in": 0,
-      "cost_per_1m_out": 0,
-      "cost_per_1m_in_cached": 0,
-      "cost_per_1m_out_cached": 0,
+      "cost_per_1m_in": 0.55,
+      "cost_per_1m_out": 2.1900000000000004,
+      "cost_per_1m_in_cached": 0.55,
+      "cost_per_1m_out_cached": 0.55,
       "context_window": 262144,
       "default_max_tokens": 32768,
       "can_reason": true,
@@ -170,10 +176,10 @@
     {
       "id": "hf:meta-llama/Llama-3.3-70B-Instruct",
       "name": "Llama 3.3 70B Instruct",
-      "cost_per_1m_in": 0,
-      "cost_per_1m_out": 0,
-      "cost_per_1m_in_cached": 0,
-      "cost_per_1m_out_cached": 0,
+      "cost_per_1m_in": 0.8999999999999999,
+      "cost_per_1m_out": 0.8999999999999999,
+      "cost_per_1m_in_cached": 0.8999999999999999,
+      "cost_per_1m_out_cached": 0.8999999999999999,
       "context_window": 131072,
       "default_max_tokens": 13107,
       "can_reason": false,
@@ -183,10 +189,10 @@
     {
       "id": "hf:meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8",
       "name": "Llama 4 Maverick 17B 128E Instruct FP8",
-      "cost_per_1m_in": 0,
-      "cost_per_1m_out": 0,
-      "cost_per_1m_in_cached": 0,
-      "cost_per_1m_out_cached": 0,
+      "cost_per_1m_in": 0.22,
+      "cost_per_1m_out": 0.88,
+      "cost_per_1m_in_cached": 0.22,
+      "cost_per_1m_out_cached": 0.22,
       "context_window": 536576,
       "default_max_tokens": 53657,
       "can_reason": false,
@@ -196,10 +202,10 @@
     {
       "id": "hf:MiniMaxAI/MiniMax-M2.1",
       "name": "MiniMax M2.1",
-      "cost_per_1m_in": 0,
-      "cost_per_1m_out": 0,
-      "cost_per_1m_in_cached": 0,
-      "cost_per_1m_out_cached": 0,
+      "cost_per_1m_in": 0.55,
+      "cost_per_1m_out": 2.1900000000000004,
+      "cost_per_1m_in_cached": 0.55,
+      "cost_per_1m_out_cached": 0.55,
       "context_window": 196608,
       "default_max_tokens": 19660,
       "can_reason": true,
@@ -215,10 +221,10 @@
     {
       "id": "hf:Qwen/Qwen3-235B-A22B-Instruct-2507",
       "name": "Qwen3 235B A22B Instruct 2507",
-      "cost_per_1m_in": 0,
-      "cost_per_1m_out": 0,
-      "cost_per_1m_in_cached": 0,
-      "cost_per_1m_out_cached": 0,
+      "cost_per_1m_in": 0.22,
+      "cost_per_1m_out": 0.88,
+      "cost_per_1m_in_cached": 0.22,
+      "cost_per_1m_out_cached": 0.22,
       "context_window": 262144,
       "default_max_tokens": 26214,
       "can_reason": true,
@@ -234,10 +240,10 @@
     {
       "id": "hf:Qwen/Qwen3-235B-A22B-Thinking-2507",
       "name": "Qwen3 235B A22B Thinking 2507",
-      "cost_per_1m_in": 0,
-      "cost_per_1m_out": 0,
-      "cost_per_1m_in_cached": 0,
-      "cost_per_1m_out_cached": 0,
+      "cost_per_1m_in": 0.65,
+      "cost_per_1m_out": 3,
+      "cost_per_1m_in_cached": 0.65,
+      "cost_per_1m_out_cached": 0.65,
       "context_window": 262144,
       "default_max_tokens": 26214,
       "can_reason": true,
@@ -253,10 +259,10 @@
     {
       "id": "hf:Qwen/Qwen3-Coder-480B-A35B-Instruct",
       "name": "Qwen3 Coder 480B A35B Instruct",
-      "cost_per_1m_in": 0,
-      "cost_per_1m_out": 0,
-      "cost_per_1m_in_cached": 0,
-      "cost_per_1m_out_cached": 0,
+      "cost_per_1m_in": 0.44999999999999996,
+      "cost_per_1m_out": 1.7999999999999998,
+      "cost_per_1m_in_cached": 0.44999999999999996,
+      "cost_per_1m_out_cached": 0.44999999999999996,
       "context_window": 262144,
       "default_max_tokens": 26214,
       "can_reason": false,
@@ -266,10 +272,10 @@
     {
       "id": "hf:Qwen/Qwen3-VL-235B-A22B-Instruct",
       "name": "Qwen3 VL 235B A22B Instruct",
-      "cost_per_1m_in": 0,
-      "cost_per_1m_out": 0,
-      "cost_per_1m_in_cached": 0,
-      "cost_per_1m_out_cached": 0,
+      "cost_per_1m_in": 0.22,
+      "cost_per_1m_out": 0.88,
+      "cost_per_1m_in_cached": 0.22,
+      "cost_per_1m_out_cached": 0.22,
       "context_window": 256000,
       "default_max_tokens": 25600,
       "can_reason": false,
@@ -279,10 +285,10 @@
     {
       "id": "hf:openai/gpt-oss-120b",
       "name": "gpt oss 120b",
-      "cost_per_1m_in": 0,
-      "cost_per_1m_out": 0,
-      "cost_per_1m_in_cached": 0,
-      "cost_per_1m_out_cached": 0,
+      "cost_per_1m_in": 0.09999999999999999,
+      "cost_per_1m_out": 0.09999999999999999,
+      "cost_per_1m_in_cached": 0.09999999999999999,
+      "cost_per_1m_out_cached": 0.09999999999999999,
       "context_window": 131072,
       "default_max_tokens": 13107,
       "can_reason": false,