feat: opencode zen (#246)

Created by Harm Weites and Andrey Nering

Co-authored-by: Andrey Nering <andreynering@users.noreply.github.com>

Change summary

.github/workflows/update.yml                 |   3 
Taskfile.yaml                                |   5 
cmd/opencode-zen/main.go                     | 209 ++++++
internal/providers/configs/opencode-zen.json | 731 ++++++++++++++++++++++
internal/providers/providers.go              |   8 
pkg/catwalk/provider.go                      |   2 
6 files changed, 958 insertions(+)

Detailed changes

.github/workflows/update.yml

@@ -40,6 +40,9 @@ jobs:
       - run: go run ./cmd/ionet/main.go
         continue-on-error: true
 
+      - run: go run ./cmd/opencode-zen/main.go
+        continue-on-error: true
+
       - run: go run ./cmd/openrouter/main.go
         continue-on-error: true
 

Taskfile.yaml

@@ -115,6 +115,11 @@ tasks:
     cmds:
       - go run cmd/neuralwatt/main.go
 
+  gen:opencode-zen:
+    desc: Generate OpenCode Zen provider configurations
+    cmds:
+      - go run cmd/opencode-zen/main.go
+
   gen:openrouter:
     desc: Generate openrouter provider configurations
     cmds:

cmd/opencode-zen/main.go

@@ -0,0 +1,209 @@
+// Package main generates the OpenCode Zen provider configuration.
+package main
+
+import (
+	"cmp"
+	"context"
+	"encoding/json"
+	"fmt"
+	"io"
+	"log"
+	"math"
+	"net/http"
+	"os"
+	"slices"
+	"strings"
+	"time"
+
+	"charm.land/catwalk/pkg/catwalk"
+)
+
+type ZenModel struct {
+	ID      string `json:"id"`
+	Object  string `json:"object"`
+	Created int64  `json:"created"`
+	OwnedBy string `json:"owned_by"`
+}
+
+type ZenModelsResponse struct {
+	Object string     `json:"object"`
+	Data   []ZenModel `json:"data"`
+}
+
+type PricingData struct {
+	Input      float64 `json:"input"`
+	Output     float64 `json:"output"`
+	CacheRead  float64 `json:"cache_read,omitempty"`
+	CacheWrite float64 `json:"cache_write,omitempty"`
+}
+
+type ModelLimit struct {
+	Context int64 `json:"context"`
+	Output  int64 `json:"output"`
+}
+
+type ModelEnrichment struct {
+	Name       string      `json:"name"`
+	Attachment bool        `json:"attachment"`
+	Reasoning  bool        `json:"reasoning"`
+	Cost       PricingData `json:"cost"`
+	Limit      ModelLimit  `json:"limit"`
+}
+
+func fetchZenModels() ([]ZenModel, error) {
+	apiKey := cmp.Or(os.Getenv("OPENCODE_API_KEY"), "public")
+
+	client := &http.Client{Timeout: 30 * time.Second}
+	req, _ := http.NewRequestWithContext(
+		context.Background(),
+		"GET",
+		"https://opencode.ai/zen/v1/models",
+		nil,
+	)
+	req.Header.Set("User-Agent", "Catwalk/1.0")
+	req.Header.Set("Authorization", "Bearer "+apiKey)
+
+	resp, err := client.Do(req)
+	if err != nil {
+		return nil, fmt.Errorf("failed to fetch zen models: %w", err)
+	}
+	defer func() { _ = resp.Body.Close() }()
+	if resp.StatusCode != 200 {
+		body, _ := io.ReadAll(resp.Body)
+		return nil, fmt.Errorf("status %d: %s", resp.StatusCode, body)
+	}
+
+	var mr ZenModelsResponse
+	if err := json.NewDecoder(resp.Body).Decode(&mr); err != nil {
+		return nil, fmt.Errorf("failed to decode zen models: %w", err)
+	}
+
+	return mr.Data, nil
+}
+
+func fetchEnrichmentData() (map[string]ModelEnrichment, error) {
+	client := &http.Client{Timeout: 30 * time.Second}
+	req, _ := http.NewRequestWithContext(
+		context.Background(),
+		"GET",
+		"https://models.dev/api.json",
+		nil,
+	)
+	req.Header.Set("User-Agent", "Catwalk/1.0")
+
+	resp, err := client.Do(req)
+	if err != nil {
+		return nil, fmt.Errorf("failed fetching enrichment data: %w", err)
+	}
+	defer func() { _ = resp.Body.Close() }()
+	if resp.StatusCode != 200 {
+		body, _ := io.ReadAll(resp.Body)
+		return nil, fmt.Errorf("status %d: %s", resp.StatusCode, body)
+	}
+
+	var fullData map[string]json.RawMessage
+	if err := json.NewDecoder(resp.Body).Decode(&fullData); err != nil {
+		return nil, fmt.Errorf("failed to decode when fetching enrichment data: %w", err)
+	}
+
+	rawOpenCode, ok := fullData["opencode"]
+	if !ok {
+		return nil, fmt.Errorf("opencode provider not found in models.dev/api.json")
+	}
+
+	var openCodeData struct {
+		Models map[string]ModelEnrichment `json:"models"`
+	}
+	if err := json.Unmarshal(rawOpenCode, &openCodeData); err != nil {
+		return nil, fmt.Errorf("failed to unmarshal when fetching enrichment data: %w", err)
+	}
+
+	return openCodeData.Models, nil
+}
+
+func main() {
+	zenModels, err := fetchZenModels()
+	if err != nil {
+		log.Fatal("Error fetching OpenCode Zen models:", err)
+	}
+
+	enrichmentData, err := fetchEnrichmentData()
+	if err != nil {
+		log.Fatal("Error fetching enrichment data:", err)
+	}
+
+	zenProvider := catwalk.Provider{
+		Name:                "OpenCode Zen",
+		ID:                  catwalk.InferenceProviderOpenCodeZen,
+		APIKey:              "$OPENCODE_API_KEY",
+		APIEndpoint:         "https://opencode.ai/zen/v1",
+		Type:                catwalk.TypeOpenAICompat,
+		DefaultLargeModelID: "minimax-m2.5-free",
+		DefaultSmallModelID: "minimax-m2.5-free",
+	}
+
+	for _, zenModel := range zenModels {
+		enrichment, hasEnrichment := enrichmentData[zenModel.ID]
+
+		var costPer1MIn, costPer1MOut, costPer1MInCached, costPer1MOutCached float64
+		var contextWindow, defaultMaxTokens int64 = 200000, 20000
+		var supportsImages bool
+		var canReason bool
+		var reasoningLevels []string
+		var defaultReasoningEffort string
+		modelName := zenModel.ID
+
+		if hasEnrichment {
+			costPer1MIn = math.Round(enrichment.Cost.Input*100) / 100
+			costPer1MOut = math.Round(enrichment.Cost.Output*100) / 100
+			costPer1MInCached = math.Round(enrichment.Cost.CacheRead*100) / 100
+			costPer1MOutCached = math.Round(enrichment.Cost.CacheWrite*100) / 100
+			contextWindow = enrichment.Limit.Context
+			defaultMaxTokens = enrichment.Limit.Output
+			supportsImages = enrichment.Attachment
+			modelName = enrichment.Name
+
+			if enrichment.Reasoning {
+				reasoningLevels = []string{"low", "medium", "high"}
+				defaultReasoningEffort = "medium"
+				canReason = true
+			}
+		} else {
+			log.Printf("WARNING: No enrichment found for model %s, using defaults\n", zenModel.ID)
+		}
+
+		m := catwalk.Model{
+			ID:                     zenModel.ID,
+			Name:                   modelName,
+			CostPer1MIn:            costPer1MIn,
+			CostPer1MOut:           costPer1MOut,
+			CostPer1MInCached:      costPer1MInCached,
+			CostPer1MOutCached:     costPer1MOutCached,
+			ContextWindow:          contextWindow,
+			DefaultMaxTokens:       defaultMaxTokens,
+			SupportsImages:         supportsImages,
+			CanReason:              canReason,
+			ReasoningLevels:        reasoningLevels,
+			DefaultReasoningEffort: defaultReasoningEffort,
+		}
+
+		zenProvider.Models = append(zenProvider.Models, m)
+		fmt.Printf("Added model %s (%s)\n", zenModel.ID, modelName)
+	}
+
+	slices.SortFunc(zenProvider.Models, func(a catwalk.Model, b catwalk.Model) int {
+		return strings.Compare(a.Name, b.Name)
+	})
+
+	data, err := json.MarshalIndent(zenProvider, "", "  ")
+	if err != nil {
+		log.Fatal("Error marshaling provider:", err)
+	}
+	data = append(data, '\n')
+
+	if err := os.WriteFile("internal/providers/configs/opencode-zen.json", data, 0o600); err != nil {
+		log.Fatal("Error writing provider config:", err)
+	}
+
+	fmt.Printf("Generated opencode-zen.json with %d models\n", len(zenProvider.Models))
+}
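
For local verification of the generator's output, a minimal sketch like the following (hypothetical, not part of this change) could read the generated file back into a catwalk.Provider and confirm that the configured default model IDs actually exist in the model list. It assumes the JSON tags on catwalk.Provider and catwalk.Model match the generated opencode-zen.json shown below.

package main

import (
	"encoding/json"
	"log"
	"os"

	"charm.land/catwalk/pkg/catwalk"
)

func main() {
	// Read the file produced by cmd/opencode-zen/main.go.
	data, err := os.ReadFile("internal/providers/configs/opencode-zen.json")
	if err != nil {
		log.Fatal(err)
	}

	var p catwalk.Provider
	if err := json.Unmarshal(data, &p); err != nil {
		log.Fatal(err)
	}

	// Index the model IDs and check that both defaults resolve.
	ids := make(map[string]bool, len(p.Models))
	for _, m := range p.Models {
		ids[m.ID] = true
	}
	for _, want := range []string{p.DefaultLargeModelID, p.DefaultSmallModelID} {
		if !ids[want] {
			log.Fatalf("default model %q missing from generated config", want)
		}
	}
	log.Printf("ok: %d models, defaults present", len(p.Models))
}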

internal/providers/configs/opencode-zen.json

@@ -0,0 +1,731 @@
+{
+  "name": "OpenCode Zen",
+  "id": "opencode-zen",
+  "api_key": "$OPENCODE_API_KEY",
+  "api_endpoint": "https://opencode.ai/zen/v1",
+  "type": "openai-compat",
+  "default_large_model_id": "minimax-m2.5-free",
+  "default_small_model_id": "minimax-m2.5-free",
+  "models": [
+    {
+      "id": "big-pickle",
+      "name": "Big Pickle",
+      "cost_per_1m_in": 0,
+      "cost_per_1m_out": 0,
+      "cost_per_1m_in_cached": 0,
+      "cost_per_1m_out_cached": 0,
+      "context_window": 200000,
+      "default_max_tokens": 128000,
+      "can_reason": true,
+      "reasoning_levels": [
+        "low",
+        "medium",
+        "high"
+      ],
+      "default_reasoning_effort": "medium",
+      "supports_attachments": false
+    },
+    {
+      "id": "claude-3-5-haiku",
+      "name": "Claude Haiku 3.5",
+      "cost_per_1m_in": 0.8,
+      "cost_per_1m_out": 4,
+      "cost_per_1m_in_cached": 0.08,
+      "cost_per_1m_out_cached": 1,
+      "context_window": 200000,
+      "default_max_tokens": 8192,
+      "can_reason": false,
+      "supports_attachments": true
+    },
+    {
+      "id": "claude-haiku-4-5",
+      "name": "Claude Haiku 4.5",
+      "cost_per_1m_in": 1,
+      "cost_per_1m_out": 5,
+      "cost_per_1m_in_cached": 0.1,
+      "cost_per_1m_out_cached": 1.25,
+      "context_window": 200000,
+      "default_max_tokens": 64000,
+      "can_reason": true,
+      "reasoning_levels": [
+        "low",
+        "medium",
+        "high"
+      ],
+      "default_reasoning_effort": "medium",
+      "supports_attachments": true
+    },
+    {
+      "id": "claude-opus-4-1",
+      "name": "Claude Opus 4.1",
+      "cost_per_1m_in": 15,
+      "cost_per_1m_out": 75,
+      "cost_per_1m_in_cached": 1.5,
+      "cost_per_1m_out_cached": 18.75,
+      "context_window": 200000,
+      "default_max_tokens": 32000,
+      "can_reason": true,
+      "reasoning_levels": [
+        "low",
+        "medium",
+        "high"
+      ],
+      "default_reasoning_effort": "medium",
+      "supports_attachments": true
+    },
+    {
+      "id": "claude-opus-4-5",
+      "name": "Claude Opus 4.5",
+      "cost_per_1m_in": 5,
+      "cost_per_1m_out": 25,
+      "cost_per_1m_in_cached": 0.5,
+      "cost_per_1m_out_cached": 6.25,
+      "context_window": 200000,
+      "default_max_tokens": 64000,
+      "can_reason": true,
+      "reasoning_levels": [
+        "low",
+        "medium",
+        "high"
+      ],
+      "default_reasoning_effort": "medium",
+      "supports_attachments": true
+    },
+    {
+      "id": "claude-opus-4-6",
+      "name": "Claude Opus 4.6",
+      "cost_per_1m_in": 5,
+      "cost_per_1m_out": 25,
+      "cost_per_1m_in_cached": 0.5,
+      "cost_per_1m_out_cached": 6.25,
+      "context_window": 1000000,
+      "default_max_tokens": 128000,
+      "can_reason": true,
+      "reasoning_levels": [
+        "low",
+        "medium",
+        "high"
+      ],
+      "default_reasoning_effort": "medium",
+      "supports_attachments": true
+    },
+    {
+      "id": "claude-sonnet-4",
+      "name": "Claude Sonnet 4",
+      "cost_per_1m_in": 3,
+      "cost_per_1m_out": 15,
+      "cost_per_1m_in_cached": 0.3,
+      "cost_per_1m_out_cached": 3.75,
+      "context_window": 1000000,
+      "default_max_tokens": 64000,
+      "can_reason": true,
+      "reasoning_levels": [
+        "low",
+        "medium",
+        "high"
+      ],
+      "default_reasoning_effort": "medium",
+      "supports_attachments": true
+    },
+    {
+      "id": "claude-sonnet-4-5",
+      "name": "Claude Sonnet 4.5",
+      "cost_per_1m_in": 3,
+      "cost_per_1m_out": 15,
+      "cost_per_1m_in_cached": 0.3,
+      "cost_per_1m_out_cached": 3.75,
+      "context_window": 1000000,
+      "default_max_tokens": 64000,
+      "can_reason": true,
+      "reasoning_levels": [
+        "low",
+        "medium",
+        "high"
+      ],
+      "default_reasoning_effort": "medium",
+      "supports_attachments": true
+    },
+    {
+      "id": "claude-sonnet-4-6",
+      "name": "Claude Sonnet 4.6",
+      "cost_per_1m_in": 3,
+      "cost_per_1m_out": 15,
+      "cost_per_1m_in_cached": 0.3,
+      "cost_per_1m_out_cached": 3.75,
+      "context_window": 1000000,
+      "default_max_tokens": 64000,
+      "can_reason": true,
+      "reasoning_levels": [
+        "low",
+        "medium",
+        "high"
+      ],
+      "default_reasoning_effort": "medium",
+      "supports_attachments": true
+    },
+    {
+      "id": "glm-4.6",
+      "name": "GLM-4.6",
+      "cost_per_1m_in": 0.6,
+      "cost_per_1m_out": 2.2,
+      "cost_per_1m_in_cached": 0.1,
+      "cost_per_1m_out_cached": 0,
+      "context_window": 204800,
+      "default_max_tokens": 131072,
+      "can_reason": true,
+      "reasoning_levels": [
+        "low",
+        "medium",
+        "high"
+      ],
+      "default_reasoning_effort": "medium",
+      "supports_attachments": false
+    },
+    {
+      "id": "glm-4.7",
+      "name": "GLM-4.7",
+      "cost_per_1m_in": 0.6,
+      "cost_per_1m_out": 2.2,
+      "cost_per_1m_in_cached": 0.1,
+      "cost_per_1m_out_cached": 0,
+      "context_window": 204800,
+      "default_max_tokens": 131072,
+      "can_reason": true,
+      "reasoning_levels": [
+        "low",
+        "medium",
+        "high"
+      ],
+      "default_reasoning_effort": "medium",
+      "supports_attachments": false
+    },
+    {
+      "id": "glm-5",
+      "name": "GLM-5",
+      "cost_per_1m_in": 1,
+      "cost_per_1m_out": 3.2,
+      "cost_per_1m_in_cached": 0.2,
+      "cost_per_1m_out_cached": 0,
+      "context_window": 204800,
+      "default_max_tokens": 131072,
+      "can_reason": true,
+      "reasoning_levels": [
+        "low",
+        "medium",
+        "high"
+      ],
+      "default_reasoning_effort": "medium",
+      "supports_attachments": false
+    },
+    {
+      "id": "glm-5.1",
+      "name": "GLM-5.1",
+      "cost_per_1m_in": 1.4,
+      "cost_per_1m_out": 4.4,
+      "cost_per_1m_in_cached": 0.26,
+      "cost_per_1m_out_cached": 0,
+      "context_window": 204800,
+      "default_max_tokens": 131072,
+      "can_reason": true,
+      "reasoning_levels": [
+        "low",
+        "medium",
+        "high"
+      ],
+      "default_reasoning_effort": "medium",
+      "supports_attachments": false
+    },
+    {
+      "id": "gpt-5",
+      "name": "GPT-5",
+      "cost_per_1m_in": 1.07,
+      "cost_per_1m_out": 8.5,
+      "cost_per_1m_in_cached": 0.11,
+      "cost_per_1m_out_cached": 0,
+      "context_window": 400000,
+      "default_max_tokens": 128000,
+      "can_reason": true,
+      "reasoning_levels": [
+        "low",
+        "medium",
+        "high"
+      ],
+      "default_reasoning_effort": "medium",
+      "supports_attachments": true
+    },
+    {
+      "id": "gpt-5-codex",
+      "name": "GPT-5 Codex",
+      "cost_per_1m_in": 1.07,
+      "cost_per_1m_out": 8.5,
+      "cost_per_1m_in_cached": 0.11,
+      "cost_per_1m_out_cached": 0,
+      "context_window": 400000,
+      "default_max_tokens": 128000,
+      "can_reason": true,
+      "reasoning_levels": [
+        "low",
+        "medium",
+        "high"
+      ],
+      "default_reasoning_effort": "medium",
+      "supports_attachments": true
+    },
+    {
+      "id": "gpt-5-nano",
+      "name": "GPT-5 Nano",
+      "cost_per_1m_in": 0,
+      "cost_per_1m_out": 0,
+      "cost_per_1m_in_cached": 0,
+      "cost_per_1m_out_cached": 0,
+      "context_window": 400000,
+      "default_max_tokens": 128000,
+      "can_reason": true,
+      "reasoning_levels": [
+        "low",
+        "medium",
+        "high"
+      ],
+      "default_reasoning_effort": "medium",
+      "supports_attachments": true
+    },
+    {
+      "id": "gpt-5.1",
+      "name": "GPT-5.1",
+      "cost_per_1m_in": 1.07,
+      "cost_per_1m_out": 8.5,
+      "cost_per_1m_in_cached": 0.11,
+      "cost_per_1m_out_cached": 0,
+      "context_window": 400000,
+      "default_max_tokens": 128000,
+      "can_reason": true,
+      "reasoning_levels": [
+        "low",
+        "medium",
+        "high"
+      ],
+      "default_reasoning_effort": "medium",
+      "supports_attachments": true
+    },
+    {
+      "id": "gpt-5.1-codex",
+      "name": "GPT-5.1 Codex",
+      "cost_per_1m_in": 1.07,
+      "cost_per_1m_out": 8.5,
+      "cost_per_1m_in_cached": 0.11,
+      "cost_per_1m_out_cached": 0,
+      "context_window": 400000,
+      "default_max_tokens": 128000,
+      "can_reason": true,
+      "reasoning_levels": [
+        "low",
+        "medium",
+        "high"
+      ],
+      "default_reasoning_effort": "medium",
+      "supports_attachments": true
+    },
+    {
+      "id": "gpt-5.1-codex-max",
+      "name": "GPT-5.1 Codex Max",
+      "cost_per_1m_in": 1.25,
+      "cost_per_1m_out": 10,
+      "cost_per_1m_in_cached": 0.13,
+      "cost_per_1m_out_cached": 0,
+      "context_window": 400000,
+      "default_max_tokens": 128000,
+      "can_reason": true,
+      "reasoning_levels": [
+        "low",
+        "medium",
+        "high"
+      ],
+      "default_reasoning_effort": "medium",
+      "supports_attachments": true
+    },
+    {
+      "id": "gpt-5.1-codex-mini",
+      "name": "GPT-5.1 Codex Mini",
+      "cost_per_1m_in": 0.25,
+      "cost_per_1m_out": 2,
+      "cost_per_1m_in_cached": 0.03,
+      "cost_per_1m_out_cached": 0,
+      "context_window": 400000,
+      "default_max_tokens": 128000,
+      "can_reason": true,
+      "reasoning_levels": [
+        "low",
+        "medium",
+        "high"
+      ],
+      "default_reasoning_effort": "medium",
+      "supports_attachments": true
+    },
+    {
+      "id": "gpt-5.2",
+      "name": "GPT-5.2",
+      "cost_per_1m_in": 1.75,
+      "cost_per_1m_out": 14,
+      "cost_per_1m_in_cached": 0.18,
+      "cost_per_1m_out_cached": 0,
+      "context_window": 400000,
+      "default_max_tokens": 128000,
+      "can_reason": true,
+      "reasoning_levels": [
+        "low",
+        "medium",
+        "high"
+      ],
+      "default_reasoning_effort": "medium",
+      "supports_attachments": true
+    },
+    {
+      "id": "gpt-5.2-codex",
+      "name": "GPT-5.2 Codex",
+      "cost_per_1m_in": 1.75,
+      "cost_per_1m_out": 14,
+      "cost_per_1m_in_cached": 0.18,
+      "cost_per_1m_out_cached": 0,
+      "context_window": 400000,
+      "default_max_tokens": 128000,
+      "can_reason": true,
+      "reasoning_levels": [
+        "low",
+        "medium",
+        "high"
+      ],
+      "default_reasoning_effort": "medium",
+      "supports_attachments": true
+    },
+    {
+      "id": "gpt-5.3-codex",
+      "name": "GPT-5.3 Codex",
+      "cost_per_1m_in": 1.75,
+      "cost_per_1m_out": 14,
+      "cost_per_1m_in_cached": 0.18,
+      "cost_per_1m_out_cached": 0,
+      "context_window": 400000,
+      "default_max_tokens": 128000,
+      "can_reason": true,
+      "reasoning_levels": [
+        "low",
+        "medium",
+        "high"
+      ],
+      "default_reasoning_effort": "medium",
+      "supports_attachments": true
+    },
+    {
+      "id": "gpt-5.3-codex-spark",
+      "name": "GPT-5.3 Codex Spark",
+      "cost_per_1m_in": 1.75,
+      "cost_per_1m_out": 14,
+      "cost_per_1m_in_cached": 0.18,
+      "cost_per_1m_out_cached": 0,
+      "context_window": 128000,
+      "default_max_tokens": 128000,
+      "can_reason": true,
+      "reasoning_levels": [
+        "low",
+        "medium",
+        "high"
+      ],
+      "default_reasoning_effort": "medium",
+      "supports_attachments": false
+    },
+    {
+      "id": "gpt-5.4",
+      "name": "GPT-5.4",
+      "cost_per_1m_in": 2.5,
+      "cost_per_1m_out": 15,
+      "cost_per_1m_in_cached": 0.25,
+      "cost_per_1m_out_cached": 0,
+      "context_window": 1050000,
+      "default_max_tokens": 128000,
+      "can_reason": true,
+      "reasoning_levels": [
+        "low",
+        "medium",
+        "high"
+      ],
+      "default_reasoning_effort": "medium",
+      "supports_attachments": true
+    },
+    {
+      "id": "gpt-5.4-mini",
+      "name": "GPT-5.4 Mini",
+      "cost_per_1m_in": 0.75,
+      "cost_per_1m_out": 4.5,
+      "cost_per_1m_in_cached": 0.08,
+      "cost_per_1m_out_cached": 0,
+      "context_window": 400000,
+      "default_max_tokens": 128000,
+      "can_reason": true,
+      "reasoning_levels": [
+        "low",
+        "medium",
+        "high"
+      ],
+      "default_reasoning_effort": "medium",
+      "supports_attachments": true
+    },
+    {
+      "id": "gpt-5.4-nano",
+      "name": "GPT-5.4 Nano",
+      "cost_per_1m_in": 0.2,
+      "cost_per_1m_out": 1.25,
+      "cost_per_1m_in_cached": 0.02,
+      "cost_per_1m_out_cached": 0,
+      "context_window": 400000,
+      "default_max_tokens": 128000,
+      "can_reason": true,
+      "reasoning_levels": [
+        "low",
+        "medium",
+        "high"
+      ],
+      "default_reasoning_effort": "medium",
+      "supports_attachments": true
+    },
+    {
+      "id": "gpt-5.4-pro",
+      "name": "GPT-5.4 Pro",
+      "cost_per_1m_in": 30,
+      "cost_per_1m_out": 180,
+      "cost_per_1m_in_cached": 30,
+      "cost_per_1m_out_cached": 0,
+      "context_window": 1050000,
+      "default_max_tokens": 128000,
+      "can_reason": true,
+      "reasoning_levels": [
+        "low",
+        "medium",
+        "high"
+      ],
+      "default_reasoning_effort": "medium",
+      "supports_attachments": true
+    },
+    {
+      "id": "gemini-3-flash",
+      "name": "Gemini 3 Flash",
+      "cost_per_1m_in": 0.5,
+      "cost_per_1m_out": 3,
+      "cost_per_1m_in_cached": 0.05,
+      "cost_per_1m_out_cached": 0,
+      "context_window": 1048576,
+      "default_max_tokens": 65536,
+      "can_reason": true,
+      "reasoning_levels": [
+        "low",
+        "medium",
+        "high"
+      ],
+      "default_reasoning_effort": "medium",
+      "supports_attachments": true
+    },
+    {
+      "id": "gemini-3-pro",
+      "name": "Gemini 3 Pro",
+      "cost_per_1m_in": 2,
+      "cost_per_1m_out": 12,
+      "cost_per_1m_in_cached": 0.2,
+      "cost_per_1m_out_cached": 0,
+      "context_window": 1048576,
+      "default_max_tokens": 65536,
+      "can_reason": true,
+      "reasoning_levels": [
+        "low",
+        "medium",
+        "high"
+      ],
+      "default_reasoning_effort": "medium",
+      "supports_attachments": true
+    },
+    {
+      "id": "gemini-3.1-pro",
+      "name": "Gemini 3.1 Pro Preview",
+      "cost_per_1m_in": 2,
+      "cost_per_1m_out": 12,
+      "cost_per_1m_in_cached": 0.2,
+      "cost_per_1m_out_cached": 0,
+      "context_window": 1048576,
+      "default_max_tokens": 65536,
+      "can_reason": true,
+      "reasoning_levels": [
+        "low",
+        "medium",
+        "high"
+      ],
+      "default_reasoning_effort": "medium",
+      "supports_attachments": true
+    },
+    {
+      "id": "kimi-k2",
+      "name": "Kimi K2",
+      "cost_per_1m_in": 0.4,
+      "cost_per_1m_out": 2.5,
+      "cost_per_1m_in_cached": 0.4,
+      "cost_per_1m_out_cached": 0,
+      "context_window": 262144,
+      "default_max_tokens": 262144,
+      "can_reason": false,
+      "supports_attachments": false
+    },
+    {
+      "id": "kimi-k2-thinking",
+      "name": "Kimi K2 Thinking",
+      "cost_per_1m_in": 0.4,
+      "cost_per_1m_out": 2.5,
+      "cost_per_1m_in_cached": 0.4,
+      "cost_per_1m_out_cached": 0,
+      "context_window": 262144,
+      "default_max_tokens": 262144,
+      "can_reason": true,
+      "reasoning_levels": [
+        "low",
+        "medium",
+        "high"
+      ],
+      "default_reasoning_effort": "medium",
+      "supports_attachments": false
+    },
+    {
+      "id": "kimi-k2.5",
+      "name": "Kimi K2.5",
+      "cost_per_1m_in": 0.6,
+      "cost_per_1m_out": 3,
+      "cost_per_1m_in_cached": 0.08,
+      "cost_per_1m_out_cached": 0,
+      "context_window": 262144,
+      "default_max_tokens": 65536,
+      "can_reason": true,
+      "reasoning_levels": [
+        "low",
+        "medium",
+        "high"
+      ],
+      "default_reasoning_effort": "medium",
+      "supports_attachments": true
+    },
+    {
+      "id": "minimax-m2.1",
+      "name": "MiniMax M2.1",
+      "cost_per_1m_in": 0.3,
+      "cost_per_1m_out": 1.2,
+      "cost_per_1m_in_cached": 0.1,
+      "cost_per_1m_out_cached": 0,
+      "context_window": 204800,
+      "default_max_tokens": 131072,
+      "can_reason": true,
+      "reasoning_levels": [
+        "low",
+        "medium",
+        "high"
+      ],
+      "default_reasoning_effort": "medium",
+      "supports_attachments": false
+    },
+    {
+      "id": "minimax-m2.5",
+      "name": "MiniMax M2.5",
+      "cost_per_1m_in": 0.3,
+      "cost_per_1m_out": 1.2,
+      "cost_per_1m_in_cached": 0.06,
+      "cost_per_1m_out_cached": 0,
+      "context_window": 204800,
+      "default_max_tokens": 131072,
+      "can_reason": true,
+      "reasoning_levels": [
+        "low",
+        "medium",
+        "high"
+      ],
+      "default_reasoning_effort": "medium",
+      "supports_attachments": false
+    },
+    {
+      "id": "minimax-m2.5-free",
+      "name": "MiniMax M2.5 Free",
+      "cost_per_1m_in": 0,
+      "cost_per_1m_out": 0,
+      "cost_per_1m_in_cached": 0,
+      "cost_per_1m_out_cached": 0,
+      "context_window": 204800,
+      "default_max_tokens": 131072,
+      "can_reason": true,
+      "reasoning_levels": [
+        "low",
+        "medium",
+        "high"
+      ],
+      "default_reasoning_effort": "medium",
+      "supports_attachments": false
+    },
+    {
+      "id": "nemotron-3-super-free",
+      "name": "Nemotron 3 Super Free",
+      "cost_per_1m_in": 0,
+      "cost_per_1m_out": 0,
+      "cost_per_1m_in_cached": 0,
+      "cost_per_1m_out_cached": 0,
+      "context_window": 204800,
+      "default_max_tokens": 128000,
+      "can_reason": true,
+      "reasoning_levels": [
+        "low",
+        "medium",
+        "high"
+      ],
+      "default_reasoning_effort": "medium",
+      "supports_attachments": false
+    },
+    {
+      "id": "qwen3.5-plus",
+      "name": "Qwen3.5 Plus",
+      "cost_per_1m_in": 0.2,
+      "cost_per_1m_out": 1.2,
+      "cost_per_1m_in_cached": 0.02,
+      "cost_per_1m_out_cached": 0.25,
+      "context_window": 262144,
+      "default_max_tokens": 65536,
+      "can_reason": true,
+      "reasoning_levels": [
+        "low",
+        "medium",
+        "high"
+      ],
+      "default_reasoning_effort": "medium",
+      "supports_attachments": true
+    },
+    {
+      "id": "qwen3.6-plus",
+      "name": "Qwen3.6 Plus",
+      "cost_per_1m_in": 0.5,
+      "cost_per_1m_out": 3,
+      "cost_per_1m_in_cached": 0.05,
+      "cost_per_1m_out_cached": 0.63,
+      "context_window": 262144,
+      "default_max_tokens": 65536,
+      "can_reason": true,
+      "reasoning_levels": [
+        "low",
+        "medium",
+        "high"
+      ],
+      "default_reasoning_effort": "medium",
+      "supports_attachments": true
+    },
+    {
+      "id": "trinity-large-preview-free",
+      "name": "Trinity Large Preview",
+      "cost_per_1m_in": 0,
+      "cost_per_1m_out": 0,
+      "cost_per_1m_in_cached": 0,
+      "cost_per_1m_out_cached": 0,
+      "context_window": 131072,
+      "default_max_tokens": 131072,
+      "can_reason": false,
+      "supports_attachments": false
+    }
+  ]
+}

internal/providers/providers.go

@@ -99,6 +99,9 @@ var avianConfig []byte
 //go:embed configs/neuralwatt.json
 var neuralwattConfig []byte
 
+//go:embed configs/opencode-zen.json
+var openCodeZenConfig []byte
+
 // ProviderFunc is a function that returns a Provider.
 type ProviderFunc func() catwalk.Provider
 
@@ -133,6 +136,7 @@ var providerRegistry = []ProviderFunc{
 	avianProvider,
 	nebiusProvider,
 	neuralwattProvider,
+	openCodeZenProvider,
 }
 
 // GetAll returns all registered providers.
@@ -272,3 +276,7 @@ func avianProvider() catwalk.Provider {
 func neuralwattProvider() catwalk.Provider {
 	return loadProviderFromConfig(neuralwattConfig)
 }
+
+func openCodeZenProvider() catwalk.Provider {
+	return loadProviderFromConfig(openCodeZenConfig)
+}
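
With the config embedded and the provider registered, it becomes visible to registry consumers. A minimal usage sketch follows (hypothetical; it assumes providers.GetAll returns []catwalk.Provider as its doc comment suggests, and it would need to live inside this module since internal/providers is an internal package):

package main

import (
	"fmt"

	"charm.land/catwalk/internal/providers"
	"charm.land/catwalk/pkg/catwalk"
)

func main() {
	// Find the newly registered OpenCode Zen provider in the registry.
	for _, p := range providers.GetAll() {
		if p.ID == catwalk.InferenceProviderOpenCodeZen {
			fmt.Printf("%s: %d models (default: %s)\n", p.Name, len(p.Models), p.DefaultLargeModelID)
		}
	}
}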

pkg/catwalk/provider.go

@@ -50,6 +50,7 @@ const (
 	InferenceProviderAvian        InferenceProvider = "avian"
 	InferenceProviderNebius       InferenceProvider = "nebius"
 	InferenceProviderNeuralwatt   InferenceProvider = "neuralwatt"
+	InferenceProviderOpenCodeZen  InferenceProvider = "opencode-zen"
 )
 
 // Provider represents an AI provider configuration.
@@ -123,6 +124,7 @@ func KnownProviders() []InferenceProvider {
 		InferenceProviderAvian,
 		InferenceProviderNebius,
 		InferenceProviderNeuralwatt,
+		InferenceProviderOpenCodeZen,
 	}
 }