feat: opencode go support (#248)

Andrey Nering created

Add generator for OpenCode Go provider that fetches from models.dev/api.json.
Uses OPENCODE_API_KEY env var, registers InferenceProviderOpenCodeGo,
and includes 9 models: glm-5, glm-5.1, kimi-k2.5, mimo-v2-omni,
mimo-v2-pro, minimax-m2.5, minimax-m2.7, qwen3.5-plus, qwen3.6-plus.

💘 Generated with Crush

Assisted-by: Kimi K2.5 via Crush <crush@charm.land>

Change summary

.github/workflows/update.yml                |   3 
Taskfile.yaml                               |   5 
cmd/opencode-go/main.go                     | 142 ++++++++++++++++++
internal/providers/configs/opencode-go.json | 173 +++++++++++++++++++++++
internal/providers/providers.go             |   8 +
pkg/catwalk/provider.go                     |   2 
6 files changed, 333 insertions(+)

Detailed changes

.github/workflows/update.yml 🔗

@@ -40,6 +40,9 @@ jobs:
       - run: go run ./cmd/ionet/main.go
         continue-on-error: true
 
+      - run: go run ./cmd/opencode-go/main.go
+        continue-on-error: true
+
       - run: go run ./cmd/opencode-zen/main.go
         continue-on-error: true
 

Taskfile.yaml 🔗

@@ -115,6 +115,11 @@ tasks:
     cmds:
       - go run cmd/neuralwatt/main.go
 
+  gen:opencode-go:
+    desc: Generate OpenCode Go provider configurations
+    cmds:
+      - go run cmd/opencode-go/main.go
+
   gen:opencode-zen:
     desc: Generate OpenCode Zen provider configurations
     cmds:

cmd/opencode-go/main.go 🔗

@@ -0,0 +1,142 @@
+// Package main generates the OpenCode Go provider configuration.
+package main
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+	"log"
+	"math"
+	"net/http"
+	"os"
+	"slices"
+	"strings"
+	"time"
+
+	"charm.land/catwalk/pkg/catwalk"
+)
+
+// PricingData mirrors the per-model "cost" object from models.dev
+// (USD prices; the generator treats them as per-1M-token rates).
+type PricingData struct {
+	Input      float64 `json:"input"`
+	Output     float64 `json:"output"`
+	CacheRead  float64 `json:"cache_read,omitempty"`
+	CacheWrite float64 `json:"cache_write,omitempty"`
+}
+
+// ModelLimit mirrors the per-model "limit" object from models.dev:
+// the context window and maximum output size, in tokens.
+type ModelLimit struct {
+	Context int64 `json:"context"`
+	Output  int64 `json:"output"`
+}
+
+// GoModel is a single model entry as published under the "opencode-go"
+// provider in models.dev/api.json.
+type GoModel struct {
+	ID         string      `json:"id"`
+	Name       string      `json:"name"`
+	Attachment bool        `json:"attachment"`
+	Reasoning  bool        `json:"reasoning"`
+	Cost       PricingData `json:"cost"`
+	Limit      ModelLimit  `json:"limit"`
+}
+
+// GoProviderData is the provider-level envelope for "opencode-go" in
+// models.dev/api.json. Only Models is consumed by this generator; the
+// other fields are decoded for completeness.
+type GoProviderData struct {
+	ID     string             `json:"id"`
+	Name   string             `json:"name"`
+	API    string             `json:"api"`
+	Env    []string           `json:"env"`
+	Models map[string]GoModel `json:"models"`
+}
+
+// fetchGoModels downloads models.dev/api.json and returns the model map
+// published under the "opencode-go" provider key.
+func fetchGoModels() (map[string]GoModel, error) {
+	client := &http.Client{Timeout: 30 * time.Second}
+	// Propagate the constructor error instead of discarding it: a bad
+	// method or URL would otherwise surface as a confusing nil-request
+	// panic inside client.Do.
+	req, err := http.NewRequestWithContext(context.Background(), "GET", "https://models.dev/api.json", nil)
+	if err != nil {
+		return nil, fmt.Errorf("failed to create request: %w", err)
+	}
+	req.Header.Set("User-Agent", "Catwalk/1.0")
+
+	resp, err := client.Do(req)
+	if err != nil {
+		return nil, fmt.Errorf("failed to fetch models: %w", err)
+	}
+	defer func() { _ = resp.Body.Close() }()
+
+	if resp.StatusCode != http.StatusOK {
+		return nil, fmt.Errorf("status %d", resp.StatusCode)
+	}
+
+	// Decode the top level lazily (RawMessage) so only the single
+	// provider entry we care about is fully unmarshaled.
+	var fullData map[string]json.RawMessage
+	if err := json.NewDecoder(resp.Body).Decode(&fullData); err != nil {
+		return nil, fmt.Errorf("failed to decode api.json: %w", err)
+	}
+
+	rawGoData, ok := fullData["opencode-go"]
+	if !ok {
+		return nil, fmt.Errorf("opencode-go provider not found in models.dev/api.json")
+	}
+
+	var goData GoProviderData
+	if err := json.Unmarshal(rawGoData, &goData); err != nil {
+		return nil, fmt.Errorf("failed to unmarshal opencode-go data: %w", err)
+	}
+
+	return goData.Models, nil
+}
+
+// main generates internal/providers/configs/opencode-go.json from the
+// model catalog published on models.dev.
+func main() {
+	goModels, err := fetchGoModels()
+	if err != nil {
+		// Fatalf with an explicit %v keeps a space after the colon;
+		// log.Fatal("…:", err) concatenates the string and the error
+		// with no separator (fmt.Sprint only inserts a space between
+		// two non-string operands).
+		log.Fatalf("Error fetching OpenCode Go models: %v", err)
+	}
+
+	goProvider := catwalk.Provider{
+		Name:                "OpenCode Go",
+		ID:                  catwalk.InferenceProviderOpenCodeGo,
+		APIKey:              "$OPENCODE_API_KEY",
+		APIEndpoint:         "https://opencode.ai/zen/go/v1",
+		Type:                catwalk.TypeOpenAICompat,
+		DefaultLargeModelID: "minimax-m2.7",
+		DefaultSmallModelID: "minimax-m2.7",
+	}
+
+	for _, goModel := range goModels {
+		// Round prices to two decimal places so the generated JSON is
+		// stable and free of float noise.
+		costPer1MIn := math.Round(goModel.Cost.Input*100) / 100
+		costPer1MOut := math.Round(goModel.Cost.Output*100) / 100
+		costPer1MInCached := math.Round(goModel.Cost.CacheRead*100) / 100
+
+		// Reasoning-capable models get the standard effort levels.
+		var reasoningLevels []string
+		var defaultReasoningEffort string
+		if goModel.Reasoning {
+			reasoningLevels = []string{"low", "medium", "high"}
+			defaultReasoningEffort = "medium"
+		}
+
+		m := catwalk.Model{
+			ID:                     goModel.ID,
+			Name:                   goModel.Name,
+			CostPer1MIn:            costPer1MIn,
+			CostPer1MOut:           costPer1MOut,
+			CostPer1MInCached:      costPer1MInCached,
+			ContextWindow:          goModel.Limit.Context,
+			DefaultMaxTokens:       goModel.Limit.Output,
+			SupportsImages:         goModel.Attachment,
+			CanReason:              goModel.Reasoning,
+			ReasoningLevels:        reasoningLevels,
+			DefaultReasoningEffort: defaultReasoningEffort,
+		}
+
+		goProvider.Models = append(goProvider.Models, m)
+		fmt.Printf("Added model %s (%s)\n", goModel.ID, goModel.Name)
+	}
+
+	// Sort by display name for deterministic output: the source is a
+	// map, whose iteration order is random.
+	slices.SortFunc(goProvider.Models, func(a catwalk.Model, b catwalk.Model) int {
+		return strings.Compare(a.Name, b.Name)
+	})
+
+	data, err := json.MarshalIndent(goProvider, "", "  ")
+	if err != nil {
+		log.Fatalf("Error marshaling provider: %v", err)
+	}
+	data = append(data, '\n')
+
+	if err := os.WriteFile("internal/providers/configs/opencode-go.json", data, 0o600); err != nil {
+		log.Fatalf("Error writing provider config: %v", err)
+	}
+
+	fmt.Printf("Generated opencode-go.json with %d models\n", len(goProvider.Models))
+}

internal/providers/configs/opencode-go.json 🔗

@@ -0,0 +1,173 @@
+{
+  "name": "OpenCode Go",
+  "id": "opencode-go",
+  "api_key": "$OPENCODE_API_KEY",
+  "api_endpoint": "https://opencode.ai/zen/go/v1",
+  "type": "openai-compat",
+  "default_large_model_id": "minimax-m2.7",
+  "default_small_model_id": "minimax-m2.7",
+  "models": [
+    {
+      "id": "glm-5",
+      "name": "GLM-5",
+      "cost_per_1m_in": 1,
+      "cost_per_1m_out": 3.2,
+      "cost_per_1m_in_cached": 0.2,
+      "cost_per_1m_out_cached": 0,
+      "context_window": 204800,
+      "default_max_tokens": 131072,
+      "can_reason": true,
+      "reasoning_levels": [
+        "low",
+        "medium",
+        "high"
+      ],
+      "default_reasoning_effort": "medium",
+      "supports_attachments": false
+    },
+    {
+      "id": "glm-5.1",
+      "name": "GLM-5.1",
+      "cost_per_1m_in": 1.4,
+      "cost_per_1m_out": 4.4,
+      "cost_per_1m_in_cached": 0.26,
+      "cost_per_1m_out_cached": 0,
+      "context_window": 204800,
+      "default_max_tokens": 131072,
+      "can_reason": true,
+      "reasoning_levels": [
+        "low",
+        "medium",
+        "high"
+      ],
+      "default_reasoning_effort": "medium",
+      "supports_attachments": false
+    },
+    {
+      "id": "kimi-k2.5",
+      "name": "Kimi K2.5",
+      "cost_per_1m_in": 0.6,
+      "cost_per_1m_out": 3,
+      "cost_per_1m_in_cached": 0.1,
+      "cost_per_1m_out_cached": 0,
+      "context_window": 262144,
+      "default_max_tokens": 65536,
+      "can_reason": true,
+      "reasoning_levels": [
+        "low",
+        "medium",
+        "high"
+      ],
+      "default_reasoning_effort": "medium",
+      "supports_attachments": true
+    },
+    {
+      "id": "mimo-v2-omni",
+      "name": "MiMo V2 Omni",
+      "cost_per_1m_in": 0.4,
+      "cost_per_1m_out": 2,
+      "cost_per_1m_in_cached": 0.08,
+      "cost_per_1m_out_cached": 0,
+      "context_window": 262144,
+      "default_max_tokens": 64000,
+      "can_reason": true,
+      "reasoning_levels": [
+        "low",
+        "medium",
+        "high"
+      ],
+      "default_reasoning_effort": "medium",
+      "supports_attachments": true
+    },
+    {
+      "id": "mimo-v2-pro",
+      "name": "MiMo V2 Pro",
+      "cost_per_1m_in": 1,
+      "cost_per_1m_out": 3,
+      "cost_per_1m_in_cached": 0.2,
+      "cost_per_1m_out_cached": 0,
+      "context_window": 1048576,
+      "default_max_tokens": 64000,
+      "can_reason": true,
+      "reasoning_levels": [
+        "low",
+        "medium",
+        "high"
+      ],
+      "default_reasoning_effort": "medium",
+      "supports_attachments": true
+    },
+    {
+      "id": "minimax-m2.5",
+      "name": "MiniMax M2.5",
+      "cost_per_1m_in": 0.3,
+      "cost_per_1m_out": 1.2,
+      "cost_per_1m_in_cached": 0.03,
+      "cost_per_1m_out_cached": 0,
+      "context_window": 204800,
+      "default_max_tokens": 131072,
+      "can_reason": true,
+      "reasoning_levels": [
+        "low",
+        "medium",
+        "high"
+      ],
+      "default_reasoning_effort": "medium",
+      "supports_attachments": false
+    },
+    {
+      "id": "minimax-m2.7",
+      "name": "MiniMax M2.7",
+      "cost_per_1m_in": 0.3,
+      "cost_per_1m_out": 1.2,
+      "cost_per_1m_in_cached": 0.06,
+      "cost_per_1m_out_cached": 0,
+      "context_window": 204800,
+      "default_max_tokens": 131072,
+      "can_reason": true,
+      "reasoning_levels": [
+        "low",
+        "medium",
+        "high"
+      ],
+      "default_reasoning_effort": "medium",
+      "supports_attachments": false
+    },
+    {
+      "id": "qwen3.5-plus",
+      "name": "Qwen3.5 Plus",
+      "cost_per_1m_in": 0.2,
+      "cost_per_1m_out": 1.2,
+      "cost_per_1m_in_cached": 0.02,
+      "cost_per_1m_out_cached": 0,
+      "context_window": 262144,
+      "default_max_tokens": 65536,
+      "can_reason": true,
+      "reasoning_levels": [
+        "low",
+        "medium",
+        "high"
+      ],
+      "default_reasoning_effort": "medium",
+      "supports_attachments": true
+    },
+    {
+      "id": "qwen3.6-plus",
+      "name": "Qwen3.6 Plus",
+      "cost_per_1m_in": 0.5,
+      "cost_per_1m_out": 3,
+      "cost_per_1m_in_cached": 0.05,
+      "cost_per_1m_out_cached": 0,
+      "context_window": 262144,
+      "default_max_tokens": 65536,
+      "can_reason": true,
+      "reasoning_levels": [
+        "low",
+        "medium",
+        "high"
+      ],
+      "default_reasoning_effort": "medium",
+      "supports_attachments": true
+    }
+  ]
+}

internal/providers/providers.go 🔗

@@ -102,6 +102,9 @@ var neuralwattConfig []byte
 //go:embed configs/opencode-zen.json
 var openCodeZenConfig []byte
 
+//go:embed configs/opencode-go.json
+var openCodeGoConfig []byte
+
 // ProviderFunc is a function that returns a Provider.
 type ProviderFunc func() catwalk.Provider
 
@@ -137,6 +140,7 @@ var providerRegistry = []ProviderFunc{
 	nebiusProvider,
 	neuralwattProvider,
 	openCodeZenProvider,
+	openCodeGoProvider,
 }
 
 // GetAll returns all registered providers.
@@ -280,3 +284,7 @@ func neuralwattProvider() catwalk.Provider {
 func openCodeZenProvider() catwalk.Provider {
 	return loadProviderFromConfig(openCodeZenConfig)
 }
+
+// openCodeGoProvider loads the embedded OpenCode Go provider config.
+func openCodeGoProvider() catwalk.Provider {
+	return loadProviderFromConfig(openCodeGoConfig)
+}

pkg/catwalk/provider.go 🔗

@@ -51,6 +51,7 @@ const (
 	InferenceProviderNebius       InferenceProvider = "nebius"
 	InferenceProviderNeuralwatt   InferenceProvider = "neuralwatt"
 	InferenceProviderOpenCodeZen  InferenceProvider = "opencode-zen"
+	InferenceProviderOpenCodeGo   InferenceProvider = "opencode-go"
 )
 
 // Provider represents an AI provider configuration.
@@ -125,6 +126,7 @@ func KnownProviders() []InferenceProvider {
 		InferenceProviderNebius,
 		InferenceProviderNeuralwatt,
 		InferenceProviderOpenCodeZen,
+		InferenceProviderOpenCodeGo,
 	}
 }