feat: add io.net provider (#179)

Created by Andrey Nering

Assisted-by: Kimi K2.5 via Crush <crush@charm.land>

Change summary

.github/workflows/update.yml          |   1 
Taskfile.yaml                         |   5 
cmd/ionet/main.go                     | 219 +++++++++++++++++++++++++++++
go.mod                                |   1 
go.sum                                |   2 
internal/providers/configs/ionet.json | 158 ++++++++++++++++++++
internal/providers/providers.go       |   8 +
pkg/catwalk/provider.go               |   1 
8 files changed, 395 insertions(+)

Detailed changes

.github/workflows/update.yml 🔗

@@ -19,6 +19,7 @@ jobs:
           go-version-file: go.mod
       - name: Generate provider configurations
         run: |
+          go run ./cmd/ionet/main.go
           go run ./cmd/openrouter/main.go
           go run ./cmd/synthetic/main.go
           go run ./cmd/vercel/main.go

Taskfile.yaml 🔗

@@ -72,6 +72,11 @@ tasks:
     cmds:
       - go run cmd/huggingface/main.go
 
+  gen:ionet:
+    desc: Generate io.net provider configurations
+    cmds:
+      - go run cmd/ionet/main.go
+
   gen:openrouter:
     desc: Generate openrouter provider configurations
     cmds:

cmd/ionet/main.go 🔗

@@ -0,0 +1,219 @@
+// Package main provides a command-line tool to fetch models from io.net
+// and generate a configuration file for the provider.
+package main
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+	"io"
+	"log"
+	"net/http"
+	"os"
+	"slices"
+	"strings"
+	"time"
+
+	"charm.land/catwalk/pkg/catwalk"
+	xstrings "github.com/charmbracelet/x/exp/strings"
+)
+
+// Model represents a model from the io.net API.
+type Model struct {
+	ID                   string       `json:"id"`
+	Object               string       `json:"object"`
+	Created              int64        `json:"created"`
+	OwnedBy              string       `json:"owned_by"`
+	Root                 *string      `json:"root"`
+	Parent               *string      `json:"parent"`
+	MaxModelLen          *int         `json:"max_model_len"`
+	Permission           []Permission `json:"permission"`
+	MaxTokens            *int         `json:"max_tokens"`
+	ContextWindow        int          `json:"context_window"`
+	SupportsImagesInput  bool         `json:"supports_images_input"`
+	SupportsPromptCache  bool         `json:"supports_prompt_cache"`
+	InputTokenPrice      float64      `json:"input_token_price"`
+	OutputTokenPrice     float64      `json:"output_token_price"`
+	CacheWriteTokenPrice float64      `json:"cache_write_token_price"`
+	CacheReadTokenPrice  float64      `json:"cache_read_token_price"`
+	Precision            *string      `json:"precision"`
+	AvgLatencyMsPerDay   float64      `json:"avg_latency_ms_per_day"`
+	AvgThroughputPerDay  float64      `json:"avg_throughput_per_day"`
+	SupportsAttestation  bool         `json:"supports_attestation"`
+}
+
+// Permission represents a model permission from the io.net API.
+type Permission struct {
+	ID                 string  `json:"id"`
+	Object             string  `json:"object"`
+	Created            int64   `json:"created"`
+	AllowCreateEngine  bool    `json:"allow_create_engine"`
+	AllowSampling      bool    `json:"allow_sampling"`
+	AllowLogprobs      bool    `json:"allow_logprobs"`
+	AllowSearchIndices bool    `json:"allow_search_indices"`
+	AllowView          bool    `json:"allow_view"`
+	AllowFineTuning    bool    `json:"allow_fine_tuning"`
+	Organization       string  `json:"organization"`
+	Group              *string `json:"group"`
+	IsBlocking         bool    `json:"is_blocking"`
+}
+
+// Response is the response structure for the io.net models API.
+type Response struct {
+	Object string  `json:"object"`
+	Data   []Model `json:"data"`
+}
+
+// This is used to generate the ionet.json config file.
+func main() {
+	provider := catwalk.Provider{
+		Name:                "io.net",
+		ID:                  "ionet",
+		APIKey:              "$IONET_API_KEY",
+		APIEndpoint:         "https://api.intelligence.io.solutions/api/v1",
+		Type:                catwalk.TypeOpenAICompat,
+		DefaultLargeModelID: "zai-org/GLM-4.7",
+		DefaultSmallModelID: "zai-org/GLM-4.7-Flash",
+	}
+
+	resp, err := fetchModels(provider.APIEndpoint)
+	if err != nil {
+		log.Fatal("Error fetching io.net models:", err)
+	}
+
+	provider.Models = make([]catwalk.Model, 0, len(resp.Data))
+
+	modelIDSet := make(map[string]struct{})
+
+	for _, model := range resp.Data {
+		// Avoid duplicate entries
+		if _, ok := modelIDSet[model.ID]; ok {
+			continue
+		}
+		modelIDSet[model.ID] = struct{}{}
+
+		if model.ContextWindow < 20000 {
+			continue
+		}
+		if !supportsTools(model.ID) {
+			continue
+		}
+
+		canReason := isReasoningModel(model.ID)
+		var reasoningLevels []string
+		var defaultReasoning string
+		if canReason {
+			reasoningLevels = []string{"low", "medium", "high"}
+			defaultReasoning = "medium"
+		}
+
+		// Convert token prices (per token) to cost per 1M tokens
+		costPer1MIn := model.InputTokenPrice * 1_000_000
+		costPer1MOut := model.OutputTokenPrice * 1_000_000
+		costPer1MInCached := model.CacheReadTokenPrice * 1_000_000
+		costPer1MOutCached := model.CacheWriteTokenPrice * 1_000_000
+
+		m := catwalk.Model{
+			ID:                     model.ID,
+			Name:                   getModelName(model.ID),
+			CostPer1MIn:            costPer1MIn,
+			CostPer1MOut:           costPer1MOut,
+			CostPer1MInCached:      costPer1MInCached,
+			CostPer1MOutCached:     costPer1MOutCached,
+			ContextWindow:          int64(model.ContextWindow),
+			DefaultMaxTokens:       int64(model.ContextWindow) / 10,
+			CanReason:              canReason,
+			ReasoningLevels:        reasoningLevels,
+			DefaultReasoningEffort: defaultReasoning,
+			SupportsImages:         model.SupportsImagesInput,
+		}
+
+		provider.Models = append(provider.Models, m)
+		fmt.Printf("Added model %s with context window %d\n", model.ID, model.ContextWindow)
+	}
+
+	slices.SortFunc(provider.Models, func(a catwalk.Model, b catwalk.Model) int {
+		return strings.Compare(a.Name, b.Name)
+	})
+
+	// Save the JSON in internal/providers/configs/ionet.json
+	data, err := json.MarshalIndent(provider, "", "  ")
+	if err != nil {
+		log.Fatal("Error marshaling io.net provider:", err)
+	}
+	data = append(data, '\n')
+
+	if err := os.WriteFile("internal/providers/configs/ionet.json", data, 0o600); err != nil {
+		log.Fatal("Error writing io.net provider config:", err)
+	}
+
+	fmt.Printf("Generated ionet.json with %d models\n", len(provider.Models))
+}
+
+func fetchModels(apiEndpoint string) (*Response, error) {
+	client := &http.Client{Timeout: 30 * time.Second}
+
+	req, err := http.NewRequestWithContext(context.Background(), "GET", apiEndpoint+"/models", nil)
+	if err != nil {
+		return nil, fmt.Errorf("failed to create http request: %w", err)
+	}
+	req.Header.Set("User-Agent", "Charm-Catwalk/1.0")
+
+	resp, err := client.Do(req)
+	if err != nil {
+		return nil, fmt.Errorf("failed to do http request: %w", err)
+	}
+	defer resp.Body.Close() //nolint:errcheck
+
+	body, _ := io.ReadAll(resp.Body)
+
+	// for debugging
+	_ = os.MkdirAll("tmp", 0o700)
+	_ = os.WriteFile("tmp/io-net-response.json", body, 0o600)
+
+	if resp.StatusCode != http.StatusOK {
+		return nil, fmt.Errorf("status %d: %s", resp.StatusCode, body)
+	}
+
+	var mr Response
+	if err := json.Unmarshal(body, &mr); err != nil {
+		return nil, fmt.Errorf("unable to unmarshal json: %w", err)
+	}
+	return &mr, nil
+}
+
+// getModelName extracts a clean display name from the model ID.
+func getModelName(modelID string) string {
+	// Strip everything before the last /
+	name := modelID
+	if idx := strings.LastIndex(modelID, "/"); idx != -1 {
+		name = modelID[idx+1:]
+	}
+	// Replace hyphens with spaces
+	name = strings.ReplaceAll(name, "-", " ")
+	return name
+}
+
+// isReasoningModel checks if the model ID indicates reasoning capability.
+func isReasoningModel(modelID string) bool {
+	return xstrings.ContainsAnyOf(
+		strings.ToLower(modelID),
+		"-thinking",
+		"deepseek",
+		"glm",
+		"gpt-oss",
+		"llama",
+	)
+}
+
+// supportsTools determines if a model supports tool calling based on its ID.
+func supportsTools(modelID string) bool {
+	return !xstrings.ContainsAnyOf(
+		strings.ToLower(modelID),
+		"deepseek",
+		"llama-4",
+		"mistral-nemo",
+		"qwen2.5",
+		"gpt-oss",
+	)
+}

go.mod 🔗

@@ -4,6 +4,7 @@ go 1.25.5
 
 require (
 	github.com/charmbracelet/x/etag v0.2.0
+	github.com/charmbracelet/x/exp/strings v0.1.0
 	github.com/prometheus/client_golang v1.23.2
 )
 

go.sum 🔗

@@ -4,6 +4,8 @@ github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UF
 github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
 github.com/charmbracelet/x/etag v0.2.0 h1:Euj1VkheoHfTYA9y+TCwkeXF/hN8Fb9l4LqZl79pt04=
 github.com/charmbracelet/x/etag v0.2.0/go.mod h1:C1B7/bsgvzzxpfu0Rabbd+rTHJa5TmC/qgTseCf6DF0=
+github.com/charmbracelet/x/exp/strings v0.1.0 h1:i69S2XI7uG1u4NLGeJPSYU++Nmjvpo9nwd6aoEm7gkA=
+github.com/charmbracelet/x/exp/strings v0.1.0/go.mod h1:/ehtMPNh9K4odGFkqYJKpIYyePhdp1hLBRvyY4bWkH8=
 github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
 github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
 github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=

internal/providers/configs/ionet.json 🔗

@@ -0,0 +1,158 @@
+{
+  "name": "io.net",
+  "id": "ionet",
+  "api_key": "$IONET_API_KEY",
+  "api_endpoint": "https://api.intelligence.io.solutions/api/v1",
+  "type": "openai-compat",
+  "default_large_model_id": "zai-org/GLM-4.7",
+  "default_small_model_id": "zai-org/GLM-4.7-Flash",
+  "models": [
+    {
+      "id": "zai-org/GLM-4.6",
+      "name": "GLM 4.6",
+      "cost_per_1m_in": 0.35,
+      "cost_per_1m_out": 1.5,
+      "cost_per_1m_in_cached": 0.175,
+      "cost_per_1m_out_cached": 0.7,
+      "context_window": 200000,
+      "default_max_tokens": 20000,
+      "can_reason": true,
+      "reasoning_levels": [
+        "low",
+        "medium",
+        "high"
+      ],
+      "default_reasoning_effort": "medium",
+      "supports_attachments": false,
+      "options": {}
+    },
+    {
+      "id": "zai-org/GLM-4.7",
+      "name": "GLM 4.7",
+      "cost_per_1m_in": 0.33,
+      "cost_per_1m_out": 1.54,
+      "cost_per_1m_in_cached": 0.165,
+      "cost_per_1m_out_cached": 0.66,
+      "context_window": 200000,
+      "default_max_tokens": 20000,
+      "can_reason": true,
+      "reasoning_levels": [
+        "low",
+        "medium",
+        "high"
+      ],
+      "default_reasoning_effort": "medium",
+      "supports_attachments": false,
+      "options": {}
+    },
+    {
+      "id": "zai-org/GLM-4.7-Flash",
+      "name": "GLM 4.7 Flash",
+      "cost_per_1m_in": 0.07,
+      "cost_per_1m_out": 0.39999999999999997,
+      "cost_per_1m_in_cached": 0.035,
+      "cost_per_1m_out_cached": 0.14,
+      "context_window": 200000,
+      "default_max_tokens": 20000,
+      "can_reason": true,
+      "reasoning_levels": [
+        "low",
+        "medium",
+        "high"
+      ],
+      "default_reasoning_effort": "medium",
+      "supports_attachments": false,
+      "options": {}
+    },
+    {
+      "id": "moonshotai/Kimi-K2-Instruct-0905",
+      "name": "Kimi K2 Instruct 0905",
+      "cost_per_1m_in": 0.39,
+      "cost_per_1m_out": 1.9,
+      "cost_per_1m_in_cached": 0.195,
+      "cost_per_1m_out_cached": 0.78,
+      "context_window": 262144,
+      "default_max_tokens": 26214,
+      "can_reason": false,
+      "supports_attachments": false,
+      "options": {}
+    },
+    {
+      "id": "moonshotai/Kimi-K2-Thinking",
+      "name": "Kimi K2 Thinking",
+      "cost_per_1m_in": 0.32,
+      "cost_per_1m_out": 0.48,
+      "cost_per_1m_in_cached": 0.16,
+      "cost_per_1m_out_cached": 0.64,
+      "context_window": 262144,
+      "default_max_tokens": 26214,
+      "can_reason": true,
+      "reasoning_levels": [
+        "low",
+        "medium",
+        "high"
+      ],
+      "default_reasoning_effort": "medium",
+      "supports_attachments": false,
+      "options": {}
+    },
+    {
+      "id": "meta-llama/Llama-3.3-70B-Instruct",
+      "name": "Llama 3.3 70B Instruct",
+      "cost_per_1m_in": 0.09999999999999999,
+      "cost_per_1m_out": 0.32,
+      "cost_per_1m_in_cached": 0.049999999999999996,
+      "cost_per_1m_out_cached": 0.19999999999999998,
+      "context_window": 128000,
+      "default_max_tokens": 12800,
+      "can_reason": true,
+      "reasoning_levels": [
+        "low",
+        "medium",
+        "high"
+      ],
+      "default_reasoning_effort": "medium",
+      "supports_attachments": false,
+      "options": {}
+    },
+    {
+      "id": "mistralai/Mistral-Large-Instruct-2411",
+      "name": "Mistral Large Instruct 2411",
+      "cost_per_1m_in": 2,
+      "cost_per_1m_out": 6,
+      "cost_per_1m_in_cached": 1,
+      "cost_per_1m_out_cached": 4,
+      "context_window": 128000,
+      "default_max_tokens": 12800,
+      "can_reason": false,
+      "supports_attachments": true,
+      "options": {}
+    },
+    {
+      "id": "Intel/Qwen3-Coder-480B-A35B-Instruct-int4-mixed-ar",
+      "name": "Qwen3 Coder 480B A35B Instruct int4 mixed ar",
+      "cost_per_1m_in": 0.22,
+      "cost_per_1m_out": 0.95,
+      "cost_per_1m_in_cached": 0.11,
+      "cost_per_1m_out_cached": 0.44,
+      "context_window": 106000,
+      "default_max_tokens": 10600,
+      "can_reason": false,
+      "supports_attachments": false,
+      "options": {}
+    },
+    {
+      "id": "Qwen/Qwen3-Next-80B-A3B-Instruct",
+      "name": "Qwen3 Next 80B A3B Instruct",
+      "cost_per_1m_in": 0.06,
+      "cost_per_1m_out": 0.6,
+      "cost_per_1m_in_cached": 0.03,
+      "cost_per_1m_out_cached": 0.12,
+      "context_window": 262144,
+      "default_max_tokens": 26214,
+      "can_reason": false,
+      "supports_attachments": false,
+      "options": {}
+    }
+  ]
+}

internal/providers/providers.go 🔗

@@ -72,6 +72,9 @@ var vercelConfig []byte
 //go:embed configs/minimax.json
 var miniMaxConfig []byte
 
+//go:embed configs/ionet.json
+var ioNetConfig []byte
+
 // ProviderFunc is a function that returns a Provider.
 type ProviderFunc func() catwalk.Provider
 
@@ -97,6 +100,7 @@ var providerRegistry = []ProviderFunc{
 	copilotProvider,
 	vercelProvider,
 	miniMaxProvider,
+	ioNetProvider,
 }
 
 // GetAll returns all registered providers.
@@ -200,3 +204,7 @@ func vercelProvider() catwalk.Provider {
 func miniMaxProvider() catwalk.Provider {
 	return loadProviderFromConfig(miniMaxConfig)
 }
+
+func ioNetProvider() catwalk.Provider {
+	return loadProviderFromConfig(ioNetConfig)
+}

pkg/catwalk/provider.go 🔗

@@ -41,6 +41,7 @@ const (
 	InferenceProviderCopilot     InferenceProvider = "copilot"
 	InferenceProviderVercel      InferenceProvider = "vercel"
 	InferenceProviderMiniMax     InferenceProvider = "minimax"
+	InferenceProviderIoNet       InferenceProvider = "ionet"
 )
 
 // Provider represents an AI provider configuration.