// main.go

  1// Package main provides a command-line tool to fetch models from io.net
  2// and generate a configuration file for the provider.
  3package main
  4
  5import (
  6	"context"
  7	"encoding/json"
  8	"fmt"
  9	"io"
 10	"log"
 11	"net/http"
 12	"os"
 13	"slices"
 14	"strings"
 15	"time"
 16
 17	"charm.land/catwalk/pkg/catwalk"
 18	xstrings "github.com/charmbracelet/x/exp/strings"
 19)
 20
 21// Model represents a model from the io.net API.
// Model represents a model from the io.net API.
type Model struct {
	ID                   string  `json:"id"`                      // unique model identifier, e.g. "zai-org/GLM-4.7"
	Name                 string  `json:"name"`                    // human-readable display name
	ContextWindow        int     `json:"context_window"`          // maximum context size in tokens
	SupportsImagesInput  bool    `json:"supports_images_input"`   // whether the model accepts image inputs
	InputTokenPrice      float64 `json:"input_token_price"`       // price per input token (converted to per-1M downstream)
	OutputTokenPrice     float64 `json:"output_token_price"`      // price per output token
	CacheWriteTokenPrice float64 `json:"cache_write_token_price"` // price per token written to the prompt cache
	CacheReadTokenPrice  float64 `json:"cache_read_token_price"`  // price per token read from the prompt cache
}
 32
 33// Response is the response structure for the io.net models API.
// Response is the response structure for the io.net models API.
type Response struct {
	Data []Model `json:"data"` // list of available models
}
 37
 38// This is used to generate the ionet.json config file.
 39func main() {
 40	provider := catwalk.Provider{
 41		Name:                "io.net",
 42		ID:                  "ionet",
 43		APIKey:              "$IONET_API_KEY",
 44		APIEndpoint:         "https://api.intelligence.io.solutions/api/v1",
 45		Type:                catwalk.TypeOpenAICompat,
 46		DefaultLargeModelID: "zai-org/GLM-4.7",
 47		DefaultSmallModelID: "zai-org/GLM-4.7-Flash",
 48	}
 49
 50	resp, err := fetchModels(provider.APIEndpoint)
 51	if err != nil {
 52		log.Fatal("Error fetching io.net models:", err)
 53	}
 54
 55	provider.Models = make([]catwalk.Model, 0, len(resp.Data))
 56
 57	modelIDSet := make(map[string]struct{})
 58
 59	for _, model := range resp.Data {
 60		// Avoid duplicate entries
 61		if _, ok := modelIDSet[model.ID]; ok {
 62			continue
 63		}
 64		modelIDSet[model.ID] = struct{}{}
 65
 66		if model.ContextWindow < 20000 {
 67			continue
 68		}
 69		if !supportsTools(model.ID) {
 70			continue
 71		}
 72
 73		canReason := isReasoningModel(model.ID)
 74		var reasoningLevels []string
 75		var defaultReasoning string
 76		if canReason {
 77			reasoningLevels = []string{"low", "medium", "high"}
 78			defaultReasoning = "medium"
 79		}
 80
 81		// Convert token prices (per token) to cost per 1M tokens
 82		costPer1MIn := model.InputTokenPrice * 1_000_000
 83		costPer1MOut := model.OutputTokenPrice * 1_000_000
 84		costPer1MInCached := model.CacheReadTokenPrice * 1_000_000
 85		costPer1MOutCached := model.CacheWriteTokenPrice * 1_000_000
 86
 87		m := catwalk.Model{
 88			ID:                     model.ID,
 89			Name:                   model.Name,
 90			CostPer1MIn:            costPer1MIn,
 91			CostPer1MOut:           costPer1MOut,
 92			CostPer1MInCached:      costPer1MInCached,
 93			CostPer1MOutCached:     costPer1MOutCached,
 94			ContextWindow:          int64(model.ContextWindow),
 95			DefaultMaxTokens:       int64(model.ContextWindow) / 10,
 96			CanReason:              canReason,
 97			ReasoningLevels:        reasoningLevels,
 98			DefaultReasoningEffort: defaultReasoning,
 99			SupportsImages:         model.SupportsImagesInput,
100		}
101
102		provider.Models = append(provider.Models, m)
103		fmt.Printf("Added model %s with context window %d\n", model.ID, model.ContextWindow)
104	}
105
106	slices.SortFunc(provider.Models, func(a catwalk.Model, b catwalk.Model) int {
107		return strings.Compare(a.Name, b.Name)
108	})
109
110	// Save the JSON in internal/providers/configs/ionet.json
111	data, err := json.MarshalIndent(provider, "", "  ")
112	if err != nil {
113		log.Fatal("Error marshaling io.net provider:", err)
114	}
115	data = append(data, '\n')
116
117	if err := os.WriteFile("internal/providers/configs/ionet.json", data, 0o600); err != nil {
118		log.Fatal("Error writing io.net provider config:", err)
119	}
120
121	fmt.Printf("Generated ionet.json with %d models\n", len(provider.Models))
122}
123
124func fetchModels(apiEndpoint string) (*Response, error) {
125	client := &http.Client{Timeout: 30 * time.Second}
126
127	req, err := http.NewRequestWithContext(context.Background(), "GET", apiEndpoint+"/models", nil)
128	if err != nil {
129		return nil, fmt.Errorf("failed to create http request: %w", err)
130	}
131	req.Header.Set("User-Agent", "Charm-Catwalk/1.0")
132
133	resp, err := client.Do(req)
134	if err != nil {
135		return nil, fmt.Errorf("failed to do http request: %w", err)
136	}
137	defer resp.Body.Close() //nolint:errcheck
138
139	body, _ := io.ReadAll(resp.Body)
140
141	// for debugging
142	_ = os.MkdirAll("tmp", 0o700)
143	_ = os.WriteFile("tmp/io-net-response.json", body, 0o600)
144
145	if resp.StatusCode != http.StatusOK {
146		return nil, fmt.Errorf("status %d: %s", resp.StatusCode, body)
147	}
148
149	var mr Response
150	if err := json.Unmarshal(body, &mr); err != nil {
151		return nil, fmt.Errorf("unable to unmarshal json: %w", err)
152	}
153	return &mr, nil
154}
155
// isReasoningModel reports whether the model ID indicates reasoning
// capability, based on a case-insensitive substring match against known
// reasoning-model markers.
func isReasoningModel(modelID string) bool {
	lowered := strings.ToLower(modelID)
	markers := []string{"-thinking", "deepseek", "glm", "gpt-oss", "llama"}
	for _, marker := range markers {
		if strings.Contains(lowered, marker) {
			return true
		}
	}
	return false
}
167
// supportsTools determines if a model supports tool calling based on its ID.
// A model is excluded when its lowercased ID contains any marker from the
// known-unsupported list; everything else is assumed to support tools.
func supportsTools(modelID string) bool {
	lowered := strings.ToLower(modelID)
	excluded := []string{"deepseek", "llama-4", "mistral-nemo", "qwen2.5", "gpt-oss"}
	for _, marker := range excluded {
		if strings.Contains(lowered, marker) {
			return false
		}
	}
	return true
}
178}