main.go

  1// Package main provides a command-line tool to fetch models from io.net
  2// and generate a configuration file for the provider.
  3package main
  4
  5import (
  6	"context"
  7	"encoding/json"
  8	"fmt"
  9	"io"
 10	"log"
 11	"math"
 12	"net/http"
 13	"os"
 14	"slices"
 15	"strings"
 16	"time"
 17
 18	"charm.land/catwalk/pkg/catwalk"
 19	xstrings "github.com/charmbracelet/x/exp/strings"
 20)
 21
// Model represents a model from the io.net API.
//
// Token prices are per single token (see the per-1M conversion in main);
// cache prices follow the same unit.
type Model struct {
	ID                   string  `json:"id"`                      // provider-scoped model identifier, e.g. "moonshotai/Kimi-K2.5"
	Name                 string  `json:"name"`                    // human-readable display name
	ContextWindow        int     `json:"context_window"`          // maximum context size in tokens
	SupportsImagesInput  bool    `json:"supports_images_input"`   // whether the model accepts image inputs
	InputTokenPrice      float64 `json:"input_token_price"`       // price per input token
	OutputTokenPrice     float64 `json:"output_token_price"`      // price per output token
	CacheWriteTokenPrice float64 `json:"cache_write_token_price"` // price per token written to cache
	CacheReadTokenPrice  float64 `json:"cache_read_token_price"`  // price per token read from cache
}
 33
// Response is the response structure for the io.net models API.
type Response struct {
	// Data holds the list of models returned by GET /models.
	Data []Model `json:"data"`
}
 38
 39// This is used to generate the ionet.json config file.
 40func main() {
 41	provider := catwalk.Provider{
 42		Name:                "io.net",
 43		ID:                  "ionet",
 44		APIKey:              "$IONET_API_KEY",
 45		APIEndpoint:         "https://api.intelligence.io.solutions/api/v1",
 46		Type:                catwalk.TypeOpenAICompat,
 47		DefaultLargeModelID: "moonshotai/Kimi-K2.5",
 48		DefaultSmallModelID: "zai-org/GLM-4.7-Flash",
 49	}
 50
 51	resp, err := fetchModels(provider.APIEndpoint)
 52	if err != nil {
 53		log.Fatal("Error fetching io.net models:", err)
 54	}
 55
 56	provider.Models = make([]catwalk.Model, 0, len(resp.Data))
 57
 58	modelIDSet := make(map[string]struct{})
 59
 60	for _, model := range resp.Data {
 61		// Avoid duplicate entries
 62		if _, ok := modelIDSet[model.ID]; ok {
 63			continue
 64		}
 65		modelIDSet[model.ID] = struct{}{}
 66
 67		if model.ContextWindow < 20000 {
 68			continue
 69		}
 70		if !supportsTools(model.ID) {
 71			continue
 72		}
 73
 74		var (
 75			reasoningLevels  []string
 76			defaultReasoning string
 77		)
 78		if supportsReasoningLevels(model.ID) {
 79			reasoningLevels = []string{"low", "medium", "high"}
 80			defaultReasoning = "medium"
 81		}
 82
 83		// Convert token prices (per token) to cost per 1M tokens
 84		roundCost := func(v float64) float64 { return math.Round(v*1e5) / 1e5 }
 85		costPer1MIn := roundCost(model.InputTokenPrice * 1_000_000)
 86		costPer1MOut := roundCost(model.OutputTokenPrice * 1_000_000)
 87		costPer1MInCached := roundCost(model.CacheReadTokenPrice * 1_000_000)
 88		costPer1MOutCached := roundCost(model.CacheWriteTokenPrice * 1_000_000)
 89
 90		m := catwalk.Model{
 91			ID:                     model.ID,
 92			Name:                   model.Name,
 93			CostPer1MIn:            costPer1MIn,
 94			CostPer1MOut:           costPer1MOut,
 95			CostPer1MInCached:      costPer1MInCached,
 96			CostPer1MOutCached:     costPer1MOutCached,
 97			ContextWindow:          int64(model.ContextWindow),
 98			DefaultMaxTokens:       int64(model.ContextWindow) / 10,
 99			CanReason:              isReasoningModel(model.ID),
100			ReasoningLevels:        reasoningLevels,
101			DefaultReasoningEffort: defaultReasoning,
102			SupportsImages:         model.SupportsImagesInput,
103		}
104
105		provider.Models = append(provider.Models, m)
106		fmt.Printf("Added model %s with context window %d\n", model.ID, model.ContextWindow)
107	}
108
109	slices.SortFunc(provider.Models, func(a catwalk.Model, b catwalk.Model) int {
110		return strings.Compare(a.Name, b.Name)
111	})
112
113	// Save the JSON in internal/providers/configs/ionet.json
114	data, err := json.MarshalIndent(provider, "", "  ")
115	if err != nil {
116		log.Fatal("Error marshaling io.net provider:", err)
117	}
118	data = append(data, '\n')
119
120	if err := os.WriteFile("internal/providers/configs/ionet.json", data, 0o600); err != nil {
121		log.Fatal("Error writing io.net provider config:", err)
122	}
123
124	fmt.Printf("Generated ionet.json with %d models\n", len(provider.Models))
125}
126
127func fetchModels(apiEndpoint string) (*Response, error) {
128	client := &http.Client{Timeout: 30 * time.Second}
129
130	req, err := http.NewRequestWithContext(context.Background(), "GET", apiEndpoint+"/models", nil)
131	if err != nil {
132		return nil, fmt.Errorf("failed to create http request: %w", err)
133	}
134	req.Header.Set("User-Agent", "Charm-Catwalk/1.0")
135
136	resp, err := client.Do(req)
137	if err != nil {
138		return nil, fmt.Errorf("failed to do http request: %w", err)
139	}
140	defer resp.Body.Close() //nolint:errcheck
141
142	body, _ := io.ReadAll(resp.Body)
143
144	// for debugging
145	_ = os.MkdirAll("tmp", 0o700)
146	_ = os.WriteFile("tmp/io-net-response.json", body, 0o600)
147
148	if resp.StatusCode != http.StatusOK {
149		return nil, fmt.Errorf("status %d: %s", resp.StatusCode, body)
150	}
151
152	var mr Response
153	if err := json.Unmarshal(body, &mr); err != nil {
154		return nil, fmt.Errorf("unable to unmarshal json: %w", err)
155	}
156	return &mr, nil
157}
158
// isReasoningModel checks if the model ID indicates reasoning capability.
// The ID is lowercased and matched against known marker strings
// (presumably substring matching — see xstrings.ContainsAnyOf).
func isReasoningModel(modelID string) bool {
	return xstrings.ContainsAnyOf(
		strings.ToLower(modelID),
		"-thinking", // explicit "thinking" model variants
		"kimi-k2.5",
		"kimi-k2.6",
		"deepseek",
		"glm",
		"gpt-oss",
		"llama",
		"gemma-4",
	)
}
173
// supportsReasoningLevels reports whether the model supports configurable
// reasoning effort levels. Matching is case-insensitive on the model ID.
func supportsReasoningLevels(modelID string) bool {
	return strings.Contains(strings.ToLower(modelID), "gpt-oss")
}
181
// supportsTools determines if a model supports tool calling based on its ID.
// Families known to lack tool calling are denylisted; everything else is
// assumed to support it. Matching is case-insensitive.
func supportsTools(modelID string) bool {
	id := strings.ToLower(modelID)
	for _, family := range []string{
		"deepseek",
		"llama-4",
		"mistral-nemo",
		"qwen2.5",
	} {
		if strings.Contains(id, family) {
			return false
		}
	}
	return true
}