1// Package main provides a command-line tool to fetch models from io.net
2// and generate a configuration file for the provider.
3package main
4
5import (
6 "context"
7 "encoding/json"
8 "fmt"
9 "io"
10 "log"
11 "math"
12 "net/http"
13 "os"
14 "slices"
15 "strings"
16 "time"
17
18 "charm.land/catwalk/pkg/catwalk"
19 xstrings "github.com/charmbracelet/x/exp/strings"
20)
21
// Model represents a model from the io.net API.
type Model struct {
	ID                   string  `json:"id"`                      // unique model identifier, used as the catwalk model ID
	Name                 string  `json:"name"`                    // human-readable display name
	ContextWindow        int     `json:"context_window"`          // context window size in tokens
	SupportsImagesInput  bool    `json:"supports_images_input"`   // whether the model accepts image inputs
	InputTokenPrice      float64 `json:"input_token_price"`       // price per single input token (scaled to per-1M by the generator)
	OutputTokenPrice     float64 `json:"output_token_price"`      // price per single output token
	CacheWriteTokenPrice float64 `json:"cache_write_token_price"` // price per token written to the prompt cache
	CacheReadTokenPrice  float64 `json:"cache_read_token_price"`  // price per token read from the prompt cache
}
33
// Response is the response structure for the io.net models API.
type Response struct {
	// Data holds the models returned by the /models endpoint.
	Data []Model `json:"data"`
}
38
39// This is used to generate the ionet.json config file.
40func main() {
41 provider := catwalk.Provider{
42 Name: "io.net",
43 ID: "ionet",
44 APIKey: "$IONET_API_KEY",
45 APIEndpoint: "https://api.intelligence.io.solutions/api/v1",
46 Type: catwalk.TypeOpenAICompat,
47 DefaultLargeModelID: "moonshotai/Kimi-K2.5",
48 DefaultSmallModelID: "zai-org/GLM-4.7-Flash",
49 }
50
51 resp, err := fetchModels(provider.APIEndpoint)
52 if err != nil {
53 log.Fatal("Error fetching io.net models:", err)
54 }
55
56 provider.Models = make([]catwalk.Model, 0, len(resp.Data))
57
58 modelIDSet := make(map[string]struct{})
59
60 for _, model := range resp.Data {
61 // Avoid duplicate entries
62 if _, ok := modelIDSet[model.ID]; ok {
63 continue
64 }
65 modelIDSet[model.ID] = struct{}{}
66
67 if !shouldSkipModel(model.ID) {
68 continue
69 }
70
71 var (
72 reasoningLevels []string
73 defaultReasoning string
74 )
75 if supportsReasoningLevels(model.ID) {
76 reasoningLevels = []string{"low", "medium", "high"}
77 defaultReasoning = "medium"
78 }
79
80 // Convert token prices (per token) to cost per 1M tokens
81 roundCost := func(v float64) float64 { return math.Round(v*1e5) / 1e5 }
82 costPer1MIn := roundCost(model.InputTokenPrice * 1_000_000)
83 costPer1MOut := roundCost(model.OutputTokenPrice * 1_000_000)
84 costPer1MInCached := roundCost(model.CacheReadTokenPrice * 1_000_000)
85 costPer1MOutCached := roundCost(model.CacheWriteTokenPrice * 1_000_000)
86
87 m := catwalk.Model{
88 ID: model.ID,
89 Name: model.Name,
90 CostPer1MIn: costPer1MIn,
91 CostPer1MOut: costPer1MOut,
92 CostPer1MInCached: costPer1MInCached,
93 CostPer1MOutCached: costPer1MOutCached,
94 ContextWindow: int64(model.ContextWindow),
95 DefaultMaxTokens: int64(model.ContextWindow) / 10,
96 CanReason: isReasoningModel(model.ID),
97 ReasoningLevels: reasoningLevels,
98 DefaultReasoningEffort: defaultReasoning,
99 SupportsImages: model.SupportsImagesInput,
100 }
101
102 provider.Models = append(provider.Models, m)
103 fmt.Printf("Added model %s with context window %d\n", model.ID, model.ContextWindow)
104 }
105
106 slices.SortFunc(provider.Models, func(a catwalk.Model, b catwalk.Model) int {
107 return strings.Compare(a.Name, b.Name)
108 })
109
110 // Save the JSON in internal/providers/configs/ionet.json
111 data, err := json.MarshalIndent(provider, "", " ")
112 if err != nil {
113 log.Fatal("Error marshaling io.net provider:", err)
114 }
115 data = append(data, '\n')
116
117 if err := os.WriteFile("internal/providers/configs/ionet.json", data, 0o600); err != nil {
118 log.Fatal("Error writing io.net provider config:", err)
119 }
120
121 fmt.Printf("Generated ionet.json with %d models\n", len(provider.Models))
122}
123
124func fetchModels(apiEndpoint string) (*Response, error) {
125 client := &http.Client{Timeout: 30 * time.Second}
126
127 req, err := http.NewRequestWithContext(context.Background(), "GET", apiEndpoint+"/models", nil)
128 if err != nil {
129 return nil, fmt.Errorf("failed to create http request: %w", err)
130 }
131 req.Header.Set("User-Agent", "Charm-Catwalk/1.0")
132
133 resp, err := client.Do(req)
134 if err != nil {
135 return nil, fmt.Errorf("failed to do http request: %w", err)
136 }
137 defer resp.Body.Close() //nolint:errcheck
138
139 body, _ := io.ReadAll(resp.Body)
140
141 // for debugging
142 _ = os.MkdirAll("tmp", 0o700)
143 _ = os.WriteFile("tmp/io-net-response.json", body, 0o600)
144
145 if resp.StatusCode != http.StatusOK {
146 return nil, fmt.Errorf("status %d: %s", resp.StatusCode, body)
147 }
148
149 var mr Response
150 if err := json.Unmarshal(body, &mr); err != nil {
151 return nil, fmt.Errorf("unable to unmarshal json: %w", err)
152 }
153 return &mr, nil
154}
155
// isReasoningModel reports whether the model ID belongs to a family known
// to support reasoning (matched case-insensitively by substring).
func isReasoningModel(modelID string) bool {
	id := strings.ToLower(modelID)
	markers := []string{
		"-thinking",
		"kimi-k2.5",
		"kimi-k2.6",
		"deepseek",
		"glm",
		"gpt-oss",
		"llama",
		"gemma-4",
	}
	for _, marker := range markers {
		if strings.Contains(id, marker) {
			return true
		}
	}
	return false
}
170
// supportsReasoningLevels reports whether the model accepts explicit
// reasoning-effort levels (low/medium/high). Currently only the gpt-oss
// family does; the match is case-insensitive.
func supportsReasoningLevels(modelID string) bool {
	return strings.Contains(strings.ToLower(modelID), "gpt-oss")
}
178
// shouldSkipModel return if we should skip a model, if it's not good enough
// for coding.
//
// NOTE(review): as written, this returns true for IDs that do NOT match the
// list below, and the caller negates the result, so the listed models are
// the ones that end up skipped. End-to-end behavior is correct, but the
// polarity is inverted relative to the name — consider renaming it
// (together with its call site) to shouldKeepModel.
func shouldSkipModel(modelID string) bool {
	id := strings.ToLower(modelID)
	for _, weak := range []string{"deepseek-r1", "llama-3.2", "mistral-nemo"} {
		if strings.Contains(id, weak) {
			return false
		}
	}
	return true
}