main.go

  1// Package main provides a command-line tool to fetch models from io.net
  2// and generate a configuration file for the provider.
  3package main
  4
  5import (
  6	"context"
  7	"encoding/json"
  8	"fmt"
  9	"io"
 10	"log"
 11	"net/http"
 12	"os"
 13	"slices"
 14	"strings"
 15	"time"
 16
 17	"charm.land/catwalk/pkg/catwalk"
 18	xstrings "github.com/charmbracelet/x/exp/strings"
 19)
 20
// Model represents a model from the io.net API.
//
// Token prices are quoted per single token (the generator converts them to
// cost per 1M tokens). Pointer fields are nullable in the API response.
type Model struct {
	ID                   string       `json:"id"`
	Object               string       `json:"object"`
	Created              int64        `json:"created"`
	OwnedBy              string       `json:"owned_by"`
	Root                 *string      `json:"root"`
	Parent               *string      `json:"parent"`
	MaxModelLen          *int         `json:"max_model_len"`
	Permission           []Permission `json:"permission"`
	MaxTokens            *int         `json:"max_tokens"`
	ContextWindow        int          `json:"context_window"` // in tokens; models below 20k are filtered out by main
	SupportsImagesInput  bool         `json:"supports_images_input"`
	SupportsPromptCache  bool         `json:"supports_prompt_cache"`
	InputTokenPrice      float64      `json:"input_token_price"` // price per token, not per 1M
	OutputTokenPrice     float64      `json:"output_token_price"`
	CacheWriteTokenPrice float64      `json:"cache_write_token_price"`
	CacheReadTokenPrice  float64      `json:"cache_read_token_price"`
	Precision            *string      `json:"precision"`
	AvgLatencyMsPerDay   float64      `json:"avg_latency_ms_per_day"`
	AvgThroughputPerDay  float64      `json:"avg_throughput_per_day"`
	SupportsAttestation  bool         `json:"supports_attestation"`
}
 44
// Permission represents a model permission from the io.net API.
// It is decoded as part of each Model entry but is not otherwise used by
// this generator.
type Permission struct {
	ID                 string  `json:"id"`
	Object             string  `json:"object"`
	Created            int64   `json:"created"`
	AllowCreateEngine  bool    `json:"allow_create_engine"`
	AllowSampling      bool    `json:"allow_sampling"`
	AllowLogprobs      bool    `json:"allow_logprobs"`
	AllowSearchIndices bool    `json:"allow_search_indices"`
	AllowView          bool    `json:"allow_view"`
	AllowFineTuning    bool    `json:"allow_fine_tuning"`
	Organization       string  `json:"organization"`
	Group              *string `json:"group"` // nullable in the API response
	IsBlocking         bool    `json:"is_blocking"`
}
 60
// Response is the response structure for the io.net models API.
type Response struct {
	Object string  `json:"object"`
	Data   []Model `json:"data"` // the listed models; may contain duplicate IDs
}
 66
 67// This is used to generate the ionet.json config file.
 68func main() {
 69	provider := catwalk.Provider{
 70		Name:                "io.net",
 71		ID:                  "ionet",
 72		APIKey:              "$IONET_API_KEY",
 73		APIEndpoint:         "https://api.intelligence.io.solutions/api/v1",
 74		Type:                catwalk.TypeOpenAICompat,
 75		DefaultLargeModelID: "zai-org/GLM-4.7",
 76		DefaultSmallModelID: "zai-org/GLM-4.7-Flash",
 77	}
 78
 79	resp, err := fetchModels(provider.APIEndpoint)
 80	if err != nil {
 81		log.Fatal("Error fetching io.net models:", err)
 82	}
 83
 84	provider.Models = make([]catwalk.Model, 0, len(resp.Data))
 85
 86	modelIDSet := make(map[string]struct{})
 87
 88	for _, model := range resp.Data {
 89		// Avoid duplicate entries
 90		if _, ok := modelIDSet[model.ID]; ok {
 91			continue
 92		}
 93		modelIDSet[model.ID] = struct{}{}
 94
 95		if model.ContextWindow < 20000 {
 96			continue
 97		}
 98		if !supportsTools(model.ID) {
 99			continue
100		}
101
102		canReason := isReasoningModel(model.ID)
103		var reasoningLevels []string
104		var defaultReasoning string
105		if canReason {
106			reasoningLevels = []string{"low", "medium", "high"}
107			defaultReasoning = "medium"
108		}
109
110		// Convert token prices (per token) to cost per 1M tokens
111		costPer1MIn := model.InputTokenPrice * 1_000_000
112		costPer1MOut := model.OutputTokenPrice * 1_000_000
113		costPer1MInCached := model.CacheReadTokenPrice * 1_000_000
114		costPer1MOutCached := model.CacheWriteTokenPrice * 1_000_000
115
116		m := catwalk.Model{
117			ID:                     model.ID,
118			Name:                   getModelName(model.ID),
119			CostPer1MIn:            costPer1MIn,
120			CostPer1MOut:           costPer1MOut,
121			CostPer1MInCached:      costPer1MInCached,
122			CostPer1MOutCached:     costPer1MOutCached,
123			ContextWindow:          int64(model.ContextWindow),
124			DefaultMaxTokens:       int64(model.ContextWindow) / 10,
125			CanReason:              canReason,
126			ReasoningLevels:        reasoningLevels,
127			DefaultReasoningEffort: defaultReasoning,
128			SupportsImages:         model.SupportsImagesInput,
129		}
130
131		provider.Models = append(provider.Models, m)
132		fmt.Printf("Added model %s with context window %d\n", model.ID, model.ContextWindow)
133	}
134
135	slices.SortFunc(provider.Models, func(a catwalk.Model, b catwalk.Model) int {
136		return strings.Compare(a.Name, b.Name)
137	})
138
139	// Save the JSON in internal/providers/configs/ionet.json
140	data, err := json.MarshalIndent(provider, "", "  ")
141	if err != nil {
142		log.Fatal("Error marshaling io.net provider:", err)
143	}
144	data = append(data, '\n')
145
146	if err := os.WriteFile("internal/providers/configs/ionet.json", data, 0o600); err != nil {
147		log.Fatal("Error writing io.net provider config:", err)
148	}
149
150	fmt.Printf("Generated ionet.json with %d models\n", len(provider.Models))
151}
152
153func fetchModels(apiEndpoint string) (*Response, error) {
154	client := &http.Client{Timeout: 30 * time.Second}
155
156	req, err := http.NewRequestWithContext(context.Background(), "GET", apiEndpoint+"/models", nil)
157	if err != nil {
158		return nil, fmt.Errorf("failed to create http request: %w", err)
159	}
160	req.Header.Set("User-Agent", "Charm-Catwalk/1.0")
161
162	resp, err := client.Do(req)
163	if err != nil {
164		return nil, fmt.Errorf("failed to do http request: %w", err)
165	}
166	defer resp.Body.Close() //nolint:errcheck
167
168	body, _ := io.ReadAll(resp.Body)
169
170	// for debugging
171	_ = os.MkdirAll("tmp", 0o700)
172	_ = os.WriteFile("tmp/io-net-response.json", body, 0o600)
173
174	if resp.StatusCode != http.StatusOK {
175		return nil, fmt.Errorf("status %d: %s", resp.StatusCode, body)
176	}
177
178	var mr Response
179	if err := json.Unmarshal(body, &mr); err != nil {
180		return nil, fmt.Errorf("unable to unmarshal json: %w", err)
181	}
182	return &mr, nil
183}
184
// getModelName extracts a clean display name from the model ID: the
// namespace prefix up to and including the last "/" is dropped, and
// hyphens become spaces (e.g. "zai-org/GLM-4.7" -> "GLM 4.7").
func getModelName(modelID string) string {
	// LastIndexByte yields -1 when there is no slash, so idx+1 is 0 and
	// the whole ID is kept.
	base := modelID[strings.LastIndexByte(modelID, '/')+1:]
	return strings.ReplaceAll(base, "-", " ")
}
196
197// isReasoningModel checks if the model ID indicates reasoning capability.
198func isReasoningModel(modelID string) bool {
199	return xstrings.ContainsAnyOf(
200		strings.ToLower(modelID),
201		"-thinking",
202		"deepseek",
203		"glm",
204		"gpt-oss",
205		"llama",
206	)
207}
208
209// supportsTools determines if a model supports tool calling based on its ID.
210func supportsTools(modelID string) bool {
211	return !xstrings.ContainsAnyOf(
212		strings.ToLower(modelID),
213		"deepseek",
214		"llama-4",
215		"mistral-nemo",
216		"qwen2.5",
217		"gpt-oss",
218	)
219}