1// Package main provides a command-line tool to fetch models from OpenRouter
2// and generate a configuration file for the provider.
3package main
4
5import (
6 "context"
7 "encoding/json"
8 "fmt"
9 "io"
10 "log"
11 "net/http"
12 "os"
13 "slices"
14 "strconv"
15 "time"
16
17 "github.com/charmbracelet/fur/pkg/provider"
18)
19
// Model represents a single model entry as returned by the OpenRouter
// /api/v1/models endpoint. Only the fields this tool consumes need to be
// accurate; unknown JSON fields are ignored by the decoder.
type Model struct {
	ID              string       `json:"id"`              // provider-scoped identifier, e.g. "anthropic/claude-sonnet-4" — used as the config model ID
	CanonicalSlug   string       `json:"canonical_slug"`  // not used by this generator
	HuggingFaceID   string       `json:"hugging_face_id"` // not used by this generator
	Name            string       `json:"name"`            // human-readable display name
	Created         int64        `json:"created"`         // creation timestamp; presumably Unix seconds — not used here
	Description     string       `json:"description"`
	ContextLength   int64        `json:"context_length"` // context window size in tokens
	Architecture    Architecture `json:"architecture"`
	Pricing         Pricing      `json:"pricing"`
	TopProvider     TopProvider  `json:"top_provider"`
	SupportedParams []string     `json:"supported_parameters"` // capability flags; "tools" and "reasoning" are checked in main
}
34
// Architecture defines the model's architecture details. main filters on the
// input/output modality lists ("text", "image") to select usable models.
type Architecture struct {
	Modality         string   `json:"modality"`
	InputModalities  []string `json:"input_modalities"`  // e.g. contains "text" and/or "image"
	OutputModalities []string `json:"output_modalities"` // must contain "text" for the model to be kept
	Tokenizer        string   `json:"tokenizer"`
	InstructType     *string  `json:"instruct_type"` // pointer: null in the API when not applicable
}
43
// Pricing contains the pricing information for different operations. Values
// are decimal strings parsed with strconv.ParseFloat by getPricing; they are
// per-token costs (getPricing scales them to per-million-token costs).
type Pricing struct {
	Prompt            string `json:"prompt"`     // cost per input (prompt) token
	Completion        string `json:"completion"` // cost per output (completion) token
	Request           string `json:"request"`
	Image             string `json:"image"`
	WebSearch         string `json:"web_search"`
	InternalReasoning string `json:"internal_reasoning"`
	InputCacheRead    string `json:"input_cache_read"`  // cost per cached-input token read
	InputCacheWrite   string `json:"input_cache_write"` // cost per cached-input token write
}
55
// TopProvider describes the top provider's capabilities for a model.
type TopProvider struct {
	ContextLength int64 `json:"context_length"`
	// MaxCompletionTokens is a pointer because the API returns null when the
	// provider does not publish a completion-token cap; main falls back to a
	// fraction of the context length in that case.
	MaxCompletionTokens *int64 `json:"max_completion_tokens"`
	IsModerated         bool   `json:"is_moderated"`
}
62
// ModelsResponse is the top-level response envelope for the models API:
// the model list is wrapped in a "data" array.
type ModelsResponse struct {
	Data []Model `json:"data"`
}
67
// ModelPricing is the pricing structure for a model, detailing costs per
// million tokens for input and output, both cached and uncached. It is the
// normalized (numeric) form of the string-valued Pricing struct.
type ModelPricing struct {
	CostPer1MIn        float64 `json:"cost_per_1m_in"`
	CostPer1MOut       float64 `json:"cost_per_1m_out"`
	CostPer1MInCached  float64 `json:"cost_per_1m_in_cached"`
	CostPer1MOutCached float64 `json:"cost_per_1m_out_cached"`
}
76
77func getPricing(model Model) ModelPricing {
78 pricing := ModelPricing{}
79 costPrompt, err := strconv.ParseFloat(model.Pricing.Prompt, 64)
80 if err != nil {
81 costPrompt = 0.0
82 }
83 pricing.CostPer1MIn = costPrompt * 1_000_000
84 costCompletion, err := strconv.ParseFloat(model.Pricing.Completion, 64)
85 if err != nil {
86 costCompletion = 0.0
87 }
88 pricing.CostPer1MOut = costCompletion * 1_000_000
89
90 costPromptCached, err := strconv.ParseFloat(model.Pricing.InputCacheWrite, 64)
91 if err != nil {
92 costPromptCached = 0.0
93 }
94 pricing.CostPer1MInCached = costPromptCached * 1_000_000
95 costCompletionCached, err := strconv.ParseFloat(model.Pricing.InputCacheRead, 64)
96 if err != nil {
97 costCompletionCached = 0.0
98 }
99 pricing.CostPer1MOutCached = costCompletionCached * 1_000_000
100 return pricing
101}
102
103func fetchOpenRouterModels() (*ModelsResponse, error) {
104 client := &http.Client{Timeout: 30 * time.Second}
105 req, _ := http.NewRequestWithContext(
106 context.Background(),
107 "GET",
108 "https://openrouter.ai/api/v1/models",
109 nil,
110 )
111 req.Header.Set("User-Agent", "Crush-Client/1.0")
112 resp, err := client.Do(req)
113 if err != nil {
114 return nil, err //nolint:wrapcheck
115 }
116 defer resp.Body.Close() //nolint:errcheck
117 if resp.StatusCode != 200 {
118 body, _ := io.ReadAll(resp.Body)
119 return nil, fmt.Errorf("status %d: %s", resp.StatusCode, body)
120 }
121 var mr ModelsResponse
122 if err := json.NewDecoder(resp.Body).Decode(&mr); err != nil {
123 return nil, err //nolint:wrapcheck
124 }
125 return &mr, nil
126}
127
128// This is used to generate the openrouter.json config file.
129func main() {
130 modelsResp, err := fetchOpenRouterModels()
131 if err != nil {
132 log.Fatal("Error fetching OpenRouter models:", err)
133 }
134
135 openRouterProvider := provider.Provider{
136 Name: "OpenRouter",
137 ID: "openrouter",
138 APIKey: "$OPENROUTER_API_KEY",
139 APIEndpoint: "https://openrouter.ai/api/v1",
140 Type: provider.TypeOpenAI,
141 DefaultLargeModelID: "anthropic/claude-sonnet-4",
142 DefaultSmallModelID: "anthropic/claude-haiku-3.5",
143 Models: []provider.Model{},
144 }
145
146 for _, model := range modelsResp.Data {
147 // skip nonβtext models or those without tools
148 if !slices.Contains(model.SupportedParams, "tools") ||
149 !slices.Contains(model.Architecture.InputModalities, "text") ||
150 !slices.Contains(model.Architecture.OutputModalities, "text") {
151 continue
152 }
153
154 pricing := getPricing(model)
155 canReason := slices.Contains(model.SupportedParams, "reasoning")
156 supportsImages := slices.Contains(model.Architecture.InputModalities, "image")
157
158 m := provider.Model{
159 ID: model.ID,
160 Name: model.Name,
161 CostPer1MIn: pricing.CostPer1MIn,
162 CostPer1MOut: pricing.CostPer1MOut,
163 CostPer1MInCached: pricing.CostPer1MInCached,
164 CostPer1MOutCached: pricing.CostPer1MOutCached,
165 ContextWindow: model.ContextLength,
166 CanReason: canReason,
167 SupportsImages: supportsImages,
168 }
169 if model.TopProvider.MaxCompletionTokens != nil {
170 m.DefaultMaxTokens = int64(*model.TopProvider.MaxCompletionTokens / 2)
171 } else {
172 m.DefaultMaxTokens = model.ContextLength / 10
173 }
174 openRouterProvider.Models = append(openRouterProvider.Models, m)
175 }
176
177 // save the json in internal/providers/config/openrouter.json
178 data, err := json.MarshalIndent(openRouterProvider, "", " ")
179 if err != nil {
180 log.Fatal("Error marshaling OpenRouter provider:", err)
181 }
182 // write to file
183 if err := os.WriteFile("internal/providers/configs/openrouter.json", data, 0o600); err != nil {
184 log.Fatal("Error writing OpenRouter provider config:", err)
185 }
186}