1// Package main provides a command-line tool to fetch models from Vercel AI Gateway
2// and generate a configuration file for the provider.
3package main
4
5import (
6 "context"
7 "encoding/json"
8 "fmt"
9 "io"
10 "log"
11 "math"
12 "net/http"
13 "os"
14 "slices"
15 "strconv"
16 "strings"
17 "time"
18
19 "charm.land/catwalk/pkg/catwalk"
20)
21
// Model represents a model from the Vercel AI Gateway models API.
// Field names mirror the JSON payload returned by
// https://ai-gateway.vercel.sh/v1/models.
type Model struct {
	ID            string  `json:"id"`             // provider-qualified ID, e.g. "anthropic/claude-sonnet-4"
	Object        string  `json:"object"`         // API object type discriminator
	Created       int64   `json:"created"`        // creation timestamp (unix seconds, per OpenAI-style APIs — TODO confirm)
	OwnedBy       string  `json:"owned_by"`       // owning provider/organization
	Name          string  `json:"name"`           // human-readable display name
	Description   string  `json:"description"`    // short model description
	ContextWindow int64   `json:"context_window"` // maximum context size in tokens
	MaxTokens     int64   `json:"max_tokens"`     // maximum output tokens (0 if unspecified)
	Type          string  `json:"type"`           // model category; only "language" models are kept
	Tags          []string `json:"tags"`          // capability tags, e.g. "tool-use", "reasoning", "vision"
	Pricing       Pricing `json:"pricing"`        // per-token pricing information
}
36
// Pricing contains the pricing information for a model. Values are decimal
// strings expressing USD cost per single token (main multiplies them by
// 1,000,000 to get cost per 1M tokens).
type Pricing struct {
	Input           string `json:"input,omitempty"`             // cost per input (prompt) token
	Output          string `json:"output,omitempty"`            // cost per output (completion) token
	InputCacheRead  string `json:"input_cache_read,omitempty"`  // cost per cached-input token read
	InputCacheWrite string `json:"input_cache_write,omitempty"` // cost per cached-input token write
	WebSearch       string `json:"web_search,omitempty"`        // cost per web-search operation (unused here)
	Image           string `json:"image,omitempty"`             // cost per image (unused here)
}
46
// ModelsResponse is the response structure for the Vercel models API
// (an OpenAI-style list envelope).
type ModelsResponse struct {
	Object string  `json:"object"` // list object type discriminator
	Data   []Model `json:"data"`   // the models themselves
}
52
53func fetchVercelModels() (*ModelsResponse, error) {
54 client := &http.Client{Timeout: 30 * time.Second}
55 req, _ := http.NewRequestWithContext(
56 context.Background(),
57 "GET",
58 "https://ai-gateway.vercel.sh/v1/models",
59 nil,
60 )
61 req.Header.Set("User-Agent", "Crush-Client/1.0")
62 resp, err := client.Do(req)
63 if err != nil {
64 return nil, err //nolint:wrapcheck
65 }
66 defer resp.Body.Close() //nolint:errcheck
67 if resp.StatusCode != 200 {
68 body, _ := io.ReadAll(resp.Body)
69 return nil, fmt.Errorf("status %d: %s", resp.StatusCode, body)
70 }
71 var mr ModelsResponse
72 if err := json.NewDecoder(resp.Body).Decode(&mr); err != nil {
73 return nil, err //nolint:wrapcheck
74 }
75 return &mr, nil
76}
77
78func main() {
79 modelsResp, err := fetchVercelModels()
80 if err != nil {
81 log.Fatal("Error fetching Vercel models:", err)
82 }
83
84 vercelProvider := catwalk.Provider{
85 Name: "Vercel",
86 ID: catwalk.InferenceProviderVercel,
87 APIKey: "$VERCEL_API_KEY",
88 APIEndpoint: "https://ai-gateway.vercel.sh/v1",
89 Type: catwalk.TypeVercel,
90 DefaultLargeModelID: "anthropic/claude-sonnet-4",
91 DefaultSmallModelID: "anthropic/claude-haiku-4.5",
92 Models: []catwalk.Model{},
93 DefaultHeaders: map[string]string{
94 "HTTP-Referer": "https://charm.land",
95 "X-Title": "Crush",
96 },
97 }
98
99 for _, model := range modelsResp.Data {
100 // Only include language models, skip embedding and image models
101 if model.Type != "language" {
102 continue
103 }
104
105 // Skip models without tool support
106 if !slices.Contains(model.Tags, "tool-use") {
107 continue
108 }
109
110 // Parse pricing
111 roundCost := func(v float64) float64 { return math.Round(v*1e5) / 1e5 }
112 costPer1MIn := 0.0
113 costPer1MOut := 0.0
114 costPer1MInCached := 0.0
115 costPer1MOutCached := 0.0
116
117 if model.Pricing.Input != "" {
118 costPrompt, err := strconv.ParseFloat(model.Pricing.Input, 64)
119 if err == nil {
120 costPer1MIn = roundCost(costPrompt * 1_000_000)
121 }
122 }
123
124 if model.Pricing.Output != "" {
125 costCompletion, err := strconv.ParseFloat(model.Pricing.Output, 64)
126 if err == nil {
127 costPer1MOut = roundCost(costCompletion * 1_000_000)
128 }
129 }
130
131 if model.Pricing.InputCacheRead != "" {
132 costCached, err := strconv.ParseFloat(model.Pricing.InputCacheRead, 64)
133 if err == nil {
134 costPer1MInCached = roundCost(costCached * 1_000_000)
135 }
136 }
137
138 if model.Pricing.InputCacheWrite != "" {
139 costCacheWrite, err := strconv.ParseFloat(model.Pricing.InputCacheWrite, 64)
140 if err == nil {
141 costPer1MOutCached = roundCost(costCacheWrite * 1_000_000)
142 }
143 }
144
145 // Check if model supports reasoning
146 canReason := slices.Contains(model.Tags, "reasoning")
147
148 var reasoningLevels []string
149 var defaultReasoning string
150 if canReason {
151 // Base reasoning levels supported by most providers
152 reasoningLevels = []string{"low", "medium", "high"}
153 // Anthropic models support extended Vercel reasoning levels
154 if strings.HasPrefix(model.ID, "anthropic/") {
155 reasoningLevels = []string{"none", "minimal", "low", "medium", "high", "xhigh"}
156 }
157 defaultReasoning = "medium"
158 }
159
160 // Check if model supports images
161 supportsImages := slices.Contains(model.Tags, "vision")
162
163 // Calculate default max tokens
164 defaultMaxTokens := model.MaxTokens
165 if defaultMaxTokens == 0 {
166 defaultMaxTokens = model.ContextWindow / 10
167 }
168 if defaultMaxTokens > 8000 {
169 defaultMaxTokens = 8000
170 }
171
172 m := catwalk.Model{
173 ID: model.ID,
174 Name: model.Name,
175 CostPer1MIn: costPer1MIn,
176 CostPer1MOut: costPer1MOut,
177 CostPer1MInCached: costPer1MInCached,
178 CostPer1MOutCached: costPer1MOutCached,
179 ContextWindow: model.ContextWindow,
180 DefaultMaxTokens: defaultMaxTokens,
181 CanReason: canReason,
182 ReasoningLevels: reasoningLevels,
183 DefaultReasoningEffort: defaultReasoning,
184 SupportsImages: supportsImages,
185 }
186
187 vercelProvider.Models = append(vercelProvider.Models, m)
188 fmt.Printf("Added model %s with context window %d\n", model.ID, model.ContextWindow)
189 }
190
191 slices.SortFunc(vercelProvider.Models, func(a catwalk.Model, b catwalk.Model) int {
192 return strings.Compare(a.Name, b.Name)
193 })
194
195 // Save the JSON in internal/providers/configs/vercel.json
196 data, err := json.MarshalIndent(vercelProvider, "", " ")
197 if err != nil {
198 log.Fatal("Error marshaling Vercel provider:", err)
199 }
200
201 if err := os.WriteFile("internal/providers/configs/vercel.json", data, 0o600); err != nil {
202 log.Fatal("Error writing Vercel provider config:", err)
203 }
204
205 fmt.Printf("Generated vercel.json with %d models\n", len(vercelProvider.Models))
206}