1// Package main provides a command-line tool to fetch models from Vercel AI Gateway
2// and generate a configuration file for the provider.
3package main
4
5import (
6 "context"
7 "encoding/json"
8 "fmt"
9 "io"
10 "log"
11 "net/http"
12 "os"
13 "slices"
14 "strconv"
15 "strings"
16 "time"
17
18 "charm.land/catwalk/pkg/catwalk"
19)
20
// Model represents a single model entry returned by the Vercel AI
// Gateway /v1/models endpoint (OpenAI-compatible list format).
type Model struct {
	ID            string  `json:"id"`             // provider-scoped ID, e.g. "anthropic/claude-sonnet-4"
	Object        string  `json:"object"`         // OpenAI-style object tag
	Created       int64   `json:"created"`        // creation timestamp (Unix seconds, per OpenAI convention — not used here)
	OwnedBy       string  `json:"owned_by"`       // owning organization/provider
	Name          string  `json:"name"`           // human-readable display name
	Description   string  `json:"description"`    // free-form model description
	ContextWindow int64   `json:"context_window"` // maximum context size in tokens
	MaxTokens     int64   `json:"max_tokens"`     // maximum output tokens; may be 0 (unset)
	Type          string  `json:"type"`           // model kind; only "language" models are kept
	Tags          []string `json:"tags"`          // capability tags, e.g. "tool-use", "reasoning", "vision"
	Pricing       Pricing `json:"pricing"`        // per-token price strings
}
35
// Pricing contains the pricing information for a model. Each field is a
// decimal string expressing USD cost per single token (downstream code
// multiplies by 1,000,000 to get cost per 1M tokens); empty means unset.
type Pricing struct {
	Input           string `json:"input,omitempty"`            // prompt tokens
	Output          string `json:"output,omitempty"`           // completion tokens
	InputCacheRead  string `json:"input_cache_read,omitempty"` // cached-prompt reads
	InputCacheWrite string `json:"input_cache_write,omitempty"` // cached-prompt writes
	WebSearch       string `json:"web_search,omitempty"`       // web search usage (unused here)
	Image           string `json:"image,omitempty"`            // image input (unused here)
}
45
// ModelsResponse is the response structure for the Vercel models API:
// an OpenAI-style list envelope wrapping the model entries.
type ModelsResponse struct {
	Object string  `json:"object"` // list object tag
	Data   []Model `json:"data"`   // the model entries
}
51
52func fetchVercelModels() (*ModelsResponse, error) {
53 client := &http.Client{Timeout: 30 * time.Second}
54 req, _ := http.NewRequestWithContext(
55 context.Background(),
56 "GET",
57 "https://ai-gateway.vercel.sh/v1/models",
58 nil,
59 )
60 req.Header.Set("User-Agent", "Crush-Client/1.0")
61 resp, err := client.Do(req)
62 if err != nil {
63 return nil, err //nolint:wrapcheck
64 }
65 defer resp.Body.Close() //nolint:errcheck
66 if resp.StatusCode != 200 {
67 body, _ := io.ReadAll(resp.Body)
68 return nil, fmt.Errorf("status %d: %s", resp.StatusCode, body)
69 }
70 var mr ModelsResponse
71 if err := json.NewDecoder(resp.Body).Decode(&mr); err != nil {
72 return nil, err //nolint:wrapcheck
73 }
74 return &mr, nil
75}
76
77func main() {
78 modelsResp, err := fetchVercelModels()
79 if err != nil {
80 log.Fatal("Error fetching Vercel models:", err)
81 }
82
83 vercelProvider := catwalk.Provider{
84 Name: "Vercel",
85 ID: catwalk.InferenceProviderVercel,
86 APIKey: "$VERCEL_API_KEY",
87 APIEndpoint: "https://ai-gateway.vercel.sh/v1",
88 Type: catwalk.TypeOpenAICompat,
89 DefaultLargeModelID: "anthropic/claude-sonnet-4",
90 DefaultSmallModelID: "anthropic/claude-haiku-4.5",
91 Models: []catwalk.Model{},
92 DefaultHeaders: map[string]string{
93 "HTTP-Referer": "https://charm.land",
94 "X-Title": "Crush",
95 },
96 }
97
98 for _, model := range modelsResp.Data {
99 // Only include language models, skip embedding and image models
100 if model.Type != "language" {
101 continue
102 }
103
104 // Skip models without tool support
105 if !slices.Contains(model.Tags, "tool-use") {
106 continue
107 }
108
109 // Parse pricing
110 costPer1MIn := 0.0
111 costPer1MOut := 0.0
112 costPer1MInCached := 0.0
113 costPer1MOutCached := 0.0
114
115 if model.Pricing.Input != "" {
116 costPrompt, err := strconv.ParseFloat(model.Pricing.Input, 64)
117 if err == nil {
118 costPer1MIn = costPrompt * 1_000_000
119 }
120 }
121
122 if model.Pricing.Output != "" {
123 costCompletion, err := strconv.ParseFloat(model.Pricing.Output, 64)
124 if err == nil {
125 costPer1MOut = costCompletion * 1_000_000
126 }
127 }
128
129 if model.Pricing.InputCacheRead != "" {
130 costCached, err := strconv.ParseFloat(model.Pricing.InputCacheRead, 64)
131 if err == nil {
132 costPer1MInCached = costCached * 1_000_000
133 }
134 }
135
136 if model.Pricing.InputCacheWrite != "" {
137 costCacheWrite, err := strconv.ParseFloat(model.Pricing.InputCacheWrite, 64)
138 if err == nil {
139 costPer1MOutCached = costCacheWrite * 1_000_000
140 }
141 }
142
143 // Check if model supports reasoning
144 canReason := slices.Contains(model.Tags, "reasoning")
145
146 var reasoningLevels []string
147 var defaultReasoning string
148 if canReason {
149 reasoningLevels = []string{"low", "medium", "high"}
150 defaultReasoning = "medium"
151 }
152
153 // Check if model supports images
154 supportsImages := slices.Contains(model.Tags, "vision")
155
156 // Calculate default max tokens
157 defaultMaxTokens := model.MaxTokens
158 if defaultMaxTokens == 0 {
159 defaultMaxTokens = model.ContextWindow / 10
160 }
161 if defaultMaxTokens > 8000 {
162 defaultMaxTokens = 8000
163 }
164
165 m := catwalk.Model{
166 ID: model.ID,
167 Name: model.Name,
168 CostPer1MIn: costPer1MIn,
169 CostPer1MOut: costPer1MOut,
170 CostPer1MInCached: costPer1MInCached,
171 CostPer1MOutCached: costPer1MOutCached,
172 ContextWindow: model.ContextWindow,
173 DefaultMaxTokens: defaultMaxTokens,
174 CanReason: canReason,
175 ReasoningLevels: reasoningLevels,
176 DefaultReasoningEffort: defaultReasoning,
177 SupportsImages: supportsImages,
178 }
179
180 vercelProvider.Models = append(vercelProvider.Models, m)
181 fmt.Printf("Added model %s with context window %d\n", model.ID, model.ContextWindow)
182 }
183
184 slices.SortFunc(vercelProvider.Models, func(a catwalk.Model, b catwalk.Model) int {
185 return strings.Compare(a.Name, b.Name)
186 })
187
188 // Save the JSON in internal/providers/configs/vercel.json
189 data, err := json.MarshalIndent(vercelProvider, "", " ")
190 if err != nil {
191 log.Fatal("Error marshaling Vercel provider:", err)
192 }
193
194 if err := os.WriteFile("internal/providers/configs/vercel.json", data, 0o600); err != nil {
195 log.Fatal("Error writing Vercel provider config:", err)
196 }
197
198 fmt.Printf("Generated vercel.json with %d models\n", len(vercelProvider.Models))
199}