1// Package main provides a command-line tool to fetch models from Synthetic
2// and generate a configuration file for the provider.
3package main
4
5import (
6 "context"
7 "encoding/json"
8 "fmt"
9 "io"
10 "log"
11 "math"
12 "net/http"
13 "os"
14 "slices"
15 "strconv"
16 "strings"
17 "time"
18
19 "charm.land/catwalk/pkg/catwalk"
20)
21
// Model represents a model from the Synthetic API.
type Model struct {
	ID                string   `json:"id"`   // provider-qualified ID, e.g. "hf:meta-llama/Llama-..."
	Name              string   `json:"name"` // display name; may contain an "org/" prefix
	InputModalities   []string `json:"input_modalities"`  // e.g. "text", "image"
	OutputModalities  []string `json:"output_modalities"` // e.g. "text"
	ContextLength     int64    `json:"context_length"`    // context window size (tokens)
	MaxOutputLength   int64    `json:"max_output_length,omitempty"` // max completion length; 0 when unreported
	Pricing           Pricing  `json:"pricing"`
	// SupportedFeatures lists capabilities like "tools" and "reasoning".
	// Often missing from the API; see applyModelOverrides.
	SupportedFeatures []string `json:"supported_features,omitempty"`
}
33
// Pricing contains the pricing information for different operations.
// Values are per-token dollar-amount strings such as "$0.00000055"
// (parsed by parsePrice).
type Pricing struct {
	Prompt           string `json:"prompt"`     // per input (prompt) token
	Completion       string `json:"completion"` // per output (completion) token
	Image            string `json:"image"`      // unused by this generator
	Request          string `json:"request"`    // unused by this generator
	InputCacheReads  string `json:"input_cache_reads"`  // per cached-input token read
	InputCacheWrites string `json:"input_cache_writes"` // per cached-input token write
}
43
// ModelsResponse is the response structure for the Synthetic models API.
type ModelsResponse struct {
	Data []Model `json:"data"` // all models offered by the provider
}
48
// ModelPricing is the pricing structure for a model, detailing costs per
// million tokens for input and output, both cached and uncached.
// Values are dollars per million tokens, rounded to five decimal places
// (see roundCost).
type ModelPricing struct {
	CostPer1MIn        float64 `json:"cost_per_1m_in"`  // uncached input tokens
	CostPer1MOut       float64 `json:"cost_per_1m_out"` // uncached output tokens
	CostPer1MInCached  float64 `json:"cost_per_1m_in_cached"`  // cached input tokens
	CostPer1MOutCached float64 `json:"cost_per_1m_out_cached"` // cached output tokens
}
57
// parsePrice extracts a float from Synthetic's price format (e.g. "$0.00000055").
// Malformed input yields 0.
func parsePrice(s string) float64 {
	if v, err := strconv.ParseFloat(strings.TrimPrefix(s, "$"), 64); err == nil {
		return v
	}
	return 0
}
67
// roundCost rounds v to five decimal places.
func roundCost(v float64) float64 {
	const scale = 1e5
	return math.Round(v*scale) / scale
}
71
72func getPricing(model Model) ModelPricing {
73 return ModelPricing{
74 CostPer1MIn: roundCost(parsePrice(model.Pricing.Prompt) * 1_000_000),
75 CostPer1MOut: roundCost(parsePrice(model.Pricing.Completion) * 1_000_000),
76 CostPer1MInCached: roundCost(parsePrice(model.Pricing.InputCacheReads) * 1_000_000),
77 CostPer1MOutCached: roundCost(parsePrice(model.Pricing.InputCacheReads) * 1_000_000),
78 }
79}
80
81// applyModelOverrides sets supported_features for models where Synthetic
82// omits this metadata.
83// TODO: Remove this when they add the missing metadata.
84func applyModelOverrides(model *Model) {
85 switch {
86 // All of llama support tools, none do reasoning yet
87 case strings.HasPrefix(model.ID, "hf:meta-llama/Llama-"):
88 model.SupportedFeatures = []string{"tools"}
89
90 case strings.HasPrefix(model.ID, "hf:deepseek-ai/DeepSeek-R1"):
91 model.SupportedFeatures = []string{"tools", "reasoning"}
92
93 case strings.HasPrefix(model.ID, "hf:deepseek-ai/DeepSeek-V3.1"):
94 model.SupportedFeatures = []string{"tools", "reasoning"}
95
96 case strings.HasPrefix(model.ID, "hf:deepseek-ai/DeepSeek-V3.2"):
97 model.SupportedFeatures = []string{"tools", "reasoning"}
98
99 case strings.HasPrefix(model.ID, "hf:deepseek-ai/DeepSeek-V3"):
100 model.SupportedFeatures = []string{"tools"}
101
102 case strings.HasPrefix(model.ID, "hf:Qwen/Qwen3-235B-A22B-Thinking"):
103 model.SupportedFeatures = []string{"tools", "reasoning"}
104
105 case strings.HasPrefix(model.ID, "hf:Qwen/Qwen3-235B-A22B-Instruct"):
106 model.SupportedFeatures = []string{"tools", "reasoning"}
107
108 // The rest of Qwen3 don't support reasoning but do tools
109 case strings.HasPrefix(model.ID, "hf:Qwen/Qwen3"):
110 model.SupportedFeatures = []string{"tools"}
111
112 // Has correct metadata already, but the following k2 matchers would
113 // override it to omit reasoning
114 case strings.HasPrefix(model.ID, "hf:moonshotai/Kimi-K2-Thinking"):
115 model.SupportedFeatures = []string{"tools", "reasoning"}
116
117 case strings.HasPrefix(model.ID, "hf:moonshotai/Kimi-K2.5"):
118 model.SupportedFeatures = []string{"tools", "reasoning"}
119
120 case strings.HasPrefix(model.ID, "hf:moonshotai/Kimi-K2"):
121 model.SupportedFeatures = []string{"tools"}
122
123 case strings.HasPrefix(model.ID, "hf:zai-org/GLM-4.5"):
124 model.SupportedFeatures = []string{"tools"}
125
126 case strings.HasPrefix(model.ID, "hf:openai/gpt-oss"):
127 model.SupportedFeatures = []string{"tools", "reasoning"}
128
129 case strings.HasPrefix(model.ID, "hf:MiniMaxAI/MiniMax-M2.1"):
130 model.SupportedFeatures = []string{"tools", "reasoning"}
131 }
132}
133
134func fetchSyntheticModels(apiEndpoint string) (*ModelsResponse, error) {
135 client := &http.Client{Timeout: 30 * time.Second}
136 req, _ := http.NewRequestWithContext(context.Background(), "GET", apiEndpoint+"/models", nil)
137 req.Header.Set("User-Agent", "Crush-Client/1.0")
138 resp, err := client.Do(req)
139 if err != nil {
140 return nil, err //nolint:wrapcheck
141 }
142 defer resp.Body.Close() //nolint:errcheck
143 if resp.StatusCode != 200 {
144 body, _ := io.ReadAll(resp.Body)
145 return nil, fmt.Errorf("status %d: %s", resp.StatusCode, body)
146 }
147 var mr ModelsResponse
148 if err := json.NewDecoder(resp.Body).Decode(&mr); err != nil {
149 return nil, err //nolint:wrapcheck
150 }
151 return &mr, nil
152}
153
154// This is used to generate the synthetic.json config file.
155func main() {
156 syntheticProvider := catwalk.Provider{
157 Name: "Synthetic",
158 ID: "synthetic",
159 APIKey: "$SYNTHETIC_API_KEY",
160 APIEndpoint: "https://api.synthetic.new/openai/v1",
161 Type: catwalk.TypeOpenAICompat,
162 DefaultLargeModelID: "hf:zai-org/GLM-4.7",
163 DefaultSmallModelID: "hf:deepseek-ai/DeepSeek-V3.2",
164 Models: []catwalk.Model{},
165 }
166
167 modelsResp, err := fetchSyntheticModels(syntheticProvider.APIEndpoint)
168 if err != nil {
169 log.Fatal("Error fetching Synthetic models:", err)
170 }
171
172 // Apply overrides for models missing supported_features metadata
173 for i := range modelsResp.Data {
174 applyModelOverrides(&modelsResp.Data[i])
175 }
176
177 for _, model := range modelsResp.Data {
178 // Skip models with small context windows
179 if model.ContextLength < 20000 {
180 continue
181 }
182
183 // Skip non-text models
184 if !slices.Contains(model.InputModalities, "text") ||
185 !slices.Contains(model.OutputModalities, "text") {
186 continue
187 }
188
189 // Ensure they support tools
190 supportsTools := slices.Contains(model.SupportedFeatures, "tools")
191 if !supportsTools {
192 continue
193 }
194
195 pricing := getPricing(model)
196 supportsImages := slices.Contains(model.InputModalities, "image")
197
198 // Check if model supports reasoning
199 canReason := slices.Contains(model.SupportedFeatures, "reasoning")
200 var reasoningLevels []string
201 var defaultReasoning string
202 if canReason {
203 reasoningLevels = []string{"low", "medium", "high"}
204 defaultReasoning = "medium"
205 }
206
207 // Strip everything before the first / for a cleaner name
208 modelName := model.Name
209 if idx := strings.Index(model.Name, "/"); idx != -1 {
210 modelName = model.Name[idx+1:]
211 }
212 // Replace hyphens with spaces
213 modelName = strings.ReplaceAll(modelName, "-", " ")
214
215 m := catwalk.Model{
216 ID: model.ID,
217 Name: modelName,
218 CostPer1MIn: pricing.CostPer1MIn,
219 CostPer1MOut: pricing.CostPer1MOut,
220 CostPer1MInCached: pricing.CostPer1MInCached,
221 CostPer1MOutCached: pricing.CostPer1MOutCached,
222 ContextWindow: model.ContextLength,
223 CanReason: canReason,
224 DefaultReasoningEffort: defaultReasoning,
225 ReasoningLevels: reasoningLevels,
226 SupportsImages: supportsImages,
227 }
228
229 // Set max tokens based on max_output_length if available, but cap at
230 // 15% of context length
231 maxFromOutput := model.MaxOutputLength / 2
232 maxAt15Pct := (model.ContextLength * 15) / 100
233 if model.MaxOutputLength > 0 && maxFromOutput <= maxAt15Pct {
234 m.DefaultMaxTokens = maxFromOutput
235 } else {
236 m.DefaultMaxTokens = model.ContextLength / 10
237 }
238
239 syntheticProvider.Models = append(syntheticProvider.Models, m)
240 fmt.Printf("Added model %s with context window %d\n",
241 model.ID, model.ContextLength)
242 }
243
244 slices.SortFunc(syntheticProvider.Models, func(a catwalk.Model, b catwalk.Model) int {
245 return strings.Compare(a.Name, b.Name)
246 })
247
248 // Save the JSON in internal/providers/configs/synthetic.json
249 data, err := json.MarshalIndent(syntheticProvider, "", " ")
250 if err != nil {
251 log.Fatal("Error marshaling Synthetic provider:", err)
252 }
253
254 if err := os.WriteFile("internal/providers/configs/synthetic.json", data, 0o600); err != nil {
255 log.Fatal("Error writing Synthetic provider config:", err)
256 }
257
258 fmt.Printf("Generated synthetic.json with %d models\n", len(syntheticProvider.Models))
259}