1// Package main provides a command-line tool to fetch models from Synthetic
2// and generate a configuration file for the provider.
3package main
4
5import (
6 "context"
7 "encoding/json"
8 "fmt"
9 "io"
10 "log"
11 "net/http"
12 "os"
13 "slices"
14 "strconv"
15 "strings"
16 "time"
17
18 "github.com/charmbracelet/catwalk/pkg/catwalk"
19)
20
// Model represents a model from the Synthetic API.
type Model struct {
	ID                string   `json:"id"`                           // e.g. "hf:meta-llama/Llama-3.3-70B-Instruct" (HF-prefixed IDs, see applyModelOverrides)
	Name              string   `json:"name"`                         // display name; may contain an "org/" prefix that main strips
	InputModalities   []string `json:"input_modalities"`             // accepted inputs, e.g. "text", "image"
	OutputModalities  []string `json:"output_modalities"`            // produced outputs, e.g. "text"
	ContextLength     int64    `json:"context_length"`               // context window size (presumably tokens — TODO confirm with API docs)
	MaxOutputLength   int64    `json:"max_output_length,omitempty"`  // max completion length; 0 when the API omits it
	Pricing           Pricing  `json:"pricing"`                      // per-token dollar-string prices (see Pricing)
	SupportedFeatures []string `json:"supported_features,omitempty"` // e.g. "tools", "reasoning"; patched by applyModelOverrides when missing
}
32
// Pricing contains the pricing information for different operations.
// Each value is a dollar-amount string per single token or operation
// (e.g. "$0.00000055"), parsed by parsePrice.
type Pricing struct {
	Prompt           string `json:"prompt"`             // cost per input (prompt) token
	Completion       string `json:"completion"`         // cost per output (completion) token
	Image            string `json:"image"`              // cost per image input (unused by this generator)
	Request          string `json:"request"`            // flat per-request cost (unused by this generator)
	InputCacheReads  string `json:"input_cache_reads"`  // cost per cached-input token read
	InputCacheWrites string `json:"input_cache_writes"` // cost per cached-input token written
}
42
// ModelsResponse is the response structure for the Synthetic models API.
type ModelsResponse struct {
	Data []Model `json:"data"` // full model catalog returned by GET /models
}
47
// ModelPricing is the pricing structure for a model, detailing costs per
// million tokens for input and output, both cached and uncached.
type ModelPricing struct {
	CostPer1MIn        float64 `json:"cost_per_1m_in"`         // USD per 1M uncached input tokens
	CostPer1MOut       float64 `json:"cost_per_1m_out"`        // USD per 1M output tokens
	CostPer1MInCached  float64 `json:"cost_per_1m_in_cached"`  // USD per 1M cached input tokens
	CostPer1MOutCached float64 `json:"cost_per_1m_out_cached"` // USD per 1M cached output tokens
}
56
// parsePrice extracts a float from Synthetic's price format (e.g. "$0.00000055").
// A value that cannot be parsed (including the empty string) is reported as 0,
// i.e. treated as free rather than aborting generation.
func parsePrice(s string) float64 {
	value, err := strconv.ParseFloat(strings.TrimPrefix(s, "$"), 64)
	if err != nil {
		return 0
	}
	return value
}
66
67func getPricing(model Model) ModelPricing {
68 return ModelPricing{
69 CostPer1MIn: parsePrice(model.Pricing.Prompt) * 1_000_000,
70 CostPer1MOut: parsePrice(model.Pricing.Completion) * 1_000_000,
71 CostPer1MInCached: parsePrice(model.Pricing.InputCacheReads) * 1_000_000,
72 CostPer1MOutCached: parsePrice(model.Pricing.InputCacheReads) * 1_000_000,
73 }
74}
75
76// applyModelOverrides sets supported_features for models where Synthetic
77// omits this metadata.
78// TODO: Remove this when they add the missing metadata.
79func applyModelOverrides(model *Model) {
80 switch {
81 // All of llama support tools, none do reasoning yet
82 case strings.HasPrefix(model.ID, "hf:meta-llama/Llama-"):
83 model.SupportedFeatures = []string{"tools"}
84
85 case strings.HasPrefix(model.ID, "hf:deepseek-ai/DeepSeek-R1"):
86 model.SupportedFeatures = []string{"tools", "reasoning"}
87
88 case strings.HasPrefix(model.ID, "hf:deepseek-ai/DeepSeek-V3.1"):
89 model.SupportedFeatures = []string{"tools", "reasoning"}
90
91 case strings.HasPrefix(model.ID, "hf:deepseek-ai/DeepSeek-V3.2"):
92 model.SupportedFeatures = []string{"tools", "reasoning"}
93
94 case strings.HasPrefix(model.ID, "hf:deepseek-ai/DeepSeek-V3"):
95 model.SupportedFeatures = []string{"tools"}
96
97 case strings.HasPrefix(model.ID, "hf:Qwen/Qwen3-235B-A22B-Thinking"):
98 model.SupportedFeatures = []string{"tools", "reasoning"}
99
100 case strings.HasPrefix(model.ID, "hf:Qwen/Qwen3-235B-A22B-Instruct"):
101 model.SupportedFeatures = []string{"tools", "reasoning"}
102
103 // The rest of Qwen3 don't support reasoning but do tools
104 case strings.HasPrefix(model.ID, "hf:Qwen/Qwen3"):
105 model.SupportedFeatures = []string{"tools"}
106
107 // Has correct metadata already, but the Kimi-K2 matcher (next) would
108 // override it to omit reasoning
109 case strings.HasPrefix(model.ID, "hf:moonshotai/Kimi-K2-Thinking"):
110 model.SupportedFeatures = []string{"tools", "reasoning"}
111
112 case strings.HasPrefix(model.ID, "hf:moonshotai/Kimi-K2.5"):
113 model.SupportedFeatures = []string{"tools", "reasoning"}
114
115 case strings.HasPrefix(model.ID, "hf:moonshotai/Kimi-K2"):
116 model.SupportedFeatures = []string{"tools"}
117
118 case strings.HasPrefix(model.ID, "hf:zai-org/GLM-4.5"):
119 model.SupportedFeatures = []string{"tools"}
120
121 case strings.HasPrefix(model.ID, "hf:openai/gpt-oss"):
122 model.SupportedFeatures = []string{"tools"}
123 }
124}
125
126func fetchSyntheticModels(apiEndpoint string) (*ModelsResponse, error) {
127 client := &http.Client{Timeout: 30 * time.Second}
128 req, _ := http.NewRequestWithContext(context.Background(), "GET", apiEndpoint+"/models", nil)
129 req.Header.Set("User-Agent", "Crush-Client/1.0")
130 resp, err := client.Do(req)
131 if err != nil {
132 return nil, err //nolint:wrapcheck
133 }
134 defer resp.Body.Close() //nolint:errcheck
135 if resp.StatusCode != 200 {
136 body, _ := io.ReadAll(resp.Body)
137 return nil, fmt.Errorf("status %d: %s", resp.StatusCode, body)
138 }
139 var mr ModelsResponse
140 if err := json.NewDecoder(resp.Body).Decode(&mr); err != nil {
141 return nil, err //nolint:wrapcheck
142 }
143 return &mr, nil
144}
145
// This is used to generate the synthetic.json config file.
//
// It fetches the live model catalog from the Synthetic API, filters it down
// to text-in/text-out models that support tools and have a context window of
// at least 20k, converts pricing into catwalk's per-million-token format, and
// writes the provider config to internal/providers/configs/synthetic.json
// (the path assumes the tool is run from the repository root).
func main() {
	// Static provider metadata; Models is filled in from the API below.
	syntheticProvider := catwalk.Provider{
		Name:                "Synthetic",
		ID:                  "synthetic",
		APIKey:              "$SYNTHETIC_API_KEY", // placeholder resolved from the environment by the consumer
		APIEndpoint:         "https://api.synthetic.new/openai/v1",
		Type:                catwalk.TypeOpenAICompat,
		DefaultLargeModelID: "hf:zai-org/GLM-4.7",
		DefaultSmallModelID: "hf:deepseek-ai/DeepSeek-V3.1-Terminus",
		Models:              []catwalk.Model{},
	}

	modelsResp, err := fetchSyntheticModels(syntheticProvider.APIEndpoint)
	if err != nil {
		log.Fatal("Error fetching Synthetic models:", err)
	}

	// Apply overrides for models missing supported_features metadata.
	// Done in-place via index so the patched features are seen below.
	for i := range modelsResp.Data {
		applyModelOverrides(&modelsResp.Data[i])
	}

	for _, model := range modelsResp.Data {
		// Skip models with small context windows
		if model.ContextLength < 20000 {
			continue
		}

		// Skip non-text models (must accept AND produce text)
		if !slices.Contains(model.InputModalities, "text") ||
			!slices.Contains(model.OutputModalities, "text") {
			continue
		}

		// Ensure they support tools
		supportsTools := slices.Contains(model.SupportedFeatures, "tools")
		if !supportsTools {
			continue
		}

		pricing := getPricing(model)
		supportsImages := slices.Contains(model.InputModalities, "image")

		// Check if model supports reasoning; reasoning models get the
		// standard low/medium/high effort levels, defaulting to "medium".
		canReason := slices.Contains(model.SupportedFeatures, "reasoning")
		var reasoningLevels []string
		var defaultReasoning string
		if canReason {
			reasoningLevels = []string{"low", "medium", "high"}
			defaultReasoning = "medium"
		}

		// Strip everything before the first / for a cleaner name
		// (e.g. "deepseek-ai/DeepSeek-V3" -> "DeepSeek-V3")
		modelName := model.Name
		if idx := strings.Index(model.Name, "/"); idx != -1 {
			modelName = model.Name[idx+1:]
		}
		// Replace hyphens with spaces
		modelName = strings.ReplaceAll(modelName, "-", " ")

		m := catwalk.Model{
			ID:                     model.ID,
			Name:                   modelName,
			CostPer1MIn:            pricing.CostPer1MIn,
			CostPer1MOut:           pricing.CostPer1MOut,
			CostPer1MInCached:      pricing.CostPer1MInCached,
			CostPer1MOutCached:     pricing.CostPer1MOutCached,
			ContextWindow:          model.ContextLength,
			CanReason:              canReason,
			DefaultReasoningEffort: defaultReasoning,
			ReasoningLevels:        reasoningLevels,
			SupportsImages:         supportsImages,
		}

		// Set max tokens based on max_output_length if available, but cap at
		// 15% of context length; otherwise fall back to 10% of the context.
		// NOTE(review): the advertised max output is halved before the cap
		// check — presumably a deliberate safety margin; confirm intent.
		maxFromOutput := model.MaxOutputLength / 2
		maxAt15Pct := (model.ContextLength * 15) / 100
		if model.MaxOutputLength > 0 && maxFromOutput <= maxAt15Pct {
			m.DefaultMaxTokens = maxFromOutput
		} else {
			m.DefaultMaxTokens = model.ContextLength / 10
		}

		syntheticProvider.Models = append(syntheticProvider.Models, m)
		fmt.Printf("Added model %s with context window %d\n",
			model.ID, model.ContextLength)
	}

	// Sort by display name so the generated file is stable across runs.
	slices.SortFunc(syntheticProvider.Models, func(a catwalk.Model, b catwalk.Model) int {
		return strings.Compare(a.Name, b.Name)
	})

	// Save the JSON in internal/providers/configs/synthetic.json
	data, err := json.MarshalIndent(syntheticProvider, "", " ")
	if err != nil {
		log.Fatal("Error marshaling Synthetic provider:", err)
	}

	if err := os.WriteFile("internal/providers/configs/synthetic.json", data, 0o600); err != nil {
		log.Fatal("Error writing Synthetic provider config:", err)
	}

	fmt.Printf("Generated synthetic.json with %d models\n", len(syntheticProvider.Models))
}