1// Package main provides a command-line tool to generate a configuration file
2// for the Cortecs provider, which is OpenAI compatible.
3package main
4
5import (
6 "context"
7 "encoding/json"
8 "fmt"
9 "io"
10 "log"
11 "net/http"
12 "os"
13 "strings"
14 "time"
15
16 "charm.land/catwalk/pkg/catwalk"
17)
18
// CortecsModel is one entry of the Cortecs /v1/models list response.
type CortecsModel struct {
	ID          string   `json:"id"`
	ContextSize int64    `json:"context_size"`
	Tags        []string `json:"tags,omitempty"`
}

// hasTag reports whether the model carries the given tag,
// compared case-insensitively. A nil/empty tag list matches nothing.
func (m CortecsModel) hasTag(tagValue string) bool {
	// Ranging over a nil slice is a no-op, so no nil check is needed.
	for _, tag := range m.Tags {
		if strings.EqualFold(tag, tagValue) {
			return true
		}
	}
	return false
}
35
// ModelsResponse is the envelope of the Cortecs /v1/models list endpoint.
type ModelsResponse struct {
	// Data holds the available models.
	Data []CortecsModel `json:"data"`
}
39
// ModelDetailResponse is the envelope of the Cortecs /v1/models/{id} endpoint.
type ModelDetailResponse struct {
	Model ModelDetail `json:"model"`
}
43
// ModelDetail carries the per-model fields used from the detail endpoint.
type ModelDetail struct {
	// ScreenName is the human-readable display name of the model.
	ScreenName string `json:"screen_name"`
	// Context is the model's context window size.
	Context int64 `json:"context"`
	// NOTE(review): these fields are consumed as CostPer1MIn/CostPer1MOut,
	// yet the JSON keys are "input_tokens"/"output_tokens" — presumably the
	// API reports per-1M-token pricing under these keys; verify against the
	// Cortecs API docs.
	InputCost  float64 `json:"input_tokens"`
	OutputCost float64 `json:"output_tokens"`
}
50
51// This is used to generate the cortecs.json config file.
52func main() {
53 client := &http.Client{Timeout: 30 * time.Second}
54 req, _ := http.NewRequestWithContext(
55 context.Background(),
56 "GET",
57 "https://api.cortecs.ai/v1/models",
58 nil,
59 )
60 req.Header.Set("User-Agent", "Crush-Client/1.0")
61
62 resp, err := client.Do(req)
63 if err != nil {
64 log.Fatal("Error fetching Cortecs models:", err)
65 }
66 defer resp.Body.Close() //nolint:errcheck
67
68 body, err := io.ReadAll(resp.Body)
69 if err != nil {
70 log.Fatal("Error reading Cortecs models response:", err)
71 }
72
73 if resp.StatusCode != http.StatusOK {
74 log.Fatalf("Error fetching Cortecs models: status %d: %s", resp.StatusCode, body)
75 }
76
77 var modelsResp ModelsResponse
78 if err := json.Unmarshal(body, &modelsResp); err != nil {
79 log.Fatal("Error parsing Cortecs models response:", err)
80 }
81
82 var models []catwalk.Model
83 for _, model := range modelsResp.Data {
84 // we skip models that don't support tool calling
85 if !model.hasTag("Tools") {
86 continue
87 }
88
89 // Fetch individual model details to get screen_name
90 detailReq, _ := http.NewRequestWithContext(
91 context.Background(),
92 "GET",
93 fmt.Sprintf("https://api.cortecs.ai/v1/models/%s", model.ID),
94 nil,
95 )
96 detailReq.Header.Set("User-Agent", "Crush-Client/1.0")
97
98 detailResp, err := client.Do(detailReq)
99 if err != nil {
100 log.Printf("Warning: Error fetching details for model %s: %v", model.ID, err)
101 // Continue with default model.ID as name if we can't get details
102 continue
103 }
104 defer func() {
105 if err := detailResp.Body.Close(); err != nil {
106 log.Printf("Warning: Error closing response body for model %s: %v", model.ID, err)
107 }
108 }()
109
110 detailBody, err := io.ReadAll(detailResp.Body)
111 if err != nil {
112 log.Printf("Warning: Error reading details for model %s: %v", model.ID, err)
113 continue
114 }
115
116 if detailResp.StatusCode != http.StatusOK {
117 log.Printf("Warning: Error fetching details for model %s: status %d: %s", model.ID, detailResp.StatusCode, detailBody)
118 continue
119 }
120
121 var detailRespData ModelDetailResponse
122 if err := json.Unmarshal(detailBody, &detailRespData); err != nil {
123 log.Printf("Warning: Error parsing details for model %s: %v", model.ID, err)
124 continue
125 }
126
127 var (
128 canReason = model.hasTag("Reasoning")
129 reasoningLevels []string
130 defaultReasoning string
131 )
132 if canReason {
133 reasoningLevels = []string{"low", "medium", "high"}
134 defaultReasoning = "medium"
135 }
136
137 model := catwalk.Model{
138 ID: model.ID,
139 Name: detailRespData.Model.ScreenName,
140 ContextWindow: detailRespData.Model.Context,
141 CostPer1MIn: detailRespData.Model.InputCost,
142 CostPer1MOut: detailRespData.Model.OutputCost,
143 CostPer1MInCached: 0,
144 CostPer1MOutCached: 0,
145 DefaultMaxTokens: model.ContextSize / 10,
146 CanReason: canReason,
147 DefaultReasoningEffort: defaultReasoning,
148 ReasoningLevels: reasoningLevels,
149 SupportsImages: model.hasTag("Image"),
150 }
151 models = append(models, model)
152 fmt.Printf("Added model %s (%s)\n", model.ID, model.Name)
153 }
154
155 cortecsProvider := catwalk.Provider{
156 Name: "Cortecs",
157 ID: "cortecs",
158 APIKey: "$CORTECS_API_KEY",
159 APIEndpoint: "https://api.cortecs.ai/v1",
160 Type: catwalk.TypeOpenAI,
161 DefaultLargeModelID: "qwen3-coder-30b-a3b-instruct",
162 DefaultSmallModelID: "glm-4.7-flash",
163 Models: models,
164 }
165
166 data, err := json.MarshalIndent(cortecsProvider, "", " ")
167 if err != nil {
168 log.Fatal("Error marshaling Cortecs provider:", err)
169 }
170 data = append(data, '\n')
171
172 if err := os.WriteFile("./internal/providers/configs/cortecs.json", data, 0o600); err != nil {
173 log.Fatal("Error writing Cortecs provider config:", err)
174 }
175
176 fmt.Println("Cortecs provider configuration generated successfully!")
177}