@@ -0,0 +1,127 @@
+// Package main provides a command-line tool to fetch models from Avian
+// and generate a configuration file for the provider.
+package main
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "io"
+ "log"
+ "net/http"
+ "os"
+ "slices"
+ "strings"
+ "time"
+
+ "charm.land/catwalk/pkg/catwalk"
+)
+
// Model represents a model from the Avian API.
type Model struct {
	ID            string  `json:"id"`             // provider-scoped identifier, e.g. "moonshotai/kimi-k2.5"
	DisplayName   string  `json:"display_name"`   // human-readable name shown to users
	ContextLength int64   `json:"context_length"` // context window size; presumably in tokens — TODO confirm with API docs
	MaxOutput     int64   `json:"max_output"`     // maximum completion size; used as the default max-tokens value
	Reasoning     bool    `json:"reasoning"`      // whether the model supports reasoning/thinking mode
	Pricing       Pricing `json:"pricing"`        // per-million-token pricing for this model
}
+
// Pricing contains the pricing information for a model.
// Values are costs per one million tokens; currency is not stated by
// the API response here — presumably USD, verify against Avian docs.
type Pricing struct {
	InputPerMillion     float64 `json:"input_per_million"`      // cost per 1M input (prompt) tokens
	OutputPerMillion    float64 `json:"output_per_million"`     // cost per 1M output (completion) tokens
	CacheReadPerMillion float64 `json:"cache_read_per_million"` // cost per 1M cached-input tokens read
}
+
// ModelsResponse is the response structure for the Avian models API.
// The API wraps the model list in a top-level "data" array.
type ModelsResponse struct {
	Data []Model `json:"data"`
}
+
+func fetchAvianModels() (*ModelsResponse, error) {
+ client := &http.Client{Timeout: 30 * time.Second}
+ req, _ := http.NewRequestWithContext(
+ context.Background(),
+ "GET",
+ "https://api.avian.io/v1/models",
+ nil,
+ )
+ req.Header.Set("User-Agent", "Crush-Client/1.0")
+ resp, err := client.Do(req)
+ if err != nil {
+ return nil, err //nolint:wrapcheck
+ }
+ defer resp.Body.Close() //nolint:errcheck
+ if resp.StatusCode != 200 {
+ body, _ := io.ReadAll(resp.Body)
+ return nil, fmt.Errorf("status %d: %s", resp.StatusCode, body)
+ }
+ var mr ModelsResponse
+ if err := json.NewDecoder(resp.Body).Decode(&mr); err != nil {
+ return nil, err //nolint:wrapcheck
+ }
+ return &mr, nil
+}
+
+func main() {
+ modelsResp, err := fetchAvianModels()
+ if err != nil {
+ log.Fatal("Error fetching Avian models:", err)
+ }
+
+ avianProvider := catwalk.Provider{
+ Name: "Avian",
+ ID: catwalk.InferenceProviderAvian,
+ APIKey: "$AVIAN_API_KEY",
+ APIEndpoint: "https://api.avian.io/v1",
+ Type: catwalk.TypeOpenAICompat,
+ DefaultLargeModelID: "moonshotai/kimi-k2.5",
+ DefaultSmallModelID: "deepseek/deepseek-v3.2",
+ Models: []catwalk.Model{},
+ }
+
+ for _, model := range modelsResp.Data {
+ var reasoningLevels []string
+ var defaultReasoning string
+ if model.Reasoning {
+ reasoningLevels = []string{"low", "medium", "high"}
+ defaultReasoning = "medium"
+ }
+
+ m := catwalk.Model{
+ ID: model.ID,
+ Name: model.DisplayName,
+ CostPer1MIn: model.Pricing.InputPerMillion,
+ CostPer1MOut: model.Pricing.OutputPerMillion,
+ CostPer1MInCached: model.Pricing.CacheReadPerMillion,
+ CostPer1MOutCached: 0,
+ ContextWindow: model.ContextLength,
+ DefaultMaxTokens: model.MaxOutput,
+ CanReason: model.Reasoning,
+ ReasoningLevels: reasoningLevels,
+ DefaultReasoningEffort: defaultReasoning,
+ SupportsImages: false,
+ }
+
+ avianProvider.Models = append(avianProvider.Models, m)
+ fmt.Printf("Added model %s with context window %d\n", model.ID, model.ContextLength)
+ }
+
+ slices.SortFunc(avianProvider.Models, func(a catwalk.Model, b catwalk.Model) int {
+ return strings.Compare(a.Name, b.Name)
+ })
+
+ // Save the JSON in internal/providers/configs/avian.json
+ data, err := json.MarshalIndent(avianProvider, "", " ")
+ if err != nil {
+ log.Fatal("Error marshaling Avian provider:", err)
+ }
+ data = append(data, '\n')
+
+ if err := os.WriteFile("internal/providers/configs/avian.json", data, 0o600); err != nil {
+ log.Fatal("Error writing Avian provider config:", err)
+ }
+
+ fmt.Printf("Generated avian.json with %d models\n", len(avianProvider.Models))
+}
@@ -1,35 +1,28 @@
{
"name": "Avian",
"id": "avian",
- "type": "openai-compat",
"api_key": "$AVIAN_API_KEY",
"api_endpoint": "https://api.avian.io/v1",
+ "type": "openai-compat",
"default_large_model_id": "moonshotai/kimi-k2.5",
"default_small_model_id": "deepseek/deepseek-v3.2",
"models": [
{
"id": "deepseek/deepseek-v3.2",
"name": "DeepSeek V3.2",
- "cost_per_1m_in": 0.28,
- "cost_per_1m_out": 0.42,
- "cost_per_1m_in_cached": 0.014,
- "cost_per_1m_out_cached": 0,
- "context_window": 164000,
- "default_max_tokens": 65000,
- "can_reason": false,
- "supports_attachments": false,
- "options": {}
- },
- {
- "id": "moonshotai/kimi-k2.5",
- "name": "Kimi K2.5",
- "cost_per_1m_in": 0.45,
- "cost_per_1m_out": 2.2,
- "cost_per_1m_in_cached": 0.225,
+ "cost_per_1m_in": 0.23,
+ "cost_per_1m_out": 0.33,
+ "cost_per_1m_in_cached": 0.012,
"cost_per_1m_out_cached": 0,
- "context_window": 262000,
+ "context_window": 163840,
"default_max_tokens": 65536,
"can_reason": true,
+ "reasoning_levels": [
+ "low",
+ "medium",
+ "high"
+ ],
+ "default_reasoning_effort": "medium",
"supports_attachments": false,
"options": {}
},
@@ -40,20 +33,33 @@
"cost_per_1m_out": 2.55,
"cost_per_1m_in_cached": 0.2,
"cost_per_1m_out_cached": 0,
- "context_window": 205000,
- "default_max_tokens": 65536,
- "can_reason": true,
+ "context_window": 204800,
+ "default_max_tokens": 131072,
+ "can_reason": false,
+ "supports_attachments": false,
+ "options": {}
+ },
+ {
+ "id": "moonshotai/kimi-k2.5",
+ "name": "Kimi K2.5",
+ "cost_per_1m_in": 0.45,
+ "cost_per_1m_out": 2.2,
+ "cost_per_1m_in_cached": 0.225,
+ "cost_per_1m_out_cached": 0,
+ "context_window": 262144,
+ "default_max_tokens": 262144,
+ "can_reason": false,
"supports_attachments": false,
"options": {}
},
{
"id": "minimax/minimax-m2.5",
"name": "MiniMax M2.5",
- "cost_per_1m_in": 0.3,
- "cost_per_1m_out": 1.1,
+ "cost_per_1m_in": 0.27,
+ "cost_per_1m_out": 1.08,
"cost_per_1m_in_cached": 0.15,
"cost_per_1m_out_cached": 0,
- "context_window": 200000,
+ "context_window": 196608,
"default_max_tokens": 131072,
"can_reason": false,
"supports_attachments": false,