chore: update openrouter models and script

Author: Kujtim Hoxha

Change summary

cmd/openrouter/main.go                     | 173 +++++++
internal/providers/configs/openrouter.json | 516 +++++------------------
2 files changed, 295 insertions(+), 394 deletions(-)

Detailed changes

cmd/openrouter/main.go

@@ -60,6 +60,32 @@ type TopProvider struct {
 	IsModerated         bool   `json:"is_moderated"`
 }
 
+// Endpoint represents a single endpoint configuration for a model.
+type Endpoint struct {
+	Name                string   `json:"name"`
+	ContextLength       int64    `json:"context_length"`
+	Pricing             Pricing  `json:"pricing"`
+	ProviderName        string   `json:"provider_name"`
+	Tag                 string   `json:"tag"`
+	Quantization        *string  `json:"quantization"`
+	MaxCompletionTokens *int64   `json:"max_completion_tokens"`
+	MaxPromptTokens     *int64   `json:"max_prompt_tokens"`
+	SupportedParams     []string `json:"supported_parameters"`
+	Status              int      `json:"status"`
+	UptimeLast30m       float64  `json:"uptime_last_30m"`
+}
+
+// EndpointsResponse is the response structure for the endpoints API.
+type EndpointsResponse struct {
+	Data struct {
+		ID          string     `json:"id"`
+		Name        string     `json:"name"`
+		Created     int64      `json:"created"`
+		Description string     `json:"description"`
+		Endpoints   []Endpoint `json:"endpoints"`
+	} `json:"data"`
+}
+
 // ModelsResponse is the response structure for the models API.
 type ModelsResponse struct {
 	Data []Model `json:"data"`
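
For reference, a hand-written sketch of the payload these new structs decode, with illustrative values (the pricing keys mirror the existing Pricing struct, which this diff does not show, so treat them as assumptions):

{
  "data": {
    "id": "moonshotai/kimi-k2",
    "name": "MoonshotAI: Kimi K2",
    "created": 1752000000,
    "description": "...",
    "endpoints": [
      {
        "name": "Moonshot AI | moonshotai/kimi-k2",
        "context_length": 131072,
        "pricing": { "prompt": "0.0000006", "completion": "0.0000025" },
        "provider_name": "Moonshot AI",
        "tag": "moonshot",
        "quantization": null,
        "max_completion_tokens": null,
        "max_prompt_tokens": null,
        "supported_parameters": ["tools", "temperature"],
        "status": 0,
        "uptime_last_30m": 99.5
      }
    ]
  }
}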
@@ -125,6 +151,69 @@ func fetchOpenRouterModels() (*ModelsResponse, error) {
 	return &mr, nil
 }
 
+func fetchModelEndpoints(modelID string) (*EndpointsResponse, error) {
+	client := &http.Client{Timeout: 30 * time.Second}
+	url := fmt.Sprintf("https://openrouter.ai/api/v1/models/%s/endpoints", modelID)
+	req, _ := http.NewRequestWithContext(
+		context.Background(),
+		"GET",
+		url,
+		nil,
+	)
+	req.Header.Set("User-Agent", "Crush-Client/1.0")
+	resp, err := client.Do(req)
+	if err != nil {
+		return nil, err //nolint:wrapcheck
+	}
+	defer resp.Body.Close() //nolint:errcheck
+	if resp.StatusCode != 200 {
+		body, _ := io.ReadAll(resp.Body)
+		return nil, fmt.Errorf("status %d: %s", resp.StatusCode, body)
+	}
+	var er EndpointsResponse
+	if err := json.NewDecoder(resp.Body).Decode(&er); err != nil {
+		return nil, err //nolint:wrapcheck
+	}
+	return &er, nil
+}
+
+func selectBestEndpoint(endpoints []Endpoint) *Endpoint {
+	if len(endpoints) == 0 {
+		return nil
+	}
+	
+	var best *Endpoint
+	for i := range endpoints {
+		endpoint := &endpoints[i]
+		// Skip endpoints with poor status or uptime
+		if endpoint.Status < 0 || endpoint.UptimeLast30m < 90.0 {
+			continue
+		}
+		
+		if best == nil {
+			best = endpoint
+			continue
+		}
+		
+		// Prefer higher context length
+		if endpoint.ContextLength > best.ContextLength {
+			best = endpoint
+		} else if endpoint.ContextLength == best.ContextLength {
+			// If context length is the same, prefer better uptime
+			if endpoint.UptimeLast30m > best.UptimeLast30m {
+				best = endpoint
+			}
+		}
+	}
+	
+	// If no good endpoint found, return the first one as fallback
+	if best == nil {
+		best = &endpoints[0]
+	}
+	
+	return best
+}
+
 // This is used to generate the openrouter.json config file.
 func main() {
 	modelsResp, err := fetchOpenRouterModels()
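
The selection policy above (drop endpoints with negative status or uptime below 90%, prefer the largest context window, break ties on uptime) can be pinned down with a small table test; a minimal sketch, assuming it sits next to main.go in package main (names and values are illustrative):

package main

import "testing"

func TestSelectBestEndpoint(t *testing.T) {
	endpoints := []Endpoint{
		// Healthy but smallest context window.
		{ProviderName: "small", ContextLength: 32768, Status: 0, UptimeLast30m: 99.0},
		// Largest usable context so far; becomes the front-runner.
		{ProviderName: "flaky", ContextLength: 131072, Status: 0, UptimeLast30m: 95.0},
		// Same context as "flaky", better uptime: wins the tie-break.
		{ProviderName: "winner", ContextLength: 131072, Status: 0, UptimeLast30m: 99.9},
		// Filtered out despite the biggest context: negative status.
		{ProviderName: "down", ContextLength: 262144, Status: -1, UptimeLast30m: 99.9},
	}
	if best := selectBestEndpoint(endpoints); best == nil || best.ProviderName != "winner" {
		t.Fatalf("expected provider %q, got %+v", "winner", best)
	}
}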
@@ -151,8 +240,75 @@ func main() {
 			continue
 		}
 
-		pricing := getPricing(model)
-		canReason := slices.Contains(model.SupportedParams, "reasoning")
+		// Fetch endpoints for this model to get the best configuration
+		endpointsResp, err := fetchModelEndpoints(model.ID)
+		if err != nil {
+			fmt.Printf("Warning: Failed to fetch endpoints for %s: %v\n", model.ID, err)
+			// Fall back to using the original model data
+			pricing := getPricing(model)
+			canReason := slices.Contains(model.SupportedParams, "reasoning")
+			supportsImages := slices.Contains(model.Architecture.InputModalities, "image")
+
+			m := catwalk.Model{
+				ID:                 model.ID,
+				Name:               model.Name,
+				CostPer1MIn:        pricing.CostPer1MIn,
+				CostPer1MOut:       pricing.CostPer1MOut,
+				CostPer1MInCached:  pricing.CostPer1MInCached,
+				CostPer1MOutCached: pricing.CostPer1MOutCached,
+				ContextWindow:      model.ContextLength,
+				CanReason:          canReason,
+				SupportsImages:     supportsImages,
+			}
+			if model.TopProvider.MaxCompletionTokens != nil {
+				m.DefaultMaxTokens = *model.TopProvider.MaxCompletionTokens / 2
+			} else {
+				m.DefaultMaxTokens = model.ContextLength / 10
+			}
+			if model.TopProvider.ContextLength > 0 {
+				m.ContextWindow = model.TopProvider.ContextLength
+			}
+			openRouterProvider.Models = append(openRouterProvider.Models, m)
+			continue
+		}
+
+		// Select the best endpoint
+		bestEndpoint := selectBestEndpoint(endpointsResp.Data.Endpoints)
+		if bestEndpoint == nil {
+			fmt.Printf("Warning: No suitable endpoint found for %s\n", model.ID)
+			continue
+		}
+
+		// Check if the best endpoint supports tools
+		if !slices.Contains(bestEndpoint.SupportedParams, "tools") {
+			continue
+		}
+
+		// Use the best endpoint's configuration
+		pricing := ModelPricing{}
+		costPrompt, err := strconv.ParseFloat(bestEndpoint.Pricing.Prompt, 64)
+		if err != nil {
+			costPrompt = 0.0
+		}
+		pricing.CostPer1MIn = costPrompt * 1_000_000
+		costCompletion, err := strconv.ParseFloat(bestEndpoint.Pricing.Completion, 64)
+		if err != nil {
+			costCompletion = 0.0
+		}
+		pricing.CostPer1MOut = costCompletion * 1_000_000
+
+		costPromptCached, err := strconv.ParseFloat(bestEndpoint.Pricing.InputCacheWrite, 64)
+		if err != nil {
+			costPromptCached = 0.0
+		}
+		pricing.CostPer1MInCached = costPromptCached * 1_000_000
+		costCompletionCached, err := strconv.ParseFloat(bestEndpoint.Pricing.InputCacheRead, 64)
+		if err != nil {
+			costCompletionCached = 0.0
+		}
+		pricing.CostPer1MOutCached = costCompletionCached * 1_000_000
+
+		canReason := slices.Contains(bestEndpoint.SupportedParams, "reasoning")
 		supportsImages := slices.Contains(model.Architecture.InputModalities, "image")
 
 		m := catwalk.Model{
@@ -162,16 +318,21 @@ func main() {
 			CostPer1MOut:       pricing.CostPer1MOut,
 			CostPer1MInCached:  pricing.CostPer1MInCached,
 			CostPer1MOutCached: pricing.CostPer1MOutCached,
-			ContextWindow:      model.ContextLength,
+			ContextWindow:      bestEndpoint.ContextLength,
 			CanReason:          canReason,
 			SupportsImages:     supportsImages,
 		}
-		if model.TopProvider.MaxCompletionTokens != nil {
-			m.DefaultMaxTokens = *model.TopProvider.MaxCompletionTokens / 2
+
+		// Set max tokens based on the best endpoint
+		if bestEndpoint.MaxCompletionTokens != nil {
+			m.DefaultMaxTokens = *bestEndpoint.MaxCompletionTokens / 2
 		} else {
-			m.DefaultMaxTokens = model.ContextLength / 10
+			m.DefaultMaxTokens = bestEndpoint.ContextLength / 10
 		}
+
 		openRouterProvider.Models = append(openRouterProvider.Models, m)
+		fmt.Printf("Added model %s with context window %d from provider %s\n", 
+			model.ID, bestEndpoint.ContextLength, bestEndpoint.ProviderName)
 	}
 
 	// save the json in internal/providers/config/openrouter.json
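
The per-1M costs written into the regenerated JSON below are the endpoint's per-token string prices run through strconv.ParseFloat and scaled by 1_000_000; that scaling is also where the float noise in the data (0.39999999999999997 instead of 0.4, and similar) comes from. A standalone sketch of the conversion:

package main

import (
	"fmt"
	"strconv"
)

func main() {
	// OpenRouter reports endpoint prices per token as decimal strings.
	perToken, err := strconv.ParseFloat("0.0000006", 64)
	if err != nil {
		perToken = 0.0 // same silent zero fallback the generator uses
	}
	fmt.Println(perToken * 1_000_000) // cost per 1M tokens, e.g. 0.6

	// Not every decimal survives the round trip through binary floats:
	// a per-token price of "0.0000004" can surface as
	// 0.39999999999999997 per 1M, matching the artifacts below.
	cached, _ := strconv.ParseFloat("0.0000004", 64)
	fmt.Println(cached * 1_000_000)
}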

internal/providers/configs/openrouter.json

@@ -7,13 +7,26 @@
   "default_large_model_id": "anthropic/claude-sonnet-4",
   "default_small_model_id": "anthropic/claude-3.5-haiku",
   "models": [
+    {
+      "id": "qwen/qwen3-coder:free",
+      "name": "Qwen: Qwen3 Coder  (free)",
+      "cost_per_1m_in": 0,
+      "cost_per_1m_out": 0,
+      "cost_per_1m_in_cached": 0,
+      "cost_per_1m_out_cached": 0,
+      "context_window": 262144,
+      "default_max_tokens": 26214,
+      "can_reason": false,
+      "has_reasoning_efforts": false,
+      "supports_attachments": false
+    },
     {
       "id": "qwen/qwen3-coder",
       "name": "Qwen: Qwen3 Coder ",
       "cost_per_1m_in": 1,
       "cost_per_1m_out": 5,
       "cost_per_1m_in_cached": 0,
-      "cost_per_1m_out_cached": 0,
+      "cost_per_1m_out_cached": 0.39999999999999997,
       "context_window": 1000000,
       "default_max_tokens": 32768,
       "can_reason": false,
@@ -46,28 +59,15 @@
       "has_reasoning_efforts": false,
       "supports_attachments": false
     },
-    {
-      "id": "moonshotai/kimi-k2:free",
-      "name": "MoonshotAI: Kimi K2 (free)",
-      "cost_per_1m_in": 0,
-      "cost_per_1m_out": 0,
-      "cost_per_1m_in_cached": 0,
-      "cost_per_1m_out_cached": 0,
-      "context_window": 65536,
-      "default_max_tokens": 6553,
-      "can_reason": false,
-      "has_reasoning_efforts": false,
-      "supports_attachments": false
-    },
     {
       "id": "moonshotai/kimi-k2",
       "name": "MoonshotAI: Kimi K2",
-      "cost_per_1m_in": 0.14,
-      "cost_per_1m_out": 2.4899999999999998,
+      "cost_per_1m_in": 0.6,
+      "cost_per_1m_out": 2.5,
       "cost_per_1m_in_cached": 0,
       "cost_per_1m_out_cached": 0,
-      "context_window": 63000,
-      "default_max_tokens": 31500,
+      "context_window": 131072,
+      "default_max_tokens": 13107,
       "can_reason": false,
       "has_reasoning_efforts": false,
       "supports_attachments": false
@@ -88,12 +88,12 @@
     {
       "id": "mistralai/devstral-small",
       "name": "Mistral: Devstral Small 1.1",
-      "cost_per_1m_in": 0.07,
-      "cost_per_1m_out": 0.28,
+      "cost_per_1m_in": 0.09999999999999999,
+      "cost_per_1m_out": 0.3,
       "cost_per_1m_in_cached": 0,
       "cost_per_1m_out_cached": 0,
-      "context_window": 128000,
-      "default_max_tokens": 12800,
+      "context_window": 131072,
+      "default_max_tokens": 13107,
       "can_reason": false,
       "has_reasoning_efforts": false,
       "supports_attachments": false
@@ -124,24 +124,11 @@
       "has_reasoning_efforts": false,
       "supports_attachments": true
     },
-    {
-      "id": "mistralai/mistral-small-3.2-24b-instruct",
-      "name": "Mistral: Mistral Small 3.2 24B",
-      "cost_per_1m_in": 0.049999999999999996,
-      "cost_per_1m_out": 0.09999999999999999,
-      "cost_per_1m_in_cached": 0,
-      "cost_per_1m_out_cached": 0,
-      "context_window": 128000,
-      "default_max_tokens": 12800,
-      "can_reason": false,
-      "has_reasoning_efforts": false,
-      "supports_attachments": true
-    },
     {
       "id": "minimax/minimax-m1",
       "name": "MiniMax: MiniMax M1",
-      "cost_per_1m_in": 0.3,
-      "cost_per_1m_out": 1.6500000000000001,
+      "cost_per_1m_in": 0.55,
+      "cost_per_1m_out": 2.2,
       "cost_per_1m_in_cached": 0,
       "cost_per_1m_out_cached": 0,
       "context_window": 1000000,
@@ -205,10 +192,10 @@
     {
       "id": "x-ai/grok-3-mini",
       "name": "xAI: Grok 3 Mini",
-      "cost_per_1m_in": 0.3,
-      "cost_per_1m_out": 0.5,
+      "cost_per_1m_in": 0.6,
+      "cost_per_1m_out": 4,
       "cost_per_1m_in_cached": 0,
-      "cost_per_1m_out_cached": 0.075,
+      "cost_per_1m_out_cached": 0.15,
       "context_window": 131072,
       "default_max_tokens": 13107,
       "can_reason": true,
@@ -280,19 +267,6 @@
       "has_reasoning_efforts": false,
       "supports_attachments": true
     },
-    {
-      "id": "deepseek/deepseek-r1-0528",
-      "name": "DeepSeek: R1 0528",
-      "cost_per_1m_in": 0.272,
-      "cost_per_1m_out": 0.272,
-      "cost_per_1m_in_cached": 0,
-      "cost_per_1m_out_cached": 0,
-      "context_window": 163840,
-      "default_max_tokens": 16384,
-      "can_reason": true,
-      "has_reasoning_efforts": false,
-      "supports_attachments": false
-    },
     {
       "id": "anthropic/claude-opus-4",
       "name": "Anthropic: Claude Opus 4",
@@ -335,12 +309,12 @@
     {
       "id": "mistralai/devstral-small-2505",
       "name": "Mistral: Devstral Small 2505",
-      "cost_per_1m_in": 0.03,
-      "cost_per_1m_out": 0.03,
+      "cost_per_1m_in": 0.09999999999999999,
+      "cost_per_1m_out": 0.3,
       "cost_per_1m_in_cached": 0,
       "cost_per_1m_out_cached": 0,
-      "context_window": 32768,
-      "default_max_tokens": 3276,
+      "context_window": 131072,
+      "default_max_tokens": 13107,
       "can_reason": false,
       "has_reasoning_efforts": false,
       "supports_attachments": false
@@ -379,7 +353,7 @@
       "cost_per_1m_in_cached": 1.625,
       "cost_per_1m_out_cached": 0.31,
       "context_window": 1048576,
-      "default_max_tokens": 32767,
+      "default_max_tokens": 32768,
       "can_reason": true,
       "has_reasoning_efforts": false,
       "supports_attachments": true
@@ -439,12 +413,12 @@
     {
       "id": "qwen/qwen3-30b-a3b",
       "name": "Qwen: Qwen3 30B A3B",
-      "cost_per_1m_in": 0.08,
-      "cost_per_1m_out": 0.29,
+      "cost_per_1m_in": 0.15,
+      "cost_per_1m_out": 0.6,
       "cost_per_1m_in_cached": 0,
       "cost_per_1m_out_cached": 0,
-      "context_window": 40960,
-      "default_max_tokens": 20480,
+      "context_window": 131072,
+      "default_max_tokens": 13107,
       "can_reason": true,
       "has_reasoning_efforts": false,
       "supports_attachments": false
@@ -465,12 +439,12 @@
     {
       "id": "qwen/qwen3-32b",
       "name": "Qwen: Qwen3 32B",
-      "cost_per_1m_in": 0.027,
-      "cost_per_1m_out": 0.027,
+      "cost_per_1m_in": 0.39999999999999997,
+      "cost_per_1m_out": 0.7999999999999999,
       "cost_per_1m_in_cached": 0,
       "cost_per_1m_out_cached": 0,
-      "context_window": 40960,
-      "default_max_tokens": 4096,
+      "context_window": 131072,
+      "default_max_tokens": 16384,
       "can_reason": true,
       "has_reasoning_efforts": false,
       "supports_attachments": false
@@ -484,7 +458,7 @@
       "cost_per_1m_out_cached": 0,
       "context_window": 131072,
       "default_max_tokens": 13107,
-      "can_reason": true,
+      "can_reason": false,
       "has_reasoning_efforts": false,
       "supports_attachments": false
     },
@@ -595,10 +569,10 @@
     {
       "id": "x-ai/grok-3-beta",
       "name": "xAI: Grok 3 Beta",
-      "cost_per_1m_in": 3,
-      "cost_per_1m_out": 15,
+      "cost_per_1m_in": 5,
+      "cost_per_1m_out": 25,
       "cost_per_1m_in_cached": 0,
-      "cost_per_1m_out_cached": 0.75,
+      "cost_per_1m_out_cached": 1.25,
       "context_window": 131072,
       "default_max_tokens": 13107,
       "can_reason": false,
@@ -609,11 +583,11 @@
       "id": "meta-llama/llama-4-maverick",
       "name": "Meta: Llama 4 Maverick",
       "cost_per_1m_in": 0.15,
-      "cost_per_1m_out": 0.6,
+      "cost_per_1m_out": 0.85,
       "cost_per_1m_in_cached": 0,
       "cost_per_1m_out_cached": 0,
       "context_window": 1048576,
-      "default_max_tokens": 8192,
+      "default_max_tokens": 524288,
       "can_reason": false,
       "has_reasoning_efforts": false,
       "supports_attachments": true
@@ -651,21 +625,8 @@
       "cost_per_1m_out": 0,
       "cost_per_1m_in_cached": 0,
       "cost_per_1m_out_cached": 0,
-      "context_window": 32768,
-      "default_max_tokens": 8192,
-      "can_reason": false,
-      "has_reasoning_efforts": false,
-      "supports_attachments": false
-    },
-    {
-      "id": "deepseek/deepseek-chat-v3-0324",
-      "name": "DeepSeek: DeepSeek V3 0324",
-      "cost_per_1m_in": 0.25,
-      "cost_per_1m_out": 0.85,
-      "cost_per_1m_in_cached": 0,
-      "cost_per_1m_out_cached": 0,
       "context_window": 163840,
-      "default_max_tokens": 81920,
+      "default_max_tokens": 16384,
       "can_reason": false,
       "has_reasoning_efforts": false,
       "supports_attachments": false
@@ -677,19 +638,6 @@
       "cost_per_1m_out": 0,
       "cost_per_1m_in_cached": 0,
       "cost_per_1m_out_cached": 0,
-      "context_window": 128000,
-      "default_max_tokens": 12800,
-      "can_reason": false,
-      "has_reasoning_efforts": false,
-      "supports_attachments": true
-    },
-    {
-      "id": "mistralai/mistral-small-3.1-24b-instruct",
-      "name": "Mistral: Mistral Small 3.1 24B",
-      "cost_per_1m_in": 0.027,
-      "cost_per_1m_out": 0.027,
-      "cost_per_1m_in_cached": 0,
-      "cost_per_1m_out_cached": 0,
       "context_window": 96000,
       "default_max_tokens": 48000,
       "can_reason": false,
@@ -865,45 +813,6 @@
       "has_reasoning_efforts": false,
       "supports_attachments": false
     },
-    {
-      "id": "mistralai/mistral-small-24b-instruct-2501",
-      "name": "Mistral: Mistral Small 3",
-      "cost_per_1m_in": 0.03,
-      "cost_per_1m_out": 0.03,
-      "cost_per_1m_in_cached": 0,
-      "cost_per_1m_out_cached": 0,
-      "context_window": 32768,
-      "default_max_tokens": 3276,
-      "can_reason": false,
-      "has_reasoning_efforts": false,
-      "supports_attachments": false
-    },
-    {
-      "id": "deepseek/deepseek-r1-distill-llama-70b",
-      "name": "DeepSeek: R1 Distill Llama 70B",
-      "cost_per_1m_in": 0.049999999999999996,
-      "cost_per_1m_out": 0.049999999999999996,
-      "cost_per_1m_in_cached": 0,
-      "cost_per_1m_out_cached": 0,
-      "context_window": 131072,
-      "default_max_tokens": 13107,
-      "can_reason": true,
-      "has_reasoning_efforts": false,
-      "supports_attachments": false
-    },
-    {
-      "id": "deepseek/deepseek-r1",
-      "name": "DeepSeek: R1",
-      "cost_per_1m_in": 0.39999999999999997,
-      "cost_per_1m_out": 2,
-      "cost_per_1m_in_cached": 0,
-      "cost_per_1m_out_cached": 0,
-      "context_window": 163840,
-      "default_max_tokens": 81920,
-      "can_reason": true,
-      "has_reasoning_efforts": false,
-      "supports_attachments": false
-    },
     {
       "id": "mistralai/codestral-2501",
       "name": "Mistral: Codestral 2501",
@@ -917,19 +826,6 @@
       "has_reasoning_efforts": false,
       "supports_attachments": false
     },
-    {
-      "id": "deepseek/deepseek-chat",
-      "name": "DeepSeek: DeepSeek V3",
-      "cost_per_1m_in": 0.272,
-      "cost_per_1m_out": 0.272,
-      "cost_per_1m_in_cached": 0,
-      "cost_per_1m_out_cached": 0,
-      "context_window": 163840,
-      "default_max_tokens": 16384,
-      "can_reason": false,
-      "has_reasoning_efforts": false,
-      "supports_attachments": false
-    },
     {
       "id": "openai/o1",
       "name": "OpenAI: o1",
@@ -985,12 +881,12 @@
     {
       "id": "meta-llama/llama-3.3-70b-instruct",
       "name": "Meta: Llama 3.3 70B Instruct",
-      "cost_per_1m_in": 0.038000000000000006,
+      "cost_per_1m_in": 0.039,
       "cost_per_1m_out": 0.12,
       "cost_per_1m_in_cached": 0,
       "cost_per_1m_out_cached": 0,
       "context_window": 131072,
-      "default_max_tokens": 8192,
+      "default_max_tokens": 4096,
       "can_reason": false,
       "has_reasoning_efforts": false,
       "supports_attachments": false
@@ -1087,21 +983,8 @@
       "supports_attachments": true
     },
     {
-      "id": "thedrummer/unslopnemo-12b",
-      "name": "TheDrummer: UnslopNemo 12B",
-      "cost_per_1m_in": 0.39999999999999997,
-      "cost_per_1m_out": 0.39999999999999997,
-      "cost_per_1m_in_cached": 0,
-      "cost_per_1m_out_cached": 0,
-      "context_window": 32768,
-      "default_max_tokens": 3276,
-      "can_reason": false,
-      "has_reasoning_efforts": false,
-      "supports_attachments": false
-    },
-    {
-      "id": "anthropic/claude-3.5-haiku:beta",
-      "name": "Anthropic: Claude 3.5 Haiku (self-moderated)",
+      "id": "anthropic/claude-3.5-haiku-20241022:beta",
+      "name": "Anthropic: Claude 3.5 Haiku (2024-10-22) (self-moderated)",
       "cost_per_1m_in": 0.7999999999999999,
       "cost_per_1m_out": 4,
       "cost_per_1m_in_cached": 1,
@@ -1113,8 +996,8 @@
       "supports_attachments": true
     },
     {
-      "id": "anthropic/claude-3.5-haiku",
-      "name": "Anthropic: Claude 3.5 Haiku",
+      "id": "anthropic/claude-3.5-haiku-20241022",
+      "name": "Anthropic: Claude 3.5 Haiku (2024-10-22)",
       "cost_per_1m_in": 0.7999999999999999,
       "cost_per_1m_out": 4,
       "cost_per_1m_in_cached": 1,
@@ -1126,8 +1009,8 @@
       "supports_attachments": true
     },
     {
-      "id": "anthropic/claude-3.5-haiku-20241022:beta",
-      "name": "Anthropic: Claude 3.5 Haiku (2024-10-22) (self-moderated)",
+      "id": "anthropic/claude-3.5-haiku:beta",
+      "name": "Anthropic: Claude 3.5 Haiku (self-moderated)",
       "cost_per_1m_in": 0.7999999999999999,
       "cost_per_1m_out": 4,
       "cost_per_1m_in_cached": 1,
@@ -1139,8 +1022,8 @@
       "supports_attachments": true
     },
     {
-      "id": "anthropic/claude-3.5-haiku-20241022",
-      "name": "Anthropic: Claude 3.5 Haiku (2024-10-22)",
+      "id": "anthropic/claude-3.5-haiku",
+      "name": "Anthropic: Claude 3.5 Haiku",
       "cost_per_1m_in": 0.7999999999999999,
       "cost_per_1m_out": 4,
       "cost_per_1m_in_cached": 1,
@@ -1229,70 +1112,31 @@
       "has_reasoning_efforts": false,
       "supports_attachments": true
     },
-    {
-      "id": "thedrummer/rocinante-12b",
-      "name": "TheDrummer: Rocinante 12B",
-      "cost_per_1m_in": 0.19999999999999998,
-      "cost_per_1m_out": 0.5,
-      "cost_per_1m_in_cached": 0,
-      "cost_per_1m_out_cached": 0,
-      "context_window": 32768,
-      "default_max_tokens": 3276,
-      "can_reason": false,
-      "has_reasoning_efforts": false,
-      "supports_attachments": false
-    },
     {
       "id": "meta-llama/llama-3.2-3b-instruct",
       "name": "Meta: Llama 3.2 3B Instruct",
-      "cost_per_1m_in": 0.003,
-      "cost_per_1m_out": 0.006,
-      "cost_per_1m_in_cached": 0,
-      "cost_per_1m_out_cached": 0,
-      "context_window": 20000,
-      "default_max_tokens": 10000,
-      "can_reason": false,
-      "has_reasoning_efforts": false,
-      "supports_attachments": false
-    },
-    {
-      "id": "meta-llama/llama-3.2-11b-vision-instruct",
-      "name": "Meta: Llama 3.2 11B Vision Instruct",
-      "cost_per_1m_in": 0.049,
-      "cost_per_1m_out": 0.049,
+      "cost_per_1m_in": 0.015,
+      "cost_per_1m_out": 0.024999999999999998,
       "cost_per_1m_in_cached": 0,
       "cost_per_1m_out_cached": 0,
       "context_window": 131072,
-      "default_max_tokens": 8192,
-      "can_reason": false,
-      "has_reasoning_efforts": false,
-      "supports_attachments": true
-    },
-    {
-      "id": "qwen/qwen-2.5-72b-instruct",
-      "name": "Qwen2.5 72B Instruct",
-      "cost_per_1m_in": 0.101,
-      "cost_per_1m_out": 0.101,
-      "cost_per_1m_in_cached": 0,
-      "cost_per_1m_out_cached": 0,
-      "context_window": 32768,
-      "default_max_tokens": 3276,
+      "default_max_tokens": 65536,
       "can_reason": false,
       "has_reasoning_efforts": false,
       "supports_attachments": false
     },
     {
-      "id": "mistralai/pixtral-12b",
-      "name": "Mistral: Pixtral 12B",
-      "cost_per_1m_in": 0.09999999999999999,
-      "cost_per_1m_out": 0.09999999999999999,
+      "id": "cohere/command-r-08-2024",
+      "name": "Cohere: Command R (08-2024)",
+      "cost_per_1m_in": 0.15,
+      "cost_per_1m_out": 0.6,
       "cost_per_1m_in_cached": 0,
       "cost_per_1m_out_cached": 0,
-      "context_window": 32768,
-      "default_max_tokens": 3276,
+      "context_window": 128000,
+      "default_max_tokens": 2000,
       "can_reason": false,
       "has_reasoning_efforts": false,
-      "supports_attachments": true
+      "supports_attachments": false
     },
     {
       "id": "cohere/command-r-plus-08-2024",
@@ -1307,19 +1151,6 @@
       "has_reasoning_efforts": false,
       "supports_attachments": false
     },
-    {
-      "id": "cohere/command-r-08-2024",
-      "name": "Cohere: Command R (08-2024)",
-      "cost_per_1m_in": 0.15,
-      "cost_per_1m_out": 0.6,
-      "cost_per_1m_in_cached": 0,
-      "cost_per_1m_out_cached": 0,
-      "context_window": 128000,
-      "default_max_tokens": 2000,
-      "can_reason": false,
-      "has_reasoning_efforts": false,
-      "supports_attachments": false
-    },
     {
       "id": "microsoft/phi-3.5-mini-128k-instruct",
       "name": "Microsoft: Phi-3.5 Mini 128K Instruct",
@@ -1333,19 +1164,6 @@
       "has_reasoning_efforts": false,
       "supports_attachments": false
     },
-    {
-      "id": "nousresearch/hermes-3-llama-3.1-70b",
-      "name": "Nous: Hermes 3 70B Instruct",
-      "cost_per_1m_in": 0.09999999999999999,
-      "cost_per_1m_out": 0.28,
-      "cost_per_1m_in_cached": 0,
-      "cost_per_1m_out_cached": 0,
-      "context_window": 131072,
-      "default_max_tokens": 13107,
-      "can_reason": false,
-      "has_reasoning_efforts": false,
-      "supports_attachments": false
-    },
     {
       "id": "openai/gpt-4o-2024-08-06",
       "name": "OpenAI: GPT-4o (2024-08-06)",
@@ -1359,71 +1177,19 @@
       "has_reasoning_efforts": false,
       "supports_attachments": true
     },
-    {
-      "id": "meta-llama/llama-3.1-8b-instruct",
-      "name": "Meta: Llama 3.1 8B Instruct",
-      "cost_per_1m_in": 0.015,
-      "cost_per_1m_out": 0.02,
-      "cost_per_1m_in_cached": 0,
-      "cost_per_1m_out_cached": 0,
-      "context_window": 131072,
-      "default_max_tokens": 8192,
-      "can_reason": false,
-      "has_reasoning_efforts": false,
-      "supports_attachments": false
-    },
-    {
-      "id": "meta-llama/llama-3.1-405b-instruct",
-      "name": "Meta: Llama 3.1 405B Instruct",
-      "cost_per_1m_in": 0.7999999999999999,
-      "cost_per_1m_out": 0.7999999999999999,
-      "cost_per_1m_in_cached": 0,
-      "cost_per_1m_out_cached": 0,
-      "context_window": 32768,
-      "default_max_tokens": 8192,
-      "can_reason": false,
-      "has_reasoning_efforts": false,
-      "supports_attachments": false
-    },
-    {
-      "id": "meta-llama/llama-3.1-70b-instruct",
-      "name": "Meta: Llama 3.1 70B Instruct",
-      "cost_per_1m_in": 0.09999999999999999,
-      "cost_per_1m_out": 0.28,
-      "cost_per_1m_in_cached": 0,
-      "cost_per_1m_out_cached": 0,
-      "context_window": 131072,
-      "default_max_tokens": 8192,
-      "can_reason": false,
-      "has_reasoning_efforts": false,
-      "supports_attachments": false
-    },
     {
       "id": "mistralai/mistral-nemo",
       "name": "Mistral: Mistral Nemo",
-      "cost_per_1m_in": 0.0075,
+      "cost_per_1m_in": 0.024999999999999998,
       "cost_per_1m_out": 0.049999999999999996,
       "cost_per_1m_in_cached": 0,
       "cost_per_1m_out_cached": 0,
-      "context_window": 32000,
-      "default_max_tokens": 3200,
+      "context_window": 131072,
+      "default_max_tokens": 65536,
       "can_reason": false,
       "has_reasoning_efforts": false,
       "supports_attachments": false
     },
-    {
-      "id": "openai/gpt-4o-mini",
-      "name": "OpenAI: GPT-4o-mini",
-      "cost_per_1m_in": 0.15,
-      "cost_per_1m_out": 0.6,
-      "cost_per_1m_in_cached": 0,
-      "cost_per_1m_out_cached": 0.075,
-      "context_window": 128000,
-      "default_max_tokens": 8192,
-      "can_reason": false,
-      "has_reasoning_efforts": false,
-      "supports_attachments": true
-    },
     {
       "id": "openai/gpt-4o-mini-2024-07-18",
       "name": "OpenAI: GPT-4o-mini (2024-07-18)",
@@ -1464,21 +1230,8 @@
       "supports_attachments": true
     },
     {
-      "id": "mistralai/mistral-7b-instruct:free",
-      "name": "Mistral: Mistral 7B Instruct (free)",
-      "cost_per_1m_in": 0,
-      "cost_per_1m_out": 0,
-      "cost_per_1m_in_cached": 0,
-      "cost_per_1m_out_cached": 0,
-      "context_window": 32768,
-      "default_max_tokens": 8192,
-      "can_reason": false,
-      "has_reasoning_efforts": false,
-      "supports_attachments": false
-    },
-    {
-      "id": "mistralai/mistral-7b-instruct",
-      "name": "Mistral: Mistral 7B Instruct",
+      "id": "mistralai/mistral-7b-instruct-v0.3",
+      "name": "Mistral: Mistral 7B Instruct v0.3",
       "cost_per_1m_in": 0.028,
       "cost_per_1m_out": 0.054,
       "cost_per_1m_in_cached": 0,
@@ -1490,10 +1243,10 @@
       "supports_attachments": false
     },
     {
-      "id": "mistralai/mistral-7b-instruct-v0.3",
-      "name": "Mistral: Mistral 7B Instruct v0.3",
-      "cost_per_1m_in": 0.028,
-      "cost_per_1m_out": 0.054,
+      "id": "mistralai/mistral-7b-instruct:free",
+      "name": "Mistral: Mistral 7B Instruct (free)",
+      "cost_per_1m_in": 0,
+      "cost_per_1m_out": 0,
       "cost_per_1m_in_cached": 0,
       "cost_per_1m_out_cached": 0,
       "context_window": 32768,
@@ -1541,13 +1294,26 @@
       "has_reasoning_efforts": false,
       "supports_attachments": true
     },
+    {
+      "id": "openai/gpt-4o-2024-05-13",
+      "name": "OpenAI: GPT-4o (2024-05-13)",
+      "cost_per_1m_in": 5,
+      "cost_per_1m_out": 15,
+      "cost_per_1m_in_cached": 0,
+      "cost_per_1m_out_cached": 0,
+      "context_window": 128000,
+      "default_max_tokens": 2048,
+      "can_reason": false,
+      "has_reasoning_efforts": false,
+      "supports_attachments": true
+    },
     {
       "id": "openai/gpt-4o",
       "name": "OpenAI: GPT-4o",
       "cost_per_1m_in": 2.5,
       "cost_per_1m_out": 10,
       "cost_per_1m_in_cached": 0,
-      "cost_per_1m_out_cached": 1.25,
+      "cost_per_1m_out_cached": 0,
       "context_window": 128000,
       "default_max_tokens": 8192,
       "can_reason": false,
@@ -1568,17 +1334,17 @@
       "supports_attachments": true
     },
     {
-      "id": "openai/gpt-4o-2024-05-13",
-      "name": "OpenAI: GPT-4o (2024-05-13)",
-      "cost_per_1m_in": 5,
-      "cost_per_1m_out": 15,
+      "id": "meta-llama/llama-3-70b-instruct",
+      "name": "Meta: Llama 3 70B Instruct",
+      "cost_per_1m_in": 0.3,
+      "cost_per_1m_out": 0.39999999999999997,
       "cost_per_1m_in_cached": 0,
       "cost_per_1m_out_cached": 0,
-      "context_window": 128000,
-      "default_max_tokens": 2048,
+      "context_window": 8192,
+      "default_max_tokens": 8192,
       "can_reason": false,
       "has_reasoning_efforts": false,
-      "supports_attachments": true
+      "supports_attachments": false
     },
     {
       "id": "meta-llama/llama-3-8b-instruct",
@@ -1594,30 +1360,30 @@
       "supports_attachments": false
     },
     {
-      "id": "meta-llama/llama-3-70b-instruct",
-      "name": "Meta: Llama 3 70B Instruct",
-      "cost_per_1m_in": 0.3,
-      "cost_per_1m_out": 0.39999999999999997,
+      "id": "mistralai/mixtral-8x22b-instruct",
+      "name": "Mistral: Mixtral 8x22B Instruct",
+      "cost_per_1m_in": 2,
+      "cost_per_1m_out": 6,
       "cost_per_1m_in_cached": 0,
       "cost_per_1m_out_cached": 0,
-      "context_window": 8192,
-      "default_max_tokens": 8192,
+      "context_window": 65536,
+      "default_max_tokens": 6553,
       "can_reason": false,
       "has_reasoning_efforts": false,
       "supports_attachments": false
     },
     {
-      "id": "mistralai/mixtral-8x22b-instruct",
-      "name": "Mistral: Mixtral 8x22B Instruct",
-      "cost_per_1m_in": 0.8999999999999999,
-      "cost_per_1m_out": 0.8999999999999999,
+      "id": "openai/gpt-4-turbo",
+      "name": "OpenAI: GPT-4 Turbo",
+      "cost_per_1m_in": 10,
+      "cost_per_1m_out": 30,
       "cost_per_1m_in_cached": 0,
       "cost_per_1m_out_cached": 0,
-      "context_window": 65536,
-      "default_max_tokens": 6553,
+      "context_window": 128000,
+      "default_max_tokens": 2048,
       "can_reason": false,
       "has_reasoning_efforts": false,
-      "supports_attachments": false
+      "supports_attachments": true
     },
     {
       "id": "google/gemini-pro-1.5",
@@ -1632,19 +1398,6 @@
       "has_reasoning_efforts": false,
       "supports_attachments": true
     },
-    {
-      "id": "openai/gpt-4-turbo",
-      "name": "OpenAI: GPT-4 Turbo",
-      "cost_per_1m_in": 10,
-      "cost_per_1m_out": 30,
-      "cost_per_1m_in_cached": 0,
-      "cost_per_1m_out_cached": 0,
-      "context_window": 128000,
-      "default_max_tokens": 2048,
-      "can_reason": false,
-      "has_reasoning_efforts": false,
-      "supports_attachments": true
-    },
     {
       "id": "cohere/command-r-plus",
       "name": "Cohere: Command R+",
@@ -1802,10 +1555,10 @@
       "supports_attachments": false
     },
     {
-      "id": "mistralai/mistral-small",
-      "name": "Mistral Small",
-      "cost_per_1m_in": 0.19999999999999998,
-      "cost_per_1m_out": 0.6,
+      "id": "mistralai/mistral-tiny",
+      "name": "Mistral Tiny",
+      "cost_per_1m_in": 0.25,
+      "cost_per_1m_out": 0.25,
       "cost_per_1m_in_cached": 0,
       "cost_per_1m_out_cached": 0,
       "context_window": 32768,
@@ -1815,10 +1568,10 @@
       "supports_attachments": false
     },
     {
-      "id": "mistralai/mistral-tiny",
-      "name": "Mistral Tiny",
-      "cost_per_1m_in": 0.25,
-      "cost_per_1m_out": 0.25,
+      "id": "mistralai/mistral-small",
+      "name": "Mistral Small",
+      "cost_per_1m_in": 0.19999999999999998,
+      "cost_per_1m_out": 0.6,
       "cost_per_1m_in_cached": 0,
       "cost_per_1m_out_cached": 0,
       "context_window": 32768,
@@ -1853,19 +1606,6 @@
       "has_reasoning_efforts": false,
       "supports_attachments": false
     },
-    {
-      "id": "mistralai/mistral-7b-instruct-v0.1",
-      "name": "Mistral: Mistral 7B Instruct v0.1",
-      "cost_per_1m_in": 0.11,
-      "cost_per_1m_out": 0.19,
-      "cost_per_1m_in_cached": 0,
-      "cost_per_1m_out_cached": 0,
-      "context_window": 2824,
-      "default_max_tokens": 282,
-      "can_reason": false,
-      "has_reasoning_efforts": false,
-      "supports_attachments": false
-    },
     {
       "id": "openai/gpt-3.5-turbo-16k",
       "name": "OpenAI: GPT-3.5 Turbo 16k",
@@ -1880,13 +1620,13 @@
       "supports_attachments": false
     },
     {
-      "id": "openai/gpt-3.5-turbo",
-      "name": "OpenAI: GPT-3.5 Turbo",
-      "cost_per_1m_in": 0.5,
-      "cost_per_1m_out": 1.5,
+      "id": "openai/gpt-4-0314",
+      "name": "OpenAI: GPT-4 (older v0314)",
+      "cost_per_1m_in": 30,
+      "cost_per_1m_out": 60,
       "cost_per_1m_in_cached": 0,
       "cost_per_1m_out_cached": 0,
-      "context_window": 16385,
+      "context_window": 8191,
       "default_max_tokens": 2048,
       "can_reason": false,
       "has_reasoning_efforts": false,
@@ -1906,13 +1646,13 @@
       "supports_attachments": false
     },
     {
-      "id": "openai/gpt-4-0314",
-      "name": "OpenAI: GPT-4 (older v0314)",
-      "cost_per_1m_in": 30,
-      "cost_per_1m_out": 60,
+      "id": "openai/gpt-3.5-turbo",
+      "name": "OpenAI: GPT-3.5 Turbo",
+      "cost_per_1m_in": 0.5,
+      "cost_per_1m_out": 1.5,
       "cost_per_1m_in_cached": 0,
       "cost_per_1m_out_cached": 0,
-      "context_window": 8191,
+      "context_window": 16385,
       "default_max_tokens": 2048,
       "can_reason": false,
       "has_reasoning_efforts": false,