improvements

Carlos Alexandro Becker created this commit

Signed-off-by: Carlos Alexandro Becker <caarlos0@users.noreply.github.com>

Change summary

CRUSH.md                                 |   1 +
cmd/aihubmix/main.go                     |   2 +-
internal/names/model.go                  | 153 +--
internal/providers/configs/aihubmix.json | 975 ++++++++++++-------------
4 files changed, 515 insertions(+), 616 deletions(-)

Detailed changes

CRUSH.md 🔗

@@ -19,6 +19,7 @@
 - HTTP: Always set timeouts, use context, defer close response bodies
 - JSON: Use `json.MarshalIndent` for pretty output, validate unmarshaling
 - File permissions: Use 0o600 for sensitive config files
+- Always format code with `gofumpt`
 
 ## Model Names
 

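The new rule sits alongside the existing JSON and file-permission guidelines visible in the surrounding context. A minimal sketch of how those two existing guidelines combine when writing a sensitive config file (the filename and payload below are made up for illustration):

package main

import (
	"encoding/json"
	"log"
	"os"
)

func main() {
	// Hypothetical payload; real configs may hold API keys or tokens.
	cfg := map[string]string{"api_key": "redacted"}

	// Pretty-print with json.MarshalIndent, per the guideline above.
	data, err := json.MarshalIndent(cfg, "", "  ")
	if err != nil {
		log.Fatal(err)
	}

	// Sensitive config files get 0o600 permissions.
	if err := os.WriteFile("config.json", data, 0o600); err != nil {
		log.Fatal(err)
	}
}
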
cmd/aihubmix/main.go 🔗

@@ -163,7 +163,7 @@ func main() {
 	}
 
 	slices.SortFunc(aiHubMixProvider.Models, func(a, b catwalk.Model) int {
-		return strings.Compare(a.Name, b.Name)
+		return strings.Compare(a.ID, b.ID)
 	})
 
 	data, err := json.MarshalIndent(aiHubMixProvider, "", "  ")
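
Sorting by ID instead of Name keeps the generated file ordered by the stable identifier, and because strings.Compare is byte-wise, uppercase IDs such as DeepSeek-V3 now land ahead of lowercase ones like claude-3-5-haiku, which is what drives the large block moves in aihubmix.json below. A small sketch of the same pattern, using a stand-in struct since the full catwalk.Model definition isn't part of this diff:

package main

import (
	"fmt"
	"slices"
	"strings"
)

// model stands in for catwalk.Model; only the fields relevant to sorting are included.
type model struct {
	ID   string
	Name string
}

func main() {
	models := []model{
		{ID: "gpt-5-mini", Name: "GPT-5 Mini"},
		{ID: "claude-opus-4-1", Name: "Claude Opus 4.1"},
		{ID: "DeepSeek-V3", Name: "DeepSeek V3"},
	}

	// Sort by ID, as the updated generator does.
	slices.SortFunc(models, func(a, b model) int {
		return strings.Compare(a.ID, b.ID)
	})

	for _, m := range models {
		fmt.Println(m.ID) // DeepSeek-V3, claude-opus-4-1, gpt-5-mini
	}
}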

internal/names/model.go 🔗

@@ -4,8 +4,8 @@
 package names
 
 import (
+	"regexp"
 	"strings"
-	"unicode"
 )
 
 // modelNames maps model IDs to their human-readable display names.
@@ -20,6 +20,7 @@ var modelNames = map[string]string{
 	"claude-sonnet-4":            "Claude Sonnet 4",
 	"claude-sonnet-4-5":          "Claude Sonnet 4.5",
 	"claude-opus-4":              "Claude Opus 4",
+	"claude-opus-4-1":            "Claude Opus 4.1",
 	"claude-opus-4-5":            "Claude Opus 4.5",
 	"claude-opus-4-5-think":      "Claude Opus 4.5 Think",
 	"claude-sonnet-4-5-20250214": "Claude Sonnet 4.5",
@@ -65,6 +66,7 @@ var modelNames = map[string]string{
 	"deepseek-v3.1-terminus": "DeepSeek V3.1 Terminus",
 	"deepseek-v3.1-think":    "DeepSeek V3.1 Think",
 	"deepseek-v3.2":          "DeepSeek V3.2",
+	"deepseek-v3.2-exp":      "DeepSeek V3.2 Exp",
 	"deepseek-v3.2-fast":     "DeepSeek V3.2 Fast",
 	"deepseek-v3.2-think":    "DeepSeek V3.2 Think",
 	"deepseek-v3.2-speciale": "DeepSeek V3.2 Speciale",
@@ -112,7 +114,7 @@ var modelNames = map[string]string{
 	"glm-4.7-flash": "GLM-4.7 Flash",
 	"glm-4.6":       "GLM-4.6",
 	"glm-4.6v":      "GLM-4.6 Vision",
-	"glm-4.5":       "GLM-4.5",
+	"glm-4.5v":      "GLM-4.5 Vision",
 	"glm-4-flash":   "GLM-4 Flash",
 	"glm-4-plus":    "GLM-4 Plus",
 	"glm-4-air":     "GLM-4 Air",
@@ -253,124 +255,97 @@ var modelNames = map[string]string{
 }
 
 // GetDisplayName returns a human-readable display name for the given model ID.
-// It first checks the static mapping, then attempts to find a close match using
-// Levenshtein distance. If no good match is found, it returns a cleaned-up
-// version of the model ID.
 func GetDisplayName(modelID string) string {
-	// Normalize to lowercase and try exact match first
-	normalized := strings.ToLower(modelID)
-	if name, ok := modelNames[normalized]; ok {
+	if name := lookupInMappings(modelID); name != "" {
 		return name
 	}
 
-	// Try case-sensitive match
+	if bestMatch := findBestMatch(strings.ToLower(modelID)); bestMatch != "" {
+		return bestMatch
+	}
+
+	return formatModelName(modelID)
+}
+
+// lookupInMappings attempts to find the model ID in the static mappings,
+// checking both the original string and without provider prefix.
+func lookupInMappings(modelID string) string {
 	if name, ok := modelNames[modelID]; ok {
 		return name
 	}
 
-	// Try without provider prefix (e.g., "anthropic/claude-sonnet-4" -> "claude-sonnet-4")
+	lowered := strings.ToLower(modelID)
+	if name, ok := modelNames[lowered]; ok {
+		return name
+	}
+
 	if idx := strings.LastIndex(modelID, "/"); idx != -1 {
 		baseModel := modelID[idx+1:]
-		if name, ok := modelNames[strings.ToLower(baseModel)]; ok {
+		if name, ok := modelNames[baseModel]; ok {
 			return name
 		}
-		if name, ok := modelNames[baseModel]; ok {
+		if name, ok := modelNames[strings.ToLower(baseModel)]; ok {
 			return name
 		}
 	}
 
-	// Try fuzzy match with known models
-	if bestMatch := findBestMatch(normalized); bestMatch != "" {
-		return bestMatch
-	}
-
-	// Fall back to formatting the model ID nicely
-	return formatModelName(modelID)
+	return ""
 }
 
 // formatModelName converts a technical model ID to a more readable format.
-// It replaces separators with spaces and capitalizes properly.
 func formatModelName(modelID string) string {
+	result := modelID
+
 	// Remove provider prefix if present
-	baseModel := modelID
-	if idx := strings.LastIndex(modelID, "/"); idx != -1 {
-		baseModel = modelID[idx+1:]
+	if idx := strings.LastIndex(result, "/"); idx != -1 {
+		result = result[idx+1:]
 	}
 
-	// Split by common separators
-	separators := []string{"_", "/"}
-
-	// Replace all separators (except dashes which we'll handle special) with spaces
-	result := baseModel
-	for _, sep := range separators {
-		result = strings.ReplaceAll(result, sep, " ")
-	}
+	// Replace underscores and slashes with spaces
+	result = strings.ReplaceAll(strings.ReplaceAll(result, "_", " "), "/", " ")
 
-	// Convert version patterns like "3-5", "4-5" to "3.5", "4.5"
-	// This handles cases where version numbers use dashes as decimal separators
-	result = convertVersionDashes(result)
+	// Convert version patterns like "3-5" to "3.5"
+	versionDashRegex := regexp.MustCompile(`(\d)-(\d)`)
+	result = versionDashRegex.ReplaceAllString(result, "$1.$2")
 
-	// Now replace remaining dashes with spaces
+	// Replace remaining dashes with spaces and clean up
 	result = strings.ReplaceAll(result, "-", " ")
-
-	// Clean up extra spaces
 	result = strings.Join(strings.Fields(result), " ")
 
-	// Capitalize first letter of each word
-	result = titleCase(result)
-
-	// Handle special cases like "V3" -> "V3" (already capitalized)
-	result = preserveVersionNumbers(result)
-
-	return result
-}
-
-// convertVersionDashes converts dash-separated version numbers to dot-separated.
-// For example: "3-5" -> "3.5", "4-5-haiku" -> "4.5 Haiku"
-func convertVersionDashes(s string) string {
-	// Pattern: digit dash digit -> digit dot digit
-	// Use a simple loop to replace these patterns
-	result := s
-	for i := 0; i < len(result)-2; i++ {
-		// Check if we have "X-Y" pattern where X and Y are digits
-		if result[i] >= '0' && result[i] <= '9' &&
-			result[i+1] == '-' &&
-			result[i+2] >= '0' && result[i+2] <= '9' {
-			// Convert to "X.Y"
-			result = result[:i+1] + "." + result[i+2:]
-		}
-	}
-	return result
-}
-
-// titleCase capitalizes the first letter of each word.
-func titleCase(s string) string {
-	words := strings.Fields(s)
+	// Capitalize each word while preserving version indicators
+	words := strings.Fields(result)
+	var builder strings.Builder
 	for i, word := range words {
-		if len(word) > 0 {
-			words[i] = string(unicode.ToUpper(rune(word[0]))) + word[1:]
+		if i > 0 {
+			builder.WriteString(" ")
 		}
+		builder.WriteString(capitalizeWord(word))
 	}
-	return strings.Join(words, " ")
+
+	return builder.String()
 }
 
-// preserveVersionNumbers keeps version numbers properly formatted.
-func preserveVersionNumbers(s string) string {
-	// Handle patterns like V3, V3.1, etc.
-	result := strings.ReplaceAll(s, "V ", "V")
+// capitalizeWord capitalizes the first letter of a word, preserving version patterns.
+func capitalizeWord(word string) string {
+	if len(word) == 0 {
+		return word
+	}
 
-	// Fix double spaces that might have been introduced
-	result = strings.Join(strings.Fields(result), " ")
+	// Preserve the "V" version prefix (e.g., "v3" becomes "V3")
+	upperWord := strings.ToUpper(word)
+	if strings.HasPrefix(upperWord, "V") && len(word) > 1 {
+		return strings.ToUpper(word[0:1]) + word[1:]
+	}
 
-	return result
+	return strings.ToUpper(word[0:1]) + word[1:]
 }
 
+const fuzzyMatchThreshold = 2 // Maximum edit distance to consider for fuzzy matching
+
 // findBestMatch uses Levenshtein distance to find the best matching model name.
 func findBestMatch(modelID string) string {
-	const threshold = 4 // Maximum edit distance to consider
-
 	var bestMatch string
-	minDistance := threshold + 1
+	minDistance := fuzzyMatchThreshold + 1
 
 	for knownID, name := range modelNames {
 		distance := levenshteinDistance(modelID, knownID)
@@ -380,28 +355,20 @@ func findBestMatch(modelID string) string {
 		}
 	}
 
-	if bestMatch != "" && minDistance <= threshold {
-		return bestMatch
-	}
-
-	return ""
+	return bestMatch
 }
 
 // levenshteinDistance computes the edit distance between two strings.
 func levenshteinDistance(a, b string) int {
-	// Optimization: if either string is empty, return the length of the other
-	if len(a) == 0 {
+	switch {
+	case len(a) == 0:
 		return len(b)
-	}
-	if len(b) == 0 {
+	case len(b) == 0:
 		return len(a)
 	}
 
-	// Use a single row to save memory
 	previous := make([]int, len(b)+1)
-
-	// Initialize the first row
-	for j := 0; j <= len(b); j++ {
+	for j := range previous {
 		previous[j] = j
 	}
 

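For IDs that miss both the static map and the (now stricter) fuzzy match, the fallback formatter leans on a single regex instead of the removed hand-rolled dash scanner. A standalone sketch of that fallback path, with hypothetical IDs used purely for illustration:

package main

import (
	"fmt"
	"regexp"
	"strings"
)

var versionDashRegex = regexp.MustCompile(`(\d)-(\d)`)

// formatFallback mirrors the fallback shown above: strip the provider prefix,
// normalize separators, turn "3-5" into "3.5", and capitalize each word.
func formatFallback(modelID string) string {
	result := modelID
	if idx := strings.LastIndex(result, "/"); idx != -1 {
		result = result[idx+1:]
	}
	result = strings.ReplaceAll(strings.ReplaceAll(result, "_", " "), "/", " ")
	result = versionDashRegex.ReplaceAllString(result, "$1.$2")
	result = strings.ReplaceAll(result, "-", " ")

	words := strings.Fields(result)
	for i, w := range words {
		words[i] = strings.ToUpper(w[:1]) + w[1:]
	}
	return strings.Join(words, " ")
}

func main() {
	// Hypothetical IDs, not taken from the provider configs.
	fmt.Println(formatFallback("somevendor/new-model-3-5-preview")) // New Model 3.5 Preview
	fmt.Println(formatFallback("foo_bar-v2"))                       // Foo Bar V2
}
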
internal/providers/configs/aihubmix.json 🔗

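Each entry in this file follows the same shape; as a rough sketch, the fields map onto a struct like the one below (field names inferred from the JSON keys; the real catwalk.Model type is not shown in this diff):

package main

import (
	"encoding/json"
	"fmt"
	"log"
)

// modelEntry is an inferred sketch of one entry; cached-cost fields are omitted for brevity.
type modelEntry struct {
	ID                     string         `json:"id"`
	Name                   string         `json:"name"`
	CostPer1MIn            float64        `json:"cost_per_1m_in"`
	CostPer1MOut           float64        `json:"cost_per_1m_out"`
	ContextWindow          int            `json:"context_window"`
	DefaultMaxTokens       int            `json:"default_max_tokens"`
	CanReason              bool           `json:"can_reason"`
	ReasoningLevels        []string       `json:"reasoning_levels,omitempty"`
	DefaultReasoningEffort string         `json:"default_reasoning_effort,omitempty"`
	SupportsAttachments    bool           `json:"supports_attachments"`
	Options                map[string]any `json:"options"`
}

func main() {
	// One entry from the diff below, reproduced as a compact literal.
	raw := `{"id":"DeepSeek-V3.2-Exp","name":"DeepSeek V3.2 Exp","cost_per_1m_in":0.274,"cost_per_1m_out":0.411,"context_window":163000,"default_max_tokens":16300,"can_reason":false,"supports_attachments":false,"options":{}}`

	var m modelEntry
	if err := json.Unmarshal([]byte(raw), &m); err != nil {
		log.Fatal(err) // validate unmarshaling, per CRUSH.md
	}
	fmt.Printf("%s (%s): %d-token context\n", m.Name, m.ID, m.ContextWindow)
}
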
@@ -39,6 +39,167 @@
       "supports_attachments": false,
       "options": {}
     },
+    {
+      "id": "ByteDance-Seed/Seed-OSS-36B-Instruct",
+      "name": "ByteDance Seed OSS 36B",
+      "cost_per_1m_in": 0.2,
+      "cost_per_1m_out": 0.534,
+      "cost_per_1m_in_cached": 0,
+      "cost_per_1m_out_cached": 0,
+      "context_window": 256000,
+      "default_max_tokens": 32000,
+      "can_reason": false,
+      "supports_attachments": true,
+      "options": {}
+    },
+    {
+      "id": "DeepSeek-R1",
+      "name": "DeepSeek R1",
+      "cost_per_1m_in": 0.4,
+      "cost_per_1m_out": 2,
+      "cost_per_1m_in_cached": 0,
+      "cost_per_1m_out_cached": 0,
+      "context_window": 1638000,
+      "default_max_tokens": 163800,
+      "can_reason": false,
+      "supports_attachments": false,
+      "options": {}
+    },
+    {
+      "id": "DeepSeek-V3",
+      "name": "DeepSeek V3",
+      "cost_per_1m_in": 0.272,
+      "cost_per_1m_out": 1.088,
+      "cost_per_1m_in_cached": 0,
+      "cost_per_1m_out_cached": 0,
+      "context_window": 1638000,
+      "default_max_tokens": 163800,
+      "can_reason": false,
+      "supports_attachments": false,
+      "options": {}
+    },
+    {
+      "id": "DeepSeek-V3-Fast",
+      "name": "DeepSeek V3 Fast",
+      "cost_per_1m_in": 0.56,
+      "cost_per_1m_out": 2.24,
+      "cost_per_1m_in_cached": 0,
+      "cost_per_1m_out_cached": 0,
+      "context_window": 32000,
+      "default_max_tokens": 3200,
+      "can_reason": false,
+      "supports_attachments": false,
+      "options": {}
+    },
+    {
+      "id": "DeepSeek-V3.1-Fast",
+      "name": "DeepSeek V3.1 Fast",
+      "cost_per_1m_in": 1.096,
+      "cost_per_1m_out": 3.288,
+      "cost_per_1m_in_cached": 0,
+      "cost_per_1m_out_cached": 0,
+      "context_window": 163000,
+      "default_max_tokens": 16300,
+      "can_reason": false,
+      "supports_attachments": false,
+      "options": {}
+    },
+    {
+      "id": "DeepSeek-V3.1-Terminus",
+      "name": "DeepSeek V3.1 Terminus",
+      "cost_per_1m_in": 0.56,
+      "cost_per_1m_out": 1.68,
+      "cost_per_1m_in_cached": 0,
+      "cost_per_1m_out_cached": 0,
+      "context_window": 160000,
+      "default_max_tokens": 32000,
+      "can_reason": false,
+      "supports_attachments": false,
+      "options": {}
+    },
+    {
+      "id": "DeepSeek-V3.1-Think",
+      "name": "DeepSeek V3.1 Think",
+      "cost_per_1m_in": 0.56,
+      "cost_per_1m_out": 1.68,
+      "cost_per_1m_in_cached": 0,
+      "cost_per_1m_out_cached": 0,
+      "context_window": 128000,
+      "default_max_tokens": 32000,
+      "can_reason": true,
+      "reasoning_levels": [
+        "low",
+        "medium",
+        "high"
+      ],
+      "default_reasoning_effort": "medium",
+      "supports_attachments": false,
+      "options": {}
+    },
+    {
+      "id": "DeepSeek-V3.2-Exp",
+      "name": "DeepSeek V3.2 Exp",
+      "cost_per_1m_in": 0.274,
+      "cost_per_1m_out": 0.411,
+      "cost_per_1m_in_cached": 0,
+      "cost_per_1m_out_cached": 0.0274,
+      "context_window": 163000,
+      "default_max_tokens": 16300,
+      "can_reason": false,
+      "supports_attachments": false,
+      "options": {}
+    },
+    {
+      "id": "DeepSeek-V3.2-Exp-Think",
+      "name": "DeepSeek V3.2 Exp Think",
+      "cost_per_1m_in": 0.274,
+      "cost_per_1m_out": 0.411,
+      "cost_per_1m_in_cached": 0,
+      "cost_per_1m_out_cached": 0.0274,
+      "context_window": 131000,
+      "default_max_tokens": 64000,
+      "can_reason": true,
+      "reasoning_levels": [
+        "low",
+        "medium",
+        "high"
+      ],
+      "default_reasoning_effort": "medium",
+      "supports_attachments": false,
+      "options": {}
+    },
+    {
+      "id": "ERNIE-X1.1-Preview",
+      "name": "ERNIE X1.1 Preview",
+      "cost_per_1m_in": 0.136,
+      "cost_per_1m_out": 0.544,
+      "cost_per_1m_in_cached": 0,
+      "cost_per_1m_out_cached": 0,
+      "context_window": 119000,
+      "default_max_tokens": 11900,
+      "can_reason": true,
+      "reasoning_levels": [
+        "low",
+        "medium",
+        "high"
+      ],
+      "default_reasoning_effort": "medium",
+      "supports_attachments": false,
+      "options": {}
+    },
+    {
+      "id": "Kimi-K2-0905",
+      "name": "Kimi K2",
+      "cost_per_1m_in": 0.548,
+      "cost_per_1m_out": 2.192,
+      "cost_per_1m_in_cached": 0,
+      "cost_per_1m_out_cached": 0,
+      "context_window": 262144,
+      "default_max_tokens": 26214,
+      "can_reason": false,
+      "supports_attachments": false,
+      "options": {}
+    },
     {
       "id": "aihub-Phi-4-mini-instruct",
       "name": "Aihub Phi 4 Mini Instruct",
@@ -65,19 +226,6 @@
       "supports_attachments": true,
       "options": {}
     },
-    {
-      "id": "ByteDance-Seed/Seed-OSS-36B-Instruct",
-      "name": "ByteDance Seed OSS 36B",
-      "cost_per_1m_in": 0.2,
-      "cost_per_1m_out": 0.534,
-      "cost_per_1m_in_cached": 0,
-      "cost_per_1m_out_cached": 0,
-      "context_window": 256000,
-      "default_max_tokens": 32000,
-      "can_reason": false,
-      "supports_attachments": true,
-      "options": {}
-    },
     {
       "id": "claude-3-5-haiku",
       "name": "Claude 3.5 Haiku",
@@ -106,7 +254,7 @@
     },
     {
       "id": "claude-3-5-sonnet-20240620",
-      "name": "Claude 3.5 Sonnet",
+      "name": "Claude 3.5 Sonnet 20240620",
       "cost_per_1m_in": 3.3,
       "cost_per_1m_out": 16.5,
       "cost_per_1m_in_cached": 0,
@@ -170,7 +318,7 @@
     },
     {
       "id": "claude-opus-4-1",
-      "name": "Claude Opus 4",
+      "name": "Claude Opus 4.1",
       "cost_per_1m_in": 16.5,
       "cost_per_1m_out": 82.5,
       "cost_per_1m_in_cached": 0,
@@ -378,79 +526,40 @@
       "options": {}
     },
     {
-      "id": "DeepSeek-R1",
-      "name": "DeepSeek R1",
-      "cost_per_1m_in": 0.4,
-      "cost_per_1m_out": 2,
-      "cost_per_1m_in_cached": 0,
-      "cost_per_1m_out_cached": 0,
-      "context_window": 1638000,
-      "default_max_tokens": 163800,
-      "can_reason": false,
-      "supports_attachments": false,
-      "options": {}
-    },
-    {
-      "id": "DeepSeek-V3",
-      "name": "DeepSeek V3",
-      "cost_per_1m_in": 0.272,
-      "cost_per_1m_out": 1.088,
-      "cost_per_1m_in_cached": 0,
-      "cost_per_1m_out_cached": 0,
-      "context_window": 1638000,
-      "default_max_tokens": 163800,
-      "can_reason": false,
-      "supports_attachments": false,
-      "options": {}
-    },
-    {
-      "id": "DeepSeek-V3-Fast",
-      "name": "DeepSeek V3 Fast",
-      "cost_per_1m_in": 0.56,
-      "cost_per_1m_out": 2.24,
+      "id": "deepseek-v3.2",
+      "name": "DeepSeek V3.2",
+      "cost_per_1m_in": 0.302,
+      "cost_per_1m_out": 0.453,
       "cost_per_1m_in_cached": 0,
-      "cost_per_1m_out_cached": 0,
-      "context_window": 32000,
-      "default_max_tokens": 3200,
+      "cost_per_1m_out_cached": 0.0302,
+      "context_window": 128000,
+      "default_max_tokens": 64000,
       "can_reason": false,
       "supports_attachments": false,
       "options": {}
     },
     {
-      "id": "DeepSeek-V3.1-Fast",
-      "name": "DeepSeek V3.1 Fast",
+      "id": "deepseek-v3.2-fast",
+      "name": "DeepSeek V3.2 Fast",
       "cost_per_1m_in": 1.096,
       "cost_per_1m_out": 3.288,
       "cost_per_1m_in_cached": 0,
-      "cost_per_1m_out_cached": 0,
-      "context_window": 163000,
-      "default_max_tokens": 16300,
-      "can_reason": false,
-      "supports_attachments": false,
-      "options": {}
-    },
-    {
-      "id": "DeepSeek-V3.1-Terminus",
-      "name": "DeepSeek V3.1 Terminus",
-      "cost_per_1m_in": 0.56,
-      "cost_per_1m_out": 1.68,
-      "cost_per_1m_in_cached": 0,
-      "cost_per_1m_out_cached": 0,
-      "context_window": 160000,
-      "default_max_tokens": 32000,
+      "cost_per_1m_out_cached": 1.096,
+      "context_window": 128000,
+      "default_max_tokens": 12800,
       "can_reason": false,
       "supports_attachments": false,
       "options": {}
     },
     {
-      "id": "DeepSeek-V3.1-Think",
-      "name": "DeepSeek V3.1 Think",
-      "cost_per_1m_in": 0.56,
-      "cost_per_1m_out": 1.68,
+      "id": "deepseek-v3.2-speciale",
+      "name": "DeepSeek V3.2 Speciale",
+      "cost_per_1m_in": 0.58,
+      "cost_per_1m_out": 1.680028,
       "cost_per_1m_in_cached": 0,
       "cost_per_1m_out_cached": 0,
       "context_window": 128000,
-      "default_max_tokens": 32000,
+      "default_max_tokens": 12800,
       "can_reason": true,
       "reasoning_levels": [
         "low",
@@ -462,8 +571,8 @@
       "options": {}
     },
     {
-      "id": "deepseek-v3.2",
-      "name": "DeepSeek V3.2",
+      "id": "deepseek-v3.2-think",
+      "name": "DeepSeek V3.2 Think",
       "cost_per_1m_in": 0.302,
       "cost_per_1m_out": 0.453,
       "cost_per_1m_in_cached": 0,
@@ -474,83 +583,6 @@
       "supports_attachments": false,
       "options": {}
     },
-    {
-      "id": "deepseek-v3.2-fast",
-      "name": "DeepSeek V3.2 Fast",
-      "cost_per_1m_in": 1.096,
-      "cost_per_1m_out": 3.288,
-      "cost_per_1m_in_cached": 0,
-      "cost_per_1m_out_cached": 1.096,
-      "context_window": 128000,
-      "default_max_tokens": 12800,
-      "can_reason": false,
-      "supports_attachments": false,
-      "options": {}
-    },
-    {
-      "id": "DeepSeek-V3.2-Exp",
-      "name": "DeepSeek V3.2 Fast",
-      "cost_per_1m_in": 0.274,
-      "cost_per_1m_out": 0.411,
-      "cost_per_1m_in_cached": 0,
-      "cost_per_1m_out_cached": 0.0274,
-      "context_window": 163000,
-      "default_max_tokens": 16300,
-      "can_reason": false,
-      "supports_attachments": false,
-      "options": {}
-    },
-    {
-      "id": "deepseek-v3.2-speciale",
-      "name": "DeepSeek V3.2 Speciale",
-      "cost_per_1m_in": 0.58,
-      "cost_per_1m_out": 1.680028,
-      "cost_per_1m_in_cached": 0,
-      "cost_per_1m_out_cached": 0,
-      "context_window": 128000,
-      "default_max_tokens": 12800,
-      "can_reason": true,
-      "reasoning_levels": [
-        "low",
-        "medium",
-        "high"
-      ],
-      "default_reasoning_effort": "medium",
-      "supports_attachments": false,
-      "options": {}
-    },
-    {
-      "id": "deepseek-v3.2-think",
-      "name": "DeepSeek V3.2 Think",
-      "cost_per_1m_in": 0.302,
-      "cost_per_1m_out": 0.453,
-      "cost_per_1m_in_cached": 0,
-      "cost_per_1m_out_cached": 0.0302,
-      "context_window": 128000,
-      "default_max_tokens": 64000,
-      "can_reason": false,
-      "supports_attachments": false,
-      "options": {}
-    },
-    {
-      "id": "DeepSeek-V3.2-Exp-Think",
-      "name": "DeepSeek V3.2 Think",
-      "cost_per_1m_in": 0.274,
-      "cost_per_1m_out": 0.411,
-      "cost_per_1m_in_cached": 0,
-      "cost_per_1m_out_cached": 0.0274,
-      "context_window": 131000,
-      "default_max_tokens": 64000,
-      "can_reason": true,
-      "reasoning_levels": [
-        "low",
-        "medium",
-        "high"
-      ],
-      "default_reasoning_effort": "medium",
-      "supports_attachments": false,
-      "options": {}
-    },
     {
       "id": "doubao-seed-1-6",
       "name": "Doubao Seed 1.6",
@@ -719,252 +751,144 @@
       "options": {}
     },
     {
-      "id": "ERNIE-X1.1-Preview",
-      "name": "ERNIE X1.1 Preview",
-      "cost_per_1m_in": 0.136,
-      "cost_per_1m_out": 0.544,
+      "id": "gemini-2.0-flash",
+      "name": "Gemini 2.5 Flash",
+      "cost_per_1m_in": 0.1,
+      "cost_per_1m_out": 0.4,
       "cost_per_1m_in_cached": 0,
-      "cost_per_1m_out_cached": 0,
-      "context_window": 119000,
-      "default_max_tokens": 11900,
-      "can_reason": true,
-      "reasoning_levels": [
-        "low",
-        "medium",
-        "high"
-      ],
-      "default_reasoning_effort": "medium",
-      "supports_attachments": false,
+      "cost_per_1m_out_cached": 0.025,
+      "context_window": 1048576,
+      "default_max_tokens": 8192,
+      "can_reason": false,
+      "supports_attachments": true,
       "options": {}
     },
     {
-      "id": "glm-4.6",
-      "name": "GLM-4.6",
+      "id": "gemini-2.0-flash-free",
+      "name": "Gemini 2.0 Flash (Free)",
       "cost_per_1m_in": 0,
       "cost_per_1m_out": 0,
       "cost_per_1m_in_cached": 0,
       "cost_per_1m_out_cached": 0,
-      "context_window": 204800,
-      "default_max_tokens": 20480,
-      "can_reason": true,
-      "reasoning_levels": [
-        "low",
-        "medium",
-        "high"
-      ],
-      "default_reasoning_effort": "medium",
-      "supports_attachments": false,
+      "context_window": 1048576,
+      "default_max_tokens": 8192,
+      "can_reason": false,
+      "supports_attachments": true,
       "options": {}
     },
     {
-      "id": "glm-4.6v",
-      "name": "GLM-4.6 Vision",
-      "cost_per_1m_in": 0.137,
-      "cost_per_1m_out": 0.411,
+      "id": "gemini-2.5-flash",
+      "name": "Gemini 2.5 Flash",
+      "cost_per_1m_in": 0.3,
+      "cost_per_1m_out": 2.499,
       "cost_per_1m_in_cached": 0,
-      "cost_per_1m_out_cached": 0.0274,
-      "context_window": 128000,
-      "default_max_tokens": 12800,
+      "cost_per_1m_out_cached": 0.03,
+      "context_window": 1048576,
+      "default_max_tokens": 65536,
       "can_reason": false,
       "supports_attachments": true,
       "options": {}
     },
     {
-      "id": "glm-4.5v",
-      "name": "GLM-4.6 Vision",
-      "cost_per_1m_in": 0.274,
-      "cost_per_1m_out": 0.822,
+      "id": "gemini-2.5-flash-lite",
+      "name": "Gemini 2.5 Flash Lite",
+      "cost_per_1m_in": 0.1,
+      "cost_per_1m_out": 0.4,
       "cost_per_1m_in_cached": 0,
-      "cost_per_1m_out_cached": 0.274,
-      "context_window": 64000,
-      "default_max_tokens": 16384,
+      "cost_per_1m_out_cached": 0.01,
+      "context_window": 1048576,
+      "default_max_tokens": 65536,
       "can_reason": false,
       "supports_attachments": true,
       "options": {}
     },
     {
-      "id": "glm-4.7",
-      "name": "GLM-4.7",
-      "cost_per_1m_in": 0.273974,
-      "cost_per_1m_out": 1.095896,
+      "id": "gemini-2.5-flash-lite-preview-09-2025",
+      "name": "Gemini 2.5 Flash Lite Preview",
+      "cost_per_1m_in": 0.1,
+      "cost_per_1m_out": 0.4,
       "cost_per_1m_in_cached": 0,
-      "cost_per_1m_out_cached": 0.054795,
-      "context_window": 200000,
-      "default_max_tokens": 20000,
-      "can_reason": true,
-      "reasoning_levels": [
-        "low",
-        "medium",
-        "high"
-      ],
-      "default_reasoning_effort": "medium",
-      "supports_attachments": false,
+      "cost_per_1m_out_cached": 0.01,
+      "context_window": 1048576,
+      "default_max_tokens": 65536,
+      "can_reason": false,
+      "supports_attachments": true,
       "options": {}
     },
     {
-      "id": "gpt-oss-120b",
-      "name": "GPT OSS 120B",
-      "cost_per_1m_in": 0.18,
-      "cost_per_1m_out": 0.9,
+      "id": "gemini-2.5-flash-nothink",
+      "name": "Gemini 2.5 Flash (No Think)",
+      "cost_per_1m_in": 0.3,
+      "cost_per_1m_out": 2.499,
       "cost_per_1m_in_cached": 0,
-      "cost_per_1m_out_cached": 0,
-      "context_window": 131072,
-      "default_max_tokens": 32768,
-      "can_reason": true,
-      "reasoning_levels": [
-        "low",
-        "medium",
-        "high"
-      ],
-      "default_reasoning_effort": "medium",
-      "supports_attachments": false,
+      "cost_per_1m_out_cached": 0.03,
+      "context_window": 1047576,
+      "default_max_tokens": 65536,
+      "can_reason": false,
+      "supports_attachments": true,
       "options": {}
     },
     {
-      "id": "gpt-oss-20b",
-      "name": "GPT OSS 20B",
-      "cost_per_1m_in": 0.11,
-      "cost_per_1m_out": 0.55,
+      "id": "gemini-2.5-flash-preview-05-20-nothink",
+      "name": "Gemini 2.5 Flash Preview (No Think)",
+      "cost_per_1m_in": 0.3,
+      "cost_per_1m_out": 2.499,
       "cost_per_1m_in_cached": 0,
-      "cost_per_1m_out_cached": 0,
-      "context_window": 128000,
-      "default_max_tokens": 12800,
-      "can_reason": true,
-      "reasoning_levels": [
-        "low",
-        "medium",
-        "high"
-      ],
-      "default_reasoning_effort": "medium",
-      "supports_attachments": false,
+      "cost_per_1m_out_cached": 0.03,
+      "context_window": 1048576,
+      "default_max_tokens": 65536,
+      "can_reason": false,
+      "supports_attachments": true,
       "options": {}
     },
     {
-      "id": "gpt-4.1",
-      "name": "GPT-4.1",
-      "cost_per_1m_in": 2,
-      "cost_per_1m_out": 8,
+      "id": "gemini-2.5-flash-preview-05-20-search",
+      "name": "Gemini 2.5 Flash Search",
+      "cost_per_1m_in": 0.3,
+      "cost_per_1m_out": 2.499,
       "cost_per_1m_in_cached": 0,
-      "cost_per_1m_out_cached": 0.5,
-      "context_window": 1047576,
-      "default_max_tokens": 32768,
+      "cost_per_1m_out_cached": 0.03,
+      "context_window": 1048576,
+      "default_max_tokens": 65536,
       "can_reason": false,
       "supports_attachments": true,
       "options": {}
     },
     {
-      "id": "gpt-4.1-mini",
-      "name": "GPT-4.1 Mini",
-      "cost_per_1m_in": 0.4,
-      "cost_per_1m_out": 1.6,
+      "id": "gemini-2.5-flash-preview-09-2025",
+      "name": "Gemini 2.5 Flash Preview",
+      "cost_per_1m_in": 0.3,
+      "cost_per_1m_out": 2.499,
       "cost_per_1m_in_cached": 0,
-      "cost_per_1m_out_cached": 0.1,
-      "context_window": 1047576,
-      "default_max_tokens": 32768,
+      "cost_per_1m_out_cached": 0.03,
+      "context_window": 1048576,
+      "default_max_tokens": 65536,
       "can_reason": false,
       "supports_attachments": true,
       "options": {}
     },
     {
-      "id": "gpt-4.1-nano",
-      "name": "GPT-4.1 Nano",
-      "cost_per_1m_in": 0.1,
-      "cost_per_1m_out": 0.4,
+      "id": "gemini-2.5-flash-search",
+      "name": "Gemini 2.5 Flash Search",
+      "cost_per_1m_in": 0.3,
+      "cost_per_1m_out": 2.499,
       "cost_per_1m_in_cached": 0,
-      "cost_per_1m_out_cached": 0.025,
-      "context_window": 1047576,
-      "default_max_tokens": 32768,
+      "cost_per_1m_out_cached": 0.03,
+      "context_window": 1048576,
+      "default_max_tokens": 65536,
       "can_reason": false,
       "supports_attachments": true,
       "options": {}
     },
     {
-      "id": "gpt-4o",
-      "name": "GPT-4o",
-      "cost_per_1m_in": 2.5,
-      "cost_per_1m_out": 10,
-      "cost_per_1m_in_cached": 0,
-      "cost_per_1m_out_cached": 1.25,
-      "context_window": 128000,
-      "default_max_tokens": 16384,
-      "can_reason": false,
-      "supports_attachments": true,
-      "options": {}
-    },
-    {
-      "id": "gpt-4o-2024-11-20",
-      "name": "GPT-4o",
-      "cost_per_1m_in": 2.5,
-      "cost_per_1m_out": 10,
-      "cost_per_1m_in_cached": 0,
-      "cost_per_1m_out_cached": 1.25,
-      "context_window": 128000,
-      "default_max_tokens": 16384,
-      "can_reason": false,
-      "supports_attachments": true,
-      "options": {}
-    },
-    {
-      "id": "gpt-4o-audio-preview",
-      "name": "GPT-4o Audio Preview",
-      "cost_per_1m_in": 2.5,
-      "cost_per_1m_out": 10,
-      "cost_per_1m_in_cached": 0,
-      "cost_per_1m_out_cached": 0,
-      "context_window": 128000,
-      "default_max_tokens": 16384,
-      "can_reason": false,
-      "supports_attachments": false,
-      "options": {}
-    },
-    {
-      "id": "gpt-4o-mini",
-      "name": "GPT-4o Mini",
-      "cost_per_1m_in": 0.15,
-      "cost_per_1m_out": 0.6,
-      "cost_per_1m_in_cached": 0,
-      "cost_per_1m_out_cached": 0.075,
-      "context_window": 128000,
-      "default_max_tokens": 16384,
-      "can_reason": false,
-      "supports_attachments": true,
-      "options": {}
-    },
-    {
-      "id": "gpt-4o-mini-search-preview",
-      "name": "GPT-4o Mini Search",
-      "cost_per_1m_in": 0.15,
-      "cost_per_1m_out": 0.6,
-      "cost_per_1m_in_cached": 0,
-      "cost_per_1m_out_cached": 0.075,
-      "context_window": 128000,
-      "default_max_tokens": 16384,
-      "can_reason": false,
-      "supports_attachments": true,
-      "options": {}
-    },
-    {
-      "id": "gpt-4o-search-preview",
-      "name": "GPT-4o Search",
-      "cost_per_1m_in": 2.5,
-      "cost_per_1m_out": 10,
-      "cost_per_1m_in_cached": 0,
-      "cost_per_1m_out_cached": 1.25,
-      "context_window": 128000,
-      "default_max_tokens": 16384,
-      "can_reason": false,
-      "supports_attachments": true,
-      "options": {}
-    },
-    {
-      "id": "gpt-5",
-      "name": "GPT-5",
+      "id": "gemini-2.5-pro",
+      "name": "Gemini 2.5 Pro",
       "cost_per_1m_in": 1.25,
       "cost_per_1m_out": 10,
       "cost_per_1m_in_cached": 0,
       "cost_per_1m_out_cached": 0.125,
-      "context_window": 400000,
-      "default_max_tokens": 128000,
+      "context_window": 1048576,
+      "default_max_tokens": 65536,
       "can_reason": true,
       "reasoning_levels": [
         "low",
@@ -976,14 +900,14 @@
       "options": {}
     },
     {
-      "id": "gpt-5-codex",
-      "name": "GPT-5 Codex",
+      "id": "gemini-2.5-pro-preview-05-06",
+      "name": "Gemini 2.5 Pro Preview",
       "cost_per_1m_in": 1.25,
       "cost_per_1m_out": 10,
       "cost_per_1m_in_cached": 0,
       "cost_per_1m_out_cached": 0.125,
-      "context_window": 400000,
-      "default_max_tokens": 128000,
+      "context_window": 1048576,
+      "default_max_tokens": 65536,
       "can_reason": true,
       "reasoning_levels": [
         "low",
@@ -995,14 +919,14 @@
       "options": {}
     },
     {
-      "id": "gpt-5-mini",
-      "name": "GPT-5 Mini",
-      "cost_per_1m_in": 0.25,
-      "cost_per_1m_out": 2,
+      "id": "gemini-2.5-pro-preview-06-05",
+      "name": "Gemini 2.5 Pro Preview",
+      "cost_per_1m_in": 1.25,
+      "cost_per_1m_out": 10,
       "cost_per_1m_in_cached": 0,
-      "cost_per_1m_out_cached": 0.025,
-      "context_window": 400000,
-      "default_max_tokens": 128000,
+      "cost_per_1m_out_cached": 0.125,
+      "context_window": 1048576,
+      "default_max_tokens": 65536,
       "can_reason": true,
       "reasoning_levels": [
         "low",
@@ -1014,14 +938,14 @@
       "options": {}
     },
     {
-      "id": "gpt-5-nano",
-      "name": "GPT-5 Nano",
-      "cost_per_1m_in": 0.05,
-      "cost_per_1m_out": 0.4,
+      "id": "gemini-2.5-pro-search",
+      "name": "Gemini 2.5 Pro Search",
+      "cost_per_1m_in": 1.25,
+      "cost_per_1m_out": 10,
       "cost_per_1m_in_cached": 0,
-      "cost_per_1m_out_cached": 0.005,
-      "context_window": 400000,
-      "default_max_tokens": 128000,
+      "cost_per_1m_out_cached": 0.125,
+      "context_window": 1048576,
+      "default_max_tokens": 65536,
       "can_reason": true,
       "reasoning_levels": [
         "low",
@@ -1033,14 +957,14 @@
       "options": {}
     },
     {
-      "id": "gpt-5.2-pro",
-      "name": "GPT-5 Pro",
-      "cost_per_1m_in": 21,
-      "cost_per_1m_out": 168,
+      "id": "gemini-3-flash-preview",
+      "name": "Gemini 3.0 Flash Preview",
+      "cost_per_1m_in": 0.5,
+      "cost_per_1m_out": 3,
       "cost_per_1m_in_cached": 0,
-      "cost_per_1m_out_cached": 2.1,
-      "context_window": 400000,
-      "default_max_tokens": 128000,
+      "cost_per_1m_out_cached": 0.05,
+      "context_window": 1048576,
+      "default_max_tokens": 65536,
       "can_reason": true,
       "reasoning_levels": [
         "low",
@@ -1052,14 +976,14 @@
       "options": {}
     },
     {
-      "id": "gpt-5-pro",
-      "name": "GPT-5 Pro",
-      "cost_per_1m_in": 15,
-      "cost_per_1m_out": 120,
+      "id": "gemini-3-flash-preview-free",
+      "name": "Gemini 3.0 Flash Preview (Free)",
+      "cost_per_1m_in": 0,
+      "cost_per_1m_out": 0,
       "cost_per_1m_in_cached": 0,
       "cost_per_1m_out_cached": 0,
-      "context_window": 400000,
-      "default_max_tokens": 128000,
+      "context_window": 1048576,
+      "default_max_tokens": 65536,
       "can_reason": true,
       "reasoning_levels": [
         "low",
@@ -1071,33 +995,27 @@
       "options": {}
     },
     {
-      "id": "gpt-5.1",
-      "name": "GPT-5.1",
-      "cost_per_1m_in": 1.25,
-      "cost_per_1m_out": 10,
+      "id": "glm-4.5v",
+      "name": "GLM-4.5 Vision",
+      "cost_per_1m_in": 0.274,
+      "cost_per_1m_out": 0.822,
       "cost_per_1m_in_cached": 0,
-      "cost_per_1m_out_cached": 0.125,
-      "context_window": 400000,
-      "default_max_tokens": 128000,
-      "can_reason": true,
-      "reasoning_levels": [
-        "low",
-        "medium",
-        "high"
-      ],
-      "default_reasoning_effort": "medium",
+      "cost_per_1m_out_cached": 0.274,
+      "context_window": 64000,
+      "default_max_tokens": 16384,
+      "can_reason": false,
       "supports_attachments": true,
       "options": {}
     },
     {
-      "id": "gpt-5.1-codex-max",
-      "name": "GPT-5.1 Codex",
-      "cost_per_1m_in": 1.25,
-      "cost_per_1m_out": 10,
+      "id": "glm-4.6",
+      "name": "GLM-4.6",
+      "cost_per_1m_in": 0,
+      "cost_per_1m_out": 0,
       "cost_per_1m_in_cached": 0,
-      "cost_per_1m_out_cached": 0.125,
-      "context_window": 400000,
-      "default_max_tokens": 128000,
+      "cost_per_1m_out_cached": 0,
+      "context_window": 204800,
+      "default_max_tokens": 20480,
       "can_reason": true,
       "reasoning_levels": [
         "low",
@@ -1105,37 +1023,31 @@
         "high"
       ],
       "default_reasoning_effort": "medium",
-      "supports_attachments": true,
+      "supports_attachments": false,
       "options": {}
     },
     {
-      "id": "gpt-5.1-codex",
-      "name": "GPT-5.1 Codex",
-      "cost_per_1m_in": 1.25,
-      "cost_per_1m_out": 10,
+      "id": "glm-4.6v",
+      "name": "GLM-4.6 Vision",
+      "cost_per_1m_in": 0.137,
+      "cost_per_1m_out": 0.411,
       "cost_per_1m_in_cached": 0,
-      "cost_per_1m_out_cached": 0.125,
-      "context_window": 400000,
-      "default_max_tokens": 128000,
-      "can_reason": true,
-      "reasoning_levels": [
-        "low",
-        "medium",
-        "high"
-      ],
-      "default_reasoning_effort": "medium",
+      "cost_per_1m_out_cached": 0.0274,
+      "context_window": 128000,
+      "default_max_tokens": 12800,
+      "can_reason": false,
       "supports_attachments": true,
       "options": {}
     },
     {
-      "id": "gpt-5.2-low",
-      "name": "GPT-5.2",
-      "cost_per_1m_in": 1.75,
-      "cost_per_1m_out": 14,
+      "id": "glm-4.7",
+      "name": "GLM-4.7",
+      "cost_per_1m_in": 0.273974,
+      "cost_per_1m_out": 1.095896,
       "cost_per_1m_in_cached": 0,
-      "cost_per_1m_out_cached": 0.175,
-      "context_window": 400000,
-      "default_max_tokens": 128000,
+      "cost_per_1m_out_cached": 0.054795,
+      "context_window": 200000,
+      "default_max_tokens": 20000,
       "can_reason": true,
       "reasoning_levels": [
         "low",
@@ -1143,186 +1055,205 @@
         "high"
       ],
       "default_reasoning_effort": "medium",
-      "supports_attachments": true,
+      "supports_attachments": false,
       "options": {}
     },
     {
-      "id": "gpt-5.2",
-      "name": "GPT-5.2",
-      "cost_per_1m_in": 1.75,
-      "cost_per_1m_out": 14,
+      "id": "gpt-4.1",
+      "name": "GPT-4.1",
+      "cost_per_1m_in": 2,
+      "cost_per_1m_out": 8,
       "cost_per_1m_in_cached": 0,
-      "cost_per_1m_out_cached": 0.175,
-      "context_window": 400000,
-      "default_max_tokens": 128000,
-      "can_reason": true,
-      "reasoning_levels": [
-        "low",
-        "medium",
-        "high"
-      ],
-      "default_reasoning_effort": "medium",
+      "cost_per_1m_out_cached": 0.5,
+      "context_window": 1047576,
+      "default_max_tokens": 32768,
+      "can_reason": false,
       "supports_attachments": true,
       "options": {}
     },
     {
-      "id": "gpt-5.2-codex",
-      "name": "GPT-5.2 Codex",
-      "cost_per_1m_in": 1.75,
-      "cost_per_1m_out": 14,
+      "id": "gpt-4.1-mini",
+      "name": "GPT-4.1 Mini",
+      "cost_per_1m_in": 0.4,
+      "cost_per_1m_out": 1.6,
       "cost_per_1m_in_cached": 0,
-      "cost_per_1m_out_cached": 0.175,
-      "context_window": 400000,
-      "default_max_tokens": 128000,
-      "can_reason": true,
-      "reasoning_levels": [
-        "low",
-        "medium",
-        "high"
-      ],
-      "default_reasoning_effort": "medium",
+      "cost_per_1m_out_cached": 0.1,
+      "context_window": 1047576,
+      "default_max_tokens": 32768,
+      "can_reason": false,
       "supports_attachments": true,
       "options": {}
     },
     {
-      "id": "gemini-2.0-flash-free",
-      "name": "Gemini 2.0 Flash (Free)",
-      "cost_per_1m_in": 0,
-      "cost_per_1m_out": 0,
+      "id": "gpt-4.1-nano",
+      "name": "GPT-4.1 Nano",
+      "cost_per_1m_in": 0.1,
+      "cost_per_1m_out": 0.4,
       "cost_per_1m_in_cached": 0,
-      "cost_per_1m_out_cached": 0,
-      "context_window": 1048576,
-      "default_max_tokens": 8192,
+      "cost_per_1m_out_cached": 0.025,
+      "context_window": 1047576,
+      "default_max_tokens": 32768,
       "can_reason": false,
       "supports_attachments": true,
       "options": {}
     },
     {
-      "id": "gemini-2.5-flash",
-      "name": "Gemini 2.5 Flash",
-      "cost_per_1m_in": 0.3,
-      "cost_per_1m_out": 2.499,
+      "id": "gpt-4o",
+      "name": "GPT-4o",
+      "cost_per_1m_in": 2.5,
+      "cost_per_1m_out": 10,
       "cost_per_1m_in_cached": 0,
-      "cost_per_1m_out_cached": 0.03,
-      "context_window": 1048576,
-      "default_max_tokens": 65536,
+      "cost_per_1m_out_cached": 1.25,
+      "context_window": 128000,
+      "default_max_tokens": 16384,
       "can_reason": false,
       "supports_attachments": true,
       "options": {}
     },
     {
-      "id": "gemini-2.0-flash",
-      "name": "Gemini 2.5 Flash",
-      "cost_per_1m_in": 0.1,
-      "cost_per_1m_out": 0.4,
+      "id": "gpt-4o-2024-11-20",
+      "name": "GPT-4o",
+      "cost_per_1m_in": 2.5,
+      "cost_per_1m_out": 10,
       "cost_per_1m_in_cached": 0,
-      "cost_per_1m_out_cached": 0.025,
-      "context_window": 1048576,
-      "default_max_tokens": 8192,
+      "cost_per_1m_out_cached": 1.25,
+      "context_window": 128000,
+      "default_max_tokens": 16384,
       "can_reason": false,
       "supports_attachments": true,
       "options": {}
     },
     {
-      "id": "gemini-2.5-flash-nothink",
-      "name": "Gemini 2.5 Flash (No Think)",
-      "cost_per_1m_in": 0.3,
-      "cost_per_1m_out": 2.499,
+      "id": "gpt-4o-audio-preview",
+      "name": "GPT-4o Audio Preview",
+      "cost_per_1m_in": 2.5,
+      "cost_per_1m_out": 10,
       "cost_per_1m_in_cached": 0,
-      "cost_per_1m_out_cached": 0.03,
-      "context_window": 1047576,
-      "default_max_tokens": 65536,
+      "cost_per_1m_out_cached": 0,
+      "context_window": 128000,
+      "default_max_tokens": 16384,
       "can_reason": false,
-      "supports_attachments": true,
+      "supports_attachments": false,
       "options": {}
     },
     {
-      "id": "gemini-2.5-flash-lite",
-      "name": "Gemini 2.5 Flash Lite",
-      "cost_per_1m_in": 0.1,
-      "cost_per_1m_out": 0.4,
+      "id": "gpt-4o-mini",
+      "name": "GPT-4o Mini",
+      "cost_per_1m_in": 0.15,
+      "cost_per_1m_out": 0.6,
       "cost_per_1m_in_cached": 0,
-      "cost_per_1m_out_cached": 0.01,
-      "context_window": 1048576,
-      "default_max_tokens": 65536,
+      "cost_per_1m_out_cached": 0.075,
+      "context_window": 128000,
+      "default_max_tokens": 16384,
       "can_reason": false,
       "supports_attachments": true,
       "options": {}
     },
     {
-      "id": "gemini-2.5-flash-lite-preview-09-2025",
-      "name": "Gemini 2.5 Flash Lite Preview",
-      "cost_per_1m_in": 0.1,
-      "cost_per_1m_out": 0.4,
+      "id": "gpt-4o-mini-search-preview",
+      "name": "GPT-4o Mini Search",
+      "cost_per_1m_in": 0.15,
+      "cost_per_1m_out": 0.6,
       "cost_per_1m_in_cached": 0,
-      "cost_per_1m_out_cached": 0.01,
-      "context_window": 1048576,
-      "default_max_tokens": 65536,
+      "cost_per_1m_out_cached": 0.075,
+      "context_window": 128000,
+      "default_max_tokens": 16384,
       "can_reason": false,
       "supports_attachments": true,
       "options": {}
     },
     {
-      "id": "gemini-2.5-flash-preview-09-2025",
-      "name": "Gemini 2.5 Flash Preview",
-      "cost_per_1m_in": 0.3,
-      "cost_per_1m_out": 2.499,
+      "id": "gpt-4o-search-preview",
+      "name": "GPT-4o Search",
+      "cost_per_1m_in": 2.5,
+      "cost_per_1m_out": 10,
       "cost_per_1m_in_cached": 0,
-      "cost_per_1m_out_cached": 0.03,
-      "context_window": 1048576,
-      "default_max_tokens": 65536,
+      "cost_per_1m_out_cached": 1.25,
+      "context_window": 128000,
+      "default_max_tokens": 16384,
       "can_reason": false,
       "supports_attachments": true,
       "options": {}
     },
     {
-      "id": "gemini-2.5-flash-preview-05-20-nothink",
-      "name": "Gemini 2.5 Flash Preview (No Think)",
-      "cost_per_1m_in": 0.3,
-      "cost_per_1m_out": 2.499,
+      "id": "gpt-5",
+      "name": "GPT-5",
+      "cost_per_1m_in": 1.25,
+      "cost_per_1m_out": 10,
       "cost_per_1m_in_cached": 0,
-      "cost_per_1m_out_cached": 0.03,
-      "context_window": 1048576,
-      "default_max_tokens": 65536,
-      "can_reason": false,
+      "cost_per_1m_out_cached": 0.125,
+      "context_window": 400000,
+      "default_max_tokens": 128000,
+      "can_reason": true,
+      "reasoning_levels": [
+        "low",
+        "medium",
+        "high"
+      ],
+      "default_reasoning_effort": "medium",
       "supports_attachments": true,
       "options": {}
     },
     {
-      "id": "gemini-2.5-flash-preview-05-20-search",
-      "name": "Gemini 2.5 Flash Search",
-      "cost_per_1m_in": 0.3,
-      "cost_per_1m_out": 2.499,
+      "id": "gpt-5-chat-latest",
+      "name": "Gpt 5 Chat Latest",
+      "cost_per_1m_in": 1.25,
+      "cost_per_1m_out": 10,
       "cost_per_1m_in_cached": 0,
-      "cost_per_1m_out_cached": 0.03,
-      "context_window": 1048576,
-      "default_max_tokens": 65536,
+      "cost_per_1m_out_cached": 0.125,
+      "context_window": 400000,
+      "default_max_tokens": 128000,
       "can_reason": false,
       "supports_attachments": true,
       "options": {}
     },
     {
-      "id": "gemini-2.5-flash-search",
-      "name": "Gemini 2.5 Flash Search",
-      "cost_per_1m_in": 0.3,
-      "cost_per_1m_out": 2.499,
+      "id": "gpt-5-codex",
+      "name": "GPT-5 Codex",
+      "cost_per_1m_in": 1.25,
+      "cost_per_1m_out": 10,
       "cost_per_1m_in_cached": 0,
-      "cost_per_1m_out_cached": 0.03,
-      "context_window": 1048576,
-      "default_max_tokens": 65536,
-      "can_reason": false,
+      "cost_per_1m_out_cached": 0.125,
+      "context_window": 400000,
+      "default_max_tokens": 128000,
+      "can_reason": true,
+      "reasoning_levels": [
+        "low",
+        "medium",
+        "high"
+      ],
+      "default_reasoning_effort": "medium",
+      "supports_attachments": true,
+      "options": {}
+    },
+    {
+      "id": "gpt-5-mini",
+      "name": "GPT-5 Mini",
+      "cost_per_1m_in": 0.25,
+      "cost_per_1m_out": 2,
+      "cost_per_1m_in_cached": 0,
+      "cost_per_1m_out_cached": 0.025,
+      "context_window": 400000,
+      "default_max_tokens": 128000,
+      "can_reason": true,
+      "reasoning_levels": [
+        "low",
+        "medium",
+        "high"
+      ],
+      "default_reasoning_effort": "medium",
       "supports_attachments": true,
       "options": {}
     },
     {
-      "id": "gemini-2.5-pro",
-      "name": "Gemini 2.5 Pro",
-      "cost_per_1m_in": 1.25,
-      "cost_per_1m_out": 10,
+      "id": "gpt-5-nano",
+      "name": "GPT-5 Nano",
+      "cost_per_1m_in": 0.05,
+      "cost_per_1m_out": 0.4,
       "cost_per_1m_in_cached": 0,
-      "cost_per_1m_out_cached": 0.125,
-      "context_window": 1048576,
-      "default_max_tokens": 65536,
+      "cost_per_1m_out_cached": 0.005,
+      "context_window": 400000,
+      "default_max_tokens": 128000,
       "can_reason": true,
       "reasoning_levels": [
         "low",