feat: initial models setup

Kujtim Hoxha created

Change summary

.gitignore                                 |   35 
README.md                                  |    3 
cmd/openrouter/main.go                     |  172 ++
go.mod                                     |    3 
internal/providers/configs/anthropic.json  |   82 +
internal/providers/configs/azure.json      |  143 +
internal/providers/configs/bedrock.json    |   58 
internal/providers/configs/gemini.json     |   34 
internal/providers/configs/openai.json     |  143 +
internal/providers/configs/openrouter.json | 1726 ++++++++++++++++++++++++
internal/providers/configs/vertexai.json   |   34 
internal/providers/configs/xai.json        |   34 
internal/providers/providers.go            |   72 +
internal/providers/types.go                |   88 +
main.go                                    |   34 
15 files changed, 2,661 insertions(+)

Detailed changes

.gitignore πŸ”—

@@ -0,0 +1,35 @@
+# If you prefer the allow list template instead of the deny list, see community template:
+# https://github.com/github/gitignore/blob/main/community/Golang/Go.AllowList.gitignore
+#
+# Binaries for programs and plugins
+*.exe
+*.exe~
+*.dll
+*.so
+*.dylib
+
+# Test binary, built with `go test -c`
+*.test
+
+# Code coverage profiles and other test artifacts
+*.out
+coverage.*
+*.coverprofile
+profile.cov
+
+# Dependency directories (remove the comment below to include it)
+# vendor/
+
+# Go workspace file
+go.work
+go.work.sum
+
+# env file
+.env
+
+# Editor/IDE
+.idea/
+.vscode/
+
+# crush
+.crush

README.md πŸ”—

@@ -0,0 +1,3 @@
+# Fur
+
+Database for _crush_ compatible models

cmd/openrouter/main.go πŸ”—

@@ -0,0 +1,172 @@
+package main
+
+import (
+	"encoding/json"
+	"fmt"
+	"io"
+	"log"
+	"net/http"
+	"os"
+	"slices"
+	"strconv"
+	"time"
+
+	"github.com/charmbracelet/fur/internal/providers"
+)
+
+// Model represents the complete model configuration
+type Model struct {
+	ID              string       `json:"id"`
+	CanonicalSlug   string       `json:"canonical_slug"`
+	HuggingFaceID   string       `json:"hugging_face_id"`
+	Name            string       `json:"name"`
+	Created         int64        `json:"created"`
+	Description     string       `json:"description"`
+	ContextLength   int64        `json:"context_length"`
+	Architecture    Architecture `json:"architecture"`
+	Pricing         Pricing      `json:"pricing"`
+	TopProvider     TopProvider  `json:"top_provider"`
+	SupportedParams []string     `json:"supported_parameters"`
+}
+
+// Architecture defines the model's architecture details
+type Architecture struct {
+	Modality         string   `json:"modality"`
+	InputModalities  []string `json:"input_modalities"`
+	OutputModalities []string `json:"output_modalities"`
+	Tokenizer        string   `json:"tokenizer"`
+	InstructType     *string  `json:"instruct_type"`
+}
+
+// Pricing contains the pricing information for different operations
+type Pricing struct {
+	Prompt            string `json:"prompt"`
+	Completion        string `json:"completion"`
+	Request           string `json:"request"`
+	Image             string `json:"image"`
+	WebSearch         string `json:"web_search"`
+	InternalReasoning string `json:"internal_reasoning"`
+	InputCacheRead    string `json:"input_cache_read"`
+	InputCacheWrite   string `json:"input_cache_write"`
+}
+
+// TopProvider describes the top provider's capabilities
+type TopProvider struct {
+	ContextLength       int64  `json:"context_length"`
+	MaxCompletionTokens *int64 `json:"max_completion_tokens"`
+	IsModerated         bool   `json:"is_moderated"`
+}
+
+type ModelsResponse struct {
+	Data []Model `json:"data"`
+}
+type ModelPricing struct {
+	CostPer1MIn        float64 `json:"cost_per_1m_in"`
+	CostPer1MOut       float64 `json:"cost_per_1m_out"`
+	CostPer1MInCached  float64 `json:"cost_per_1m_in_cached"`
+	CostPer1MOutCached float64 `json:"cost_per_1m_out_cached"`
+}
+
+func getPricing(model Model) ModelPricing {
+	pricing := ModelPricing{}
+	costPrompt, err := strconv.ParseFloat(model.Pricing.Prompt, 64)
+	if err != nil {
+		costPrompt = 0.0
+	}
+	pricing.CostPer1MIn = costPrompt * 1_000_000
+	costCompletion, err := strconv.ParseFloat(model.Pricing.Completion, 64)
+	if err != nil {
+		costCompletion = 0.0
+	}
+	pricing.CostPer1MOut = costCompletion * 1_000_000
+
+	costPromptCached, err := strconv.ParseFloat(model.Pricing.InputCacheWrite, 64)
+	if err != nil {
+		costPromptCached = 0.0
+	}
+	pricing.CostPer1MInCached = costPromptCached * 1_000_000
+	costCompletionCached, err := strconv.ParseFloat(model.Pricing.InputCacheRead, 64)
+	if err != nil {
+		costCompletionCached = 0.0
+	}
+	pricing.CostPer1MOutCached = costCompletionCached * 1_000_000
+	return pricing
+}
+
+func fetchOpenRouterModels() (*ModelsResponse, error) {
+	client := &http.Client{Timeout: 30 * time.Second}
+	req, _ := http.NewRequest("GET", "https://openrouter.ai/api/v1/models", nil)
+	req.Header.Set("User-Agent", "Crush-Client/1.0")
+	resp, err := client.Do(req)
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+	if resp.StatusCode != 200 {
+		body, _ := io.ReadAll(resp.Body)
+		return nil, fmt.Errorf("status %d: %s", resp.StatusCode, body)
+	}
+	var mr ModelsResponse
+	if err := json.NewDecoder(resp.Body).Decode(&mr); err != nil {
+		return nil, err
+	}
+	return &mr, nil
+}
+
+// This is used to generate the openrouter.json config file
+func main() {
+	modelsResp, err := fetchOpenRouterModels()
+	if err != nil {
+		log.Fatal("Error fetching OpenRouter models:", err)
+	}
+
+	openRouterProvider := providers.Provider{
+		Name:           "OpenRouter",
+		ID:             "openrouter",
+		APIKey:         "$OPENROUTER_API_KEY",
+		APIEndpoint:    "https://openrouter.ai/api/v1",
+		Type:           providers.ProviderTypeOpenAI,
+		DefaultModelID: "anthropic/claude-sonnet-4",
+		Models:         []providers.Model{},
+	}
+
+	for _, model := range modelsResp.Data {
+		// skip non-text models or those without tools
+		if !slices.Contains(model.SupportedParams, "tools") ||
+			!slices.Contains(model.Architecture.InputModalities, "text") ||
+			!slices.Contains(model.Architecture.OutputModalities, "text") {
+			continue
+		}
+
+		pricing := getPricing(model)
+		canReason := slices.Contains(model.SupportedParams, "reasoning")
+		supportsImages := slices.Contains(model.Architecture.InputModalities, "image")
+
+		m := providers.Model{
+			ID:                 model.ID,
+			Name:               model.Name,
+			CostPer1MIn:        pricing.CostPer1MIn,
+			CostPer1MOut:       pricing.CostPer1MOut,
+			CostPer1MInCached:  pricing.CostPer1MInCached,
+			CostPer1MOutCached: pricing.CostPer1MOutCached,
+			ContextWindow:      model.ContextLength,
+			CanReason:          canReason,
+			SupportsImages:     supportsImages,
+		}
+		if model.TopProvider.MaxCompletionTokens != nil {
+			m.DefaultMaxTokens = *model.TopProvider.MaxCompletionTokens
+		}
+		openRouterProvider.Models = append(openRouterProvider.Models, m)
+	}
+
+	// save the json in internal/providers/config/openrouter.json
+	data, err := json.MarshalIndent(openRouterProvider, "", "  ")
+	if err != nil {
+		log.Fatal("Error marshaling OpenRouter provider:", err)
+	}
+	// write to file
+	err = os.WriteFile("internal/providers/configs/openrouter.json", data, 0o644)
+	if err != nil {
+		log.Fatal("Error writing OpenRouter provider config:", err)
+	}
+}

go.mod πŸ”—

@@ -0,0 +1,3 @@
+module github.com/charmbracelet/fur
+
+go 1.24.3

internal/providers/configs/anthropic.json πŸ”—

@@ -0,0 +1,82 @@
+{
+  "name": "Anthropic",
+  "id": "anthropic",
+  "type": "anthropic",
+  "api_key": "$ANTHROPIC_API_KEY",
+  "api_endpoint": "$ANTHROPIC_API_ENDPOINT",
+  "default_model_id": "claude-sonnet-4-20250514",
+  "models": [
+    {
+      "id": "claude-opus-4-20250514",
+      "model": "Claude Opus 4",
+      "cost_per_1m_in": 15,
+      "cost_per_1m_out": 75,
+      "cost_per_1m_in_cached": 18.75,
+      "cost_per_1m_out_cached": 1.5,
+      "context_window": 200000,
+      "default_max_tokens": 32000,
+      "can_reason": true,
+      "supports_attachments": true
+    },
+    {
+      "id": "claude-sonnet-4-20250514",
+      "model": "Claude Sonnet 4",
+      "cost_per_1m_in": 3,
+      "cost_per_1m_out": 15,
+      "cost_per_1m_in_cached": 3.75,
+      "cost_per_1m_out_cached": 0.3,
+      "context_window": 200000,
+      "default_max_tokens": 64000,
+      "can_reason": true,
+      "supports_attachments": true
+    },
+    {
+      "id": "claude-3-7-sonnet-20250219",
+      "model": "Claude 3.7 Sonnet",
+      "cost_per_1m_in": 3,
+      "cost_per_1m_out": 15,
+      "cost_per_1m_in_cached": 3.75,
+      "cost_per_1m_out_cached": 0.3,
+      "context_window": 200000,
+      "default_max_tokens": 64000,
+      "can_reason": true,
+      "supports_attachments": true
+    },
+    {
+      "id": "claude-3-5-haiku-20241022",
+      "model": "Claude 3.5 Haiku",
+      "cost_per_1m_in": 0.7999999999999999,
+      "cost_per_1m_out": 4,
+      "cost_per_1m_in_cached": 1,
+      "cost_per_1m_out_cached": 0.08,
+      "context_window": 200000,
+      "default_max_tokens": 8192,
+      "can_reason": false,
+      "supports_attachments": true
+    },
+    {
+      "id": "claude-3-5-sonnet-20240620",
+      "model": "Claude 3.5 Sonnet (Old)",
+      "cost_per_1m_in": 3,
+      "cost_per_1m_out": 15,
+      "cost_per_1m_in_cached": 3.75,
+      "cost_per_1m_out_cached": 0.3,
+      "context_window": 200000,
+      "default_max_tokens": 8192,
+      "can_reason": false,
+      "supports_attachments": true
+    },
+    {
+      "id": "claude-3-5-sonnet-20241022",
+      "model": "Claude 3.5 Sonnet (New)",
+      "cost_per_1m_in": 3,
+      "cost_per_1m_out": 15,
+      "cost_per_1m_in_cached": 3.75,
+      "cost_per_1m_out_cached": 0.3,
+      "context_window": 200000,
+      "default_max_tokens": 8192,
+      "can_reason": false,
+      "supports_attachments": true
+    }
+  ]
+}

internal/providers/configs/azure.json πŸ”—

@@ -0,0 +1,143 @@
+{
+  "name": "Azure OpenAI",
+  "id": "azure",
+  "type": "azure",
+  "api_key": "$AZURE_OPENAI_API_KEY",
+  "api_endpoint": "$AZURE_OPENAI_API_ENDPOINT",
+  "default_model_id": "o4-mini",
+  "models": [
+    {
+      "id": "codex-mini-latest",
+      "model": "Codex Mini",
+      "cost_per_1m_in": 1.5,
+      "cost_per_1m_out": 6,
+      "cost_per_1m_in_cached": 0,
+      "cost_per_1m_out_cached": 0.375,
+      "context_window": 200000,
+      "default_max_tokens": 100000,
+      "can_reason": false,
+      "supports_attachments": true
+    },
+    {
+      "id": "o4-mini",
+      "model": "o4 Mini",
+      "cost_per_1m_in": 1.1,
+      "cost_per_1m_out": 4.4,
+      "cost_per_1m_in_cached": 0,
+      "cost_per_1m_out_cached": 0.275,
+      "context_window": 200000,
+      "default_max_tokens": 100000,
+      "can_reason": false,
+      "supports_attachments": true
+    },
+    {
+      "id": "o3",
+      "model": "o3",
+      "cost_per_1m_in": 2,
+      "cost_per_1m_out": 8,
+      "cost_per_1m_in_cached": 0,
+      "cost_per_1m_out_cached": 0.5,
+      "context_window": 200000,
+      "default_max_tokens": 100000,
+      "can_reason": false,
+      "supports_attachments": true
+    },
+    {
+      "id": "o3-pro",
+      "model": "o3 Pro",
+      "cost_per_1m_in": 20,
+      "cost_per_1m_out": 80,
+      "cost_per_1m_in_cached": 0,
+      "cost_per_1m_out_cached": 0,
+      "context_window": 200000,
+      "default_max_tokens": 100000,
+      "can_reason": false,
+      "supports_attachments": true
+    },
+    {
+      "id": "gpt-4.1",
+      "model": "GPT-4.1",
+      "cost_per_1m_in": 2,
+      "cost_per_1m_out": 8,
+      "cost_per_1m_in_cached": 0,
+      "cost_per_1m_out_cached": 0.5,
+      "context_window": 1047576,
+      "default_max_tokens": 32768,
+      "can_reason": false,
+      "supports_attachments": true
+    },
+    {
+      "id": "gpt-4.1-mini",
+      "model": "GPT-4.1 Mini",
+      "cost_per_1m_in": 0.39999999999999997,
+      "cost_per_1m_out": 1.5999999999999999,
+      "cost_per_1m_in_cached": 0,
+      "cost_per_1m_out_cached": 0.09999999999999999,
+      "context_window": 1047576,
+      "default_max_tokens": 32768,
+      "can_reason": false,
+      "supports_attachments": true
+    },
+    {
+      "id": "gpt-4.1-nano",
+      "model": "GPT-4.1 Nano",
+      "cost_per_1m_in": 0.09999999999999999,
+      "cost_per_1m_out": 0.39999999999999997,
+      "cost_per_1m_in_cached": 0,
+      "cost_per_1m_out_cached": 0.024999999999999998,
+      "context_window": 1047576,
+      "default_max_tokens": 32768,
+      "can_reason": false,
+      "supports_attachments": true
+    },
+    {
+      "id": "gpt-4.5-preview",
+      "model": "GPT-4.5 (Preview)",
+      "cost_per_1m_in": 75,
+      "cost_per_1m_out": 150,
+      "cost_per_1m_in_cached": 0,
+      "cost_per_1m_out_cached": 37.5,
+      "context_window": 128000,
+      "default_max_tokens": 16384,
+      "can_reason": false,
+      "supports_attachments": true
+    },
+    {
+      "id": "o3-mini",
+      "model": "o3 Mini",
+      "cost_per_1m_in": 1.1,
+      "cost_per_1m_out": 4.4,
+      "cost_per_1m_in_cached": 0,
+      "cost_per_1m_out_cached": 0.55,
+      "context_window": 200000,
+      "default_max_tokens": 100000,
+      "can_reason": false,
+      "supports_attachments": false
+    },
+    {
+      "id": "gpt-4o",
+      "model": "GPT-4o",
+      "cost_per_1m_in": 2.5,
+      "cost_per_1m_out": 10,
+      "cost_per_1m_in_cached": 0,
+      "cost_per_1m_out_cached": 1.25,
+      "context_window": 128000,
+      "default_max_tokens": 16384,
+      "can_reason": false,
+      "supports_attachments": true
+    },
+    {
+      "id": "gpt-4o-mini",
+      "model": "GPT-4o-mini",
+      "cost_per_1m_in": 0.15,
+      "cost_per_1m_out": 0.6,
+      "cost_per_1m_in_cached": 0,
+      "cost_per_1m_out_cached": 0.075,
+      "context_window": 128000,
+      "default_max_tokens": 16384,
+      "can_reason": false,
+      "reasoning_effort": "",
+      "supports_attachments": true
+    }
+  ]
+}

internal/providers/configs/bedrock.json πŸ”—

@@ -0,0 +1,58 @@
+{
+  "name": "AWS Bedrock",
+  "id": "bedrock",
+  "type": "bedrock",
+  "api_key": "",
+  "api_endpoint": "",
+  "default_model_id": "claude-sonnet-4-20250514",
+  "models": [
+    {
+      "id": "anthropic.claude-opus-4-20250514-v1:0",
+      "model": "AWS Claude Opus 4",
+      "cost_per_1m_in": 15,
+      "cost_per_1m_out": 75,
+      "cost_per_1m_in_cached": 18.75,
+      "cost_per_1m_out_cached": 1.5,
+      "context_window": 200000,
+      "default_max_tokens": 32000,
+      "can_reason": true,
+      "supports_attachments": true
+    },
+    {
+      "id": "anthropic.claude-sonnet-4-20250514-v1:0",
+      "model": "AWS Claude Sonnet 4",
+      "cost_per_1m_in": 3,
+      "cost_per_1m_out": 15,
+      "cost_per_1m_in_cached": 3.75,
+      "cost_per_1m_out_cached": 0.3,
+      "context_window": 200000,
+      "default_max_tokens": 64000,
+      "can_reason": true,
+      "supports_attachments": true
+    },
+    {
+      "id": "anthropic.claude-3-7-sonnet-20250219-v1:0",
+      "model": "AWS Claude 3.7 Sonnet",
+      "cost_per_1m_in": 3,
+      "cost_per_1m_out": 15,
+      "cost_per_1m_in_cached": 3.75,
+      "cost_per_1m_out_cached": 0.3,
+      "context_window": 200000,
+      "default_max_tokens": 64000,
+      "can_reason": true,
+      "supports_attachments": true
+    },
+    {
+      "id": "anthropic.claude-3-5-haiku-20241022-v1:0",
+      "model": "AWS Claude 3.5 Haiku",
+      "cost_per_1m_in": 0.7999999999999999,
+      "cost_per_1m_out": 4,
+      "cost_per_1m_in_cached": 1,
+      "cost_per_1m_out_cached": 0.08,
+      "context_window": 200000,
+      "default_max_tokens": 8192,
+      "can_reason": false,
+      "supports_attachments": true
+    }
+  ]
+}

internal/providers/configs/gemini.json πŸ”—

@@ -0,0 +1,34 @@
+{
+  "name": "Google Gemini",
+  "id": "gemini",
+  "type": "gemini",
+  "api_key": "$GEMINI_API_KEY",
+  "api_endpoint": "$GEMINI_API_ENDPOINT",
+  "default_model_id": "gemini-2.5-pro",
+  "models": [
+    {
+      "id": "gemini-2.5-pro",
+      "model": "Gemini 2.5 Pro",
+      "cost_per_1m_in": 1.25,
+      "cost_per_1m_out": 10,
+      "cost_per_1m_in_cached": 1.625,
+      "cost_per_1m_out_cached": 0.31,
+      "context_window": 1048576,
+      "default_max_tokens": 65536,
+      "can_reason": true,
+      "supports_attachments": true
+    },
+    {
+      "id": "gemini-2.5-flash",
+      "model": "Gemini 2.5 Flash",
+      "cost_per_1m_in": 0.3,
+      "cost_per_1m_out": 2.5,
+      "cost_per_1m_in_cached": 0.3833,
+      "cost_per_1m_out_cached": 0.075,
+      "context_window": 1048576,
+      "default_max_tokens": 65535,
+      "can_reason": true,
+      "supports_attachments": true
+    }
+  ]
+}

internal/providers/configs/openai.json πŸ”—

@@ -0,0 +1,143 @@
+{
+  "name": "OpenAI",
+  "id": "openai",
+  "type": "openai",
+  "api_key": "$OPENAI_API_KEY",
+  "api_endpoint": "$OPENAI_API_ENDPOINT",
+  "default_model_id": "o4-mini",
+  "models": [
+    {
+      "id": "codex-mini-latest",
+      "model": "Codex Mini",
+      "cost_per_1m_in": 1.5,
+      "cost_per_1m_out": 6,
+      "cost_per_1m_in_cached": 0,
+      "cost_per_1m_out_cached": 0.375,
+      "context_window": 200000,
+      "default_max_tokens": 100000,
+      "can_reason": false,
+      "supports_attachments": true
+    },
+    {
+      "id": "o4-mini",
+      "model": "o4 Mini",
+      "cost_per_1m_in": 1.1,
+      "cost_per_1m_out": 4.4,
+      "cost_per_1m_in_cached": 0,
+      "cost_per_1m_out_cached": 0.275,
+      "context_window": 200000,
+      "default_max_tokens": 100000,
+      "can_reason": false,
+      "supports_attachments": true
+    },
+    {
+      "id": "o3",
+      "model": "o3",
+      "cost_per_1m_in": 2,
+      "cost_per_1m_out": 8,
+      "cost_per_1m_in_cached": 0,
+      "cost_per_1m_out_cached": 0.5,
+      "context_window": 200000,
+      "default_max_tokens": 100000,
+      "can_reason": false,
+      "supports_attachments": true
+    },
+    {
+      "id": "o3-pro",
+      "model": "o3 Pro",
+      "cost_per_1m_in": 20,
+      "cost_per_1m_out": 80,
+      "cost_per_1m_in_cached": 0,
+      "cost_per_1m_out_cached": 0,
+      "context_window": 200000,
+      "default_max_tokens": 100000,
+      "can_reason": false,
+      "supports_attachments": true
+    },
+    {
+      "id": "gpt-4.1",
+      "model": "GPT-4.1",
+      "cost_per_1m_in": 2,
+      "cost_per_1m_out": 8,
+      "cost_per_1m_in_cached": 0,
+      "cost_per_1m_out_cached": 0.5,
+      "context_window": 1047576,
+      "default_max_tokens": 32768,
+      "can_reason": false,
+      "supports_attachments": true
+    },
+    {
+      "id": "gpt-4.1-mini",
+      "model": "GPT-4.1 Mini",
+      "cost_per_1m_in": 0.39999999999999997,
+      "cost_per_1m_out": 1.5999999999999999,
+      "cost_per_1m_in_cached": 0,
+      "cost_per_1m_out_cached": 0.09999999999999999,
+      "context_window": 1047576,
+      "default_max_tokens": 32768,
+      "can_reason": false,
+      "supports_attachments": true
+    },
+    {
+      "id": "gpt-4.1-nano",
+      "model": "GPT-4.1 Nano",
+      "cost_per_1m_in": 0.09999999999999999,
+      "cost_per_1m_out": 0.39999999999999997,
+      "cost_per_1m_in_cached": 0,
+      "cost_per_1m_out_cached": 0.024999999999999998,
+      "context_window": 1047576,
+      "default_max_tokens": 32768,
+      "can_reason": false,
+      "supports_attachments": true
+    },
+    {
+      "id": "gpt-4.5-preview",
+      "model": "GPT-4.5 (Preview)",
+      "cost_per_1m_in": 75,
+      "cost_per_1m_out": 150,
+      "cost_per_1m_in_cached": 0,
+      "cost_per_1m_out_cached": 37.5,
+      "context_window": 128000,
+      "default_max_tokens": 16384,
+      "can_reason": false,
+      "supports_attachments": true
+    },
+    {
+      "id": "o3-mini",
+      "model": "o3 Mini",
+      "cost_per_1m_in": 1.1,
+      "cost_per_1m_out": 4.4,
+      "cost_per_1m_in_cached": 0,
+      "cost_per_1m_out_cached": 0.55,
+      "context_window": 200000,
+      "default_max_tokens": 100000,
+      "can_reason": false,
+      "supports_attachments": false
+    },
+    {
+      "id": "gpt-4o",
+      "model": "GPT-4o",
+      "cost_per_1m_in": 2.5,
+      "cost_per_1m_out": 10,
+      "cost_per_1m_in_cached": 0,
+      "cost_per_1m_out_cached": 1.25,
+      "context_window": 128000,
+      "default_max_tokens": 16384,
+      "can_reason": false,
+      "supports_attachments": true
+    },
+    {
+      "id": "gpt-4o-mini",
+      "model": "GPT-4o-mini",
+      "cost_per_1m_in": 0.15,
+      "cost_per_1m_out": 0.6,
+      "cost_per_1m_in_cached": 0,
+      "cost_per_1m_out_cached": 0.075,
+      "context_window": 128000,
+      "default_max_tokens": 16384,
+      "can_reason": false,
+      "reasoning_effort": "",
+      "supports_attachments": true
+    }
+  ]
+}

internal/providers/configs/openrouter.json πŸ”—

@@ -0,0 +1,1726 @@
+{
+  "name": "OpenRouter",
+  "id": "openrouter",
+  "api_key": "$OPENROUTER_API_KEY",
+  "api_endpoint": "https://openrouter.ai/api/v1",
+  "type": "openai",
+  "default_model_id": "anthropic/claude-sonnet-4",
+  "models": [
+    {
+      "id": "mistralai/mistral-small-3.2-24b-instruct:free",
+      "model": "Mistral: Mistral Small 3.2 24B (free)",
+      "cost_per_1m_in": 0,
+      "cost_per_1m_out": 0,
+      "cost_per_1m_in_cached": 0,
+      "cost_per_1m_out_cached": 0,
+      "context_window": 96000,
+      "default_max_tokens": 0,
+      "can_reason": false,
+      "supports_attachments": true
+    },
+    {
+      "id": "mistralai/mistral-small-3.2-24b-instruct",
+      "model": "Mistral: Mistral Small 3.2 24B",
+      "cost_per_1m_in": 0.049999999999999996,
+      "cost_per_1m_out": 0.09999999999999999,
+      "cost_per_1m_in_cached": 0,
+      "cost_per_1m_out_cached": 0,
+      "context_window": 128000,
+      "default_max_tokens": 0,
+      "can_reason": false,
+      "supports_attachments": true
+    },
+    {
+      "id": "minimax/minimax-m1:extended",
+      "model": "MiniMax: MiniMax M1 (extended)",
+      "cost_per_1m_in": 0,
+      "cost_per_1m_out": 0,
+      "cost_per_1m_in_cached": 0,
+      "cost_per_1m_out_cached": 0,
+      "context_window": 256000,
+      "default_max_tokens": 0,
+      "can_reason": true,
+      "supports_attachments": false
+    },
+    {
+      "id": "minimax/minimax-m1",
+      "model": "MiniMax: MiniMax M1",
+      "cost_per_1m_in": 0.3,
+      "cost_per_1m_out": 1.6500000000000001,
+      "cost_per_1m_in_cached": 0,
+      "cost_per_1m_out_cached": 0,
+      "context_window": 1000000,
+      "default_max_tokens": 40000,
+      "can_reason": true,
+      "supports_attachments": false
+    },
+    {
+      "id": "google/gemini-2.5-flash-lite-preview-06-17",
+      "model": "Google: Gemini 2.5 Flash Lite Preview 06-17",
+      "cost_per_1m_in": 0.09999999999999999,
+      "cost_per_1m_out": 0.39999999999999997,
+      "cost_per_1m_in_cached": 0,
+      "cost_per_1m_out_cached": 0,
+      "context_window": 1048576,
+      "default_max_tokens": 65535,
+      "can_reason": true,
+      "supports_attachments": true
+    },
+    {
+      "id": "google/gemini-2.5-flash",
+      "model": "Google: Gemini 2.5 Flash",
+      "cost_per_1m_in": 0.3,
+      "cost_per_1m_out": 2.5,
+      "cost_per_1m_in_cached": 0.3833,
+      "cost_per_1m_out_cached": 0.075,
+      "context_window": 1048576,
+      "default_max_tokens": 65535,
+      "can_reason": true,
+      "supports_attachments": true
+    },
+    {
+      "id": "google/gemini-2.5-pro",
+      "model": "Google: Gemini 2.5 Pro",
+      "cost_per_1m_in": 1.25,
+      "cost_per_1m_out": 10,
+      "cost_per_1m_in_cached": 1.625,
+      "cost_per_1m_out_cached": 0.31,
+      "context_window": 1048576,
+      "default_max_tokens": 65536,
+      "can_reason": true,
+      "supports_attachments": true
+    },
+    {
+      "id": "openai/o3-pro",
+      "model": "OpenAI: o3 Pro",
+      "cost_per_1m_in": 20,
+      "cost_per_1m_out": 80,
+      "cost_per_1m_in_cached": 0,
+      "cost_per_1m_out_cached": 0,
+      "context_window": 200000,
+      "default_max_tokens": 100000,
+      "can_reason": false,
+      "supports_attachments": true
+    },
+    {
+      "id": "x-ai/grok-3-mini",
+      "model": "xAI: Grok 3 Mini",
+      "cost_per_1m_in": 0.3,
+      "cost_per_1m_out": 0.5,
+      "cost_per_1m_in_cached": 0,
+      "cost_per_1m_out_cached": 0.075,
+      "context_window": 131072,
+      "default_max_tokens": 0,
+      "can_reason": true,
+      "supports_attachments": false
+    },
+    {
+      "id": "x-ai/grok-3",
+      "model": "xAI: Grok 3",
+      "cost_per_1m_in": 3,
+      "cost_per_1m_out": 15,
+      "cost_per_1m_in_cached": 0,
+      "cost_per_1m_out_cached": 0.75,
+      "context_window": 131072,
+      "default_max_tokens": 0,
+      "can_reason": false,
+      "supports_attachments": false
+    },
+    {
+      "id": "mistralai/magistral-small-2506",
+      "model": "Mistral: Magistral Small 2506",
+      "cost_per_1m_in": 0.5,
+      "cost_per_1m_out": 1.5,
+      "cost_per_1m_in_cached": 0,
+      "cost_per_1m_out_cached": 0,
+      "context_window": 40000,
+      "default_max_tokens": 40000,
+      "can_reason": true,
+      "supports_attachments": false
+    },
+    {
+      "id": "mistralai/magistral-medium-2506",
+      "model": "Mistral: Magistral Medium 2506",
+      "cost_per_1m_in": 2,
+      "cost_per_1m_out": 5,
+      "cost_per_1m_in_cached": 0,
+      "cost_per_1m_out_cached": 0,
+      "context_window": 40960,
+      "default_max_tokens": 40000,
+      "can_reason": true,
+      "supports_attachments": false
+    },
+    {
+      "id": "mistralai/magistral-medium-2506:thinking",
+      "model": "Mistral: Magistral Medium 2506 (thinking)",
+      "cost_per_1m_in": 2,
+      "cost_per_1m_out": 5,
+      "cost_per_1m_in_cached": 0,
+      "cost_per_1m_out_cached": 0,
+      "context_window": 40960,
+      "default_max_tokens": 40000,
+      "can_reason": true,
+      "supports_attachments": false
+    },
+    {
+      "id": "google/gemini-2.5-pro-preview",
+      "model": "Google: Gemini 2.5 Pro Preview 06-05",
+      "cost_per_1m_in": 1.25,
+      "cost_per_1m_out": 10,
+      "cost_per_1m_in_cached": 1.625,
+      "cost_per_1m_out_cached": 0.31,
+      "context_window": 1048576,
+      "default_max_tokens": 65536,
+      "can_reason": true,
+      "supports_attachments": true
+    },
+    {
+      "id": "deepseek/deepseek-r1-0528",
+      "model": "DeepSeek: R1 0528",
+      "cost_per_1m_in": 0.5,
+      "cost_per_1m_out": 2.1500000000000004,
+      "cost_per_1m_in_cached": 0,
+      "cost_per_1m_out_cached": 0,
+      "context_window": 128000,
+      "default_max_tokens": 32768,
+      "can_reason": true,
+      "supports_attachments": false
+    },
+    {
+      "id": "anthropic/claude-opus-4",
+      "model": "Anthropic: Claude Opus 4",
+      "cost_per_1m_in": 15,
+      "cost_per_1m_out": 75,
+      "cost_per_1m_in_cached": 18.75,
+      "cost_per_1m_out_cached": 1.5,
+      "context_window": 200000,
+      "default_max_tokens": 32000,
+      "can_reason": true,
+      "supports_attachments": true
+    },
+    {
+      "id": "anthropic/claude-sonnet-4",
+      "model": "Anthropic: Claude Sonnet 4",
+      "cost_per_1m_in": 3,
+      "cost_per_1m_out": 15,
+      "cost_per_1m_in_cached": 3.75,
+      "cost_per_1m_out_cached": 0.3,
+      "context_window": 200000,
+      "default_max_tokens": 64000,
+      "can_reason": true,
+      "supports_attachments": true
+    },
+    {
+      "id": "mistralai/devstral-small:free",
+      "model": "Mistral: Devstral Small (free)",
+      "cost_per_1m_in": 0,
+      "cost_per_1m_out": 0,
+      "cost_per_1m_in_cached": 0,
+      "cost_per_1m_out_cached": 0,
+      "context_window": 131072,
+      "default_max_tokens": 0,
+      "can_reason": false,
+      "supports_attachments": false
+    },
+    {
+      "id": "mistralai/devstral-small",
+      "model": "Mistral: Devstral Small",
+      "cost_per_1m_in": 0.06,
+      "cost_per_1m_out": 0.12,
+      "cost_per_1m_in_cached": 0,
+      "cost_per_1m_out_cached": 0,
+      "context_window": 128000,
+      "default_max_tokens": 0,
+      "can_reason": false,
+      "supports_attachments": false
+    },
+    {
+      "id": "google/gemini-2.5-flash-preview-05-20",
+      "model": "Google: Gemini 2.5 Flash Preview 05-20",
+      "cost_per_1m_in": 0.15,
+      "cost_per_1m_out": 0.6,
+      "cost_per_1m_in_cached": 0.2333,
+      "cost_per_1m_out_cached": 0.0375,
+      "context_window": 1048576,
+      "default_max_tokens": 65535,
+      "can_reason": true,
+      "supports_attachments": true
+    },
+    {
+      "id": "google/gemini-2.5-flash-preview-05-20:thinking",
+      "model": "Google: Gemini 2.5 Flash Preview 05-20 (thinking)",
+      "cost_per_1m_in": 0.15,
+      "cost_per_1m_out": 3.5,
+      "cost_per_1m_in_cached": 0.2333,
+      "cost_per_1m_out_cached": 0.0375,
+      "context_window": 1048576,
+      "default_max_tokens": 65535,
+      "can_reason": true,
+      "supports_attachments": true
+    },
+    {
+      "id": "openai/codex-mini",
+      "model": "OpenAI: Codex Mini",
+      "cost_per_1m_in": 1.5,
+      "cost_per_1m_out": 6,
+      "cost_per_1m_in_cached": 0,
+      "cost_per_1m_out_cached": 0.375,
+      "context_window": 200000,
+      "default_max_tokens": 100000,
+      "can_reason": false,
+      "supports_attachments": true
+    },
+    {
+      "id": "mistralai/mistral-medium-3",
+      "model": "Mistral: Mistral Medium 3",
+      "cost_per_1m_in": 0.39999999999999997,
+      "cost_per_1m_out": 2,
+      "cost_per_1m_in_cached": 0,
+      "cost_per_1m_out_cached": 0,
+      "context_window": 32768,
+      "default_max_tokens": 0,
+      "can_reason": false,
+      "supports_attachments": true
+    },
+    {
+      "id": "google/gemini-2.5-pro-preview-05-06",
+      "model": "Google: Gemini 2.5 Pro Preview 05-06",
+      "cost_per_1m_in": 1.25,
+      "cost_per_1m_out": 10,
+      "cost_per_1m_in_cached": 1.625,
+      "cost_per_1m_out_cached": 0.31,
+      "context_window": 1048576,
+      "default_max_tokens": 65535,
+      "can_reason": false,
+      "supports_attachments": true
+    },
+    {
+      "id": "arcee-ai/caller-large",
+      "model": "Arcee AI: Caller Large",
+      "cost_per_1m_in": 0.55,
+      "cost_per_1m_out": 0.85,
+      "cost_per_1m_in_cached": 0,
+      "cost_per_1m_out_cached": 0,
+      "context_window": 32768,
+      "default_max_tokens": 0,
+      "can_reason": false,
+      "supports_attachments": false
+    },
+    {
+      "id": "arcee-ai/virtuoso-large",
+      "model": "Arcee AI: Virtuoso Large",
+      "cost_per_1m_in": 0.75,
+      "cost_per_1m_out": 1.2,
+      "cost_per_1m_in_cached": 0,
+      "cost_per_1m_out_cached": 0,
+      "context_window": 131072,
+      "default_max_tokens": 64000,
+      "can_reason": false,
+      "supports_attachments": false
+    },
+    {
+      "id": "arcee-ai/virtuoso-medium-v2",
+      "model": "Arcee AI: Virtuoso Medium V2",
+      "cost_per_1m_in": 0.5,
+      "cost_per_1m_out": 0.7999999999999999,
+      "cost_per_1m_in_cached": 0,
+      "cost_per_1m_out_cached": 0,
+      "context_window": 131072,
+      "default_max_tokens": 32768,
+      "can_reason": false,
+      "supports_attachments": false
+    },
+    {
+      "id": "qwen/qwen3-30b-a3b",
+      "model": "Qwen: Qwen3 30B A3B",
+      "cost_per_1m_in": 0.08,
+      "cost_per_1m_out": 0.29,
+      "cost_per_1m_in_cached": 0,
+      "cost_per_1m_out_cached": 0,
+      "context_window": 40960,
+      "default_max_tokens": 40960,
+      "can_reason": true,
+      "supports_attachments": false
+    },
+    {
+      "id": "qwen/qwen3-14b",
+      "model": "Qwen: Qwen3 14B",
+      "cost_per_1m_in": 0.06,
+      "cost_per_1m_out": 0.24,
+      "cost_per_1m_in_cached": 0,
+      "cost_per_1m_out_cached": 0,
+      "context_window": 40960,
+      "default_max_tokens": 40960,
+      "can_reason": true,
+      "supports_attachments": false
+    },
+    {
+      "id": "qwen/qwen3-32b",
+      "model": "Qwen: Qwen3 32B",
+      "cost_per_1m_in": 0.09999999999999999,
+      "cost_per_1m_out": 0.3,
+      "cost_per_1m_in_cached": 0,
+      "cost_per_1m_out_cached": 0,
+      "context_window": 40960,
+      "default_max_tokens": 0,
+      "can_reason": true,
+      "supports_attachments": false
+    },
+    {
+      "id": "qwen/qwen3-235b-a22b",
+      "model": "Qwen: Qwen3 235B A22B",
+      "cost_per_1m_in": 0.13,
+      "cost_per_1m_out": 0.6,
+      "cost_per_1m_in_cached": 0,
+      "cost_per_1m_out_cached": 0,
+      "context_window": 40960,
+      "default_max_tokens": 40960,
+      "can_reason": true,
+      "supports_attachments": false
+    },
+    {
+      "id": "google/gemini-2.5-flash-preview",
+      "model": "Google: Gemini 2.5 Flash Preview 04-17",
+      "cost_per_1m_in": 0.15,
+      "cost_per_1m_out": 0.6,
+      "cost_per_1m_in_cached": 0.2333,
+      "cost_per_1m_out_cached": 0.0375,
+      "context_window": 1048576,
+      "default_max_tokens": 65535,
+      "can_reason": false,
+      "supports_attachments": true
+    },
+    {
+      "id": "google/gemini-2.5-flash-preview:thinking",
+      "model": "Google: Gemini 2.5 Flash Preview 04-17 (thinking)",
+      "cost_per_1m_in": 0.15,
+      "cost_per_1m_out": 3.5,
+      "cost_per_1m_in_cached": 0.2333,
+      "cost_per_1m_out_cached": 0.0375,
+      "context_window": 1048576,
+      "default_max_tokens": 65535,
+      "can_reason": false,
+      "supports_attachments": true
+    },
+    {
+      "id": "openai/o4-mini-high",
+      "model": "OpenAI: o4 Mini High",
+      "cost_per_1m_in": 1.1,
+      "cost_per_1m_out": 4.4,
+      "cost_per_1m_in_cached": 0,
+      "cost_per_1m_out_cached": 0.275,
+      "context_window": 200000,
+      "default_max_tokens": 100000,
+      "can_reason": false,
+      "supports_attachments": true
+    },
+    {
+      "id": "openai/o3",
+      "model": "OpenAI: o3",
+      "cost_per_1m_in": 2,
+      "cost_per_1m_out": 8,
+      "cost_per_1m_in_cached": 0,
+      "cost_per_1m_out_cached": 0.5,
+      "context_window": 200000,
+      "default_max_tokens": 100000,
+      "can_reason": false,
+      "supports_attachments": true
+    },
+    {
+      "id": "openai/o4-mini",
+      "model": "OpenAI: o4 Mini",
+      "cost_per_1m_in": 1.1,
+      "cost_per_1m_out": 4.4,
+      "cost_per_1m_in_cached": 0,
+      "cost_per_1m_out_cached": 0.275,
+      "context_window": 200000,
+      "default_max_tokens": 100000,
+      "can_reason": false,
+      "supports_attachments": true
+    },
+    {
+      "id": "openai/gpt-4.1",
+      "model": "OpenAI: GPT-4.1",
+      "cost_per_1m_in": 2,
+      "cost_per_1m_out": 8,
+      "cost_per_1m_in_cached": 0,
+      "cost_per_1m_out_cached": 0.5,
+      "context_window": 1047576,
+      "default_max_tokens": 32768,
+      "can_reason": false,
+      "supports_attachments": true
+    },
+    {
+      "id": "openai/gpt-4.1-mini",
+      "model": "OpenAI: GPT-4.1 Mini",
+      "cost_per_1m_in": 0.39999999999999997,
+      "cost_per_1m_out": 1.5999999999999999,
+      "cost_per_1m_in_cached": 0,
+      "cost_per_1m_out_cached": 0.09999999999999999,
+      "context_window": 1047576,
+      "default_max_tokens": 32768,
+      "can_reason": false,
+      "supports_attachments": true
+    },
+    {
+      "id": "openai/gpt-4.1-nano",
+      "model": "OpenAI: GPT-4.1 Nano",
+      "cost_per_1m_in": 0.09999999999999999,
+      "cost_per_1m_out": 0.39999999999999997,
+      "cost_per_1m_in_cached": 0,
+      "cost_per_1m_out_cached": 0.024999999999999998,
+      "context_window": 1047576,
+      "default_max_tokens": 32768,
+      "can_reason": false,
+      "supports_attachments": true
+    },
+    {
+      "id": "x-ai/grok-3-mini-beta",
+      "model": "xAI: Grok 3 Mini Beta",
+      "cost_per_1m_in": 0.3,
+      "cost_per_1m_out": 0.5,
+      "cost_per_1m_in_cached": 0,
+      "cost_per_1m_out_cached": 0.075,
+      "context_window": 131072,
+      "default_max_tokens": 0,
+      "can_reason": true,
+      "supports_attachments": false
+    },
+    {
+      "id": "x-ai/grok-3-beta",
+      "model": "xAI: Grok 3 Beta",
+      "cost_per_1m_in": 3,
+      "cost_per_1m_out": 15,
+      "cost_per_1m_in_cached": 0,
+      "cost_per_1m_out_cached": 0.75,
+      "context_window": 131072,
+      "default_max_tokens": 0,
+      "can_reason": false,
+      "supports_attachments": false
+    },
+    {
+      "id": "meta-llama/llama-4-maverick",
+      "model": "Meta: Llama 4 Maverick",
+      "cost_per_1m_in": 0.15,
+      "cost_per_1m_out": 0.6,
+      "cost_per_1m_in_cached": 0,
+      "cost_per_1m_out_cached": 0,
+      "context_window": 1048576,
+      "default_max_tokens": 16384,
+      "can_reason": false,
+      "supports_attachments": true
+    },
+    {
+      "id": "meta-llama/llama-4-scout",
+      "model": "Meta: Llama 4 Scout",
+      "cost_per_1m_in": 0.08,
+      "cost_per_1m_out": 0.3,
+      "cost_per_1m_in_cached": 0,
+      "cost_per_1m_out_cached": 0,
+      "context_window": 1048576,
+      "default_max_tokens": 1048576,
+      "can_reason": false,
+      "supports_attachments": true
+    },
+    {
+      "id": "all-hands/openhands-lm-32b-v0.1",
+      "model": "OpenHands LM 32B V0.1",
+      "cost_per_1m_in": 2.6,
+      "cost_per_1m_out": 3.4,
+      "cost_per_1m_in_cached": 0,
+      "cost_per_1m_out_cached": 0,
+      "context_window": 16384,
+      "default_max_tokens": 4096,
+      "can_reason": false,
+      "supports_attachments": false
+    },
+    {
+      "id": "google/gemini-2.5-pro-exp-03-25",
+      "model": "Google: Gemini 2.5 Pro Experimental",
+      "cost_per_1m_in": 0,
+      "cost_per_1m_out": 0,
+      "cost_per_1m_in_cached": 0,
+      "cost_per_1m_out_cached": 0,
+      "context_window": 1048576,
+      "default_max_tokens": 65535,
+      "can_reason": false,
+      "supports_attachments": true
+    },
+    {
+      "id": "deepseek/deepseek-chat-v3-0324:free",
+      "model": "DeepSeek: DeepSeek V3 0324 (free)",
+      "cost_per_1m_in": 0,
+      "cost_per_1m_out": 0,
+      "cost_per_1m_in_cached": 0,
+      "cost_per_1m_out_cached": 0,
+      "context_window": 163840,
+      "default_max_tokens": 0,
+      "can_reason": false,
+      "supports_attachments": false
+    },
+    {
+      "id": "deepseek/deepseek-chat-v3-0324",
+      "model": "DeepSeek: DeepSeek V3 0324",
+      "cost_per_1m_in": 0.28,
+      "cost_per_1m_out": 0.88,
+      "cost_per_1m_in_cached": 0,
+      "cost_per_1m_out_cached": 0,
+      "context_window": 163840,
+      "default_max_tokens": 0,
+      "can_reason": false,
+      "supports_attachments": false
+    },
+    {
+      "id": "mistralai/mistral-small-3.1-24b-instruct:free",
+      "model": "Mistral: Mistral Small 3.1 24B (free)",
+      "cost_per_1m_in": 0,
+      "cost_per_1m_out": 0,
+      "cost_per_1m_in_cached": 0,
+      "cost_per_1m_out_cached": 0,
+      "context_window": 96000,
+      "default_max_tokens": 96000,
+      "can_reason": false,
+      "supports_attachments": true
+    },
+    {
+      "id": "mistralai/mistral-small-3.1-24b-instruct",
+      "model": "Mistral: Mistral Small 3.1 24B",
+      "cost_per_1m_in": 0.049999999999999996,
+      "cost_per_1m_out": 0.15,
+      "cost_per_1m_in_cached": 0,
+      "cost_per_1m_out_cached": 0,
+      "context_window": 131072,
+      "default_max_tokens": 0,
+      "can_reason": false,
+      "supports_attachments": true
+    },
+    {
+      "id": "ai21/jamba-1.6-large",
+      "model": "AI21: Jamba 1.6 Large",
+      "cost_per_1m_in": 2,
+      "cost_per_1m_out": 8,
+      "cost_per_1m_in_cached": 0,
+      "cost_per_1m_out_cached": 0,
+      "context_window": 256000,
+      "default_max_tokens": 4096,
+      "can_reason": false,
+      "supports_attachments": false
+    },
+    {
+      "id": "ai21/jamba-1.6-mini",
+      "model": "AI21: Jamba Mini 1.6",
+      "cost_per_1m_in": 0.19999999999999998,
+      "cost_per_1m_out": 0.39999999999999997,
+      "cost_per_1m_in_cached": 0,
+      "cost_per_1m_out_cached": 0,
+      "context_window": 256000,
+      "default_max_tokens": 4096,
+      "can_reason": false,
+      "supports_attachments": false
+    },
+    {
+      "id": "openai/gpt-4.5-preview",
+      "model": "OpenAI: GPT-4.5 (Preview)",
+      "cost_per_1m_in": 75,
+      "cost_per_1m_out": 150,
+      "cost_per_1m_in_cached": 0,
+      "cost_per_1m_out_cached": 37.5,
+      "context_window": 128000,
+      "default_max_tokens": 16384,
+      "can_reason": false,
+      "supports_attachments": true
+    },
+    {
+      "id": "google/gemini-2.0-flash-lite-001",
+      "model": "Google: Gemini 2.0 Flash Lite",
+      "cost_per_1m_in": 0.075,
+      "cost_per_1m_out": 0.3,
+      "cost_per_1m_in_cached": 0,
+      "cost_per_1m_out_cached": 0,
+      "context_window": 1048576,
+      "default_max_tokens": 8192,
+      "can_reason": false,
+      "supports_attachments": true
+    },
+    {
+      "id": "anthropic/claude-3.7-sonnet",
+      "model": "Anthropic: Claude 3.7 Sonnet",
+      "cost_per_1m_in": 3,
+      "cost_per_1m_out": 15,
+      "cost_per_1m_in_cached": 3.75,
+      "cost_per_1m_out_cached": 0.3,
+      "context_window": 200000,
+      "default_max_tokens": 64000,
+      "can_reason": true,
+      "supports_attachments": true
+    },
+    {
+      "id": "anthropic/claude-3.7-sonnet:beta",
+      "model": "Anthropic: Claude 3.7 Sonnet (self-moderated)",
+      "cost_per_1m_in": 3,
+      "cost_per_1m_out": 15,
+      "cost_per_1m_in_cached": 3.75,
+      "cost_per_1m_out_cached": 0.3,
+      "context_window": 200000,
+      "default_max_tokens": 128000,
+      "can_reason": true,
+      "supports_attachments": true
+    },
+    {
+      "id": "anthropic/claude-3.7-sonnet:thinking",
+      "model": "Anthropic: Claude 3.7 Sonnet (thinking)",
+      "cost_per_1m_in": 3,
+      "cost_per_1m_out": 15,
+      "cost_per_1m_in_cached": 3.75,
+      "cost_per_1m_out_cached": 0.3,
+      "context_window": 200000,
+      "default_max_tokens": 128000,
+      "can_reason": true,
+      "supports_attachments": true
+    },
+    {
+      "id": "mistralai/mistral-saba",
+      "model": "Mistral: Saba",
+      "cost_per_1m_in": 0.19999999999999998,
+      "cost_per_1m_out": 0.6,
+      "cost_per_1m_in_cached": 0,
+      "cost_per_1m_out_cached": 0,
+      "context_window": 32768,
+      "default_max_tokens": 0,
+      "can_reason": false,
+      "supports_attachments": false
+    },
+    {
+      "id": "openai/o3-mini-high",
+      "model": "OpenAI: o3 Mini High",
+      "cost_per_1m_in": 1.1,
+      "cost_per_1m_out": 4.4,
+      "cost_per_1m_in_cached": 0,
+      "cost_per_1m_out_cached": 0.55,
+      "context_window": 200000,
+      "default_max_tokens": 100000,
+      "can_reason": false,
+      "supports_attachments": false
+    },
+    {
+      "id": "google/gemini-2.0-flash-001",
+      "model": "Google: Gemini 2.0 Flash",
+      "cost_per_1m_in": 0.09999999999999999,
+      "cost_per_1m_out": 0.39999999999999997,
+      "cost_per_1m_in_cached": 0.18330000000000002,
+      "cost_per_1m_out_cached": 0.024999999999999998,
+      "context_window": 1048576,
+      "default_max_tokens": 8192,
+      "can_reason": false,
+      "supports_attachments": true
+    },
+    {
+      "id": "qwen/qwen-turbo",
+      "model": "Qwen: Qwen-Turbo",
+      "cost_per_1m_in": 0.049999999999999996,
+      "cost_per_1m_out": 0.19999999999999998,
+      "cost_per_1m_in_cached": 0,
+      "cost_per_1m_out_cached": 0.02,
+      "context_window": 1000000,
+      "default_max_tokens": 8192,
+      "can_reason": false,
+      "supports_attachments": false
+    },
+    {
+      "id": "qwen/qwen-plus",
+      "model": "Qwen: Qwen-Plus",
+      "cost_per_1m_in": 0.39999999999999997,
+      "cost_per_1m_out": 1.2,
+      "cost_per_1m_in_cached": 0,
+      "cost_per_1m_out_cached": 0.16,
+      "context_window": 131072,
+      "default_max_tokens": 8192,
+      "can_reason": false,
+      "supports_attachments": false
+    },
+    {
+      "id": "qwen/qwen-max",
+      "model": "Qwen: Qwen-Max ",
+      "cost_per_1m_in": 1.5999999999999999,
+      "cost_per_1m_out": 6.3999999999999995,
+      "cost_per_1m_in_cached": 0,
+      "cost_per_1m_out_cached": 0.64,
+      "context_window": 32768,
+      "default_max_tokens": 8192,
+      "can_reason": false,
+      "supports_attachments": false
+    },
+    {
+      "id": "openai/o3-mini",
+      "model": "OpenAI: o3 Mini",
+      "cost_per_1m_in": 1.1,
+      "cost_per_1m_out": 4.4,
+      "cost_per_1m_in_cached": 0,
+      "cost_per_1m_out_cached": 0.55,
+      "context_window": 200000,
+      "default_max_tokens": 100000,
+      "can_reason": false,
+      "supports_attachments": false
+    },
+    {
+      "id": "mistralai/mistral-small-24b-instruct-2501",
+      "model": "Mistral: Mistral Small 3",
+      "cost_per_1m_in": 0.049999999999999996,
+      "cost_per_1m_out": 0.09,
+      "cost_per_1m_in_cached": 0,
+      "cost_per_1m_out_cached": 0,
+      "context_window": 32768,
+      "default_max_tokens": 32768,
+      "can_reason": false,
+      "supports_attachments": false
+    },
+    {
+      "id": "deepseek/deepseek-r1-distill-llama-70b",
+      "model": "DeepSeek: R1 Distill Llama 70B",
+      "cost_per_1m_in": 0.09999999999999999,
+      "cost_per_1m_out": 0.39999999999999997,
+      "cost_per_1m_in_cached": 0,
+      "cost_per_1m_out_cached": 0,
+      "context_window": 131072,
+      "default_max_tokens": 16384,
+      "can_reason": true,
+      "supports_attachments": false
+    },
+    {
+      "id": "deepseek/deepseek-r1",
+      "model": "DeepSeek: R1",
+      "cost_per_1m_in": 0.44999999999999996,
+      "cost_per_1m_out": 2.1500000000000004,
+      "cost_per_1m_in_cached": 0,
+      "cost_per_1m_out_cached": 0,
+      "context_window": 128000,
+      "default_max_tokens": 32768,
+      "can_reason": true,
+      "supports_attachments": false
+    },
+    {
+      "id": "mistralai/codestral-2501",
+      "model": "Mistral: Codestral 2501",
+      "cost_per_1m_in": 0.3,
+      "cost_per_1m_out": 0.8999999999999999,
+      "cost_per_1m_in_cached": 0,
+      "cost_per_1m_out_cached": 0,
+      "context_window": 262144,
+      "default_max_tokens": 0,
+      "can_reason": false,
+      "supports_attachments": false
+    },
+    {
+      "id": "deepseek/deepseek-chat",
+      "model": "DeepSeek: DeepSeek V3",
+      "cost_per_1m_in": 0.38,
+      "cost_per_1m_out": 0.8899999999999999,
+      "cost_per_1m_in_cached": 0,
+      "cost_per_1m_out_cached": 0,
+      "context_window": 163840,
+      "default_max_tokens": 163840,
+      "can_reason": false,
+      "supports_attachments": false
+    },
+    {
+      "id": "openai/o1",
+      "model": "OpenAI: o1",
+      "cost_per_1m_in": 15,
+      "cost_per_1m_out": 60,
+      "cost_per_1m_in_cached": 0,
+      "cost_per_1m_out_cached": 7.5,
+      "context_window": 200000,
+      "default_max_tokens": 100000,
+      "can_reason": false,
+      "supports_attachments": true
+    },
+    {
+      "id": "x-ai/grok-2-1212",
+      "model": "xAI: Grok 2 1212",
+      "cost_per_1m_in": 2,
+      "cost_per_1m_out": 10,
+      "cost_per_1m_in_cached": 0,
+      "cost_per_1m_out_cached": 0,
+      "context_window": 131072,
+      "default_max_tokens": 0,
+      "can_reason": false,
+      "supports_attachments": false
+    },
+    {
+      "id": "meta-llama/llama-3.3-70b-instruct",
+      "model": "Meta: Llama 3.3 70B Instruct",
+      "cost_per_1m_in": 0.049999999999999996,
+      "cost_per_1m_out": 0.19,
+      "cost_per_1m_in_cached": 0,
+      "cost_per_1m_out_cached": 0,
+      "context_window": 131072,
+      "default_max_tokens": 16384,
+      "can_reason": false,
+      "supports_attachments": false
+    },
+    {
+      "id": "amazon/nova-lite-v1",
+      "model": "Amazon: Nova Lite 1.0",
+      "cost_per_1m_in": 0.06,
+      "cost_per_1m_out": 0.24,
+      "cost_per_1m_in_cached": 0,
+      "cost_per_1m_out_cached": 0,
+      "context_window": 300000,
+      "default_max_tokens": 5120,
+      "can_reason": false,
+      "supports_attachments": true
+    },
+    {
+      "id": "amazon/nova-micro-v1",
+      "model": "Amazon: Nova Micro 1.0",
+      "cost_per_1m_in": 0.035,
+      "cost_per_1m_out": 0.14,
+      "cost_per_1m_in_cached": 0,
+      "cost_per_1m_out_cached": 0,
+      "context_window": 128000,
+      "default_max_tokens": 5120,
+      "can_reason": false,
+      "supports_attachments": false
+    },
+    {
+      "id": "amazon/nova-pro-v1",
+      "model": "Amazon: Nova Pro 1.0",
+      "cost_per_1m_in": 0.7999999999999999,
+      "cost_per_1m_out": 3.1999999999999997,
+      "cost_per_1m_in_cached": 0,
+      "cost_per_1m_out_cached": 0,
+      "context_window": 300000,
+      "default_max_tokens": 5120,
+      "can_reason": false,
+      "supports_attachments": true
+    },
+    {
+      "id": "openai/gpt-4o-2024-11-20",
+      "model": "OpenAI: GPT-4o (2024-11-20)",
+      "cost_per_1m_in": 2.5,
+      "cost_per_1m_out": 10,
+      "cost_per_1m_in_cached": 0,
+      "cost_per_1m_out_cached": 1.25,
+      "context_window": 128000,
+      "default_max_tokens": 16384,
+      "can_reason": false,
+      "supports_attachments": true
+    },
+    {
+      "id": "mistralai/mistral-large-2411",
+      "model": "Mistral Large 2411",
+      "cost_per_1m_in": 2,
+      "cost_per_1m_out": 6,
+      "cost_per_1m_in_cached": 0,
+      "cost_per_1m_out_cached": 0,
+      "context_window": 131072,
+      "default_max_tokens": 0,
+      "can_reason": false,
+      "supports_attachments": false
+    },
+    {
+      "id": "mistralai/mistral-large-2407",
+      "model": "Mistral Large 2407",
+      "cost_per_1m_in": 2,
+      "cost_per_1m_out": 6,
+      "cost_per_1m_in_cached": 0,
+      "cost_per_1m_out_cached": 0,
+      "context_window": 131072,
+      "default_max_tokens": 0,
+      "can_reason": false,
+      "supports_attachments": false
+    },
+    {
+      "id": "mistralai/pixtral-large-2411",
+      "model": "Mistral: Pixtral Large 2411",
+      "cost_per_1m_in": 2,
+      "cost_per_1m_out": 6,
+      "cost_per_1m_in_cached": 0,
+      "cost_per_1m_out_cached": 0,
+      "context_window": 32768,
+      "default_max_tokens": 0,
+      "can_reason": false,
+      "supports_attachments": true
+    },
+    {
+      "id": "thedrummer/unslopnemo-12b",
+      "model": "TheDrummer: UnslopNemo 12B",
+      "cost_per_1m_in": 0.39999999999999997,
+      "cost_per_1m_out": 0.39999999999999997,
+      "cost_per_1m_in_cached": 0,
+      "cost_per_1m_out_cached": 0,
+      "context_window": 32768,
+      "default_max_tokens": 0,
+      "can_reason": false,
+      "supports_attachments": false
+    },
+    {
+      "id": "anthropic/claude-3.5-haiku:beta",
+      "model": "Anthropic: Claude 3.5 Haiku (self-moderated)",
+      "cost_per_1m_in": 0.7999999999999999,
+      "cost_per_1m_out": 4,
+      "cost_per_1m_in_cached": 1,
+      "cost_per_1m_out_cached": 0.08,
+      "context_window": 200000,
+      "default_max_tokens": 8192,
+      "can_reason": false,
+      "supports_attachments": true
+    },
+    {
+      "id": "anthropic/claude-3.5-haiku",
+      "model": "Anthropic: Claude 3.5 Haiku",
+      "cost_per_1m_in": 0.7999999999999999,
+      "cost_per_1m_out": 4,
+      "cost_per_1m_in_cached": 1,
+      "cost_per_1m_out_cached": 0.08,
+      "context_window": 200000,
+      "default_max_tokens": 8192,
+      "can_reason": false,
+      "supports_attachments": true
+    },
+    {
+      "id": "anthropic/claude-3.5-haiku-20241022:beta",
+      "model": "Anthropic: Claude 3.5 Haiku (2024-10-22) (self-moderated)",
+      "cost_per_1m_in": 0.7999999999999999,
+      "cost_per_1m_out": 4,
+      "cost_per_1m_in_cached": 1,
+      "cost_per_1m_out_cached": 0.08,
+      "context_window": 200000,
+      "default_max_tokens": 8192,
+      "can_reason": false,
+      "supports_attachments": true
+    },
+    {
+      "id": "anthropic/claude-3.5-haiku-20241022",
+      "model": "Anthropic: Claude 3.5 Haiku (2024-10-22)",
+      "cost_per_1m_in": 0.7999999999999999,
+      "cost_per_1m_out": 4,
+      "cost_per_1m_in_cached": 1,
+      "cost_per_1m_out_cached": 0.08,
+      "context_window": 200000,
+      "default_max_tokens": 8192,
+      "can_reason": false,
+      "supports_attachments": true
+    },
+    {
+      "id": "anthropic/claude-3.5-sonnet:beta",
+      "model": "Anthropic: Claude 3.5 Sonnet (self-moderated)",
+      "cost_per_1m_in": 3,
+      "cost_per_1m_out": 15,
+      "cost_per_1m_in_cached": 3.75,
+      "cost_per_1m_out_cached": 0.3,
+      "context_window": 200000,
+      "default_max_tokens": 8192,
+      "can_reason": false,
+      "supports_attachments": true
+    },
+    {
+      "id": "anthropic/claude-3.5-sonnet",
+      "model": "Anthropic: Claude 3.5 Sonnet",
+      "cost_per_1m_in": 3,
+      "cost_per_1m_out": 15,
+      "cost_per_1m_in_cached": 3.75,
+      "cost_per_1m_out_cached": 0.3,
+      "context_window": 200000,
+      "default_max_tokens": 8192,
+      "can_reason": false,
+      "supports_attachments": true
+    },
+    {
+      "id": "x-ai/grok-beta",
+      "model": "xAI: Grok Beta",
+      "cost_per_1m_in": 5,
+      "cost_per_1m_out": 15,
+      "cost_per_1m_in_cached": 0,
+      "cost_per_1m_out_cached": 0,
+      "context_window": 131072,
+      "default_max_tokens": 0,
+      "can_reason": false,
+      "supports_attachments": false
+    },
+    {
+      "id": "mistralai/ministral-8b",
+      "model": "Mistral: Ministral 8B",
+      "cost_per_1m_in": 0.09999999999999999,
+      "cost_per_1m_out": 0.09999999999999999,
+      "cost_per_1m_in_cached": 0,
+      "cost_per_1m_out_cached": 0,
+      "context_window": 128000,
+      "default_max_tokens": 0,
+      "can_reason": false,
+      "supports_attachments": false
+    },
+    {
+      "id": "mistralai/ministral-3b",
+      "model": "Mistral: Ministral 3B",
+      "cost_per_1m_in": 0.04,
+      "cost_per_1m_out": 0.04,
+      "cost_per_1m_in_cached": 0,
+      "cost_per_1m_out_cached": 0,
+      "context_window": 131072,
+      "default_max_tokens": 0,
+      "can_reason": false,
+      "supports_attachments": false
+    },
+    {
+      "id": "nvidia/llama-3.1-nemotron-70b-instruct",
+      "model": "NVIDIA: Llama 3.1 Nemotron 70B Instruct",
+      "cost_per_1m_in": 0.12,
+      "cost_per_1m_out": 0.3,
+      "cost_per_1m_in_cached": 0,
+      "cost_per_1m_out_cached": 0,
+      "context_window": 131072,
+      "default_max_tokens": 131072,
+      "can_reason": false,
+      "supports_attachments": false
+    },
+    {
+      "id": "google/gemini-flash-1.5-8b",
+      "model": "Google: Gemini 1.5 Flash 8B",
+      "cost_per_1m_in": 0.0375,
+      "cost_per_1m_out": 0.15,
+      "cost_per_1m_in_cached": 0.0583,
+      "cost_per_1m_out_cached": 0.01,
+      "context_window": 1000000,
+      "default_max_tokens": 8192,
+      "can_reason": false,
+      "supports_attachments": true
+    },
+    {
+      "id": "meta-llama/llama-3.2-3b-instruct",
+      "model": "Meta: Llama 3.2 3B Instruct",
+      "cost_per_1m_in": 0.01,
+      "cost_per_1m_out": 0.02,
+      "cost_per_1m_in_cached": 0,
+      "cost_per_1m_out_cached": 0,
+      "context_window": 131072,
+      "default_max_tokens": 16384,
+      "can_reason": false,
+      "supports_attachments": false
+    },
+    {
+      "id": "meta-llama/llama-3.2-11b-vision-instruct",
+      "model": "Meta: Llama 3.2 11B Vision Instruct",
+      "cost_per_1m_in": 0.049,
+      "cost_per_1m_out": 0.049,
+      "cost_per_1m_in_cached": 0,
+      "cost_per_1m_out_cached": 0,
+      "context_window": 131072,
+      "default_max_tokens": 16384,
+      "can_reason": false,
+      "supports_attachments": true
+    },
+    {
+      "id": "qwen/qwen-2.5-72b-instruct",
+      "model": "Qwen2.5 72B Instruct",
+      "cost_per_1m_in": 0.12,
+      "cost_per_1m_out": 0.39,
+      "cost_per_1m_in_cached": 0,
+      "cost_per_1m_out_cached": 0,
+      "context_window": 32768,
+      "default_max_tokens": 16384,
+      "can_reason": false,
+      "supports_attachments": false
+    },
+    {
+      "id": "mistralai/pixtral-12b",
+      "model": "Mistral: Pixtral 12B",
+      "cost_per_1m_in": 0.09999999999999999,
+      "cost_per_1m_out": 0.09999999999999999,
+      "cost_per_1m_in_cached": 0,
+      "cost_per_1m_out_cached": 0,
+      "context_window": 32768,
+      "default_max_tokens": 0,
+      "can_reason": false,
+      "supports_attachments": true
+    },
+    {
+      "id": "cohere/command-r-plus-08-2024",
+      "model": "Cohere: Command R+ (08-2024)",
+      "cost_per_1m_in": 2.5,
+      "cost_per_1m_out": 10,
+      "cost_per_1m_in_cached": 0,
+      "cost_per_1m_out_cached": 0,
+      "context_window": 128000,
+      "default_max_tokens": 4000,
+      "can_reason": false,
+      "supports_attachments": false
+    },
+    {
+      "id": "cohere/command-r-08-2024",
+      "model": "Cohere: Command R (08-2024)",
+      "cost_per_1m_in": 0.15,
+      "cost_per_1m_out": 0.6,
+      "cost_per_1m_in_cached": 0,
+      "cost_per_1m_out_cached": 0,
+      "context_window": 128000,
+      "default_max_tokens": 4000,
+      "can_reason": false,
+      "supports_attachments": false
+    },
+    {
+      "id": "microsoft/phi-3.5-mini-128k-instruct",
+      "model": "Microsoft: Phi-3.5 Mini 128K Instruct",
+      "cost_per_1m_in": 0.09999999999999999,
+      "cost_per_1m_out": 0.09999999999999999,
+      "cost_per_1m_in_cached": 0,
+      "cost_per_1m_out_cached": 0,
+      "context_window": 128000,
+      "default_max_tokens": 0,
+      "can_reason": false,
+      "supports_attachments": false
+    },
+    {
+      "id": "nousresearch/hermes-3-llama-3.1-70b",
+      "model": "Nous: Hermes 3 70B Instruct",
+      "cost_per_1m_in": 0.12,
+      "cost_per_1m_out": 0.3,
+      "cost_per_1m_in_cached": 0,
+      "cost_per_1m_out_cached": 0,
+      "context_window": 131072,
+      "default_max_tokens": 131072,
+      "can_reason": false,
+      "supports_attachments": false
+    },
+    {
+      "id": "openai/gpt-4o-2024-08-06",
+      "model": "OpenAI: GPT-4o (2024-08-06)",
+      "cost_per_1m_in": 2.5,
+      "cost_per_1m_out": 10,
+      "cost_per_1m_in_cached": 0,
+      "cost_per_1m_out_cached": 1.25,
+      "context_window": 128000,
+      "default_max_tokens": 16384,
+      "can_reason": false,
+      "supports_attachments": true
+    },
+    {
+      "id": "meta-llama/llama-3.1-8b-instruct",
+      "model": "Meta: Llama 3.1 8B Instruct",
+      "cost_per_1m_in": 0.016,
+      "cost_per_1m_out": 0.023,
+      "cost_per_1m_in_cached": 0,
+      "cost_per_1m_out_cached": 0,
+      "context_window": 131000,
+      "default_max_tokens": 131000,
+      "can_reason": false,
+      "supports_attachments": false
+    },
+    {
+      "id": "meta-llama/llama-3.1-405b-instruct",
+      "model": "Meta: Llama 3.1 405B Instruct",
+      "cost_per_1m_in": 0.7999999999999999,
+      "cost_per_1m_out": 0.7999999999999999,
+      "cost_per_1m_in_cached": 0,
+      "cost_per_1m_out_cached": 0,
+      "context_window": 32768,
+      "default_max_tokens": 16384,
+      "can_reason": false,
+      "supports_attachments": false
+    },
+    {
+      "id": "meta-llama/llama-3.1-70b-instruct",
+      "model": "Meta: Llama 3.1 70B Instruct",
+      "cost_per_1m_in": 0.09999999999999999,
+      "cost_per_1m_out": 0.28,
+      "cost_per_1m_in_cached": 0,
+      "cost_per_1m_out_cached": 0,
+      "context_window": 131072,
+      "default_max_tokens": 16384,
+      "can_reason": false,
+      "supports_attachments": false
+    },
+    {
+      "id": "mistralai/mistral-nemo",
+      "model": "Mistral: Mistral Nemo",
+      "cost_per_1m_in": 0.01,
+      "cost_per_1m_out": 0.013000000000000001,
+      "cost_per_1m_in_cached": 0,
+      "cost_per_1m_out_cached": 0,
+      "context_window": 131072,
+      "default_max_tokens": 131072,
+      "can_reason": false,
+      "supports_attachments": false
+    },
+    {
+      "id": "openai/gpt-4o-mini",
+      "model": "OpenAI: GPT-4o-mini",
+      "cost_per_1m_in": 0.15,
+      "cost_per_1m_out": 0.6,
+      "cost_per_1m_in_cached": 0,
+      "cost_per_1m_out_cached": 0.075,
+      "context_window": 128000,
+      "default_max_tokens": 16384,
+      "can_reason": false,
+      "supports_attachments": true
+    },
+    {
+      "id": "openai/gpt-4o-mini-2024-07-18",
+      "model": "OpenAI: GPT-4o-mini (2024-07-18)",
+      "cost_per_1m_in": 0.15,
+      "cost_per_1m_out": 0.6,
+      "cost_per_1m_in_cached": 0,
+      "cost_per_1m_out_cached": 0.075,
+      "context_window": 128000,
+      "default_max_tokens": 16384,
+      "can_reason": false,
+      "supports_attachments": true
+    },
+    {
+      "id": "anthropic/claude-3.5-sonnet-20240620:beta",
+      "model": "Anthropic: Claude 3.5 Sonnet (2024-06-20) (self-moderated)",
+      "cost_per_1m_in": 3,
+      "cost_per_1m_out": 15,
+      "cost_per_1m_in_cached": 3.75,
+      "cost_per_1m_out_cached": 0.3,
+      "context_window": 200000,
+      "default_max_tokens": 8192,
+      "can_reason": false,
+      "supports_attachments": true
+    },
+    {
+      "id": "anthropic/claude-3.5-sonnet-20240620",
+      "model": "Anthropic: Claude 3.5 Sonnet (2024-06-20)",
+      "cost_per_1m_in": 3,
+      "cost_per_1m_out": 15,
+      "cost_per_1m_in_cached": 3.75,
+      "cost_per_1m_out_cached": 0.3,
+      "context_window": 200000,
+      "default_max_tokens": 8192,
+      "can_reason": false,
+      "supports_attachments": true
+    },
+    {
+      "id": "mistralai/mistral-7b-instruct:free",
+      "model": "Mistral: Mistral 7B Instruct (free)",
+      "cost_per_1m_in": 0,
+      "cost_per_1m_out": 0,
+      "cost_per_1m_in_cached": 0,
+      "cost_per_1m_out_cached": 0,
+      "context_window": 32768,
+      "default_max_tokens": 16384,
+      "can_reason": false,
+      "supports_attachments": false
+    },
+    {
+      "id": "mistralai/mistral-7b-instruct",
+      "model": "Mistral: Mistral 7B Instruct",
+      "cost_per_1m_in": 0.028,
+      "cost_per_1m_out": 0.054,
+      "cost_per_1m_in_cached": 0,
+      "cost_per_1m_out_cached": 0,
+      "context_window": 32768,
+      "default_max_tokens": 16384,
+      "can_reason": false,
+      "supports_attachments": false
+    },
+    {
+      "id": "mistralai/mistral-7b-instruct-v0.3",
+      "model": "Mistral: Mistral 7B Instruct v0.3",
+      "cost_per_1m_in": 0.028,
+      "cost_per_1m_out": 0.054,
+      "cost_per_1m_in_cached": 0,
+      "cost_per_1m_out_cached": 0,
+      "context_window": 32768,
+      "default_max_tokens": 16384,
+      "can_reason": false,
+      "supports_attachments": false
+    },
+    {
+      "id": "microsoft/phi-3-mini-128k-instruct",
+      "model": "Microsoft: Phi-3 Mini 128K Instruct",
+      "cost_per_1m_in": 0.09999999999999999,
+      "cost_per_1m_out": 0.09999999999999999,
+      "cost_per_1m_in_cached": 0,
+      "cost_per_1m_out_cached": 0,
+      "context_window": 128000,
+      "default_max_tokens": 0,
+      "can_reason": false,
+      "supports_attachments": false
+    },
+    {
+      "id": "microsoft/phi-3-medium-128k-instruct",
+      "model": "Microsoft: Phi-3 Medium 128K Instruct",
+      "cost_per_1m_in": 1,
+      "cost_per_1m_out": 1,
+      "cost_per_1m_in_cached": 0,
+      "cost_per_1m_out_cached": 0,
+      "context_window": 128000,
+      "default_max_tokens": 0,
+      "can_reason": false,
+      "supports_attachments": false
+    },
+    {
+      "id": "google/gemini-flash-1.5",
+      "model": "Google: Gemini 1.5 Flash ",
+      "cost_per_1m_in": 0.075,
+      "cost_per_1m_out": 0.3,
+      "cost_per_1m_in_cached": 0.1583,
+      "cost_per_1m_out_cached": 0.01875,
+      "context_window": 1000000,
+      "default_max_tokens": 8192,
+      "can_reason": false,
+      "supports_attachments": true
+    },
+    {
+      "id": "openai/gpt-4o",
+      "model": "OpenAI: GPT-4o",
+      "cost_per_1m_in": 2.5,
+      "cost_per_1m_out": 10,
+      "cost_per_1m_in_cached": 0,
+      "cost_per_1m_out_cached": 1.25,
+      "context_window": 128000,
+      "default_max_tokens": 16384,
+      "can_reason": false,
+      "supports_attachments": true
+    },
+    {
+      "id": "openai/gpt-4o:extended",
+      "model": "OpenAI: GPT-4o (extended)",
+      "cost_per_1m_in": 6,
+      "cost_per_1m_out": 18,
+      "cost_per_1m_in_cached": 0,
+      "cost_per_1m_out_cached": 0,
+      "context_window": 128000,
+      "default_max_tokens": 64000,
+      "can_reason": false,
+      "supports_attachments": true
+    },
+    {
+      "id": "openai/gpt-4o-2024-05-13",
+      "model": "OpenAI: GPT-4o (2024-05-13)",
+      "cost_per_1m_in": 5,
+      "cost_per_1m_out": 15,
+      "cost_per_1m_in_cached": 0,
+      "cost_per_1m_out_cached": 0,
+      "context_window": 128000,
+      "default_max_tokens": 4096,
+      "can_reason": false,
+      "supports_attachments": true
+    },
+    {
+      "id": "meta-llama/llama-3-8b-instruct",
+      "model": "Meta: Llama 3 8B Instruct",
+      "cost_per_1m_in": 0.03,
+      "cost_per_1m_out": 0.06,
+      "cost_per_1m_in_cached": 0,
+      "cost_per_1m_out_cached": 0,
+      "context_window": 8192,
+      "default_max_tokens": 16384,
+      "can_reason": false,
+      "supports_attachments": false
+    },
+    {
+      "id": "meta-llama/llama-3-70b-instruct",
+      "model": "Meta: Llama 3 70B Instruct",
+      "cost_per_1m_in": 0.3,
+      "cost_per_1m_out": 0.4,
+      "cost_per_1m_in_cached": 0,
+      "cost_per_1m_out_cached": 0,
+      "context_window": 8192,
+      "default_max_tokens": 16384,
+      "can_reason": false,
+      "supports_attachments": false
+    },
+    {
+      "id": "mistralai/mixtral-8x22b-instruct",
+      "model": "Mistral: Mixtral 8x22B Instruct",
+      "cost_per_1m_in": 0.9,
+      "cost_per_1m_out": 0.9,
+      "cost_per_1m_in_cached": 0,
+      "cost_per_1m_out_cached": 0,
+      "context_window": 65536,
+      "default_max_tokens": 0,
+      "can_reason": false,
+      "supports_attachments": false
+    },
+    {
+      "id": "google/gemini-pro-1.5",
+      "model": "Google: Gemini 1.5 Pro",
+      "cost_per_1m_in": 1.25,
+      "cost_per_1m_out": 5,
+      "cost_per_1m_in_cached": 0,
+      "cost_per_1m_out_cached": 0,
+      "context_window": 2000000,
+      "default_max_tokens": 8192,
+      "can_reason": false,
+      "supports_attachments": true
+    },
+    {
+      "id": "openai/gpt-4-turbo",
+      "model": "OpenAI: GPT-4 Turbo",
+      "cost_per_1m_in": 10,
+      "cost_per_1m_out": 30,
+      "cost_per_1m_in_cached": 0,
+      "cost_per_1m_out_cached": 0,
+      "context_window": 128000,
+      "default_max_tokens": 4096,
+      "can_reason": false,
+      "supports_attachments": true
+    },
+    {
+      "id": "cohere/command-r-plus",
+      "model": "Cohere: Command R+",
+      "cost_per_1m_in": 3,
+      "cost_per_1m_out": 15,
+      "cost_per_1m_in_cached": 0,
+      "cost_per_1m_out_cached": 0,
+      "context_window": 128000,
+      "default_max_tokens": 4000,
+      "can_reason": false,
+      "supports_attachments": false
+    },
+    {
+      "id": "cohere/command-r-plus-04-2024",
+      "model": "Cohere: Command R+ (04-2024)",
+      "cost_per_1m_in": 3,
+      "cost_per_1m_out": 15,
+      "cost_per_1m_in_cached": 0,
+      "cost_per_1m_out_cached": 0,
+      "context_window": 128000,
+      "default_max_tokens": 4000,
+      "can_reason": false,
+      "supports_attachments": false
+    },
+    {
+      "id": "cohere/command-r",
+      "model": "Cohere: Command R",
+      "cost_per_1m_in": 0.5,
+      "cost_per_1m_out": 1.5,
+      "cost_per_1m_in_cached": 0,
+      "cost_per_1m_out_cached": 0,
+      "context_window": 128000,
+      "default_max_tokens": 4000,
+      "can_reason": false,
+      "supports_attachments": false
+    },
+    {
+      "id": "anthropic/claude-3-haiku:beta",
+      "model": "Anthropic: Claude 3 Haiku (self-moderated)",
+      "cost_per_1m_in": 0.25,
+      "cost_per_1m_out": 1.25,
+      "cost_per_1m_in_cached": 0.3,
+      "cost_per_1m_out_cached": 0.03,
+      "context_window": 200000,
+      "default_max_tokens": 4096,
+      "can_reason": false,
+      "supports_attachments": true
+    },
+    {
+      "id": "anthropic/claude-3-haiku",
+      "model": "Anthropic: Claude 3 Haiku",
+      "cost_per_1m_in": 0.25,
+      "cost_per_1m_out": 1.25,
+      "cost_per_1m_in_cached": 0.3,
+      "cost_per_1m_out_cached": 0.03,
+      "context_window": 200000,
+      "default_max_tokens": 4096,
+      "can_reason": false,
+      "supports_attachments": true
+    },
+    {
+      "id": "anthropic/claude-3-opus:beta",
+      "model": "Anthropic: Claude 3 Opus (self-moderated)",
+      "cost_per_1m_in": 15,
+      "cost_per_1m_out": 75,
+      "cost_per_1m_in_cached": 18.75,
+      "cost_per_1m_out_cached": 1.5,
+      "context_window": 200000,
+      "default_max_tokens": 4096,
+      "can_reason": false,
+      "supports_attachments": true
+    },
+    {
+      "id": "anthropic/claude-3-opus",
+      "model": "Anthropic: Claude 3 Opus",
+      "cost_per_1m_in": 15,
+      "cost_per_1m_out": 75,
+      "cost_per_1m_in_cached": 18.75,
+      "cost_per_1m_out_cached": 1.5,
+      "context_window": 200000,
+      "default_max_tokens": 4096,
+      "can_reason": false,
+      "supports_attachments": true
+    },
+    {
+      "id": "anthropic/claude-3-sonnet:beta",
+      "model": "Anthropic: Claude 3 Sonnet (self-moderated)",
+      "cost_per_1m_in": 3,
+      "cost_per_1m_out": 15,
+      "cost_per_1m_in_cached": 3.75,
+      "cost_per_1m_out_cached": 0.3,
+      "context_window": 200000,
+      "default_max_tokens": 4096,
+      "can_reason": false,
+      "supports_attachments": true
+    },
+    {
+      "id": "anthropic/claude-3-sonnet",
+      "model": "Anthropic: Claude 3 Sonnet",
+      "cost_per_1m_in": 3,
+      "cost_per_1m_out": 15,
+      "cost_per_1m_in_cached": 3.75,
+      "cost_per_1m_out_cached": 0.3,
+      "context_window": 200000,
+      "default_max_tokens": 4096,
+      "can_reason": false,
+      "supports_attachments": true
+    },
+    {
+      "id": "cohere/command-r-03-2024",
+      "model": "Cohere: Command R (03-2024)",
+      "cost_per_1m_in": 0.5,
+      "cost_per_1m_out": 1.5,
+      "cost_per_1m_in_cached": 0,
+      "cost_per_1m_out_cached": 0,
+      "context_window": 128000,
+      "default_max_tokens": 4000,
+      "can_reason": false,
+      "supports_attachments": false
+    },
+    {
+      "id": "mistralai/mistral-large",
+      "model": "Mistral Large",
+      "cost_per_1m_in": 2,
+      "cost_per_1m_out": 6,
+      "cost_per_1m_in_cached": 0,
+      "cost_per_1m_out_cached": 0,
+      "context_window": 128000,
+      "default_max_tokens": 0,
+      "can_reason": false,
+      "supports_attachments": false
+    },
+    {
+      "id": "openai/gpt-3.5-turbo-0613",
+      "model": "OpenAI: GPT-3.5 Turbo (older v0613)",
+      "cost_per_1m_in": 1,
+      "cost_per_1m_out": 2,
+      "cost_per_1m_in_cached": 0,
+      "cost_per_1m_out_cached": 0,
+      "context_window": 4095,
+      "default_max_tokens": 4096,
+      "can_reason": false,
+      "supports_attachments": false
+    },
+    {
+      "id": "openai/gpt-4-turbo-preview",
+      "model": "OpenAI: GPT-4 Turbo Preview",
+      "cost_per_1m_in": 10,
+      "cost_per_1m_out": 30,
+      "cost_per_1m_in_cached": 0,
+      "cost_per_1m_out_cached": 0,
+      "context_window": 128000,
+      "default_max_tokens": 4096,
+      "can_reason": false,
+      "supports_attachments": false
+    },
+    {
+      "id": "mistralai/mistral-medium",
+      "model": "Mistral Medium",
+      "cost_per_1m_in": 2.75,
+      "cost_per_1m_out": 8.1,
+      "cost_per_1m_in_cached": 0,
+      "cost_per_1m_out_cached": 0,
+      "context_window": 32768,
+      "default_max_tokens": 0,
+      "can_reason": false,
+      "supports_attachments": false
+    },
+    {
+      "id": "mistralai/mistral-small",
+      "model": "Mistral Small",
+      "cost_per_1m_in": 0.2,
+      "cost_per_1m_out": 0.6,
+      "cost_per_1m_in_cached": 0,
+      "cost_per_1m_out_cached": 0,
+      "context_window": 32768,
+      "default_max_tokens": 0,
+      "can_reason": false,
+      "supports_attachments": false
+    },
+    {
+      "id": "mistralai/mistral-tiny",
+      "model": "Mistral Tiny",
+      "cost_per_1m_in": 0.25,
+      "cost_per_1m_out": 0.25,
+      "cost_per_1m_in_cached": 0,
+      "cost_per_1m_out_cached": 0,
+      "context_window": 32768,
+      "default_max_tokens": 0,
+      "can_reason": false,
+      "supports_attachments": false
+    },
+    {
+      "id": "mistralai/mixtral-8x7b-instruct",
+      "model": "Mistral: Mixtral 8x7B Instruct",
+      "cost_per_1m_in": 0.08,
+      "cost_per_1m_out": 0.24,
+      "cost_per_1m_in_cached": 0,
+      "cost_per_1m_out_cached": 0,
+      "context_window": 32768,
+      "default_max_tokens": 16384,
+      "can_reason": false,
+      "supports_attachments": false
+    },
+    {
+      "id": "openai/gpt-4-1106-preview",
+      "model": "OpenAI: GPT-4 Turbo (older v1106)",
+      "cost_per_1m_in": 10,
+      "cost_per_1m_out": 30,
+      "cost_per_1m_in_cached": 0,
+      "cost_per_1m_out_cached": 0,
+      "context_window": 128000,
+      "default_max_tokens": 4096,
+      "can_reason": false,
+      "supports_attachments": false
+    },
+    {
+      "id": "mistralai/mistral-7b-instruct-v0.1",
+      "model": "Mistral: Mistral 7B Instruct v0.1",
+      "cost_per_1m_in": 0.11,
+      "cost_per_1m_out": 0.19,
+      "cost_per_1m_in_cached": 0,
+      "cost_per_1m_out_cached": 0,
+      "context_window": 2824,
+      "default_max_tokens": 0,
+      "can_reason": false,
+      "supports_attachments": false
+    },
+    {
+      "id": "openai/gpt-3.5-turbo-16k",
+      "model": "OpenAI: GPT-3.5 Turbo 16k",
+      "cost_per_1m_in": 3,
+      "cost_per_1m_out": 4,
+      "cost_per_1m_in_cached": 0,
+      "cost_per_1m_out_cached": 0,
+      "context_window": 16385,
+      "default_max_tokens": 4096,
+      "can_reason": false,
+      "supports_attachments": false
+    },
+    {
+      "id": "openai/gpt-4",
+      "model": "OpenAI: GPT-4",
+      "cost_per_1m_in": 30,
+      "cost_per_1m_out": 60,
+      "cost_per_1m_in_cached": 0,
+      "cost_per_1m_out_cached": 0,
+      "context_window": 8191,
+      "default_max_tokens": 4096,
+      "can_reason": false,
+      "supports_attachments": false
+    },
+    {
+      "id": "openai/gpt-4-0314",
+      "model": "OpenAI: GPT-4 (older v0314)",
+      "cost_per_1m_in": 30,
+      "cost_per_1m_out": 60,
+      "cost_per_1m_in_cached": 0,
+      "cost_per_1m_out_cached": 0,
+      "context_window": 8191,
+      "default_max_tokens": 4096,
+      "can_reason": false,
+      "supports_attachments": false
+    }
+  ]
+}

internal/providers/configs/vertexai.json πŸ”—

@@ -0,0 +1,34 @@
+{
+  "name": "Google Vertex AI",
+  "id": "vertexai",
+  "type": "vertexai",
+  "api_key": "$VERTEX_API_KEY",
+  "api_endpoint": "$VERTEX_API_ENDPOINT",
+  "default_model_id": "gemini-2.5-pro",
+  "models": [
+    {
+      "id": "gemini-2.5-pro",
+      "model": "Gemini 2.5 Pro",
+      "cost_per_1m_in": 1.25,
+      "cost_per_1m_out": 10,
+      "cost_per_1m_in_cached": 1.625,
+      "cost_per_1m_out_cached": 0.31,
+      "context_window": 1048576,
+      "default_max_tokens": 65536,
+      "can_reason": true,
+      "supports_attachments": true
+    },
+    {
+      "id": "gemini-2.5-flash",
+      "model": "Gemini 2.5 Flash",
+      "cost_per_1m_in": 0.3,
+      "cost_per_1m_out": 2.5,
+      "cost_per_1m_in_cached": 0.3833,
+      "cost_per_1m_out_cached": 0.075,
+      "context_window": 1048576,
+      "default_max_tokens": 65535,
+      "can_reason": true,
+      "supports_attachments": true
+    }
+  ]
+}

internal/providers/configs/xai.json πŸ”—

@@ -0,0 +1,34 @@
+{
+  "name": "xAI",
+  "id": "xai",
+  "api_key": "$XAI_API_KEY",
+  "api_endpoint": "https://api.x.ai/v1",
+  "type": "openai",
+  "default_model_id": "grok-3",
+  "models": [
+    {
+      "id": "grok-3-mini",
+      "model": "Grok 3 Mini",
+      "cost_per_1m_in": 0.3,
+      "cost_per_1m_out": 0.5,
+      "cost_per_1m_in_cached": 0,
+      "cost_per_1m_out_cached": 0.075,
+      "context_window": 131072,
+      "default_max_tokens": 0,
+      "can_reason": true,
+      "supports_attachments": false
+    },
+    {
+      "id": "grok-3",
+      "model": "Grok 3",
+      "cost_per_1m_in": 3,
+      "cost_per_1m_out": 15,
+      "cost_per_1m_in_cached": 0,
+      "cost_per_1m_out_cached": 0.75,
+      "context_window": 131072,
+      "default_max_tokens": 0,
+      "can_reason": false,
+      "supports_attachments": false
+    }
+  ]
+}

internal/providers/providers.go πŸ”—

@@ -0,0 +1,72 @@
package providers

import (
	_ "embed"
	"encoding/json"
	"log"
)

// Embedded provider configuration files. Each JSON file describes one
// provider (name, ID, endpoint placeholders, and its model catalog) and
// is compiled into the binary via go:embed, so no runtime file access
// is needed to serve the catalog.

//go:embed configs/openai.json
var openAIConfig []byte

//go:embed configs/anthropic.json
var anthropicConfig []byte

//go:embed configs/gemini.json
var geminiConfig []byte

//go:embed configs/openrouter.json
var openRouterConfig []byte

//go:embed configs/azure.json
var azureConfig []byte

//go:embed configs/vertexai.json
var vertexAIConfig []byte

//go:embed configs/xai.json
var xAIConfig []byte

//go:embed configs/bedrock.json
var bedrockConfig []byte
+
+func loadProviderFromConfig(configData []byte) Provider {
+	var provider Provider
+	if err := json.Unmarshal(configData, &provider); err != nil {
+		log.Printf("Error loading provider config: %v", err)
+		return Provider{}
+	}
+	return provider
+}
+
+func openAIProvider() Provider {
+	return loadProviderFromConfig(openAIConfig)
+}
+
+func anthropicProvider() Provider {
+	return loadProviderFromConfig(anthropicConfig)
+}
+
+func geminiProvider() Provider {
+	return loadProviderFromConfig(geminiConfig)
+}
+
+func azureProvider() Provider {
+	return loadProviderFromConfig(azureConfig)
+}
+
+func bedrockProvider() Provider {
+	return loadProviderFromConfig(bedrockConfig)
+}
+
+func vertexAIProvider() Provider {
+	return loadProviderFromConfig(vertexAIConfig)
+}
+
+func xAIProvider() Provider {
+	return loadProviderFromConfig(xAIConfig)
+}
+
+func openRouterProvider() Provider {
+	return loadProviderFromConfig(openRouterConfig)
+}

internal/providers/types.go πŸ”—

@@ -0,0 +1,88 @@
package providers

// ProviderType identifies the wire protocol / client family used to talk
// to a provider. Several providers can share one type (e.g. xai.json
// declares `"type": "openai"` because xAI speaks the OpenAI protocol).
type ProviderType string

const (
	ProviderTypeOpenAI     ProviderType = "openai"
	ProviderTypeAnthropic  ProviderType = "anthropic"
	ProviderTypeGemini     ProviderType = "gemini"
	ProviderTypeAzure      ProviderType = "azure"
	ProviderTypeBedrock    ProviderType = "bedrock"
	ProviderTypeVertexAI   ProviderType = "vertexai"
	ProviderTypeXAI        ProviderType = "xai"
	ProviderTypeOpenRouter ProviderType = "openrouter"
)

// InferenceProvider is the registry key for a concrete provider entry.
// NOTE(review): configs/vertexai.json uses id/type "vertex", not
// "vertexai" as declared here β€” confirm which spelling is canonical.
type InferenceProvider string

const (
	InferenceProviderOpenAI     InferenceProvider = "openai"
	InferenceProviderAnthropic  InferenceProvider = "anthropic"
	InferenceProviderGemini     InferenceProvider = "gemini"
	InferenceProviderAzure      InferenceProvider = "azure"
	InferenceProviderBedrock    InferenceProvider = "bedrock"
	InferenceProviderVertexAI   InferenceProvider = "vertexai"
	InferenceProviderXAI        InferenceProvider = "xai"
	InferenceProviderOpenRouter InferenceProvider = "openrouter"
)

// Provider describes one model provider: identity, how to reach it
// (API key / endpoint values may be "$ENV_VAR" placeholders in the
// embedded configs), and the models it offers.
type Provider struct {
	Name           string            `json:"name"`
	ID             InferenceProvider `json:"id"`
	APIKey         string            `json:"api_key,omitempty"`
	APIEndpoint    string            `json:"api_endpoint,omitempty"`
	Type           ProviderType      `json:"type,omitempty"`
	DefaultModelID string            `json:"default_model_id,omitempty"`
	Models         []Model           `json:"models,omitempty"`
}

// Model describes one model's pricing (USD per million tokens), context
// limits, and capability flags.
type Model struct {
	ID                 string  `json:"id"`
	Name               string  `json:"model"`
	CostPer1MIn        float64 `json:"cost_per_1m_in"`
	CostPer1MOut       float64 `json:"cost_per_1m_out"`
	CostPer1MInCached  float64 `json:"cost_per_1m_in_cached"`
	CostPer1MOutCached float64 `json:"cost_per_1m_out_cached"`
	ContextWindow      int64   `json:"context_window"`
	DefaultMaxTokens   int64   `json:"default_max_tokens"`
	CanReason          bool    `json:"can_reason"`
	// SupportsImages is serialized as "supports_attachments" β€” the JSON
	// key and the Go field name intentionally differ.
	SupportsImages bool `json:"supports_attachments"`
}

// ProviderFunc lazily loads a Provider from its embedded config.
type ProviderFunc func() Provider

// providerRegistry maps every known provider ID to its loader function;
// each call re-parses the embedded JSON.
var providerRegistry = map[InferenceProvider]ProviderFunc{
	InferenceProviderOpenAI:     openAIProvider,
	InferenceProviderAnthropic:  anthropicProvider,
	InferenceProviderGemini:     geminiProvider,
	InferenceProviderAzure:      azureProvider,
	InferenceProviderBedrock:    bedrockProvider,
	InferenceProviderVertexAI:   vertexAIProvider,
	InferenceProviderXAI:        xAIProvider,
	InferenceProviderOpenRouter: openRouterProvider,
}
+
+func GetAll() []Provider {
+	providers := make([]Provider, 0, len(providerRegistry))
+	for _, providerFunc := range providerRegistry {
+		providers = append(providers, providerFunc())
+	}
+	return providers
+}
+
+func GetByID(id InferenceProvider) (Provider, bool) {
+	providerFunc, exists := providerRegistry[id]
+	if !exists {
+		return Provider{}, false
+	}
+	return providerFunc(), true
+}
+
+func GetAvailableIDs() []InferenceProvider {
+	ids := make([]InferenceProvider, 0, len(providerRegistry))
+	for id := range providerRegistry {
+		ids = append(ids, id)
+	}
+	return ids
+}
+

main.go πŸ”—

@@ -0,0 +1,34 @@
+package main
+
import (
	"encoding/json"
	"log"
	"net/http"
	"time"

	"github.com/charmbracelet/fur/internal/providers"
)
+
+func providersHandler(w http.ResponseWriter, r *http.Request) {
+	if r.Method != http.MethodGet {
+		http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
+		return
+	}
+
+	allProviders := providers.GetAll()
+
+	w.Header().Set("Content-Type", "application/json")
+	if err := json.NewEncoder(w).Encode(allProviders); err != nil {
+		http.Error(w, "Internal server error", http.StatusInternalServerError)
+		return
+	}
+}
+
+func main() {
+	http.HandleFunc("/providers", providersHandler)
+
+	log.Println("Server starting on :8080")
+	if err := http.ListenAndServe(":8080", nil); err != nil {
+		log.Fatal("Server failed to start:", err)
+	}
+}
+