fix: rename everything

Carlos Alexandro Becker created

Change summary

.github/workflows/nightly.yml              |   4 
.github/workflows/release.yml              |   4 
.goreleaser.yaml                           |   2 
CRUSH.md                                   |   6 
cmd/openrouter/main.go                     |   2 
go.mod                                     |   2 
goreleaser.dockerfile                      |   4 
internal/providers/configs/openrouter.json | 189 +++++++++++------------
internal/providers/providers.go            |   2 
main.go                                    |   4 
pkg/client/client.go                       |  10 
11 files changed, 109 insertions(+), 120 deletions(-)

Detailed changes

.github/workflows/nightly.yml 🔗

@@ -30,6 +30,6 @@ jobs:
           token: ${{ secrets.PERSONAL_ACCESS_TOKEN }}
           inputs: |
             {
-              "app": "fur",
-              "image": "ghcr.io/charmbracelet/fur:${{ github.sha }}-devel"
+              "app": "catwalk",
+              "image": "ghcr.io/charmbracelet/catwalk:${{ github.sha }}-devel"
             }

.github/workflows/release.yml 🔗

@@ -33,6 +33,6 @@ jobs:
           token: ${{ secrets.PERSONAL_ACCESS_TOKEN }}
           inputs: |
             {
-              "app": "fur",
-              "image": "ghcr.io/charmbracelet/fur:${{ github.ref_name }}"
+              "app": "catwalk",
+              "image": "ghcr.io/charmbracelet/catwalk:${{ github.ref_name }}"
             }

.goreleaser.yaml 🔗

@@ -14,7 +14,7 @@ before:
     - go mod tidy
 
 builds:
-  - binary: fur
+  - binary: catwalk
     env:
       - CGO_ENABLED=0
     main: .

CRUSH.md 🔗

@@ -1,6 +1,7 @@
-# Fur - AI Provider Database
+# Catwalk - AI Provider Database
 
 ## Build/Test Commands
+
 - `go build` - Build the main HTTP server
 - `go build ./cmd/openrouter` - Build OpenRouter config generator
 - `go test ./...` - Run all tests
@@ -9,6 +10,7 @@
 - `go run ./cmd/openrouter/main.go` - Generate OpenRouter config
 
 ## Code Style Guidelines
+
 - Package comments: Start with "Package name provides/represents..."
 - Imports: Standard library first, then third-party, then local packages
 - Error handling: Use `fmt.Errorf("message: %w", err)` for wrapping
@@ -19,4 +21,4 @@
 - Comments: Use `//nolint:directive` for linter exceptions
 - HTTP: Always set timeouts, use context, defer close response bodies
 - JSON: Use `json.MarshalIndent` for pretty output, validate unmarshaling
-- File permissions: Use 0o600 for sensitive config files
+- File permissions: Use 0o600 for sensitive config files

cmd/openrouter/main.go 🔗

@@ -14,7 +14,7 @@ import (
 	"strconv"
 	"time"
 
-	"github.com/charmbracelet/fur/pkg/provider"
+	"github.com/charmbracelet/catwalk/pkg/provider"
 )
 
 // Model represents the complete model configuration.

go.mod 🔗

@@ -1,4 +1,4 @@
-module github.com/charmbracelet/fur
+module github.com/charmbracelet/catwalk
 
 go 1.24.3
 

goreleaser.dockerfile 🔗

@@ -1,4 +1,4 @@
 FROM alpine
-COPY fur /usr/bin/fur
+COPY catwalk /usr/bin/catwalk
 EXPOSE 8080
-CMD ["/usr/bin/fur"]
+CMD ["/usr/bin/catwalk"]

internal/providers/configs/openrouter.json 🔗

@@ -7,6 +7,45 @@
   "default_large_model_id": "anthropic/claude-sonnet-4",
   "default_small_model_id": "anthropic/claude-3.5-haiku",
   "models": [
+    {
+      "id": "qwen/qwen3-coder",
+      "model": "Qwen: Qwen3 Coder",
+      "cost_per_1m_in": 1,
+      "cost_per_1m_out": 5,
+      "cost_per_1m_in_cached": 0,
+      "cost_per_1m_out_cached": 0,
+      "context_window": 1000000,
+      "default_max_tokens": 32768,
+      "can_reason": false,
+      "has_reasoning_efforts": false,
+      "supports_attachments": false
+    },
+    {
+      "id": "google/gemini-2.5-flash-lite",
+      "model": "Google: Gemini 2.5 Flash Lite",
+      "cost_per_1m_in": 0.1,
+      "cost_per_1m_out": 0.4,
+      "cost_per_1m_in_cached": 0.1833,
+      "cost_per_1m_out_cached": 0.025,
+      "context_window": 1048576,
+      "default_max_tokens": 32767,
+      "can_reason": true,
+      "has_reasoning_efforts": false,
+      "supports_attachments": true
+    },
+    {
+      "id": "qwen/qwen3-235b-a22b-07-25",
+      "model": "Qwen: Qwen3 235B A22B 2507",
+      "cost_per_1m_in": 0.12,
+      "cost_per_1m_out": 0.59,
+      "cost_per_1m_in_cached": 0,
+      "cost_per_1m_out_cached": 0,
+      "context_window": 262144,
+      "default_max_tokens": 131072,
+      "can_reason": false,
+      "has_reasoning_efforts": false,
+      "supports_attachments": false
+    },
     {
       "id": "moonshotai/kimi-k2:free",
       "model": "MoonshotAI: Kimi K2 (free)",
@@ -49,12 +88,12 @@
     {
       "id": "mistralai/devstral-small",
       "model": "Mistral: Devstral Small 1.1",
-      "cost_per_1m_in": 0.09,
-      "cost_per_1m_out": 0.3,
+      "cost_per_1m_in": 0.07,
+      "cost_per_1m_out": 0.28,
       "cost_per_1m_in_cached": 0,
       "cost_per_1m_out_cached": 0,
-      "context_window": 131072,
-      "default_max_tokens": 65536,
+      "context_window": 128000,
+      "default_max_tokens": 12800,
       "can_reason": false,
       "has_reasoning_efforts": false,
       "supports_attachments": false
@@ -192,12 +231,12 @@
     {
       "id": "mistralai/magistral-small-2506",
       "model": "Mistral: Magistral Small 2506",
-      "cost_per_1m_in": 0.09999999999999999,
-      "cost_per_1m_out": 0.3,
+      "cost_per_1m_in": 0.5,
+      "cost_per_1m_out": 1.5,
       "cost_per_1m_in_cached": 0,
       "cost_per_1m_out_cached": 0,
-      "context_window": 40960,
-      "default_max_tokens": 20480,
+      "context_window": 40000,
+      "default_max_tokens": 20000,
       "can_reason": true,
       "has_reasoning_efforts": false,
       "supports_attachments": false
@@ -244,12 +283,12 @@
     {
       "id": "deepseek/deepseek-r1-0528",
       "model": "DeepSeek: R1 0528",
-      "cost_per_1m_in": 0.5,
-      "cost_per_1m_out": 2,
+      "cost_per_1m_in": 0.272,
+      "cost_per_1m_out": 0.272,
       "cost_per_1m_in_cached": 0,
       "cost_per_1m_out_cached": 0,
       "context_window": 163840,
-      "default_max_tokens": 81920,
+      "default_max_tokens": 16384,
       "can_reason": true,
       "has_reasoning_efforts": false,
       "supports_attachments": false
@@ -296,42 +335,16 @@
     {
       "id": "mistralai/devstral-small-2505",
       "model": "Mistral: Devstral Small 2505",
-      "cost_per_1m_in": 0.06,
-      "cost_per_1m_out": 0.12,
+      "cost_per_1m_in": 0.03,
+      "cost_per_1m_out": 0.03,
       "cost_per_1m_in_cached": 0,
       "cost_per_1m_out_cached": 0,
-      "context_window": 128000,
-      "default_max_tokens": 12800,
+      "context_window": 32768,
+      "default_max_tokens": 3276,
       "can_reason": false,
       "has_reasoning_efforts": false,
       "supports_attachments": false
     },
-    {
-      "id": "google/gemini-2.5-flash-preview-05-20",
-      "model": "Google: Gemini 2.5 Flash Preview 05-20",
-      "cost_per_1m_in": 0.15,
-      "cost_per_1m_out": 0.6,
-      "cost_per_1m_in_cached": 0.2333,
-      "cost_per_1m_out_cached": 0.0375,
-      "context_window": 1048576,
-      "default_max_tokens": 32767,
-      "can_reason": true,
-      "has_reasoning_efforts": false,
-      "supports_attachments": true
-    },
-    {
-      "id": "google/gemini-2.5-flash-preview-05-20:thinking",
-      "model": "Google: Gemini 2.5 Flash Preview 05-20 (thinking)",
-      "cost_per_1m_in": 0.15,
-      "cost_per_1m_out": 3.5,
-      "cost_per_1m_in_cached": 0.2333,
-      "cost_per_1m_out_cached": 0.0375,
-      "context_window": 1048576,
-      "default_max_tokens": 32767,
-      "can_reason": true,
-      "has_reasoning_efforts": false,
-      "supports_attachments": true
-    },
     {
       "id": "openai/codex-mini",
       "model": "OpenAI: Codex Mini",
@@ -452,8 +465,8 @@
     {
       "id": "qwen/qwen3-32b",
       "model": "Qwen: Qwen3 32B",
-      "cost_per_1m_in": 0.09999999999999999,
-      "cost_per_1m_out": 0.3,
+      "cost_per_1m_in": 0.027,
+      "cost_per_1m_out": 0.027,
       "cost_per_1m_in_cached": 0,
       "cost_per_1m_out_cached": 0,
       "context_window": 40960,
@@ -488,32 +501,6 @@
       "has_reasoning_efforts": false,
       "supports_attachments": false
     },
-    {
-      "id": "google/gemini-2.5-flash-preview",
-      "model": "Google: Gemini 2.5 Flash Preview 04-17",
-      "cost_per_1m_in": 0.15,
-      "cost_per_1m_out": 0.6,
-      "cost_per_1m_in_cached": 0.2333,
-      "cost_per_1m_out_cached": 0.0375,
-      "context_window": 1048576,
-      "default_max_tokens": 32767,
-      "can_reason": true,
-      "has_reasoning_efforts": false,
-      "supports_attachments": true
-    },
-    {
-      "id": "google/gemini-2.5-flash-preview:thinking",
-      "model": "Google: Gemini 2.5 Flash Preview 04-17 (thinking)",
-      "cost_per_1m_in": 0.15,
-      "cost_per_1m_out": 3.5,
-      "cost_per_1m_in_cached": 0.2333,
-      "cost_per_1m_out_cached": 0.0375,
-      "context_window": 1048576,
-      "default_max_tokens": 32767,
-      "can_reason": true,
-      "has_reasoning_efforts": false,
-      "supports_attachments": true
-    },
     {
       "id": "openai/o4-mini-high",
       "model": "OpenAI: o4 Mini High",
@@ -664,7 +651,7 @@
       "cost_per_1m_out": 0,
       "cost_per_1m_in_cached": 0,
       "cost_per_1m_out_cached": 0,
-      "context_window": 16384,
+      "context_window": 32768,
       "default_max_tokens": 8192,
       "can_reason": false,
       "has_reasoning_efforts": false,
@@ -699,12 +686,12 @@
     {
       "id": "mistralai/mistral-small-3.1-24b-instruct",
       "model": "Mistral: Mistral Small 3.1 24B",
-      "cost_per_1m_in": 0.049999999999999996,
-      "cost_per_1m_out": 0.09999999999999999,
+      "cost_per_1m_in": 0.027,
+      "cost_per_1m_out": 0.027,
       "cost_per_1m_in_cached": 0,
       "cost_per_1m_out_cached": 0,
-      "context_window": 128000,
-      "default_max_tokens": 12800,
+      "context_window": 96000,
+      "default_max_tokens": 48000,
       "can_reason": false,
       "has_reasoning_efforts": false,
       "supports_attachments": true
@@ -881,12 +868,12 @@
     {
       "id": "mistralai/mistral-small-24b-instruct-2501",
       "model": "Mistral: Mistral Small 3",
-      "cost_per_1m_in": 0.049999999999999996,
-      "cost_per_1m_out": 0.08,
+      "cost_per_1m_in": 0.03,
+      "cost_per_1m_out": 0.03,
       "cost_per_1m_in_cached": 0,
       "cost_per_1m_out_cached": 0,
       "context_window": 32768,
-      "default_max_tokens": 16384,
+      "default_max_tokens": 3276,
       "can_reason": false,
       "has_reasoning_efforts": false,
       "supports_attachments": false
@@ -894,12 +881,12 @@
     {
       "id": "deepseek/deepseek-r1-distill-llama-70b",
       "model": "DeepSeek: R1 Distill Llama 70B",
-      "cost_per_1m_in": 0.09999999999999999,
-      "cost_per_1m_out": 0.39999999999999997,
+      "cost_per_1m_in": 0.05,
+      "cost_per_1m_out": 0.05,
       "cost_per_1m_in_cached": 0,
       "cost_per_1m_out_cached": 0,
       "context_window": 131072,
-      "default_max_tokens": 8192,
+      "default_max_tokens": 13107,
       "can_reason": true,
       "has_reasoning_efforts": false,
       "supports_attachments": false
@@ -933,12 +920,12 @@
     {
       "id": "deepseek/deepseek-chat",
       "model": "DeepSeek: DeepSeek V3",
-      "cost_per_1m_in": 0.3,
-      "cost_per_1m_out": 0.85,
+      "cost_per_1m_in": 0.272,
+      "cost_per_1m_out": 0.272,
       "cost_per_1m_in_cached": 0,
       "cost_per_1m_out_cached": 0,
       "context_window": 163840,
-      "default_max_tokens": 81920,
+      "default_max_tokens": 16384,
       "can_reason": false,
       "has_reasoning_efforts": false,
       "supports_attachments": false
@@ -1284,12 +1271,12 @@
     {
       "id": "qwen/qwen-2.5-72b-instruct",
       "model": "Qwen2.5 72B Instruct",
-      "cost_per_1m_in": 0.12,
-      "cost_per_1m_out": 0.39,
+      "cost_per_1m_in": 0.101,
+      "cost_per_1m_out": 0.101,
       "cost_per_1m_in_cached": 0,
       "cost_per_1m_out_cached": 0,
       "context_window": 32768,
-      "default_max_tokens": 8192,
+      "default_max_tokens": 3276,
       "can_reason": false,
       "has_reasoning_efforts": false,
       "supports_attachments": false
@@ -1379,8 +1366,8 @@
       "cost_per_1m_out": 0.02,
       "cost_per_1m_in_cached": 0,
       "cost_per_1m_out_cached": 0,
-      "context_window": 131000,
-      "default_max_tokens": 65500,
+      "context_window": 131072,
+      "default_max_tokens": 8192,
       "can_reason": false,
       "has_reasoning_efforts": false,
       "supports_attachments": false
@@ -1749,19 +1736,6 @@
       "has_reasoning_efforts": false,
       "supports_attachments": true
     },
-    {
-      "id": "anthropic/claude-3-sonnet:beta",
-      "model": "Anthropic: Claude 3 Sonnet (self-moderated)",
-      "cost_per_1m_in": 3,
-      "cost_per_1m_out": 15,
-      "cost_per_1m_in_cached": 3.75,
-      "cost_per_1m_out_cached": 0.3,
-      "context_window": 200000,
-      "default_max_tokens": 2048,
-      "can_reason": false,
-      "has_reasoning_efforts": false,
-      "supports_attachments": true
-    },
     {
       "id": "anthropic/claude-3-sonnet",
       "model": "Anthropic: Claude 3 Sonnet",
@@ -1905,6 +1879,19 @@
       "has_reasoning_efforts": false,
       "supports_attachments": false
     },
+    {
+      "id": "openai/gpt-3.5-turbo",
+      "model": "OpenAI: GPT-3.5 Turbo",
+      "cost_per_1m_in": 0.5,
+      "cost_per_1m_out": 1.5,
+      "cost_per_1m_in_cached": 0,
+      "cost_per_1m_out_cached": 0,
+      "context_window": 16385,
+      "default_max_tokens": 2048,
+      "can_reason": false,
+      "has_reasoning_efforts": false,
+      "supports_attachments": false
+    },
     {
       "id": "openai/gpt-4",
       "model": "OpenAI: GPT-4",

internal/providers/providers.go 🔗

@@ -6,7 +6,7 @@ import (
 	"encoding/json"
 	"log"
 
-	"github.com/charmbracelet/fur/pkg/provider"
+	"github.com/charmbracelet/catwalk/pkg/provider"
 )
 
 //go:embed configs/openai.json

main.go 🔗

@@ -8,14 +8,14 @@ import (
 	"net/http"
 	"time"
 
-	"github.com/charmbracelet/fur/internal/providers"
+	"github.com/charmbracelet/catwalk/internal/providers"
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/client_golang/prometheus/promauto"
 	"github.com/prometheus/client_golang/prometheus/promhttp"
 )
 
 var counter = promauto.NewCounter(prometheus.CounterOpts{
-	Namespace: "fur",
+	Namespace: "catwalk",
 	Subsystem: "providers",
 	Name:      "requests_total",
 	Help:      "Total number of requests to the providers endpoint",

pkg/client/client.go 🔗

@@ -1,4 +1,4 @@
-// Package client provides a client for interacting with the fur service.
+// Package client provides a client for interacting with the catwalk service.
 package client
 
 import (
@@ -7,21 +7,21 @@ import (
 	"net/http"
 	"os"
 
-	"github.com/charmbracelet/fur/pkg/provider"
+	"github.com/charmbracelet/catwalk/pkg/provider"
 )
 
 const defaultURL = "http://localhost:8080"
 
-// Client represents a client for the fur service.
+// Client represents a client for the catwalk service.
 type Client struct {
 	baseURL    string
 	httpClient *http.Client
 }
 
 // New creates a new client instance
-// Uses FUR_URL environment variable or falls back to localhost:8080.
+// Uses CATWALK_URL environment variable or falls back to localhost:8080.
 func New() *Client {
-	baseURL := os.Getenv("FUR_URL")
+	baseURL := os.Getenv("CATWALK_URL")
 	if baseURL == "" {
 		baseURL = defaultURL
 	}