1package config
  2
  3import (
  4	"github.com/charmbracelet/crush/internal/fur/provider"
  5)
  6
// MockProviders returns a mock list of providers for testing.
// This avoids making API calls during tests and provides consistent test data.
//
// The entries mirror the shape of the provider catalog normally fetched
// remotely: each provider carries a default large/small model ID plus a
// Models slice with per-model pricing (per 1M tokens), context window, and
// capability flags. DefaultLargeModelID/DefaultSmallModelID always reference
// an entry present in that provider's Models slice.
//
// NOTE(review): APIKey and APIEndpoint hold "$VAR"-style placeholders;
// presumably they are expanded from the environment by the config loader —
// verify against callers.
func MockProviders() []provider.Provider {
	return []provider.Provider{
		// Anthropic — Claude 3 / 3.5 family.
		// CostPer1MInCached / CostPer1MOutCached: presumably cache-write and
		// cache-read prices respectively (here 1.25x and 0.1x the base input
		// price) — confirm against the provider.Model field docs.
		{
			Name:                "Anthropic",
			ID:                  provider.InferenceProviderAnthropic,
			APIKey:              "$ANTHROPIC_API_KEY",
			APIEndpoint:         "$ANTHROPIC_API_ENDPOINT",
			Type:                provider.TypeAnthropic,
			DefaultLargeModelID: "claude-3-opus",
			DefaultSmallModelID: "claude-3-haiku",
			Models: []provider.Model{
				{
					ID:                 "claude-3-opus",
					Name:               "Claude 3 Opus",
					CostPer1MIn:        15.0,
					CostPer1MOut:       75.0,
					CostPer1MInCached:  18.75,
					CostPer1MOutCached: 1.5,
					ContextWindow:      200000,
					DefaultMaxTokens:   4096,
					CanReason:          false,
					SupportsImages:     true,
				},
				{
					ID:                 "claude-3-haiku",
					Name:               "Claude 3 Haiku",
					CostPer1MIn:        0.25,
					CostPer1MOut:       1.25,
					CostPer1MInCached:  0.3,
					CostPer1MOutCached: 0.03,
					ContextWindow:      200000,
					DefaultMaxTokens:   4096,
					CanReason:          false,
					SupportsImages:     true,
				},
				{
					ID:                 "claude-3-5-sonnet-20241022",
					Name:               "Claude 3.5 Sonnet",
					CostPer1MIn:        3.0,
					CostPer1MOut:       15.0,
					CostPer1MInCached:  3.75,
					CostPer1MOutCached: 0.3,
					ContextWindow:      200000,
					DefaultMaxTokens:   8192,
					CanReason:          false,
					SupportsImages:     true,
				},
				{
					ID:                 "claude-3-5-haiku-20241022",
					Name:               "Claude 3.5 Haiku",
					CostPer1MIn:        0.8,
					CostPer1MOut:       4.0,
					CostPer1MInCached:  1.0,
					CostPer1MOutCached: 0.08,
					ContextWindow:      200000,
					DefaultMaxTokens:   8192,
					CanReason:          false,
					SupportsImages:     true,
				},
			},
		},
		// OpenAI — GPT family. The two oldest entries (gpt-4, gpt-3.5-turbo)
		// carry zero cached-token costs in this mock, so tests can cover the
		// "no cached pricing" code path as well.
		{
			Name:                "OpenAI",
			ID:                  provider.InferenceProviderOpenAI,
			APIKey:              "$OPENAI_API_KEY",
			APIEndpoint:         "$OPENAI_API_ENDPOINT",
			Type:                provider.TypeOpenAI,
			DefaultLargeModelID: "gpt-4",
			DefaultSmallModelID: "gpt-3.5-turbo",
			Models: []provider.Model{
				{
					ID:                 "gpt-4",
					Name:               "GPT-4",
					CostPer1MIn:        30.0,
					CostPer1MOut:       60.0,
					CostPer1MInCached:  0.0,
					CostPer1MOutCached: 0.0,
					ContextWindow:      8192,
					DefaultMaxTokens:   4096,
					CanReason:          false,
					SupportsImages:     false,
				},
				{
					ID:                 "gpt-3.5-turbo",
					Name:               "GPT-3.5 Turbo",
					CostPer1MIn:        1.0,
					CostPer1MOut:       2.0,
					CostPer1MInCached:  0.0,
					CostPer1MOutCached: 0.0,
					ContextWindow:      4096,
					DefaultMaxTokens:   4096,
					CanReason:          false,
					SupportsImages:     false,
				},
				{
					ID:                 "gpt-4-turbo",
					Name:               "GPT-4 Turbo",
					CostPer1MIn:        10.0,
					CostPer1MOut:       30.0,
					CostPer1MInCached:  0.0,
					CostPer1MOutCached: 0.0,
					ContextWindow:      128000,
					DefaultMaxTokens:   4096,
					CanReason:          false,
					SupportsImages:     true,
				},
				{
					ID:                 "gpt-4o",
					Name:               "GPT-4o",
					CostPer1MIn:        2.5,
					CostPer1MOut:       10.0,
					CostPer1MInCached:  0.0,
					CostPer1MOutCached: 1.25,
					ContextWindow:      128000,
					DefaultMaxTokens:   16384,
					CanReason:          false,
					SupportsImages:     true,
				},
				{
					ID:                 "gpt-4o-mini",
					Name:               "GPT-4o-mini",
					CostPer1MIn:        0.15,
					CostPer1MOut:       0.6,
					CostPer1MInCached:  0.0,
					CostPer1MOutCached: 0.075,
					ContextWindow:      128000,
					DefaultMaxTokens:   16384,
					CanReason:          false,
					SupportsImages:     true,
				},
			},
		},
		// Google Gemini — the only provider in this mock whose models have
		// CanReason set, plus a much larger (1M-token) context window; useful
		// for exercising reasoning-capable code paths in tests.
		{
			Name:                "Google Gemini",
			ID:                  provider.InferenceProviderGemini,
			APIKey:              "$GEMINI_API_KEY",
			APIEndpoint:         "$GEMINI_API_ENDPOINT",
			Type:                provider.TypeGemini,
			DefaultLargeModelID: "gemini-2.5-pro",
			DefaultSmallModelID: "gemini-2.5-flash",
			Models: []provider.Model{
				{
					ID:                 "gemini-2.5-pro",
					Name:               "Gemini 2.5 Pro",
					CostPer1MIn:        1.25,
					CostPer1MOut:       10.0,
					CostPer1MInCached:  1.625,
					CostPer1MOutCached: 0.31,
					ContextWindow:      1048576,
					DefaultMaxTokens:   65536,
					CanReason:          true,
					SupportsImages:     true,
				},
				{
					ID:                 "gemini-2.5-flash",
					Name:               "Gemini 2.5 Flash",
					CostPer1MIn:        0.3,
					CostPer1MOut:       2.5,
					CostPer1MInCached:  0.3833,
					CostPer1MOutCached: 0.075,
					ContextWindow:      1048576,
					// NOTE(review): 65535 here vs 65536 for Pro above — looks
					// like it may mirror the upstream catalog; confirm it is
					// intentional and not an off-by-one.
					DefaultMaxTokens:   65535,
					CanReason:          true,
					SupportsImages:     true,
				},
			},
		},
	}
}