provider_mock.go

package config

import (
	"github.com/charmbracelet/crush/internal/fur/provider"
)

// MockProviders returns a mock list of providers for testing.
// This avoids making API calls during tests and provides consistent test data.
func MockProviders() []provider.Provider {
	return []provider.Provider{
		{
			Name:                "Anthropic",
			ID:                  provider.InferenceProviderAnthropic,
			APIKey:              "$ANTHROPIC_API_KEY",
			APIEndpoint:         "$ANTHROPIC_API_ENDPOINT",
			Type:                provider.TypeAnthropic,
			DefaultLargeModelID: "claude-3-opus",
			DefaultSmallModelID: "claude-3-haiku",
			Models: []provider.Model{
				{
					ID:                 "claude-3-opus",
					Name:               "Claude 3 Opus",
					CostPer1MIn:        15.0,
					CostPer1MOut:       75.0,
					CostPer1MInCached:  18.75,
					CostPer1MOutCached: 1.5,
					ContextWindow:      200000,
					DefaultMaxTokens:   4096,
					CanReason:          false,
					SupportsImages:     true,
				},
				{
					ID:                 "claude-3-haiku",
					Name:               "Claude 3 Haiku",
					CostPer1MIn:        0.25,
					CostPer1MOut:       1.25,
					CostPer1MInCached:  0.3,
					CostPer1MOutCached: 0.03,
					ContextWindow:      200000,
					DefaultMaxTokens:   4096,
					CanReason:          false,
					SupportsImages:     true,
				},
				{
					ID:                 "claude-3-5-sonnet-20241022",
					Name:               "Claude 3.5 Sonnet",
					CostPer1MIn:        3.0,
					CostPer1MOut:       15.0,
					CostPer1MInCached:  3.75,
					CostPer1MOutCached: 0.3,
					ContextWindow:      200000,
					DefaultMaxTokens:   8192,
					CanReason:          false,
					SupportsImages:     true,
				},
				{
					ID:                 "claude-3-5-haiku-20241022",
					Name:               "Claude 3.5 Haiku",
					CostPer1MIn:        0.8,
					CostPer1MOut:       4.0,
					CostPer1MInCached:  1.0,
					CostPer1MOutCached: 0.08,
					ContextWindow:      200000,
					DefaultMaxTokens:   8192,
					CanReason:          false,
					SupportsImages:     true,
				},
			},
		},
		{
			Name:                "OpenAI",
			ID:                  provider.InferenceProviderOpenAI,
			APIKey:              "$OPENAI_API_KEY",
			APIEndpoint:         "$OPENAI_API_ENDPOINT",
			Type:                provider.TypeOpenAI,
			DefaultLargeModelID: "gpt-4",
			DefaultSmallModelID: "gpt-3.5-turbo",
			Models: []provider.Model{
				{
					ID:                 "gpt-4",
					Name:               "GPT-4",
					CostPer1MIn:        30.0,
					CostPer1MOut:       60.0,
					CostPer1MInCached:  0.0,
					CostPer1MOutCached: 0.0,
					ContextWindow:      8192,
					DefaultMaxTokens:   4096,
					CanReason:          false,
					SupportsImages:     false,
				},
				{
					ID:                 "gpt-3.5-turbo",
					Name:               "GPT-3.5 Turbo",
					CostPer1MIn:        1.0,
					CostPer1MOut:       2.0,
					CostPer1MInCached:  0.0,
					CostPer1MOutCached: 0.0,
					ContextWindow:      4096,
					DefaultMaxTokens:   4096,
					CanReason:          false,
					SupportsImages:     false,
				},
				{
					ID:                 "gpt-4-turbo",
					Name:               "GPT-4 Turbo",
					CostPer1MIn:        10.0,
					CostPer1MOut:       30.0,
					CostPer1MInCached:  0.0,
					CostPer1MOutCached: 0.0,
					ContextWindow:      128000,
					DefaultMaxTokens:   4096,
					CanReason:          false,
					SupportsImages:     true,
				},
				{
					ID:                 "gpt-4o",
					Name:               "GPT-4o",
					CostPer1MIn:        2.5,
					CostPer1MOut:       10.0,
					CostPer1MInCached:  0.0,
					CostPer1MOutCached: 1.25,
					ContextWindow:      128000,
					DefaultMaxTokens:   16384,
					CanReason:          false,
					SupportsImages:     true,
				},
				{
					ID:                 "gpt-4o-mini",
					Name:               "GPT-4o-mini",
					CostPer1MIn:        0.15,
					CostPer1MOut:       0.6,
					CostPer1MInCached:  0.0,
					CostPer1MOutCached: 0.075,
					ContextWindow:      128000,
					DefaultMaxTokens:   16384,
					CanReason:          false,
					SupportsImages:     true,
				},
				{
					ID:                     "o1-preview",
					Name:                   "o1-preview",
					CostPer1MIn:            15.0,
					CostPer1MOut:           60.0,
					CostPer1MInCached:      0.0,
					CostPer1MOutCached:     0.0,
					ContextWindow:          128000,
					DefaultMaxTokens:       32768,
					CanReason:              true,
					HasReasoningEffort:     true,
					DefaultReasoningEffort: "medium",
					SupportsImages:         true,
				},
				{
					ID:                     "o1-mini",
					Name:                   "o1-mini",
					CostPer1MIn:            3.0,
					CostPer1MOut:           12.0,
					CostPer1MInCached:      0.0,
					CostPer1MOutCached:     0.0,
					ContextWindow:          128000,
					DefaultMaxTokens:       65536,
					CanReason:              true,
					HasReasoningEffort:     true,
					DefaultReasoningEffort: "medium",
					SupportsImages:         true,
				},
			},
		},
		{
			Name:                "Google Gemini",
			ID:                  provider.InferenceProviderGemini,
			APIKey:              "$GEMINI_API_KEY",
			APIEndpoint:         "$GEMINI_API_ENDPOINT",
			Type:                provider.TypeGemini,
			DefaultLargeModelID: "gemini-2.5-pro",
			DefaultSmallModelID: "gemini-2.5-flash",
			Models: []provider.Model{
				{
					ID:                 "gemini-2.5-pro",
					Name:               "Gemini 2.5 Pro",
					CostPer1MIn:        1.25,
					CostPer1MOut:       10.0,
					CostPer1MInCached:  1.625,
					CostPer1MOutCached: 0.31,
					ContextWindow:      1048576,
					DefaultMaxTokens:   65536,
					CanReason:          true,
					SupportsImages:     true,
				},
				{
					ID:                 "gemini-2.5-flash",
					Name:               "Gemini 2.5 Flash",
					CostPer1MIn:        0.3,
					CostPer1MOut:       2.5,
					CostPer1MInCached:  0.3833,
					CostPer1MOutCached: 0.075,
					ContextWindow:      1048576,
					DefaultMaxTokens:   65535,
					CanReason:          true,
					SupportsImages:     true,
				},
			},
		},
		{
			Name:                "xAI",
			ID:                  provider.InferenceProviderXAI,
			APIKey:              "$XAI_API_KEY",
			APIEndpoint:         "https://api.x.ai/v1",
			Type:                provider.TypeXAI,
			DefaultLargeModelID: "grok-beta",
			DefaultSmallModelID: "grok-beta",
			Models: []provider.Model{
				{
					ID:               "grok-beta",
					Name:             "Grok Beta",
					CostPer1MIn:      5.0,
					CostPer1MOut:     15.0,
					ContextWindow:    131072,
					DefaultMaxTokens: 4096,
					CanReason:        false,
					SupportsImages:   true,
				},
			},
		},
		{
			Name:                "OpenRouter",
			ID:                  provider.InferenceProviderOpenRouter,
			APIKey:              "$OPENROUTER_API_KEY",
			APIEndpoint:         "https://openrouter.ai/api/v1",
			Type:                provider.TypeOpenAI,
			DefaultLargeModelID: "anthropic/claude-3.5-sonnet",
			DefaultSmallModelID: "anthropic/claude-3.5-haiku",
			Models: []provider.Model{
				{
					ID:               "anthropic/claude-3.5-sonnet",
					Name:             "Claude 3.5 Sonnet",
					CostPer1MIn:      3.0,
					CostPer1MOut:     15.0,
					ContextWindow:    200000,
					DefaultMaxTokens: 8192,
					CanReason:        false,
					SupportsImages:   true,
				},
				{
					ID:               "anthropic/claude-3.5-haiku",
					Name:             "Claude 3.5 Haiku",
					CostPer1MIn:      0.8,
					CostPer1MOut:     4.0,
					ContextWindow:    200000,
					DefaultMaxTokens: 8192,
					CanReason:        false,
					SupportsImages:   true,
				},
			},
		},
	}
}
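
A minimal sketch of how a test in the same package might consume this mock, keeping with the doc comment's intent of consistent, offline test data. The file name, test name, and assertions below are illustrative assumptions and are not part of the original source.

provider_mock_test.go (hypothetical)

package config

import "testing"

// TestMockProvidersShape checks basic invariants of the mock data:
// every provider declares a default large model and at least one model entry.
func TestMockProvidersShape(t *testing.T) {
	providers := MockProviders()
	if len(providers) == 0 {
		t.Fatal("expected at least one mock provider")
	}
	for _, p := range providers {
		if p.DefaultLargeModelID == "" {
			t.Errorf("provider %s is missing a default large model ID", p.Name)
		}
		if len(p.Models) == 0 {
			t.Errorf("provider %s has no models", p.Name)
		}
	}
}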