1package catwalk
2
// Type represents the type of AI provider, i.e. the API protocol/dialect
// used to talk to a backend (e.g. "openai", "anthropic"). Distinct from
// InferenceProvider, which identifies a specific hosted service.
type Type string
5
// All the supported AI provider types. These string values are used as the
// wire/config representation of Type, so they must remain stable.
const (
	TypeOpenAI    Type = "openai"
	TypeAnthropic Type = "anthropic"
	TypeGemini    Type = "gemini"
	TypeAzure     Type = "azure"
	TypeBedrock   Type = "bedrock"
	TypeVertexAI  Type = "vertexai"
)
15
// InferenceProvider represents the inference provider identifier — the
// specific hosted service (e.g. "openrouter", "groq") as opposed to the
// API protocol it speaks (see Type).
type InferenceProvider string
18
// All the inference providers supported by the system. These string values
// are used as stable identifiers (see Provider.ID), so they must not change.
// Keep this list in sync with KnownProviders below.
const (
	InferenceProviderOpenAI     InferenceProvider = "openai"
	InferenceProviderAnthropic  InferenceProvider = "anthropic"
	InferenceProviderGemini     InferenceProvider = "gemini"
	InferenceProviderAzure      InferenceProvider = "azure"
	InferenceProviderBedrock    InferenceProvider = "bedrock"
	InferenceProviderVertexAI   InferenceProvider = "vertexai"
	InferenceProviderXAI        InferenceProvider = "xai"
	InferenceProviderZAI        InferenceProvider = "zai"
	InferenceProviderGROQ       InferenceProvider = "groq"
	InferenceProviderOpenRouter InferenceProvider = "openrouter"
	InferenceProviderLambda     InferenceProvider = "lambda"
	InferenceProviderCerebras   InferenceProvider = "cerebras"
)
34
// Provider represents an AI provider configuration. The JSON tags define the
// external wire format, so field tags must remain stable.
type Provider struct {
	Name                string            `json:"name"`                            // Human-readable display name.
	ID                  InferenceProvider `json:"id"`                              // Stable provider identifier (one of the InferenceProvider constants).
	APIKey              string            `json:"api_key,omitempty"`               // Credential for the provider; may reference an env var — TODO confirm resolution rules at the call site.
	APIEndpoint         string            `json:"api_endpoint,omitempty"`          // Base URL override; empty means the provider's default endpoint.
	Type                Type              `json:"type,omitempty"`                  // API protocol/dialect used to talk to this provider.
	DefaultLargeModelID string            `json:"default_large_model_id,omitempty"` // ID of the model used by default for "large" tasks.
	DefaultSmallModelID string            `json:"default_small_model_id,omitempty"` // ID of the model used by default for "small" tasks.
	Models              []Model           `json:"models,omitempty"`                 // Models available from this provider.
	DefaultHeaders      map[string]string `json:"default_headers,omitempty"`        // Extra HTTP headers sent on every request to this provider.
}
47
// Model represents an AI model configuration. Costs are expressed in currency
// units per one million tokens; the JSON tags define the external wire format
// and must remain stable.
type Model struct {
	ID                 string  `json:"id"`                     // Provider-specific model identifier.
	Name               string  `json:"name"`                   // Human-readable display name.
	CostPer1MIn        float64 `json:"cost_per_1m_in"`         // Cost per 1M input (prompt) tokens.
	CostPer1MOut       float64 `json:"cost_per_1m_out"`        // Cost per 1M output (completion) tokens.
	CostPer1MInCached  float64 `json:"cost_per_1m_in_cached"`  // Cost per 1M cached input tokens.
	CostPer1MOutCached float64 `json:"cost_per_1m_out_cached"` // Cost per 1M cached output tokens.
	ContextWindow      int64   `json:"context_window"`         // Maximum context size in tokens.
	DefaultMaxTokens   int64   `json:"default_max_tokens"`     // Default cap on generated tokens per request.
	CanReason          bool    `json:"can_reason"`             // Whether the model supports reasoning/thinking output.
	// NOTE(review): tag is plural ("efforts") while the field is singular —
	// likely a historical typo, but it is the wire format; do not "fix" the
	// tag without a coordinated migration.
	HasReasoningEffort     bool   `json:"has_reasoning_efforts"`
	DefaultReasoningEffort string `json:"default_reasoning_effort,omitempty"` // Effort level used when none is specified.
	// NOTE(review): field name says "images" but the wire name is
	// "supports_attachments" — verify whether this covers non-image
	// attachments before renaming either side.
	SupportsImages bool `json:"supports_attachments"`
}
63
64// KnownProviders returns all the known inference providers.
65func KnownProviders() []InferenceProvider {
66 return []InferenceProvider{
67 InferenceProviderOpenAI,
68 InferenceProviderAnthropic,
69 InferenceProviderGemini,
70 InferenceProviderAzure,
71 InferenceProviderBedrock,
72 InferenceProviderVertexAI,
73 InferenceProviderXAI,
74 InferenceProviderZAI,
75 InferenceProviderGROQ,
76 InferenceProviderOpenRouter,
77 InferenceProviderLambda,
78 InferenceProviderCerebras,
79 }
80}