package catwalk

// Type represents the type of AI provider.
type Type string

// All the supported AI provider types.
const (
	TypeOpenAI    Type = "openai"
	TypeAnthropic Type = "anthropic"
	TypeGemini    Type = "gemini"
	TypeAzure     Type = "azure"
	TypeBedrock   Type = "bedrock"
	TypeVertexAI  Type = "vertexai"
)

// InferenceProvider represents the inference provider identifier.
type InferenceProvider string

// All the inference providers supported by the system.
const (
	InferenceProviderOpenAI      InferenceProvider = "openai"
	InferenceProviderAnthropic   InferenceProvider = "anthropic"
	InferenceProviderGemini      InferenceProvider = "gemini"
	InferenceProviderAzure       InferenceProvider = "azure"
	InferenceProviderBedrock     InferenceProvider = "bedrock"
	InferenceProviderVertexAI    InferenceProvider = "vertexai"
	InferenceProviderXAI         InferenceProvider = "xai"
	InferenceProviderZAI         InferenceProvider = "zai"
	InferenceProviderGROQ        InferenceProvider = "groq"
	InferenceProviderOpenRouter  InferenceProvider = "openrouter"
	InferenceProviderCerebras    InferenceProvider = "cerebras"
	InferenceProviderVenice      InferenceProvider = "venice"
	InferenceProviderChutes      InferenceProvider = "chutes"
	InferenceProviderHuggingFace InferenceProvider = "huggingface"
	InferenceAIHubMix            InferenceProvider = "aihubmix"
)

// Provider represents an AI provider configuration.
type Provider struct {
	Name                string            `json:"name"`
	ID                  InferenceProvider `json:"id"`
	APIKey              string            `json:"api_key,omitempty"`
	APIEndpoint         string            `json:"api_endpoint,omitempty"`
	Type                Type              `json:"type,omitempty"`
	DefaultLargeModelID string            `json:"default_large_model_id,omitempty"`
	DefaultSmallModelID string            `json:"default_small_model_id,omitempty"`
	Models              []Model           `json:"models,omitempty"`
	DefaultHeaders      map[string]string `json:"default_headers,omitempty"`
}
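
// exampleProvider is an illustrative sketch, not part of the original API:
// it shows how a Provider value might be filled in. The endpoint, model IDs,
// and header below are placeholders, not defaults shipped with this package.
func exampleProvider() Provider {
	return Provider{
		Name:                "Example",
		ID:                  InferenceProviderOpenAI,
		Type:                TypeOpenAI,
		APIEndpoint:         "https://api.example.com/v1",
		DefaultLargeModelID: "example-large",
		DefaultSmallModelID: "example-small",
		DefaultHeaders:      map[string]string{"X-Example-Header": "enabled"},
	}
}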

// Model represents an AI model configuration.
type Model struct {
	ID                     string  `json:"id"`
	Name                   string  `json:"name"`
	CostPer1MIn            float64 `json:"cost_per_1m_in"`
	CostPer1MOut           float64 `json:"cost_per_1m_out"`
	CostPer1MInCached      float64 `json:"cost_per_1m_in_cached"`
	CostPer1MOutCached     float64 `json:"cost_per_1m_out_cached"`
	ContextWindow          int64   `json:"context_window"`
	DefaultMaxTokens       int64   `json:"default_max_tokens"`
	CanReason              bool    `json:"can_reason"`
	HasReasoningEffort     bool    `json:"has_reasoning_efforts"`
	DefaultReasoningEffort string  `json:"default_reasoning_effort,omitempty"`
	SupportsImages         bool    `json:"supports_attachments"`
}
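
// estimateCost is an illustrative sketch, not part of the original API: the
// cost fields on Model are expressed per one million tokens, so a rough price
// for a single uncached request can be derived as shown here.
func estimateCost(m Model, inputTokens, outputTokens int64) float64 {
	return float64(inputTokens)/1_000_000*m.CostPer1MIn +
		float64(outputTokens)/1_000_000*m.CostPer1MOut
}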

// KnownProviders returns all the known inference providers.
func KnownProviders() []InferenceProvider {
	return []InferenceProvider{
		InferenceProviderOpenAI,
		InferenceProviderAnthropic,
		InferenceProviderGemini,
		InferenceProviderAzure,
		InferenceProviderBedrock,
		InferenceProviderVertexAI,
		InferenceProviderXAI,
		InferenceProviderZAI,
		InferenceProviderGROQ,
		InferenceProviderOpenRouter,
		InferenceProviderCerebras,
		InferenceProviderVenice,
		InferenceProviderChutes,
		InferenceProviderHuggingFace,
		InferenceAIHubMix,
	}
}
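
// isKnownProvider is an illustrative sketch, not part of the original API:
// it shows KnownProviders backing a simple membership check.
func isKnownProvider(id InferenceProvider) bool {
	for _, p := range KnownProviders() {
		if p == id {
			return true
		}
	}
	return false
}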