1package catwalk
2
// Type represents the type of AI provider, i.e. the API protocol/family
// used to communicate with it (OpenAI, Anthropic, Bedrock, ...).
type Type string
5
// All the supported AI provider types. The string values are wire-level
// identifiers (used in the "type" JSON field of Provider) and must not change.
const (
	TypeOpenAI       Type = "openai"
	TypeOpenAICompat Type = "openai-compat"
	TypeOpenRouter   Type = "openrouter"
	TypeVercel       Type = "vercel"
	TypeAnthropic    Type = "anthropic"
	TypeGoogle       Type = "google"
	TypeAzure        Type = "azure"
	TypeBedrock      Type = "bedrock"
	TypeVertexAI     Type = "google-vertex"
)
18
// InferenceProvider represents the inference provider identifier, i.e. the
// stable string ID of a concrete provider (used in the "id" JSON field of
// Provider).
type InferenceProvider string
21
22// All the inference providers supported by the system.
23const (
24 InferenceProviderOpenAI InferenceProvider = "openai"
25 InferenceProviderAnthropic InferenceProvider = "anthropic"
26 InferenceProviderSynthetic InferenceProvider = "synthetic"
27 InferenceProviderGemini InferenceProvider = "gemini"
28 InferenceProviderAzure InferenceProvider = "azure"
29 InferenceProviderBedrock InferenceProvider = "bedrock"
30 InferenceProviderVertexAI InferenceProvider = "vertexai"
31 InferenceProviderXAI InferenceProvider = "xai"
32 InferenceProviderZAI InferenceProvider = "zai"
33 InferenceProviderZhipu InferenceProvider = "zhipu"
34 InferenceProviderZhipuCoding InferenceProvider = "zhipu-coding"
35 InferenceProviderGROQ InferenceProvider = "groq"
36 InferenceProviderOpenRouter InferenceProvider = "openrouter"
37 InferenceProviderCerebras InferenceProvider = "cerebras"
38 InferenceProviderVenice InferenceProvider = "venice"
39 InferenceProviderChutes InferenceProvider = "chutes"
40 InferenceProviderHuggingFace InferenceProvider = "huggingface"
41 InferenceAIHubMix InferenceProvider = "aihubmix"
42 InferenceKimiCoding InferenceProvider = "kimi-coding"
43 InferenceProviderCopilot InferenceProvider = "copilot"
44 InferenceProviderVercel InferenceProvider = "vercel"
45 InferenceProviderMiniMax InferenceProvider = "minimax"
46 InferenceProviderMiniMaxChina InferenceProvider = "minimax-china"
47 InferenceProviderIoNet InferenceProvider = "ionet"
48 InferenceProviderQiniuCloud InferenceProvider = "qiniucloud"
49)
50
// Provider represents an AI provider configuration.
type Provider struct {
	Name                string            `json:"name"`                             // human-readable display name
	ID                  InferenceProvider `json:"id"`                               // stable provider identifier
	APIKey              string            `json:"api_key,omitempty"`                // API key credential, if any
	APIEndpoint         string            `json:"api_endpoint,omitempty"`           // base URL of the API, if non-default
	Type                Type              `json:"type,omitempty"`                   // API protocol/family used to talk to the provider
	DefaultLargeModelID string            `json:"default_large_model_id,omitempty"` // ID of the default "large" model
	DefaultSmallModelID string            `json:"default_small_model_id,omitempty"` // ID of the default "small" model
	Models              []Model           `json:"models,omitempty"`                 // models offered by this provider
	DefaultHeaders      map[string]string `json:"default_headers,omitempty"`        // extra HTTP headers sent on every request — TODO confirm against caller
}
63
// ModelOptions stores extra options for models. All fields are pointers so
// that "unset" (nil) can be distinguished from an explicit zero value, and
// unset fields are omitted from the JSON encoding.
type ModelOptions struct {
	Temperature      *float64       `json:"temperature,omitempty"`
	TopP             *float64       `json:"top_p,omitempty"`
	TopK             *int64         `json:"top_k,omitempty"`
	FrequencyPenalty *float64       `json:"frequency_penalty,omitempty"`
	PresencePenalty  *float64       `json:"presence_penalty,omitempty"`
	ProviderOptions  map[string]any `json:"provider_options,omitempty"` // free-form provider-specific settings
}
73
// Model represents an AI model configuration.
type Model struct {
	ID   string `json:"id"`   // provider-specific model identifier
	Name string `json:"name"` // human-readable display name

	// Pricing, expressed as cost per one million tokens (input, output, and
	// their cached variants).
	CostPer1MIn        float64 `json:"cost_per_1m_in"`
	CostPer1MOut       float64 `json:"cost_per_1m_out"`
	CostPer1MInCached  float64 `json:"cost_per_1m_in_cached"`
	CostPer1MOutCached float64 `json:"cost_per_1m_out_cached"`

	ContextWindow    int64 `json:"context_window"`     // context size in tokens — presumably the maximum; confirm with providers
	DefaultMaxTokens int64 `json:"default_max_tokens"` // default output-token limit

	CanReason              bool     `json:"can_reason"`                         // whether the model supports reasoning
	ReasoningLevels        []string `json:"reasoning_levels,omitempty"`         // supported reasoning-effort levels, if any
	DefaultReasoningEffort string   `json:"default_reasoning_effort,omitempty"` // default entry among ReasoningLevels — TODO confirm

	// NOTE(review): field name says "images" but the JSON tag is
	// "supports_attachments" — the tag is the wire contract, so it must not
	// be changed; confirm whether the Go name should be aligned instead.
	SupportsImages bool `json:"supports_attachments"`

	Options ModelOptions `json:"options"` // extra generation options
}
90
91// KnownProviders returns all the known inference providers.
92func KnownProviders() []InferenceProvider {
93 return []InferenceProvider{
94 InferenceProviderOpenAI,
95 InferenceProviderSynthetic,
96 InferenceProviderAnthropic,
97 InferenceProviderGemini,
98 InferenceProviderAzure,
99 InferenceProviderBedrock,
100 InferenceProviderVertexAI,
101 InferenceProviderXAI,
102 InferenceProviderZAI,
103 InferenceProviderZhipu,
104 InferenceProviderZhipuCoding,
105 InferenceProviderGROQ,
106 InferenceProviderOpenRouter,
107 InferenceProviderCerebras,
108 InferenceProviderVenice,
109 InferenceProviderChutes,
110 InferenceProviderHuggingFace,
111 InferenceAIHubMix,
112 InferenceKimiCoding,
113 InferenceProviderCopilot,
114 InferenceProviderVercel,
115 InferenceProviderMiniMax,
116 InferenceProviderMiniMaxChina,
117 InferenceProviderQiniuCloud,
118 }
119}
120
121// KnownProviderTypes returns all the known inference providers types.
122func KnownProviderTypes() []Type {
123 return []Type{
124 TypeOpenAI,
125 TypeOpenAICompat,
126 TypeOpenRouter,
127 TypeVercel,
128 TypeAnthropic,
129 TypeGoogle,
130 TypeAzure,
131 TypeBedrock,
132 TypeVertexAI,
133 }
134}