1package catwalk
 2
// Type represents the type of AI provider.
//
// It identifies the API protocol/SDK family used to talk to a provider,
// as opposed to InferenceProvider, which identifies the specific vendor.
type Type string

// All the supported AI provider types.
const (
	TypeOpenAI    Type = "openai"    // OpenAI-compatible API
	TypeAnthropic Type = "anthropic" // Anthropic Messages API
	TypeGemini    Type = "gemini"    // Google Gemini API
	TypeAzure     Type = "azure"     // Azure-hosted OpenAI
	TypeBedrock   Type = "bedrock"   // AWS Bedrock
	TypeVertexAI  Type = "vertexai"  // Google Vertex AI
)
15
// InferenceProvider represents the inference provider identifier.
//
// Unlike Type, which names the API protocol, an InferenceProvider names
// the concrete vendor/service; several providers here (e.g. xAI, Groq,
// OpenRouter) have no dedicated Type and are expected to reuse one of
// the protocol types above.
type InferenceProvider string

// All the inference providers supported by the system.
// Keep this list in sync with KnownProviders below.
const (
	InferenceProviderOpenAI     InferenceProvider = "openai"
	InferenceProviderAnthropic  InferenceProvider = "anthropic"
	InferenceProviderGemini     InferenceProvider = "gemini"
	InferenceProviderAzure      InferenceProvider = "azure"
	InferenceProviderBedrock    InferenceProvider = "bedrock"
	InferenceProviderVertexAI   InferenceProvider = "vertexai"
	InferenceProviderXAI        InferenceProvider = "xai"
	InferenceProviderGROQ       InferenceProvider = "groq"
	InferenceProviderOpenRouter InferenceProvider = "openrouter"
	InferenceProviderLambda     InferenceProvider = "lambda"
	InferenceProviderCerebras   InferenceProvider = "cerebras"
)
33
// Provider represents an AI provider configuration.
//
// JSON tags define the external wire format; most fields are optional
// (omitempty) so a minimal configuration only needs Name and ID.
type Provider struct {
	Name                string            `json:"name"`                            // human-readable display name
	ID                  InferenceProvider `json:"id"`                              // stable provider identifier (see InferenceProvider constants)
	APIKey              string            `json:"api_key,omitempty"`               // credential; may be empty when auth comes from the environment
	APIEndpoint         string            `json:"api_endpoint,omitempty"`          // override base URL; empty means the provider default
	Type                Type              `json:"type,omitempty"`                  // API protocol family (see Type constants)
	DefaultLargeModelID string            `json:"default_large_model_id,omitempty"` // ID of the model to prefer for large/complex tasks
	DefaultSmallModelID string            `json:"default_small_model_id,omitempty"` // ID of the model to prefer for small/fast tasks
	Models              []Model           `json:"models,omitempty"`                // models offered by this provider
	DefaultHeaders      map[string]string `json:"default_headers,omitempty"`       // extra HTTP headers sent on every request
}
46
// Model represents an AI model configuration.
//
// Cost fields are expressed per one million tokens; token counts use
// int64 to accommodate large context windows.
type Model struct {
	ID                     string  `json:"id"`                     // provider-specific model identifier
	Name                   string  `json:"name"`                   // human-readable display name
	CostPer1MIn            float64 `json:"cost_per_1m_in"`         // cost per 1M input tokens
	CostPer1MOut           float64 `json:"cost_per_1m_out"`        // cost per 1M output tokens
	CostPer1MInCached      float64 `json:"cost_per_1m_in_cached"`  // cost per 1M cached input tokens
	CostPer1MOutCached     float64 `json:"cost_per_1m_out_cached"` // cost per 1M cached output tokens
	ContextWindow          int64   `json:"context_window"`         // maximum context size in tokens
	DefaultMaxTokens       int64   `json:"default_max_tokens"`     // default output-token cap for requests
	CanReason              bool    `json:"can_reason"`             // model supports reasoning/thinking mode
	// NOTE(review): tag is plural ("efforts") while the field is singular;
	// likely a wire-format typo kept for compatibility — confirm before changing.
	HasReasoningEffort     bool    `json:"has_reasoning_efforts"`
	DefaultReasoningEffort string  `json:"default_reasoning_effort,omitempty"` // e.g. effort level used when none is specified
	// NOTE(review): field says "images" but the wire key is
	// "supports_attachments" — confirm intended scope before renaming either.
	SupportsImages         bool    `json:"supports_attachments"`
}
62
63// KnownProviders returns all the known inference providers.
64func KnownProviders() []InferenceProvider {
65	return []InferenceProvider{
66		InferenceProviderOpenAI,
67		InferenceProviderAnthropic,
68		InferenceProviderGemini,
69		InferenceProviderAzure,
70		InferenceProviderBedrock,
71		InferenceProviderVertexAI,
72		InferenceProviderXAI,
73		InferenceProviderGROQ,
74		InferenceProviderOpenRouter,
75		InferenceProviderLambda,
76		InferenceProviderCerebras,
77	}
78}