package models

const (
	ProviderVertexAI InferenceProvider = "vertexai"

	// Models
	VertexAIGemini25Flash ModelID = "vertexai.gemini-2.5-flash"
	VertexAIGemini25      ModelID = "vertexai.gemini-2.5"
)

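// VertexAIGeminiModels lists the Gemini 2.5 models served through Vertex AI.
// Pricing, context window, and default token limits mirror the corresponding
// GeminiModels entries; only the provider and API model identifiers differ.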
var VertexAIGeminiModels = map[ModelID]Model{
	VertexAIGemini25Flash: {
		ID:                  VertexAIGemini25Flash,
		Name:                "VertexAI: Gemini 2.5 Flash",
		Provider:            ProviderVertexAI,
		APIModel:            "gemini-2.5-flash-preview-04-17",
		CostPer1MIn:         GeminiModels[Gemini25Flash].CostPer1MIn,
		CostPer1MInCached:   GeminiModels[Gemini25Flash].CostPer1MInCached,
		CostPer1MOut:        GeminiModels[Gemini25Flash].CostPer1MOut,
		CostPer1MOutCached:  GeminiModels[Gemini25Flash].CostPer1MOutCached,
		ContextWindow:       GeminiModels[Gemini25Flash].ContextWindow,
		DefaultMaxTokens:    GeminiModels[Gemini25Flash].DefaultMaxTokens,
		SupportsAttachments: true,
	},
	VertexAIGemini25: {
		ID:                  VertexAIGemini25,
		Name:                "VertexAI: Gemini 2.5 Pro",
		Provider:            ProviderVertexAI,
		APIModel:            "gemini-2.5-pro-preview-03-25",
		CostPer1MIn:         GeminiModels[Gemini25].CostPer1MIn,
		CostPer1MInCached:   GeminiModels[Gemini25].CostPer1MInCached,
		CostPer1MOut:        GeminiModels[Gemini25].CostPer1MOut,
		CostPer1MOutCached:  GeminiModels[Gemini25].CostPer1MOutCached,
		ContextWindow:       GeminiModels[Gemini25].ContextWindow,
		DefaultMaxTokens:    GeminiModels[Gemini25].DefaultMaxTokens,
		SupportsAttachments: true,
	},
}