feat: support VertexAI provider (#153)

mineo created

* support: vertexai

fix

fix

set default for vertexai

added comment

fix

fix

* create schema

* fix README.md

* fix order

* added popularity

* set tools only if tools exist

restore commented-out code

* fix comment

* set summarizer model

Change summary

README.md                         |   7 +
cmd/schema/main.go                |   1 
internal/config/config.go         |  45 +++++++
internal/llm/models/models.go     |   2 
internal/llm/models/vertexai.go   |  38 ++++++
internal/llm/provider/gemini.go   |  18 ++-
internal/llm/provider/provider.go |   5 
internal/llm/provider/vertexai.go |  34 +++++
opencode-schema.json              | 195 +++++++++++++++++---------------
9 files changed, 248 insertions(+), 97 deletions(-)

Detailed changes

README.md 🔗

@@ -89,6 +89,8 @@ You can configure OpenCode using environment variables:
 | `ANTHROPIC_API_KEY`        | For Claude models                                      |
 | `OPENAI_API_KEY`           | For OpenAI models                                      |
 | `GEMINI_API_KEY`           | For Google Gemini models                               |
+| `VERTEXAI_PROJECT`         | For Google Cloud VertexAI (Gemini)                     |
+| `VERTEXAI_LOCATION`        | For Google Cloud VertexAI (Gemini)                     |
 | `GROQ_API_KEY`             | For Groq models                                        |
 | `AWS_ACCESS_KEY_ID`        | For AWS Bedrock (Claude)                               |
 | `AWS_SECRET_ACCESS_KEY`    | For AWS Bedrock (Claude)                               |
@@ -227,6 +229,11 @@ OpenCode supports a variety of AI models from different providers:
 - O3 family (o3, o3-mini)
 - O4 Mini
 
+### Google Cloud VertexAI
+
+- Gemini 2.5
+- Gemini 2.5 Flash
+ 
 ## Usage
 
 ```bash

cmd/schema/main.go 🔗

@@ -199,6 +199,7 @@ func generateSchema() map[string]any {
 		string(models.ProviderOpenRouter),
 		string(models.ProviderBedrock),
 		string(models.ProviderAzure),
+		string(models.ProviderVertexAI),
 	}
 
 	providerSchema["additionalProperties"].(map[string]any)["properties"].(map[string]any)["provider"] = map[string]any{

internal/config/config.go 🔗

@@ -276,6 +276,7 @@ func setProviderDefaults() {
 	// 5. OpenRouter
 	// 6. AWS Bedrock
 	// 7. Azure
+	// 8. Google Cloud VertexAI
 
 	// Anthropic configuration
 	if key := viper.GetString("providers.anthropic.apiKey"); strings.TrimSpace(key) != "" {
@@ -348,6 +349,15 @@ func setProviderDefaults() {
 		viper.SetDefault("agents.title.model", models.AzureGPT41Mini)
 		return
 	}
+
+	// Google Cloud VertexAI configuration
+	if hasVertexAICredentials() {
+		viper.SetDefault("agents.coder.model", models.VertexAIGemini25)
+		viper.SetDefault("agents.summarizer.model", models.VertexAIGemini25)
+		viper.SetDefault("agents.task.model", models.VertexAIGemini25Flash)
+		viper.SetDefault("agents.title.model", models.VertexAIGemini25Flash)
+		return
+	}
 }
 
 // hasAWSCredentials checks if AWS credentials are available in the environment.
@@ -376,6 +386,19 @@ func hasAWSCredentials() bool {
 	return false
 }
 
+// hasVertexAICredentials checks if VertexAI credentials are available in the environment.
+func hasVertexAICredentials() bool {
+	// Check for explicit VertexAI parameters
+	if os.Getenv("VERTEXAI_PROJECT") != "" && os.Getenv("VERTEXAI_LOCATION") != "" {
+		return true
+	}
+	// Check for Google Cloud project and location
+	if os.Getenv("GOOGLE_CLOUD_PROJECT") != "" && (os.Getenv("GOOGLE_CLOUD_REGION") != "" || os.Getenv("GOOGLE_CLOUD_LOCATION") != "") {
+		return true
+	}
+	return false
+}
+
 // readConfig handles the result of reading a configuration file.
 func readConfig(err error) error {
 	if err == nil {
@@ -598,6 +621,10 @@ func getProviderAPIKey(provider models.ModelProvider) string {
 		if hasAWSCredentials() {
 			return "aws-credentials-available"
 		}
+	case models.ProviderVertexAI:
+		if hasVertexAICredentials() {
+			return "vertex-ai-credentials-available"
+		}
 	}
 	return ""
 }
@@ -718,6 +745,24 @@ func setDefaultModelForAgent(agent AgentName) bool {
 		return true
 	}
 
+	if hasVertexAICredentials() {
+		var model models.ModelID
+		maxTokens := int64(5000)
+
+		if agent == AgentTitle {
+			model = models.VertexAIGemini25Flash
+			maxTokens = 80
+		} else {
+			model = models.VertexAIGemini25
+		}
+
+		cfg.Agents[agent] = Agent{
+			Model:     model,
+			MaxTokens: maxTokens,
+		}
+		return true
+	}
+
 	return false
 }
 

internal/llm/models/models.go 🔗

@@ -43,6 +43,7 @@ var ProviderPopularity = map[ModelProvider]int{
 	ProviderOpenRouter: 5,
 	ProviderBedrock:    6,
 	ProviderAzure:      7,
+	ProviderVertexAI:   8,
 }
 
 var SupportedModels = map[ModelID]Model{
@@ -91,4 +92,5 @@ func init() {
 	maps.Copy(SupportedModels, AzureModels)
 	maps.Copy(SupportedModels, OpenRouterModels)
 	maps.Copy(SupportedModels, XAIModels)
+	maps.Copy(SupportedModels, VertexAIGeminiModels)
 }

internal/llm/models/vertexai.go 🔗

@@ -0,0 +1,38 @@
+package models
+
+const (
+	ProviderVertexAI ModelProvider = "vertexai"
+
+	// Models
+	VertexAIGemini25Flash ModelID = "vertexai.gemini-2.5-flash"
+	VertexAIGemini25      ModelID = "vertexai.gemini-2.5"
+)
+
+var VertexAIGeminiModels = map[ModelID]Model{
+	VertexAIGemini25Flash: {
+		ID:                  VertexAIGemini25Flash,
+		Name:                "VertexAI: Gemini 2.5 Flash",
+		Provider:            ProviderVertexAI,
+		APIModel:            "gemini-2.5-flash-preview-04-17",
+		CostPer1MIn:         GeminiModels[Gemini25Flash].CostPer1MIn,
+		CostPer1MInCached:   GeminiModels[Gemini25Flash].CostPer1MInCached,
+		CostPer1MOut:        GeminiModels[Gemini25Flash].CostPer1MOut,
+		CostPer1MOutCached:  GeminiModels[Gemini25Flash].CostPer1MOutCached,
+		ContextWindow:       GeminiModels[Gemini25Flash].ContextWindow,
+		DefaultMaxTokens:    GeminiModels[Gemini25Flash].DefaultMaxTokens,
+		SupportsAttachments: true,
+	},
+	VertexAIGemini25: {
+		ID:                  VertexAIGemini25,
+		Name:                "VertexAI: Gemini 2.5 Pro",
+		Provider:            ProviderVertexAI,
+		APIModel:            "gemini-2.5-pro-preview-03-25",
+		CostPer1MIn:         GeminiModels[Gemini25].CostPer1MIn,
+		CostPer1MInCached:   GeminiModels[Gemini25].CostPer1MInCached,
+		CostPer1MOut:        GeminiModels[Gemini25].CostPer1MOut,
+		CostPer1MOutCached:  GeminiModels[Gemini25].CostPer1MOutCached,
+		ContextWindow:       GeminiModels[Gemini25].ContextWindow,
+		DefaultMaxTokens:    GeminiModels[Gemini25].DefaultMaxTokens,
+		SupportsAttachments: true,
+	},
+}

internal/llm/provider/gemini.go 🔗

@@ -175,13 +175,16 @@ func (g *geminiClient) send(ctx context.Context, messages []message.Message, too
 
 	history := geminiMessages[:len(geminiMessages)-1] // All but last message
 	lastMsg := geminiMessages[len(geminiMessages)-1]
-	chat, _ := g.client.Chats.Create(ctx, g.providerOptions.model.APIModel, &genai.GenerateContentConfig{
+	config := &genai.GenerateContentConfig{
 		MaxOutputTokens: int32(g.providerOptions.maxTokens),
 		SystemInstruction: &genai.Content{
 			Parts: []*genai.Part{{Text: g.providerOptions.systemMessage}},
 		},
-		Tools: g.convertTools(tools),
-	}, history)
+	}
+	if len(tools) > 0 {
+		config.Tools = g.convertTools(tools)
+	}
+	chat, _ := g.client.Chats.Create(ctx, g.providerOptions.model.APIModel, config, history)
 
 	attempts := 0
 	for {
@@ -260,13 +263,16 @@ func (g *geminiClient) stream(ctx context.Context, messages []message.Message, t
 
 	history := geminiMessages[:len(geminiMessages)-1] // All but last message
 	lastMsg := geminiMessages[len(geminiMessages)-1]
-	chat, _ := g.client.Chats.Create(ctx, g.providerOptions.model.APIModel, &genai.GenerateContentConfig{
+	config := &genai.GenerateContentConfig{
 		MaxOutputTokens: int32(g.providerOptions.maxTokens),
 		SystemInstruction: &genai.Content{
 			Parts: []*genai.Part{{Text: g.providerOptions.systemMessage}},
 		},
-		Tools: g.convertTools(tools),
-	}, history)
+	}
+	if len(tools) > 0 {
+		config.Tools = g.convertTools(tools)
+	}
+	chat, _ := g.client.Chats.Create(ctx, g.providerOptions.model.APIModel, config, history)
 
 	attempts := 0
 	eventChan := make(chan ProviderEvent)

internal/llm/provider/provider.go 🔗

@@ -120,6 +120,11 @@ func NewProvider(providerName models.ModelProvider, opts ...ProviderClientOption
 			options: clientOptions,
 			client:  newAzureClient(clientOptions),
 		}, nil
+	case models.ProviderVertexAI:
+		return &baseProvider[VertexAIClient]{
+			options: clientOptions,
+			client:  newVertexAIClient(clientOptions),
+		}, nil
 	case models.ProviderOpenRouter:
 		clientOptions.openaiOptions = append(clientOptions.openaiOptions,
 			WithOpenAIBaseURL("https://openrouter.ai/api/v1"),

internal/llm/provider/vertexai.go 🔗

@@ -0,0 +1,34 @@
+package provider
+
+import (
+	"context"
+	"os"
+
+	"github.com/opencode-ai/opencode/internal/logging"
+	"google.golang.org/genai"
+)
+
+type VertexAIClient ProviderClient
+
+func newVertexAIClient(opts providerClientOptions) VertexAIClient {
+	geminiOpts := geminiOptions{}
+	for _, o := range opts.geminiOptions {
+		o(&geminiOpts)
+	}
+
+	client, err := genai.NewClient(context.Background(), &genai.ClientConfig{
+		Project:  os.Getenv("VERTEXAI_PROJECT"),
+		Location: os.Getenv("VERTEXAI_LOCATION"),
+		Backend:  genai.BackendVertexAI,
+	})
+	if err != nil {
+		logging.Error("Failed to create VertexAI client", "error", err)
+		return nil
+	}
+
+	return &geminiClient{
+		providerOptions: opts,
+		options:         geminiOpts,
+		client:          client,
+	}
+}

opencode-schema.json 🔗

@@ -12,63 +12,69 @@
         "model": {
           "description": "Model ID for the agent",
           "enum": [
-            "gpt-4o-mini",
-            "o1-pro",
+            "grok-3-fast-beta",
+            "claude-3-opus",
+            "gemini-2.5",
+            "openrouter.claude-3-haiku",
+            "grok-3-beta",
+            "gpt-4.1",
             "azure.gpt-4o-mini",
+            "openrouter.gpt-4o",
+            "openrouter.o4-mini",
+            "openrouter.o1-pro",
+            "gpt-4.1-nano",
+            "azure.gpt-4.5-preview",
+            "openrouter.gpt-4o-mini",
+            "claude-3.5-sonnet",
+            "claude-3-haiku",
+            "qwen-qwq",
+            "openrouter.claude-3.7-sonnet",
+            "gemini-2.5-flash",
+            "azure.o4-mini",
             "openrouter.gpt-4.1-mini",
+            "gpt-4o",
+            "openrouter.gemini-2.5",
+            "gpt-4.1-mini",
+            "azure.gpt-4.1",
+            "azure.o1-mini",
+            "o1-pro",
+            "claude-3.7-sonnet",
+            "o3",
+            "gpt-4.5-preview",
+            "azure.o3-mini",
+            "grok-3-mini-beta",
             "openrouter.o1-mini",
-            "bedrock.claude-3.7-sonnet",
             "meta-llama/llama-4-scout-17b-16e-instruct",
-            "openrouter.gpt-4o-mini",
+            "azure.o1",
+            "openrouter.gemini-2.5-flash",
+            "openrouter.claude-3-opus",
+            "o1-mini",
             "gemini-2.0-flash",
-            "deepseek-r1-distill-llama-70b",
-            "openrouter.claude-3.7-sonnet",
-            "openrouter.gpt-4.5-preview",
-            "azure.o3-mini",
+            "openrouter.gpt-4.1",
             "openrouter.claude-3.5-haiku",
-            "azure.o1-mini",
-            "openrouter.o1",
-            "openrouter.gemini-2.5",
+            "deepseek-r1-distill-llama-70b",
+            "claude-3.5-haiku",
+            "o3-mini",
             "llama-3.3-70b-versatile",
-            "gpt-4.5-preview",
-            "openrouter.claude-3-opus",
-            "openrouter.claude-3.5-sonnet",
-            "o4-mini",
+            "azure.gpt-4.1-nano",
+            "openrouter.gpt-4.5-preview",
             "gemini-2.0-flash-lite",
-            "azure.gpt-4.5-preview",
-            "openrouter.gpt-4o",
-            "o1",
             "azure.gpt-4o",
+            "openrouter.o3-mini",
+            "openrouter.o1",
             "openrouter.gpt-4.1-nano",
-            "o3",
-            "gpt-4.1",
-            "azure.o1",
-            "claude-3-haiku",
-            "claude-3-opus",
-            "gpt-4.1-mini",
-            "openrouter.o4-mini",
-            "openrouter.gemini-2.5-flash",
-            "claude-3.5-haiku",
-            "o3-mini",
+            "grok-3-mini-fast-beta",
+            "vertexai.gemini-2.5-flash",
+            "o4-mini",
             "azure.o3",
-            "gpt-4o",
-            "azure.gpt-4.1",
-            "openrouter.claude-3-haiku",
-            "gpt-4.1-nano",
-            "azure.gpt-4.1-nano",
-            "claude-3.7-sonnet",
-            "gemini-2.5",
-            "azure.o4-mini",
-            "o1-mini",
-            "qwen-qwq",
-            "meta-llama/llama-4-maverick-17b-128e-instruct",
-            "openrouter.gpt-4.1",
-            "openrouter.o1-pro",
-            "openrouter.o3",
-            "claude-3.5-sonnet",
-            "gemini-2.5-flash",
             "azure.gpt-4.1-mini",
-            "openrouter.o3-mini"
+            "openrouter.o3",
+            "gpt-4o-mini",
+            "o1",
+            "vertexai.gemini-2.5",
+            "bedrock.claude-3.7-sonnet",
+            "meta-llama/llama-4-maverick-17b-128e-instruct",
+            "openrouter.claude-3.5-sonnet"
           ],
           "type": "string"
         },
@@ -102,63 +108,69 @@
           "model": {
             "description": "Model ID for the agent",
             "enum": [
-              "gpt-4o-mini",
-              "o1-pro",
+              "grok-3-fast-beta",
+              "claude-3-opus",
+              "gemini-2.5",
+              "openrouter.claude-3-haiku",
+              "grok-3-beta",
+              "gpt-4.1",
               "azure.gpt-4o-mini",
+              "openrouter.gpt-4o",
+              "openrouter.o4-mini",
+              "openrouter.o1-pro",
+              "gpt-4.1-nano",
+              "azure.gpt-4.5-preview",
+              "openrouter.gpt-4o-mini",
+              "claude-3.5-sonnet",
+              "claude-3-haiku",
+              "qwen-qwq",
+              "openrouter.claude-3.7-sonnet",
+              "gemini-2.5-flash",
+              "azure.o4-mini",
               "openrouter.gpt-4.1-mini",
+              "gpt-4o",
+              "openrouter.gemini-2.5",
+              "gpt-4.1-mini",
+              "azure.gpt-4.1",
+              "azure.o1-mini",
+              "o1-pro",
+              "claude-3.7-sonnet",
+              "o3",
+              "gpt-4.5-preview",
+              "azure.o3-mini",
+              "grok-3-mini-beta",
               "openrouter.o1-mini",
-              "bedrock.claude-3.7-sonnet",
               "meta-llama/llama-4-scout-17b-16e-instruct",
-              "openrouter.gpt-4o-mini",
+              "azure.o1",
+              "openrouter.gemini-2.5-flash",
+              "openrouter.claude-3-opus",
+              "o1-mini",
               "gemini-2.0-flash",
-              "deepseek-r1-distill-llama-70b",
-              "openrouter.claude-3.7-sonnet",
-              "openrouter.gpt-4.5-preview",
-              "azure.o3-mini",
+              "openrouter.gpt-4.1",
               "openrouter.claude-3.5-haiku",
-              "azure.o1-mini",
-              "openrouter.o1",
-              "openrouter.gemini-2.5",
+              "deepseek-r1-distill-llama-70b",
+              "claude-3.5-haiku",
+              "o3-mini",
               "llama-3.3-70b-versatile",
-              "gpt-4.5-preview",
-              "openrouter.claude-3-opus",
-              "openrouter.claude-3.5-sonnet",
-              "o4-mini",
+              "azure.gpt-4.1-nano",
+              "openrouter.gpt-4.5-preview",
               "gemini-2.0-flash-lite",
-              "azure.gpt-4.5-preview",
-              "openrouter.gpt-4o",
-              "o1",
               "azure.gpt-4o",
+              "openrouter.o3-mini",
+              "openrouter.o1",
               "openrouter.gpt-4.1-nano",
-              "o3",
-              "gpt-4.1",
-              "azure.o1",
-              "claude-3-haiku",
-              "claude-3-opus",
-              "gpt-4.1-mini",
-              "openrouter.o4-mini",
-              "openrouter.gemini-2.5-flash",
-              "claude-3.5-haiku",
-              "o3-mini",
+              "grok-3-mini-fast-beta",
+              "vertexai.gemini-2.5-flash",
+              "o4-mini",
               "azure.o3",
-              "gpt-4o",
-              "azure.gpt-4.1",
-              "openrouter.claude-3-haiku",
-              "gpt-4.1-nano",
-              "azure.gpt-4.1-nano",
-              "claude-3.7-sonnet",
-              "gemini-2.5",
-              "azure.o4-mini",
-              "o1-mini",
-              "qwen-qwq",
-              "meta-llama/llama-4-maverick-17b-128e-instruct",
-              "openrouter.gpt-4.1",
-              "openrouter.o1-pro",
-              "openrouter.o3",
-              "claude-3.5-sonnet",
-              "gemini-2.5-flash",
               "azure.gpt-4.1-mini",
-              "openrouter.o3-mini"
+              "openrouter.o3",
+              "gpt-4o-mini",
+              "o1",
+              "vertexai.gemini-2.5",
+              "bedrock.claude-3.7-sonnet",
+              "meta-llama/llama-4-maverick-17b-128e-instruct",
+              "openrouter.claude-3.5-sonnet"
             ],
             "type": "string"
           },
@@ -341,7 +353,8 @@
               "groq",
               "openrouter",
               "bedrock",
-              "azure"
+              "azure",
+              "vertexai"
             ],
             "type": "string"
           }