feat: add azure openai models (#74)

Created by YJG

Change summary

README.md                         |  33 ++++--
cmd/schema/main.go                |   1 +
go.mod                            |   7 +
go.sum                            |  15 +++
internal/config/config.go         |  11 ++
internal/llm/models/azure.go      | 157 +++++++++++++++++++++++++++++++++
internal/llm/models/models.go     |   1 +
internal/llm/provider/azure.go    |  47 +++++++++
internal/llm/provider/provider.go |   5 +
opencode-schema.json              |  25 +++++
10 files changed, 291 insertions(+), 11 deletions(-)

Detailed changes

README.md 🔗

@@ -11,7 +11,7 @@ OpenCode is a Go-based CLI application that brings AI assistance to your termina
 ## Features
 
 - **Interactive TUI**: Built with [Bubble Tea](https://github.com/charmbracelet/bubbletea) for a smooth terminal experience
-- **Multiple AI Providers**: Support for OpenAI, Anthropic Claude, Google Gemini, AWS Bedrock, and Groq
+- **Multiple AI Providers**: Support for OpenAI, Anthropic Claude, Google Gemini, AWS Bedrock, Groq, and Azure OpenAI
 - **Session Management**: Save and manage multiple conversation sessions
 - **Tool Integration**: AI can execute commands, search files, and modify code
 - **Vim-like Editor**: Integrated editor with text input capabilities
@@ -66,15 +66,19 @@ OpenCode looks for configuration in the following locations:
 
 You can configure OpenCode using environment variables:
 
-| Environment Variable    | Purpose                  |
-| ----------------------- | ------------------------ |
-| `ANTHROPIC_API_KEY`     | For Claude models        |
-| `OPENAI_API_KEY`        | For OpenAI models        |
-| `GEMINI_API_KEY`        | For Google Gemini models |
-| `GROQ_API_KEY`          | For Groq models          |
-| `AWS_ACCESS_KEY_ID`     | For AWS Bedrock (Claude) |
-| `AWS_SECRET_ACCESS_KEY` | For AWS Bedrock (Claude) |
-| `AWS_REGION`            | For AWS Bedrock (Claude) |
+| Environment Variable       | Purpose                                                |
+|----------------------------|--------------------------------------------------------|
+| `ANTHROPIC_API_KEY`        | For Claude models                                      |
+| `OPENAI_API_KEY`           | For OpenAI models                                      |
+| `GEMINI_API_KEY`           | For Google Gemini models                               |
+| `GROQ_API_KEY`             | For Groq models                                        |
+| `AWS_ACCESS_KEY_ID`        | For AWS Bedrock (Claude)                               |
+| `AWS_SECRET_ACCESS_KEY`    | For AWS Bedrock (Claude)                               |
+| `AWS_REGION`               | For AWS Bedrock (Claude)                               |
+| `AZURE_OPENAI_ENDPOINT`    | For Azure OpenAI models                                |
+| `AZURE_OPENAI_API_KEY`     | For Azure OpenAI models (optional when using Entra ID) |
+| `AZURE_OPENAI_API_VERSION` | For Azure OpenAI models                                |
+
 
 ### Configuration File Structure
 
@@ -170,6 +174,15 @@ OpenCode supports a variety of AI models from different providers:
 - Deepseek R1 distill Llama 70b
 - Llama 3.3 70b Versatile
 
+### Azure OpenAI
+
+- GPT-4.1 family (gpt-4.1, gpt-4.1-mini, gpt-4.1-nano)
+- GPT-4.5 Preview
+- GPT-4o family (gpt-4o, gpt-4o-mini)
+- O1 family (o1, o1-mini)
+- O3 family (o3, o3-mini)
+- O4 Mini
+
 ## Usage
 
 ```bash

cmd/schema/main.go 🔗

@@ -174,6 +174,7 @@ func generateSchema() map[string]any {
 		string(models.ProviderGemini),
 		string(models.ProviderGROQ),
 		string(models.ProviderBedrock),
+		string(models.ProviderAzure),
 	}
 
 	providerSchema["additionalProperties"].(map[string]any)["properties"].(map[string]any)["provider"] = map[string]any{

go.mod 🔗

@@ -5,6 +5,7 @@ go 1.24.0
 toolchain go1.24.2
 
 require (
+	github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.7.0
 	github.com/JohannesKaufmann/html-to-markdown v1.6.0
 	github.com/PuerkitoBio/goquery v1.9.2
 	github.com/alecthomas/chroma/v2 v2.15.0
@@ -44,6 +45,9 @@ require (
 	cloud.google.com/go/auth/oauth2adapt v0.2.6 // indirect
 	cloud.google.com/go/compute/metadata v0.6.0 // indirect
 	cloud.google.com/go/longrunning v0.5.7 // indirect
+	github.com/Azure/azure-sdk-for-go/sdk/azcore v1.17.0 // indirect
+	github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 // indirect
+	github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 // indirect
 	github.com/andybalholm/cascadia v1.3.2 // indirect
 	github.com/atotto/clipboard v0.1.4 // indirect
 	github.com/aws/aws-sdk-go-v2 v1.30.3 // indirect
@@ -74,11 +78,13 @@ require (
 	github.com/go-logr/logr v1.4.2 // indirect
 	github.com/go-logr/stdr v1.2.2 // indirect
 	github.com/go-viper/mapstructure/v2 v2.2.1 // indirect
+	github.com/golang-jwt/jwt/v5 v5.2.2 // indirect
 	github.com/google/s2a-go v0.1.8 // indirect
 	github.com/googleapis/enterprise-certificate-proxy v0.3.4 // indirect
 	github.com/googleapis/gax-go/v2 v2.14.1 // indirect
 	github.com/gorilla/css v1.0.1 // indirect
 	github.com/inconshreveable/mousetrap v1.1.0 // indirect
+	github.com/kylelemons/godebug v1.1.0 // indirect
 	github.com/lucasb-eyer/go-colorful v1.2.0 // indirect
 	github.com/mattn/go-isatty v0.0.20 // indirect
 	github.com/mattn/go-localereader v0.0.1 // indirect
@@ -89,6 +95,7 @@ require (
 	github.com/muesli/cancelreader v0.2.2 // indirect
 	github.com/ncruces/julianday v1.0.0 // indirect
 	github.com/pelletier/go-toml/v2 v2.2.3 // indirect
+	github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect
 	github.com/pmezard/go-difflib v1.0.0 // indirect
 	github.com/rivo/uniseg v0.4.7 // indirect
 	github.com/rogpeppe/go-internal v1.14.1 // indirect

go.sum 🔗

@@ -10,6 +10,14 @@ cloud.google.com/go/compute/metadata v0.6.0 h1:A6hENjEsCDtC1k8byVsgwvVcioamEHvZ4
 cloud.google.com/go/compute/metadata v0.6.0/go.mod h1:FjyFAW1MW0C203CEOMDTu3Dk1FlqW3Rga40jzHL4hfg=
 cloud.google.com/go/longrunning v0.5.7 h1:WLbHekDbjK1fVFD3ibpFFVoyizlLRl73I7YKuAKilhU=
 cloud.google.com/go/longrunning v0.5.7/go.mod h1:8GClkudohy1Fxm3owmBGid8W0pSgodEMwEAztp38Xng=
+github.com/Azure/azure-sdk-for-go/sdk/azcore v1.17.0 h1:g0EZJwz7xkXQiZAI5xi9f3WWFYBlX1CPTrR+NDToRkQ=
+github.com/Azure/azure-sdk-for-go/sdk/azcore v1.17.0/go.mod h1:XCW7KnZet0Opnr7HccfUw1PLc4CjHqpcaxW8DHklNkQ=
+github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.7.0 h1:tfLQ34V6F7tVSwoTf/4lH5sE0o6eCJuNDTmH09nDpbc=
+github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.7.0/go.mod h1:9kIvujWAA58nmPmWB1m23fyWic1kYZMxD9CxaWn4Qpg=
+github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 h1:ywEEhmNahHBihViHepv3xPBn1663uRv2t2q/ESv9seY=
+github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0/go.mod h1:iZDifYGJTIgIIkYRNWPENUnqx6bJ2xnSDFI2tjwZNuY=
+github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 h1:XHOnouVk1mxXfQidrMEnLlPk9UMeRtyBTnEFtxkV0kU=
+github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI=
 github.com/JohannesKaufmann/html-to-markdown v1.6.0 h1:04VXMiE50YYfCfLboJCLcgqF5x+rHJnb1ssNmqpLH/k=
 github.com/JohannesKaufmann/html-to-markdown v1.6.0/go.mod h1:NUI78lGg/a7vpEJTz/0uOcYMaibytE4BUOQS8k78yPQ=
 github.com/MakeNowJust/heredoc v1.0.0 h1:cXCdzVdstXyiTqTvfqk9SDHpKNjxuom+DOlyEeQ4pzQ=
@@ -113,6 +121,8 @@ github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
 github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
 github.com/go-viper/mapstructure/v2 v2.2.1 h1:ZAaOCxANMuZx5RCeg0mBdEZk7DZasvvZIxtHqx8aGss=
 github.com/go-viper/mapstructure/v2 v2.2.1/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM=
+github.com/golang-jwt/jwt/v5 v5.2.2 h1:Rl4B7itRWVtYIHFrSNd7vhTiz9UpLdi6gZhZ3wEeDy8=
+github.com/golang-jwt/jwt/v5 v5.2.2/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk=
 github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
 github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
 github.com/google/generative-ai-go v0.19.0 h1:R71szggh8wHMCUlEMsW2A/3T+5LdEIkiaHSYgSpUgdg=
@@ -140,6 +150,8 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
 github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
 github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
 github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
+github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
+github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
 github.com/lrstanley/bubblezone v0.0.0-20250315020633-c249a3fe1231 h1:9rjt7AfnrXKNSZhp36A3/4QAZAwGGCGD/p8Bse26zms=
 github.com/lrstanley/bubblezone v0.0.0-20250315020633-c249a3fe1231/go.mod h1:S5etECMx+sZnW0Gm100Ma9J1PgVCTgNyFaqGu2b08b4=
 github.com/lucasb-eyer/go-colorful v1.2.0 h1:1nnpGOrhyZZuNyfu1QjKiUICQ74+3FNCN69Aj6K7nkY=
@@ -177,6 +189,8 @@ github.com/openai/openai-go v0.1.0-beta.2 h1:Ra5nCFkbEl9w+UJwAciC4kqnIBUCcJazhmM
 github.com/openai/openai-go v0.1.0-beta.2/go.mod h1:g461MYGXEXBVdV5SaR/5tNzNbSfwTBBefwc+LlDCK0Y=
 github.com/pelletier/go-toml/v2 v2.2.3 h1:YmeHyLY8mFWbdkNWwpr+qIL2bEqT0o95WSdkNHvL12M=
 github.com/pelletier/go-toml/v2 v2.2.3/go.mod h1:MfCQTFTvCcUyyvvwm1+G6H/jORL20Xlb6rzQu9GuUkc=
+github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ=
+github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c/go.mod h1:7rwL4CYBLnjLxUqIJNnCWiEdr3bn6IUYi15bNlnbCCU=
 github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
 github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
 github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
@@ -295,6 +309,7 @@ golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBc
 golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=

internal/config/config.go 🔗

@@ -272,6 +272,15 @@ func setProviderDefaults() {
 		viper.SetDefault("agents.title.model", models.BedrockClaude37Sonnet)
 		return
 	}
+
+	if os.Getenv("AZURE_OPENAI_ENDPOINT") != "" {
+		// api-key may be empty when using Entra ID credentials – that's okay
+		viper.SetDefault("providers.azure.apiKey", os.Getenv("AZURE_OPENAI_API_KEY"))
+		viper.SetDefault("agents.coder.model", models.AzureGPT41)
+		viper.SetDefault("agents.task.model", models.AzureGPT41Mini)
+		viper.SetDefault("agents.title.model", models.AzureGPT41Mini)
+		return
+	}
 }
 
 // hasAWSCredentials checks if AWS credentials are available in the environment.
@@ -506,6 +515,8 @@ func getProviderAPIKey(provider models.ModelProvider) string {
 		return os.Getenv("GEMINI_API_KEY")
 	case models.ProviderGROQ:
 		return os.Getenv("GROQ_API_KEY")
+	case models.ProviderAzure:
+		return os.Getenv("AZURE_OPENAI_API_KEY")
 	case models.ProviderBedrock:
 		if hasAWSCredentials() {
 			return "aws-credentials-available"

internal/llm/models/azure.go 🔗

@@ -0,0 +1,157 @@
+package models
+
+const ProviderAzure ModelProvider = "azure"
+
+const (
+	AzureGPT41        ModelID = "azure.gpt-4.1"
+	AzureGPT41Mini    ModelID = "azure.gpt-4.1-mini"
+	AzureGPT41Nano    ModelID = "azure.gpt-4.1-nano"
+	AzureGPT45Preview ModelID = "azure.gpt-4.5-preview"
+	AzureGPT4o        ModelID = "azure.gpt-4o"
+	AzureGPT4oMini    ModelID = "azure.gpt-4o-mini"
+	AzureO1           ModelID = "azure.o1"
+	AzureO1Mini       ModelID = "azure.o1-mini"
+	AzureO3           ModelID = "azure.o3"
+	AzureO3Mini       ModelID = "azure.o3-mini"
+	AzureO4Mini       ModelID = "azure.o4-mini"
+)
+
+var AzureModels = map[ModelID]Model{
+	AzureGPT41: {
+		ID:                 AzureGPT41,
+		Name:               "Azure OpenAI – GPT 4.1",
+		Provider:           ProviderAzure,
+		APIModel:           "gpt-4.1",
+		CostPer1MIn:        OpenAIModels[GPT41].CostPer1MIn,
+		CostPer1MInCached:  OpenAIModels[GPT41].CostPer1MInCached,
+		CostPer1MOut:       OpenAIModels[GPT41].CostPer1MOut,
+		CostPer1MOutCached: OpenAIModels[GPT41].CostPer1MOutCached,
+		ContextWindow:      OpenAIModels[GPT41].ContextWindow,
+		DefaultMaxTokens:   OpenAIModels[GPT41].DefaultMaxTokens,
+	},
+	AzureGPT41Mini: {
+		ID:                 AzureGPT41Mini,
+		Name:               "Azure OpenAI – GPT 4.1 mini",
+		Provider:           ProviderAzure,
+		APIModel:           "gpt-4.1-mini",
+		CostPer1MIn:        OpenAIModels[GPT41Mini].CostPer1MIn,
+		CostPer1MInCached:  OpenAIModels[GPT41Mini].CostPer1MInCached,
+		CostPer1MOut:       OpenAIModels[GPT41Mini].CostPer1MOut,
+		CostPer1MOutCached: OpenAIModels[GPT41Mini].CostPer1MOutCached,
+		ContextWindow:      OpenAIModels[GPT41Mini].ContextWindow,
+		DefaultMaxTokens:   OpenAIModels[GPT41Mini].DefaultMaxTokens,
+	},
+	AzureGPT41Nano: {
+		ID:                 AzureGPT41Nano,
+		Name:               "Azure OpenAI – GPT 4.1 nano",
+		Provider:           ProviderAzure,
+		APIModel:           "gpt-4.1-nano",
+		CostPer1MIn:        OpenAIModels[GPT41Nano].CostPer1MIn,
+		CostPer1MInCached:  OpenAIModels[GPT41Nano].CostPer1MInCached,
+		CostPer1MOut:       OpenAIModels[GPT41Nano].CostPer1MOut,
+		CostPer1MOutCached: OpenAIModels[GPT41Nano].CostPer1MOutCached,
+		ContextWindow:      OpenAIModels[GPT41Nano].ContextWindow,
+		DefaultMaxTokens:   OpenAIModels[GPT41Nano].DefaultMaxTokens,
+	},
+	AzureGPT45Preview: {
+		ID:                 AzureGPT45Preview,
+		Name:               "Azure OpenAI – GPT 4.5 preview",
+		Provider:           ProviderAzure,
+		APIModel:           "gpt-4.5-preview",
+		CostPer1MIn:        OpenAIModels[GPT45Preview].CostPer1MIn,
+		CostPer1MInCached:  OpenAIModels[GPT45Preview].CostPer1MInCached,
+		CostPer1MOut:       OpenAIModels[GPT45Preview].CostPer1MOut,
+		CostPer1MOutCached: OpenAIModels[GPT45Preview].CostPer1MOutCached,
+		ContextWindow:      OpenAIModels[GPT45Preview].ContextWindow,
+		DefaultMaxTokens:   OpenAIModels[GPT45Preview].DefaultMaxTokens,
+	},
+	AzureGPT4o: {
+		ID:                 AzureGPT4o,
+		Name:               "Azure OpenAI – GPT-4o",
+		Provider:           ProviderAzure,
+		APIModel:           "gpt-4o",
+		CostPer1MIn:        OpenAIModels[GPT4o].CostPer1MIn,
+		CostPer1MInCached:  OpenAIModels[GPT4o].CostPer1MInCached,
+		CostPer1MOut:       OpenAIModels[GPT4o].CostPer1MOut,
+		CostPer1MOutCached: OpenAIModels[GPT4o].CostPer1MOutCached,
+		ContextWindow:      OpenAIModels[GPT4o].ContextWindow,
+		DefaultMaxTokens:   OpenAIModels[GPT4o].DefaultMaxTokens,
+	},
+	AzureGPT4oMini: {
+		ID:                 AzureGPT4oMini,
+		Name:               "Azure OpenAI – GPT-4o mini",
+		Provider:           ProviderAzure,
+		APIModel:           "gpt-4o-mini",
+		CostPer1MIn:        OpenAIModels[GPT4oMini].CostPer1MIn,
+		CostPer1MInCached:  OpenAIModels[GPT4oMini].CostPer1MInCached,
+		CostPer1MOut:       OpenAIModels[GPT4oMini].CostPer1MOut,
+		CostPer1MOutCached: OpenAIModels[GPT4oMini].CostPer1MOutCached,
+		ContextWindow:      OpenAIModels[GPT4oMini].ContextWindow,
+		DefaultMaxTokens:   OpenAIModels[GPT4oMini].DefaultMaxTokens,
+	},
+	AzureO1: {
+		ID:                 AzureO1,
+		Name:               "Azure OpenAI – O1",
+		Provider:           ProviderAzure,
+		APIModel:           "o1",
+		CostPer1MIn:        OpenAIModels[O1].CostPer1MIn,
+		CostPer1MInCached:  OpenAIModels[O1].CostPer1MInCached,
+		CostPer1MOut:       OpenAIModels[O1].CostPer1MOut,
+		CostPer1MOutCached: OpenAIModels[O1].CostPer1MOutCached,
+		ContextWindow:      OpenAIModels[O1].ContextWindow,
+		DefaultMaxTokens:   OpenAIModels[O1].DefaultMaxTokens,
+		CanReason:          OpenAIModels[O1].CanReason,
+	},
+	AzureO1Mini: {
+		ID:                 AzureO1Mini,
+		Name:               "Azure OpenAI – O1 mini",
+		Provider:           ProviderAzure,
+		APIModel:           "o1-mini",
+		CostPer1MIn:        OpenAIModels[O1Mini].CostPer1MIn,
+		CostPer1MInCached:  OpenAIModels[O1Mini].CostPer1MInCached,
+		CostPer1MOut:       OpenAIModels[O1Mini].CostPer1MOut,
+		CostPer1MOutCached: OpenAIModels[O1Mini].CostPer1MOutCached,
+		ContextWindow:      OpenAIModels[O1Mini].ContextWindow,
+		DefaultMaxTokens:   OpenAIModels[O1Mini].DefaultMaxTokens,
+		CanReason:          OpenAIModels[O1Mini].CanReason,
+	},
+	AzureO3: {
+		ID:                 AzureO3,
+		Name:               "Azure OpenAI – O3",
+		Provider:           ProviderAzure,
+		APIModel:           "o3",
+		CostPer1MIn:        OpenAIModels[O3].CostPer1MIn,
+		CostPer1MInCached:  OpenAIModels[O3].CostPer1MInCached,
+		CostPer1MOut:       OpenAIModels[O3].CostPer1MOut,
+		CostPer1MOutCached: OpenAIModels[O3].CostPer1MOutCached,
+		ContextWindow:      OpenAIModels[O3].ContextWindow,
+		DefaultMaxTokens:   OpenAIModels[O3].DefaultMaxTokens,
+		CanReason:          OpenAIModels[O3].CanReason,
+	},
+	AzureO3Mini: {
+		ID:                 AzureO3Mini,
+		Name:               "Azure OpenAI – O3 mini",
+		Provider:           ProviderAzure,
+		APIModel:           "o3-mini",
+		CostPer1MIn:        OpenAIModels[O3Mini].CostPer1MIn,
+		CostPer1MInCached:  OpenAIModels[O3Mini].CostPer1MInCached,
+		CostPer1MOut:       OpenAIModels[O3Mini].CostPer1MOut,
+		CostPer1MOutCached: OpenAIModels[O3Mini].CostPer1MOutCached,
+		ContextWindow:      OpenAIModels[O3Mini].ContextWindow,
+		DefaultMaxTokens:   OpenAIModels[O3Mini].DefaultMaxTokens,
+		CanReason:          OpenAIModels[O3Mini].CanReason,
+	},
+	AzureO4Mini: {
+		ID:                 AzureO4Mini,
+		Name:               "Azure OpenAI – O4 mini",
+		Provider:           ProviderAzure,
+		APIModel:           "o4-mini",
+		CostPer1MIn:        OpenAIModels[O4Mini].CostPer1MIn,
+		CostPer1MInCached:  OpenAIModels[O4Mini].CostPer1MInCached,
+		CostPer1MOut:       OpenAIModels[O4Mini].CostPer1MOut,
+		CostPer1MOutCached: OpenAIModels[O4Mini].CostPer1MOutCached,
+		ContextWindow:      OpenAIModels[O4Mini].ContextWindow,
+		DefaultMaxTokens:   OpenAIModels[O4Mini].DefaultMaxTokens,
+		CanReason:          OpenAIModels[O4Mini].CanReason,
+	},
+}

internal/llm/models/models.go 🔗

@@ -76,4 +76,5 @@ func init() {
 	maps.Copy(SupportedModels, OpenAIModels)
 	maps.Copy(SupportedModels, GeminiModels)
 	maps.Copy(SupportedModels, GroqModels)
+	maps.Copy(SupportedModels, AzureModels)
 }
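
Because AzureModels is merged into SupportedModels here, the new entries resolve through the same lookup path as every other provider, and their cost and context-window fields are copied verbatim from the matching OpenAI entries. A small lookup sketch; the module import path is a placeholder, since the repository's real module name is not shown in this diff:

```go
package main

import (
	"fmt"

	// Placeholder module path; substitute the repository's actual module name.
	"example.com/opencode/internal/llm/models"
)

func main() {
	m := models.SupportedModels[models.AzureGPT4o]
	fmt.Println(m.Provider) // azure
	fmt.Println(m.APIModel) // gpt-4o, i.e. the model name sent to the Azure deployment
	// Cost and context-window values mirror OpenAIModels[models.GPT4o].
}
```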

internal/llm/provider/azure.go 🔗

@@ -0,0 +1,47 @@
+package provider
+
+import (
+	"os"
+
+	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
+	"github.com/openai/openai-go"
+	"github.com/openai/openai-go/azure"
+	"github.com/openai/openai-go/option"
+)
+
+type azureClient struct {
+	*openaiClient
+}
+
+type AzureClient ProviderClient
+
+func newAzureClient(opts providerClientOptions) AzureClient {
+
+	endpoint := os.Getenv("AZURE_OPENAI_ENDPOINT")      // ex: https://foo.openai.azure.com
+	apiVersion := os.Getenv("AZURE_OPENAI_API_VERSION") // ex: 2025-04-01-preview
+
+	if endpoint == "" || apiVersion == "" {
+		return &azureClient{openaiClient: newOpenAIClient(opts).(*openaiClient)}
+	}
+
+	reqOpts := []option.RequestOption{
+		azure.WithEndpoint(endpoint, apiVersion),
+	}
+
+	if opts.apiKey != "" || os.Getenv("AZURE_OPENAI_API_KEY") != "" {
+		key := opts.apiKey
+		if key == "" {
+			key = os.Getenv("AZURE_OPENAI_API_KEY")
+		}
+		reqOpts = append(reqOpts, azure.WithAPIKey(key))
+	} else if cred, err := azidentity.NewDefaultAzureCredential(nil); err == nil {
+		reqOpts = append(reqOpts, azure.WithTokenCredential(cred))
+	}
+
+	base := &openaiClient{
+		providerOptions: opts,
+		client:          openai.NewClient(reqOpts...),
+	}
+
+	return &azureClient{openaiClient: base}
+}
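
Credential resolution in newAzureClient is: an explicitly configured opts.apiKey or AZURE_OPENAI_API_KEY first, otherwise DefaultAzureCredential (Entra ID); and if either AZURE_OPENAI_ENDPOINT or AZURE_OPENAI_API_VERSION is unset, it falls back to the plain OpenAI client. A standalone sketch of the Entra ID path, using only the calls that appear above; the endpoint and API version are placeholders:

```go
package main

import (
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
	"github.com/openai/openai-go"
	"github.com/openai/openai-go/azure"
)

func main() {
	// DefaultAzureCredential picks up az login, managed identity, environment credentials, etc.
	cred, err := azidentity.NewDefaultAzureCredential(nil)
	if err != nil {
		log.Fatal(err)
	}

	client := openai.NewClient(
		azure.WithEndpoint("https://my-resource.openai.azure.com", "2025-04-01-preview"),
		azure.WithTokenCredential(cred),
	)
	_ = client // requests then go through the usual openai-go API surface
}
```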

internal/llm/provider/provider.go 🔗

@@ -115,6 +115,11 @@ func NewProvider(providerName models.ModelProvider, opts ...ProviderClientOption
 			options: clientOptions,
 			client:  newOpenAIClient(clientOptions),
 		}, nil
+	case models.ProviderAzure:
+		return &baseProvider[AzureClient]{
+			options: clientOptions,
+			client:  newAzureClient(clientOptions),
+		}, nil
 	case models.ProviderMock:
 		// TODO: implement mock client for test
 		panic("not implemented")

opencode-schema.json 🔗

@@ -17,26 +17,37 @@
             "claude-3.7-sonnet",
             "claude-3.5-haiku",
             "o3",
+            "azure.o3",
             "gpt-4.5-preview",
+            "azure.gpt-4.5-preview",
             "o1-pro",
             "o4-mini",
+            "azure.o4-mini",
             "gpt-4.1",
+            "azure.gpt-4.1",
             "o3-mini",
+            "azure.o3-mini",
             "gpt-4.1-nano",
+            "azure.gpt-4.1-nano",
             "gpt-4o-mini",
+            "azure.gpt-4o-mini",
             "o1",
+            "azure.o1",
             "gemini-2.5-flash",
             "qwen-qwq",
             "meta-llama/llama-4-maverick-17b-128e-instruct",
             "claude-3-opus",
             "gpt-4o",
+            "azure.gpt-4o",
             "gemini-2.0-flash-lite",
             "gemini-2.0-flash",
             "deepseek-r1-distill-llama-70b",
             "llama-3.3-70b-versatile",
             "claude-3.5-sonnet",
             "o1-mini",
+            "azure.o1-mini",
             "gpt-4.1-mini",
+            "azure.gpt-4.1-mini",
             "gemini-2.5",
             "meta-llama/llama-4-scout-17b-16e-instruct"
           ],
@@ -77,26 +88,37 @@
               "claude-3.7-sonnet",
               "claude-3.5-haiku",
               "o3",
+              "azure.o3",
               "gpt-4.5-preview",
+              "azure.gpt-4.5-preview",
               "o1-pro",
               "o4-mini",
+              "azure.o4-mini",
               "gpt-4.1",
+              "azure.gpt-4.1",
               "o3-mini",
+              "azure.o3-mini",
               "gpt-4.1-nano",
+              "azure.gpt-4.1-nano",
               "gpt-4o-mini",
+              "azure.gpt-4o-mini",
               "o1",
+              "azure.o1",
               "gemini-2.5-flash",
               "qwen-qwq",
               "meta-llama/llama-4-maverick-17b-128e-instruct",
               "claude-3-opus",
               "gpt-4o",
+              "azure.gpt-4o",
               "gemini-2.0-flash-lite",
               "gemini-2.0-flash",
               "deepseek-r1-distill-llama-70b",
               "llama-3.3-70b-versatile",
               "claude-3.5-sonnet",
               "o1-mini",
+              "azure.o1-mini",
               "gpt-4.1-mini",
+              "azure.gpt-4.1-mini",
               "gemini-2.5",
               "meta-llama/llama-4-scout-17b-16e-instruct"
             ],
@@ -279,7 +301,8 @@
               "openai",
               "gemini",
               "groq",
-              "bedrock"
+              "bedrock",
+              "azure"
             ],
             "type": "string"
           }