feat(groq): add support for Groq using the OpenAI provider

Created by Hunter Casten

Change summary

README.md                         | 12 ++++
internal/llm/models/groq.go       | 82 +++++++++++++++++++++++++++++++++
internal/llm/models/models.go     | 18 ------
internal/llm/provider/provider.go |  8 +++
opencode-schema.json              | 70 ++++++++++++++++------------
5 files changed, 143 insertions(+), 47 deletions(-)

Detailed changes

README.md 🔗

@@ -91,6 +91,10 @@ You can configure OpenCode using environment variables:
     "anthropic": {
       "apiKey": "your-api-key",
       "disabled": false
+    },
+    "groq": {
+        "apiKey": "your-api-key",
+        "disabled": false
     }
   },
   "agents": {
@@ -158,6 +162,14 @@ OpenCode supports a variety of AI models from different providers:
 
 - Claude 3.7 Sonnet
 
+### Groq
+
+- Llama 4 Maverick (17b-128e-instruct)
+- Llama 4 Scout (17b-16e-instruct)
+- Qwen QwQ-32B
+- DeepSeek R1 Distill Llama 70B
+- Llama 3.3 70B Versatile
+
 ## Usage
 
 ```bash

internal/llm/models/groq.go 🔗

@@ -0,0 +1,82 @@
+package models
+
+const (
+	ProviderGROQ ModelProvider = "groq"
+
+	// GROQ
+	QWENQwq ModelID = "qwen-qwq"
+
+	// GROQ preview models
+	Llama4Scout               ModelID = "meta-llama/llama-4-scout-17b-16e-instruct"
+	Llama4Maverick            ModelID = "meta-llama/llama-4-maverick-17b-128e-instruct"
+	Llama3_3_70BVersatile     ModelID = "llama-3.3-70b-versatile"
+	DeepseekR1DistillLlama70b ModelID = "deepseek-r1-distill-llama-70b"
+)
+
+var GroqModels = map[ModelID]Model{
+	//
+	// GROQ
+	QWENQwq: {
+		ID:                 QWENQwq,
+		Name:               "Qwen Qwq",
+		Provider:           ProviderGROQ,
+		APIModel:           "qwen-qwq-32b",
+		CostPer1MIn:        0.29,
+		CostPer1MInCached:  0.275,
+		CostPer1MOutCached: 0.0,
+		CostPer1MOut:       0.39,
+		ContextWindow:      128_000,
+		DefaultMaxTokens:   50_000,
+		// the Groq API rejects the reasoningEffort parameter, so reasoning stays disabled
+		CanReason: false,
+	},
+
+	Llama4Scout: {
+		ID:                 Llama4Scout,
+		Name:               "Llama4Scout",
+		Provider:           ProviderGROQ,
+		APIModel:           "meta-llama/llama-4-scout-17b-16e-instruct",
+		CostPer1MIn:        0.11,
+		CostPer1MInCached:  0,
+		CostPer1MOutCached: 0,
+		CostPer1MOut:       0.34,
+		ContextWindow:      128_000, // model supports up to 10M; Groq serves 128K today
+	},
+
+	Llama4Maverick: {
+		ID:                 Llama4Maverick,
+		Name:               "Llama4Maverick",
+		Provider:           ProviderGROQ,
+		APIModel:           "meta-llama/llama-4-maverick-17b-128e-instruct",
+		CostPer1MIn:        0.20,
+		CostPer1MInCached:  0,
+		CostPer1MOutCached: 0,
+		CostPer1MOut:       0.20,
+		ContextWindow:      128_000,
+	},
+
+	Llama3_3_70BVersatile: {
+		ID:                 Llama3_3_70BVersatile,
+		Name:               "Llama3_3_70BVersatile",
+		Provider:           ProviderGROQ,
+		APIModel:           "llama-3.3-70b-versatile",
+		CostPer1MIn:        0.59,
+		CostPer1MInCached:  0,
+		CostPer1MOutCached: 0,
+		CostPer1MOut:       0.79,
+		ContextWindow:      128_000,
+	},
+
+	DeepseekR1DistillLlama70b: {
+		ID:                 DeepseekR1DistillLlama70b,
+		Name:               "DeepseekR1DistillLlama70b",
+		Provider:           ProviderGROQ,
+		APIModel:           "deepseek-r1-distill-llama-70b",
+		CostPer1MIn:        0.75,
+		CostPer1MInCached:  0,
+		CostPer1MOutCached: 0,
+		CostPer1MOut:       0.99,
+		ContextWindow:      128_000,
+		CanReason:          true,
+	},
+}
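
For orientation, the `CostPer1M*` fields above are USD per million tokens. A standalone sketch of how such fields are conventionally applied; the `estimateCostUSD` helper and the trimmed `Model` struct are illustrative only, not part of this change:

```go
package main

import "fmt"

// Trimmed-down stand-in for the fields of models.Model used here.
type Model struct {
	CostPer1MIn, CostPer1MOut float64
}

// estimateCostUSD prorates the per-million-token prices over actual usage.
func estimateCostUSD(m Model, inTokens, outTokens int64) float64 {
	return float64(inTokens)/1_000_000*m.CostPer1MIn +
		float64(outTokens)/1_000_000*m.CostPer1MOut
}

func main() {
	qwq := Model{CostPer1MIn: 0.29, CostPer1MOut: 0.39}
	// 10K prompt tokens + 2K completion tokens ≈ $0.0037
	fmt.Printf("$%.4f\n", estimateCostUSD(qwq, 10_000, 2_000))
}
```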

internal/llm/models/models.go 🔗

@@ -23,17 +23,12 @@ type Model struct {
 
 // Model IDs
 const ( // GEMINI
-	// GROQ
-	QWENQwq ModelID = "qwen-qwq"
-
 	// Bedrock
 	BedrockClaude37Sonnet ModelID = "bedrock.claude-3.7-sonnet"
 )
 
 const (
 	ProviderBedrock ModelProvider = "bedrock"
-	ProviderGROQ    ModelProvider = "groq"
-
 	// ForTests
 	ProviderMock ModelProvider = "__mock"
 )
@@ -63,18 +58,6 @@ var SupportedModels = map[ModelID]Model{
 	// 	CostPer1MOut:       0.4,
 	// },
 	//
-	// // GROQ
-	// QWENQwq: {
-	// 	ID:                 QWENQwq,
-	// 	Name:               "Qwen Qwq",
-	// 	Provider:           ProviderGROQ,
-	// 	APIModel:           "qwen-qwq-32b",
-	// 	CostPer1MIn:        0,
-	// 	CostPer1MInCached:  0,
-	// 	CostPer1MOutCached: 0,
-	// 	CostPer1MOut:       0,
-	// },
-	//
 	// // Bedrock
 	BedrockClaude37Sonnet: {
 		ID:                 BedrockClaude37Sonnet,
@@ -92,4 +75,5 @@ func init() {
 	maps.Copy(SupportedModels, AnthropicModels)
 	maps.Copy(SupportedModels, OpenAIModels)
 	maps.Copy(SupportedModels, GeminiModels)
+	maps.Copy(SupportedModels, GroqModels)
 }
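
After the `maps.Copy` in `init`, Groq models resolve through the same `SupportedModels` registry as every other provider. A minimal lookup sketch (the module import path is assumed):

```go
package main

import (
	"fmt"

	"github.com/opencode-ai/opencode/internal/llm/models" // assumed module path
)

func main() {
	// The models package's init() has already merged GroqModels in.
	m, ok := models.SupportedModels[models.Llama4Maverick]
	if !ok {
		panic("Groq models missing from the registry")
	}
	fmt.Println(m.Name, m.Provider, m.APIModel)
}
```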

internal/llm/provider/provider.go 🔗

@@ -107,6 +107,14 @@ func NewProvider(providerName models.ModelProvider, opts ...ProviderClientOption
 			options: clientOptions,
 			client:  newBedrockClient(clientOptions),
 		}, nil
+	case models.ProviderGROQ:
+		clientOptions.openaiOptions = append(clientOptions.openaiOptions,
+			WithOpenAIBaseURL("https://api.groq.com/openai/v1"),
+		)
+		return &baseProvider[OpenAIClient]{
+			options: clientOptions,
+			client:  newOpenAIClient(clientOptions),
+		}, nil
 	case models.ProviderMock:
 		// TODO: implement mock client for test
 		panic("not implemented")

opencode-schema.json 🔗

@@ -12,28 +12,33 @@
         "model": {
           "description": "Model ID for the agent",
           "enum": [
-            "gemini-2.0-flash",
-            "bedrock.claude-3.7-sonnet",
+            "claude-3.7-sonnet",
             "claude-3-opus",
-            "claude-3.5-sonnet",
+            "gpt-4.1-mini",
+            "gpt-4o",
             "gpt-4o-mini",
+            "gemini-2.0-flash-lite",
+            "meta-llama/llama-4-maverick-17b-128e-instruct",
+            "gpt-4.1",
+            "gpt-4.5-preview",
             "o1",
+            "gpt-4.1-nano",
             "o3-mini",
+            "gemini-2.5-flash",
+            "gemini-2.0-flash",
+            "meta-llama/llama-4-scout-17b-16e-instruct",
+            "bedrock.claude-3.7-sonnet",
             "o1-pro",
-            "o4-mini",
-            "claude-3-haiku",
-            "gpt-4o",
             "o3",
-            "gpt-4.1-mini",
-            "gpt-4.5-preview",
-            "gemini-2.5-flash",
+            "gemini-2.5",
+            "qwen-qwq",
+            "llama-3.3-70b-versatile",
+            "deepseek-r1-distill-llama-70b",
+            "claude-3.5-sonnet",
+            "claude-3-haiku",
             "claude-3.5-haiku",
-            "gpt-4.1",
-            "gemini-2.0-flash-lite",
-            "claude-3.7-sonnet",
-            "o1-mini",
-            "gpt-4.1-nano",
-            "gemini-2.5"
+            "o4-mini",
+            "o1-mini"
           ],
           "type": "string"
         },
@@ -67,28 +72,33 @@
           "model": {
             "description": "Model ID for the agent",
             "enum": [
-              "gemini-2.0-flash",
-              "bedrock.claude-3.7-sonnet",
+              "claude-3.7-sonnet",
               "claude-3-opus",
-              "claude-3.5-sonnet",
+              "gpt-4.1-mini",
+              "gpt-4o",
               "gpt-4o-mini",
+              "gemini-2.0-flash-lite",
+              "meta-llama/llama-4-maverick-17b-128e-instruct",
+              "gpt-4.1",
+              "gpt-4.5-preview",
               "o1",
+              "gpt-4.1-nano",
               "o3-mini",
+              "gemini-2.5-flash",
+              "gemini-2.0-flash",
+              "meta-llama/llama-4-scout-17b-16e-instruct",
+              "bedrock.claude-3.7-sonnet",
               "o1-pro",
-              "o4-mini",
-              "claude-3-haiku",
-              "gpt-4o",
               "o3",
-              "gpt-4.1-mini",
-              "gpt-4.5-preview",
-              "gemini-2.5-flash",
+              "gemini-2.5",
+              "qwen-qwq",
+              "llama-3.3-70b-versatile",
+              "deepseek-r1-distill-llama-70b",
+              "claude-3.5-sonnet",
+              "claude-3-haiku",
               "claude-3.5-haiku",
-              "gpt-4.1",
-              "gemini-2.0-flash-lite",
-              "claude-3.7-sonnet",
-              "o1-mini",
-              "gpt-4.1-nano",
-              "gemini-2.5"
+              "o4-mini",
+              "o1-mini"
             ],
             "type": "string"
           },