Commits by Bryan Vaz:
* feat: add github copilot
* fix: add support for claude4
 .gitignore                         |   1
 README.md                          |  72 ++
 internal/config/config.go          | 116 +++++
 internal/llm/agent/agent.go        |  26 +
 internal/llm/models/copilot.go     | 219 ++++++++++
 internal/llm/models/models.go      |  18
 internal/llm/provider/anthropic.go |  24
 internal/llm/provider/copilot.go   | 671 ++++++++++++++++++++++++++++++++
 internal/llm/provider/provider.go  |  12
 internal/llm/tools/view.go         |   2
 internal/logging/logger.go         | 133 ++++++
 internal/logging/writer.go         |   1
 opencode-schema.json               |  29 +
 13 files changed, 1276 insertions(+), 48 deletions(-)
.gitignore
@@ -44,3 +44,4 @@ Thumbs.db
.opencode/
opencode
+opencode.md
README.md
@@ -96,22 +96,23 @@ You can enable or disable this feature in your configuration file:
You can configure OpenCode using environment variables:
-| Environment Variable | Purpose |
-| -------------------------- | ------------------------------------------------------ |
-| `ANTHROPIC_API_KEY` | For Claude models |
-| `OPENAI_API_KEY` | For OpenAI models |
-| `GEMINI_API_KEY` | For Google Gemini models |
-| `VERTEXAI_PROJECT` | For Google Cloud VertexAI (Gemini) |
-| `VERTEXAI_LOCATION` | For Google Cloud VertexAI (Gemini) |
-| `GROQ_API_KEY` | For Groq models |
-| `AWS_ACCESS_KEY_ID` | For AWS Bedrock (Claude) |
-| `AWS_SECRET_ACCESS_KEY` | For AWS Bedrock (Claude) |
-| `AWS_REGION` | For AWS Bedrock (Claude) |
-| `AZURE_OPENAI_ENDPOINT` | For Azure OpenAI models |
-| `AZURE_OPENAI_API_KEY` | For Azure OpenAI models (optional when using Entra ID) |
-| `AZURE_OPENAI_API_VERSION` | For Azure OpenAI models |
-| `LOCAL_ENDPOINT` | For self-hosted models |
-| `SHELL` | Default shell to use (if not specified in config) |
+| Environment Variable | Purpose |
+| -------------------------- | -------------------------------------------------------------------------------- |
+| `ANTHROPIC_API_KEY` | For Claude models |
+| `OPENAI_API_KEY` | For OpenAI models |
+| `GEMINI_API_KEY` | For Google Gemini models |
+| `GITHUB_TOKEN`              | For GitHub Copilot models (see [Using GitHub Copilot](#using-github-copilot))     |
+| `VERTEXAI_PROJECT` | For Google Cloud VertexAI (Gemini) |
+| `VERTEXAI_LOCATION` | For Google Cloud VertexAI (Gemini) |
+| `GROQ_API_KEY` | For Groq models |
+| `AWS_ACCESS_KEY_ID` | For AWS Bedrock (Claude) |
+| `AWS_SECRET_ACCESS_KEY` | For AWS Bedrock (Claude) |
+| `AWS_REGION` | For AWS Bedrock (Claude) |
+| `AZURE_OPENAI_ENDPOINT` | For Azure OpenAI models |
+| `AZURE_OPENAI_API_KEY` | For Azure OpenAI models (optional when using Entra ID) |
+| `AZURE_OPENAI_API_VERSION` | For Azure OpenAI models |
+| `LOCAL_ENDPOINT` | For self-hosted models |
+| `SHELL` | Default shell to use (if not specified in config) |
### Shell Configuration
@@ -146,6 +147,9 @@ This is useful if you want to use a different shell than your default system she
"apiKey": "your-api-key",
"disabled": false
},
+ "copilot": {
+ "disabled": false
+ },
"groq": {
"apiKey": "your-api-key",
"disabled": false
@@ -216,6 +220,23 @@ OpenCode supports a variety of AI models from different providers:
- Claude 3 Haiku
- Claude 3 Opus
+### GitHub Copilot
+
+- GPT-3.5 Turbo
+- GPT-4
+- GPT-4o
+- GPT-4o Mini
+- GPT-4.1
+- Claude 3.5 Sonnet
+- Claude 3.7 Sonnet
+- Claude 3.7 Sonnet Thinking
+- Claude Sonnet 4
+- O1
+- O3 Mini
+- O4 Mini
+- Gemini 2.0 Flash
+- Gemini 2.5 Pro
+
### Google
- Gemini 2.5
@@ -579,6 +600,25 @@ The AI assistant can access LSP features through the `diagnostics` tool, allowin
While the LSP client implementation supports the full LSP protocol (including completions, hover, definition, etc.), currently only diagnostics are exposed to the AI assistant.
+## Using GitHub Copilot
+
+_Copilot support is currently experimental._
+
+### Requirements
+- [Copilot chat in the IDE](https://github.com/settings/copilot) enabled in GitHub settings
+- One of:
+  - VS Code GitHub Copilot Chat extension
+  - GitHub `gh` CLI
+  - Neovim GitHub Copilot plugin (`copilot.vim` or `copilot.lua`)
+  - A GitHub token with Copilot permissions
+
+If you are using one of the above plugins or CLI tools, make sure you authenticate
+the tool with your GitHub account. This should create a GitHub token at one of the following locations:
+- `~/.config/github-copilot/[hosts,apps].json`
+- `$XDG_CONFIG_HOME/github-copilot/[hosts,apps].json`
+
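+For reference, OpenCode reads an entry like the following from `hosts.json`/`apps.json` (an illustrative sketch: only the `oauth_token` field under a `github.com` key is used, and the token value here is a placeholder):
+
+```json
+{
+  "github.com": {
+    "oauth_token": "gho_XXXXXXXXXXXXXXXX"
+  }
+}
+```
+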
+If you are using an explicit GitHub token, you may either set the `$GITHUB_TOKEN` environment variable or add it to your `opencode.json` config file at `providers.copilot.apiKey`.
+
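+A minimal `opencode.json` sketch for the explicit-token route (the `copilot` block mirrors the provider configuration example above; the key value is a placeholder):
+
+```json
+{
+  "providers": {
+    "copilot": {
+      "apiKey": "your-github-token",
+      "disabled": false
+    }
+  }
+}
+```
+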
## Using a self-hosted model provider
OpenCode can also load and use models from a self-hosted (OpenAI-like) provider.
internal/config/config.go
@@ -7,6 +7,7 @@ import (
"log/slog"
"os"
"path/filepath"
+ "runtime"
"strings"
"github.com/opencode-ai/opencode/internal/llm/models"
@@ -161,6 +162,7 @@ func Load(workingDir string, debug bool) (*Config, error) {
}
if os.Getenv("OPENCODE_DEV_DEBUG") == "true" {
loggingFile := fmt.Sprintf("%s/%s", cfg.Data.Directory, "debug.log")
+ messagesPath := fmt.Sprintf("%s/%s", cfg.Data.Directory, "messages")
// if file does not exist create it
if _, err := os.Stat(loggingFile); os.IsNotExist(err) {
@@ -172,6 +174,13 @@ func Load(workingDir string, debug bool) (*Config, error) {
}
}
+ if _, err := os.Stat(messagesPath); os.IsNotExist(err) {
+ if err := os.MkdirAll(messagesPath, 0o756); err != nil {
+ return cfg, fmt.Errorf("failed to create directory: %w", err)
+ }
+ }
+ logging.MessageDir = messagesPath
+
sloggingFileWriter, err := os.OpenFile(loggingFile, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0o666)
if err != nil {
return cfg, fmt.Errorf("failed to open log file: %w", err)
@@ -245,6 +254,7 @@ func setDefaults(debug bool) {
// environment variables and configuration file.
func setProviderDefaults() {
// Set all API keys we can find in the environment
+ // Note: Viper does not default if the json apiKey is ""
if apiKey := os.Getenv("ANTHROPIC_API_KEY"); apiKey != "" {
viper.SetDefault("providers.anthropic.apiKey", apiKey)
}
@@ -267,16 +277,32 @@ func setProviderDefaults() {
// api-key may be empty when using Entra ID credentials – that's okay
viper.SetDefault("providers.azure.apiKey", os.Getenv("AZURE_OPENAI_API_KEY"))
}
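+	// Copilot: fall back to a GitHub token found in the environment or in the
+	// github-copilot config files. The explicit viper.Set covers the case the
+	// note above describes, where an empty apiKey in the JSON config would
+	// otherwise win over the default.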
+ if apiKey, err := LoadGitHubToken(); err == nil && apiKey != "" {
+ viper.SetDefault("providers.copilot.apiKey", apiKey)
+ if viper.GetString("providers.copilot.apiKey") == "" {
+ viper.Set("providers.copilot.apiKey", apiKey)
+ }
+ }
// Use this order to set the default models
- // 1. Anthropic
- // 2. OpenAI
- // 3. Google Gemini
- // 4. Groq
- // 5. OpenRouter
- // 6. AWS Bedrock
- // 7. Azure
- // 8. Google Cloud VertexAI
+ // 1. Copilot
+ // 2. Anthropic
+ // 3. OpenAI
+ // 4. Google Gemini
+ // 5. Groq
+ // 6. OpenRouter
+ // 7. AWS Bedrock
+ // 8. Azure
+ // 9. Google Cloud VertexAI
+
+ // copilot configuration
+ if key := viper.GetString("providers.copilot.apiKey"); strings.TrimSpace(key) != "" {
+ viper.SetDefault("agents.coder.model", models.CopilotGPT4o)
+ viper.SetDefault("agents.summarizer.model", models.CopilotGPT4o)
+ viper.SetDefault("agents.task.model", models.CopilotGPT4o)
+ viper.SetDefault("agents.title.model", models.CopilotGPT4o)
+ return
+ }
// Anthropic configuration
if key := viper.GetString("providers.anthropic.apiKey"); strings.TrimSpace(key) != "" {
@@ -399,6 +425,14 @@ func hasVertexAICredentials() bool {
return false
}
+func hasCopilotCredentials() bool {
+ // Check for explicit Copilot parameters
+ if token, _ := LoadGitHubToken(); token != "" {
+ return true
+ }
+ return false
+}
+
// readConfig handles the result of reading a configuration file.
func readConfig(err error) error {
if err == nil {
@@ -440,6 +474,9 @@ func applyDefaultValues() {
// It validates model IDs and providers, ensuring they are supported.
func validateAgent(cfg *Config, name AgentName, agent Agent) error {
// Check if model exists
+ // TODO: If a copilot model is specified, but model is not found,
+ // it might be new model. The https://api.githubcopilot.com/models
+ // endpoint should be queried to validate if the model is supported.
model, modelExists := models.SupportedModels[agent.Model]
if !modelExists {
logging.Warn("unsupported model configured, reverting to default",
@@ -584,6 +621,7 @@ func Validate() error {
// Validate providers
for provider, providerCfg := range cfg.Providers {
if providerCfg.APIKey == "" && !providerCfg.Disabled {
+			fmt.Printf("provider has no API key, marking as disabled: %s\n", provider)
logging.Warn("provider has no API key, marking as disabled", "provider", provider)
providerCfg.Disabled = true
cfg.Providers[provider] = providerCfg
@@ -631,6 +669,18 @@ func getProviderAPIKey(provider models.ModelProvider) string {
// setDefaultModelForAgent sets a default model for an agent based on available providers
func setDefaultModelForAgent(agent AgentName) bool {
+ if hasCopilotCredentials() {
+ maxTokens := int64(5000)
+ if agent == AgentTitle {
+ maxTokens = 80
+ }
+
+ cfg.Agents[agent] = Agent{
+ Model: models.CopilotGPT4o,
+ MaxTokens: maxTokens,
+ }
+ return true
+ }
// Check providers in order of preference
if apiKey := os.Getenv("ANTHROPIC_API_KEY"); apiKey != "" {
maxTokens := int64(5000)
@@ -878,3 +928,53 @@ func UpdateTheme(themeName string) error {
config.TUI.Theme = themeName
})
}
+
+// Tries to load Github token from all possible locations
+func LoadGitHubToken() (string, error) {
+ // First check environment variable
+ if token := os.Getenv("GITHUB_TOKEN"); token != "" {
+ return token, nil
+ }
+
+ // Get config directory
+ var configDir string
+ if xdgConfig := os.Getenv("XDG_CONFIG_HOME"); xdgConfig != "" {
+ configDir = xdgConfig
+ } else if runtime.GOOS == "windows" {
+ if localAppData := os.Getenv("LOCALAPPDATA"); localAppData != "" {
+ configDir = localAppData
+ } else {
+ configDir = filepath.Join(os.Getenv("HOME"), "AppData", "Local")
+ }
+ } else {
+ configDir = filepath.Join(os.Getenv("HOME"), ".config")
+ }
+
+ // Try both hosts.json and apps.json files
+ filePaths := []string{
+ filepath.Join(configDir, "github-copilot", "hosts.json"),
+ filepath.Join(configDir, "github-copilot", "apps.json"),
+ }
+
+ for _, filePath := range filePaths {
+ data, err := os.ReadFile(filePath)
+ if err != nil {
+ continue
+ }
+
+ var config map[string]map[string]interface{}
+ if err := json.Unmarshal(data, &config); err != nil {
+ continue
+ }
+
+ for key, value := range config {
+ if strings.Contains(key, "github.com") {
+ if oauthToken, ok := value["oauth_token"].(string); ok {
+ return oauthToken, nil
+ }
+ }
+ }
+ }
+
+ return "", fmt.Errorf("GitHub token not found in standard locations")
+}
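As a quick sanity check of the lookup order above, a minimal sketch (runnable only from inside this module, since `internal/` packages are not importable externally; the printed wording is illustrative):

```go
package main

import (
	"fmt"

	"github.com/opencode-ai/opencode/internal/config"
)

func main() {
	// LoadGitHubToken checks GITHUB_TOKEN first, then the github-copilot
	// hosts.json/apps.json files under the platform config directory.
	token, err := config.LoadGitHubToken()
	if err != nil {
		fmt.Println("no GitHub token found:", err)
		return
	}
	fmt.Printf("found a GitHub token (%d characters)\n", len(token))
}
```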
internal/llm/agent/agent.go
@@ -162,6 +162,7 @@ func (a *agent) generateTitle(ctx context.Context, sessionID string, content str
if err != nil {
return err
}
+ ctx = context.WithValue(ctx, tools.SessionIDContextKey, sessionID)
parts := []message.ContentPart{message.TextContent{Text: content}}
response, err := a.titleProvider.SendMessages(
ctx,
@@ -230,6 +231,7 @@ func (a *agent) Run(ctx context.Context, sessionID string, content string, attac
}
func (a *agent) processGeneration(ctx context.Context, sessionID, content string, attachmentParts []message.ContentPart) AgentEvent {
+ cfg := config.Get()
// List existing messages; if none, start title generation asynchronously.
msgs, err := a.messages.List(ctx, sessionID)
if err != nil {
@@ -288,7 +290,13 @@ func (a *agent) processGeneration(ctx context.Context, sessionID, content string
}
return a.err(fmt.Errorf("failed to process events: %w", err))
}
- logging.Info("Result", "message", agentMessage.FinishReason(), "toolResults", toolResults)
+ if cfg.Debug {
+ seqId := (len(msgHistory) + 1) / 2
+ toolResultFilepath := logging.WriteToolResultsJson(sessionID, seqId, toolResults)
+ logging.Info("Result", "message", agentMessage.FinishReason(), "toolResults", "{}", "filepath", toolResultFilepath)
+ } else {
+ logging.Info("Result", "message", agentMessage.FinishReason(), "toolResults", toolResults)
+ }
if (agentMessage.FinishReason() == message.FinishReasonToolUse) && toolResults != nil {
// We are not done, we need to respond with the tool response
msgHistory = append(msgHistory, agentMessage, *toolResults)
@@ -312,6 +320,7 @@ func (a *agent) createUserMessage(ctx context.Context, sessionID, content string
}
func (a *agent) streamAndHandleEvents(ctx context.Context, sessionID string, msgHistory []message.Message) (message.Message, *message.Message, error) {
+ ctx = context.WithValue(ctx, tools.SessionIDContextKey, sessionID)
eventChan := a.provider.StreamResponse(ctx, msgHistory, a.tools)
assistantMsg, err := a.messages.Create(ctx, sessionID, message.CreateMessageParams{
@@ -325,7 +334,6 @@ func (a *agent) streamAndHandleEvents(ctx context.Context, sessionID string, msg
// Add the session and message ID into the context if needed by tools.
ctx = context.WithValue(ctx, tools.MessageIDContextKey, assistantMsg.ID)
- ctx = context.WithValue(ctx, tools.SessionIDContextKey, sessionID)
// Process each event in the stream.
for event := range eventChan {
@@ -357,10 +365,17 @@ func (a *agent) streamAndHandleEvents(ctx context.Context, sessionID string, msg
default:
// Continue processing
var tool tools.BaseTool
- for _, availableTools := range a.tools {
- if availableTools.Info().Name == toolCall.Name {
- tool = availableTools
+ for _, availableTool := range a.tools {
+ if availableTool.Info().Name == toolCall.Name {
+ tool = availableTool
+ break
}
+ // Monkey patch for Copilot Sonnet-4 tool repetition obfuscation
+ // if strings.HasPrefix(toolCall.Name, availableTool.Info().Name) &&
+ // strings.HasPrefix(toolCall.Name, availableTool.Info().Name+availableTool.Info().Name) {
+ // tool = availableTool
+ // break
+ // }
}
// Tool not found
@@ -553,6 +568,7 @@ func (a *agent) Summarize(ctx context.Context, sessionID string) error {
a.Publish(pubsub.CreatedEvent, event)
return
}
+ summarizeCtx = context.WithValue(summarizeCtx, tools.SessionIDContextKey, sessionID)
if len(msgs) == 0 {
event = AgentEvent{
internal/llm/models/copilot.go
@@ -0,0 +1,219 @@
+package models
+
+const (
+ ProviderCopilot ModelProvider = "copilot"
+
+ // GitHub Copilot models
+	CopilotGPT35Turbo      ModelID = "copilot.gpt-3.5-turbo"
+ CopilotGPT4o ModelID = "copilot.gpt-4o"
+ CopilotGPT4oMini ModelID = "copilot.gpt-4o-mini"
+ CopilotGPT41 ModelID = "copilot.gpt-4.1"
+ CopilotClaude35 ModelID = "copilot.claude-3.5-sonnet"
+ CopilotClaude37 ModelID = "copilot.claude-3.7-sonnet"
+ CopilotClaude4 ModelID = "copilot.claude-sonnet-4"
+ CopilotO1 ModelID = "copilot.o1"
+ CopilotO3Mini ModelID = "copilot.o3-mini"
+ CopilotO4Mini ModelID = "copilot.o4-mini"
+ CopilotGemini20 ModelID = "copilot.gemini-2.0-flash"
+ CopilotGemini25 ModelID = "copilot.gemini-2.5-pro"
+ CopilotGPT4 ModelID = "copilot.gpt-4"
+ CopilotClaude37Thought ModelID = "copilot.claude-3.7-sonnet-thought"
+)
+
+var CopilotAnthropicModels = []ModelID{
+ CopilotClaude35,
+ CopilotClaude37,
+ CopilotClaude37Thought,
+ CopilotClaude4,
+}
+
+// GitHub Copilot models available through GitHub's API
+var CopilotModels = map[ModelID]Model{
+	CopilotGPT35Turbo: {
+		ID:                  CopilotGPT35Turbo,
+ Name: "GitHub Copilot GPT-3.5-turbo",
+ Provider: ProviderCopilot,
+ APIModel: "gpt-3.5-turbo",
+ CostPer1MIn: 0.0, // Included in GitHub Copilot subscription
+ CostPer1MInCached: 0.0,
+ CostPer1MOutCached: 0.0,
+ CostPer1MOut: 0.0,
+ ContextWindow: 16_384,
+ DefaultMaxTokens: 4096,
+ SupportsAttachments: true,
+ },
+ CopilotGPT4o: {
+ ID: CopilotGPT4o,
+ Name: "GitHub Copilot GPT-4o",
+ Provider: ProviderCopilot,
+ APIModel: "gpt-4o",
+ CostPer1MIn: 0.0, // Included in GitHub Copilot subscription
+ CostPer1MInCached: 0.0,
+ CostPer1MOutCached: 0.0,
+ CostPer1MOut: 0.0,
+ ContextWindow: 128_000,
+ DefaultMaxTokens: 16384,
+ SupportsAttachments: true,
+ },
+ CopilotGPT4oMini: {
+ ID: CopilotGPT4oMini,
+ Name: "GitHub Copilot GPT-4o Mini",
+ Provider: ProviderCopilot,
+ APIModel: "gpt-4o-mini",
+ CostPer1MIn: 0.0, // Included in GitHub Copilot subscription
+ CostPer1MInCached: 0.0,
+ CostPer1MOutCached: 0.0,
+ CostPer1MOut: 0.0,
+ ContextWindow: 128_000,
+ DefaultMaxTokens: 4096,
+ SupportsAttachments: true,
+ },
+ CopilotGPT41: {
+ ID: CopilotGPT41,
+ Name: "GitHub Copilot GPT-4.1",
+ Provider: ProviderCopilot,
+ APIModel: "gpt-4.1",
+ CostPer1MIn: 0.0, // Included in GitHub Copilot subscription
+ CostPer1MInCached: 0.0,
+ CostPer1MOutCached: 0.0,
+ CostPer1MOut: 0.0,
+ ContextWindow: 128_000,
+ DefaultMaxTokens: 16384,
+ CanReason: true,
+ SupportsAttachments: true,
+ },
+ CopilotClaude35: {
+ ID: CopilotClaude35,
+ Name: "GitHub Copilot Claude 3.5 Sonnet",
+ Provider: ProviderCopilot,
+ APIModel: "claude-3.5-sonnet",
+ CostPer1MIn: 0.0, // Included in GitHub Copilot subscription
+ CostPer1MInCached: 0.0,
+ CostPer1MOutCached: 0.0,
+ CostPer1MOut: 0.0,
+ ContextWindow: 90_000,
+ DefaultMaxTokens: 8192,
+ SupportsAttachments: true,
+ },
+ CopilotClaude37: {
+ ID: CopilotClaude37,
+ Name: "GitHub Copilot Claude 3.7 Sonnet",
+ Provider: ProviderCopilot,
+ APIModel: "claude-3.7-sonnet",
+ CostPer1MIn: 0.0, // Included in GitHub Copilot subscription
+ CostPer1MInCached: 0.0,
+ CostPer1MOutCached: 0.0,
+ CostPer1MOut: 0.0,
+ ContextWindow: 200_000,
+ DefaultMaxTokens: 16384,
+ SupportsAttachments: true,
+ },
+ CopilotClaude4: {
+ ID: CopilotClaude4,
+ Name: "GitHub Copilot Claude Sonnet 4",
+ Provider: ProviderCopilot,
+ APIModel: "claude-sonnet-4",
+ CostPer1MIn: 0.0, // Included in GitHub Copilot subscription
+ CostPer1MInCached: 0.0,
+ CostPer1MOutCached: 0.0,
+ CostPer1MOut: 0.0,
+ ContextWindow: 128_000,
+ DefaultMaxTokens: 16000,
+ SupportsAttachments: true,
+ },
+ CopilotO1: {
+ ID: CopilotO1,
+ Name: "GitHub Copilot o1",
+ Provider: ProviderCopilot,
+ APIModel: "o1",
+ CostPer1MIn: 0.0, // Included in GitHub Copilot subscription
+ CostPer1MInCached: 0.0,
+ CostPer1MOutCached: 0.0,
+ CostPer1MOut: 0.0,
+ ContextWindow: 200_000,
+ DefaultMaxTokens: 100_000,
+ CanReason: true,
+ SupportsAttachments: false,
+ },
+ CopilotO3Mini: {
+ ID: CopilotO3Mini,
+ Name: "GitHub Copilot o3-mini",
+ Provider: ProviderCopilot,
+ APIModel: "o3-mini",
+ CostPer1MIn: 0.0, // Included in GitHub Copilot subscription
+ CostPer1MInCached: 0.0,
+ CostPer1MOutCached: 0.0,
+ CostPer1MOut: 0.0,
+ ContextWindow: 200_000,
+ DefaultMaxTokens: 100_000,
+ CanReason: true,
+ SupportsAttachments: false,
+ },
+ CopilotO4Mini: {
+ ID: CopilotO4Mini,
+ Name: "GitHub Copilot o4-mini",
+ Provider: ProviderCopilot,
+ APIModel: "o4-mini",
+ CostPer1MIn: 0.0, // Included in GitHub Copilot subscription
+ CostPer1MInCached: 0.0,
+ CostPer1MOutCached: 0.0,
+ CostPer1MOut: 0.0,
+ ContextWindow: 128_000,
+ DefaultMaxTokens: 16_384,
+ CanReason: true,
+ SupportsAttachments: true,
+ },
+ CopilotGemini20: {
+ ID: CopilotGemini20,
+ Name: "GitHub Copilot Gemini 2.0 Flash",
+ Provider: ProviderCopilot,
+ APIModel: "gemini-2.0-flash-001",
+ CostPer1MIn: 0.0, // Included in GitHub Copilot subscription
+ CostPer1MInCached: 0.0,
+ CostPer1MOutCached: 0.0,
+ CostPer1MOut: 0.0,
+ ContextWindow: 1_000_000,
+ DefaultMaxTokens: 8192,
+ SupportsAttachments: true,
+ },
+ CopilotGemini25: {
+ ID: CopilotGemini25,
+ Name: "GitHub Copilot Gemini 2.5 Pro",
+ Provider: ProviderCopilot,
+ APIModel: "gemini-2.5-pro",
+ CostPer1MIn: 0.0, // Included in GitHub Copilot subscription
+ CostPer1MInCached: 0.0,
+ CostPer1MOutCached: 0.0,
+ CostPer1MOut: 0.0,
+ ContextWindow: 128_000,
+ DefaultMaxTokens: 64000,
+ SupportsAttachments: true,
+ },
+ CopilotGPT4: {
+ ID: CopilotGPT4,
+ Name: "GitHub Copilot GPT-4",
+ Provider: ProviderCopilot,
+ APIModel: "gpt-4",
+ CostPer1MIn: 0.0, // Included in GitHub Copilot subscription
+ CostPer1MInCached: 0.0,
+ CostPer1MOutCached: 0.0,
+ CostPer1MOut: 0.0,
+ ContextWindow: 32_768,
+ DefaultMaxTokens: 4096,
+ SupportsAttachments: true,
+ },
+ CopilotClaude37Thought: {
+ ID: CopilotClaude37Thought,
+ Name: "GitHub Copilot Claude 3.7 Sonnet Thinking",
+ Provider: ProviderCopilot,
+ APIModel: "claude-3.7-sonnet-thought",
+ CostPer1MIn: 0.0, // Included in GitHub Copilot subscription
+ CostPer1MInCached: 0.0,
+ CostPer1MOutCached: 0.0,
+ CostPer1MOut: 0.0,
+ ContextWindow: 200_000,
+ DefaultMaxTokens: 16384,
+ CanReason: true,
+ SupportsAttachments: true,
+ },
+}
internal/llm/models/models.go
@@ -36,14 +36,15 @@ const (
// Providers in order of popularity
var ProviderPopularity = map[ModelProvider]int{
- ProviderAnthropic: 1,
- ProviderOpenAI: 2,
- ProviderGemini: 3,
- ProviderGROQ: 4,
- ProviderOpenRouter: 5,
- ProviderBedrock: 6,
- ProviderAzure: 7,
- ProviderVertexAI: 8,
+ ProviderCopilot: 1,
+ ProviderAnthropic: 2,
+ ProviderOpenAI: 3,
+ ProviderGemini: 4,
+ ProviderGROQ: 5,
+ ProviderOpenRouter: 6,
+ ProviderBedrock: 7,
+ ProviderAzure: 8,
+ ProviderVertexAI: 9,
}
var SupportedModels = map[ModelID]Model{
@@ -93,4 +94,5 @@ func init() {
maps.Copy(SupportedModels, OpenRouterModels)
maps.Copy(SupportedModels, XAIModels)
maps.Copy(SupportedModels, VertexAIGeminiModels)
+ maps.Copy(SupportedModels, CopilotModels)
}
internal/llm/provider/anthropic.go
@@ -14,7 +14,7 @@ import (
"github.com/anthropics/anthropic-sdk-go/option"
"github.com/opencode-ai/opencode/internal/config"
"github.com/opencode-ai/opencode/internal/llm/models"
- "github.com/opencode-ai/opencode/internal/llm/tools"
+ toolsPkg "github.com/opencode-ai/opencode/internal/llm/tools"
"github.com/opencode-ai/opencode/internal/logging"
"github.com/opencode-ai/opencode/internal/message"
)
@@ -118,7 +118,7 @@ func (a *anthropicClient) convertMessages(messages []message.Message) (anthropic
return
}
-func (a *anthropicClient) convertTools(tools []tools.BaseTool) []anthropic.ToolUnionParam {
+func (a *anthropicClient) convertTools(tools []toolsPkg.BaseTool) []anthropic.ToolUnionParam {
anthropicTools := make([]anthropic.ToolUnionParam, len(tools))
for i, tool := range tools {
@@ -195,7 +195,7 @@ func (a *anthropicClient) preparedMessages(messages []anthropic.MessageParam, to
}
}
-func (a *anthropicClient) send(ctx context.Context, messages []message.Message, tools []tools.BaseTool) (resposne *ProviderResponse, err error) {
+func (a *anthropicClient) send(ctx context.Context, messages []message.Message, tools []toolsPkg.BaseTool) (response *ProviderResponse, err error) {
preparedMessages := a.preparedMessages(a.convertMessages(messages), a.convertTools(tools))
cfg := config.Get()
if cfg.Debug {
@@ -244,12 +244,24 @@ func (a *anthropicClient) send(ctx context.Context, messages []message.Message,
}
}
-func (a *anthropicClient) stream(ctx context.Context, messages []message.Message, tools []tools.BaseTool) <-chan ProviderEvent {
+func (a *anthropicClient) stream(ctx context.Context, messages []message.Message, tools []toolsPkg.BaseTool) <-chan ProviderEvent {
preparedMessages := a.preparedMessages(a.convertMessages(messages), a.convertTools(tools))
cfg := config.Get()
+
+ var sessionId string
+ requestSeqId := (len(messages) + 1) / 2
if cfg.Debug {
- // jsonData, _ := json.Marshal(preparedMessages)
- // logging.Debug("Prepared messages", "messages", string(jsonData))
+ if sid, ok := ctx.Value(toolsPkg.SessionIDContextKey).(string); ok {
+ sessionId = sid
+ }
+		if sessionId != "" {
+			filepath := logging.WriteRequestMessageJson(sessionId, requestSeqId, preparedMessages)
+			logging.Debug("Prepared messages", "filepath", filepath)
+		} else {
+			jsonData, _ := json.Marshal(preparedMessages)
+			logging.Debug("Prepared messages", "messages", string(jsonData))
+		}
}
attempts := 0
eventChan := make(chan ProviderEvent)
internal/llm/provider/copilot.go
@@ -0,0 +1,671 @@
+package provider
+
+import (
+ "context"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "net/http"
+ "os"
+ "time"
+
+ "github.com/openai/openai-go"
+ "github.com/openai/openai-go/option"
+ "github.com/openai/openai-go/shared"
+ "github.com/opencode-ai/opencode/internal/config"
+ "github.com/opencode-ai/opencode/internal/llm/models"
+ toolsPkg "github.com/opencode-ai/opencode/internal/llm/tools"
+ "github.com/opencode-ai/opencode/internal/logging"
+ "github.com/opencode-ai/opencode/internal/message"
+)
+
+type copilotOptions struct {
+ reasoningEffort string
+ extraHeaders map[string]string
+ bearerToken string
+}
+
+type CopilotOption func(*copilotOptions)
+
+type copilotClient struct {
+ providerOptions providerClientOptions
+ options copilotOptions
+ client openai.Client
+ httpClient *http.Client
+}
+
+type CopilotClient ProviderClient
+
+// CopilotTokenResponse represents the response from GitHub's token exchange endpoint
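+// ExpiresAt is not tracked proactively: an expired bearer token surfaces as a
+// 401, at which point shouldRetry re-exchanges the GitHub token and retries.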
+type CopilotTokenResponse struct {
+ Token string `json:"token"`
+ ExpiresAt int64 `json:"expires_at"`
+}
+
+func (c *copilotClient) isAnthropicModel() bool {
+ for _, modelId := range models.CopilotAnthropicModels {
+ if c.providerOptions.model.ID == modelId {
+ return true
+ }
+ }
+ return false
+}
+
+// GitHub token discovery lives in config.LoadGitHubToken; this client only
+// performs the GitHub-token-to-bearer-token exchange below.
+
+// exchangeGitHubToken exchanges a GitHub token for a Copilot bearer token
+func (c *copilotClient) exchangeGitHubToken(githubToken string) (string, error) {
+ req, err := http.NewRequest("GET", "https://api.github.com/copilot_internal/v2/token", nil)
+ if err != nil {
+ return "", fmt.Errorf("failed to create token exchange request: %w", err)
+ }
+
+ req.Header.Set("Authorization", "Token "+githubToken)
+ req.Header.Set("User-Agent", "OpenCode/1.0")
+
+ resp, err := c.httpClient.Do(req)
+ if err != nil {
+ return "", fmt.Errorf("failed to exchange GitHub token: %w", err)
+ }
+ defer resp.Body.Close()
+
+ if resp.StatusCode != http.StatusOK {
+ body, _ := io.ReadAll(resp.Body)
+ return "", fmt.Errorf("token exchange failed with status %d: %s", resp.StatusCode, string(body))
+ }
+
+ var tokenResp CopilotTokenResponse
+ if err := json.NewDecoder(resp.Body).Decode(&tokenResp); err != nil {
+ return "", fmt.Errorf("failed to decode token response: %w", err)
+ }
+
+ return tokenResp.Token, nil
+}
+
+func newCopilotClient(opts providerClientOptions) CopilotClient {
+ copilotOpts := copilotOptions{
+ reasoningEffort: "medium",
+ }
+ // Apply copilot-specific options
+ for _, o := range opts.copilotOptions {
+ o(&copilotOpts)
+ }
+
+ // Create HTTP client for token exchange
+ httpClient := &http.Client{
+ Timeout: 30 * time.Second,
+ }
+
+ var bearerToken string
+
+ // If bearer token is already provided, use it
+ if copilotOpts.bearerToken != "" {
+ bearerToken = copilotOpts.bearerToken
+ } else {
+ // Try to get GitHub token from multiple sources
+ var githubToken string
+
+ // 1. Environment variable
+ githubToken = os.Getenv("GITHUB_TOKEN")
+
+ // 2. API key from options
+ if githubToken == "" {
+ githubToken = opts.apiKey
+ }
+
+ // 3. Standard GitHub CLI/Copilot locations
+ if githubToken == "" {
+ var err error
+ githubToken, err = config.LoadGitHubToken()
+ if err != nil {
+ logging.Debug("Failed to load GitHub token from standard locations", "error", err)
+ }
+ }
+
+ if githubToken == "" {
+ logging.Error("GitHub token is required for Copilot provider. Set GITHUB_TOKEN environment variable, configure it in opencode.json, or ensure GitHub CLI/Copilot is properly authenticated.")
+ return &copilotClient{
+ providerOptions: opts,
+ options: copilotOpts,
+ httpClient: httpClient,
+ }
+ }
+
+ // Create a temporary client for token exchange
+ tempClient := &copilotClient{
+ providerOptions: opts,
+ options: copilotOpts,
+ httpClient: httpClient,
+ }
+
+ // Exchange GitHub token for bearer token
+ var err error
+ bearerToken, err = tempClient.exchangeGitHubToken(githubToken)
+ if err != nil {
+ logging.Error("Failed to exchange GitHub token for Copilot bearer token", "error", err)
+ return &copilotClient{
+ providerOptions: opts,
+ options: copilotOpts,
+ httpClient: httpClient,
+ }
+ }
+ }
+
+ copilotOpts.bearerToken = bearerToken
+
+ // GitHub Copilot API base URL
+ baseURL := "https://api.githubcopilot.com"
+
+ openaiClientOptions := []option.RequestOption{
+ option.WithBaseURL(baseURL),
+ option.WithAPIKey(bearerToken), // Use bearer token as API key
+ }
+
+ // Add GitHub Copilot specific headers
+ openaiClientOptions = append(openaiClientOptions,
+ option.WithHeader("Editor-Version", "OpenCode/1.0"),
+ option.WithHeader("Editor-Plugin-Version", "OpenCode/1.0"),
+ option.WithHeader("Copilot-Integration-Id", "vscode-chat"),
+ )
+
+ // Add any extra headers
+ if copilotOpts.extraHeaders != nil {
+ for key, value := range copilotOpts.extraHeaders {
+ openaiClientOptions = append(openaiClientOptions, option.WithHeader(key, value))
+ }
+ }
+
+ client := openai.NewClient(openaiClientOptions...)
+ // logging.Debug("Copilot client created", "opts", opts, "copilotOpts", copilotOpts, "model", opts.model)
+ return &copilotClient{
+ providerOptions: opts,
+ options: copilotOpts,
+ client: client,
+ httpClient: httpClient,
+ }
+}
+
+func (c *copilotClient) convertMessages(messages []message.Message) (copilotMessages []openai.ChatCompletionMessageParamUnion) {
+ // Add system message first
+ copilotMessages = append(copilotMessages, openai.SystemMessage(c.providerOptions.systemMessage))
+
+ for _, msg := range messages {
+ switch msg.Role {
+ case message.User:
+ var content []openai.ChatCompletionContentPartUnionParam
+ textBlock := openai.ChatCompletionContentPartTextParam{Text: msg.Content().String()}
+ content = append(content, openai.ChatCompletionContentPartUnionParam{OfText: &textBlock})
+
+ for _, binaryContent := range msg.BinaryContent() {
+ imageURL := openai.ChatCompletionContentPartImageImageURLParam{URL: binaryContent.String(models.ProviderCopilot)}
+ imageBlock := openai.ChatCompletionContentPartImageParam{ImageURL: imageURL}
+ content = append(content, openai.ChatCompletionContentPartUnionParam{OfImageURL: &imageBlock})
+ }
+
+ copilotMessages = append(copilotMessages, openai.UserMessage(content))
+
+ case message.Assistant:
+ assistantMsg := openai.ChatCompletionAssistantMessageParam{
+ Role: "assistant",
+ }
+
+ if msg.Content().String() != "" {
+ assistantMsg.Content = openai.ChatCompletionAssistantMessageParamContentUnion{
+ OfString: openai.String(msg.Content().String()),
+ }
+ }
+
+ if len(msg.ToolCalls()) > 0 {
+ assistantMsg.ToolCalls = make([]openai.ChatCompletionMessageToolCallParam, len(msg.ToolCalls()))
+ for i, call := range msg.ToolCalls() {
+ assistantMsg.ToolCalls[i] = openai.ChatCompletionMessageToolCallParam{
+ ID: call.ID,
+ Type: "function",
+ Function: openai.ChatCompletionMessageToolCallFunctionParam{
+ Name: call.Name,
+ Arguments: call.Input,
+ },
+ }
+ }
+ }
+
+ copilotMessages = append(copilotMessages, openai.ChatCompletionMessageParamUnion{
+ OfAssistant: &assistantMsg,
+ })
+
+ case message.Tool:
+ for _, result := range msg.ToolResults() {
+ copilotMessages = append(copilotMessages,
+ openai.ToolMessage(result.Content, result.ToolCallID),
+ )
+ }
+ }
+ }
+
+ return
+}
+
+func (c *copilotClient) convertTools(tools []toolsPkg.BaseTool) []openai.ChatCompletionToolParam {
+ copilotTools := make([]openai.ChatCompletionToolParam, len(tools))
+
+ for i, tool := range tools {
+ info := tool.Info()
+ copilotTools[i] = openai.ChatCompletionToolParam{
+ Function: openai.FunctionDefinitionParam{
+ Name: info.Name,
+ Description: openai.String(info.Description),
+ Parameters: openai.FunctionParameters{
+ "type": "object",
+ "properties": info.Parameters,
+ "required": info.Required,
+ },
+ },
+ }
+ }
+
+ return copilotTools
+}
+
+func (c *copilotClient) finishReason(reason string) message.FinishReason {
+ switch reason {
+ case "stop":
+ return message.FinishReasonEndTurn
+ case "length":
+ return message.FinishReasonMaxTokens
+ case "tool_calls":
+ return message.FinishReasonToolUse
+ default:
+ return message.FinishReasonUnknown
+ }
+}
+
+func (c *copilotClient) preparedParams(messages []openai.ChatCompletionMessageParamUnion, tools []openai.ChatCompletionToolParam) openai.ChatCompletionNewParams {
+ params := openai.ChatCompletionNewParams{
+ Model: openai.ChatModel(c.providerOptions.model.APIModel),
+ Messages: messages,
+ Tools: tools,
+ }
+
+	if c.providerOptions.model.CanReason {
+ params.MaxCompletionTokens = openai.Int(c.providerOptions.maxTokens)
+ switch c.options.reasoningEffort {
+ case "low":
+ params.ReasoningEffort = shared.ReasoningEffortLow
+ case "medium":
+ params.ReasoningEffort = shared.ReasoningEffortMedium
+ case "high":
+ params.ReasoningEffort = shared.ReasoningEffortHigh
+ default:
+ params.ReasoningEffort = shared.ReasoningEffortMedium
+ }
+ } else {
+ params.MaxTokens = openai.Int(c.providerOptions.maxTokens)
+ }
+
+ return params
+}
+
+func (c *copilotClient) send(ctx context.Context, messages []message.Message, tools []toolsPkg.BaseTool) (response *ProviderResponse, err error) {
+ params := c.preparedParams(c.convertMessages(messages), c.convertTools(tools))
+ cfg := config.Get()
+ var sessionId string
+ requestSeqId := (len(messages) + 1) / 2
+ if cfg.Debug {
+		if sid, ok := ctx.Value(toolsPkg.SessionIDContextKey).(string); ok {
+			sessionId = sid
+		}
+		if sessionId != "" {
+			filepath := logging.WriteRequestMessageJson(sessionId, requestSeqId, params)
+			logging.Debug("Prepared messages", "filepath", filepath)
+		} else {
+			jsonData, _ := json.Marshal(params)
+			logging.Debug("Prepared messages", "messages", string(jsonData))
+		}
+ }
+
+ attempts := 0
+ for {
+ attempts++
+ copilotResponse, err := c.client.Chat.Completions.New(
+ ctx,
+ params,
+ )
+
+ // If there is an error we are going to see if we can retry the call
+ if err != nil {
+ retry, after, retryErr := c.shouldRetry(attempts, err)
+ if retryErr != nil {
+ return nil, retryErr
+ }
+ if retry {
+ logging.WarnPersist(fmt.Sprintf("Retrying due to rate limit... attempt %d of %d", attempts, maxRetries), logging.PersistTimeArg, time.Millisecond*time.Duration(after+100))
+ select {
+ case <-ctx.Done():
+ return nil, ctx.Err()
+ case <-time.After(time.Duration(after) * time.Millisecond):
+ continue
+ }
+ }
+ return nil, retryErr
+ }
+
+ content := ""
+ if copilotResponse.Choices[0].Message.Content != "" {
+ content = copilotResponse.Choices[0].Message.Content
+ }
+
+ toolCalls := c.toolCalls(*copilotResponse)
+ finishReason := c.finishReason(string(copilotResponse.Choices[0].FinishReason))
+
+ if len(toolCalls) > 0 {
+ finishReason = message.FinishReasonToolUse
+ }
+
+ return &ProviderResponse{
+ Content: content,
+ ToolCalls: toolCalls,
+ Usage: c.usage(*copilotResponse),
+ FinishReason: finishReason,
+ }, nil
+ }
+}
+
+func (c *copilotClient) stream(ctx context.Context, messages []message.Message, tools []toolsPkg.BaseTool) <-chan ProviderEvent {
+ params := c.preparedParams(c.convertMessages(messages), c.convertTools(tools))
+ params.StreamOptions = openai.ChatCompletionStreamOptionsParam{
+ IncludeUsage: openai.Bool(true),
+ }
+
+ cfg := config.Get()
+ var sessionId string
+ requestSeqId := (len(messages) + 1) / 2
+ if cfg.Debug {
+ if sid, ok := ctx.Value(toolsPkg.SessionIDContextKey).(string); ok {
+ sessionId = sid
+ }
+		if sessionId != "" {
+			filepath := logging.WriteRequestMessageJson(sessionId, requestSeqId, params)
+			logging.Debug("Prepared messages", "filepath", filepath)
+		} else {
+			jsonData, _ := json.Marshal(params)
+			logging.Debug("Prepared messages", "messages", string(jsonData))
+		}
+ }
+
+ attempts := 0
+ eventChan := make(chan ProviderEvent)
+
+ go func() {
+ for {
+ attempts++
+ copilotStream := c.client.Chat.Completions.NewStreaming(
+ ctx,
+ params,
+ )
+
+ acc := openai.ChatCompletionAccumulator{}
+ currentContent := ""
+ toolCalls := make([]message.ToolCall, 0)
+
+ var currentToolCallId string
+ var currentToolCall openai.ChatCompletionMessageToolCall
+ var msgToolCalls []openai.ChatCompletionMessageToolCall
+ for copilotStream.Next() {
+ chunk := copilotStream.Current()
+ acc.AddChunk(chunk)
+
+ if cfg.Debug {
+ logging.AppendToStreamSessionLogJson(sessionId, requestSeqId, chunk)
+ }
+
+ for _, choice := range chunk.Choices {
+ if choice.Delta.Content != "" {
+ eventChan <- ProviderEvent{
+ Type: EventContentDelta,
+ Content: choice.Delta.Content,
+ }
+ currentContent += choice.Delta.Content
+ }
+ }
+
+ if c.isAnthropicModel() {
+ // Monkeypatch adapter for Sonnet-4 multi-tool use
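+					// (Assumption, inferred from the handling below: these models
+					// stream tool-call deltas without index fields, so fragments are
+					// stitched together by tool-call ID and flushed when the finish
+					// reason is "tool_calls".)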
+ for _, choice := range chunk.Choices {
+					if len(choice.Delta.ToolCalls) > 0 {
+ toolCall := choice.Delta.ToolCalls[0]
+ // Detect tool use start
+ if currentToolCallId == "" {
+ if toolCall.ID != "" {
+ currentToolCallId = toolCall.ID
+ currentToolCall = openai.ChatCompletionMessageToolCall{
+ ID: toolCall.ID,
+ Type: "function",
+ Function: openai.ChatCompletionMessageToolCallFunction{
+ Name: toolCall.Function.Name,
+ Arguments: toolCall.Function.Arguments,
+ },
+ }
+ }
+ } else {
+ // Delta tool use
+ if toolCall.ID == "" {
+ currentToolCall.Function.Arguments += toolCall.Function.Arguments
+ } else {
+ // Detect new tool use
+ if toolCall.ID != currentToolCallId {
+ msgToolCalls = append(msgToolCalls, currentToolCall)
+ currentToolCallId = toolCall.ID
+ currentToolCall = openai.ChatCompletionMessageToolCall{
+ ID: toolCall.ID,
+ Type: "function",
+ Function: openai.ChatCompletionMessageToolCallFunction{
+ Name: toolCall.Function.Name,
+ Arguments: toolCall.Function.Arguments,
+ },
+ }
+ }
+ }
+ }
+ }
+ if choice.FinishReason == "tool_calls" {
+ msgToolCalls = append(msgToolCalls, currentToolCall)
+ acc.ChatCompletion.Choices[0].Message.ToolCalls = msgToolCalls
+ }
+ }
+ }
+ }
+
+ err := copilotStream.Err()
+ if err == nil || errors.Is(err, io.EOF) {
+ if cfg.Debug {
+ respFilepath := logging.WriteChatResponseJson(sessionId, requestSeqId, acc.ChatCompletion)
+ logging.Debug("Chat completion response", "filepath", respFilepath)
+ }
+ // Stream completed successfully
+ finishReason := c.finishReason(string(acc.ChatCompletion.Choices[0].FinishReason))
+ if len(acc.ChatCompletion.Choices[0].Message.ToolCalls) > 0 {
+ toolCalls = append(toolCalls, c.toolCalls(acc.ChatCompletion)...)
+ }
+ if len(toolCalls) > 0 {
+ finishReason = message.FinishReasonToolUse
+ }
+
+ eventChan <- ProviderEvent{
+ Type: EventComplete,
+ Response: &ProviderResponse{
+ Content: currentContent,
+ ToolCalls: toolCalls,
+ Usage: c.usage(acc.ChatCompletion),
+ FinishReason: finishReason,
+ },
+ }
+ close(eventChan)
+ return
+ }
+
+ // If there is an error we are going to see if we can retry the call
+ retry, after, retryErr := c.shouldRetry(attempts, err)
+ if retryErr != nil {
+ eventChan <- ProviderEvent{Type: EventError, Error: retryErr}
+ close(eventChan)
+ return
+ }
+ // shouldRetry is not catching the max retries...
+ // TODO: Figure out why
+ if attempts > maxRetries {
+ logging.Warn("Maximum retry attempts reached for rate limit", "attempts", attempts, "max_retries", maxRetries)
+ retry = false
+ }
+ if retry {
+ logging.WarnPersist(fmt.Sprintf("Retrying due to rate limit... attempt %d of %d (paused for %d ms)", attempts, maxRetries, after), logging.PersistTimeArg, time.Millisecond*time.Duration(after+100))
+ select {
+ case <-ctx.Done():
+ // context cancelled
+						if ctx.Err() != nil {
+ eventChan <- ProviderEvent{Type: EventError, Error: ctx.Err()}
+ }
+ close(eventChan)
+ return
+ case <-time.After(time.Duration(after) * time.Millisecond):
+ continue
+ }
+ }
+ eventChan <- ProviderEvent{Type: EventError, Error: retryErr}
+ close(eventChan)
+ return
+ }
+ }()
+
+ return eventChan
+}
+
+func (c *copilotClient) shouldRetry(attempts int, err error) (bool, int64, error) {
+ var apierr *openai.Error
+ if !errors.As(err, &apierr) {
+ return false, 0, err
+ }
+
+ // Check for token expiration (401 Unauthorized)
+ if apierr.StatusCode == 401 {
+ // Try to refresh the bearer token
+ var githubToken string
+
+ // 1. Environment variable
+ githubToken = os.Getenv("GITHUB_TOKEN")
+
+ // 2. API key from options
+ if githubToken == "" {
+ githubToken = c.providerOptions.apiKey
+ }
+
+ // 3. Standard GitHub CLI/Copilot locations
+ if githubToken == "" {
+ var err error
+ githubToken, err = config.LoadGitHubToken()
+ if err != nil {
+ logging.Debug("Failed to load GitHub token from standard locations during retry", "error", err)
+ }
+ }
+
+ if githubToken != "" {
+ newBearerToken, tokenErr := c.exchangeGitHubToken(githubToken)
+ if tokenErr == nil {
+ c.options.bearerToken = newBearerToken
+ // Update the client with the new token
+ // Note: This is a simplified approach. In a production system,
+ // you might want to recreate the entire client with the new token
+ logging.Info("Refreshed Copilot bearer token")
+ return true, 1000, nil // Retry immediately with new token
+ }
+ logging.Error("Failed to refresh Copilot bearer token", "error", tokenErr)
+ }
+ return false, 0, fmt.Errorf("authentication failed: %w", err)
+ }
+ logging.Debug("Copilot API Error", "status", apierr.StatusCode, "headers", apierr.Response.Header, "body", apierr.RawJSON())
+
+ if apierr.StatusCode != 429 && apierr.StatusCode != 500 {
+ return false, 0, err
+ }
+
+ if apierr.StatusCode == 500 {
+ logging.Warn("Copilot API returned 500 error, retrying", "error", err)
+ }
+
+ if attempts > maxRetries {
+ return false, 0, fmt.Errorf("maximum retry attempts reached for rate limit: %d retries", maxRetries)
+ }
+
+ retryMs := 0
+ retryAfterValues := apierr.Response.Header.Values("Retry-After")
+
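+	// Exponential backoff starting at 2s, with a fixed 20% jitter; a
+	// Retry-After header (in seconds) from the API overrides the computed value.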
+ backoffMs := 2000 * (1 << (attempts - 1))
+ jitterMs := int(float64(backoffMs) * 0.2)
+ retryMs = backoffMs + jitterMs
+ if len(retryAfterValues) > 0 {
+ if _, err := fmt.Sscanf(retryAfterValues[0], "%d", &retryMs); err == nil {
+ retryMs = retryMs * 1000
+ }
+ }
+ return true, int64(retryMs), nil
+}
+
+func (c *copilotClient) toolCalls(completion openai.ChatCompletion) []message.ToolCall {
+ var toolCalls []message.ToolCall
+
+ if len(completion.Choices) > 0 && len(completion.Choices[0].Message.ToolCalls) > 0 {
+ for _, call := range completion.Choices[0].Message.ToolCalls {
+ toolCall := message.ToolCall{
+ ID: call.ID,
+ Name: call.Function.Name,
+ Input: call.Function.Arguments,
+ Type: "function",
+ Finished: true,
+ }
+ toolCalls = append(toolCalls, toolCall)
+ }
+ }
+
+ return toolCalls
+}
+
+func (c *copilotClient) usage(completion openai.ChatCompletion) TokenUsage {
+ cachedTokens := completion.Usage.PromptTokensDetails.CachedTokens
+ inputTokens := completion.Usage.PromptTokens - cachedTokens
+
+ return TokenUsage{
+ InputTokens: inputTokens,
+ OutputTokens: completion.Usage.CompletionTokens,
+ CacheCreationTokens: 0, // GitHub Copilot doesn't provide this directly
+ CacheReadTokens: cachedTokens,
+ }
+}
+
+func WithCopilotReasoningEffort(effort string) CopilotOption {
+ return func(options *copilotOptions) {
+ defaultReasoningEffort := "medium"
+ switch effort {
+ case "low", "medium", "high":
+ defaultReasoningEffort = effort
+ default:
+ logging.Warn("Invalid reasoning effort, using default: medium")
+ }
+ options.reasoningEffort = defaultReasoningEffort
+ }
+}
+
+func WithCopilotExtraHeaders(headers map[string]string) CopilotOption {
+ return func(options *copilotOptions) {
+ options.extraHeaders = headers
+ }
+}
+
+func WithCopilotBearerToken(bearerToken string) CopilotOption {
+ return func(options *copilotOptions) {
+ options.bearerToken = bearerToken
+ }
+}
+
internal/llm/provider/provider.go
@@ -68,6 +68,7 @@ type providerClientOptions struct {
openaiOptions []OpenAIOption
geminiOptions []GeminiOption
bedrockOptions []BedrockOption
+ copilotOptions []CopilotOption
}
type ProviderClientOption func(*providerClientOptions)
@@ -88,6 +89,11 @@ func NewProvider(providerName models.ModelProvider, opts ...ProviderClientOption
o(&clientOptions)
}
switch providerName {
+ case models.ProviderCopilot:
+ return &baseProvider[CopilotClient]{
+ options: clientOptions,
+ client: newCopilotClient(clientOptions),
+ }, nil
case models.ProviderAnthropic:
return &baseProvider[AnthropicClient]{
options: clientOptions,
@@ -233,3 +239,9 @@ func WithBedrockOptions(bedrockOptions ...BedrockOption) ProviderClientOption {
options.bedrockOptions = bedrockOptions
}
}
+
+func WithCopilotOptions(copilotOptions ...CopilotOption) ProviderClientOption {
+ return func(options *providerClientOptions) {
+ options.copilotOptions = copilotOptions
+ }
+}
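For orientation, a minimal sketch of wiring up the new provider (again only compilable from inside this module; real call sites presumably also pass model and system-message options that exist elsewhere in this package, and constructing the client performs a live token exchange):

```go
package main

import (
	"log"

	"github.com/opencode-ai/opencode/internal/llm/models"
	"github.com/opencode-ai/opencode/internal/llm/provider"
)

func main() {
	// WithCopilotOptions and WithCopilotReasoningEffort are the helpers
	// added in this diff; newCopilotClient exchanges the GitHub token for
	// a Copilot bearer token during construction.
	p, err := provider.NewProvider(
		models.ProviderCopilot,
		provider.WithCopilotOptions(
			provider.WithCopilotReasoningEffort("high"),
		),
	)
	if err != nil {
		log.Fatal(err)
	}
	_ = p // p.SendMessages / p.StreamResponse behave like any other provider
}
```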
internal/llm/tools/view.go
@@ -11,6 +11,7 @@ import (
"strings"
"github.com/opencode-ai/opencode/internal/config"
+ "github.com/opencode-ai/opencode/internal/logging"
"github.com/opencode-ai/opencode/internal/lsp"
)
@@ -97,6 +98,7 @@ func (v *viewTool) Info() ToolInfo {
// Run implements Tool.
func (v *viewTool) Run(ctx context.Context, call ToolCall) (ToolResponse, error) {
var params ViewParams
+ logging.Debug("view tool params", "params", call.Input)
if err := json.Unmarshal([]byte(call.Input), ¶ms); err != nil {
return NewTextErrorResponse(fmt.Sprintf("error parsing parameters: %s", err)), nil
}
internal/logging/logger.go
@@ -4,16 +4,33 @@ import (
"fmt"
"log/slog"
"os"
+ // "path/filepath"
+ "encoding/json"
+ "runtime"
"runtime/debug"
+ "sync"
"time"
)
+func getCaller() string {
+ var caller string
+ if _, file, line, ok := runtime.Caller(2); ok {
+ // caller = fmt.Sprintf("%s:%d", filepath.Base(file), line)
+ caller = fmt.Sprintf("%s:%d", file, line)
+ } else {
+ caller = "unknown"
+ }
+ return caller
+}
func Info(msg string, args ...any) {
- slog.Info(msg, args...)
+ source := getCaller()
+ slog.Info(msg, append([]any{"source", source}, args...)...)
}
func Debug(msg string, args ...any) {
- slog.Debug(msg, args...)
+ source := getCaller()
+ slog.Debug(msg, append([]any{"source", source}, args...)...)
}
func Warn(msg string, args ...any) {
@@ -76,3 +93,115 @@ func RecoverPanic(name string, cleanup func()) {
}
}
}
+
+// Message Logging for Debug
+var MessageDir string
+
+func GetSessionPrefix(sessionId string) string {
+	if len(sessionId) < 8 {
+		return sessionId
+	}
+	return sessionId[:8]
+}
+
+var sessionLogMutex sync.Mutex
+
+func AppendToSessionLogFile(sessionId string, filename string, content string) string {
+ if MessageDir == "" || sessionId == "" {
+ return ""
+ }
+ sessionPrefix := GetSessionPrefix(sessionId)
+
+ sessionLogMutex.Lock()
+ defer sessionLogMutex.Unlock()
+
+ sessionPath := fmt.Sprintf("%s/%s", MessageDir, sessionPrefix)
+ if _, err := os.Stat(sessionPath); os.IsNotExist(err) {
+ if err := os.MkdirAll(sessionPath, 0o766); err != nil {
+ Error("Failed to create session directory", "dirpath", sessionPath, "error", err)
+ return ""
+ }
+ }
+
+ filePath := fmt.Sprintf("%s/%s", sessionPath, filename)
+
+ f, err := os.OpenFile(filePath, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
+ if err != nil {
+ Error("Failed to open session log file", "filepath", filePath, "error", err)
+ return ""
+ }
+ defer f.Close()
+
+ // Append chunk to file
+ _, err = f.WriteString(content)
+ if err != nil {
+ Error("Failed to write chunk to session log file", "filepath", filePath, "error", err)
+ return ""
+ }
+ return filePath
+}
+
+func WriteRequestMessageJson(sessionId string, requestSeqId int, message any) string {
+ if MessageDir == "" || sessionId == "" || requestSeqId <= 0 {
+ return ""
+ }
+ msgJson, err := json.Marshal(message)
+ if err != nil {
+ Error("Failed to marshal message", "session_id", sessionId, "request_seq_id", requestSeqId, "error", err)
+ return ""
+ }
+ return WriteRequestMessage(sessionId, requestSeqId, string(msgJson))
+}
+
+func WriteRequestMessage(sessionId string, requestSeqId int, message string) string {
+ if MessageDir == "" || sessionId == "" || requestSeqId <= 0 {
+ return ""
+ }
+ filename := fmt.Sprintf("%d_request.json", requestSeqId)
+
+ return AppendToSessionLogFile(sessionId, filename, message)
+}
+
+func AppendToStreamSessionLogJson(sessionId string, requestSeqId int, jsonableChunk any) string {
+ if MessageDir == "" || sessionId == "" || requestSeqId <= 0 {
+ return ""
+ }
+ chunkJson, err := json.Marshal(jsonableChunk)
+ if err != nil {
+ Error("Failed to marshal message", "session_id", sessionId, "request_seq_id", requestSeqId, "error", err)
+ return ""
+ }
+ return AppendToStreamSessionLog(sessionId, requestSeqId, string(chunkJson))
+}
+
+func AppendToStreamSessionLog(sessionId string, requestSeqId int, chunk string) string {
+ if MessageDir == "" || sessionId == "" || requestSeqId <= 0 {
+ return ""
+ }
+ filename := fmt.Sprintf("%d_response_stream.log", requestSeqId)
+ return AppendToSessionLogFile(sessionId, filename, chunk)
+}
+
+func WriteChatResponseJson(sessionId string, requestSeqId int, response any) string {
+ if MessageDir == "" || sessionId == "" || requestSeqId <= 0 {
+ return ""
+ }
+ responseJson, err := json.Marshal(response)
+ if err != nil {
+ Error("Failed to marshal response", "session_id", sessionId, "request_seq_id", requestSeqId, "error", err)
+ return ""
+ }
+ filename := fmt.Sprintf("%d_response.json", requestSeqId)
+
+ return AppendToSessionLogFile(sessionId, filename, string(responseJson))
+}
+
+func WriteToolResultsJson(sessionId string, requestSeqId int, toolResults any) string {
+ if MessageDir == "" || sessionId == "" || requestSeqId <= 0 {
+ return ""
+ }
+ toolResultsJson, err := json.Marshal(toolResults)
+ if err != nil {
+ Error("Failed to marshal tool results", "session_id", sessionId, "request_seq_id", requestSeqId, "error", err)
+ return ""
+ }
+ filename := fmt.Sprintf("%d_tool_results.json", requestSeqId)
+ return AppendToSessionLogFile(sessionId, filename, string(toolResultsJson))
+}
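Taken together, these helpers give each session its own debug directory under `MessageDir`; from the filename patterns above, a session's artifacts land roughly as:

```
<MessageDir>/<first 8 chars of session ID>/
├── 1_request.json          # WriteRequestMessageJson
├── 1_response_stream.log   # AppendToStreamSessionLog (appended chunks)
├── 1_response.json         # WriteChatResponseJson
└── 1_tool_results.json     # WriteToolResultsJson
```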
internal/logging/writer.go
@@ -45,6 +45,7 @@ type writer struct{}
func (w *writer) Write(p []byte) (int, error) {
d := logfmt.NewDecoder(bytes.NewReader(p))
+
for d.ScanRecord() {
msg := LogMessage{
ID: fmt.Sprintf("%d", time.Now().UnixNano()),
opencode-schema.json
@@ -77,7 +77,18 @@
"openrouter.o4-mini",
"openrouter.claude-3.5-haiku",
"claude-4-opus",
- "openrouter.o1-pro"
+ "openrouter.o1-pro",
+ "copilot.gpt-4o",
+ "copilot.gpt-4o-mini",
+ "copilot.gpt-4.1",
+ "copilot.claude-3.5-sonnet",
+ "copilot.claude-3.7-sonnet",
+ "copilot.claude-sonnet-4",
+ "copilot.o1",
+ "copilot.o3-mini",
+ "copilot.o4-mini",
+ "copilot.gemini-2.0-flash",
+ "copilot.gemini-2.5-pro"
],
"type": "string"
},
@@ -176,7 +187,18 @@
"openrouter.o4-mini",
"openrouter.claude-3.5-haiku",
"claude-4-opus",
- "openrouter.o1-pro"
+ "openrouter.o1-pro",
+ "copilot.gpt-4o",
+ "copilot.gpt-4o-mini",
+ "copilot.gpt-4.1",
+ "copilot.claude-3.5-sonnet",
+ "copilot.claude-3.7-sonnet",
+ "copilot.claude-sonnet-4",
+ "copilot.o1",
+ "copilot.o3-mini",
+ "copilot.o4-mini",
+ "copilot.gemini-2.0-flash",
+ "copilot.gemini-2.5-pro"
],
"type": "string"
},
@@ -360,7 +382,8 @@
"openrouter",
"bedrock",
"azure",
- "vertexai"
+ "vertexai",
+ "copilot"
],
"type": "string"
}