diff --git a/cmd/root.go b/cmd/root.go index ad558173c6eb1dd1bf4fdda30524ca7b04793ff5..b3ea36c8a976face0bb29c3d77e0d2c82dfb1399 100644 --- a/cmd/root.go +++ b/cmd/root.go @@ -6,14 +6,11 @@ import ( "io" "log/slog" "os" - "time" tea "github.com/charmbracelet/bubbletea/v2" "github.com/charmbracelet/crush/internal/app" "github.com/charmbracelet/crush/internal/config" "github.com/charmbracelet/crush/internal/db" - "github.com/charmbracelet/crush/internal/llm/agent" - "github.com/charmbracelet/crush/internal/log" "github.com/charmbracelet/crush/internal/tui" "github.com/charmbracelet/crush/internal/version" "github.com/charmbracelet/fang" @@ -92,9 +89,6 @@ to assist developers in writing, debugging, and understanding code directly from } defer app.Shutdown() - // Initialize MCP tools early for both modes - initMCPTools(ctx, app, cfg) - prompt, err = maybePrependStdin(prompt) if err != nil { slog.Error(fmt.Sprintf("Failed to read from stdin: %v", err)) @@ -126,20 +120,6 @@ to assist developers in writing, debugging, and understanding code directly from }, } -func initMCPTools(ctx context.Context, app *app.App, cfg *config.Config) { - go func() { - defer log.RecoverPanic("MCP-goroutine", nil) - - // Create a context with timeout for the initial MCP tools fetch - ctxWithTimeout, cancel := context.WithTimeout(ctx, 30*time.Second) - defer cancel() - - // Set this up once with proper error handling - agent.GetMcpTools(ctxWithTimeout, app.Permissions, cfg) - slog.Info("MCP message handling goroutine exiting") - }() -} - func Execute() { if err := fang.Execute( context.Background(), diff --git a/internal/fur/provider/provider.go b/internal/fur/provider/provider.go index 44a8cb419f55e80bf3eeb2d92b029ceec962c44a..2bfe95a5bc3db4f1e52feebcaf7d484f4d5de948 100644 --- a/internal/fur/provider/provider.go +++ b/internal/fur/provider/provider.go @@ -27,6 +27,7 @@ const ( InferenceProviderBedrock InferenceProvider = "bedrock" InferenceProviderVertexAI InferenceProvider = "vertexai" InferenceProviderXAI InferenceProvider = "xai" + InferenceProviderGROQ InferenceProvider = "groq" InferenceProviderOpenRouter InferenceProvider = "openrouter" ) @@ -68,6 +69,7 @@ func KnownProviders() []InferenceProvider { InferenceProviderBedrock, InferenceProviderVertexAI, InferenceProviderXAI, + InferenceProviderGROQ, InferenceProviderOpenRouter, } } diff --git a/internal/llm/agent/agent.go b/internal/llm/agent/agent.go index 7cd01e91900a6b0a2720a092e6740ab7d989fb6d..107bdd0a23529a8a1e441ab5d55714a0315c7473 100644 --- a/internal/llm/agent/agent.go +++ b/internal/llm/agent/agent.go @@ -94,7 +94,7 @@ func NewAgent( ) (Service, error) { ctx := context.Background() cfg := config.Get() - otherTools := GetMcpTools(ctx, permissions, cfg) + otherTools := GetMCPTools(ctx, permissions, cfg) if len(lspClients) > 0 { otherTools = append(otherTools, tools.NewDiagnosticsTool(lspClients)) } @@ -600,12 +600,17 @@ func (a *agent) processEvent(ctx context.Context, sessionID string, assistantMsg switch event.Type { case provider.EventThinkingDelta: - assistantMsg.AppendReasoningContent(event.Content) + assistantMsg.AppendReasoningContent(event.Thinking) + return a.messages.Update(ctx, *assistantMsg) + case provider.EventSignatureDelta: + assistantMsg.AppendReasoningSignature(event.Signature) return a.messages.Update(ctx, *assistantMsg) case provider.EventContentDelta: + assistantMsg.FinishThinking() assistantMsg.AppendContent(event.Content) return a.messages.Update(ctx, *assistantMsg) case provider.EventToolUseStart: + assistantMsg.FinishThinking() 
slog.Info("Tool call started", "toolCall", event.ToolCall) assistantMsg.AddToolCall(*event.ToolCall) return a.messages.Update(ctx, *assistantMsg) @@ -619,6 +624,7 @@ func (a *agent) processEvent(ctx context.Context, sessionID string, assistantMsg case provider.EventError: return event.Error case provider.EventComplete: + assistantMsg.FinishThinking() assistantMsg.SetToolCalls(event.Response.ToolCalls) assistantMsg.AddFinish(event.Response.FinishReason, "", "") if err := a.messages.Update(ctx, *assistantMsg); err != nil { diff --git a/internal/llm/agent/mcp-tools.go b/internal/llm/agent/mcp-tools.go index c655e01815c45959247ba0f02241232346dc166f..0165b0f7194d029a6dee9113f82877820ce96c00 100644 --- a/internal/llm/agent/mcp-tools.go +++ b/internal/llm/agent/mcp-tools.go @@ -5,6 +5,7 @@ import ( "encoding/json" "fmt" "log/slog" + "sync" "github.com/charmbracelet/crush/internal/config" "github.com/charmbracelet/crush/internal/llm/tools" @@ -154,8 +155,6 @@ func NewMcpTool(name string, tool mcp.Tool, permissions permission.Service, mcpC } } -var mcpTools []tools.BaseTool - func getTools(ctx context.Context, name string, m config.MCPConfig, permissions permission.Service, c MCPClient, workingDir string) []tools.BaseTool { var stdioTools []tools.BaseTool initRequest := mcp.InitializeRequest{} @@ -183,52 +182,72 @@ func getTools(ctx context.Context, name string, m config.MCPConfig, permissions return stdioTools } -func GetMcpTools(ctx context.Context, permissions permission.Service, cfg *config.Config) []tools.BaseTool { - if len(mcpTools) > 0 { - return mcpTools - } +var ( + mcpToolsOnce sync.Once + mcpTools []tools.BaseTool +) + +func GetMCPTools(ctx context.Context, permissions permission.Service, cfg *config.Config) []tools.BaseTool { + mcpToolsOnce.Do(func() { + mcpTools = doGetMCPTools(ctx, permissions, cfg) + }) + return mcpTools +} + +func doGetMCPTools(ctx context.Context, permissions permission.Service, cfg *config.Config) []tools.BaseTool { + var mu sync.Mutex + var wg sync.WaitGroup + var result []tools.BaseTool for name, m := range cfg.MCP { if m.Disabled { slog.Debug("skipping disabled mcp", "name", name) continue } - - switch m.Type { - case config.MCPStdio: - c, err := client.NewStdioMCPClient( - m.Command, - m.ResolvedEnv(), - m.Args..., - ) - if err != nil { - slog.Error("error creating mcp client", "error", err) - continue - } - - mcpTools = append(mcpTools, getTools(ctx, name, m, permissions, c, cfg.WorkingDir())...) - case config.MCPHttp: - slog.Info("creating mcp client", "name", name, "url", m.URL, "headers", m.ResolvedHeaders()) - c, err := client.NewStreamableHttpClient( - m.URL, - transport.WithHTTPHeaders(m.ResolvedHeaders()), - ) - if err != nil { - slog.Error("error creating mcp client", "error", err) - continue - } - mcpTools = append(mcpTools, getTools(ctx, name, m, permissions, c, cfg.WorkingDir())...) - case config.MCPSse: - c, err := client.NewSSEMCPClient( - m.URL, - client.WithHeaders(m.ResolvedHeaders()), - ) - if err != nil { - slog.Error("error creating mcp client", "error", err) - continue + wg.Add(1) + go func(name string, m config.MCPConfig) { + defer wg.Done() + switch m.Type { + case config.MCPStdio: + c, err := client.NewStdioMCPClient( + m.Command, + m.ResolvedEnv(), + m.Args..., + ) + if err != nil { + slog.Error("error creating mcp client", "error", err) + return + } + + mu.Lock() + result = append(result, getTools(ctx, name, m, permissions, c, cfg.WorkingDir())...) 
+ mu.Unlock() + case config.MCPHttp: + c, err := client.NewStreamableHttpClient( + m.URL, + transport.WithHTTPHeaders(m.ResolvedHeaders()), + ) + if err != nil { + slog.Error("error creating mcp client", "error", err) + return + } + mu.Lock() + result = append(result, getTools(ctx, name, m, permissions, c, cfg.WorkingDir())...) + mu.Unlock() + case config.MCPSse: + c, err := client.NewSSEMCPClient( + m.URL, + client.WithHeaders(m.ResolvedHeaders()), + ) + if err != nil { + slog.Error("error creating mcp client", "error", err) + return + } + mu.Lock() + result = append(result, getTools(ctx, name, m, permissions, c, cfg.WorkingDir())...) + mu.Unlock() } - mcpTools = append(mcpTools, getTools(ctx, name, m, permissions, c, cfg.WorkingDir())...) - } + }(name, m) } - - return mcpTools + wg.Wait() + return result } diff --git a/internal/llm/prompt/coder.go b/internal/llm/prompt/coder.go index dfe2068cd45edf515291b2d759fac4e133912980..f4284faccee052e82e8ed82a820b16af58ccc64c 100644 --- a/internal/llm/prompt/coder.go +++ b/internal/llm/prompt/coder.go @@ -74,7 +74,7 @@ When making changes to files, first understand the file's code conventions. Mimi - Always follow security best practices. Never introduce code that exposes or logs secrets and keys. Never commit secrets or keys to the repository. # Code style -- Do not add comments to the code you write, unless the user asks you to, or the code is complex and requires additional context. +- IMPORTANT: DO NOT ADD ***ANY*** COMMENTS unless asked - If completing the user's task requires writing or modifying files: - Your code and final answer should follow these _CODING GUIDELINES_: @@ -204,7 +204,7 @@ When making changes to files, first understand the file's code conventions. Mimi - Always follow security best practices. Never introduce code that exposes or logs secrets and keys. Never commit secrets or keys to the repository. # Code style -- Do not add comments to the code you write, unless the user asks you to, or the code is complex and requires additional context. +- IMPORTANT: DO NOT ADD ***ANY*** COMMENTS unless asked # Doing tasks The user will primarily request you perform software engineering tasks. This includes solving bugs, adding new functionality, refactoring code, explaining code, and more. For these tasks the following steps are recommended: @@ -249,6 +249,9 @@ When you spend time searching for commands to typecheck, lint, build, or test, y - **Explaining Changes:** After completing a code modification or file operation *do not* provide summaries unless asked. - **Do Not revert changes:** Do not revert changes to the codebase unless asked to do so by the user. Only revert changes made by you if they have resulted in an error or if the user has explicitly asked you to revert the changes. 
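The mcp-tools.go rewrite above drops the racy `if len(mcpTools) > 0` memoization in favor of a `sync.Once` guard, and fans the per-server setup out into goroutines that append to a shared slice under a mutex. A minimal, self-contained sketch of that shape, with a hypothetical `fetchServerTools` standing in for the real client construction and `getTools` call:

```go
package main

import (
	"fmt"
	"sync"
)

var (
	toolsOnce sync.Once
	toolCache []string
)

// fetchServerTools is a stand-in for building an MCP client and listing its tools.
func fetchServerTools(name string) []string {
	return []string{name + "/read", name + "/write"}
}

// CachedTools initializes the tool list exactly once, even with concurrent callers.
func CachedTools(servers []string) []string {
	toolsOnce.Do(func() {
		var (
			mu sync.Mutex
			wg sync.WaitGroup
		)
		for _, name := range servers {
			wg.Add(1)
			go func(name string) {
				defer wg.Done()
				tools := fetchServerTools(name) // each server is contacted concurrently
				mu.Lock()
				toolCache = append(toolCache, tools...)
				mu.Unlock()
			}(name)
		}
		wg.Wait()
	})
	return toolCache
}

func main() {
	fmt.Println(CachedTools([]string{"filesystem", "github"}))
}
```

The first caller pays for the parallel fetch and later callers reuse the cached slice, which matches how `GetMCPTools` is now invoked lazily from `NewAgent` instead of eagerly from `cmd/root.go`.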
+# Code style +- IMPORTANT: DO NOT ADD ***ANY*** COMMENTS unless asked + # Primary Workflows ## Software Engineering Tasks diff --git a/internal/llm/provider/anthropic.go b/internal/llm/provider/anthropic.go index 8e8e3237f55d58fa995d15baf60400a485ec95a2..a65f0b752367ca7b2e62f9dd263a7dd6e5ce7a53 100644 --- a/internal/llm/provider/anthropic.go +++ b/internal/llm/provider/anthropic.go @@ -9,6 +9,7 @@ import ( "log/slog" "regexp" "strconv" + "strings" "time" "github.com/anthropics/anthropic-sdk-go" @@ -72,6 +73,13 @@ func (a *anthropicClient) convertMessages(messages []message.Message) (anthropic case message.Assistant: blocks := []anthropic.ContentBlockParamUnion{} + + // Add thinking blocks first if present (required when thinking is enabled with tool use) + if reasoningContent := msg.ReasoningContent(); reasoningContent.Thinking != "" { + thinkingBlock := anthropic.NewThinkingBlock(reasoningContent.Signature, reasoningContent.Thinking) + blocks = append(blocks, thinkingBlock) + } + if msg.Content().String() != "" { content := anthropic.NewTextBlock(msg.Content().String()) if cache && !a.providerOptions.disableCache { @@ -159,16 +167,14 @@ func (a *anthropicClient) preparedMessages(messages []anthropic.MessageParam, to } temperature := anthropic.Float(0) - if a.Model().CanReason && modelConfig.Think { - thinkingParam = anthropic.ThinkingConfigParamOfEnabled(int64(float64(a.providerOptions.maxTokens) * 0.8)) - temperature = anthropic.Float(1) - } - maxTokens := model.DefaultMaxTokens if modelConfig.MaxTokens > 0 { maxTokens = modelConfig.MaxTokens } - + if a.Model().CanReason && modelConfig.Think { + thinkingParam = anthropic.ThinkingConfigParamOfEnabled(int64(float64(maxTokens) * 0.8)) + temperature = anthropic.Float(1) + } // Override max tokens if set in provider options if a.providerOptions.maxTokens > 0 { maxTokens = a.providerOptions.maxTokens @@ -265,6 +271,7 @@ func (a *anthropicClient) stream(ctx context.Context, messages []message.Message anthropicStream := a.client.Messages.NewStreaming( ctx, preparedMessages, + option.WithHeaderAdd("anthropic-beta", "interleaved-thinking-2025-05-14"), ) accumulatedMessage := anthropic.Message{} @@ -300,6 +307,11 @@ func (a *anthropicClient) stream(ctx context.Context, messages []message.Message Type: EventThinkingDelta, Thinking: event.Delta.Thinking, } + } else if event.Delta.Type == "signature_delta" && event.Delta.Signature != "" { + eventChan <- ProviderEvent{ + Type: EventSignatureDelta, + Signature: event.Delta.Signature, + } } else if event.Delta.Type == "text_delta" && event.Delta.Text != "" { eventChan <- ProviderEvent{ Type: EventContentDelta, @@ -416,7 +428,8 @@ func (a *anthropicClient) shouldRetry(attempts int, err error) (bool, int64, err } } - if apiErr.StatusCode != 429 && apiErr.StatusCode != 529 { + isOverloaded := strings.Contains(apiErr.Error(), "overloaded") || strings.Contains(apiErr.Error(), "rate limit exceeded") + if apiErr.StatusCode != 429 && apiErr.StatusCode != 529 && !isOverloaded { return false, 0, err } diff --git a/internal/llm/provider/openai.go b/internal/llm/provider/openai.go index 51baaf3a27a0596483bef18663cd2ded2ead0af9..4afac2c70809d6c98e0aa35022c296c3d95ef05e 100644 --- a/internal/llm/provider/openai.go +++ b/internal/llm/provider/openai.go @@ -77,13 +77,16 @@ func (o *openaiClient) convertMessages(messages []message.Message) (openaiMessag Role: "assistant", } + hasContent := false if msg.Content().String() != "" { + hasContent = true assistantMsg.Content = 
openai.ChatCompletionAssistantMessageParamContentUnion{ OfString: openai.String(msg.Content().String()), } } if len(msg.ToolCalls()) > 0 { + hasContent = true assistantMsg.ToolCalls = make([]openai.ChatCompletionMessageToolCallParam, len(msg.ToolCalls())) for i, call := range msg.ToolCalls() { assistantMsg.ToolCalls[i] = openai.ChatCompletionMessageToolCallParam{ @@ -96,6 +99,10 @@ func (o *openaiClient) convertMessages(messages []message.Message) (openaiMessag } } } + if !hasContent { + slog.Warn("There is a message without content, investigate, this should not happen") + continue + } openaiMessages = append(openaiMessages, openai.ChatCompletionMessageParamUnion{ OfAssistant: &assistantMsg, @@ -224,6 +231,10 @@ func (o *openaiClient) send(ctx context.Context, messages []message.Message, too return nil, retryErr } + if len(openaiResponse.Choices) == 0 { + return nil, fmt.Errorf("received empty response from OpenAI API - check endpoint configuration") + } + content := "" if openaiResponse.Choices[0].Message.Content != "" { content = openaiResponse.Choices[0].Message.Content @@ -324,7 +335,9 @@ func (o *openaiClient) stream(ctx context.Context, messages []message.Message, t } if choice.FinishReason == "tool_calls" { msgToolCalls = append(msgToolCalls, currentToolCall) - acc.Choices[0].Message.ToolCalls = msgToolCalls + if len(acc.Choices) > 0 { + acc.Choices[0].Message.ToolCalls = msgToolCalls + } } } } @@ -336,7 +349,15 @@ func (o *openaiClient) stream(ctx context.Context, messages []message.Message, t slog.Debug("Response", "messages", string(jsonData)) } - resultFinishReason := acc.ChatCompletion.Choices[0].FinishReason + if len(acc.Choices) == 0 { + eventChan <- ProviderEvent{ + Type: EventError, + Error: fmt.Errorf("received empty streaming response from OpenAI API - check endpoint configuration"), + } + return + } + + resultFinishReason := acc.Choices[0].FinishReason if resultFinishReason == "" { // If the finish reason is empty, we assume it was a successful completion // INFO: this is happening for openrouter for some reason diff --git a/internal/llm/provider/openai_test.go b/internal/llm/provider/openai_test.go new file mode 100644 index 0000000000000000000000000000000000000000..c11e8ff14d7995859cccd3c95eeae4008fb20ac9 --- /dev/null +++ b/internal/llm/provider/openai_test.go @@ -0,0 +1,91 @@ +package provider + +import ( + "context" + "encoding/json" + "net/http" + "net/http/httptest" + "os" + "testing" + "time" + + "github.com/charmbracelet/crush/internal/config" + "github.com/charmbracelet/crush/internal/fur/provider" + "github.com/charmbracelet/crush/internal/llm/tools" + "github.com/charmbracelet/crush/internal/message" + "github.com/openai/openai-go" + "github.com/openai/openai-go/option" +) + +func TestMain(m *testing.M) { + _, err := config.Init(".", true) + if err != nil { + panic("Failed to initialize config: " + err.Error()) + } + + os.Exit(m.Run()) +} + +func TestOpenAIClientStreamChoices(t *testing.T) { + // Create a mock server that returns Server-Sent Events with empty choices + // This simulates the 🤡 behavior when a server returns 200 instead of 404 + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "text/event-stream") + w.Header().Set("Cache-Control", "no-cache") + w.Header().Set("Connection", "keep-alive") + w.WriteHeader(http.StatusOK) + + emptyChoicesChunk := map[string]any{ + "id": "chat-completion-test", + "object": "chat.completion.chunk", + "created": time.Now().Unix(), + "model": 
"test-model", + "choices": []any{}, // Empty choices array that causes panic + } + + jsonData, _ := json.Marshal(emptyChoicesChunk) + w.Write([]byte("data: " + string(jsonData) + "\n\n")) + w.Write([]byte("data: [DONE]\n\n")) + })) + defer server.Close() + + // Create OpenAI client pointing to our mock server + client := &openaiClient{ + providerOptions: providerClientOptions{ + modelType: config.SelectedModelTypeLarge, + apiKey: "test-key", + systemMessage: "test", + model: func(config.SelectedModelType) provider.Model { + return provider.Model{ + ID: "test-model", + Model: "test-model", + } + }, + }, + client: openai.NewClient( + option.WithAPIKey("test-key"), + option.WithBaseURL(server.URL), + ), + } + + // Create test messages + messages := []message.Message{ + { + Role: message.User, + Parts: []message.ContentPart{message.TextContent{Text: "Hello"}}, + }, + } + + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + + eventsChan := client.stream(ctx, messages, []tools.BaseTool{}) + + // Collect events - this will panic without the bounds check + for event := range eventsChan { + t.Logf("Received event: %+v", event) + if event.Type == EventError || event.Type == EventComplete { + break + } + } +} diff --git a/internal/llm/provider/provider.go b/internal/llm/provider/provider.go index 193affc2a2b5a6dcdecee596a839882c40f70a42..12dd09392942b0c00e7caa975deefffa994b47b8 100644 --- a/internal/llm/provider/provider.go +++ b/internal/llm/provider/provider.go @@ -15,16 +15,17 @@ type EventType string const maxRetries = 8 const ( - EventContentStart EventType = "content_start" - EventToolUseStart EventType = "tool_use_start" - EventToolUseDelta EventType = "tool_use_delta" - EventToolUseStop EventType = "tool_use_stop" - EventContentDelta EventType = "content_delta" - EventThinkingDelta EventType = "thinking_delta" - EventContentStop EventType = "content_stop" - EventComplete EventType = "complete" - EventError EventType = "error" - EventWarning EventType = "warning" + EventContentStart EventType = "content_start" + EventToolUseStart EventType = "tool_use_start" + EventToolUseDelta EventType = "tool_use_delta" + EventToolUseStop EventType = "tool_use_stop" + EventContentDelta EventType = "content_delta" + EventThinkingDelta EventType = "thinking_delta" + EventSignatureDelta EventType = "signature_delta" + EventContentStop EventType = "content_stop" + EventComplete EventType = "complete" + EventError EventType = "error" + EventWarning EventType = "warning" ) type TokenUsage struct { @@ -44,11 +45,12 @@ type ProviderResponse struct { type ProviderEvent struct { Type EventType - Content string - Thinking string - Response *ProviderResponse - ToolCall *message.ToolCall - Error error + Content string + Thinking string + Signature string + Response *ProviderResponse + ToolCall *message.ToolCall + Error error } type Provider interface { SendMessages(ctx context.Context, messages []message.Message, tools []tools.BaseTool) (*ProviderResponse, error) diff --git a/internal/message/content.go b/internal/message/content.go index b8d2c1aa370559977f4c8eb80803ab5fbfe83cf9..bdaf1577e34a4667bdb5c8cd2683865ec5cd08ac 100644 --- a/internal/message/content.go +++ b/internal/message/content.go @@ -36,7 +36,10 @@ type ContentPart interface { } type ReasoningContent struct { - Thinking string `json:"thinking"` + Thinking string `json:"thinking"` + Signature string `json:"signature"` + StartedAt int64 `json:"started_at,omitempty"` + FinishedAt int64 `json:"finished_at,omitempty"` } 
func (tc ReasoningContent) String() string { @@ -230,15 +233,68 @@ func (m *Message) AppendReasoningContent(delta string) { found := false for i, part := range m.Parts { if c, ok := part.(ReasoningContent); ok { - m.Parts[i] = ReasoningContent{Thinking: c.Thinking + delta} + m.Parts[i] = ReasoningContent{ + Thinking: c.Thinking + delta, + Signature: c.Signature, + StartedAt: c.StartedAt, + FinishedAt: c.FinishedAt, + } found = true } } if !found { - m.Parts = append(m.Parts, ReasoningContent{Thinking: delta}) + m.Parts = append(m.Parts, ReasoningContent{ + Thinking: delta, + StartedAt: time.Now().Unix(), + }) + } +} + +func (m *Message) AppendReasoningSignature(signature string) { + for i, part := range m.Parts { + if c, ok := part.(ReasoningContent); ok { + m.Parts[i] = ReasoningContent{ + Thinking: c.Thinking, + Signature: c.Signature + signature, + StartedAt: c.StartedAt, + FinishedAt: c.FinishedAt, + } + return + } + } + m.Parts = append(m.Parts, ReasoningContent{Signature: signature}) +} + +func (m *Message) FinishThinking() { + for i, part := range m.Parts { + if c, ok := part.(ReasoningContent); ok { + if c.FinishedAt == 0 { + m.Parts[i] = ReasoningContent{ + Thinking: c.Thinking, + Signature: c.Signature, + StartedAt: c.StartedAt, + FinishedAt: time.Now().Unix(), + } + } + return + } } } +func (m *Message) ThinkingDuration() time.Duration { + reasoning := m.ReasoningContent() + if reasoning.StartedAt == 0 { + return 0 + } + + endTime := reasoning.FinishedAt + if endTime == 0 { + endTime = time.Now().Unix() + } + + return time.Duration(endTime-reasoning.StartedAt) * time.Second +} + func (m *Message) FinishToolCall(toolCallID string) { for i, part := range m.Parts { if c, ok := part.(ToolCall); ok { diff --git a/internal/tui/components/anim/anim.go b/internal/tui/components/anim/anim.go index 63d365b2d5f3adf138a61a91db0b90f9edd1688d..07d02483d0b470b6b4cadf36fbe5acd52e8857ba 100644 --- a/internal/tui/components/anim/anim.go +++ b/internal/tui/components/anim/anim.go @@ -80,6 +80,7 @@ type Anim struct { cyclingCharWidth int label []string labelWidth int + labelColor color.Color startTime time.Time birthOffsets []time.Duration initialFrames [][]string // frames for the initial characters @@ -112,6 +113,7 @@ func New(opts Settings) (a Anim) { a.startTime = time.Now() a.cyclingCharWidth = opts.Size a.labelWidth = lipgloss.Width(opts.Label) + a.labelColor = opts.LabelColor // Total width of anim, in cells. a.width = opts.Size @@ -119,25 +121,8 @@ func New(opts Settings) (a Anim) { a.width += labelGapWidth + lipgloss.Width(opts.Label) } - if a.labelWidth > 0 { - // Pre-render the label. - // XXX: We should really get the graphemes for the label, not the runes. - labelRunes := []rune(opts.Label) - a.label = make([]string, len(labelRunes)) - for i := range a.label { - a.label[i] = lipgloss.NewStyle(). - Foreground(opts.LabelColor). - Render(string(labelRunes[i])) - } - - // Pre-render the ellipsis frames which come after the label. - a.ellipsisFrames = make([]string, len(ellipsisFrames)) - for i, frame := range ellipsisFrames { - a.ellipsisFrames[i] = lipgloss.NewStyle(). - Foreground(opts.LabelColor). - Render(frame) - } - } + // Render the label + a.renderLabel(opts.Label) // Pre-generate gradient. var ramp []color.Color @@ -208,6 +193,45 @@ func New(opts Settings) (a Anim) { return a } +// SetLabel updates the label text and re-renders it. 
+func (a *Anim) SetLabel(newLabel string) { + a.labelWidth = lipgloss.Width(newLabel) + + // Update total width + a.width = a.cyclingCharWidth + if newLabel != "" { + a.width += labelGapWidth + a.labelWidth + } + + // Re-render the label + a.renderLabel(newLabel) +} + +// renderLabel renders the label with the current label color. +func (a *Anim) renderLabel(label string) { + if a.labelWidth > 0 { + // Pre-render the label. + labelRunes := []rune(label) + a.label = make([]string, len(labelRunes)) + for i := range a.label { + a.label[i] = lipgloss.NewStyle(). + Foreground(a.labelColor). + Render(string(labelRunes[i])) + } + + // Pre-render the ellipsis frames which come after the label. + a.ellipsisFrames = make([]string, len(ellipsisFrames)) + for i, frame := range ellipsisFrames { + a.ellipsisFrames[i] = lipgloss.NewStyle(). + Foreground(a.labelColor). + Render(frame) + } + } else { + a.label = nil + a.ellipsisFrames = nil + } +} + // Width returns the total width of the animation. func (a Anim) Width() (w int) { w = a.width diff --git a/internal/tui/components/chat/chat.go b/internal/tui/components/chat/chat.go index 71f6e1e66ed7d6d1ad80486c1017d02af14b11f4..091231039c71e24b918a755d56ba0a0de27ae509 100644 --- a/internal/tui/components/chat/chat.go +++ b/internal/tui/components/chat/chat.go @@ -304,14 +304,15 @@ func (m *messageListCmp) updateAssistantMessageContent(msg message.Message, assi shouldShowMessage := m.shouldShowAssistantMessage(msg) hasToolCallsOnly := len(msg.ToolCalls()) > 0 && msg.Content().Text == "" + var cmd tea.Cmd if shouldShowMessage { + items := m.listCmp.Items() + uiMsg := items[assistantIndex].(messages.MessageCmp) + uiMsg.SetMessage(msg) m.listCmp.UpdateItem( assistantIndex, - messages.NewMessageCmp( - msg, - ), + uiMsg, ) - if msg.FinishPart() != nil && msg.FinishPart().Reason == message.FinishReasonEndTurn { m.listCmp.AppendItem( messages.NewAssistantSection( @@ -324,12 +325,12 @@ func (m *messageListCmp) updateAssistantMessageContent(msg message.Message, assi m.listCmp.DeleteItem(assistantIndex) } - return nil + return cmd } // shouldShowAssistantMessage determines if an assistant message should be displayed. func (m *messageListCmp) shouldShowAssistantMessage(msg message.Message) bool { - return len(msg.ToolCalls()) == 0 || msg.Content().Text != "" || msg.IsThinking() + return len(msg.ToolCalls()) == 0 || msg.Content().Text != "" || msg.ReasoningContent().Thinking != "" || msg.IsThinking() } // updateToolCalls handles updates to tool calls, updating existing ones and adding new ones. diff --git a/internal/tui/components/chat/messages/messages.go b/internal/tui/components/chat/messages/messages.go index bfb8af47b6bd13eb2e1e9fb844b1935a6fccbd4d..078caf7dcd941eebab7d1a2989b25539c41989e4 100644 --- a/internal/tui/components/chat/messages/messages.go +++ b/internal/tui/components/chat/messages/messages.go @@ -6,6 +6,7 @@ import ( "strings" "time" + "github.com/charmbracelet/bubbles/v2/viewport" tea "github.com/charmbracelet/bubbletea/v2" "github.com/charmbracelet/lipgloss/v2" "github.com/charmbracelet/x/ansi" @@ -24,11 +25,12 @@ import ( // MessageCmp defines the interface for message components in the chat interface. // It combines standard UI model interfaces with message-specific functionality. 
type MessageCmp interface { - util.Model // Basic Bubble Tea model interface - layout.Sizeable // Width/height management - layout.Focusable // Focus state management - GetMessage() message.Message // Access to underlying message data - Spinning() bool // Animation state for loading messages + util.Model // Basic Bubble Tea model interface + layout.Sizeable // Width/height management + layout.Focusable // Focus state management + GetMessage() message.Message // Access to underlying message data + SetMessage(msg message.Message) // Update the message content + Spinning() bool // Animation state for loading messages } // messageCmp implements the MessageCmp interface for displaying chat messages. @@ -41,7 +43,10 @@ type messageCmp struct { // Core message data and state message message.Message // The underlying message content spinning bool // Whether to show loading animation - anim util.Model // Animation component for loading states + anim anim.Anim // Animation component for loading states + + // Thinking viewport for displaying reasoning content + thinkingViewport viewport.Model } var focusedMessageBorder = lipgloss.Border{ @@ -51,6 +56,11 @@ var focusedMessageBorder = lipgloss.Border{ // NewMessageCmp creates a new message component with the given message and options func NewMessageCmp(msg message.Message) MessageCmp { t := styles.CurrentTheme() + + thinkingViewport := viewport.New() + thinkingViewport.SetHeight(1) + thinkingViewport.KeyMap = viewport.KeyMap{} + m := &messageCmp{ message: msg, anim: anim.New(anim.Settings{ @@ -59,6 +69,7 @@ func NewMessageCmp(msg message.Message) MessageCmp { GradColorB: t.Secondary, CycleColors: true, }), + thinkingViewport: thinkingViewport, } return m } @@ -78,7 +89,7 @@ func (m *messageCmp) Update(msg tea.Msg) (tea.Model, tea.Cmd) { m.spinning = m.shouldSpin() if m.spinning { u, cmd := m.anim.Update(msg) - m.anim = u.(util.Model) + m.anim = u.(anim.Anim) return m, cmd } } @@ -88,7 +99,7 @@ func (m *messageCmp) Update(msg tea.Msg) (tea.Model, tea.Cmd) { // View renders the message component based on its current state. // Returns different views for spinning, user, and assistant messages. func (m *messageCmp) View() string { - if m.spinning { + if m.spinning && m.message.ReasoningContent().Thinking == "" { return m.style().PaddingLeft(1).Render(m.anim.View()) } if m.message.ID != "" { @@ -108,6 +119,10 @@ func (m *messageCmp) GetMessage() message.Message { return m.message } +func (m *messageCmp) SetMessage(msg message.Message) { + m.message = msg +} + // textWidth calculates the available width for text content, // accounting for borders and padding func (m *messageCmp) textWidth() int { @@ -139,8 +154,39 @@ func (msg *messageCmp) style() lipgloss.Style { // renderAssistantMessage renders assistant messages with optional footer information. // Shows model name, response time, and finish reason when the message is complete. 
func (m *messageCmp) renderAssistantMessage() string { - parts := []string{ - m.markdownContent(), + t := styles.CurrentTheme() + parts := []string{} + content := m.message.Content().String() + thinking := m.message.IsThinking() + finished := m.message.IsFinished() + finishedData := m.message.FinishPart() + thinkingContent := "" + + if thinking || m.message.ReasoningContent().Thinking != "" { + m.anim.SetLabel("Thinking") + thinkingContent = m.renderThinkingContent() + } else if finished && content == "" && finishedData.Reason == message.FinishReasonEndTurn { + content = "" + } else if finished && content == "" && finishedData.Reason == message.FinishReasonCanceled { + content = "*Canceled*" + } else if finished && content == "" && finishedData.Reason == message.FinishReasonError { + errTag := t.S().Base.Padding(0, 1).Background(t.Red).Foreground(t.White).Render("ERROR") + truncated := ansi.Truncate(finishedData.Message, m.textWidth()-2-lipgloss.Width(errTag), "...") + title := fmt.Sprintf("%s %s", errTag, t.S().Base.Foreground(t.FgHalfMuted).Render(truncated)) + details := t.S().Base.Foreground(t.FgSubtle).Width(m.textWidth() - 2).Render(finishedData.Details) + // Handle error messages differently + return fmt.Sprintf("%s\n\n%s", title, details) + } + + if thinkingContent != "" { + parts = append(parts, thinkingContent) + } + + if content != "" { + if thinkingContent != "" { + parts = append(parts, "") + } + parts = append(parts, m.toMarkdown(content)) } joined := lipgloss.JoinVertical(lipgloss.Left, parts...) @@ -152,7 +198,7 @@ func (m *messageCmp) renderAssistantMessage() string { func (m *messageCmp) renderUserMessage() string { t := styles.CurrentTheme() parts := []string{ - m.markdownContent(), + m.toMarkdown(m.message.Content().String()), } attachmentStyles := t.S().Text. MarginLeft(1). @@ -182,34 +228,46 @@ func (m *messageCmp) toMarkdown(content string) string { return strings.TrimSuffix(rendered, "\n") } -// markdownContent processes the message content and handles special states. -// Returns appropriate content for thinking, finished, and error states. -func (m *messageCmp) markdownContent() string { +func (m *messageCmp) renderThinkingContent() string { t := styles.CurrentTheme() - content := m.message.Content().String() - if m.message.Role == message.Assistant { - thinking := m.message.IsThinking() - finished := m.message.IsFinished() - finishedData := m.message.FinishPart() - if thinking { - // Handle the thinking state - // TODO: maybe add the thinking content if available later. 
- content = fmt.Sprintf("**%s %s**", styles.LoadingIcon, "Thinking...") - } else if finished && content == "" && finishedData.Reason == message.FinishReasonEndTurn { - // Sometimes the LLMs respond with no content when they think the previous tool result - // provides the requested question - content = "" - } else if finished && content == "" && finishedData.Reason == message.FinishReasonCanceled { - content = "*Canceled*" - } else if finished && content == "" && finishedData.Reason == message.FinishReasonError { - errTag := t.S().Base.Padding(0, 1).Background(t.Red).Foreground(t.White).Render("ERROR") - truncated := ansi.Truncate(finishedData.Message, m.textWidth()-2-lipgloss.Width(errTag), "...") - title := fmt.Sprintf("%s %s", errTag, t.S().Base.Foreground(t.FgHalfMuted).Render(truncated)) - details := t.S().Base.Foreground(t.FgSubtle).Width(m.textWidth() - 2).Render(finishedData.Details) - return fmt.Sprintf("%s\n\n%s", title, details) + reasoningContent := m.message.ReasoningContent() + if reasoningContent.Thinking == "" { + return "" + } + lines := strings.Split(reasoningContent.Thinking, "\n") + var content strings.Builder + lineStyle := t.S().Subtle.Background(t.BgBaseLighter) + for i, line := range lines { + if line == "" { + continue + } + content.WriteString(lineStyle.Width(m.textWidth() - 2).Render(line)) + if i < len(lines)-1 { + content.WriteString("\n") } } - return m.toMarkdown(content) + fullContent := content.String() + height := util.Clamp(lipgloss.Height(fullContent), 1, 10) + m.thinkingViewport.SetHeight(height) + m.thinkingViewport.SetWidth(m.textWidth()) + m.thinkingViewport.SetContent(fullContent) + m.thinkingViewport.GotoBottom() + var footer string + if reasoningContent.StartedAt > 0 { + duration := m.message.ThinkingDuration() + if reasoningContent.FinishedAt > 0 { + m.anim.SetLabel("") + opts := core.StatusOpts{ + Title: "Thought for", + Description: duration.String(), + NoIcon: true, + } + footer = t.S().Base.PaddingLeft(1).Render(core.Status(opts, m.textWidth()-1)) + } else { + footer = m.anim.View() + } + } + return lineStyle.Width(m.textWidth()).Padding(0, 1).Render(m.thinkingViewport.View()) + "\n\n" + footer } // shouldSpin determines whether the message should show a loading animation. 
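renderThinkingContent above feeds the reasoning text into a viewport whose height is clamped to at most ten rows and pinned to the bottom, so the newest thinking stays visible while the stream grows. A bubbles-free sketch of that clamp-and-tail behavior, with a local `clamp` standing in for `util.Clamp`:

```go
package main

import (
	"fmt"
	"strings"
)

func clamp(v, lo, hi int) int {
	return max(lo, min(v, hi))
}

// tailView returns the last lines of content that fit in a window clamped to
// [1, maxRows] rows - the same "grow up to a limit, then follow the bottom"
// behavior the thinking viewport gives the streamed reasoning text.
func tailView(content string, maxRows int) string {
	lines := strings.Split(strings.TrimRight(content, "\n"), "\n")
	rows := clamp(len(lines), 1, maxRows)
	return strings.Join(lines[len(lines)-rows:], "\n")
}

func main() {
	var thinking strings.Builder
	for i := 1; i <= 15; i++ {
		fmt.Fprintf(&thinking, "reasoning step %d\n", i)
	}
	// Only the last 10 steps are shown, mirroring GotoBottom on the clamped viewport.
	fmt.Println(tailView(thinking.String(), 10))
}
```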
@@ -226,11 +284,12 @@ func (m *messageCmp) shouldSpin() bool { if m.message.Content().Text != "" { return false } + if len(m.message.ToolCalls()) > 0 { + return false + } return true } -// Focus management methods - // Blur removes focus from the message component func (m *messageCmp) Blur() tea.Cmd { m.focused = false @@ -257,8 +316,8 @@ func (m *messageCmp) GetSize() (int, int) { // SetSize updates the width of the message component for text wrapping func (m *messageCmp) SetSize(width int, height int) tea.Cmd { - // For better readability, we limit the width to a maximum of 120 characters - m.width = min(width, 120) + m.width = util.Clamp(width, 1, 120) + m.thinkingViewport.SetWidth(m.width - 4) return nil } diff --git a/internal/tui/components/chat/splash/splash.go b/internal/tui/components/chat/splash/splash.go index c05e2d9947222298dd141c9e762d33f99883b467..0ba04f6d16f2b93ac5556cd204fe63bcca5594e2 100644 --- a/internal/tui/components/chat/splash/splash.go +++ b/internal/tui/components/chat/splash/splash.go @@ -105,6 +105,7 @@ func (s *splashCmp) SetOnboarding(onboarding bool) { "openai", "gemini", "xai", + "groq", "openrouter", } for _, p := range providers { diff --git a/internal/tui/components/dialogs/commands/commands.go b/internal/tui/components/dialogs/commands/commands.go index 10cdbbd539f06836550b7da6a857d35db3becd74..a14138ff51ecf8164cf0fc595c758b0247aa3277 100644 --- a/internal/tui/components/dialogs/commands/commands.go +++ b/internal/tui/components/dialogs/commands/commands.go @@ -6,6 +6,8 @@ import ( tea "github.com/charmbracelet/bubbletea/v2" "github.com/charmbracelet/lipgloss/v2" + "github.com/charmbracelet/crush/internal/config" + "github.com/charmbracelet/crush/internal/fur/provider" "github.com/charmbracelet/crush/internal/llm/prompt" "github.com/charmbracelet/crush/internal/tui/components/chat" "github.com/charmbracelet/crush/internal/tui/components/completions" @@ -58,6 +60,7 @@ type ( SwitchSessionsMsg struct{} SwitchModelMsg struct{} ToggleCompactModeMsg struct{} + ToggleThinkingMsg struct{} CompactMsg struct { SessionID string } @@ -260,6 +263,30 @@ func (c *commandDialogCmp) defaultCommands() []Command { }, }) } + + // Only show thinking toggle for Anthropic models that can reason + cfg := config.Get() + if agentCfg, ok := cfg.Agents["coder"]; ok { + providerCfg := cfg.GetProviderForModel(agentCfg.Model) + model := cfg.GetModelByType(agentCfg.Model) + if providerCfg != nil && model != nil && + providerCfg.Type == provider.TypeAnthropic && model.CanReason { + selectedModel := cfg.Models[agentCfg.Model] + status := "Enable" + if selectedModel.Think { + status = "Disable" + } + commands = append(commands, Command{ + ID: "toggle_thinking", + Title: status + " Thinking Mode", + Description: "Toggle model thinking for reasoning-capable models", + Handler: func(cmd Command) tea.Cmd { + return util.CmdHandler(ToggleThinkingMsg{}) + }, + }) + } + } + // Only show toggle compact mode command if window width is larger than compact breakpoint (90) if c.wWidth > 120 && c.sessionID != "" { commands = append(commands, Command{ diff --git a/internal/tui/components/dialogs/models/list.go b/internal/tui/components/dialogs/models/list.go index 8425b8f2c04569749a33867fb7e14e4b628d019e..ae09ac9ce6acfa8cb41a34a241a5e473ecd4ac81 100644 --- a/internal/tui/components/dialogs/models/list.go +++ b/internal/tui/components/dialogs/models/list.go @@ -98,14 +98,17 @@ func (m *ModelListComponent) SetModelType(modelType int) tea.Cmd { // First, add any configured providers that are not in the 
known providers list // These should appear at the top of the list - knownProviders := provider.KnownProviders() + knownProviders, err := config.Providers() + if err != nil { + return util.ReportError(err) + } for providerID, providerConfig := range cfg.Providers { if providerConfig.Disable { continue } // Check if this provider is not in the known providers list - if !slices.Contains(knownProviders, provider.InferenceProvider(providerID)) { + if !slices.ContainsFunc(knownProviders, func(p provider.Provider) bool { return p.ID == provider.InferenceProvider(providerID) }) { // Convert config provider to provider.Provider format configProvider := provider.Provider{ Name: providerConfig.Name, diff --git a/internal/tui/components/dialogs/permissions/permissions.go b/internal/tui/components/dialogs/permissions/permissions.go index fa08885e7db516f11248430e74046e978dd00e88..e104f60e5255928a9fa6bddc80d6cc16816399ee 100644 --- a/internal/tui/components/dialogs/permissions/permissions.go +++ b/internal/tui/components/dialogs/permissions/permissions.go @@ -52,9 +52,10 @@ type permissionDialogCmp struct { selectedOption int // 0: Allow, 1: Allow for session, 2: Deny // Diff view state - diffSplitMode bool // true for split, false for unified - diffXOffset int // horizontal scroll offset - diffYOffset int // vertical scroll offset + defaultDiffSplitMode bool // true for split, false for unified + diffSplitMode *bool // nil means use defaultDiffSplitMode + diffXOffset int // horizontal scroll offset + diffYOffset int // vertical scroll offset // Caching cachedContent string @@ -122,7 +123,12 @@ func (p *permissionDialogCmp) Update(msg tea.Msg) (tea.Model, tea.Cmd) { ) case key.Matches(msg, p.keyMap.ToggleDiffMode): if p.supportsDiffView() { - p.diffSplitMode = !p.diffSplitMode + if p.diffSplitMode == nil { + diffSplitMode := !p.defaultDiffSplitMode + p.diffSplitMode = &diffSplitMode + } else { + *p.diffSplitMode = !*p.diffSplitMode + } p.contentDirty = true // Mark content as dirty when diff mode changes return p, nil } @@ -354,7 +360,7 @@ func (p *permissionDialogCmp) generateEditContent() string { Width(p.contentViewPort.Width()). XOffset(p.diffXOffset). YOffset(p.diffYOffset) - if p.diffSplitMode { + if p.useDiffSplitMode() { formatter = formatter.Split() } else { formatter = formatter.Unified() @@ -376,7 +382,7 @@ func (p *permissionDialogCmp) generateWriteContent() string { Width(p.contentViewPort.Width()). XOffset(p.diffXOffset). YOffset(p.diffYOffset) - if p.diffSplitMode { + if p.useDiffSplitMode() { formatter = formatter.Split() } else { formatter = formatter.Unified() @@ -438,6 +444,14 @@ func (p *permissionDialogCmp) generateDefaultContent() string { return finalContent } +func (p *permissionDialogCmp) useDiffSplitMode() bool { + if p.diffSplitMode != nil { + return *p.diffSplitMode + } else { + return p.defaultDiffSplitMode + } +} + func (p *permissionDialogCmp) styleViewport() string { t := styles.CurrentTheme() return t.S().Base.Render(p.contentViewPort.View()) @@ -525,6 +539,9 @@ func (p *permissionDialogCmp) SetSize() tea.Cmd { p.height = int(float64(p.wHeight) * 0.5) } + // Default to diff split mode when dialog is wide enough. 
+ p.defaultDiffSplitMode = p.width >= 140 + // Mark content as dirty if size changed if oldWidth != p.width || oldHeight != p.height { p.contentDirty = true diff --git a/internal/tui/page/chat/chat.go b/internal/tui/page/chat/chat.go index be7c0f2658202ea59e70778df1785b98310bc458..2c7bf17fcb2c78e43e0d130696474379985ac99d 100644 --- a/internal/tui/page/chat/chat.go +++ b/internal/tui/page/chat/chat.go @@ -183,6 +183,8 @@ func (p *chatPage) Update(msg tea.Msg) (tea.Model, tea.Cmd) { cmd = p.updateCompactConfig(false) } return p, tea.Batch(p.SetSize(p.width, p.height), cmd) + case commands.ToggleThinkingMsg: + return p, p.toggleThinking() case pubsub.Event[session.Session]: u, cmd := p.header.Update(msg) p.header = u.(header.Header) @@ -380,7 +382,7 @@ func (p *chatPage) View() string { Width(p.detailsWidth). Border(lipgloss.RoundedBorder()). BorderForeground(t.BorderFocus) - version := t.S().Subtle.Width(p.detailsWidth - 2).AlignHorizontal(lipgloss.Right).Render(version.Version) + version := t.S().Base.Foreground(t.Border).Width(p.detailsWidth - 4).AlignHorizontal(lipgloss.Right).Render(version.Version) details := style.Render( lipgloss.JoinVertical( lipgloss.Left, @@ -409,6 +411,35 @@ func (p *chatPage) updateCompactConfig(compact bool) tea.Cmd { } } +func (p *chatPage) toggleThinking() tea.Cmd { + return func() tea.Msg { + cfg := config.Get() + agentCfg := cfg.Agents["coder"] + currentModel := cfg.Models[agentCfg.Model] + + // Toggle the thinking mode + currentModel.Think = !currentModel.Think + cfg.Models[agentCfg.Model] = currentModel + + // Update the agent with the new configuration + if err := p.app.UpdateAgentModel(); err != nil { + return util.InfoMsg{ + Type: util.InfoTypeError, + Msg: "Failed to update thinking mode: " + err.Error(), + } + } + + status := "disabled" + if currentModel.Think { + status = "enabled" + } + return util.InfoMsg{ + Type: util.InfoTypeInfo, + Msg: "Thinking mode " + status, + } + } +} + func (p *chatPage) setCompactMode(compact bool) { if p.compact == compact { return @@ -474,6 +505,8 @@ func (p *chatPage) newSession() tea.Cmd { p.session = session.Session{} p.focusedPane = PanelTypeEditor + p.editor.Focus() + p.chat.Blur() p.isCanceling = false return tea.Batch( util.CmdHandler(chat.SessionClearedMsg{}),
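In the permissions dialog hunks above, the split-diff view now derives a width-based default (`defaultDiffSplitMode`) and only materializes `diffSplitMode *bool` once the user toggles it, so resizes keep tracking the default until there is an explicit choice. A compact sketch of that nil-means-default pattern, using a hypothetical `diffView` type:

```go
package main

import "fmt"

type diffView struct {
	width        int
	defaultSplit bool  // recomputed from width on every resize
	split        *bool // nil until the user explicitly toggles
}

func (d *diffView) resize(width int) {
	d.width = width
	d.defaultSplit = width >= 140 // wide dialogs default to side-by-side
}

func (d *diffView) toggle() {
	if d.split == nil {
		v := !d.defaultSplit
		d.split = &v
		return
	}
	*d.split = !*d.split
}

func (d *diffView) useSplit() bool {
	if d.split != nil {
		return *d.split
	}
	return d.defaultSplit
}

func main() {
	var d diffView
	d.resize(180)
	fmt.Println("wide, untouched:", d.useSplit()) // true, from the width default
	d.toggle()
	fmt.Println("after toggle:", d.useSplit()) // false, user override
	d.resize(100)
	fmt.Println("after shrink:", d.useSplit()) // still false - the override sticks
}
```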