diff --git a/internal/llm/provider/openai.go b/internal/llm/provider/openai.go
index 70bbe128663ce6163a93a2eb172e6d23f5873af3..9b612b64d63a55d278a686ba5b3f7bda7f973c69 100644
--- a/internal/llm/provider/openai.go
+++ b/internal/llm/provider/openai.go
@@ -2,6 +2,7 @@ package provider
 
 import (
 	"context"
+	"encoding/json"
 	"errors"
 	"fmt"
 	"io"
@@ -348,8 +349,18 @@ func (o *openaiClient) stream(ctx context.Context, messages []message.Message, t
 			chunk.Choices[0].Delta.ToolCalls[0].Index = 0
 		}
 		acc.AddChunk(chunk)
-		// This fixes multiple tool calls for some providers
 		for i, choice := range chunk.Choices {
+			reasoning, ok := choice.Delta.JSON.ExtraFields["reasoning"]
+			if ok && reasoning.Raw() != "" {
+				reasoningStr := ""
+				json.Unmarshal([]byte(reasoning.Raw()), &reasoningStr)
+				if reasoningStr != "" {
+					eventChan <- ProviderEvent{
+						Type:     EventThinkingDelta,
+						Thinking: reasoningStr,
+					}
+				}
+			}
			if choice.Delta.Content != "" {
				eventChan <- ProviderEvent{
					Type: EventContentDelta,
diff --git a/internal/tui/components/chat/messages/messages.go b/internal/tui/components/chat/messages/messages.go
index 17bb582dcadbea1f314b976bc31a31639f8d9609..17c157df5292280c6f094ec4e0f95bee82c6a77b 100644
--- a/internal/tui/components/chat/messages/messages.go
+++ b/internal/tui/components/chat/messages/messages.go
@@ -274,6 +274,9 @@ func (m *messageCmp) renderThinkingContent() string {
 	if reasoningContent.StartedAt > 0 {
 		duration := m.message.ThinkingDuration()
 		if reasoningContent.FinishedAt > 0 {
+			if duration.String() == "0s" {
+				return ""
+			}
 			m.anim.SetLabel("")
 			opts := core.StatusOpts{
 				Title: "Thought for",
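
Note on the first hunk: some OpenAI-compatible providers stream thinking tokens in a non-standard "reasoning" field on the delta, which the Go SDK used here only surfaces through Delta.JSON.ExtraFields as raw JSON, hence the json.Unmarshal into a plain string. The sketch below is a minimal, self-contained illustration of the chunk shape being probed; the delta/chunk structs and the sample payload are assumptions for illustration, not code from this repository.

	package main

	import (
		"encoding/json"
		"fmt"
	)

	// delta mirrors only the fields relevant here; the struct is illustrative.
	type delta struct {
		Content   string `json:"content"`
		Reasoning string `json:"reasoning"` // non-standard field carrying thinking tokens
	}

	type chunk struct {
		Choices []struct {
			Delta delta `json:"delta"`
		} `json:"choices"`
	}

	func main() {
		// Example SSE "data:" payload from a reasoning-capable provider (made up for this sketch).
		raw := `{"choices":[{"delta":{"content":"","reasoning":"Checking the file first."}}]}`

		var c chunk
		if err := json.Unmarshal([]byte(raw), &c); err != nil {
			panic(err)
		}
		for _, choice := range c.Choices {
			if choice.Delta.Reasoning != "" {
				// The patch forwards this as ProviderEvent{Type: EventThinkingDelta, Thinking: ...}.
				fmt.Println("thinking delta:", choice.Delta.Reasoning)
			}
		}
	}

The second hunk then keeps the TUI quiet for providers that never produced such deltas: when the measured thinking duration renders as "0s", renderThinkingContent returns an empty string instead of a "Thought for" status line.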