From e156b8fcb8538276c532000f7a218bd8a282d7fd Mon Sep 17 00:00:00 2001
From: Fabio Mora <17892293+Fuabioo@users.noreply.github.com>
Date: Tue, 15 Jul 2025 20:59:05 -0600
Subject: [PATCH] fix(openai): handle empty responses from OpenAI API in send and stream methods

---
 internal/llm/provider/openai.go      | 16 ++++-
 internal/llm/provider/openai_test.go | 91 ++++++++++++++++++++++++++++
 2 files changed, 106 insertions(+), 1 deletion(-)
 create mode 100644 internal/llm/provider/openai_test.go

diff --git a/internal/llm/provider/openai.go b/internal/llm/provider/openai.go
index 51baaf3a27a0596483bef18663cd2ded2ead0af9..ed5d95ed909124219ef9b5f093153de9ab55d6c1 100644
--- a/internal/llm/provider/openai.go
+++ b/internal/llm/provider/openai.go
@@ -224,6 +224,10 @@ func (o *openaiClient) send(ctx context.Context, messages []message.Message, too
 		return nil, retryErr
 	}
 
+	if len(openaiResponse.Choices) == 0 {
+		return nil, fmt.Errorf("received empty response from OpenAI API - check endpoint configuration")
+	}
+
 	content := ""
 	if openaiResponse.Choices[0].Message.Content != "" {
 		content = openaiResponse.Choices[0].Message.Content
@@ -324,7 +328,9 @@ func (o *openaiClient) stream(ctx context.Context, messages []message.Message, t
 				}
 				if choice.FinishReason == "tool_calls" {
 					msgToolCalls = append(msgToolCalls, currentToolCall)
-					acc.Choices[0].Message.ToolCalls = msgToolCalls
+					if len(acc.Choices) > 0 {
+						acc.Choices[0].Message.ToolCalls = msgToolCalls
+					}
 				}
 			}
 		}
@@ -336,6 +342,14 @@ func (o *openaiClient) stream(ctx context.Context, messages []message.Message, t
 			slog.Debug("Response", "messages", string(jsonData))
 		}
 
+		if len(acc.ChatCompletion.Choices) == 0 {
+			eventChan <- ProviderEvent{
+				Type:  EventError,
+				Error: fmt.Errorf("received empty streaming response from OpenAI API - check endpoint configuration"),
+			}
+			return
+		}
+
 		resultFinishReason := acc.ChatCompletion.Choices[0].FinishReason
 		if resultFinishReason == "" {
 			// If the finish reason is empty, we assume it was a successful completion
diff --git a/internal/llm/provider/openai_test.go b/internal/llm/provider/openai_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..c11e8ff14d7995859cccd3c95eeae4008fb20ac9
--- /dev/null
+++ b/internal/llm/provider/openai_test.go
@@ -0,0 +1,91 @@
+package provider
+
+import (
+	"context"
+	"encoding/json"
+	"net/http"
+	"net/http/httptest"
+	"os"
+	"testing"
+	"time"
+
+	"github.com/charmbracelet/crush/internal/config"
+	"github.com/charmbracelet/crush/internal/fur/provider"
+	"github.com/charmbracelet/crush/internal/llm/tools"
+	"github.com/charmbracelet/crush/internal/message"
+	"github.com/openai/openai-go"
+	"github.com/openai/openai-go/option"
+)
+
+func TestMain(m *testing.M) {
+	_, err := config.Init(".", true)
+	if err != nil {
+		panic("Failed to initialize config: " + err.Error())
+	}
+
+	os.Exit(m.Run())
+}
+
+func TestOpenAIClientStreamChoices(t *testing.T) {
+	// Create a mock server that returns Server-Sent Events with empty choices
+	// This simulates the 🤡 behavior when a server returns 200 instead of 404
+	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		w.Header().Set("Content-Type", "text/event-stream")
+		w.Header().Set("Cache-Control", "no-cache")
+		w.Header().Set("Connection", "keep-alive")
+		w.WriteHeader(http.StatusOK)
+
+		emptyChoicesChunk := map[string]any{
+			"id":      "chat-completion-test",
+			"object":  "chat.completion.chunk",
+			"created": time.Now().Unix(),
+			"model":   "test-model",
+			"choices": []any{}, // Empty choices array that causes panic
+		}
+
+		jsonData, _ := json.Marshal(emptyChoicesChunk)
+		w.Write([]byte("data: " + string(jsonData) + "\n\n"))
+		w.Write([]byte("data: [DONE]\n\n"))
+	}))
+	defer server.Close()
+
+	// Create OpenAI client pointing to our mock server
+	client := &openaiClient{
+		providerOptions: providerClientOptions{
+			modelType:     config.SelectedModelTypeLarge,
+			apiKey:        "test-key",
+			systemMessage: "test",
+			model: func(config.SelectedModelType) provider.Model {
+				return provider.Model{
+					ID:    "test-model",
+					Model: "test-model",
+				}
+			},
+		},
+		client: openai.NewClient(
+			option.WithAPIKey("test-key"),
+			option.WithBaseURL(server.URL),
+		),
+	}
+
+	// Create test messages
+	messages := []message.Message{
+		{
+			Role:  message.User,
+			Parts: []message.ContentPart{message.TextContent{Text: "Hello"}},
+		},
+	}
+
+	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+	defer cancel()
+
+	eventsChan := client.stream(ctx, messages, []tools.BaseTool{})
+
+	// Collect events - this will panic without the bounds check
+	for event := range eventsChan {
+		t.Logf("Received event: %+v", event)
+		if event.Type == EventError || event.Type == EventComplete {
+			break
+		}
+	}
+}
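
Reviewer note: below is a minimal, self-contained sketch of the guard this patch adds, runnable outside the repo. The chatCompletion type and firstChoice helper are hypothetical stand-ins, not the real openai-go types; only the empty-Choices check mirrors the patched code.

package main

import (
	"errors"
	"fmt"
)

// chatCompletion stands in for the SDK's response struct (assumption: the
// real type exposes a Choices slice, since the patched code indexes Choices[0]).
type chatCompletion struct {
	Choices []struct{ Content string }
}

// firstChoice returns the first choice's content, or an error instead of an
// index-out-of-range panic when a misconfigured endpoint answers 200 with an
// empty choices array.
func firstChoice(resp *chatCompletion) (string, error) {
	if resp == nil || len(resp.Choices) == 0 {
		return "", errors.New("received empty response from OpenAI API - check endpoint configuration")
	}
	return resp.Choices[0].Content, nil
}

func main() {
	// Simulate the failure mode the test reproduces: a 200 response whose
	// choices array is empty.
	if _, err := firstChoice(&chatCompletion{}); err != nil {
		fmt.Println("handled:", err) // previously: runtime panic
	}
}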