@@ -224,6 +224,10 @@ func (o *openaiClient) send(ctx context.Context, messages []message.Message, too
 			return nil, retryErr
 		}
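+		// Some OpenAI-compatible endpoints respond 200 with an empty choices
+		// list instead of an error status; guard before indexing Choices[0].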
+		if len(openaiResponse.Choices) == 0 {
+			return nil, fmt.Errorf("received empty response from OpenAI API - check endpoint configuration")
+		}
+
 		content := ""
 		if openaiResponse.Choices[0].Message.Content != "" {
 			content = openaiResponse.Choices[0].Message.Content
@@ -324,7 +328,9 @@ func (o *openaiClient) stream(ctx context.Context, messages []message.Message, t
 				}
 				if choice.FinishReason == "tool_calls" {
 					msgToolCalls = append(msgToolCalls, currentToolCall)
-					acc.Choices[0].Message.ToolCalls = msgToolCalls
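+					// The accumulator may hold no choices on a malformed
+					// stream; skip the write rather than panic.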
+					if len(acc.Choices) > 0 {
+						acc.Choices[0].Message.ToolCalls = msgToolCalls
+					}
 				}
 			}
 		}
@@ -336,6 +342,14 @@ func (o *openaiClient) stream(ctx context.Context, messages []message.Message, t
 			slog.Debug("Response", "messages", string(jsonData))
 		}
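+		// No accumulated choices means the endpoint never produced a valid
+		// chunk; emit an explicit error instead of panicking on Choices[0] below.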
+		if len(acc.ChatCompletion.Choices) == 0 {
+			eventChan <- ProviderEvent{
+				Type:  EventError,
+				Error: fmt.Errorf("received empty streaming response from OpenAI API - check endpoint configuration"),
+			}
+			return
+		}
+
 		resultFinishReason := acc.ChatCompletion.Choices[0].FinishReason
 		if resultFinishReason == "" {
 			// If the finish reason is empty, we assume it was a successful completion
@@ -0,0 +1,91 @@
+package provider
+
+import (
+	"context"
+	"encoding/json"
+	"net/http"
+	"net/http/httptest"
+	"os"
+	"testing"
+	"time"
+
+	"github.com/charmbracelet/crush/internal/config"
+	"github.com/charmbracelet/crush/internal/fur/provider"
+	"github.com/charmbracelet/crush/internal/llm/tools"
+	"github.com/charmbracelet/crush/internal/message"
+	"github.com/openai/openai-go"
+	"github.com/openai/openai-go/option"
+)
+
+func TestMain(m *testing.M) {
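+	// The provider tests require an initialized config, so set it up once
+	// for the whole package.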
+	_, err := config.Init(".", true)
+	if err != nil {
+		panic("Failed to initialize config: " + err.Error())
+	}
+
+	os.Exit(m.Run())
+}
+
+func TestOpenAIClientStreamChoices(t *testing.T) {
+	// Create a mock server that returns Server-Sent Events with empty choices
+	// This simulates the 🤡 behavior when a server returns 200 instead of 404
+	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		w.Header().Set("Content-Type", "text/event-stream")
+		w.Header().Set("Cache-Control", "no-cache")
+		w.Header().Set("Connection", "keep-alive")
+		w.WriteHeader(http.StatusOK)
+
+		emptyChoicesChunk := map[string]any{
+			"id":      "chat-completion-test",
+			"object":  "chat.completion.chunk",
+			"created": time.Now().Unix(),
+			"model":   "test-model",
+			"choices": []any{}, // Empty choices array that causes panic
+		}
+
+		jsonData, _ := json.Marshal(emptyChoicesChunk)
+		w.Write([]byte("data: " + string(jsonData) + "\n\n"))
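+		// Terminate the SSE stream with OpenAI's [DONE] sentinel so the
+		// client finishes reading cleanly.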
+		w.Write([]byte("data: [DONE]\n\n"))
+	}))
+	defer server.Close()
+
+	// Create OpenAI client pointing to our mock server
+	client := &openaiClient{
+		providerOptions: providerClientOptions{
+			modelType:     config.SelectedModelTypeLarge,
+			apiKey:        "test-key",
+			systemMessage: "test",
+			model: func(config.SelectedModelType) provider.Model {
+				return provider.Model{
+					ID:    "test-model",
+					Model: "test-model",
+				}
+			},
+		},
+		client: openai.NewClient(
+			option.WithAPIKey("test-key"),
+			option.WithBaseURL(server.URL),
+		),
+	}
+
+	// Create test messages
+	messages := []message.Message{
+		{
+			Role:  message.User,
+			Parts: []message.ContentPart{message.TextContent{Text: "Hello"}},
+		},
+	}
+
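+	// Bound the test with a timeout so a misbehaving stream cannot hang the suite.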
+	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+	defer cancel()
+
+	eventsChan := client.stream(ctx, messages, []tools.BaseTool{})
+
+	// Collect events - this will panic without the bounds check
+	for event := range eventsChan {
+		t.Logf("Received event: %+v", event)
+		if event.Type == EventError || event.Type == EventComplete {
+			break
+		}
+	}
+}