fix: guard against nil response in Generate to prevent panic (#228)

slzcdhd and liaoyijun created

Some OpenAI-compatible API endpoints (e.g. DeepSeek) may return a nil
response with no error from the underlying SDK under edge conditions
(empty response body, connection issues during response parsing, etc.).

When this happens, accessing response.Choices (Chat Completions),
response.Error (Responses API), or response.Content (Anthropic)
causes a nil pointer dereference panic that crashes the process.

Add nil response checks in Generate methods for:
- providers/openai: languageModel.Generate (Chat Completions API)
- providers/openai: responsesLanguageModel.Generate (Responses API)
- providers/anthropic: languageModel.Generate

Each returns a descriptive fantasy.Error instead of panicking.

Co-authored-by: liaoyijun <liaoyijun@wps.cn>

Change summary

providers/anthropic/anthropic.go             |  3 ++
providers/openai/language_model.go           |  3 ++
providers/openai/openai_test.go              | 25 +++++++++++++++++++++
providers/openai/responses_language_model.go |  3 ++
4 files changed, 34 insertions(+)

Detailed changes

providers/anthropic/anthropic.go 🔗

@@ -1171,6 +1171,9 @@ func (a languageModel) Generate(ctx context.Context, call fantasy.Call) (*fantas
 	if err != nil {
 		return nil, toProviderErr(err)
 	}
+	if response == nil {
+		return nil, &fantasy.Error{Title: "no response", Message: "provider returned nil response"}
+	}
 
 	var content []fantasy.Content
 	for _, block := range response.Content {

providers/openai/language_model.go 🔗

@@ -251,6 +251,9 @@ func (o languageModel) Generate(ctx context.Context, call fantasy.Call) (*fantas
 	if err != nil {
 		return nil, toProviderErr(err)
 	}
+	if response == nil {
+		return nil, &fantasy.Error{Title: "no response", Message: "provider returned nil response"}
+	}
 
 	if len(response.Choices) == 0 {
 		return nil, &fantasy.Error{Title: "no response", Message: "no response generated"}

providers/openai/openai_test.go 🔗

@@ -2010,6 +2010,31 @@ func TestDoGenerate(t *testing.T) {
 		require.Equal(t, "ServiceTier", result.Warnings[0].Setting)
 		require.Contains(t, result.Warnings[0].Details, "priority processing is only available")
 	})
+
+	t.Run("should return error instead of panic on empty response body", func(t *testing.T) {
+		t.Parallel()
+
+		server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+			w.Header().Set("Content-Type", "application/json")
+			w.WriteHeader(http.StatusOK)
+			// Write empty body — some OpenAI-compatible endpoints may do this
+			// under edge conditions, causing the SDK to return (nil, nil).
+		}))
+		defer server.Close()
+
+		provider, err := New(
+			WithAPIKey("test-api-key"),
+			WithBaseURL(server.URL),
+		)
+		require.NoError(t, err)
+		model, _ := provider.LanguageModel(t.Context(), "gpt-3.5-turbo")
+
+		require.NotPanics(t, func() {
+			_, _ = model.Generate(context.Background(), fantasy.Call{
+				Prompt: testPrompt,
+			})
+		})
+	})
 }
 
 type streamingMockServer struct {

providers/openai/responses_language_model.go 🔗

@@ -789,6 +789,9 @@ func (o responsesLanguageModel) Generate(ctx context.Context, call fantasy.Call)
 	if err != nil {
 		return nil, toProviderErr(err)
 	}
+	if response == nil {
+		return nil, &fantasy.Error{Title: "no response", Message: "provider returned nil response"}
+	}
 
 	if response.Error.Message != "" {
 		return nil, &fantasy.Error{