Fix OpenAI response chaining review findings

Created by Michael Suchacz

Change summary

providers/openai/responses_language_model.go | 12 +++++++++
providers/openai/responses_options.go        | 26 +++++++++++++--------
providers/openai/responses_params_test.go    | 11 +++++++++
3 files changed, 38 insertions(+), 11 deletions(-)

Detailed changes

providers/openai/responses_language_model.go 🔗

@@ -163,7 +163,7 @@ func (o responsesLanguageModel) prepareParams(call fantasy.Call) (*responses.Res
 		params.Store = param.NewOpt(false)
 	}
 
-	if openaiOptions != nil && openaiOptions.PreviousResponseID != nil {
+	if openaiOptions != nil && openaiOptions.PreviousResponseID != nil && *openaiOptions.PreviousResponseID != "" {
 		if err := validatePreviousResponseIDPrompt(call.Prompt); err != nil {
 			return nil, warnings, err
 		}
@@ -901,6 +901,11 @@ func (o responsesLanguageModel) Stream(ctx context.Context, call fantasy.Call) (
 
 	finishReason := fantasy.FinishReasonUnknown
 	var usage fantasy.Usage
+	// responseID tracks the server-assigned response ID. It's first set from the
+	// response.created event and may be overwritten by response.completed or
+	// response.incomplete events. Per the OpenAI API contract, these IDs are
+	// identical; the overwrites ensure we have the final value even if an event
+	// is missed.
 	responseID := ""
 	ongoingToolCalls := make(map[int64]*ongoingToolCall)
 	hasFunctionCall := false
@@ -1449,6 +1454,11 @@ func (o responsesLanguageModel) streamObjectWithJSONMode(ctx context.Context, ca
 		var lastParsedObject any
 		var usage fantasy.Usage
 		var finishReason fantasy.FinishReason
+		// responseID tracks the server-assigned response ID. It's first set from the
+		// response.created event and may be overwritten by response.completed or
+		// response.incomplete events. Per the OpenAI API contract, these IDs are
+		// identical; the overwrites ensure we have the final value even if an event
+		// is missed.
 		var responseID string
 		var streamErr error
 		hasFunctionCall := false

providers/openai/responses_options.go 🔗

@@ -141,22 +141,28 @@ const (
 
 // ResponsesProviderOptions represents additional options for OpenAI Responses API.
 type ResponsesProviderOptions struct {
-	Include            []IncludeType    `json:"include"`
-	Instructions       *string          `json:"instructions"`
-	Logprobs           any              `json:"logprobs"`
-	MaxToolCalls       *int64           `json:"max_tool_calls"`
-	Metadata           map[string]any   `json:"metadata"`
-	ParallelToolCalls  *bool            `json:"parallel_tool_calls"`
+	Include           []IncludeType  `json:"include"`
+	Instructions      *string        `json:"instructions"`
+	Logprobs          any            `json:"logprobs"`
+	MaxToolCalls      *int64         `json:"max_tool_calls"`
+	Metadata          map[string]any `json:"metadata"`
+	ParallelToolCalls *bool          `json:"parallel_tool_calls"`
+	// PreviousResponseID chains this request to a prior stored response, enabling
+	// server-side conversation state. When set, the prompt should contain only the
+	// new incremental turn—not replayed assistant history.
 	PreviousResponseID *string          `json:"previous_response_id"`
 	PromptCacheKey     *string          `json:"prompt_cache_key"`
 	ReasoningEffort    *ReasoningEffort `json:"reasoning_effort"`
 	ReasoningSummary   *string          `json:"reasoning_summary"`
 	SafetyIdentifier   *string          `json:"safety_identifier"`
 	ServiceTier        *ServiceTier     `json:"service_tier"`
-	Store              *bool            `json:"store"`
-	StrictJSONSchema   *bool            `json:"strict_json_schema"`
-	TextVerbosity      *TextVerbosity   `json:"text_verbosity"`
-	User               *string          `json:"user"`
+	// Store indicates whether OpenAI should persist this response for future
+	// retrieval and chaining via PreviousResponseID. Defaults to false to prevent
+	// unintended storage of potentially sensitive conversations.
+	Store            *bool          `json:"store"`
+	StrictJSONSchema *bool          `json:"strict_json_schema"`
+	TextVerbosity    *TextVerbosity `json:"text_verbosity"`
+	User             *string        `json:"user"`
 }
 
 // Options implements the ProviderOptions interface.

providers/openai/responses_params_test.go 🔗

@@ -81,6 +81,17 @@ func TestPrepareParams_PreviousResponseID(t *testing.T) {
 		require.Empty(t, warnings)
 		require.False(t, params.PreviousResponseID.Valid())
 	})
+
+	t.Run("empty string ignored", func(t *testing.T) {
+		t.Parallel()
+
+		params, warnings, err := lm.prepareParams(testCall(prompt, &ResponsesProviderOptions{
+			PreviousResponseID: fantasy.Opt(""),
+		}))
+		require.NoError(t, err)
+		require.Empty(t, warnings)
+		require.False(t, params.PreviousResponseID.Valid())
+	})
 }
 
 func TestPrepareParams_PreviousResponseID_Validation(t *testing.T) {