responses_language_model.go

   1package openai
   2
   3import (
   4	"context"
   5	"encoding/base64"
   6	"encoding/json"
   7	"fmt"
   8	"reflect"
   9	"strings"
  10
  11	"charm.land/fantasy"
  12	"charm.land/fantasy/object"
  13	"charm.land/fantasy/schema"
  14	"github.com/google/uuid"
  15	"github.com/openai/openai-go/v2"
  16	"github.com/openai/openai-go/v2/packages/param"
  17	"github.com/openai/openai-go/v2/responses"
  18	"github.com/openai/openai-go/v2/shared"
  19)
  20
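    // topLogprobsMax is the top_logprobs value requested when logprobs is enabled as a plain boolean.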
  21const topLogprobsMax = 20
  22
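    // responsesLanguageModel implements fantasy.LanguageModel on top of the OpenAI Responses API.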
  23type responsesLanguageModel struct {
  24	provider   string
  25	modelID    string
  26	client     openai.Client
  27	objectMode fantasy.ObjectMode
  28}
  29
  30// newResponsesLanguageModel creates a language model backed by the OpenAI Responses API.
  31// NOTE(kujtim): the Store parameter is not currently supported; it is always set to false.
  32func newResponsesLanguageModel(modelID string, provider string, client openai.Client, objectMode fantasy.ObjectMode) responsesLanguageModel {
  33	return responsesLanguageModel{
  34		modelID:    modelID,
  35		provider:   provider,
  36		client:     client,
  37		objectMode: objectMode,
  38	}
  39}
  40
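    // Model returns the model ID used for requests.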
  41func (o responsesLanguageModel) Model() string {
  42	return o.modelID
  43}
  44
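    // Provider returns the provider name.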
  45func (o responsesLanguageModel) Provider() string {
  46	return o.provider
  47}
  48
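    // responsesModelConfig captures per-model behavior: reasoning support, how system
    // messages are delivered, and which service tiers are available.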
  49type responsesModelConfig struct {
  50	isReasoningModel           bool
  51	systemMessageMode          string
  52	requiredAutoTruncation     bool
  53	supportsFlexProcessing     bool
  54	supportsPriorityProcessing bool
  55}
  56
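    // getResponsesModelConfig derives a responsesModelConfig from the model ID using
    // prefix and substring heuristics.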
  57func getResponsesModelConfig(modelID string) responsesModelConfig {
  58	supportsFlexProcessing := strings.HasPrefix(modelID, "o3") ||
  59		strings.Contains(modelID, "-o3") || strings.Contains(modelID, "o4-mini") ||
  60		(strings.Contains(modelID, "gpt-5") && !strings.Contains(modelID, "gpt-5-chat"))
  61
  62	supportsPriorityProcessing := strings.Contains(modelID, "gpt-4") ||
  63		strings.Contains(modelID, "gpt-5-mini") ||
  64		(strings.Contains(modelID, "gpt-5") &&
  65			!strings.Contains(modelID, "gpt-5-nano") &&
  66			!strings.Contains(modelID, "gpt-5-chat")) ||
  67		strings.HasPrefix(modelID, "o3") ||
  68		strings.Contains(modelID, "-o3") ||
  69		strings.Contains(modelID, "o4-mini")
  70
  71	defaults := responsesModelConfig{
  72		requiredAutoTruncation:     false,
  73		systemMessageMode:          "system",
  74		supportsFlexProcessing:     supportsFlexProcessing,
  75		supportsPriorityProcessing: supportsPriorityProcessing,
  76	}
  77
  78	if strings.Contains(modelID, "gpt-5-chat") {
  79		return responsesModelConfig{
  80			isReasoningModel:           false,
  81			systemMessageMode:          defaults.systemMessageMode,
  82			requiredAutoTruncation:     defaults.requiredAutoTruncation,
  83			supportsFlexProcessing:     defaults.supportsFlexProcessing,
  84			supportsPriorityProcessing: defaults.supportsPriorityProcessing,
  85		}
  86	}
  87
  88	if strings.HasPrefix(modelID, "o1") || strings.Contains(modelID, "-o1") ||
  89		strings.HasPrefix(modelID, "o3") || strings.Contains(modelID, "-o3") ||
  90		strings.HasPrefix(modelID, "o4") || strings.Contains(modelID, "-o4") ||
  91		strings.HasPrefix(modelID, "oss") || strings.Contains(modelID, "-oss") ||
  92		strings.Contains(modelID, "gpt-5") || strings.Contains(modelID, "codex-") ||
  93		strings.Contains(modelID, "computer-use") {
  94		if strings.Contains(modelID, "o1-mini") || strings.Contains(modelID, "o1-preview") {
  95			return responsesModelConfig{
  96				isReasoningModel:           true,
  97				systemMessageMode:          "remove",
  98				requiredAutoTruncation:     defaults.requiredAutoTruncation,
  99				supportsFlexProcessing:     defaults.supportsFlexProcessing,
 100				supportsPriorityProcessing: defaults.supportsPriorityProcessing,
 101			}
 102		}
 103
 104		return responsesModelConfig{
 105			isReasoningModel:           true,
 106			systemMessageMode:          "developer",
 107			requiredAutoTruncation:     defaults.requiredAutoTruncation,
 108			supportsFlexProcessing:     defaults.supportsFlexProcessing,
 109			supportsPriorityProcessing: defaults.supportsPriorityProcessing,
 110		}
 111	}
 112
 113	return responsesModelConfig{
 114		isReasoningModel:           false,
 115		systemMessageMode:          defaults.systemMessageMode,
 116		requiredAutoTruncation:     defaults.requiredAutoTruncation,
 117		supportsFlexProcessing:     defaults.supportsFlexProcessing,
 118		supportsPriorityProcessing: defaults.supportsPriorityProcessing,
 119	}
 120}
 121
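    // prepareParams translates a fantasy.Call into Responses API request parameters,
    // collecting warnings for settings the model or API does not support.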
 122func (o responsesLanguageModel) prepareParams(call fantasy.Call) (*responses.ResponseNewParams, []fantasy.CallWarning) {
 123	var warnings []fantasy.CallWarning
 124	params := &responses.ResponseNewParams{
 125		Store: param.NewOpt(false),
 126	}
 127
 128	modelConfig := getResponsesModelConfig(o.modelID)
 129
 130	if call.TopK != nil {
 131		warnings = append(warnings, fantasy.CallWarning{
 132			Type:    fantasy.CallWarningTypeUnsupportedSetting,
 133			Setting: "topK",
 134		})
 135	}
 136
 137	if call.PresencePenalty != nil {
 138		warnings = append(warnings, fantasy.CallWarning{
 139			Type:    fantasy.CallWarningTypeUnsupportedSetting,
 140			Setting: "presencePenalty",
 141		})
 142	}
 143
 144	if call.FrequencyPenalty != nil {
 145		warnings = append(warnings, fantasy.CallWarning{
 146			Type:    fantasy.CallWarningTypeUnsupportedSetting,
 147			Setting: "frequencyPenalty",
 148		})
 149	}
 150
 151	var openaiOptions *ResponsesProviderOptions
 152	if opts, ok := call.ProviderOptions[Name]; ok {
 153		if typedOpts, ok := opts.(*ResponsesProviderOptions); ok {
 154			openaiOptions = typedOpts
 155		}
 156	}
 157
 158	input, inputWarnings := toResponsesPrompt(call.Prompt, modelConfig.systemMessageMode)
 159	warnings = append(warnings, inputWarnings...)
 160
 161	var include []IncludeType
 162
 163	addInclude := func(key IncludeType) {
 164		include = append(include, key)
 165	}
 166
 167	topLogprobs := 0
 168	if openaiOptions != nil && openaiOptions.Logprobs != nil {
 169		switch v := openaiOptions.Logprobs.(type) {
 170		case bool:
 171			if v {
 172				topLogprobs = topLogprobsMax
 173			}
 174		case float64:
 175			topLogprobs = int(v)
 176		case int:
 177			topLogprobs = v
 178		}
 179	}
 180
 181	if topLogprobs > 0 {
 182		addInclude(IncludeMessageOutputTextLogprobs)
 183	}
 184
 185	params.Model = o.modelID
 186	params.Input = responses.ResponseNewParamsInputUnion{
 187		OfInputItemList: input,
 188	}
 189
 190	if call.Temperature != nil {
 191		params.Temperature = param.NewOpt(*call.Temperature)
 192	}
 193	if call.TopP != nil {
 194		params.TopP = param.NewOpt(*call.TopP)
 195	}
 196	if call.MaxOutputTokens != nil {
 197		params.MaxOutputTokens = param.NewOpt(*call.MaxOutputTokens)
 198	}
 199
 200	if openaiOptions != nil {
 201		if openaiOptions.MaxToolCalls != nil {
 202			params.MaxToolCalls = param.NewOpt(*openaiOptions.MaxToolCalls)
 203		}
 204		if openaiOptions.Metadata != nil {
 205			metadata := make(shared.Metadata)
 206			for k, v := range openaiOptions.Metadata {
 207				if str, ok := v.(string); ok {
 208					metadata[k] = str
 209				}
 210			}
 211			params.Metadata = metadata
 212		}
 213		if openaiOptions.ParallelToolCalls != nil {
 214			params.ParallelToolCalls = param.NewOpt(*openaiOptions.ParallelToolCalls)
 215		}
 216		if openaiOptions.User != nil {
 217			params.User = param.NewOpt(*openaiOptions.User)
 218		}
 219		if openaiOptions.Instructions != nil {
 220			params.Instructions = param.NewOpt(*openaiOptions.Instructions)
 221		}
 222		if openaiOptions.ServiceTier != nil {
 223			params.ServiceTier = responses.ResponseNewParamsServiceTier(*openaiOptions.ServiceTier)
 224		}
 225		if openaiOptions.PromptCacheKey != nil {
 226			params.PromptCacheKey = param.NewOpt(*openaiOptions.PromptCacheKey)
 227		}
 228		if openaiOptions.SafetyIdentifier != nil {
 229			params.SafetyIdentifier = param.NewOpt(*openaiOptions.SafetyIdentifier)
 230		}
 231		if topLogprobs > 0 {
 232			params.TopLogprobs = param.NewOpt(int64(topLogprobs))
 233		}
 234
 235		if len(openaiOptions.Include) > 0 {
 236			include = append(include, openaiOptions.Include...)
 237		}
 238
 239		if modelConfig.isReasoningModel && (openaiOptions.ReasoningEffort != nil || openaiOptions.ReasoningSummary != nil) {
 240			reasoning := shared.ReasoningParam{}
 241			if openaiOptions.ReasoningEffort != nil {
 242				reasoning.Effort = shared.ReasoningEffort(*openaiOptions.ReasoningEffort)
 243			}
 244			if openaiOptions.ReasoningSummary != nil {
 245				reasoning.Summary = shared.ReasoningSummary(*openaiOptions.ReasoningSummary)
 246			}
 247			params.Reasoning = reasoning
 248		}
 249	}
 250
 251	if modelConfig.requiredAutoTruncation {
 252		params.Truncation = responses.ResponseNewParamsTruncationAuto
 253	}
 254
 255	if len(include) > 0 {
 256		includeParams := make([]responses.ResponseIncludable, len(include))
 257		for i, inc := range include {
 258			includeParams[i] = responses.ResponseIncludable(string(inc))
 259		}
 260		params.Include = includeParams
 261	}
 262
 263	if modelConfig.isReasoningModel {
 264		if call.Temperature != nil {
 265			params.Temperature = param.Opt[float64]{}
 266			warnings = append(warnings, fantasy.CallWarning{
 267				Type:    fantasy.CallWarningTypeUnsupportedSetting,
 268				Setting: "temperature",
 269				Details: "temperature is not supported for reasoning models",
 270			})
 271		}
 272
 273		if call.TopP != nil {
 274			params.TopP = param.Opt[float64]{}
 275			warnings = append(warnings, fantasy.CallWarning{
 276				Type:    fantasy.CallWarningTypeUnsupportedSetting,
 277				Setting: "topP",
 278				Details: "topP is not supported for reasoning models",
 279			})
 280		}
 281	} else {
 282		if openaiOptions != nil {
 283			if openaiOptions.ReasoningEffort != nil {
 284				warnings = append(warnings, fantasy.CallWarning{
 285					Type:    fantasy.CallWarningTypeUnsupportedSetting,
 286					Setting: "reasoningEffort",
 287					Details: "reasoningEffort is not supported for non-reasoning models",
 288				})
 289			}
 290
 291			if openaiOptions.ReasoningSummary != nil {
 292				warnings = append(warnings, fantasy.CallWarning{
 293					Type:    fantasy.CallWarningTypeUnsupportedSetting,
 294					Setting: "reasoningSummary",
 295					Details: "reasoningSummary is not supported for non-reasoning models",
 296				})
 297			}
 298		}
 299	}
 300
 301	if openaiOptions != nil && openaiOptions.ServiceTier != nil {
 302		if *openaiOptions.ServiceTier == ServiceTierFlex && !modelConfig.supportsFlexProcessing {
 303			warnings = append(warnings, fantasy.CallWarning{
 304				Type:    fantasy.CallWarningTypeUnsupportedSetting,
 305				Setting: "serviceTier",
 306				Details: "flex processing is only available for o3, o4-mini, and gpt-5 models",
 307			})
 308			params.ServiceTier = ""
 309		}
 310
 311		if *openaiOptions.ServiceTier == ServiceTierPriority && !modelConfig.supportsPriorityProcessing {
 312			warnings = append(warnings, fantasy.CallWarning{
 313				Type:    fantasy.CallWarningTypeUnsupportedSetting,
 314				Setting: "serviceTier",
 315				Details: "priority processing is only available for supported models (gpt-4, gpt-5, gpt-5-mini, o3, o4-mini) and requires Enterprise access. gpt-5-nano is not supported",
 316			})
 317			params.ServiceTier = ""
 318		}
 319	}
 320
 321	tools, toolChoice, toolWarnings := toResponsesTools(call.Tools, call.ToolChoice, openaiOptions)
 322	warnings = append(warnings, toolWarnings...)
 323
 324	if len(tools) > 0 {
 325		params.Tools = tools
 326		params.ToolChoice = toolChoice
 327	}
 328
 329	return params, warnings
 330}
 331
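    // toResponsesPrompt converts a fantasy.Prompt into Responses API input items, honoring
    // the model's system message mode ("system", "developer", or "remove").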
 332func toResponsesPrompt(prompt fantasy.Prompt, systemMessageMode string) (responses.ResponseInputParam, []fantasy.CallWarning) {
 333	var input responses.ResponseInputParam
 334	var warnings []fantasy.CallWarning
 335
 336	for _, msg := range prompt {
 337		switch msg.Role {
 338		case fantasy.MessageRoleSystem:
 339			var systemText string
 340			for _, c := range msg.Content {
 341				if c.GetType() != fantasy.ContentTypeText {
 342					warnings = append(warnings, fantasy.CallWarning{
 343						Type:    fantasy.CallWarningTypeOther,
 344						Message: "system prompt can only have text content",
 345					})
 346					continue
 347				}
 348				textPart, ok := fantasy.AsContentType[fantasy.TextPart](c)
 349				if !ok {
 350					warnings = append(warnings, fantasy.CallWarning{
 351						Type:    fantasy.CallWarningTypeOther,
 352						Message: "system prompt text part does not have the right type",
 353					})
 354					continue
 355				}
 356				if strings.TrimSpace(textPart.Text) != "" {
 357					systemText += textPart.Text
 358				}
 359			}
 360
 361			if systemText == "" {
 362				warnings = append(warnings, fantasy.CallWarning{
 363					Type:    fantasy.CallWarningTypeOther,
 364					Message: "system prompt has no text parts",
 365				})
 366				continue
 367			}
 368
 369			switch systemMessageMode {
 370			case "system":
 371				input = append(input, responses.ResponseInputItemParamOfMessage(systemText, responses.EasyInputMessageRoleSystem))
 372			case "developer":
 373				input = append(input, responses.ResponseInputItemParamOfMessage(systemText, responses.EasyInputMessageRoleDeveloper))
 374			case "remove":
 375				warnings = append(warnings, fantasy.CallWarning{
 376					Type:    fantasy.CallWarningTypeOther,
 377					Message: "system messages are removed for this model",
 378				})
 379			}
 380
 381		case fantasy.MessageRoleUser:
 382			var contentParts responses.ResponseInputMessageContentListParam
 383			for i, c := range msg.Content {
 384				switch c.GetType() {
 385				case fantasy.ContentTypeText:
 386					textPart, ok := fantasy.AsContentType[fantasy.TextPart](c)
 387					if !ok {
 388						warnings = append(warnings, fantasy.CallWarning{
 389							Type:    fantasy.CallWarningTypeOther,
 390							Message: "user message text part does not have the right type",
 391						})
 392						continue
 393					}
 394					contentParts = append(contentParts, responses.ResponseInputContentUnionParam{
 395						OfInputText: &responses.ResponseInputTextParam{
 396							Type: "input_text",
 397							Text: textPart.Text,
 398						},
 399					})
 400
 401				case fantasy.ContentTypeFile:
 402					filePart, ok := fantasy.AsContentType[fantasy.FilePart](c)
 403					if !ok {
 404						warnings = append(warnings, fantasy.CallWarning{
 405							Type:    fantasy.CallWarningTypeOther,
 406							Message: "user message file part does not have the right type",
 407						})
 408						continue
 409					}
 410
 411					if strings.HasPrefix(filePart.MediaType, "image/") {
 412						base64Encoded := base64.StdEncoding.EncodeToString(filePart.Data)
 413						imageURL := fmt.Sprintf("data:%s;base64,%s", filePart.MediaType, base64Encoded)
 414						contentParts = append(contentParts, responses.ResponseInputContentUnionParam{
 415							OfInputImage: &responses.ResponseInputImageParam{
 416								Type:     "input_image",
 417								ImageURL: param.NewOpt(imageURL),
 418							},
 419						})
 420					} else if filePart.MediaType == "application/pdf" {
 421						base64Encoded := base64.StdEncoding.EncodeToString(filePart.Data)
 422						fileData := fmt.Sprintf("data:application/pdf;base64,%s", base64Encoded)
 423						filename := filePart.Filename
 424						if filename == "" {
 425							filename = fmt.Sprintf("part-%d.pdf", i)
 426						}
 427						contentParts = append(contentParts, responses.ResponseInputContentUnionParam{
 428							OfInputFile: &responses.ResponseInputFileParam{
 429								Type:     "input_file",
 430								Filename: param.NewOpt(filename),
 431								FileData: param.NewOpt(fileData),
 432							},
 433						})
 434					} else {
 435						warnings = append(warnings, fantasy.CallWarning{
 436							Type:    fantasy.CallWarningTypeOther,
 437							Message: fmt.Sprintf("file part media type %s not supported", filePart.MediaType),
 438						})
 439					}
 440				}
 441			}
 442
 443			if !hasVisibleResponsesUserContent(contentParts) {
 444				warnings = append(warnings, fantasy.CallWarning{
 445					Type:    fantasy.CallWarningTypeOther,
 446					Message: "dropping empty user message (contains neither user-facing content nor tool results)",
 447				})
 448				continue
 449			}
 450
 451			input = append(input, responses.ResponseInputItemParamOfMessage(contentParts, responses.EasyInputMessageRoleUser))
 452
 453		case fantasy.MessageRoleAssistant:
 454			startIdx := len(input)
 455			for _, c := range msg.Content {
 456				switch c.GetType() {
 457				case fantasy.ContentTypeText:
 458					textPart, ok := fantasy.AsContentType[fantasy.TextPart](c)
 459					if !ok {
 460						warnings = append(warnings, fantasy.CallWarning{
 461							Type:    fantasy.CallWarningTypeOther,
 462							Message: "assistant message text part does not have the right type",
 463						})
 464						continue
 465					}
 466					input = append(input, responses.ResponseInputItemParamOfMessage(textPart.Text, responses.EasyInputMessageRoleAssistant))
 467
 468				case fantasy.ContentTypeToolCall:
 469					toolCallPart, ok := fantasy.AsContentType[fantasy.ToolCallPart](c)
 470					if !ok {
 471						warnings = append(warnings, fantasy.CallWarning{
 472							Type:    fantasy.CallWarningTypeOther,
 473							Message: "assistant message tool call part does not have the right type",
 474						})
 475						continue
 476					}
 477
 478					if toolCallPart.ProviderExecuted {
 479						continue
 480					}
 481
 482					inputJSON, err := json.Marshal(toolCallPart.Input)
 483					if err != nil {
 484						warnings = append(warnings, fantasy.CallWarning{
 485							Type:    fantasy.CallWarningTypeOther,
 486							Message: fmt.Sprintf("failed to marshal tool call input: %v", err),
 487						})
 488						continue
 489					}
 490
 491					input = append(input, responses.ResponseInputItemParamOfFunctionCall(string(inputJSON), toolCallPart.ToolCallID, toolCallPart.ToolName))
 492				case fantasy.ContentTypeReasoning:
 493					reasoningMetadata := GetReasoningMetadata(c.Options())
 494					if reasoningMetadata == nil || reasoningMetadata.ItemID == "" {
 495						continue
 496					}
 497					if len(reasoningMetadata.Summary) == 0 && reasoningMetadata.EncryptedContent == nil {
 498						warnings = append(warnings, fantasy.CallWarning{
 499							Type:    fantasy.CallWarningTypeOther,
  500							Message: "assistant message reasoning part is empty",
 501						})
 502						continue
 503					}
  504					// always send a non-nil summary slice so the field is present even when empty
 505					summary := []responses.ResponseReasoningItemSummaryParam{}
 506					for _, s := range reasoningMetadata.Summary {
 507						summary = append(summary, responses.ResponseReasoningItemSummaryParam{
 508							Type: "summary_text",
 509							Text: s,
 510						})
 511					}
 512					reasoning := &responses.ResponseReasoningItemParam{
 513						ID:      reasoningMetadata.ItemID,
 514						Summary: summary,
 515					}
 516					if reasoningMetadata.EncryptedContent != nil {
 517						reasoning.EncryptedContent = param.NewOpt(*reasoningMetadata.EncryptedContent)
 518					}
 519					input = append(input, responses.ResponseInputItemUnionParam{
 520						OfReasoning: reasoning,
 521					})
 522				}
 523			}
 524
 525			if !hasVisibleResponsesAssistantContent(input, startIdx) {
 526				warnings = append(warnings, fantasy.CallWarning{
 527					Type:    fantasy.CallWarningTypeOther,
 528					Message: "dropping empty assistant message (contains neither user-facing content nor tool calls)",
 529				})
 530				// Remove any items that were added during this iteration
 531				input = input[:startIdx]
 532				continue
 533			}
 534
 535		case fantasy.MessageRoleTool:
 536			for _, c := range msg.Content {
 537				if c.GetType() != fantasy.ContentTypeToolResult {
 538					warnings = append(warnings, fantasy.CallWarning{
 539						Type:    fantasy.CallWarningTypeOther,
 540						Message: "tool message can only have tool result content",
 541					})
 542					continue
 543				}
 544
 545				toolResultPart, ok := fantasy.AsContentType[fantasy.ToolResultPart](c)
 546				if !ok {
 547					warnings = append(warnings, fantasy.CallWarning{
 548						Type:    fantasy.CallWarningTypeOther,
 549						Message: "tool message result part does not have the right type",
 550					})
 551					continue
 552				}
 553
 554				var outputStr string
 555				switch toolResultPart.Output.GetType() {
 556				case fantasy.ToolResultContentTypeText:
 557					output, ok := fantasy.AsToolResultOutputType[fantasy.ToolResultOutputContentText](toolResultPart.Output)
 558					if !ok {
 559						warnings = append(warnings, fantasy.CallWarning{
 560							Type:    fantasy.CallWarningTypeOther,
 561							Message: "tool result output does not have the right type",
 562						})
 563						continue
 564					}
 565					outputStr = output.Text
 566				case fantasy.ToolResultContentTypeError:
 567					output, ok := fantasy.AsToolResultOutputType[fantasy.ToolResultOutputContentError](toolResultPart.Output)
 568					if !ok {
 569						warnings = append(warnings, fantasy.CallWarning{
 570							Type:    fantasy.CallWarningTypeOther,
 571							Message: "tool result output does not have the right type",
 572						})
 573						continue
 574					}
 575					outputStr = output.Error.Error()
 576				}
 577
 578				input = append(input, responses.ResponseInputItemParamOfFunctionCallOutput(toolResultPart.ToolCallID, outputStr))
 579			}
 580		}
 581	}
 582
 583	return input, warnings
 584}
 585
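    // hasVisibleResponsesUserContent reports whether a converted user message has any content parts worth sending.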
 586func hasVisibleResponsesUserContent(content responses.ResponseInputMessageContentListParam) bool {
 587	return len(content) > 0
 588}
 589
 590func hasVisibleResponsesAssistantContent(items []responses.ResponseInputItemUnionParam, startIdx int) bool {
 591	// Check if we added any assistant content parts from this message
 592	for i := startIdx; i < len(items); i++ {
 593		if items[i].OfMessage != nil || items[i].OfFunctionCall != nil {
 594			return true
 595		}
 596	}
 597	return false
 598}
 599
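    // toResponsesTools converts fantasy tools and the tool choice into Responses API tool
    // parameters; unsupported tool types are reported as warnings.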
 600func toResponsesTools(tools []fantasy.Tool, toolChoice *fantasy.ToolChoice, options *ResponsesProviderOptions) ([]responses.ToolUnionParam, responses.ResponseNewParamsToolChoiceUnion, []fantasy.CallWarning) {
 601	warnings := make([]fantasy.CallWarning, 0)
 602	var openaiTools []responses.ToolUnionParam
 603
 604	if len(tools) == 0 {
 605		return nil, responses.ResponseNewParamsToolChoiceUnion{}, nil
 606	}
 607
 608	strictJSONSchema := false
 609	if options != nil && options.StrictJSONSchema != nil {
 610		strictJSONSchema = *options.StrictJSONSchema
 611	}
 612
 613	for _, tool := range tools {
 614		if tool.GetType() == fantasy.ToolTypeFunction {
 615			ft, ok := tool.(fantasy.FunctionTool)
 616			if !ok {
 617				continue
 618			}
 619			openaiTools = append(openaiTools, responses.ToolUnionParam{
 620				OfFunction: &responses.FunctionToolParam{
 621					Name:        ft.Name,
 622					Description: param.NewOpt(ft.Description),
 623					Parameters:  ft.InputSchema,
 624					Strict:      param.NewOpt(strictJSONSchema),
 625					Type:        "function",
 626				},
 627			})
 628			continue
 629		}
 630
 631		warnings = append(warnings, fantasy.CallWarning{
 632			Type:    fantasy.CallWarningTypeUnsupportedTool,
 633			Tool:    tool,
 634			Message: "tool is not supported",
 635		})
 636	}
 637
 638	if toolChoice == nil {
 639		return openaiTools, responses.ResponseNewParamsToolChoiceUnion{}, warnings
 640	}
 641
 642	var openaiToolChoice responses.ResponseNewParamsToolChoiceUnion
 643
 644	switch *toolChoice {
 645	case fantasy.ToolChoiceAuto:
 646		openaiToolChoice = responses.ResponseNewParamsToolChoiceUnion{
 647			OfToolChoiceMode: param.NewOpt(responses.ToolChoiceOptionsAuto),
 648		}
 649	case fantasy.ToolChoiceNone:
 650		openaiToolChoice = responses.ResponseNewParamsToolChoiceUnion{
 651			OfToolChoiceMode: param.NewOpt(responses.ToolChoiceOptionsNone),
 652		}
 653	case fantasy.ToolChoiceRequired:
 654		openaiToolChoice = responses.ResponseNewParamsToolChoiceUnion{
 655			OfToolChoiceMode: param.NewOpt(responses.ToolChoiceOptionsRequired),
 656		}
 657	default:
 658		openaiToolChoice = responses.ResponseNewParamsToolChoiceUnion{
 659			OfFunctionTool: &responses.ToolChoiceFunctionParam{
 660				Type: "function",
 661				Name: string(*toolChoice),
 662			},
 663		}
 664	}
 665
 666	return openaiTools, openaiToolChoice, warnings
 667}
 668
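    // Generate makes a single, non-streaming Responses API call and converts the result into a fantasy.Response.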
 669func (o responsesLanguageModel) Generate(ctx context.Context, call fantasy.Call) (*fantasy.Response, error) {
 670	params, warnings := o.prepareParams(call)
 671	response, err := o.client.Responses.New(ctx, *params)
 672	if err != nil {
 673		return nil, toProviderErr(err)
 674	}
 675
 676	if response.Error.Message != "" {
 677		return nil, &fantasy.Error{
 678			Title:   "provider error",
 679			Message: fmt.Sprintf("%s (code: %s)", response.Error.Message, response.Error.Code),
 680		}
 681	}
 682
 683	var content []fantasy.Content
 684	hasFunctionCall := false
 685
 686	for _, outputItem := range response.Output {
 687		switch outputItem.Type {
 688		case "message":
 689			for _, contentPart := range outputItem.Content {
 690				if contentPart.Type == "output_text" {
 691					content = append(content, fantasy.TextContent{
 692						Text: contentPart.Text,
 693					})
 694
 695					for _, annotation := range contentPart.Annotations {
 696						switch annotation.Type {
 697						case "url_citation":
 698							content = append(content, fantasy.SourceContent{
 699								SourceType: fantasy.SourceTypeURL,
 700								ID:         uuid.NewString(),
 701								URL:        annotation.URL,
 702								Title:      annotation.Title,
 703							})
 704						case "file_citation":
 705							title := "Document"
 706							if annotation.Filename != "" {
 707								title = annotation.Filename
 708							}
 709							filename := annotation.Filename
 710							if filename == "" {
 711								filename = annotation.FileID
 712							}
 713							content = append(content, fantasy.SourceContent{
 714								SourceType: fantasy.SourceTypeDocument,
 715								ID:         uuid.NewString(),
 716								MediaType:  "text/plain",
 717								Title:      title,
 718								Filename:   filename,
 719							})
 720						}
 721					}
 722				}
 723			}
 724
 725		case "function_call":
 726			hasFunctionCall = true
 727			content = append(content, fantasy.ToolCallContent{
 728				ProviderExecuted: false,
 729				ToolCallID:       outputItem.CallID,
 730				ToolName:         outputItem.Name,
 731				Input:            outputItem.Arguments,
 732			})
 733
 734		case "reasoning":
 735			metadata := &ResponsesReasoningMetadata{
 736				ItemID: outputItem.ID,
 737			}
 738			if outputItem.EncryptedContent != "" {
 739				metadata.EncryptedContent = &outputItem.EncryptedContent
 740			}
 741
 742			if len(outputItem.Summary) == 0 && metadata.EncryptedContent == nil {
 743				continue
 744			}
 745
 746			// When there are no summary parts, fall back to a single empty summary entry
 747			summaries := outputItem.Summary
 748			if len(summaries) == 0 {
 749				summaries = []responses.ResponseReasoningItemSummary{{Type: "summary_text", Text: ""}}
 750			}
 751
 752			for _, s := range summaries {
 753				metadata.Summary = append(metadata.Summary, s.Text)
 754			}
 755
 756			content = append(content, fantasy.ReasoningContent{
 757				Text: strings.Join(metadata.Summary, "\n"),
 758				ProviderMetadata: fantasy.ProviderMetadata{
 759					Name: metadata,
 760				},
 761			})
 762		}
 763	}
 764
 765	usage := fantasy.Usage{
 766		InputTokens:  response.Usage.InputTokens,
 767		OutputTokens: response.Usage.OutputTokens,
 768		TotalTokens:  response.Usage.InputTokens + response.Usage.OutputTokens,
 769	}
 770
 771	if response.Usage.OutputTokensDetails.ReasoningTokens != 0 {
 772		usage.ReasoningTokens = response.Usage.OutputTokensDetails.ReasoningTokens
 773	}
 774	if response.Usage.InputTokensDetails.CachedTokens != 0 {
 775		usage.CacheReadTokens = response.Usage.InputTokensDetails.CachedTokens
 776	}
 777
 778	finishReason := mapResponsesFinishReason(response.IncompleteDetails.Reason, hasFunctionCall)
 779
 780	return &fantasy.Response{
 781		Content:          content,
 782		Usage:            usage,
 783		FinishReason:     finishReason,
 784		ProviderMetadata: fantasy.ProviderMetadata{},
 785		Warnings:         warnings,
 786	}, nil
 787}
 788
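    // mapResponsesFinishReason maps the Responses API incomplete-details reason (and whether a
    // function call was produced) to a fantasy.FinishReason.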
 789func mapResponsesFinishReason(reason string, hasFunctionCall bool) fantasy.FinishReason {
 790	if hasFunctionCall {
 791		return fantasy.FinishReasonToolCalls
 792	}
 793
 794	switch reason {
 795	case "":
 796		return fantasy.FinishReasonStop
 797	case "max_tokens", "max_output_tokens":
 798		return fantasy.FinishReasonLength
 799	case "content_filter":
 800		return fantasy.FinishReasonContentFilter
 801	default:
 802		return fantasy.FinishReasonOther
 803	}
 804}
 805
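    // Stream makes a streaming Responses API call and yields stream parts as events arrive.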
 806func (o responsesLanguageModel) Stream(ctx context.Context, call fantasy.Call) (fantasy.StreamResponse, error) {
 807	params, warnings := o.prepareParams(call)
 808
 809	stream := o.client.Responses.NewStreaming(ctx, *params)
 810
 811	finishReason := fantasy.FinishReasonUnknown
 812	var usage fantasy.Usage
 813	ongoingToolCalls := make(map[int64]*ongoingToolCall)
 814	hasFunctionCall := false
 815	activeReasoning := make(map[string]*reasoningState)
 816
 817	return func(yield func(fantasy.StreamPart) bool) {
 818		if len(warnings) > 0 {
 819			if !yield(fantasy.StreamPart{
 820				Type:     fantasy.StreamPartTypeWarnings,
 821				Warnings: warnings,
 822			}) {
 823				return
 824			}
 825		}
 826
 827		for stream.Next() {
 828			event := stream.Current()
 829
 830			switch event.Type {
 831			case "response.created":
 832				_ = event.AsResponseCreated()
 833
 834			case "response.output_item.added":
 835				added := event.AsResponseOutputItemAdded()
 836				switch added.Item.Type {
 837				case "function_call":
 838					ongoingToolCalls[added.OutputIndex] = &ongoingToolCall{
 839						toolName:   added.Item.Name,
 840						toolCallID: added.Item.CallID,
 841					}
 842					if !yield(fantasy.StreamPart{
 843						Type:         fantasy.StreamPartTypeToolInputStart,
 844						ID:           added.Item.CallID,
 845						ToolCallName: added.Item.Name,
 846					}) {
 847						return
 848					}
 849
 850				case "message":
 851					if !yield(fantasy.StreamPart{
 852						Type: fantasy.StreamPartTypeTextStart,
 853						ID:   added.Item.ID,
 854					}) {
 855						return
 856					}
 857
 858				case "reasoning":
 859					metadata := &ResponsesReasoningMetadata{
 860						ItemID:  added.Item.ID,
 861						Summary: []string{},
 862					}
 863					if added.Item.EncryptedContent != "" {
 864						metadata.EncryptedContent = &added.Item.EncryptedContent
 865					}
 866
 867					activeReasoning[added.Item.ID] = &reasoningState{
 868						metadata: metadata,
 869					}
 870					if !yield(fantasy.StreamPart{
 871						Type: fantasy.StreamPartTypeReasoningStart,
 872						ID:   added.Item.ID,
 873						ProviderMetadata: fantasy.ProviderMetadata{
 874							Name: metadata,
 875						},
 876					}) {
 877						return
 878					}
 879				}
 880
 881			case "response.output_item.done":
 882				done := event.AsResponseOutputItemDone()
 883				switch done.Item.Type {
 884				case "function_call":
 885					tc := ongoingToolCalls[done.OutputIndex]
 886					if tc != nil {
 887						delete(ongoingToolCalls, done.OutputIndex)
 888						hasFunctionCall = true
 889
 890						if !yield(fantasy.StreamPart{
 891							Type: fantasy.StreamPartTypeToolInputEnd,
 892							ID:   done.Item.CallID,
 893						}) {
 894							return
 895						}
 896						if !yield(fantasy.StreamPart{
 897							Type:          fantasy.StreamPartTypeToolCall,
 898							ID:            done.Item.CallID,
 899							ToolCallName:  done.Item.Name,
 900							ToolCallInput: done.Item.Arguments,
 901						}) {
 902							return
 903						}
 904					}
 905
 906				case "message":
 907					if !yield(fantasy.StreamPart{
 908						Type: fantasy.StreamPartTypeTextEnd,
 909						ID:   done.Item.ID,
 910					}) {
 911						return
 912					}
 913
 914				case "reasoning":
 915					state := activeReasoning[done.Item.ID]
 916					if state != nil {
 917						if !yield(fantasy.StreamPart{
 918							Type: fantasy.StreamPartTypeReasoningEnd,
 919							ID:   done.Item.ID,
 920							ProviderMetadata: fantasy.ProviderMetadata{
 921								Name: state.metadata,
 922							},
 923						}) {
 924							return
 925						}
 926						delete(activeReasoning, done.Item.ID)
 927					}
 928				}
 929
 930			case "response.function_call_arguments.delta":
 931				delta := event.AsResponseFunctionCallArgumentsDelta()
 932				tc := ongoingToolCalls[delta.OutputIndex]
 933				if tc != nil {
 934					if !yield(fantasy.StreamPart{
 935						Type:  fantasy.StreamPartTypeToolInputDelta,
 936						ID:    tc.toolCallID,
 937						Delta: delta.Delta,
 938					}) {
 939						return
 940					}
 941				}
 942
 943			case "response.output_text.delta":
 944				textDelta := event.AsResponseOutputTextDelta()
 945				if !yield(fantasy.StreamPart{
 946					Type:  fantasy.StreamPartTypeTextDelta,
 947					ID:    textDelta.ItemID,
 948					Delta: textDelta.Delta,
 949				}) {
 950					return
 951				}
 952
 953			case "response.reasoning_summary_part.added":
 954				added := event.AsResponseReasoningSummaryPartAdded()
 955				state := activeReasoning[added.ItemID]
 956				if state != nil {
 957					state.metadata.Summary = append(state.metadata.Summary, "")
 958					activeReasoning[added.ItemID] = state
 959					if !yield(fantasy.StreamPart{
 960						Type:  fantasy.StreamPartTypeReasoningDelta,
 961						ID:    added.ItemID,
 962						Delta: "\n",
 963						ProviderMetadata: fantasy.ProviderMetadata{
 964							Name: state.metadata,
 965						},
 966					}) {
 967						return
 968					}
 969				}
 970
 971			case "response.reasoning_summary_text.delta":
 972				textDelta := event.AsResponseReasoningSummaryTextDelta()
 973				state := activeReasoning[textDelta.ItemID]
 974				if state != nil {
  975					if int(textDelta.SummaryIndex) < len(state.metadata.Summary) {
 976						state.metadata.Summary[textDelta.SummaryIndex] += textDelta.Delta
 977					}
 978					activeReasoning[textDelta.ItemID] = state
 979					if !yield(fantasy.StreamPart{
 980						Type:  fantasy.StreamPartTypeReasoningDelta,
 981						ID:    textDelta.ItemID,
 982						Delta: textDelta.Delta,
 983						ProviderMetadata: fantasy.ProviderMetadata{
 984							Name: state.metadata,
 985						},
 986					}) {
 987						return
 988					}
 989				}
 990
 991			case "response.completed", "response.incomplete":
 992				completed := event.AsResponseCompleted()
 993				finishReason = mapResponsesFinishReason(completed.Response.IncompleteDetails.Reason, hasFunctionCall)
 994				usage = fantasy.Usage{
 995					InputTokens:  completed.Response.Usage.InputTokens,
 996					OutputTokens: completed.Response.Usage.OutputTokens,
 997					TotalTokens:  completed.Response.Usage.InputTokens + completed.Response.Usage.OutputTokens,
 998				}
 999				if completed.Response.Usage.OutputTokensDetails.ReasoningTokens != 0 {
1000					usage.ReasoningTokens = completed.Response.Usage.OutputTokensDetails.ReasoningTokens
1001				}
1002				if completed.Response.Usage.InputTokensDetails.CachedTokens != 0 {
1003					usage.CacheReadTokens = completed.Response.Usage.InputTokensDetails.CachedTokens
1004				}
1005
1006			case "error":
1007				errorEvent := event.AsError()
1008				if !yield(fantasy.StreamPart{
1009					Type:  fantasy.StreamPartTypeError,
1010					Error: fmt.Errorf("response error: %s (code: %s)", errorEvent.Message, errorEvent.Code),
1011				}) {
1012					return
1013				}
1014				return
1015			}
1016		}
1017
1018		err := stream.Err()
1019		if err != nil {
1020			yield(fantasy.StreamPart{
1021				Type:  fantasy.StreamPartTypeError,
1022				Error: toProviderErr(err),
1023			})
1024			return
1025		}
1026
1027		yield(fantasy.StreamPart{
1028			Type:         fantasy.StreamPartTypeFinish,
1029			Usage:        usage,
1030			FinishReason: finishReason,
1031		})
1032	}, nil
1033}
1034
1035// GetReasoningMetadata extracts reasoning metadata from provider options for responses models.
1036func GetReasoningMetadata(providerOptions fantasy.ProviderOptions) *ResponsesReasoningMetadata {
1037	if openaiResponsesOptions, ok := providerOptions[Name]; ok {
1038		if reasoning, ok := openaiResponsesOptions.(*ResponsesReasoningMetadata); ok {
1039			return reasoning
1040		}
1041	}
1042	return nil
1043}
1044
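    // ongoingToolCall tracks a function call whose arguments are still being streamed.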
1045type ongoingToolCall struct {
1046	toolName   string
1047	toolCallID string
1048}
1049
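    // reasoningState accumulates reasoning metadata for an in-progress reasoning output item.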
1050type reasoningState struct {
1051	metadata *ResponsesReasoningMetadata
1052}
1053
1054// GenerateObject implements fantasy.LanguageModel.
1055func (o responsesLanguageModel) GenerateObject(ctx context.Context, call fantasy.ObjectCall) (*fantasy.ObjectResponse, error) {
1056	switch o.objectMode {
1057	case fantasy.ObjectModeText:
1058		return object.GenerateWithText(ctx, o, call)
1059	case fantasy.ObjectModeTool:
1060		return object.GenerateWithTool(ctx, o, call)
1061	default:
1062		return o.generateObjectWithJSONMode(ctx, call)
1063	}
1064}
1065
1066// StreamObject implements fantasy.LanguageModel.
1067func (o responsesLanguageModel) StreamObject(ctx context.Context, call fantasy.ObjectCall) (fantasy.ObjectStreamResponse, error) {
1068	switch o.objectMode {
1069	case fantasy.ObjectModeTool:
1070		return object.StreamWithTool(ctx, o, call)
1071	case fantasy.ObjectModeText:
1072		return object.StreamWithText(ctx, o, call)
1073	default:
1074		return o.streamObjectWithJSONMode(ctx, call)
1075	}
1076}
1077
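    // generateObjectWithJSONMode generates a structured object by constraining the response with a
    // JSON schema via the Responses text format, then parsing and validating the returned text.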
1078func (o responsesLanguageModel) generateObjectWithJSONMode(ctx context.Context, call fantasy.ObjectCall) (*fantasy.ObjectResponse, error) {
1079	// Convert our Schema to OpenAI's JSON Schema format
1080	jsonSchemaMap := schema.ToMap(call.Schema)
1081
1082	// Add additionalProperties: false recursively for strict mode (OpenAI requirement)
1083	addAdditionalPropertiesFalse(jsonSchemaMap)
1084
1085	schemaName := call.SchemaName
1086	if schemaName == "" {
1087		schemaName = "response"
1088	}
1089
1090	// Build request using prepareParams
1091	fantasyCall := fantasy.Call{
1092		Prompt:           call.Prompt,
1093		MaxOutputTokens:  call.MaxOutputTokens,
1094		Temperature:      call.Temperature,
1095		TopP:             call.TopP,
1096		PresencePenalty:  call.PresencePenalty,
1097		FrequencyPenalty: call.FrequencyPenalty,
1098		ProviderOptions:  call.ProviderOptions,
1099	}
1100
1101	params, warnings := o.prepareParams(fantasyCall)
1102
1103	// Add structured output via Text.Format field
1104	params.Text = responses.ResponseTextConfigParam{
1105		Format: responses.ResponseFormatTextConfigParamOfJSONSchema(schemaName, jsonSchemaMap),
1106	}
1107
1108	// Make request
1109	response, err := o.client.Responses.New(ctx, *params)
1110	if err != nil {
1111		return nil, toProviderErr(err)
1112	}
1113
1114	if response.Error.Message != "" {
1115		return nil, &fantasy.Error{
1116			Title:   "provider error",
1117			Message: fmt.Sprintf("%s (code: %s)", response.Error.Message, response.Error.Code),
1118		}
1119	}
1120
1121	// Extract JSON text from response
1122	var jsonText string
1123	for _, outputItem := range response.Output {
1124		if outputItem.Type == "message" {
1125			for _, contentPart := range outputItem.Content {
1126				if contentPart.Type == "output_text" {
1127					jsonText = contentPart.Text
1128					break
1129				}
1130			}
1131		}
1132	}
1133
1134	if jsonText == "" {
1135		usage := fantasy.Usage{
1136			InputTokens:  response.Usage.InputTokens,
1137			OutputTokens: response.Usage.OutputTokens,
1138			TotalTokens:  response.Usage.InputTokens + response.Usage.OutputTokens,
1139		}
1140		finishReason := mapResponsesFinishReason(response.IncompleteDetails.Reason, false)
1141		return nil, &fantasy.NoObjectGeneratedError{
1142			RawText:      "",
1143			ParseError:   fmt.Errorf("no text content in response"),
1144			Usage:        usage,
1145			FinishReason: finishReason,
1146		}
1147	}
1148
1149	// Parse and validate
1150	var obj any
1151	if call.RepairText != nil {
1152		obj, err = schema.ParseAndValidateWithRepair(ctx, jsonText, call.Schema, call.RepairText)
1153	} else {
1154		obj, err = schema.ParseAndValidate(jsonText, call.Schema)
1155	}
1156
1157	usage := fantasy.Usage{
1158		InputTokens:  response.Usage.InputTokens,
1159		OutputTokens: response.Usage.OutputTokens,
1160		TotalTokens:  response.Usage.InputTokens + response.Usage.OutputTokens,
1161	}
1162	if response.Usage.OutputTokensDetails.ReasoningTokens != 0 {
1163		usage.ReasoningTokens = response.Usage.OutputTokensDetails.ReasoningTokens
1164	}
1165	if response.Usage.InputTokensDetails.CachedTokens != 0 {
1166		usage.CacheReadTokens = response.Usage.InputTokensDetails.CachedTokens
1167	}
1168
1169	finishReason := mapResponsesFinishReason(response.IncompleteDetails.Reason, false)
1170
1171	if err != nil {
1172		// Add usage info to error
1173		if nogErr, ok := err.(*fantasy.NoObjectGeneratedError); ok {
1174			nogErr.Usage = usage
1175			nogErr.FinishReason = finishReason
1176		}
1177		return nil, err
1178	}
1179
1180	return &fantasy.ObjectResponse{
1181		Object:       obj,
1182		RawText:      jsonText,
1183		Usage:        usage,
1184		FinishReason: finishReason,
1185		Warnings:     warnings,
1186	}, nil
1187}
1188
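    // streamObjectWithJSONMode streams a structured object: it re-parses the accumulated text after
    // each delta and emits the object whenever the parsed value changes.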
1189func (o responsesLanguageModel) streamObjectWithJSONMode(ctx context.Context, call fantasy.ObjectCall) (fantasy.ObjectStreamResponse, error) {
1190	// Convert our Schema to OpenAI's JSON Schema format
1191	jsonSchemaMap := schema.ToMap(call.Schema)
1192
1193	// Add additionalProperties: false recursively for strict mode (OpenAI requirement)
1194	addAdditionalPropertiesFalse(jsonSchemaMap)
1195
1196	schemaName := call.SchemaName
1197	if schemaName == "" {
1198		schemaName = "response"
1199	}
1200
1201	// Build request using prepareParams
1202	fantasyCall := fantasy.Call{
1203		Prompt:           call.Prompt,
1204		MaxOutputTokens:  call.MaxOutputTokens,
1205		Temperature:      call.Temperature,
1206		TopP:             call.TopP,
1207		PresencePenalty:  call.PresencePenalty,
1208		FrequencyPenalty: call.FrequencyPenalty,
1209		ProviderOptions:  call.ProviderOptions,
1210	}
1211
1212	params, warnings := o.prepareParams(fantasyCall)
1213
1214	// Add structured output via Text.Format field
1215	params.Text = responses.ResponseTextConfigParam{
1216		Format: responses.ResponseFormatTextConfigParamOfJSONSchema(schemaName, jsonSchemaMap),
1217	}
1218
1219	stream := o.client.Responses.NewStreaming(ctx, *params)
1220
1221	return func(yield func(fantasy.ObjectStreamPart) bool) {
1222		if len(warnings) > 0 {
1223			if !yield(fantasy.ObjectStreamPart{
1224				Type:     fantasy.ObjectStreamPartTypeObject,
1225				Warnings: warnings,
1226			}) {
1227				return
1228			}
1229		}
1230
1231		var accumulated string
1232		var lastParsedObject any
1233		var usage fantasy.Usage
1234		var finishReason fantasy.FinishReason
1235		var streamErr error
1236		hasFunctionCall := false
1237
1238		for stream.Next() {
1239			event := stream.Current()
1240
1241			switch event.Type {
1242			case "response.output_text.delta":
1243				textDelta := event.AsResponseOutputTextDelta()
1244				accumulated += textDelta.Delta
1245
1246				// Try to parse the accumulated text
1247				obj, state, parseErr := schema.ParsePartialJSON(accumulated)
1248
1249				// If we successfully parsed, validate and emit
1250				if state == schema.ParseStateSuccessful || state == schema.ParseStateRepaired {
1251					if err := schema.ValidateAgainstSchema(obj, call.Schema); err == nil {
1252						// Only emit if object is different from last
1253						if !reflect.DeepEqual(obj, lastParsedObject) {
1254							if !yield(fantasy.ObjectStreamPart{
1255								Type:   fantasy.ObjectStreamPartTypeObject,
1256								Object: obj,
1257							}) {
1258								return
1259							}
1260							lastParsedObject = obj
1261						}
1262					}
1263				}
1264
1265				// If parsing failed and we have a repair function, try it
1266				if state == schema.ParseStateFailed && call.RepairText != nil {
1267					repairedText, repairErr := call.RepairText(ctx, accumulated, parseErr)
1268					if repairErr == nil {
1269						obj2, state2, _ := schema.ParsePartialJSON(repairedText)
1270						if (state2 == schema.ParseStateSuccessful || state2 == schema.ParseStateRepaired) &&
1271							schema.ValidateAgainstSchema(obj2, call.Schema) == nil {
1272							if !reflect.DeepEqual(obj2, lastParsedObject) {
1273								if !yield(fantasy.ObjectStreamPart{
1274									Type:   fantasy.ObjectStreamPartTypeObject,
1275									Object: obj2,
1276								}) {
1277									return
1278								}
1279								lastParsedObject = obj2
1280							}
1281						}
1282					}
1283				}
1284
1285			case "response.completed", "response.incomplete":
1286				completed := event.AsResponseCompleted()
1287				finishReason = mapResponsesFinishReason(completed.Response.IncompleteDetails.Reason, hasFunctionCall)
1288				usage = fantasy.Usage{
1289					InputTokens:  completed.Response.Usage.InputTokens,
1290					OutputTokens: completed.Response.Usage.OutputTokens,
1291					TotalTokens:  completed.Response.Usage.InputTokens + completed.Response.Usage.OutputTokens,
1292				}
1293				if completed.Response.Usage.OutputTokensDetails.ReasoningTokens != 0 {
1294					usage.ReasoningTokens = completed.Response.Usage.OutputTokensDetails.ReasoningTokens
1295				}
1296				if completed.Response.Usage.InputTokensDetails.CachedTokens != 0 {
1297					usage.CacheReadTokens = completed.Response.Usage.InputTokensDetails.CachedTokens
1298				}
1299
1300			case "error":
1301				errorEvent := event.AsError()
1302				streamErr = fmt.Errorf("response error: %s (code: %s)", errorEvent.Message, errorEvent.Code)
1303				if !yield(fantasy.ObjectStreamPart{
1304					Type:  fantasy.ObjectStreamPartTypeError,
1305					Error: streamErr,
1306				}) {
1307					return
1308				}
1309				return
1310			}
1311		}
1312
1313		err := stream.Err()
1314		if err != nil {
1315			yield(fantasy.ObjectStreamPart{
1316				Type:  fantasy.ObjectStreamPartTypeError,
1317				Error: toProviderErr(err),
1318			})
1319			return
1320		}
1321
1322		// Emit the finish part, or an error if no valid object was produced
1323		if streamErr == nil && lastParsedObject != nil {
1324			yield(fantasy.ObjectStreamPart{
1325				Type:         fantasy.ObjectStreamPartTypeFinish,
1326				Usage:        usage,
1327				FinishReason: finishReason,
1328			})
1329		} else if streamErr == nil && lastParsedObject == nil {
1330			// No object was generated
1331			yield(fantasy.ObjectStreamPart{
1332				Type: fantasy.ObjectStreamPartTypeError,
1333				Error: &fantasy.NoObjectGeneratedError{
1334					RawText:      accumulated,
1335					ParseError:   fmt.Errorf("no valid object generated in stream"),
1336					Usage:        usage,
1337					FinishReason: finishReason,
1338				},
1339			})
1340		}
1341	}, nil
1342}