1package openai
2
3import (
4 "context"
5 "encoding/base64"
6 "encoding/json"
7 "fmt"
8 "reflect"
9 "strings"
10
11 "charm.land/fantasy"
12 "charm.land/fantasy/object"
13 "charm.land/fantasy/schema"
14 "github.com/google/uuid"
15 "github.com/openai/openai-go/v3"
16 "github.com/openai/openai-go/v3/packages/param"
17 "github.com/openai/openai-go/v3/responses"
18 "github.com/openai/openai-go/v3/shared"
19)
20
// topLogprobsMax is the number of top log probabilities requested when the
// caller enables logprobs with a plain boolean (the maximum the API allows).
const topLogprobsMax = 20

// responsesLanguageModel is a fantasy language model backed by the OpenAI
// Responses API.
type responsesLanguageModel struct {
	provider           string             // provider name reported by Provider()
	modelID            string             // OpenAI model identifier, e.g. "gpt-5" or "o3-mini"
	client             openai.Client      // underlying OpenAI SDK client
	objectMode         fantasy.ObjectMode // strategy used by GenerateObject/StreamObject
	noDefaultUserAgent bool               // forwarded to callUARequestOptions; presumably suppresses the default User-Agent — confirm there
}
30
31// newResponsesLanguageModel implements a responses api model
32// INFO: (kujtim) currently we do not support stored parameter we default it to false.
33func newResponsesLanguageModel(modelID string, provider string, client openai.Client, objectMode fantasy.ObjectMode, noDefaultUserAgent bool) responsesLanguageModel {
34 return responsesLanguageModel{
35 modelID: modelID,
36 provider: provider,
37 client: client,
38 objectMode: objectMode,
39 noDefaultUserAgent: noDefaultUserAgent,
40 }
41}
42
// Model returns the OpenAI model identifier this instance targets.
func (o responsesLanguageModel) Model() string {
	return o.modelID
}
46
// Provider returns the provider name this model was constructed with.
func (o responsesLanguageModel) Provider() string {
	return o.provider
}
50
// responsesModelConfig captures per-model capabilities and quirks for the
// Responses API.
type responsesModelConfig struct {
	isReasoningModel           bool   // reasoning models reject temperature/topP and accept reasoning params
	systemMessageMode          string // how system prompts are sent: "system", "developer", or "remove"
	requiredAutoTruncation     bool   // when true, truncation is forced to "auto"
	supportsFlexProcessing     bool   // model may use the "flex" service tier
	supportsPriorityProcessing bool   // model may use the "priority" service tier
}

// getResponsesModelConfig derives model capabilities from the model ID using
// substring/prefix matching.
//
// Flex processing covers o3, o4-mini, and gpt-5 (excluding gpt-5-chat);
// priority processing additionally covers the gpt-4 family but excludes
// gpt-5-nano and gpt-5-chat. Reasoning-model detection covers the o-series,
// open-weight ("oss"), gpt-5, codex, and computer-use models, with gpt-5-chat
// explicitly treated as non-reasoning.
func getResponsesModelConfig(modelID string) responsesModelConfig {
	supportsFlexProcessing := strings.HasPrefix(modelID, "o3") ||
		strings.Contains(modelID, "-o3") || strings.Contains(modelID, "o4-mini") ||
		(strings.Contains(modelID, "gpt-5") && !strings.Contains(modelID, "gpt-5-chat"))

	supportsPriorityProcessing := strings.Contains(modelID, "gpt-4") ||
		strings.Contains(modelID, "gpt-5-mini") ||
		(strings.Contains(modelID, "gpt-5") &&
			!strings.Contains(modelID, "gpt-5-nano") &&
			!strings.Contains(modelID, "gpt-5-chat")) ||
		strings.HasPrefix(modelID, "o3") ||
		strings.Contains(modelID, "-o3") ||
		strings.Contains(modelID, "o4-mini")

	// Start from the defaults and specialize below instead of re-spelling
	// every field in each branch.
	cfg := responsesModelConfig{
		isReasoningModel:           false,
		systemMessageMode:          "system",
		requiredAutoTruncation:     false,
		supportsFlexProcessing:     supportsFlexProcessing,
		supportsPriorityProcessing: supportsPriorityProcessing,
	}

	// gpt-5-chat is a non-reasoning model even though it matches the gpt-5
	// substring check below, so it must be handled first.
	if strings.Contains(modelID, "gpt-5-chat") {
		return cfg
	}

	if strings.HasPrefix(modelID, "o1") || strings.Contains(modelID, "-o1") ||
		strings.HasPrefix(modelID, "o3") || strings.Contains(modelID, "-o3") ||
		strings.HasPrefix(modelID, "o4") || strings.Contains(modelID, "-o4") ||
		strings.HasPrefix(modelID, "oss") || strings.Contains(modelID, "-oss") ||
		strings.Contains(modelID, "gpt-5") || strings.Contains(modelID, "codex-") ||
		strings.Contains(modelID, "computer-use") {
		cfg.isReasoningModel = true
		if strings.Contains(modelID, "o1-mini") || strings.Contains(modelID, "o1-preview") {
			// Early o1 previews accept neither system nor developer messages.
			cfg.systemMessageMode = "remove"
		} else {
			cfg.systemMessageMode = "developer"
		}
		return cfg
	}

	return cfg
}
123
// prepareParams translates a fantasy.Call into Responses API request
// parameters. It returns the request plus warnings for every setting that is
// unsupported by the API, by this particular model, or that had to be
// dropped. Stored responses are always disabled (Store=false).
func (o responsesLanguageModel) prepareParams(call fantasy.Call) (*responses.ResponseNewParams, []fantasy.CallWarning) {
	var warnings []fantasy.CallWarning
	params := &responses.ResponseNewParams{
		Store: param.NewOpt(false),
	}

	modelConfig := getResponsesModelConfig(o.modelID)

	// topK, presencePenalty, and frequencyPenalty have no Responses API
	// equivalent: warn and drop.
	if call.TopK != nil {
		warnings = append(warnings, fantasy.CallWarning{
			Type:    fantasy.CallWarningTypeUnsupportedSetting,
			Setting: "topK",
		})
	}

	if call.PresencePenalty != nil {
		warnings = append(warnings, fantasy.CallWarning{
			Type:    fantasy.CallWarningTypeUnsupportedSetting,
			Setting: "presencePenalty",
		})
	}

	if call.FrequencyPenalty != nil {
		warnings = append(warnings, fantasy.CallWarning{
			Type:    fantasy.CallWarningTypeUnsupportedSetting,
			Setting: "frequencyPenalty",
		})
	}

	// Provider-specific options, if the caller supplied any under our key.
	var openaiOptions *ResponsesProviderOptions
	if opts, ok := call.ProviderOptions[Name]; ok {
		if typedOpts, ok := opts.(*ResponsesProviderOptions); ok {
			openaiOptions = typedOpts
		}
	}

	input, inputWarnings := toResponsesPrompt(call.Prompt, modelConfig.systemMessageMode)
	warnings = append(warnings, inputWarnings...)

	var include []IncludeType

	addInclude := func(key IncludeType) {
		include = append(include, key)
	}

	// Logprobs can arrive as a bool ("give me the max") or as a numeric
	// count; normalize to an int.
	topLogprobs := 0
	if openaiOptions != nil && openaiOptions.Logprobs != nil {
		switch v := openaiOptions.Logprobs.(type) {
		case bool:
			if v {
				topLogprobs = topLogprobsMax
			}
		case float64:
			topLogprobs = int(v)
		case int:
			topLogprobs = v
		}
	}

	if topLogprobs > 0 {
		addInclude(IncludeMessageOutputTextLogprobs)
	}

	params.Model = o.modelID
	params.Input = responses.ResponseNewParamsInputUnion{
		OfInputItemList: input,
	}

	if call.Temperature != nil {
		params.Temperature = param.NewOpt(*call.Temperature)
	}
	if call.TopP != nil {
		params.TopP = param.NewOpt(*call.TopP)
	}
	if call.MaxOutputTokens != nil {
		params.MaxOutputTokens = param.NewOpt(*call.MaxOutputTokens)
	}

	// Map the optional provider settings onto the request.
	if openaiOptions != nil {
		if openaiOptions.MaxToolCalls != nil {
			params.MaxToolCalls = param.NewOpt(*openaiOptions.MaxToolCalls)
		}
		if openaiOptions.Metadata != nil {
			// Only string values survive; other types are silently skipped.
			metadata := make(shared.Metadata)
			for k, v := range openaiOptions.Metadata {
				if str, ok := v.(string); ok {
					metadata[k] = str
				}
			}
			params.Metadata = metadata
		}
		if openaiOptions.ParallelToolCalls != nil {
			params.ParallelToolCalls = param.NewOpt(*openaiOptions.ParallelToolCalls)
		}
		if openaiOptions.User != nil {
			params.User = param.NewOpt(*openaiOptions.User)
		}
		if openaiOptions.Instructions != nil {
			params.Instructions = param.NewOpt(*openaiOptions.Instructions)
		}
		if openaiOptions.ServiceTier != nil {
			params.ServiceTier = responses.ResponseNewParamsServiceTier(*openaiOptions.ServiceTier)
		}
		if openaiOptions.PromptCacheKey != nil {
			params.PromptCacheKey = param.NewOpt(*openaiOptions.PromptCacheKey)
		}
		if openaiOptions.SafetyIdentifier != nil {
			params.SafetyIdentifier = param.NewOpt(*openaiOptions.SafetyIdentifier)
		}
		if topLogprobs > 0 {
			params.TopLogprobs = param.NewOpt(int64(topLogprobs))
		}

		if len(openaiOptions.Include) > 0 {
			include = append(include, openaiOptions.Include...)
		}

		// Reasoning controls are only sent to reasoning-capable models;
		// for other models they produce warnings below.
		if modelConfig.isReasoningModel && (openaiOptions.ReasoningEffort != nil || openaiOptions.ReasoningSummary != nil) {
			reasoning := shared.ReasoningParam{}
			if openaiOptions.ReasoningEffort != nil {
				reasoning.Effort = shared.ReasoningEffort(*openaiOptions.ReasoningEffort)
			}
			if openaiOptions.ReasoningSummary != nil {
				reasoning.Summary = shared.ReasoningSummary(*openaiOptions.ReasoningSummary)
			}
			params.Reasoning = reasoning
		}
	}

	if modelConfig.requiredAutoTruncation {
		params.Truncation = responses.ResponseNewParamsTruncationAuto
	}

	if len(include) > 0 {
		includeParams := make([]responses.ResponseIncludable, len(include))
		for i, inc := range include {
			includeParams[i] = responses.ResponseIncludable(string(inc))
		}
		params.Include = includeParams
	}

	if modelConfig.isReasoningModel {
		// Reasoning models reject sampling controls: clear any values set
		// above and warn.
		if call.Temperature != nil {
			params.Temperature = param.Opt[float64]{}
			warnings = append(warnings, fantasy.CallWarning{
				Type:    fantasy.CallWarningTypeUnsupportedSetting,
				Setting: "temperature",
				Details: "temperature is not supported for reasoning models",
			})
		}

		if call.TopP != nil {
			params.TopP = param.Opt[float64]{}
			warnings = append(warnings, fantasy.CallWarning{
				Type:    fantasy.CallWarningTypeUnsupportedSetting,
				Setting: "topP",
				Details: "topP is not supported for reasoning models",
			})
		}
	} else {
		if openaiOptions != nil {
			if openaiOptions.ReasoningEffort != nil {
				warnings = append(warnings, fantasy.CallWarning{
					Type:    fantasy.CallWarningTypeUnsupportedSetting,
					Setting: "reasoningEffort",
					Details: "reasoningEffort is not supported for non-reasoning models",
				})
			}

			if openaiOptions.ReasoningSummary != nil {
				warnings = append(warnings, fantasy.CallWarning{
					Type:    fantasy.CallWarningTypeUnsupportedSetting,
					Setting: "reasoningSummary",
					Details: "reasoningSummary is not supported for non-reasoning models",
				})
			}
		}
	}

	// Validate the requested service tier against the model's capabilities;
	// an unsupported tier is cleared so the request still succeeds.
	if openaiOptions != nil && openaiOptions.ServiceTier != nil {
		if *openaiOptions.ServiceTier == ServiceTierFlex && !modelConfig.supportsFlexProcessing {
			warnings = append(warnings, fantasy.CallWarning{
				Type:    fantasy.CallWarningTypeUnsupportedSetting,
				Setting: "serviceTier",
				Details: "flex processing is only available for o3, o4-mini, and gpt-5 models",
			})
			params.ServiceTier = ""
		}

		if *openaiOptions.ServiceTier == ServiceTierPriority && !modelConfig.supportsPriorityProcessing {
			warnings = append(warnings, fantasy.CallWarning{
				Type:    fantasy.CallWarningTypeUnsupportedSetting,
				Setting: "serviceTier",
				Details: "priority processing is only available for supported models (gpt-4, gpt-5, gpt-5-mini, o3, o4-mini) and requires Enterprise access. gpt-5-nano is not supported",
			})
			params.ServiceTier = ""
		}
	}

	tools, toolChoice, toolWarnings := toResponsesTools(call.Tools, call.ToolChoice, openaiOptions)
	warnings = append(warnings, toolWarnings...)

	if len(tools) > 0 {
		params.Tools = tools
		params.ToolChoice = toolChoice
	}

	return params, warnings
}
333
334func toResponsesPrompt(prompt fantasy.Prompt, systemMessageMode string) (responses.ResponseInputParam, []fantasy.CallWarning) {
335 var input responses.ResponseInputParam
336 var warnings []fantasy.CallWarning
337
338 for _, msg := range prompt {
339 switch msg.Role {
340 case fantasy.MessageRoleSystem:
341 var systemText string
342 for _, c := range msg.Content {
343 if c.GetType() != fantasy.ContentTypeText {
344 warnings = append(warnings, fantasy.CallWarning{
345 Type: fantasy.CallWarningTypeOther,
346 Message: "system prompt can only have text content",
347 })
348 continue
349 }
350 textPart, ok := fantasy.AsContentType[fantasy.TextPart](c)
351 if !ok {
352 warnings = append(warnings, fantasy.CallWarning{
353 Type: fantasy.CallWarningTypeOther,
354 Message: "system prompt text part does not have the right type",
355 })
356 continue
357 }
358 if strings.TrimSpace(textPart.Text) != "" {
359 systemText += textPart.Text
360 }
361 }
362
363 if systemText == "" {
364 warnings = append(warnings, fantasy.CallWarning{
365 Type: fantasy.CallWarningTypeOther,
366 Message: "system prompt has no text parts",
367 })
368 continue
369 }
370
371 switch systemMessageMode {
372 case "system":
373 input = append(input, responses.ResponseInputItemParamOfMessage(systemText, responses.EasyInputMessageRoleSystem))
374 case "developer":
375 input = append(input, responses.ResponseInputItemParamOfMessage(systemText, responses.EasyInputMessageRoleDeveloper))
376 case "remove":
377 warnings = append(warnings, fantasy.CallWarning{
378 Type: fantasy.CallWarningTypeOther,
379 Message: "system messages are removed for this model",
380 })
381 }
382
383 case fantasy.MessageRoleUser:
384 var contentParts responses.ResponseInputMessageContentListParam
385 for i, c := range msg.Content {
386 switch c.GetType() {
387 case fantasy.ContentTypeText:
388 textPart, ok := fantasy.AsContentType[fantasy.TextPart](c)
389 if !ok {
390 warnings = append(warnings, fantasy.CallWarning{
391 Type: fantasy.CallWarningTypeOther,
392 Message: "user message text part does not have the right type",
393 })
394 continue
395 }
396 contentParts = append(contentParts, responses.ResponseInputContentUnionParam{
397 OfInputText: &responses.ResponseInputTextParam{
398 Type: "input_text",
399 Text: textPart.Text,
400 },
401 })
402
403 case fantasy.ContentTypeFile:
404 filePart, ok := fantasy.AsContentType[fantasy.FilePart](c)
405 if !ok {
406 warnings = append(warnings, fantasy.CallWarning{
407 Type: fantasy.CallWarningTypeOther,
408 Message: "user message file part does not have the right type",
409 })
410 continue
411 }
412
413 if strings.HasPrefix(filePart.MediaType, "image/") {
414 base64Encoded := base64.StdEncoding.EncodeToString(filePart.Data)
415 imageURL := fmt.Sprintf("data:%s;base64,%s", filePart.MediaType, base64Encoded)
416 contentParts = append(contentParts, responses.ResponseInputContentUnionParam{
417 OfInputImage: &responses.ResponseInputImageParam{
418 Type: "input_image",
419 ImageURL: param.NewOpt(imageURL),
420 },
421 })
422 } else if filePart.MediaType == "application/pdf" {
423 base64Encoded := base64.StdEncoding.EncodeToString(filePart.Data)
424 fileData := fmt.Sprintf("data:application/pdf;base64,%s", base64Encoded)
425 filename := filePart.Filename
426 if filename == "" {
427 filename = fmt.Sprintf("part-%d.pdf", i)
428 }
429 contentParts = append(contentParts, responses.ResponseInputContentUnionParam{
430 OfInputFile: &responses.ResponseInputFileParam{
431 Type: "input_file",
432 Filename: param.NewOpt(filename),
433 FileData: param.NewOpt(fileData),
434 },
435 })
436 } else {
437 warnings = append(warnings, fantasy.CallWarning{
438 Type: fantasy.CallWarningTypeOther,
439 Message: fmt.Sprintf("file part media type %s not supported", filePart.MediaType),
440 })
441 }
442 }
443 }
444
445 if !hasVisibleResponsesUserContent(contentParts) {
446 warnings = append(warnings, fantasy.CallWarning{
447 Type: fantasy.CallWarningTypeOther,
448 Message: "dropping empty user message (contains neither user-facing content nor tool results)",
449 })
450 continue
451 }
452
453 input = append(input, responses.ResponseInputItemParamOfMessage(contentParts, responses.EasyInputMessageRoleUser))
454
455 case fantasy.MessageRoleAssistant:
456 startIdx := len(input)
457 for _, c := range msg.Content {
458 switch c.GetType() {
459 case fantasy.ContentTypeText:
460 textPart, ok := fantasy.AsContentType[fantasy.TextPart](c)
461 if !ok {
462 warnings = append(warnings, fantasy.CallWarning{
463 Type: fantasy.CallWarningTypeOther,
464 Message: "assistant message text part does not have the right type",
465 })
466 continue
467 }
468 input = append(input, responses.ResponseInputItemParamOfMessage(textPart.Text, responses.EasyInputMessageRoleAssistant))
469
470 case fantasy.ContentTypeToolCall:
471 toolCallPart, ok := fantasy.AsContentType[fantasy.ToolCallPart](c)
472 if !ok {
473 warnings = append(warnings, fantasy.CallWarning{
474 Type: fantasy.CallWarningTypeOther,
475 Message: "assistant message tool call part does not have the right type",
476 })
477 continue
478 }
479
480 if toolCallPart.ProviderExecuted {
481 continue
482 }
483
484 inputJSON, err := json.Marshal(toolCallPart.Input)
485 if err != nil {
486 warnings = append(warnings, fantasy.CallWarning{
487 Type: fantasy.CallWarningTypeOther,
488 Message: fmt.Sprintf("failed to marshal tool call input: %v", err),
489 })
490 continue
491 }
492
493 input = append(input, responses.ResponseInputItemParamOfFunctionCall(string(inputJSON), toolCallPart.ToolCallID, toolCallPart.ToolName))
494 case fantasy.ContentTypeReasoning:
495 reasoningMetadata := GetReasoningMetadata(c.Options())
496 if reasoningMetadata == nil || reasoningMetadata.ItemID == "" {
497 continue
498 }
499 if len(reasoningMetadata.Summary) == 0 && reasoningMetadata.EncryptedContent == nil {
500 warnings = append(warnings, fantasy.CallWarning{
501 Type: fantasy.CallWarningTypeOther,
502 Message: "assistant message reasoning part does is empty",
503 })
504 continue
505 }
506 // we want to always send an empty array
507 summary := []responses.ResponseReasoningItemSummaryParam{}
508 for _, s := range reasoningMetadata.Summary {
509 summary = append(summary, responses.ResponseReasoningItemSummaryParam{
510 Type: "summary_text",
511 Text: s,
512 })
513 }
514 reasoning := &responses.ResponseReasoningItemParam{
515 ID: reasoningMetadata.ItemID,
516 Summary: summary,
517 }
518 if reasoningMetadata.EncryptedContent != nil {
519 reasoning.EncryptedContent = param.NewOpt(*reasoningMetadata.EncryptedContent)
520 }
521 input = append(input, responses.ResponseInputItemUnionParam{
522 OfReasoning: reasoning,
523 })
524 }
525 }
526
527 if !hasVisibleResponsesAssistantContent(input, startIdx) {
528 warnings = append(warnings, fantasy.CallWarning{
529 Type: fantasy.CallWarningTypeOther,
530 Message: "dropping empty assistant message (contains neither user-facing content nor tool calls)",
531 })
532 // Remove any items that were added during this iteration
533 input = input[:startIdx]
534 continue
535 }
536
537 case fantasy.MessageRoleTool:
538 for _, c := range msg.Content {
539 if c.GetType() != fantasy.ContentTypeToolResult {
540 warnings = append(warnings, fantasy.CallWarning{
541 Type: fantasy.CallWarningTypeOther,
542 Message: "tool message can only have tool result content",
543 })
544 continue
545 }
546
547 toolResultPart, ok := fantasy.AsContentType[fantasy.ToolResultPart](c)
548 if !ok {
549 warnings = append(warnings, fantasy.CallWarning{
550 Type: fantasy.CallWarningTypeOther,
551 Message: "tool message result part does not have the right type",
552 })
553 continue
554 }
555
556 var outputStr string
557 switch toolResultPart.Output.GetType() {
558 case fantasy.ToolResultContentTypeText:
559 output, ok := fantasy.AsToolResultOutputType[fantasy.ToolResultOutputContentText](toolResultPart.Output)
560 if !ok {
561 warnings = append(warnings, fantasy.CallWarning{
562 Type: fantasy.CallWarningTypeOther,
563 Message: "tool result output does not have the right type",
564 })
565 continue
566 }
567 outputStr = output.Text
568 case fantasy.ToolResultContentTypeError:
569 output, ok := fantasy.AsToolResultOutputType[fantasy.ToolResultOutputContentError](toolResultPart.Output)
570 if !ok {
571 warnings = append(warnings, fantasy.CallWarning{
572 Type: fantasy.CallWarningTypeOther,
573 Message: "tool result output does not have the right type",
574 })
575 continue
576 }
577 outputStr = output.Error.Error()
578 }
579
580 input = append(input, responses.ResponseInputItemParamOfFunctionCallOutput(toolResultPart.ToolCallID, outputStr))
581 }
582 }
583 }
584
585 return input, warnings
586}
587
// hasVisibleResponsesUserContent reports whether a user message produced at
// least one content part worth sending.
func hasVisibleResponsesUserContent(content responses.ResponseInputMessageContentListParam) bool {
	return len(content) > 0
}
591
592func hasVisibleResponsesAssistantContent(items []responses.ResponseInputItemUnionParam, startIdx int) bool {
593 // Check if we added any assistant content parts from this message
594 for i := startIdx; i < len(items); i++ {
595 if items[i].OfMessage != nil || items[i].OfFunctionCall != nil {
596 return true
597 }
598 }
599 return false
600}
601
602func toResponsesTools(tools []fantasy.Tool, toolChoice *fantasy.ToolChoice, options *ResponsesProviderOptions) ([]responses.ToolUnionParam, responses.ResponseNewParamsToolChoiceUnion, []fantasy.CallWarning) {
603 warnings := make([]fantasy.CallWarning, 0)
604 var openaiTools []responses.ToolUnionParam
605
606 if len(tools) == 0 {
607 return nil, responses.ResponseNewParamsToolChoiceUnion{}, nil
608 }
609
610 strictJSONSchema := false
611 if options != nil && options.StrictJSONSchema != nil {
612 strictJSONSchema = *options.StrictJSONSchema
613 }
614
615 for _, tool := range tools {
616 if tool.GetType() == fantasy.ToolTypeFunction {
617 ft, ok := tool.(fantasy.FunctionTool)
618 if !ok {
619 continue
620 }
621 openaiTools = append(openaiTools, responses.ToolUnionParam{
622 OfFunction: &responses.FunctionToolParam{
623 Name: ft.Name,
624 Description: param.NewOpt(ft.Description),
625 Parameters: ft.InputSchema,
626 Strict: param.NewOpt(strictJSONSchema),
627 Type: "function",
628 },
629 })
630 continue
631 }
632
633 warnings = append(warnings, fantasy.CallWarning{
634 Type: fantasy.CallWarningTypeUnsupportedTool,
635 Tool: tool,
636 Message: "tool is not supported",
637 })
638 }
639
640 if toolChoice == nil {
641 return openaiTools, responses.ResponseNewParamsToolChoiceUnion{}, warnings
642 }
643
644 var openaiToolChoice responses.ResponseNewParamsToolChoiceUnion
645
646 switch *toolChoice {
647 case fantasy.ToolChoiceAuto:
648 openaiToolChoice = responses.ResponseNewParamsToolChoiceUnion{
649 OfToolChoiceMode: param.NewOpt(responses.ToolChoiceOptionsAuto),
650 }
651 case fantasy.ToolChoiceNone:
652 openaiToolChoice = responses.ResponseNewParamsToolChoiceUnion{
653 OfToolChoiceMode: param.NewOpt(responses.ToolChoiceOptionsNone),
654 }
655 case fantasy.ToolChoiceRequired:
656 openaiToolChoice = responses.ResponseNewParamsToolChoiceUnion{
657 OfToolChoiceMode: param.NewOpt(responses.ToolChoiceOptionsRequired),
658 }
659 default:
660 openaiToolChoice = responses.ResponseNewParamsToolChoiceUnion{
661 OfFunctionTool: &responses.ToolChoiceFunctionParam{
662 Type: "function",
663 Name: string(*toolChoice),
664 },
665 }
666 }
667
668 return openaiTools, openaiToolChoice, warnings
669}
670
// Generate implements fantasy.LanguageModel. It issues one non-streaming
// Responses API request and converts the output items (messages with
// citations, function calls, reasoning items) into fantasy content.
func (o responsesLanguageModel) Generate(ctx context.Context, call fantasy.Call) (*fantasy.Response, error) {
	params, warnings := o.prepareParams(call)
	response, err := o.client.Responses.New(ctx, *params, callUARequestOptions(call, o.noDefaultUserAgent)...)
	if err != nil {
		return nil, toProviderErr(err)
	}

	// The API can also report an error inside an otherwise successful
	// response body.
	if response.Error.Message != "" {
		return nil, &fantasy.Error{
			Title:   "provider error",
			Message: fmt.Sprintf("%s (code: %s)", response.Error.Message, response.Error.Code),
		}
	}

	var content []fantasy.Content
	hasFunctionCall := false

	for _, outputItem := range response.Output {
		switch outputItem.Type {
		case "message":
			for _, contentPart := range outputItem.Content {
				if contentPart.Type == "output_text" {
					content = append(content, fantasy.TextContent{
						Text: contentPart.Text,
					})

					// Surface citations attached to the text as source
					// content parts.
					for _, annotation := range contentPart.Annotations {
						switch annotation.Type {
						case "url_citation":
							content = append(content, fantasy.SourceContent{
								SourceType: fantasy.SourceTypeURL,
								ID:         uuid.NewString(),
								URL:        annotation.URL,
								Title:      annotation.Title,
							})
						case "file_citation":
							title := "Document"
							if annotation.Filename != "" {
								title = annotation.Filename
							}
							filename := annotation.Filename
							if filename == "" {
								// Fall back to the file ID when no name is given.
								filename = annotation.FileID
							}
							content = append(content, fantasy.SourceContent{
								SourceType: fantasy.SourceTypeDocument,
								ID:         uuid.NewString(),
								MediaType:  "text/plain",
								Title:      title,
								Filename:   filename,
							})
						}
					}
				}
			}

		case "function_call":
			hasFunctionCall = true
			content = append(content, fantasy.ToolCallContent{
				ProviderExecuted: false,
				ToolCallID:       outputItem.CallID,
				ToolName:         outputItem.Name,
				Input:            outputItem.Arguments.OfString,
			})

		case "reasoning":
			metadata := &ResponsesReasoningMetadata{
				ItemID: outputItem.ID,
			}
			if outputItem.EncryptedContent != "" {
				metadata.EncryptedContent = &outputItem.EncryptedContent
			}

			// Skip reasoning items that carry neither summaries nor
			// encrypted content.
			if len(outputItem.Summary) == 0 && metadata.EncryptedContent == nil {
				continue
			}

			// When there are no summary parts, add an empty reasoning part
			summaries := outputItem.Summary
			if len(summaries) == 0 {
				summaries = []responses.ResponseReasoningItemSummary{{Type: "summary_text", Text: ""}}
			}

			for _, s := range summaries {
				metadata.Summary = append(metadata.Summary, s.Text)
			}

			content = append(content, fantasy.ReasoningContent{
				Text: strings.Join(metadata.Summary, "\n"),
				ProviderMetadata: fantasy.ProviderMetadata{
					Name: metadata,
				},
			})
		}
	}

	usage := fantasy.Usage{
		InputTokens:  response.Usage.InputTokens,
		OutputTokens: response.Usage.OutputTokens,
		TotalTokens:  response.Usage.InputTokens + response.Usage.OutputTokens,
	}

	if response.Usage.OutputTokensDetails.ReasoningTokens != 0 {
		usage.ReasoningTokens = response.Usage.OutputTokensDetails.ReasoningTokens
	}
	if response.Usage.InputTokensDetails.CachedTokens != 0 {
		usage.CacheReadTokens = response.Usage.InputTokensDetails.CachedTokens
	}

	finishReason := mapResponsesFinishReason(response.IncompleteDetails.Reason, hasFunctionCall)

	return &fantasy.Response{
		Content:          content,
		Usage:            usage,
		FinishReason:     finishReason,
		ProviderMetadata: fantasy.ProviderMetadata{},
		Warnings:         warnings,
	}, nil
}
790
791func mapResponsesFinishReason(reason string, hasFunctionCall bool) fantasy.FinishReason {
792 if hasFunctionCall {
793 return fantasy.FinishReasonToolCalls
794 }
795
796 switch reason {
797 case "":
798 return fantasy.FinishReasonStop
799 case "max_tokens", "max_output_tokens":
800 return fantasy.FinishReasonLength
801 case "content_filter":
802 return fantasy.FinishReasonContentFilter
803 default:
804 return fantasy.FinishReasonOther
805 }
806}
807
// Stream implements fantasy.LanguageModel. It opens a streaming Responses
// API request and returns an iterator that translates SSE events into
// fantasy stream parts, tracking in-flight tool calls and reasoning items
// by output index / item ID.
func (o responsesLanguageModel) Stream(ctx context.Context, call fantasy.Call) (fantasy.StreamResponse, error) {
	params, warnings := o.prepareParams(call)

	stream := o.client.Responses.NewStreaming(ctx, *params, callUARequestOptions(call, o.noDefaultUserAgent)...)

	finishReason := fantasy.FinishReasonUnknown
	var usage fantasy.Usage
	// In-flight function calls keyed by output index; reasoning items keyed
	// by item ID.
	ongoingToolCalls := make(map[int64]*ongoingToolCall)
	hasFunctionCall := false
	activeReasoning := make(map[string]*reasoningState)

	return func(yield func(fantasy.StreamPart) bool) {
		// Surface any preparation warnings before the first event.
		if len(warnings) > 0 {
			if !yield(fantasy.StreamPart{
				Type:     fantasy.StreamPartTypeWarnings,
				Warnings: warnings,
			}) {
				return
			}
		}

		for stream.Next() {
			event := stream.Current()

			switch event.Type {
			case "response.created":
				_ = event.AsResponseCreated()

			case "response.output_item.added":
				added := event.AsResponseOutputItemAdded()
				switch added.Item.Type {
				case "function_call":
					ongoingToolCalls[added.OutputIndex] = &ongoingToolCall{
						toolName:   added.Item.Name,
						toolCallID: added.Item.CallID,
					}
					if !yield(fantasy.StreamPart{
						Type:         fantasy.StreamPartTypeToolInputStart,
						ID:           added.Item.CallID,
						ToolCallName: added.Item.Name,
					}) {
						return
					}

				case "message":
					if !yield(fantasy.StreamPart{
						Type: fantasy.StreamPartTypeTextStart,
						ID:   added.Item.ID,
					}) {
						return
					}

				case "reasoning":
					metadata := &ResponsesReasoningMetadata{
						ItemID:  added.Item.ID,
						Summary: []string{},
					}
					if added.Item.EncryptedContent != "" {
						metadata.EncryptedContent = &added.Item.EncryptedContent
					}

					activeReasoning[added.Item.ID] = &reasoningState{
						metadata: metadata,
					}
					if !yield(fantasy.StreamPart{
						Type: fantasy.StreamPartTypeReasoningStart,
						ID:   added.Item.ID,
						ProviderMetadata: fantasy.ProviderMetadata{
							Name: metadata,
						},
					}) {
						return
					}
				}

			case "response.output_item.done":
				done := event.AsResponseOutputItemDone()
				switch done.Item.Type {
				case "function_call":
					tc := ongoingToolCalls[done.OutputIndex]
					if tc != nil {
						delete(ongoingToolCalls, done.OutputIndex)
						hasFunctionCall = true

						// Close the input stream, then emit the complete call.
						if !yield(fantasy.StreamPart{
							Type: fantasy.StreamPartTypeToolInputEnd,
							ID:   done.Item.CallID,
						}) {
							return
						}
						if !yield(fantasy.StreamPart{
							Type:          fantasy.StreamPartTypeToolCall,
							ID:            done.Item.CallID,
							ToolCallName:  done.Item.Name,
							ToolCallInput: done.Item.Arguments.OfString,
						}) {
							return
						}
					}

				case "message":
					if !yield(fantasy.StreamPart{
						Type: fantasy.StreamPartTypeTextEnd,
						ID:   done.Item.ID,
					}) {
						return
					}

				case "reasoning":
					state := activeReasoning[done.Item.ID]
					if state != nil {
						if !yield(fantasy.StreamPart{
							Type: fantasy.StreamPartTypeReasoningEnd,
							ID:   done.Item.ID,
							ProviderMetadata: fantasy.ProviderMetadata{
								Name: state.metadata,
							},
						}) {
							return
						}
						delete(activeReasoning, done.Item.ID)
					}
				}

			case "response.function_call_arguments.delta":
				delta := event.AsResponseFunctionCallArgumentsDelta()
				tc := ongoingToolCalls[delta.OutputIndex]
				if tc != nil {
					if !yield(fantasy.StreamPart{
						Type:  fantasy.StreamPartTypeToolInputDelta,
						ID:    tc.toolCallID,
						Delta: delta.Delta,
					}) {
						return
					}
				}

			case "response.output_text.delta":
				textDelta := event.AsResponseOutputTextDelta()
				if !yield(fantasy.StreamPart{
					Type:  fantasy.StreamPartTypeTextDelta,
					ID:    textDelta.ItemID,
					Delta: textDelta.Delta,
				}) {
					return
				}

			case "response.reasoning_summary_part.added":
				// A new summary section begins: track it and emit a newline
				// separator between sections.
				added := event.AsResponseReasoningSummaryPartAdded()
				state := activeReasoning[added.ItemID]
				if state != nil {
					state.metadata.Summary = append(state.metadata.Summary, "")
					activeReasoning[added.ItemID] = state
					if !yield(fantasy.StreamPart{
						Type:  fantasy.StreamPartTypeReasoningDelta,
						ID:    added.ItemID,
						Delta: "\n",
						ProviderMetadata: fantasy.ProviderMetadata{
							Name: state.metadata,
						},
					}) {
						return
					}
				}

			case "response.reasoning_summary_text.delta":
				textDelta := event.AsResponseReasoningSummaryTextDelta()
				state := activeReasoning[textDelta.ItemID]
				if state != nil {
					// Append to the indexed summary section when it exists.
					if len(state.metadata.Summary)-1 >= int(textDelta.SummaryIndex) {
						state.metadata.Summary[textDelta.SummaryIndex] += textDelta.Delta
					}
					activeReasoning[textDelta.ItemID] = state
					if !yield(fantasy.StreamPart{
						Type:  fantasy.StreamPartTypeReasoningDelta,
						ID:    textDelta.ItemID,
						Delta: textDelta.Delta,
						ProviderMetadata: fantasy.ProviderMetadata{
							Name: state.metadata,
						},
					}) {
						return
					}
				}

			case "response.completed", "response.incomplete":
				// Both terminal events are decoded with the completed
				// accessor; capture usage and the finish reason for the
				// final part emitted after the loop.
				completed := event.AsResponseCompleted()
				finishReason = mapResponsesFinishReason(completed.Response.IncompleteDetails.Reason, hasFunctionCall)
				usage = fantasy.Usage{
					InputTokens:  completed.Response.Usage.InputTokens,
					OutputTokens: completed.Response.Usage.OutputTokens,
					TotalTokens:  completed.Response.Usage.InputTokens + completed.Response.Usage.OutputTokens,
				}
				if completed.Response.Usage.OutputTokensDetails.ReasoningTokens != 0 {
					usage.ReasoningTokens = completed.Response.Usage.OutputTokensDetails.ReasoningTokens
				}
				if completed.Response.Usage.InputTokensDetails.CachedTokens != 0 {
					usage.CacheReadTokens = completed.Response.Usage.InputTokensDetails.CachedTokens
				}

			case "error":
				errorEvent := event.AsError()
				if !yield(fantasy.StreamPart{
					Type:  fantasy.StreamPartTypeError,
					Error: fmt.Errorf("response error: %s (code: %s)", errorEvent.Message, errorEvent.Code),
				}) {
					return
				}
				return
			}
		}

		err := stream.Err()
		if err != nil {
			yield(fantasy.StreamPart{
				Type:  fantasy.StreamPartTypeError,
				Error: toProviderErr(err),
			})
			return
		}

		yield(fantasy.StreamPart{
			Type:         fantasy.StreamPartTypeFinish,
			Usage:        usage,
			FinishReason: finishReason,
		})
	}, nil
}
1036
1037// GetReasoningMetadata extracts reasoning metadata from provider options for responses models.
1038func GetReasoningMetadata(providerOptions fantasy.ProviderOptions) *ResponsesReasoningMetadata {
1039 if openaiResponsesOptions, ok := providerOptions[Name]; ok {
1040 if reasoning, ok := openaiResponsesOptions.(*ResponsesReasoningMetadata); ok {
1041 return reasoning
1042 }
1043 }
1044 return nil
1045}
1046
// ongoingToolCall captures the identity of a tool call whose streaming
// output has started but not yet completed.
type ongoingToolCall struct {
	toolName   string // name of the tool being invoked
	toolCallID string // identifier correlating stream events to this call
}
1051
// reasoningState carries the mutable reasoning metadata for a single
// reasoning item while its summary text is streamed in deltas.
type reasoningState struct {
	// metadata accumulates summary text across reasoning summary
	// delta events; it is attached to emitted reasoning stream parts.
	metadata *ResponsesReasoningMetadata
}
1055
1056// GenerateObject implements fantasy.LanguageModel.
1057func (o responsesLanguageModel) GenerateObject(ctx context.Context, call fantasy.ObjectCall) (*fantasy.ObjectResponse, error) {
1058 switch o.objectMode {
1059 case fantasy.ObjectModeText:
1060 return object.GenerateWithText(ctx, o, call)
1061 case fantasy.ObjectModeTool:
1062 return object.GenerateWithTool(ctx, o, call)
1063 default:
1064 return o.generateObjectWithJSONMode(ctx, call)
1065 }
1066}
1067
1068// StreamObject implements fantasy.LanguageModel.
1069func (o responsesLanguageModel) StreamObject(ctx context.Context, call fantasy.ObjectCall) (fantasy.ObjectStreamResponse, error) {
1070 switch o.objectMode {
1071 case fantasy.ObjectModeTool:
1072 return object.StreamWithTool(ctx, o, call)
1073 case fantasy.ObjectModeText:
1074 return object.StreamWithText(ctx, o, call)
1075 default:
1076 return o.streamObjectWithJSONMode(ctx, call)
1077 }
1078}
1079
1080func (o responsesLanguageModel) generateObjectWithJSONMode(ctx context.Context, call fantasy.ObjectCall) (*fantasy.ObjectResponse, error) {
1081 // Convert our Schema to OpenAI's JSON Schema format
1082 jsonSchemaMap := schema.ToMap(call.Schema)
1083
1084 // Add additionalProperties: false recursively for strict mode (OpenAI requirement)
1085 addAdditionalPropertiesFalse(jsonSchemaMap)
1086
1087 schemaName := call.SchemaName
1088 if schemaName == "" {
1089 schemaName = "response"
1090 }
1091
1092 // Build request using prepareParams
1093 fantasyCall := fantasy.Call{
1094 Prompt: call.Prompt,
1095 MaxOutputTokens: call.MaxOutputTokens,
1096 Temperature: call.Temperature,
1097 TopP: call.TopP,
1098 PresencePenalty: call.PresencePenalty,
1099 FrequencyPenalty: call.FrequencyPenalty,
1100 ProviderOptions: call.ProviderOptions,
1101 }
1102
1103 params, warnings := o.prepareParams(fantasyCall)
1104
1105 // Add structured output via Text.Format field
1106 params.Text = responses.ResponseTextConfigParam{
1107 Format: responses.ResponseFormatTextConfigParamOfJSONSchema(schemaName, jsonSchemaMap),
1108 }
1109
1110 // Make request
1111 response, err := o.client.Responses.New(ctx, *params, objectCallUARequestOptions(call, o.noDefaultUserAgent)...)
1112 if err != nil {
1113 return nil, toProviderErr(err)
1114 }
1115
1116 if response.Error.Message != "" {
1117 return nil, &fantasy.Error{
1118 Title: "provider error",
1119 Message: fmt.Sprintf("%s (code: %s)", response.Error.Message, response.Error.Code),
1120 }
1121 }
1122
1123 // Extract JSON text from response
1124 var jsonText string
1125 for _, outputItem := range response.Output {
1126 if outputItem.Type == "message" {
1127 for _, contentPart := range outputItem.Content {
1128 if contentPart.Type == "output_text" {
1129 jsonText = contentPart.Text
1130 break
1131 }
1132 }
1133 }
1134 }
1135
1136 if jsonText == "" {
1137 usage := fantasy.Usage{
1138 InputTokens: response.Usage.InputTokens,
1139 OutputTokens: response.Usage.OutputTokens,
1140 TotalTokens: response.Usage.InputTokens + response.Usage.OutputTokens,
1141 }
1142 finishReason := mapResponsesFinishReason(response.IncompleteDetails.Reason, false)
1143 return nil, &fantasy.NoObjectGeneratedError{
1144 RawText: "",
1145 ParseError: fmt.Errorf("no text content in response"),
1146 Usage: usage,
1147 FinishReason: finishReason,
1148 }
1149 }
1150
1151 // Parse and validate
1152 var obj any
1153 if call.RepairText != nil {
1154 obj, err = schema.ParseAndValidateWithRepair(ctx, jsonText, call.Schema, call.RepairText)
1155 } else {
1156 obj, err = schema.ParseAndValidate(jsonText, call.Schema)
1157 }
1158
1159 usage := fantasy.Usage{
1160 InputTokens: response.Usage.InputTokens,
1161 OutputTokens: response.Usage.OutputTokens,
1162 TotalTokens: response.Usage.InputTokens + response.Usage.OutputTokens,
1163 }
1164 if response.Usage.OutputTokensDetails.ReasoningTokens != 0 {
1165 usage.ReasoningTokens = response.Usage.OutputTokensDetails.ReasoningTokens
1166 }
1167 if response.Usage.InputTokensDetails.CachedTokens != 0 {
1168 usage.CacheReadTokens = response.Usage.InputTokensDetails.CachedTokens
1169 }
1170
1171 finishReason := mapResponsesFinishReason(response.IncompleteDetails.Reason, false)
1172
1173 if err != nil {
1174 // Add usage info to error
1175 if nogErr, ok := err.(*fantasy.NoObjectGeneratedError); ok {
1176 nogErr.Usage = usage
1177 nogErr.FinishReason = finishReason
1178 }
1179 return nil, err
1180 }
1181
1182 return &fantasy.ObjectResponse{
1183 Object: obj,
1184 RawText: jsonText,
1185 Usage: usage,
1186 FinishReason: finishReason,
1187 Warnings: warnings,
1188 }, nil
1189}
1190
// streamObjectWithJSONMode streams a structured object using the responses
// API's native JSON-schema text format. As text deltas arrive it re-parses
// the accumulated JSON and emits an object part whenever a new, valid,
// schema-conforming object materializes; duplicates are suppressed via deep
// equality against the last emitted object.
func (o responsesLanguageModel) streamObjectWithJSONMode(ctx context.Context, call fantasy.ObjectCall) (fantasy.ObjectStreamResponse, error) {
	// Convert our Schema to OpenAI's JSON Schema format
	jsonSchemaMap := schema.ToMap(call.Schema)

	// Add additionalProperties: false recursively for strict mode (OpenAI requirement)
	addAdditionalPropertiesFalse(jsonSchemaMap)

	schemaName := call.SchemaName
	if schemaName == "" {
		schemaName = "response"
	}

	// Build request using prepareParams
	fantasyCall := fantasy.Call{
		Prompt:           call.Prompt,
		MaxOutputTokens:  call.MaxOutputTokens,
		Temperature:      call.Temperature,
		TopP:             call.TopP,
		PresencePenalty:  call.PresencePenalty,
		FrequencyPenalty: call.FrequencyPenalty,
		ProviderOptions:  call.ProviderOptions,
	}

	params, warnings := o.prepareParams(fantasyCall)

	// Add structured output via Text.Format field
	params.Text = responses.ResponseTextConfigParam{
		Format: responses.ResponseFormatTextConfigParamOfJSONSchema(schemaName, jsonSchemaMap),
	}

	stream := o.client.Responses.NewStreaming(ctx, *params, objectCallUARequestOptions(call, o.noDefaultUserAgent)...)

	return func(yield func(fantasy.ObjectStreamPart) bool) {
		// Surface request-preparation warnings as the first part.
		if len(warnings) > 0 {
			if !yield(fantasy.ObjectStreamPart{
				Type:     fantasy.ObjectStreamPartTypeObject,
				Warnings: warnings,
			}) {
				return
			}
		}

		var accumulated string       // all output_text deltas concatenated so far
		var lastParsedObject any     // last object emitted, for dedupe
		var usage fantasy.Usage      // filled in on response.completed/incomplete
		var finishReason fantasy.FinishReason
		var streamErr error
		// No function calls occur in JSON mode; kept false for
		// mapResponsesFinishReason parity with the tool-call path.
		hasFunctionCall := false

		for stream.Next() {
			event := stream.Current()

			switch event.Type {
			case "response.output_text.delta":
				textDelta := event.AsResponseOutputTextDelta()
				accumulated += textDelta.Delta

				// Try to parse the accumulated text
				obj, state, parseErr := schema.ParsePartialJSON(accumulated)

				// If we successfully parsed, validate and emit
				if state == schema.ParseStateSuccessful || state == schema.ParseStateRepaired {
					if err := schema.ValidateAgainstSchema(obj, call.Schema); err == nil {
						// Only emit if object is different from last
						if !reflect.DeepEqual(obj, lastParsedObject) {
							if !yield(fantasy.ObjectStreamPart{
								Type:   fantasy.ObjectStreamPartTypeObject,
								Object: obj,
							}) {
								return
							}
							lastParsedObject = obj
						}
					}
				}

				// If parsing failed and we have a repair function, try it
				if state == schema.ParseStateFailed && call.RepairText != nil {
					repairedText, repairErr := call.RepairText(ctx, accumulated, parseErr)
					if repairErr == nil {
						// Re-parse the repaired text; repair failure is
						// non-fatal — later deltas may complete the JSON.
						obj2, state2, _ := schema.ParsePartialJSON(repairedText)
						if (state2 == schema.ParseStateSuccessful || state2 == schema.ParseStateRepaired) &&
							schema.ValidateAgainstSchema(obj2, call.Schema) == nil {
							if !reflect.DeepEqual(obj2, lastParsedObject) {
								if !yield(fantasy.ObjectStreamPart{
									Type:   fantasy.ObjectStreamPartTypeObject,
									Object: obj2,
								}) {
									return
								}
								lastParsedObject = obj2
							}
						}
					}
				}

			case "response.completed", "response.incomplete":
				// Both terminal events share the completed payload shape;
				// capture usage and finish reason for the final part.
				completed := event.AsResponseCompleted()
				finishReason = mapResponsesFinishReason(completed.Response.IncompleteDetails.Reason, hasFunctionCall)
				usage = fantasy.Usage{
					InputTokens:  completed.Response.Usage.InputTokens,
					OutputTokens: completed.Response.Usage.OutputTokens,
					TotalTokens:  completed.Response.Usage.InputTokens + completed.Response.Usage.OutputTokens,
				}
				if completed.Response.Usage.OutputTokensDetails.ReasoningTokens != 0 {
					usage.ReasoningTokens = completed.Response.Usage.OutputTokensDetails.ReasoningTokens
				}
				if completed.Response.Usage.InputTokensDetails.CachedTokens != 0 {
					usage.CacheReadTokens = completed.Response.Usage.InputTokensDetails.CachedTokens
				}

			case "error":
				// Provider-reported error: emit it and stop streaming.
				errorEvent := event.AsError()
				streamErr = fmt.Errorf("response error: %s (code: %s)", errorEvent.Message, errorEvent.Code)
				if !yield(fantasy.ObjectStreamPart{
					Type:  fantasy.ObjectStreamPartTypeError,
					Error: streamErr,
				}) {
					return
				}
				return
			}
		}

		// Transport-level stream error (distinct from the "error" event).
		err := stream.Err()
		if err != nil {
			yield(fantasy.ObjectStreamPart{
				Type:  fantasy.ObjectStreamPartTypeError,
				Error: toProviderErr(err),
			})
			return
		}

		// Final validation and emit
		// NOTE(review): streamErr is only assigned in the "error" case,
		// which returns immediately, so it is always nil here.
		if streamErr == nil && lastParsedObject != nil {
			yield(fantasy.ObjectStreamPart{
				Type:         fantasy.ObjectStreamPartTypeFinish,
				Usage:        usage,
				FinishReason: finishReason,
			})
		} else if streamErr == nil && lastParsedObject == nil {
			// No object was generated
			yield(fantasy.ObjectStreamPart{
				Type: fantasy.ObjectStreamPartTypeError,
				Error: &fantasy.NoObjectGeneratedError{
					RawText:      accumulated,
					ParseError:   fmt.Errorf("no valid object generated in stream"),
					Usage:        usage,
					FinishReason: finishReason,
				},
			})
		}
	}, nil
}