// language_model_hooks.go

  1package openai
  2
  3import (
  4	"encoding/base64"
  5	"fmt"
  6	"strings"
  7
  8	"charm.land/fantasy"
  9	"github.com/charmbracelet/openai-go"
 10	"github.com/charmbracelet/openai-go/packages/param"
 11	"github.com/charmbracelet/openai-go/shared"
 12)
 13
// LanguageModelPrepareCallFunc is a function that prepares the call for the
// language model. Implementations may mutate params in place and return
// warnings for settings they had to drop or adjust.
type LanguageModelPrepareCallFunc = func(model fantasy.LanguageModel, params *openai.ChatCompletionNewParams, call fantasy.Call) ([]fantasy.CallWarning, error)

// LanguageModelMapFinishReasonFunc is a function that maps the provider's raw
// finish reason string for the language model to a fantasy finish reason.
type LanguageModelMapFinishReasonFunc = func(finishReason string) fantasy.FinishReason

// LanguageModelUsageFunc is a function that calculates usage for the language
// model from a completed (non-streaming) response, returning both the usage
// counts and provider-specific metadata.
type LanguageModelUsageFunc = func(choice openai.ChatCompletion) (fantasy.Usage, fantasy.ProviderOptionsData)

// LanguageModelExtraContentFunc is a function that adds extra content for the
// language model derived from a single response choice.
type LanguageModelExtraContentFunc = func(choice openai.ChatCompletionChoice) []fantasy.Content

// LanguageModelStreamExtraFunc is a function that handles stream extra
// functionality for the language model. It receives the current chunk, the
// stream-part yield callback, and a mutable per-stream context map; it returns
// the (possibly updated) context and whether streaming should continue.
type LanguageModelStreamExtraFunc = func(chunk openai.ChatCompletionChunk, yield func(fantasy.StreamPart) bool, ctx map[string]any) (map[string]any, bool)

// LanguageModelStreamUsageFunc is a function that calculates stream usage for
// the language model from a streaming chunk, merging into any existing
// provider metadata.
type LanguageModelStreamUsageFunc = func(chunk openai.ChatCompletionChunk, ctx map[string]any, metadata fantasy.ProviderMetadata) (fantasy.Usage, fantasy.ProviderMetadata)

// LanguageModelStreamProviderMetadataFunc is a function that handles stream
// provider metadata for the language model, folding per-choice data (e.g.
// logprobs) into the metadata map.
type LanguageModelStreamProviderMetadataFunc = func(choice openai.ChatCompletionChoice, metadata fantasy.ProviderMetadata) fantasy.ProviderMetadata

// LanguageModelToPromptFunc is a function that handles converting fantasy
// prompts to openai sdk messages, returning warnings for any content that
// could not be represented.
type LanguageModelToPromptFunc = func(prompt fantasy.Prompt, provider, model string) ([]openai.ChatCompletionMessageParamUnion, []fantasy.CallWarning)
 37
 38// DefaultPrepareCallFunc is the default implementation for preparing a call to the language model.
 39func DefaultPrepareCallFunc(model fantasy.LanguageModel, params *openai.ChatCompletionNewParams, call fantasy.Call) ([]fantasy.CallWarning, error) {
 40	if call.ProviderOptions == nil {
 41		return nil, nil
 42	}
 43	var warnings []fantasy.CallWarning
 44	providerOptions := &ProviderOptions{}
 45	if v, ok := call.ProviderOptions[Name]; ok {
 46		providerOptions, ok = v.(*ProviderOptions)
 47		if !ok {
 48			return nil, &fantasy.Error{Title: "invalid argument", Message: "openai provider options should be *openai.ProviderOptions"}
 49		}
 50	}
 51
 52	if providerOptions.LogitBias != nil {
 53		params.LogitBias = providerOptions.LogitBias
 54	}
 55	if providerOptions.LogProbs != nil && providerOptions.TopLogProbs != nil {
 56		providerOptions.LogProbs = nil
 57	}
 58	if providerOptions.LogProbs != nil {
 59		params.Logprobs = param.NewOpt(*providerOptions.LogProbs)
 60	}
 61	if providerOptions.TopLogProbs != nil {
 62		params.TopLogprobs = param.NewOpt(*providerOptions.TopLogProbs)
 63	}
 64	if providerOptions.User != nil {
 65		params.User = param.NewOpt(*providerOptions.User)
 66	}
 67	if providerOptions.ParallelToolCalls != nil {
 68		params.ParallelToolCalls = param.NewOpt(*providerOptions.ParallelToolCalls)
 69	}
 70	if providerOptions.MaxCompletionTokens != nil {
 71		params.MaxCompletionTokens = param.NewOpt(*providerOptions.MaxCompletionTokens)
 72	}
 73
 74	if providerOptions.TextVerbosity != nil {
 75		params.Verbosity = openai.ChatCompletionNewParamsVerbosity(*providerOptions.TextVerbosity)
 76	}
 77	if providerOptions.Prediction != nil {
 78		// Convert map[string]any to ChatCompletionPredictionContentParam
 79		if content, ok := providerOptions.Prediction["content"]; ok {
 80			if contentStr, ok := content.(string); ok {
 81				params.Prediction = openai.ChatCompletionPredictionContentParam{
 82					Content: openai.ChatCompletionPredictionContentContentUnionParam{
 83						OfString: param.NewOpt(contentStr),
 84					},
 85				}
 86			}
 87		}
 88	}
 89	if providerOptions.Store != nil {
 90		params.Store = param.NewOpt(*providerOptions.Store)
 91	}
 92	if providerOptions.Metadata != nil {
 93		// Convert map[string]any to map[string]string
 94		metadata := make(map[string]string)
 95		for k, v := range providerOptions.Metadata {
 96			if str, ok := v.(string); ok {
 97				metadata[k] = str
 98			}
 99		}
100		params.Metadata = metadata
101	}
102	if providerOptions.PromptCacheKey != nil {
103		params.PromptCacheKey = param.NewOpt(*providerOptions.PromptCacheKey)
104	}
105	if providerOptions.SafetyIdentifier != nil {
106		params.SafetyIdentifier = param.NewOpt(*providerOptions.SafetyIdentifier)
107	}
108	if providerOptions.ServiceTier != nil {
109		params.ServiceTier = openai.ChatCompletionNewParamsServiceTier(*providerOptions.ServiceTier)
110	}
111
112	if providerOptions.ReasoningEffort != nil {
113		switch *providerOptions.ReasoningEffort {
114		case ReasoningEffortNone:
115			params.ReasoningEffort = shared.ReasoningEffortNone
116		case ReasoningEffortMinimal:
117			params.ReasoningEffort = shared.ReasoningEffortMinimal
118		case ReasoningEffortLow:
119			params.ReasoningEffort = shared.ReasoningEffortLow
120		case ReasoningEffortMedium:
121			params.ReasoningEffort = shared.ReasoningEffortMedium
122		case ReasoningEffortHigh:
123			params.ReasoningEffort = shared.ReasoningEffortHigh
124		case ReasoningEffortXHigh:
125			params.ReasoningEffort = shared.ReasoningEffortXhigh
126		default:
127			return nil, fmt.Errorf("reasoning model `%s` not supported", *providerOptions.ReasoningEffort)
128		}
129	}
130
131	if isReasoningModel(model.Model()) {
132		if providerOptions.LogitBias != nil {
133			params.LogitBias = nil
134			warnings = append(warnings, fantasy.CallWarning{
135				Type:    fantasy.CallWarningTypeUnsupportedSetting,
136				Setting: "LogitBias",
137				Message: "LogitBias is not supported for reasoning models",
138			})
139		}
140		if providerOptions.LogProbs != nil {
141			params.Logprobs = param.Opt[bool]{}
142			warnings = append(warnings, fantasy.CallWarning{
143				Type:    fantasy.CallWarningTypeUnsupportedSetting,
144				Setting: "Logprobs",
145				Message: "Logprobs is not supported for reasoning models",
146			})
147		}
148		if providerOptions.TopLogProbs != nil {
149			params.TopLogprobs = param.Opt[int64]{}
150			warnings = append(warnings, fantasy.CallWarning{
151				Type:    fantasy.CallWarningTypeUnsupportedSetting,
152				Setting: "TopLogprobs",
153				Message: "TopLogprobs is not supported for reasoning models",
154			})
155		}
156	}
157
158	// Handle service tier validation
159	if providerOptions.ServiceTier != nil {
160		serviceTier := *providerOptions.ServiceTier
161		if serviceTier == "flex" && !supportsFlexProcessing(model.Model()) {
162			params.ServiceTier = ""
163			warnings = append(warnings, fantasy.CallWarning{
164				Type:    fantasy.CallWarningTypeUnsupportedSetting,
165				Setting: "ServiceTier",
166				Details: "flex processing is only available for o3, o4-mini, and gpt-5 models",
167			})
168		} else if serviceTier == "priority" && !supportsPriorityProcessing(model.Model()) {
169			params.ServiceTier = ""
170			warnings = append(warnings, fantasy.CallWarning{
171				Type:    fantasy.CallWarningTypeUnsupportedSetting,
172				Setting: "ServiceTier",
173				Details: "priority processing is only available for supported models (gpt-4, gpt-5, gpt-5-mini, o3, o4-mini) and requires Enterprise access. gpt-5-nano is not supported",
174			})
175		}
176	}
177	return warnings, nil
178}
179
180// DefaultMapFinishReasonFunc is the default implementation for mapping finish reasons.
181func DefaultMapFinishReasonFunc(finishReason string) fantasy.FinishReason {
182	switch finishReason {
183	case "stop":
184		return fantasy.FinishReasonStop
185	case "length":
186		return fantasy.FinishReasonLength
187	case "content_filter":
188		return fantasy.FinishReasonContentFilter
189	case "function_call", "tool_calls":
190		return fantasy.FinishReasonToolCalls
191	default:
192		return fantasy.FinishReasonUnknown
193	}
194}
195
196// DefaultUsageFunc is the default implementation for calculating usage.
197func DefaultUsageFunc(response openai.ChatCompletion) (fantasy.Usage, fantasy.ProviderOptionsData) {
198	completionTokenDetails := response.Usage.CompletionTokensDetails
199	promptTokenDetails := response.Usage.PromptTokensDetails
200
201	// Build provider metadata
202	providerMetadata := &ProviderMetadata{}
203
204	// Add logprobs if available
205	if len(response.Choices) > 0 && len(response.Choices[0].Logprobs.Content) > 0 {
206		providerMetadata.Logprobs = response.Choices[0].Logprobs.Content
207	}
208
209	// Add prediction tokens if available
210	if completionTokenDetails.AcceptedPredictionTokens > 0 || completionTokenDetails.RejectedPredictionTokens > 0 {
211		if completionTokenDetails.AcceptedPredictionTokens > 0 {
212			providerMetadata.AcceptedPredictionTokens = completionTokenDetails.AcceptedPredictionTokens
213		}
214		if completionTokenDetails.RejectedPredictionTokens > 0 {
215			providerMetadata.RejectedPredictionTokens = completionTokenDetails.RejectedPredictionTokens
216		}
217	}
218	// OpenAI reports prompt_tokens INCLUDING cached tokens. Subtract to avoid double-counting.
219	inputTokens := max(response.Usage.PromptTokens-promptTokenDetails.CachedTokens, 0)
220	return fantasy.Usage{
221		InputTokens:     inputTokens,
222		OutputTokens:    response.Usage.CompletionTokens,
223		TotalTokens:     response.Usage.TotalTokens,
224		ReasoningTokens: completionTokenDetails.ReasoningTokens,
225		CacheReadTokens: promptTokenDetails.CachedTokens,
226	}, providerMetadata
227}
228
229// DefaultStreamUsageFunc is the default implementation for calculating stream usage.
230func DefaultStreamUsageFunc(chunk openai.ChatCompletionChunk, _ map[string]any, metadata fantasy.ProviderMetadata) (fantasy.Usage, fantasy.ProviderMetadata) {
231	if chunk.Usage.TotalTokens == 0 {
232		return fantasy.Usage{}, nil
233	}
234	streamProviderMetadata := &ProviderMetadata{}
235	if metadata != nil {
236		if providerMetadata, ok := metadata[Name]; ok {
237			converted, ok := providerMetadata.(*ProviderMetadata)
238			if ok {
239				streamProviderMetadata = converted
240			}
241		}
242	}
243	// we do this here because the acc does not add prompt details
244	completionTokenDetails := chunk.Usage.CompletionTokensDetails
245	promptTokenDetails := chunk.Usage.PromptTokensDetails
246	// OpenAI reports prompt_tokens INCLUDING cached tokens. Subtract to avoid double-counting.
247	inputTokens := max(chunk.Usage.PromptTokens-promptTokenDetails.CachedTokens, 0)
248	usage := fantasy.Usage{
249		InputTokens:     inputTokens,
250		OutputTokens:    chunk.Usage.CompletionTokens,
251		TotalTokens:     chunk.Usage.TotalTokens,
252		ReasoningTokens: completionTokenDetails.ReasoningTokens,
253		CacheReadTokens: promptTokenDetails.CachedTokens,
254	}
255
256	// Add prediction tokens if available
257	if completionTokenDetails.AcceptedPredictionTokens > 0 || completionTokenDetails.RejectedPredictionTokens > 0 {
258		if completionTokenDetails.AcceptedPredictionTokens > 0 {
259			streamProviderMetadata.AcceptedPredictionTokens = completionTokenDetails.AcceptedPredictionTokens
260		}
261		if completionTokenDetails.RejectedPredictionTokens > 0 {
262			streamProviderMetadata.RejectedPredictionTokens = completionTokenDetails.RejectedPredictionTokens
263		}
264	}
265
266	return usage, fantasy.ProviderMetadata{
267		Name: streamProviderMetadata,
268	}
269}
270
271// DefaultStreamProviderMetadataFunc is the default implementation for handling stream provider metadata.
272func DefaultStreamProviderMetadataFunc(choice openai.ChatCompletionChoice, metadata fantasy.ProviderMetadata) fantasy.ProviderMetadata {
273	if metadata == nil {
274		metadata = fantasy.ProviderMetadata{}
275	}
276	streamProviderMetadata, ok := metadata[Name]
277	if !ok {
278		streamProviderMetadata = &ProviderMetadata{}
279	}
280	if converted, ok := streamProviderMetadata.(*ProviderMetadata); ok {
281		converted.Logprobs = choice.Logprobs.Content
282		metadata[Name] = converted
283	}
284	return metadata
285}
286
// DefaultToPrompt converts a fantasy prompt to OpenAI format with default handling.
//
// Each fantasy message is mapped by role:
//   - system:    all text parts joined with newlines into one system message
//   - user:      text and file parts become content parts (image/audio/PDF/text)
//   - assistant: text content plus function tool calls
//   - tool:      each tool result becomes its own tool message
//
// Messages that end up with no usable content are dropped. Every dropped or
// unsupported part is reported through the returned warnings rather than
// failing the conversion.
func DefaultToPrompt(prompt fantasy.Prompt, _, _ string) ([]openai.ChatCompletionMessageParamUnion, []fantasy.CallWarning) {
	var messages []openai.ChatCompletionMessageParamUnion
	var warnings []fantasy.CallWarning
	for _, msg := range prompt {
		switch msg.Role {
		case fantasy.MessageRoleSystem:
			// Collect all non-blank text parts; anything else is warned about
			// and skipped.
			var systemPromptParts []string
			for _, c := range msg.Content {
				if c.GetType() != fantasy.ContentTypeText {
					warnings = append(warnings, fantasy.CallWarning{
						Type:    fantasy.CallWarningTypeOther,
						Message: "system prompt can only have text content",
					})
					continue
				}
				textPart, ok := fantasy.AsContentType[fantasy.TextPart](c)
				if !ok {
					warnings = append(warnings, fantasy.CallWarning{
						Type:    fantasy.CallWarningTypeOther,
						Message: "system prompt text part does not have the right type",
					})
					continue
				}
				text := textPart.Text
				// Whitespace-only parts are dropped silently.
				if strings.TrimSpace(text) != "" {
					systemPromptParts = append(systemPromptParts, textPart.Text)
				}
			}
			if len(systemPromptParts) == 0 {
				warnings = append(warnings, fantasy.CallWarning{
					Type:    fantasy.CallWarningTypeOther,
					Message: "system prompt has no text parts",
				})
				continue
			}
			messages = append(messages, openai.SystemMessage(strings.Join(systemPromptParts, "\n")))
		case fantasy.MessageRoleUser:
			// Fast path: a simple user message with a single text part.
			if len(msg.Content) == 1 && msg.Content[0].GetType() == fantasy.ContentTypeText {
				textPart, ok := fantasy.AsContentType[fantasy.TextPart](msg.Content[0])
				if !ok {
					warnings = append(warnings, fantasy.CallWarning{
						Type:    fantasy.CallWarningTypeOther,
						Message: "user message text part does not have the right type",
					})
					continue
				}
				messages = append(messages, openai.UserMessage(textPart.Text))
				continue
			}
			// Mixed content: text plus attachments.
			// for now we only support image content later we need to check
			// TODO: add the supported media types to the language model so we
			//  can use that to validate the data here.
			var content []openai.ChatCompletionContentPartUnionParam
			for _, c := range msg.Content {
				switch c.GetType() {
				case fantasy.ContentTypeText:
					textPart, ok := fantasy.AsContentType[fantasy.TextPart](c)
					if !ok {
						warnings = append(warnings, fantasy.CallWarning{
							Type:    fantasy.CallWarningTypeOther,
							Message: "user message text part does not have the right type",
						})
						continue
					}
					content = append(content, openai.ChatCompletionContentPartUnionParam{
						OfText: &openai.ChatCompletionContentPartTextParam{
							Text: textPart.Text,
						},
					})
				case fantasy.ContentTypeFile:
					filePart, ok := fantasy.AsContentType[fantasy.FilePart](c)
					if !ok {
						warnings = append(warnings, fantasy.CallWarning{
							Type:    fantasy.CallWarningTypeOther,
							Message: "user message file part does not have the right type",
						})
						continue
					}

					// Dispatch on the attachment's media type.
					switch {
					case strings.HasPrefix(filePart.MediaType, "text/"):
						// NOTE(review): text files are sent as bare base64
						// FileData with no filename or data: URI prefix,
						// unlike the PDF branch below — confirm the API
						// accepts this shape.
						base64Encoded := base64.StdEncoding.EncodeToString(filePart.Data)
						documentBlock := openai.ChatCompletionContentPartFileFileParam{
							FileData: param.NewOpt(base64Encoded),
						}
						content = append(content, openai.FileContentPart(documentBlock))

					case strings.HasPrefix(filePart.MediaType, "image/"):
						// Handle image files as data-URI image_url parts.
						base64Encoded := base64.StdEncoding.EncodeToString(filePart.Data)
						data := "data:" + filePart.MediaType + ";base64," + base64Encoded
						imageURL := openai.ChatCompletionContentPartImageImageURLParam{URL: data}

						// Check for provider-specific options like image detail
						if providerOptions, ok := filePart.ProviderOptions[Name]; ok {
							if detail, ok := providerOptions.(*ProviderFileOptions); ok {
								imageURL.Detail = detail.ImageDetail
							}
						}

						imageBlock := openai.ChatCompletionContentPartImageParam{ImageURL: imageURL}
						content = append(content, openai.ChatCompletionContentPartUnionParam{OfImageURL: &imageBlock})

					case filePart.MediaType == "audio/wav":
						// Handle WAV audio files
						base64Encoded := base64.StdEncoding.EncodeToString(filePart.Data)
						audioBlock := openai.ChatCompletionContentPartInputAudioParam{
							InputAudio: openai.ChatCompletionContentPartInputAudioInputAudioParam{
								Data:   base64Encoded,
								Format: "wav",
							},
						}
						content = append(content, openai.ChatCompletionContentPartUnionParam{OfInputAudio: &audioBlock})

					case filePart.MediaType == "audio/mpeg" || filePart.MediaType == "audio/mp3":
						// Handle MP3 audio files
						base64Encoded := base64.StdEncoding.EncodeToString(filePart.Data)
						audioBlock := openai.ChatCompletionContentPartInputAudioParam{
							InputAudio: openai.ChatCompletionContentPartInputAudioInputAudioParam{
								Data:   base64Encoded,
								Format: "mp3",
							},
						}
						content = append(content, openai.ChatCompletionContentPartUnionParam{OfInputAudio: &audioBlock})

					case filePart.MediaType == "application/pdf":
						// Handle PDF files: either an uploaded-file reference
						// or inline base64 data.
						dataStr := string(filePart.Data)

						// Check if data looks like a file ID (starts with "file-")
						if strings.HasPrefix(dataStr, "file-") {
							fileBlock := openai.ChatCompletionContentPartFileParam{
								File: openai.ChatCompletionContentPartFileFileParam{
									FileID: param.NewOpt(dataStr),
								},
							}
							content = append(content, openai.ChatCompletionContentPartUnionParam{OfFile: &fileBlock})
						} else {
							// Handle as base64 data
							base64Encoded := base64.StdEncoding.EncodeToString(filePart.Data)
							data := "data:application/pdf;base64," + base64Encoded

							filename := filePart.Filename
							if filename == "" {
								// Generate default filename based on content index
								filename = fmt.Sprintf("part-%d.pdf", len(content))
							}

							fileBlock := openai.ChatCompletionContentPartFileParam{
								File: openai.ChatCompletionContentPartFileFileParam{
									Filename: param.NewOpt(filename),
									FileData: param.NewOpt(data),
								},
							}
							content = append(content, openai.ChatCompletionContentPartUnionParam{OfFile: &fileBlock})
						}

					default:
						warnings = append(warnings, fantasy.CallWarning{
							Type:    fantasy.CallWarningTypeOther,
							Message: fmt.Sprintf("file part media type %s not supported", filePart.MediaType),
						})
					}
				}
			}
			if !hasVisibleUserContent(content) {
				warnings = append(warnings, fantasy.CallWarning{
					Type:    fantasy.CallWarningTypeOther,
					Message: "dropping empty user message (contains neither user-facing content nor tool results)",
				})
				continue
			}
			messages = append(messages, openai.UserMessage(content))
		case fantasy.MessageRoleAssistant:
			// Fast path: a simple assistant message with a single text part.
			if len(msg.Content) == 1 && msg.Content[0].GetType() == fantasy.ContentTypeText {
				textPart, ok := fantasy.AsContentType[fantasy.TextPart](msg.Content[0])
				if !ok {
					warnings = append(warnings, fantasy.CallWarning{
						Type:    fantasy.CallWarningTypeOther,
						Message: "assistant message text part does not have the right type",
					})
					continue
				}
				messages = append(messages, openai.AssistantMessage(textPart.Text))
				continue
			}
			assistantMsg := openai.ChatCompletionAssistantMessageParam{
				Role: "assistant",
			}
			for _, c := range msg.Content {
				switch c.GetType() {
				case fantasy.ContentTypeText:
					textPart, ok := fantasy.AsContentType[fantasy.TextPart](c)
					if !ok {
						warnings = append(warnings, fantasy.CallWarning{
							Type:    fantasy.CallWarningTypeOther,
							Message: "assistant message text part does not have the right type",
						})
						continue
					}
					// NOTE(review): each text part overwrites Content, so only
					// the last text part of a multi-text assistant message
					// survives — confirm whether parts should be concatenated.
					assistantMsg.Content = openai.ChatCompletionAssistantMessageParamContentUnion{
						OfString: param.NewOpt(textPart.Text),
					}
				case fantasy.ContentTypeToolCall:
					toolCallPart, ok := fantasy.AsContentType[fantasy.ToolCallPart](c)
					if !ok {
						warnings = append(warnings, fantasy.CallWarning{
							Type:    fantasy.CallWarningTypeOther,
							Message: "assistant message tool part does not have the right type",
						})
						continue
					}
					assistantMsg.ToolCalls = append(assistantMsg.ToolCalls,
						openai.ChatCompletionMessageToolCallUnionParam{
							OfFunction: &openai.ChatCompletionMessageFunctionToolCallParam{
								ID:   toolCallPart.ToolCallID,
								Type: "function",
								Function: openai.ChatCompletionMessageFunctionToolCallFunctionParam{
									Name:      toolCallPart.ToolName,
									Arguments: toolCallPart.Input,
								},
							},
						})
				}
			}
			if !hasVisibleAssistantContent(&assistantMsg) {
				warnings = append(warnings, fantasy.CallWarning{
					Type:    fantasy.CallWarningTypeOther,
					Message: "dropping empty assistant message (contains neither user-facing content nor tool calls)",
				})
				continue
			}
			messages = append(messages, openai.ChatCompletionMessageParamUnion{
				OfAssistant: &assistantMsg,
			})
		case fantasy.MessageRoleTool:
			// Each tool result part becomes its own tool message keyed by the
			// originating tool call ID.
			for _, c := range msg.Content {
				if c.GetType() != fantasy.ContentTypeToolResult {
					warnings = append(warnings, fantasy.CallWarning{
						Type:    fantasy.CallWarningTypeOther,
						Message: "tool message can only have tool result content",
					})
					continue
				}

				toolResultPart, ok := fantasy.AsContentType[fantasy.ToolResultPart](c)
				if !ok {
					warnings = append(warnings, fantasy.CallWarning{
						Type:    fantasy.CallWarningTypeOther,
						Message: "tool message result part does not have the right type",
					})
					continue
				}

				switch toolResultPart.Output.GetType() {
				case fantasy.ToolResultContentTypeText:
					output, ok := fantasy.AsToolResultOutputType[fantasy.ToolResultOutputContentText](toolResultPart.Output)
					if !ok {
						warnings = append(warnings, fantasy.CallWarning{
							Type:    fantasy.CallWarningTypeOther,
							Message: "tool result output does not have the right type",
						})
						continue
					}
					messages = append(messages, openai.ToolMessage(output.Text, toolResultPart.ToolCallID))
				case fantasy.ToolResultContentTypeError:
					// TODO: check if better handling is needed
					output, ok := fantasy.AsToolResultOutputType[fantasy.ToolResultOutputContentError](toolResultPart.Output)
					if !ok {
						warnings = append(warnings, fantasy.CallWarning{
							Type:    fantasy.CallWarningTypeOther,
							Message: "tool result output does not have the right type",
						})
						continue
					}
					messages = append(messages, openai.ToolMessage(output.Error.Error(), toolResultPart.ToolCallID))
				}
			}
		}
	}
	return messages, warnings
}
573
574func hasVisibleUserContent(content []openai.ChatCompletionContentPartUnionParam) bool {
575	for _, part := range content {
576		if part.OfText != nil || part.OfImageURL != nil || part.OfInputAudio != nil || part.OfFile != nil {
577			return true
578		}
579	}
580	return false
581}
582
583func hasVisibleAssistantContent(msg *openai.ChatCompletionAssistantMessageParam) bool {
584	// Check if there's text content
585	if !param.IsOmitted(msg.Content.OfString) || len(msg.Content.OfArrayOfContentParts) > 0 {
586		return true
587	}
588	// Check if there are tool calls
589	if len(msg.ToolCalls) > 0 {
590		return true
591	}
592	return false
593}