1package openai
2
3import (
4 "encoding/base64"
5 "fmt"
6 "strings"
7
8 "charm.land/fantasy"
9 "github.com/charmbracelet/openai-go"
10 "github.com/charmbracelet/openai-go/packages/param"
11 "github.com/charmbracelet/openai-go/shared"
12)
13
// LanguageModelPrepareCallFunc is a function that prepares the call for the language model.
// It may mutate params in place and returns any warnings produced while applying options.
type LanguageModelPrepareCallFunc = func(model fantasy.LanguageModel, params *openai.ChatCompletionNewParams, call fantasy.Call) ([]fantasy.CallWarning, error)

// LanguageModelMapFinishReasonFunc is a function that maps the finish reason for the language model.
type LanguageModelMapFinishReasonFunc = func(finishReason string) fantasy.FinishReason

// LanguageModelUsageFunc is a function that calculates usage for the language model.
// Note: despite the parameter name, the argument is the full completion response.
type LanguageModelUsageFunc = func(choice openai.ChatCompletion) (fantasy.Usage, fantasy.ProviderOptionsData)

// LanguageModelExtraContentFunc is a function that adds extra content for the language model.
type LanguageModelExtraContentFunc = func(choice openai.ChatCompletionChoice) []fantasy.Content

// LanguageModelStreamExtraFunc is a function that handles stream extra functionality for the language model.
// The returned map is carried across chunks as mutable per-stream context; the bool signals
// whether the yield consumer wants more parts.
type LanguageModelStreamExtraFunc = func(chunk openai.ChatCompletionChunk, yield func(fantasy.StreamPart) bool, ctx map[string]any) (map[string]any, bool)

// LanguageModelStreamUsageFunc is a function that calculates stream usage for the language model.
type LanguageModelStreamUsageFunc = func(chunk openai.ChatCompletionChunk, ctx map[string]any, metadata fantasy.ProviderMetadata) (fantasy.Usage, fantasy.ProviderMetadata)

// LanguageModelStreamProviderMetadataFunc is a function that handles stream provider metadata for the language model.
type LanguageModelStreamProviderMetadataFunc = func(choice openai.ChatCompletionChoice, metadata fantasy.ProviderMetadata) fantasy.ProviderMetadata

// LanguageModelToPromptFunc is a function that handles converting fantasy prompts to openai sdk messages.
type LanguageModelToPromptFunc = func(prompt fantasy.Prompt, provider, model string) ([]openai.ChatCompletionMessageParamUnion, []fantasy.CallWarning)
37
38// DefaultPrepareCallFunc is the default implementation for preparing a call to the language model.
39func DefaultPrepareCallFunc(model fantasy.LanguageModel, params *openai.ChatCompletionNewParams, call fantasy.Call) ([]fantasy.CallWarning, error) {
40 if call.ProviderOptions == nil {
41 return nil, nil
42 }
43 var warnings []fantasy.CallWarning
44 providerOptions := &ProviderOptions{}
45 if v, ok := call.ProviderOptions[Name]; ok {
46 providerOptions, ok = v.(*ProviderOptions)
47 if !ok {
48 return nil, &fantasy.Error{Title: "invalid argument", Message: "openai provider options should be *openai.ProviderOptions"}
49 }
50 }
51
52 if providerOptions.LogitBias != nil {
53 params.LogitBias = providerOptions.LogitBias
54 }
55 if providerOptions.LogProbs != nil && providerOptions.TopLogProbs != nil {
56 providerOptions.LogProbs = nil
57 }
58 if providerOptions.LogProbs != nil {
59 params.Logprobs = param.NewOpt(*providerOptions.LogProbs)
60 }
61 if providerOptions.TopLogProbs != nil {
62 params.TopLogprobs = param.NewOpt(*providerOptions.TopLogProbs)
63 }
64 if providerOptions.User != nil {
65 params.User = param.NewOpt(*providerOptions.User)
66 }
67 if providerOptions.ParallelToolCalls != nil {
68 params.ParallelToolCalls = param.NewOpt(*providerOptions.ParallelToolCalls)
69 }
70 if providerOptions.MaxCompletionTokens != nil {
71 params.MaxCompletionTokens = param.NewOpt(*providerOptions.MaxCompletionTokens)
72 }
73
74 if providerOptions.TextVerbosity != nil {
75 params.Verbosity = openai.ChatCompletionNewParamsVerbosity(*providerOptions.TextVerbosity)
76 }
77 if providerOptions.Prediction != nil {
78 // Convert map[string]any to ChatCompletionPredictionContentParam
79 if content, ok := providerOptions.Prediction["content"]; ok {
80 if contentStr, ok := content.(string); ok {
81 params.Prediction = openai.ChatCompletionPredictionContentParam{
82 Content: openai.ChatCompletionPredictionContentContentUnionParam{
83 OfString: param.NewOpt(contentStr),
84 },
85 }
86 }
87 }
88 }
89 if providerOptions.Store != nil {
90 params.Store = param.NewOpt(*providerOptions.Store)
91 }
92 if providerOptions.Metadata != nil {
93 // Convert map[string]any to map[string]string
94 metadata := make(map[string]string)
95 for k, v := range providerOptions.Metadata {
96 if str, ok := v.(string); ok {
97 metadata[k] = str
98 }
99 }
100 params.Metadata = metadata
101 }
102 if providerOptions.PromptCacheKey != nil {
103 params.PromptCacheKey = param.NewOpt(*providerOptions.PromptCacheKey)
104 }
105 if providerOptions.SafetyIdentifier != nil {
106 params.SafetyIdentifier = param.NewOpt(*providerOptions.SafetyIdentifier)
107 }
108 if providerOptions.ServiceTier != nil {
109 params.ServiceTier = openai.ChatCompletionNewParamsServiceTier(*providerOptions.ServiceTier)
110 }
111
112 if providerOptions.ReasoningEffort != nil {
113 switch *providerOptions.ReasoningEffort {
114 case ReasoningEffortMinimal:
115 params.ReasoningEffort = shared.ReasoningEffortMinimal
116 case ReasoningEffortLow:
117 params.ReasoningEffort = shared.ReasoningEffortLow
118 case ReasoningEffortMedium:
119 params.ReasoningEffort = shared.ReasoningEffortMedium
120 case ReasoningEffortHigh:
121 params.ReasoningEffort = shared.ReasoningEffortHigh
122 default:
123 return nil, fmt.Errorf("reasoning model `%s` not supported", *providerOptions.ReasoningEffort)
124 }
125 }
126
127 if isReasoningModel(model.Model()) {
128 if providerOptions.LogitBias != nil {
129 params.LogitBias = nil
130 warnings = append(warnings, fantasy.CallWarning{
131 Type: fantasy.CallWarningTypeUnsupportedSetting,
132 Setting: "LogitBias",
133 Message: "LogitBias is not supported for reasoning models",
134 })
135 }
136 if providerOptions.LogProbs != nil {
137 params.Logprobs = param.Opt[bool]{}
138 warnings = append(warnings, fantasy.CallWarning{
139 Type: fantasy.CallWarningTypeUnsupportedSetting,
140 Setting: "Logprobs",
141 Message: "Logprobs is not supported for reasoning models",
142 })
143 }
144 if providerOptions.TopLogProbs != nil {
145 params.TopLogprobs = param.Opt[int64]{}
146 warnings = append(warnings, fantasy.CallWarning{
147 Type: fantasy.CallWarningTypeUnsupportedSetting,
148 Setting: "TopLogprobs",
149 Message: "TopLogprobs is not supported for reasoning models",
150 })
151 }
152 }
153
154 // Handle service tier validation
155 if providerOptions.ServiceTier != nil {
156 serviceTier := *providerOptions.ServiceTier
157 if serviceTier == "flex" && !supportsFlexProcessing(model.Model()) {
158 params.ServiceTier = ""
159 warnings = append(warnings, fantasy.CallWarning{
160 Type: fantasy.CallWarningTypeUnsupportedSetting,
161 Setting: "ServiceTier",
162 Details: "flex processing is only available for o3, o4-mini, and gpt-5 models",
163 })
164 } else if serviceTier == "priority" && !supportsPriorityProcessing(model.Model()) {
165 params.ServiceTier = ""
166 warnings = append(warnings, fantasy.CallWarning{
167 Type: fantasy.CallWarningTypeUnsupportedSetting,
168 Setting: "ServiceTier",
169 Details: "priority processing is only available for supported models (gpt-4, gpt-5, gpt-5-mini, o3, o4-mini) and requires Enterprise access. gpt-5-nano is not supported",
170 })
171 }
172 }
173 return warnings, nil
174}
175
176// DefaultMapFinishReasonFunc is the default implementation for mapping finish reasons.
177func DefaultMapFinishReasonFunc(finishReason string) fantasy.FinishReason {
178 switch finishReason {
179 case "stop":
180 return fantasy.FinishReasonStop
181 case "length":
182 return fantasy.FinishReasonLength
183 case "content_filter":
184 return fantasy.FinishReasonContentFilter
185 case "function_call", "tool_calls":
186 return fantasy.FinishReasonToolCalls
187 default:
188 return fantasy.FinishReasonUnknown
189 }
190}
191
192// DefaultUsageFunc is the default implementation for calculating usage.
193func DefaultUsageFunc(response openai.ChatCompletion) (fantasy.Usage, fantasy.ProviderOptionsData) {
194 completionTokenDetails := response.Usage.CompletionTokensDetails
195 promptTokenDetails := response.Usage.PromptTokensDetails
196
197 // Build provider metadata
198 providerMetadata := &ProviderMetadata{}
199
200 // Add logprobs if available
201 if len(response.Choices) > 0 && len(response.Choices[0].Logprobs.Content) > 0 {
202 providerMetadata.Logprobs = response.Choices[0].Logprobs.Content
203 }
204
205 // Add prediction tokens if available
206 if completionTokenDetails.AcceptedPredictionTokens > 0 || completionTokenDetails.RejectedPredictionTokens > 0 {
207 if completionTokenDetails.AcceptedPredictionTokens > 0 {
208 providerMetadata.AcceptedPredictionTokens = completionTokenDetails.AcceptedPredictionTokens
209 }
210 if completionTokenDetails.RejectedPredictionTokens > 0 {
211 providerMetadata.RejectedPredictionTokens = completionTokenDetails.RejectedPredictionTokens
212 }
213 }
214 // OpenAI reports prompt_tokens INCLUDING cached tokens. Subtract to avoid double-counting.
215 inputTokens := max(response.Usage.PromptTokens-promptTokenDetails.CachedTokens, 0)
216 return fantasy.Usage{
217 InputTokens: inputTokens,
218 OutputTokens: response.Usage.CompletionTokens,
219 TotalTokens: response.Usage.TotalTokens,
220 ReasoningTokens: completionTokenDetails.ReasoningTokens,
221 CacheReadTokens: promptTokenDetails.CachedTokens,
222 }, providerMetadata
223}
224
225// DefaultStreamUsageFunc is the default implementation for calculating stream usage.
226func DefaultStreamUsageFunc(chunk openai.ChatCompletionChunk, _ map[string]any, metadata fantasy.ProviderMetadata) (fantasy.Usage, fantasy.ProviderMetadata) {
227 if chunk.Usage.TotalTokens == 0 {
228 return fantasy.Usage{}, nil
229 }
230 streamProviderMetadata := &ProviderMetadata{}
231 if metadata != nil {
232 if providerMetadata, ok := metadata[Name]; ok {
233 converted, ok := providerMetadata.(*ProviderMetadata)
234 if ok {
235 streamProviderMetadata = converted
236 }
237 }
238 }
239 // we do this here because the acc does not add prompt details
240 completionTokenDetails := chunk.Usage.CompletionTokensDetails
241 promptTokenDetails := chunk.Usage.PromptTokensDetails
242 // OpenAI reports prompt_tokens INCLUDING cached tokens. Subtract to avoid double-counting.
243 inputTokens := max(chunk.Usage.PromptTokens-promptTokenDetails.CachedTokens, 0)
244 usage := fantasy.Usage{
245 InputTokens: inputTokens,
246 OutputTokens: chunk.Usage.CompletionTokens,
247 TotalTokens: chunk.Usage.TotalTokens,
248 ReasoningTokens: completionTokenDetails.ReasoningTokens,
249 CacheReadTokens: promptTokenDetails.CachedTokens,
250 }
251
252 // Add prediction tokens if available
253 if completionTokenDetails.AcceptedPredictionTokens > 0 || completionTokenDetails.RejectedPredictionTokens > 0 {
254 if completionTokenDetails.AcceptedPredictionTokens > 0 {
255 streamProviderMetadata.AcceptedPredictionTokens = completionTokenDetails.AcceptedPredictionTokens
256 }
257 if completionTokenDetails.RejectedPredictionTokens > 0 {
258 streamProviderMetadata.RejectedPredictionTokens = completionTokenDetails.RejectedPredictionTokens
259 }
260 }
261
262 return usage, fantasy.ProviderMetadata{
263 Name: streamProviderMetadata,
264 }
265}
266
267// DefaultStreamProviderMetadataFunc is the default implementation for handling stream provider metadata.
268func DefaultStreamProviderMetadataFunc(choice openai.ChatCompletionChoice, metadata fantasy.ProviderMetadata) fantasy.ProviderMetadata {
269 if metadata == nil {
270 metadata = fantasy.ProviderMetadata{}
271 }
272 streamProviderMetadata, ok := metadata[Name]
273 if !ok {
274 streamProviderMetadata = &ProviderMetadata{}
275 }
276 if converted, ok := streamProviderMetadata.(*ProviderMetadata); ok {
277 converted.Logprobs = choice.Logprobs.Content
278 metadata[Name] = converted
279 }
280 return metadata
281}
282
// DefaultToPrompt converts a fantasy prompt to OpenAI format with default handling.
//
// Each fantasy message is mapped to the corresponding chat-completion message
// union. Parts that cannot be converted are skipped and reported as call
// warnings rather than failing the whole conversion, and user/assistant
// messages that end up with no visible content are dropped entirely.
func DefaultToPrompt(prompt fantasy.Prompt, _, _ string) ([]openai.ChatCompletionMessageParamUnion, []fantasy.CallWarning) {
	var messages []openai.ChatCompletionMessageParamUnion
	var warnings []fantasy.CallWarning
	for _, msg := range prompt {
		switch msg.Role {
		case fantasy.MessageRoleSystem:
			// Collect all non-blank text parts and join them into a single
			// system message; non-text parts cannot be represented here.
			var systemPromptParts []string
			for _, c := range msg.Content {
				if c.GetType() != fantasy.ContentTypeText {
					warnings = append(warnings, fantasy.CallWarning{
						Type:    fantasy.CallWarningTypeOther,
						Message: "system prompt can only have text content",
					})
					continue
				}
				textPart, ok := fantasy.AsContentType[fantasy.TextPart](c)
				if !ok {
					warnings = append(warnings, fantasy.CallWarning{
						Type:    fantasy.CallWarningTypeOther,
						Message: "system prompt text part does not have the right type",
					})
					continue
				}
				text := textPart.Text
				if strings.TrimSpace(text) != "" {
					systemPromptParts = append(systemPromptParts, textPart.Text)
				}
			}
			if len(systemPromptParts) == 0 {
				warnings = append(warnings, fantasy.CallWarning{
					Type:    fantasy.CallWarningTypeOther,
					Message: "system prompt has no text parts",
				})
				continue
			}
			messages = append(messages, openai.SystemMessage(strings.Join(systemPromptParts, "\n")))
		case fantasy.MessageRoleUser:
			// simple user message just text content
			if len(msg.Content) == 1 && msg.Content[0].GetType() == fantasy.ContentTypeText {
				textPart, ok := fantasy.AsContentType[fantasy.TextPart](msg.Content[0])
				if !ok {
					warnings = append(warnings, fantasy.CallWarning{
						Type:    fantasy.CallWarningTypeOther,
						Message: "user message text part does not have the right type",
					})
					continue
				}
				messages = append(messages, openai.UserMessage(textPart.Text))
				continue
			}
			// text content and attachments
			// for now we only support image content later we need to check
			// TODO: add the supported media types to the language model so we
			// can use that to validate the data here.
			var content []openai.ChatCompletionContentPartUnionParam
			for _, c := range msg.Content {
				switch c.GetType() {
				case fantasy.ContentTypeText:
					textPart, ok := fantasy.AsContentType[fantasy.TextPart](c)
					if !ok {
						warnings = append(warnings, fantasy.CallWarning{
							Type:    fantasy.CallWarningTypeOther,
							Message: "user message text part does not have the right type",
						})
						continue
					}
					content = append(content, openai.ChatCompletionContentPartUnionParam{
						OfText: &openai.ChatCompletionContentPartTextParam{
							Text: textPart.Text,
						},
					})
				case fantasy.ContentTypeFile:
					filePart, ok := fantasy.AsContentType[fantasy.FilePart](c)
					if !ok {
						warnings = append(warnings, fantasy.CallWarning{
							Type:    fantasy.CallWarningTypeOther,
							Message: "user message file part does not have the right type",
						})
						continue
					}

					// Dispatch on media type; each branch builds the matching
					// SDK content-part union.
					switch {
					case strings.HasPrefix(filePart.MediaType, "text/"):
						// NOTE(review): unlike the PDF branch below, this sends
						// raw base64 with no data-URI prefix and no filename —
						// confirm this is what the API expects for text files.
						base64Encoded := base64.StdEncoding.EncodeToString(filePart.Data)
						documentBlock := openai.ChatCompletionContentPartFileFileParam{
							FileData: param.NewOpt(base64Encoded),
						}
						content = append(content, openai.FileContentPart(documentBlock))

					case strings.HasPrefix(filePart.MediaType, "image/"):
						// Handle image files as data-URI image_url parts.
						base64Encoded := base64.StdEncoding.EncodeToString(filePart.Data)
						data := "data:" + filePart.MediaType + ";base64," + base64Encoded
						imageURL := openai.ChatCompletionContentPartImageImageURLParam{URL: data}

						// Check for provider-specific options like image detail
						if providerOptions, ok := filePart.ProviderOptions[Name]; ok {
							if detail, ok := providerOptions.(*ProviderFileOptions); ok {
								imageURL.Detail = detail.ImageDetail
							}
						}

						imageBlock := openai.ChatCompletionContentPartImageParam{ImageURL: imageURL}
						content = append(content, openai.ChatCompletionContentPartUnionParam{OfImageURL: &imageBlock})

					case filePart.MediaType == "audio/wav":
						// Handle WAV audio files
						base64Encoded := base64.StdEncoding.EncodeToString(filePart.Data)
						audioBlock := openai.ChatCompletionContentPartInputAudioParam{
							InputAudio: openai.ChatCompletionContentPartInputAudioInputAudioParam{
								Data:   base64Encoded,
								Format: "wav",
							},
						}
						content = append(content, openai.ChatCompletionContentPartUnionParam{OfInputAudio: &audioBlock})

					case filePart.MediaType == "audio/mpeg" || filePart.MediaType == "audio/mp3":
						// Handle MP3 audio files
						base64Encoded := base64.StdEncoding.EncodeToString(filePart.Data)
						audioBlock := openai.ChatCompletionContentPartInputAudioParam{
							InputAudio: openai.ChatCompletionContentPartInputAudioInputAudioParam{
								Data:   base64Encoded,
								Format: "mp3",
							},
						}
						content = append(content, openai.ChatCompletionContentPartUnionParam{OfInputAudio: &audioBlock})

					case filePart.MediaType == "application/pdf":
						// Handle PDF files: the payload may be either a
						// previously-uploaded file ID or raw PDF bytes.
						dataStr := string(filePart.Data)

						// Check if data looks like a file ID (starts with "file-")
						if strings.HasPrefix(dataStr, "file-") {
							fileBlock := openai.ChatCompletionContentPartFileParam{
								File: openai.ChatCompletionContentPartFileFileParam{
									FileID: param.NewOpt(dataStr),
								},
							}
							content = append(content, openai.ChatCompletionContentPartUnionParam{OfFile: &fileBlock})
						} else {
							// Handle as base64 data
							base64Encoded := base64.StdEncoding.EncodeToString(filePart.Data)
							data := "data:application/pdf;base64," + base64Encoded

							filename := filePart.Filename
							if filename == "" {
								// Generate default filename based on content index
								filename = fmt.Sprintf("part-%d.pdf", len(content))
							}

							fileBlock := openai.ChatCompletionContentPartFileParam{
								File: openai.ChatCompletionContentPartFileFileParam{
									Filename: param.NewOpt(filename),
									FileData: param.NewOpt(data),
								},
							}
							content = append(content, openai.ChatCompletionContentPartUnionParam{OfFile: &fileBlock})
						}

					default:
						warnings = append(warnings, fantasy.CallWarning{
							Type:    fantasy.CallWarningTypeOther,
							Message: fmt.Sprintf("file part media type %s not supported", filePart.MediaType),
						})
					}
				}
			}
			// Drop messages whose parts were all skipped; sending an empty
			// user message would be rejected or meaningless.
			if !hasVisibleUserContent(content) {
				warnings = append(warnings, fantasy.CallWarning{
					Type:    fantasy.CallWarningTypeOther,
					Message: "dropping empty user message (contains neither user-facing content nor tool results)",
				})
				continue
			}
			messages = append(messages, openai.UserMessage(content))
		case fantasy.MessageRoleAssistant:
			// simple assistant message just text content
			if len(msg.Content) == 1 && msg.Content[0].GetType() == fantasy.ContentTypeText {
				textPart, ok := fantasy.AsContentType[fantasy.TextPart](msg.Content[0])
				if !ok {
					warnings = append(warnings, fantasy.CallWarning{
						Type:    fantasy.CallWarningTypeOther,
						Message: "assistant message text part does not have the right type",
					})
					continue
				}
				messages = append(messages, openai.AssistantMessage(textPart.Text))
				continue
			}
			assistantMsg := openai.ChatCompletionAssistantMessageParam{
				Role: "assistant",
			}
			for _, c := range msg.Content {
				switch c.GetType() {
				case fantasy.ContentTypeText:
					textPart, ok := fantasy.AsContentType[fantasy.TextPart](c)
					if !ok {
						warnings = append(warnings, fantasy.CallWarning{
							Type:    fantasy.CallWarningTypeOther,
							Message: "assistant message text part does not have the right type",
						})
						continue
					}
					// NOTE(review): this assignment replaces any previously set
					// Content, so if a message carries multiple text parts only
					// the last one survives — confirm that is intended.
					assistantMsg.Content = openai.ChatCompletionAssistantMessageParamContentUnion{
						OfString: param.NewOpt(textPart.Text),
					}
				case fantasy.ContentTypeToolCall:
					toolCallPart, ok := fantasy.AsContentType[fantasy.ToolCallPart](c)
					if !ok {
						warnings = append(warnings, fantasy.CallWarning{
							Type:    fantasy.CallWarningTypeOther,
							Message: "assistant message tool part does not have the right type",
						})
						continue
					}
					assistantMsg.ToolCalls = append(assistantMsg.ToolCalls,
						openai.ChatCompletionMessageToolCallUnionParam{
							OfFunction: &openai.ChatCompletionMessageFunctionToolCallParam{
								ID:   toolCallPart.ToolCallID,
								Type: "function",
								Function: openai.ChatCompletionMessageFunctionToolCallFunctionParam{
									Name:      toolCallPart.ToolName,
									Arguments: toolCallPart.Input,
								},
							},
						})
				}
			}
			if !hasVisibleAssistantContent(&assistantMsg) {
				warnings = append(warnings, fantasy.CallWarning{
					Type:    fantasy.CallWarningTypeOther,
					Message: "dropping empty assistant message (contains neither user-facing content nor tool calls)",
				})
				continue
			}
			messages = append(messages, openai.ChatCompletionMessageParamUnion{
				OfAssistant: &assistantMsg,
			})
		case fantasy.MessageRoleTool:
			// Each tool-result part becomes its own tool message keyed by the
			// originating tool-call ID.
			for _, c := range msg.Content {
				if c.GetType() != fantasy.ContentTypeToolResult {
					warnings = append(warnings, fantasy.CallWarning{
						Type:    fantasy.CallWarningTypeOther,
						Message: "tool message can only have tool result content",
					})
					continue
				}

				toolResultPart, ok := fantasy.AsContentType[fantasy.ToolResultPart](c)
				if !ok {
					warnings = append(warnings, fantasy.CallWarning{
						Type:    fantasy.CallWarningTypeOther,
						Message: "tool message result part does not have the right type",
					})
					continue
				}

				// NOTE(review): output types other than Text and Error fall
				// through this switch silently, with no message and no warning
				// — confirm that is intended.
				switch toolResultPart.Output.GetType() {
				case fantasy.ToolResultContentTypeText:
					output, ok := fantasy.AsToolResultOutputType[fantasy.ToolResultOutputContentText](toolResultPart.Output)
					if !ok {
						warnings = append(warnings, fantasy.CallWarning{
							Type:    fantasy.CallWarningTypeOther,
							Message: "tool result output does not have the right type",
						})
						continue
					}
					messages = append(messages, openai.ToolMessage(output.Text, toolResultPart.ToolCallID))
				case fantasy.ToolResultContentTypeError:
					// TODO: check if better handling is needed
					output, ok := fantasy.AsToolResultOutputType[fantasy.ToolResultOutputContentError](toolResultPart.Output)
					if !ok {
						warnings = append(warnings, fantasy.CallWarning{
							Type:    fantasy.CallWarningTypeOther,
							Message: "tool result output does not have the right type",
						})
						continue
					}
					messages = append(messages, openai.ToolMessage(output.Error.Error(), toolResultPart.ToolCallID))
				}
			}
		}
	}
	return messages, warnings
}
569
570func hasVisibleUserContent(content []openai.ChatCompletionContentPartUnionParam) bool {
571 for _, part := range content {
572 if part.OfText != nil || part.OfImageURL != nil || part.OfInputAudio != nil || part.OfFile != nil {
573 return true
574 }
575 }
576 return false
577}
578
579func hasVisibleAssistantContent(msg *openai.ChatCompletionAssistantMessageParam) bool {
580 // Check if there's text content
581 if !param.IsOmitted(msg.Content.OfString) || len(msg.Content.OfArrayOfContentParts) > 0 {
582 return true
583 }
584 // Check if there are tool calls
585 if len(msg.ToolCalls) > 0 {
586 return true
587 }
588 return false
589}