openai.go

package provider

import (
	"context"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"log/slog"
	"strings"
	"time"

	"github.com/charmbracelet/catwalk/pkg/catwalk"
	"github.com/charmbracelet/crush/internal/config"
	"github.com/charmbracelet/crush/internal/llm/tools"
	"github.com/charmbracelet/crush/internal/log"
	"github.com/charmbracelet/crush/internal/message"
	"github.com/google/uuid"
	"github.com/openai/openai-go"
	"github.com/openai/openai-go/option"
	"github.com/openai/openai-go/packages/param"
	"github.com/openai/openai-go/shared"
)

// openaiClient implements ProviderClient on top of the official OpenAI Go SDK.
type openaiClient struct {
	providerOptions providerClientOptions
	client          openai.Client
}

// OpenAIClient is a ProviderClient backed by an OpenAI-compatible endpoint.
type OpenAIClient ProviderClient

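// newOpenAIClient builds a ProviderClient that talks to OpenAI, or to any
// OpenAI-compatible endpoint configured via a custom base URL.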
func newOpenAIClient(opts providerClientOptions) OpenAIClient {
	return &openaiClient{
		providerOptions: opts,
		client:          createOpenAIClient(opts),
	}
}

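// createOpenAIClient assembles the SDK request options: API key, resolved
// base URL, an optional debug HTTP client, plus any extra headers and body
// fields from the provider config.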
func createOpenAIClient(opts providerClientOptions) openai.Client {
	openaiClientOptions := []option.RequestOption{}
	if opts.apiKey != "" {
		openaiClientOptions = append(openaiClientOptions, option.WithAPIKey(opts.apiKey))
	}
	if opts.baseURL != "" {
		resolvedBaseURL, err := config.Get().Resolve(opts.baseURL)
		if err == nil && resolvedBaseURL != "" {
			openaiClientOptions = append(openaiClientOptions, option.WithBaseURL(resolvedBaseURL))
		}
	}

	if config.Get().Options.Debug {
		httpClient := log.NewHTTPClient()
		openaiClientOptions = append(openaiClientOptions, option.WithHTTPClient(httpClient))
	}

	for key, value := range opts.extraHeaders {
		openaiClientOptions = append(openaiClientOptions, option.WithHeader(key, value))
	}

	for extraKey, extraValue := range opts.extraBody {
		openaiClientOptions = append(openaiClientOptions, option.WithJSONSet(extraKey, extraValue))
	}

	return openai.NewClient(openaiClientOptions...)
}

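// convertMessages translates internal messages into the OpenAI chat format.
// For Anthropic models served through OpenRouter, it also attaches
// Anthropic-style cache_control extra fields to enable prompt caching.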
func (o *openaiClient) convertMessages(messages []message.Message) (openaiMessages []openai.ChatCompletionMessageParamUnion) {
	isAnthropicModel := o.providerOptions.config.ID == string(catwalk.InferenceProviderOpenRouter) && strings.HasPrefix(o.Model().ID, "anthropic/")
	// Add the system message first.
	systemMessage := o.providerOptions.systemMessage
	if o.providerOptions.systemPromptPrefix != "" {
		systemMessage = o.providerOptions.systemPromptPrefix + "\n" + systemMessage
	}

	system := openai.SystemMessage(systemMessage)
	if isAnthropicModel && !o.providerOptions.disableCache {
		systemTextBlock := openai.ChatCompletionContentPartTextParam{Text: systemMessage}
		systemTextBlock.SetExtraFields(
			map[string]any{
				"cache_control": map[string]string{
					"type": "ephemeral",
				},
			},
		)
		var content []openai.ChatCompletionContentPartTextParam
		content = append(content, systemTextBlock)
		system = openai.SystemMessage(content)
	}
	openaiMessages = append(openaiMessages, system)

	for i, msg := range messages {
		// Only the last two messages are marked as cacheable.
		cache := i > len(messages)-3
		switch msg.Role {
		case message.User:
			var content []openai.ChatCompletionContentPartUnionParam

			textBlock := openai.ChatCompletionContentPartTextParam{Text: msg.Content().String()}
			content = append(content, openai.ChatCompletionContentPartUnionParam{OfText: &textBlock})
			hasBinaryContent := false
			for _, binaryContent := range msg.BinaryContent() {
				hasBinaryContent = true
				imageURL := openai.ChatCompletionContentPartImageImageURLParam{URL: binaryContent.String(catwalk.InferenceProviderOpenAI)}
				imageBlock := openai.ChatCompletionContentPartImageParam{ImageURL: imageURL}

				content = append(content, openai.ChatCompletionContentPartUnionParam{OfImageURL: &imageBlock})
			}
			if cache && !o.providerOptions.disableCache && isAnthropicModel {
				// content holds a pointer to textBlock, so the extra fields
				// set here are visible in the already-appended part.
				textBlock.SetExtraFields(map[string]any{
					"cache_control": map[string]string{
						"type": "ephemeral",
					},
				})
			}
			if hasBinaryContent || (isAnthropicModel && !o.providerOptions.disableCache) {
				openaiMessages = append(openaiMessages, openai.UserMessage(content))
			} else {
				openaiMessages = append(openaiMessages, openai.UserMessage(msg.Content().String()))
			}

		case message.Assistant:
			assistantMsg := openai.ChatCompletionAssistantMessageParam{
				Role: "assistant",
			}

			if len(msg.ToolCalls()) > 0 {
				assistantMsg.ToolCalls = make([]openai.ChatCompletionMessageToolCallParam, len(msg.ToolCalls()))
				for i, call := range msg.ToolCalls() {
					assistantMsg.ToolCalls[i] = openai.ChatCompletionMessageToolCallParam{
						ID:   call.ID,
						Type: "function",
						Function: openai.ChatCompletionMessageToolCallFunctionParam{
							Name:      call.Name,
							Arguments: call.Input,
						},
					}
				}
			}
			if msg.Content().String() != "" {
				assistantMsg.Content = openai.ChatCompletionAssistantMessageParamContentUnion{
					OfString: param.NewOpt(msg.Content().Text),
				}
			}

			if cache && !o.providerOptions.disableCache && isAnthropicModel {
				assistantMsg.SetExtraFields(map[string]any{
					"cache_control": map[string]string{
						"type": "ephemeral",
					},
				})
			}
			openaiMessages = append(openaiMessages, openai.ChatCompletionMessageParamUnion{
				OfAssistant: &assistantMsg,
			})

		case message.Tool:
			for _, result := range msg.ToolResults() {
				openaiMessages = append(openaiMessages,
					openai.ToolMessage(result.Content, result.ToolCallID),
				)
			}
		}
	}

	return
}

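// convertTools maps internal tool definitions onto OpenAI function-calling
// tool parameters, with a JSON Schema object describing each tool's arguments.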
func (o *openaiClient) convertTools(tools []tools.BaseTool) []openai.ChatCompletionToolParam {
	openaiTools := make([]openai.ChatCompletionToolParam, len(tools))

	for i, tool := range tools {
		info := tool.Info()
		openaiTools[i] = openai.ChatCompletionToolParam{
			Function: openai.FunctionDefinitionParam{
				Name:        info.Name,
				Description: openai.String(info.Description),
				Parameters: openai.FunctionParameters{
					"type":       "object",
					"properties": info.Parameters,
					"required":   info.Required,
				},
			},
		}
	}

	return openaiTools
}

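// finishReason maps an OpenAI finish_reason string onto the
// provider-agnostic message.FinishReason values.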
func (o *openaiClient) finishReason(reason string) message.FinishReason {
	switch reason {
	case "stop":
		return message.FinishReasonEndTurn
	case "length":
		return message.FinishReasonMaxTokens
	case "tool_calls":
		return message.FinishReasonToolUse
	default:
		return message.FinishReasonUnknown
	}
}

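// preparedParams builds the ChatCompletionNewParams for a request, applying
// the configured max tokens and, for reasoning-capable models, the selected
// reasoning effort.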
func (o *openaiClient) preparedParams(messages []openai.ChatCompletionMessageParamUnion, tools []openai.ChatCompletionToolParam) openai.ChatCompletionNewParams {
	model := o.providerOptions.model(o.providerOptions.modelType)
	cfg := config.Get()

	modelConfig := cfg.Models[config.SelectedModelTypeLarge]
	if o.providerOptions.modelType == config.SelectedModelTypeSmall {
		modelConfig = cfg.Models[config.SelectedModelTypeSmall]
	}

	reasoningEffort := modelConfig.ReasoningEffort

	params := openai.ChatCompletionNewParams{
		Model:    openai.ChatModel(model.ID),
		Messages: messages,
		Tools:    tools,
	}

	maxTokens := model.DefaultMaxTokens
	if modelConfig.MaxTokens > 0 {
		maxTokens = modelConfig.MaxTokens
	}

	// Override max tokens if set in provider options.
	if o.providerOptions.maxTokens > 0 {
		maxTokens = o.providerOptions.maxTokens
	}
	if model.CanReason {
		params.MaxCompletionTokens = openai.Int(maxTokens)
		switch reasoningEffort {
		case "low":
			params.ReasoningEffort = shared.ReasoningEffortLow
		case "medium":
			params.ReasoningEffort = shared.ReasoningEffortMedium
		case "high":
			params.ReasoningEffort = shared.ReasoningEffortHigh
		case "minimal":
			params.ReasoningEffort = shared.ReasoningEffort("minimal")
		default:
			params.ReasoningEffort = shared.ReasoningEffort(reasoningEffort)
		}
	} else {
		params.MaxTokens = openai.Int(maxTokens)
	}

	return params
}

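// send performs a blocking chat completion request, retrying retryable
// failures (rate limits, transient 5xx, expired credentials) with backoff.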
func (o *openaiClient) send(ctx context.Context, messages []message.Message, tools []tools.BaseTool) (response *ProviderResponse, err error) {
	params := o.preparedParams(o.convertMessages(messages), o.convertTools(tools))
	attempts := 0
	for {
		attempts++
		openaiResponse, err := o.client.Chat.Completions.New(
			ctx,
			params,
		)
		// If there is an error, check whether the call can be retried.
		if err != nil {
			retry, after, retryErr := o.shouldRetry(attempts, err)
			if retryErr != nil {
				return nil, retryErr
			}
			if retry {
				slog.Warn("Retrying due to rate limit", "attempt", attempts, "max_retries", maxRetries, "error", err)
				select {
				case <-ctx.Done():
					return nil, ctx.Err()
				case <-time.After(time.Duration(after) * time.Millisecond):
					continue
				}
			}
			return nil, err
		}

		if len(openaiResponse.Choices) == 0 {
			return nil, fmt.Errorf("received empty response from OpenAI API - check endpoint configuration")
		}

		content := openaiResponse.Choices[0].Message.Content

		toolCalls := o.toolCalls(*openaiResponse)
		finishReason := o.finishReason(string(openaiResponse.Choices[0].FinishReason))

		if len(toolCalls) > 0 {
			finishReason = message.FinishReasonToolUse
		}

		return &ProviderResponse{
			Content:      content,
			ToolCalls:    toolCalls,
			Usage:        o.usage(*openaiResponse),
			FinishReason: finishReason,
		}, nil
	}
}

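// stream opens a streaming chat completion and fans the chunks out as
// ProviderEvents on the returned channel: thinking deltas, content deltas,
// tool-call starts, and a final EventComplete (or EventError).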
func (o *openaiClient) stream(ctx context.Context, messages []message.Message, tools []tools.BaseTool) <-chan ProviderEvent {
	params := o.preparedParams(o.convertMessages(messages), o.convertTools(tools))
	params.StreamOptions = openai.ChatCompletionStreamOptionsParam{
		IncludeUsage: openai.Bool(true),
	}

	attempts := 0
	eventChan := make(chan ProviderEvent)

	go func() {
		for {
			attempts++
			// Kujtim: fixes an issue with Anthropic models on OpenRouter.
			if len(params.Tools) == 0 {
				params.Tools = nil
			}
			openaiStream := o.client.Chat.Completions.NewStreaming(
				ctx,
				params,
			)

			acc := openai.ChatCompletionAccumulator{}
			currentContent := ""
			toolCalls := make([]message.ToolCall, 0)
			msgToolCalls := make(map[int64]openai.ChatCompletionMessageToolCall)
			for openaiStream.Next() {
				chunk := openaiStream.Current()
				// Kujtim: works around OpenRouter's Qwen models, which send -1 as the tool call index.
				if len(chunk.Choices) != 0 && len(chunk.Choices[0].Delta.ToolCalls) > 0 && chunk.Choices[0].Delta.ToolCalls[0].Index == -1 {
					chunk.Choices[0].Delta.ToolCalls[0].Index = 0
				}
				acc.AddChunk(chunk)
				for _, choice := range chunk.Choices {
					reasoning, ok := choice.Delta.JSON.ExtraFields["reasoning"]
					if ok && reasoning.Raw() != "" {
						reasoningStr := ""
						// Best-effort decode; a malformed reasoning payload is ignored.
						_ = json.Unmarshal([]byte(reasoning.Raw()), &reasoningStr)
						if reasoningStr != "" {
							eventChan <- ProviderEvent{
								Type:     EventThinkingDelta,
								Thinking: reasoningStr,
							}
						}
					}
					if choice.Delta.Content != "" {
						eventChan <- ProviderEvent{
							Type:    EventContentDelta,
							Content: choice.Delta.Content,
						}
						currentContent += choice.Delta.Content
					} else if len(choice.Delta.ToolCalls) > 0 {
						toolCall := choice.Delta.ToolCalls[0]
						newToolCall := false
						if existingToolCall, ok := msgToolCalls[toolCall.Index]; ok { // tool call exists
							if toolCall.ID != "" && toolCall.ID != existingToolCall.ID {
								found := false
								// Try to find the tool call by ID instead.
								for _, tool := range msgToolCalls {
									if tool.ID == toolCall.ID {
										existingToolCall.Function.Arguments += toolCall.Function.Arguments
										msgToolCalls[toolCall.Index] = existingToolCall
										found = true
									}
								}
								if !found {
									newToolCall = true
								}
							} else {
								existingToolCall.Function.Arguments += toolCall.Function.Arguments
								msgToolCalls[toolCall.Index] = existingToolCall
							}
						} else {
							newToolCall = true
						}
						if newToolCall {
							if toolCall.ID == "" {
								toolCall.ID = uuid.NewString()
							}
							eventChan <- ProviderEvent{
								Type: EventToolUseStart,
								ToolCall: &message.ToolCall{
									ID:       toolCall.ID,
									Name:     toolCall.Function.Name,
									Finished: false,
								},
							}
							msgToolCalls[toolCall.Index] = openai.ChatCompletionMessageToolCall{
								ID:   toolCall.ID,
								Type: "function",
								Function: openai.ChatCompletionMessageToolCallFunction{
									Name:      toolCall.Function.Name,
									Arguments: toolCall.Function.Arguments,
								},
							}
						}
					}
				}
			}

			err := openaiStream.Err()
			if err == nil || errors.Is(err, io.EOF) {
				if len(acc.Choices) == 0 {
					eventChan <- ProviderEvent{
						Type:  EventError,
						Error: fmt.Errorf("received empty streaming response from OpenAI API - check endpoint configuration"),
					}
					close(eventChan)
					return
				}

				resultFinishReason := acc.Choices[0].FinishReason
				if resultFinishReason == "" {
					// If the finish reason is empty, assume the completion succeeded.
					// INFO: OpenRouter sometimes omits it.
					resultFinishReason = "stop"
				}
				// Stream completed successfully.
				finishReason := o.finishReason(resultFinishReason)
				if len(acc.Choices[0].Message.ToolCalls) > 0 {
					toolCalls = append(toolCalls, o.toolCalls(acc.ChatCompletion)...)
				}
				if len(toolCalls) > 0 {
					finishReason = message.FinishReasonToolUse
				}

				eventChan <- ProviderEvent{
					Type: EventComplete,
					Response: &ProviderResponse{
						Content:      currentContent,
						ToolCalls:    toolCalls,
						Usage:        o.usage(acc.ChatCompletion),
						FinishReason: finishReason,
					},
				}
				close(eventChan)
				return
			}

			// If there is an error, check whether the call can be retried.
			retry, after, retryErr := o.shouldRetry(attempts, err)
			if retryErr != nil {
				eventChan <- ProviderEvent{Type: EventError, Error: retryErr}
				close(eventChan)
				return
			}
			if retry {
				slog.Warn("Retrying due to rate limit", "attempt", attempts, "max_retries", maxRetries, "error", err)
				select {
				case <-ctx.Done():
					// The context was cancelled; surface the cancellation error.
					if ctx.Err() != nil {
						eventChan <- ProviderEvent{Type: EventError, Error: ctx.Err()}
					}
					close(eventChan)
					return
				case <-time.After(time.Duration(after) * time.Millisecond):
					continue
				}
			}
			eventChan <- ProviderEvent{Type: EventError, Error: err}
			close(eventChan)
			return
		}
	}()

	return eventChan
}

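// shouldRetry decides whether a failed request should be retried and after
// how many milliseconds. It rebuilds the client on 401s, honors Retry-After
// headers, and otherwise applies exponential backoff.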
func (o *openaiClient) shouldRetry(attempts int, err error) (bool, int64, error) {
	if attempts > maxRetries {
		return false, 0, fmt.Errorf("maximum retry attempts reached for rate limit: %d retries", maxRetries)
	}
	if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) {
		return false, 0, err
	}
	var apiErr *openai.Error
	retryMs := 0
	retryAfterValues := []string{}
	if errors.As(err, &apiErr) {
		// Check for token expiration (401 Unauthorized).
		if apiErr.StatusCode == 401 {
			o.providerOptions.apiKey, err = config.Get().Resolve(o.providerOptions.config.APIKey)
			if err != nil {
				return false, 0, fmt.Errorf("failed to resolve API key: %w", err)
			}
			o.client = createOpenAIClient(o.providerOptions)
			return true, 0, nil
		}

		if apiErr.StatusCode != 429 && apiErr.StatusCode != 500 {
			return false, 0, err
		}

		retryAfterValues = apiErr.Response.Header.Values("Retry-After")
	}

	if apiErr != nil {
		slog.Warn("OpenAI API error", "status_code", apiErr.StatusCode, "message", apiErr.Message, "type", apiErr.Type)
		if len(retryAfterValues) > 0 {
			slog.Warn("Retry-After header", "values", retryAfterValues)
		}
	} else {
		slog.Error("OpenAI API error", "error", err.Error(), "attempt", attempts, "max_retries", maxRetries)
	}

	// Exponential backoff starting at 2s, padded by a flat 20%.
	backoffMs := 2000 * (1 << (attempts - 1))
	jitterMs := int(float64(backoffMs) * 0.2)
	retryMs = backoffMs + jitterMs
	if len(retryAfterValues) > 0 {
		// Retry-After is expressed in seconds; convert to milliseconds.
		if _, err := fmt.Sscanf(retryAfterValues[0], "%d", &retryMs); err == nil {
			retryMs = retryMs * 1000
		}
	}
	return true, int64(retryMs), nil
}

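// toolCalls extracts the finished tool calls from a completed (or fully
// accumulated) chat completion.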
func (o *openaiClient) toolCalls(completion openai.ChatCompletion) []message.ToolCall {
	var toolCalls []message.ToolCall

	if len(completion.Choices) > 0 && len(completion.Choices[0].Message.ToolCalls) > 0 {
		for _, call := range completion.Choices[0].Message.ToolCalls {
			// The accumulator can produce placeholder entries with no
			// function name; skip them.
			if call.Function.Name == "" {
				continue
			}
			toolCall := message.ToolCall{
				ID:       call.ID,
				Name:     call.Function.Name,
				Input:    call.Function.Arguments,
				Type:     "function",
				Finished: true,
			}
			toolCalls = append(toolCalls, toolCall)
		}
	}

	return toolCalls
}

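// usage converts OpenAI token accounting into the provider-agnostic
// TokenUsage, reporting cached prompt tokens separately from fresh input.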
func (o *openaiClient) usage(completion openai.ChatCompletion) TokenUsage {
	cachedTokens := completion.Usage.PromptTokensDetails.CachedTokens
	inputTokens := completion.Usage.PromptTokens - cachedTokens

	return TokenUsage{
		InputTokens:         inputTokens,
		OutputTokens:        completion.Usage.CompletionTokens,
		CacheCreationTokens: 0, // OpenAI doesn't provide this directly
		CacheReadTokens:     cachedTokens,
	}
}

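// Model returns the catwalk model currently selected for this client.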
func (o *openaiClient) Model() catwalk.Model {
	return o.providerOptions.model(o.providerOptions.modelType)
}