openai.go

package provider

import (
	"context"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"log/slog"
	"strings"
	"time"

	"github.com/charmbracelet/catwalk/pkg/catwalk"
	"github.com/charmbracelet/crush/internal/config"
	"github.com/charmbracelet/crush/internal/llm/tools"
	"github.com/charmbracelet/crush/internal/log"
	"github.com/charmbracelet/crush/internal/message"
	"github.com/google/uuid"
	"github.com/openai/openai-go"
	"github.com/openai/openai-go/option"
	"github.com/openai/openai-go/packages/param"
	"github.com/openai/openai-go/shared"
)

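// openaiClient wraps the official OpenAI Go SDK client together with the
// provider options used to build requests. It also serves OpenAI-compatible
// endpoints (e.g. OpenRouter) selected via providerClientOptions.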
type openaiClient struct {
	providerOptions providerClientOptions
	client          openai.Client
}

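// OpenAIClient is a named ProviderClient for OpenAI-backed providers.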
type OpenAIClient ProviderClient

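// newOpenAIClient builds an OpenAIClient from the given provider options.
//
// A minimal usage sketch (assuming opts was already populated by the
// provider setup code):
//
//	client := newOpenAIClient(opts)
//	for event := range client.stream(ctx, messages, tools) {
//		// handle ProviderEvent values (content deltas, tool calls, errors)
//	}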
func newOpenAIClient(opts providerClientOptions) OpenAIClient {
	return &openaiClient{
		providerOptions: opts,
		client:          createOpenAIClient(opts),
	}
}

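// createOpenAIClient assembles the SDK client: API key, resolved base URL,
// optional debug HTTP logging, and any extra headers or JSON body fields
// from the provider config.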
func createOpenAIClient(opts providerClientOptions) openai.Client {
	openaiClientOptions := []option.RequestOption{}
	if opts.apiKey != "" {
		openaiClientOptions = append(openaiClientOptions, option.WithAPIKey(opts.apiKey))
	}
	if opts.baseURL != "" {
		resolvedBaseURL, err := config.Get().Resolve(opts.baseURL)
		if err == nil && resolvedBaseURL != "" {
			openaiClientOptions = append(openaiClientOptions, option.WithBaseURL(resolvedBaseURL))
		}
	}

	if config.Get().Options.Debug {
		httpClient := log.NewHTTPClient()
		openaiClientOptions = append(openaiClientOptions, option.WithHTTPClient(httpClient))
	}

	for key, value := range opts.extraHeaders {
		openaiClientOptions = append(openaiClientOptions, option.WithHeader(key, value))
	}

	for extraKey, extraValue := range opts.extraBody {
		openaiClientOptions = append(openaiClientOptions, option.WithJSONSet(extraKey, extraValue))
	}

	return openai.NewClient(openaiClientOptions...)
}

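// convertMessages maps internal message.Message values onto the OpenAI chat
// message union. The system prompt always comes first. For Anthropic models
// routed through OpenRouter, the system prompt and the final two messages are
// flagged with "cache_control" extra fields so prompt caching works.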
func (o *openaiClient) convertMessages(messages []message.Message) (openaiMessages []openai.ChatCompletionMessageParamUnion) {
	isAnthropicModel := o.providerOptions.config.ID == string(catwalk.InferenceProviderOpenRouter) && strings.HasPrefix(o.Model().ID, "anthropic/")
	// Add system message first
	systemMessage := o.providerOptions.systemMessage
	if o.providerOptions.systemPromptPrefix != "" {
		systemMessage = o.providerOptions.systemPromptPrefix + "\n" + systemMessage
	}

	system := openai.SystemMessage(systemMessage)
	if isAnthropicModel && !o.providerOptions.disableCache {
		systemTextBlock := openai.ChatCompletionContentPartTextParam{Text: systemMessage}
		systemTextBlock.SetExtraFields(
			map[string]any{
				"cache_control": map[string]string{
					"type": "ephemeral",
				},
			},
		)
		var content []openai.ChatCompletionContentPartTextParam
		content = append(content, systemTextBlock)
		system = openai.SystemMessage(content)
	}
	openaiMessages = append(openaiMessages, system)

	for i, msg := range messages {
		cache := false
		if i > len(messages)-3 {
			cache = true
		}
		switch msg.Role {
		case message.User:
			var content []openai.ChatCompletionContentPartUnionParam

			textBlock := openai.ChatCompletionContentPartTextParam{Text: msg.Content().String()}
			content = append(content, openai.ChatCompletionContentPartUnionParam{OfText: &textBlock})
			hasBinaryContent := false
			for _, binaryContent := range msg.BinaryContent() {
				hasBinaryContent = true
				imageURL := openai.ChatCompletionContentPartImageImageURLParam{URL: binaryContent.String(catwalk.InferenceProviderOpenAI)}
				imageBlock := openai.ChatCompletionContentPartImageParam{ImageURL: imageURL}

				content = append(content, openai.ChatCompletionContentPartUnionParam{OfImageURL: &imageBlock})
			}
			if cache && !o.providerOptions.disableCache && isAnthropicModel {
				textBlock.SetExtraFields(map[string]any{
					"cache_control": map[string]string{
						"type": "ephemeral",
					},
				})
			}
			if hasBinaryContent || (isAnthropicModel && !o.providerOptions.disableCache) {
				openaiMessages = append(openaiMessages, openai.UserMessage(content))
			} else {
				openaiMessages = append(openaiMessages, openai.UserMessage(msg.Content().String()))
			}

		case message.Assistant:
			assistantMsg := openai.ChatCompletionAssistantMessageParam{
				Role: "assistant",
			}

			// Only include finished tool calls; interrupted tool calls must not be resent.
			if len(msg.ToolCalls()) > 0 {
				finished := make([]message.ToolCall, 0, len(msg.ToolCalls()))
				for _, call := range msg.ToolCalls() {
					if call.Finished {
						finished = append(finished, call)
					}
				}
				if len(finished) > 0 {
					assistantMsg.ToolCalls = make([]openai.ChatCompletionMessageToolCallParam, len(finished))
					for i, call := range finished {
						assistantMsg.ToolCalls[i] = openai.ChatCompletionMessageToolCallParam{
							ID:   call.ID,
							Type: "function",
							Function: openai.ChatCompletionMessageToolCallFunctionParam{
								Name:      call.Name,
								Arguments: call.Input,
							},
						}
					}
				}
			}
			if msg.Content().String() != "" {
				assistantMsg.Content = openai.ChatCompletionAssistantMessageParamContentUnion{
					OfString: param.NewOpt(msg.Content().Text),
				}
			}

			if cache && !o.providerOptions.disableCache && isAnthropicModel {
				assistantMsg.SetExtraFields(map[string]any{
					"cache_control": map[string]string{
						"type": "ephemeral",
					},
				})
			}
			// Skip empty assistant messages (no content and no finished tool calls)
			if msg.Content().String() == "" && len(assistantMsg.ToolCalls) == 0 {
				continue
			}

			openaiMessages = append(openaiMessages, openai.ChatCompletionMessageParamUnion{
				OfAssistant: &assistantMsg,
			})

		case message.Tool:
			for _, result := range msg.ToolResults() {
				openaiMessages = append(openaiMessages,
					openai.ToolMessage(result.Content, result.ToolCallID),
				)
			}
		}
	}

	return openaiMessages
}

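// convertTools translates internal tool definitions into OpenAI function
// tools. Each tool's parameters become a JSON-schema object of the shape:
//
//	{"type": "object", "properties": {...}, "required": [...]}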
func (o *openaiClient) convertTools(tools []tools.BaseTool) []openai.ChatCompletionToolParam {
	openaiTools := make([]openai.ChatCompletionToolParam, len(tools))

	for i, tool := range tools {
		info := tool.Info()
		openaiTools[i] = openai.ChatCompletionToolParam{
			Function: openai.FunctionDefinitionParam{
				Name:        info.Name,
				Description: openai.String(info.Description),
				Parameters: openai.FunctionParameters{
					"type":       "object",
					"properties": info.Parameters,
					"required":   info.Required,
				},
			},
		}
	}

	return openaiTools
}

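// finishReason maps OpenAI finish reasons onto internal ones; anything
// unrecognized becomes FinishReasonUnknown.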
func (o *openaiClient) finishReason(reason string) message.FinishReason {
	switch reason {
	case "stop":
		return message.FinishReasonEndTurn
	case "length":
		return message.FinishReasonMaxTokens
	case "tool_calls":
		return message.FinishReasonToolUse
	default:
		return message.FinishReasonUnknown
	}
}

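// preparedParams builds the shared ChatCompletionNewParams for both send and
// stream: model, messages, tools, the effective max-token budget, and (for
// reasoning models) the configured reasoning effort.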
func (o *openaiClient) preparedParams(messages []openai.ChatCompletionMessageParamUnion, tools []openai.ChatCompletionToolParam) openai.ChatCompletionNewParams {
	model := o.providerOptions.model(o.providerOptions.modelType)
	cfg := config.Get()

	modelConfig := cfg.Models[config.SelectedModelTypeLarge]
	if o.providerOptions.modelType == config.SelectedModelTypeSmall {
		modelConfig = cfg.Models[config.SelectedModelTypeSmall]
	}

	reasoningEffort := modelConfig.ReasoningEffort

	params := openai.ChatCompletionNewParams{
		Model:    openai.ChatModel(model.ID),
		Messages: messages,
		Tools:    tools,
	}

	maxTokens := model.DefaultMaxTokens
	if modelConfig.MaxTokens > 0 {
		maxTokens = modelConfig.MaxTokens
	}

	// Override max tokens if set in provider options
	if o.providerOptions.maxTokens > 0 {
		maxTokens = o.providerOptions.maxTokens
	}
	if model.CanReason {
		params.MaxCompletionTokens = openai.Int(maxTokens)
		switch reasoningEffort {
		case "low":
			params.ReasoningEffort = shared.ReasoningEffortLow
		case "medium":
			params.ReasoningEffort = shared.ReasoningEffortMedium
		case "high":
			params.ReasoningEffort = shared.ReasoningEffortHigh
		case "minimal":
			params.ReasoningEffort = shared.ReasoningEffort("minimal")
		default:
			params.ReasoningEffort = shared.ReasoningEffort(reasoningEffort)
		}
	} else {
		params.MaxTokens = openai.Int(maxTokens)
	}

	return params
}

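// send issues a single (non-streaming) chat completion request, retrying
// retryable failures with the backoff computed by shouldRetry.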
func (o *openaiClient) send(ctx context.Context, messages []message.Message, tools []tools.BaseTool) (response *ProviderResponse, err error) {
	params := o.preparedParams(o.convertMessages(messages), o.convertTools(tools))
	attempts := 0
	for {
		attempts++
		openaiResponse, err := o.client.Chat.Completions.New(
			ctx,
			params,
		)
		// If there is an error, check whether the call can be retried.
		if err != nil {
			retry, after, retryErr := o.shouldRetry(attempts, err)
			if retryErr != nil {
				return nil, retryErr
			}
			if retry {
				slog.Warn("Retrying due to rate limit", "attempt", attempts, "max_retries", maxRetries, "error", err)
				select {
				case <-ctx.Done():
					return nil, ctx.Err()
				case <-time.After(time.Duration(after) * time.Millisecond):
					continue
				}
			}
			return nil, err
		}

		if len(openaiResponse.Choices) == 0 {
			return nil, fmt.Errorf("received empty response from OpenAI API - check endpoint configuration")
		}

		content := ""
		if openaiResponse.Choices[0].Message.Content != "" {
			content = openaiResponse.Choices[0].Message.Content
		}

		toolCalls := o.toolCalls(*openaiResponse)
		finishReason := o.finishReason(string(openaiResponse.Choices[0].FinishReason))

		if len(toolCalls) > 0 {
			finishReason = message.FinishReasonToolUse
		}

		return &ProviderResponse{
			Content:      content,
			ToolCalls:    toolCalls,
			Usage:        o.usage(*openaiResponse),
			FinishReason: finishReason,
		}, nil
	}
}

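// stream issues a streaming chat completion request and fans results out on
// the returned channel as ProviderEvent values. It accumulates deltas into a
// full response, reconciles tool-call fragments (several OpenAI-compatible
// backends emit inconsistent indices or IDs), and retries retryable errors.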
func (o *openaiClient) stream(ctx context.Context, messages []message.Message, tools []tools.BaseTool) <-chan ProviderEvent {
	params := o.preparedParams(o.convertMessages(messages), o.convertTools(tools))
	params.StreamOptions = openai.ChatCompletionStreamOptionsParam{
		IncludeUsage: openai.Bool(true),
	}

	attempts := 0
	eventChan := make(chan ProviderEvent)

	go func() {
		for {
			attempts++
			// Kujtim: fixes an issue with Anthropic models on OpenRouter
			if len(params.Tools) == 0 {
				params.Tools = nil
			}
			openaiStream := o.client.Chat.Completions.NewStreaming(
				ctx,
				params,
			)

			acc := openai.ChatCompletionAccumulator{}
			currentContent := ""
			toolCalls := make([]message.ToolCall, 0)
			msgToolCalls := make(map[int64]openai.ChatCompletionMessageToolCall)
			toolMap := make(map[string]openai.ChatCompletionMessageToolCall)
			toolCallIDMap := make(map[string]string)
			for openaiStream.Next() {
				chunk := openaiStream.Current()
				// Kujtim: this is an issue with OpenRouter Qwen; it's sending -1 for the tool index
				if len(chunk.Choices) != 0 && len(chunk.Choices[0].Delta.ToolCalls) > 0 && chunk.Choices[0].Delta.ToolCalls[0].Index == -1 {
					chunk.Choices[0].Delta.ToolCalls[0].Index = 0
				}
				acc.AddChunk(chunk)
				for i, choice := range chunk.Choices {
					reasoning, ok := choice.Delta.JSON.ExtraFields["reasoning"]
					if ok && reasoning.Raw() != "" {
						reasoningStr := ""
						// Best-effort decode; a malformed reasoning payload is simply skipped.
						_ = json.Unmarshal([]byte(reasoning.Raw()), &reasoningStr)
						if reasoningStr != "" {
							eventChan <- ProviderEvent{
								Type:     EventThinkingDelta,
								Thinking: reasoningStr,
							}
						}
					}
					if choice.Delta.Content != "" {
						eventChan <- ProviderEvent{
							Type:    EventContentDelta,
							Content: choice.Delta.Content,
						}
						currentContent += choice.Delta.Content
					} else if len(choice.Delta.ToolCalls) > 0 {
						toolCall := choice.Delta.ToolCalls[0]
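						// Some backends apparently emit synthetic IDs like "functions.<name>";
						// remap each one to a stable UUID so later fragments keep matching.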
						if strings.HasPrefix(toolCall.ID, "functions.") {
							exID, ok := toolCallIDMap[toolCall.ID]
							if !ok {
								newID := uuid.NewString()
								toolCallIDMap[toolCall.ID] = newID
								toolCall.ID = newID
							} else {
								toolCall.ID = exID
							}
						}
						newToolCall := false
						if existingToolCall, ok := msgToolCalls[toolCall.Index]; ok { // tool call exists
							if toolCall.ID != "" && toolCall.ID != existingToolCall.ID {
								found := false
								// try to find the tool based on the ID
								for _, tool := range msgToolCalls {
									if tool.ID == toolCall.ID {
										existingToolCall.Function.Arguments += toolCall.Function.Arguments
										msgToolCalls[toolCall.Index] = existingToolCall
										toolMap[existingToolCall.ID] = existingToolCall
										found = true
									}
								}
								if !found {
									newToolCall = true
								}
							} else {
								existingToolCall.Function.Arguments += toolCall.Function.Arguments
								msgToolCalls[toolCall.Index] = existingToolCall
								toolMap[existingToolCall.ID] = existingToolCall
							}
						} else {
							newToolCall = true
						}
						if newToolCall { // new tool call
							if toolCall.ID == "" {
								toolCall.ID = uuid.NewString()
							}
							eventChan <- ProviderEvent{
								Type: EventToolUseStart,
								ToolCall: &message.ToolCall{
									ID:       toolCall.ID,
									Name:     toolCall.Function.Name,
									Finished: false,
								},
							}
							msgToolCalls[toolCall.Index] = openai.ChatCompletionMessageToolCall{
								ID:   toolCall.ID,
								Type: "function",
								Function: openai.ChatCompletionMessageToolCallFunction{
									Name:      toolCall.Function.Name,
									Arguments: toolCall.Function.Arguments,
								},
							}
							toolMap[toolCall.ID] = msgToolCalls[toolCall.Index]
						}
						accumulated := []openai.ChatCompletionMessageToolCall{}
						for _, tc := range toolMap {
							accumulated = append(accumulated, tc)
						}
						acc.Choices[i].Message.ToolCalls = accumulated
					}
				}
			}

			err := openaiStream.Err()
			if err == nil || errors.Is(err, io.EOF) {
				if len(acc.Choices) == 0 {
					eventChan <- ProviderEvent{
						Type:  EventError,
						Error: fmt.Errorf("received empty streaming response from OpenAI API - check endpoint configuration"),
					}
					return
				}

				resultFinishReason := acc.Choices[0].FinishReason
				if resultFinishReason == "" {
					// If the finish reason is empty, we assume it was a successful completion
					// INFO: this happens with OpenRouter for some reason
					resultFinishReason = "stop"
				}
				// Stream completed successfully
				finishReason := o.finishReason(resultFinishReason)
				if len(acc.Choices[0].Message.ToolCalls) > 0 {
					toolCalls = append(toolCalls, o.toolCalls(acc.ChatCompletion)...)
				}
				if len(toolCalls) > 0 {
					finishReason = message.FinishReasonToolUse
				}

				eventChan <- ProviderEvent{
					Type: EventComplete,
					Response: &ProviderResponse{
						Content:      currentContent,
						ToolCalls:    toolCalls,
						Usage:        o.usage(acc.ChatCompletion),
						FinishReason: finishReason,
					},
				}
				close(eventChan)
				return
			}

			// If there is an error, check whether the call can be retried.
			retry, after, retryErr := o.shouldRetry(attempts, err)
			if retryErr != nil {
				eventChan <- ProviderEvent{Type: EventError, Error: retryErr}
				close(eventChan)
				return
			}
			if retry {
				slog.Warn("Retrying due to rate limit", "attempt", attempts, "max_retries", maxRetries, "error", err)
				select {
				case <-ctx.Done():
					// context cancelled
					if ctx.Err() != nil {
						eventChan <- ProviderEvent{Type: EventError, Error: ctx.Err()}
					}
					close(eventChan)
					return
				case <-time.After(time.Duration(after) * time.Millisecond):
					continue
				}
			}
			eventChan <- ProviderEvent{Type: EventError, Error: err}
			close(eventChan)
			return
		}
	}()

	return eventChan
}

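// shouldRetry reports whether a failed request should be retried and, if so,
// after how many milliseconds. Context cancellation, 401s, and other
// non-retryable API errors are returned to the caller immediately; 429 and
// 500 responses are retried up to maxRetries.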
func (o *openaiClient) shouldRetry(attempts int, err error) (bool, int64, error) {
	if attempts > maxRetries {
		return false, 0, fmt.Errorf("maximum retry attempts reached for rate limit: %d retries", maxRetries)
	}
	if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) {
		return false, 0, err
	}
	var apiErr *openai.Error
	retryMs := 0
	retryAfterValues := []string{}
	if errors.As(err, &apiErr) {
		// Check for token expiration (401 Unauthorized)
		if apiErr.StatusCode == 401 {
			return false, 0, err
		}

		if apiErr.StatusCode != 429 && apiErr.StatusCode != 500 {
			return false, 0, err
		}

		retryAfterValues = apiErr.Response.Header.Values("Retry-After")
	}

	if apiErr != nil {
		slog.Warn("OpenAI API error", "status_code", apiErr.StatusCode, "message", apiErr.Message, "type", apiErr.Type)
		if len(retryAfterValues) > 0 {
			slog.Warn("Retry-After header", "values", retryAfterValues)
		}
	} else {
		slog.Error("OpenAI API error", "error", err.Error(), "attempt", attempts, "max_retries", maxRetries)
	}

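	// Exponential backoff starting at 2s, padded by a fixed 20% margin; a
	// Retry-After header (in seconds) takes precedence over the computed delay.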
	backoffMs := 2000 * (1 << (attempts - 1))
	jitterMs := int(float64(backoffMs) * 0.2)
	retryMs = backoffMs + jitterMs
	if len(retryAfterValues) > 0 {
		if _, err := fmt.Sscanf(retryAfterValues[0], "%d", &retryMs); err == nil {
			retryMs = retryMs * 1000
		}
	}
	return true, int64(retryMs), nil
}

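// toolCalls extracts completed tool calls from a (possibly accumulated)
// chat completion.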
func (o *openaiClient) toolCalls(completion openai.ChatCompletion) []message.ToolCall {
	var toolCalls []message.ToolCall

	if len(completion.Choices) > 0 && len(completion.Choices[0].Message.ToolCalls) > 0 {
		for _, call := range completion.Choices[0].Message.ToolCalls {
			// The accumulator sometimes produces entries with an empty function name; skip them.
			if call.Function.Name == "" {
				continue
			}
			toolCall := message.ToolCall{
				ID:       call.ID,
				Name:     call.Function.Name,
				Input:    call.Function.Arguments,
				Type:     "function",
				Finished: true,
			}
			toolCalls = append(toolCalls, toolCall)
		}
	}

	return toolCalls
}

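// usage converts SDK usage numbers into TokenUsage. Cached prompt tokens are
// subtracted from the input count and reported separately as cache reads.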
func (o *openaiClient) usage(completion openai.ChatCompletion) TokenUsage {
	cachedTokens := completion.Usage.PromptTokensDetails.CachedTokens
	inputTokens := completion.Usage.PromptTokens - cachedTokens

	return TokenUsage{
		InputTokens:         inputTokens,
		OutputTokens:        completion.Usage.CompletionTokens,
		CacheCreationTokens: 0, // OpenAI doesn't provide this directly
		CacheReadTokens:     cachedTokens,
	}
}

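// Model returns the catwalk model currently selected for this provider.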
func (o *openaiClient) Model() catwalk.Model {
	return o.providerOptions.model(o.providerOptions.modelType)
}