openai.go

package provider

import (
	"context"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"log/slog"
	"slices"
	"strings"
	"time"

	"github.com/charmbracelet/catwalk/pkg/catwalk"
	"github.com/charmbracelet/crush/internal/config"
	"github.com/charmbracelet/crush/internal/llm/tools"
	"github.com/charmbracelet/crush/internal/log"
	"github.com/charmbracelet/crush/internal/message"
	"github.com/google/uuid"
	"github.com/openai/openai-go/v2"
	"github.com/openai/openai-go/v2/option"
	"github.com/openai/openai-go/v2/packages/param"
	"github.com/openai/openai-go/v2/shared"
)

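// openaiClient implements ProviderClient on top of the official openai-go
// SDK. It also serves OpenAI-compatible endpoints such as OpenRouter, with
// a few provider-specific workarounds noted inline below.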
type openaiClient struct {
	providerOptions providerClientOptions
	client          openai.Client
}

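// OpenAIClient is the exported name for a ProviderClient that talks to an
// OpenAI-compatible chat completions API.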
type OpenAIClient ProviderClient

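// newOpenAIClient constructs an OpenAIClient from the given provider
// options; the underlying SDK client is assembled by createOpenAIClient.
// A minimal usage sketch, assuming ProviderClient exposes the send/stream
// methods implemented below:
//
//	client := newOpenAIClient(opts)
//	for event := range client.stream(ctx, messages, nil) {
//		// consume ProviderEvents
//	}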
func newOpenAIClient(opts providerClientOptions) OpenAIClient {
	return &openaiClient{
		providerOptions: opts,
		client:          createOpenAIClient(opts),
	}
}

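// createOpenAIClient builds the SDK client, wiring in the API key, a
// resolved base URL (silently skipped if resolution fails), a debug-logging
// HTTP client when enabled, and any extra headers or JSON body fields from
// the provider options.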
func createOpenAIClient(opts providerClientOptions) openai.Client {
	openaiClientOptions := []option.RequestOption{}
	if opts.apiKey != "" {
		openaiClientOptions = append(openaiClientOptions, option.WithAPIKey(opts.apiKey))
	}
	if opts.baseURL != "" {
		resolvedBaseURL, err := config.Get().Resolve(opts.baseURL)
		if err == nil {
			openaiClientOptions = append(openaiClientOptions, option.WithBaseURL(resolvedBaseURL))
		}
	}

	if config.Get().Options.Debug {
		httpClient := log.NewHTTPClient()
		openaiClientOptions = append(openaiClientOptions, option.WithHTTPClient(httpClient))
	}

	for key, value := range opts.extraHeaders {
		openaiClientOptions = append(openaiClientOptions, option.WithHeader(key, value))
	}

	for extraKey, extraValue := range opts.extraBody {
		openaiClientOptions = append(openaiClientOptions, option.WithJSONSet(extraKey, extraValue))
	}

	return openai.NewClient(openaiClientOptions...)
}

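// convertMessages translates internal message.Message values into openai-go
// message params, prepending the system prompt. For Anthropic models routed
// through OpenRouter it attaches ephemeral cache_control blocks (Anthropic
// prompt caching) to the system prompt and to the most recent messages,
// unless caching is disabled.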
func (o *openaiClient) convertMessages(messages []message.Message) (openaiMessages []openai.ChatCompletionMessageParamUnion) {
	isAnthropicModel := o.providerOptions.config.ID == string(catwalk.InferenceProviderOpenRouter) && strings.HasPrefix(o.Model().ID, "anthropic/")
	// Add the system message first.
	systemMessage := o.providerOptions.systemMessage
	if o.providerOptions.systemPromptPrefix != "" {
		systemMessage = o.providerOptions.systemPromptPrefix + "\n" + systemMessage
	}

	system := openai.SystemMessage(systemMessage)
	if isAnthropicModel && !o.providerOptions.disableCache {
		systemTextBlock := openai.ChatCompletionContentPartTextParam{Text: systemMessage}
		systemTextBlock.SetExtraFields(
			map[string]any{
				"cache_control": map[string]string{
					"type": "ephemeral",
				},
			},
		)
		var content []openai.ChatCompletionContentPartTextParam
		content = append(content, systemTextBlock)
		system = openai.SystemMessage(content)
	}
	openaiMessages = append(openaiMessages, system)

	for i, msg := range messages {
		// Only the last two messages are candidates for prompt caching.
		cache := i > len(messages)-3
		switch msg.Role {
		case message.User:
			var content []openai.ChatCompletionContentPartUnionParam

			textBlock := openai.ChatCompletionContentPartTextParam{Text: msg.Content().String()}
			content = append(content, openai.ChatCompletionContentPartUnionParam{OfText: &textBlock})
			hasBinaryContent := false
			for _, binaryContent := range msg.BinaryContent() {
				hasBinaryContent = true
				imageURL := openai.ChatCompletionContentPartImageImageURLParam{URL: binaryContent.String(catwalk.InferenceProviderOpenAI)}
				imageBlock := openai.ChatCompletionContentPartImageParam{ImageURL: imageURL}

				content = append(content, openai.ChatCompletionContentPartUnionParam{OfImageURL: &imageBlock})
			}
			if cache && !o.providerOptions.disableCache && isAnthropicModel {
				textBlock.SetExtraFields(map[string]any{
					"cache_control": map[string]string{
						"type": "ephemeral",
					},
				})
			}
			if hasBinaryContent || (isAnthropicModel && !o.providerOptions.disableCache) {
				openaiMessages = append(openaiMessages, openai.UserMessage(content))
			} else {
				openaiMessages = append(openaiMessages, openai.UserMessage(msg.Content().String()))
			}

		case message.Assistant:
			assistantMsg := openai.ChatCompletionAssistantMessageParam{
				Role: "assistant",
			}

			hasContent := false
			if msg.Content().String() != "" {
				hasContent = true
				textBlock := openai.ChatCompletionContentPartTextParam{Text: msg.Content().String()}
				if cache && !o.providerOptions.disableCache && isAnthropicModel {
					textBlock.SetExtraFields(map[string]any{
						"cache_control": map[string]string{
							"type": "ephemeral",
						},
					})
				}
				assistantMsg.Content = openai.ChatCompletionAssistantMessageParamContentUnion{
					OfArrayOfContentParts: []openai.ChatCompletionAssistantMessageParamContentArrayOfContentPartUnion{
						{
							OfText: &textBlock,
						},
					},
				}
				if !isAnthropicModel {
					assistantMsg.Content = openai.ChatCompletionAssistantMessageParamContentUnion{
						OfString: param.NewOpt(msg.Content().String()),
					}
				}
			}

			if len(msg.ToolCalls()) > 0 {
				hasContent = true
				assistantMsg.ToolCalls = make([]openai.ChatCompletionMessageToolCallUnionParam, len(msg.ToolCalls()))
				for i, call := range msg.ToolCalls() {
					assistantMsg.ToolCalls[i] = openai.ChatCompletionMessageToolCallUnionParam{
						OfFunction: &openai.ChatCompletionMessageFunctionToolCallParam{
							ID:   call.ID,
							Type: "function",
							Function: openai.ChatCompletionMessageFunctionToolCallFunctionParam{
								Name:      call.Name,
								Arguments: call.Input,
							},
						},
					}
				}
			}
			// Skip assistant messages that carry neither text nor tool calls.
			if !hasContent {
				continue
			}

			openaiMessages = append(openaiMessages, openai.ChatCompletionMessageParamUnion{
				OfAssistant: &assistantMsg,
			})

		case message.Tool:
			for _, result := range msg.ToolResults() {
				openaiMessages = append(openaiMessages,
					openai.ToolMessage(result.Content, result.ToolCallID),
				)
			}
		}
	}

	return
}

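// convertTools maps internal tool definitions onto OpenAI function
// definitions with a JSON Schema "object" parameter envelope.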
func (o *openaiClient) convertTools(tools []tools.BaseTool) []openai.FunctionDefinitionParam {
	openaiTools := make([]openai.FunctionDefinitionParam, len(tools))

	for i, tool := range tools {
		info := tool.Info()
		openaiTools[i] = openai.FunctionDefinitionParam{
			Name:        info.Name,
			Description: openai.String(info.Description),
			Parameters: openai.FunctionParameters{
				"type":       "object",
				"properties": info.Parameters,
				"required":   info.Required,
			},
		}
	}

	return openaiTools
}

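// finishReason maps an OpenAI finish_reason string onto the internal
// message.FinishReason enum.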
func (o *openaiClient) finishReason(reason string) message.FinishReason {
	switch reason {
	case "stop":
		return message.FinishReasonEndTurn
	case "length":
		return message.FinishReasonMaxTokens
	case "tool_calls":
		return message.FinishReasonToolUse
	default:
		return message.FinishReasonUnknown
	}
}

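// preparedParams assembles the ChatCompletionNewParams shared by send and
// stream: model ID, messages, tools, the effective max token budget, and,
// for reasoning-capable models, the configured reasoning effort.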
func (o *openaiClient) preparedParams(messages []openai.ChatCompletionMessageParamUnion, tools []openai.FunctionDefinitionParam) openai.ChatCompletionNewParams {
	model := o.providerOptions.model(o.providerOptions.modelType)
	cfg := config.Get()

	modelConfig := cfg.Models[config.SelectedModelTypeLarge]
	if o.providerOptions.modelType == config.SelectedModelTypeSmall {
		modelConfig = cfg.Models[config.SelectedModelTypeSmall]
	}

	reasoningEffort := modelConfig.ReasoningEffort

	params := openai.ChatCompletionNewParams{
		Model:    openai.ChatModel(model.ID),
		Messages: messages,
	}

	for _, t := range tools {
		params.Tools = append(params.Tools, openai.ChatCompletionFunctionTool(t))
	}

	maxTokens := model.DefaultMaxTokens
	if modelConfig.MaxTokens > 0 {
		maxTokens = modelConfig.MaxTokens
	}

	// Override max tokens if set in provider options.
	if o.providerOptions.maxTokens > 0 {
		maxTokens = o.providerOptions.maxTokens
	}
	if model.CanReason {
		params.MaxCompletionTokens = openai.Int(maxTokens)
		switch reasoningEffort {
		case "low":
			params.ReasoningEffort = shared.ReasoningEffortLow
		case "medium":
			params.ReasoningEffort = shared.ReasoningEffortMedium
		case "high":
			params.ReasoningEffort = shared.ReasoningEffortHigh
		case "minimal":
			params.ReasoningEffort = shared.ReasoningEffort("minimal")
		default:
			params.ReasoningEffort = shared.ReasoningEffort(reasoningEffort)
		}
	} else {
		params.MaxTokens = openai.Int(maxTokens)
	}

	return params
}

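// send performs a single non-streaming chat completion request, retrying
// with backoff when shouldRetry allows it.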
func (o *openaiClient) send(ctx context.Context, messages []message.Message, tools []tools.BaseTool) (response *ProviderResponse, err error) {
	params := o.preparedParams(o.convertMessages(messages), o.convertTools(tools))
	attempts := 0
	for {
		attempts++
		openaiResponse, err := o.client.Chat.Completions.New(
			ctx,
			params,
		)
		// If the request failed, check whether the call can be retried.
		if err != nil {
			retry, after, retryErr := o.shouldRetry(attempts, err)
			if retryErr != nil {
				return nil, retryErr
			}
			if retry {
				slog.Warn("Retrying due to rate limit", "attempt", attempts, "max_retries", maxRetries)
				select {
				case <-ctx.Done():
					return nil, ctx.Err()
				case <-time.After(time.Duration(after) * time.Millisecond):
					continue
				}
			}
			// Not retryable: surface the original error.
			return nil, err
		}

		if len(openaiResponse.Choices) == 0 {
			return nil, fmt.Errorf("received empty response from OpenAI API - check endpoint configuration")
		}

		content := ""
		if openaiResponse.Choices[0].Message.Content != "" {
			content = openaiResponse.Choices[0].Message.Content
		}

		toolCalls := o.toolCalls(*openaiResponse)
		finishReason := o.finishReason(string(openaiResponse.Choices[0].FinishReason))

		if len(toolCalls) > 0 {
			finishReason = message.FinishReasonToolUse
		}

		return &ProviderResponse{
			Content:      content,
			ToolCalls:    toolCalls,
			Usage:        o.usage(*openaiResponse),
			FinishReason: finishReason,
		}, nil
	}
}

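// stream sends the conversation as a streaming chat completion request and
// returns a channel of ProviderEvents: thinking deltas, content deltas,
// tool-call starts, and a final EventComplete (or EventError). The channel
// is closed once the stream finishes or fails permanently. A consumption
// sketch (hypothetical caller, not part of this file):
//
//	for event := range client.stream(ctx, messages, tools) {
//		switch event.Type {
//		case EventContentDelta:
//			fmt.Print(event.Content)
//		case EventError:
//			// handle event.Error
//		}
//	}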
func (o *openaiClient) stream(ctx context.Context, messages []message.Message, tools []tools.BaseTool) <-chan ProviderEvent {
	params := o.preparedParams(o.convertMessages(messages), o.convertTools(tools))
	params.StreamOptions = openai.ChatCompletionStreamOptionsParam{
		IncludeUsage: openai.Bool(true),
	}

	attempts := 0
	eventChan := make(chan ProviderEvent)

	go func() {
		for {
			attempts++
			// Kujtim: fixes an issue with Anthropic models on OpenRouter;
			// send nil rather than an empty tools slice.
			if len(params.Tools) == 0 {
				params.Tools = nil
			}
			openaiStream := o.client.Chat.Completions.NewStreaming(
				ctx,
				params,
			)

			acc := openai.ChatCompletionAccumulator{}
			currentContent := ""
			toolCalls := make([]message.ToolCall, 0)
			var msgToolCalls []openai.ChatCompletionMessageToolCallUnion
			for openaiStream.Next() {
				chunk := openaiStream.Current()
				// Kujtim: OpenRouter's Qwen models sometimes send -1 as the
				// tool call index; normalize it to 0.
				if len(chunk.Choices) > 0 && len(chunk.Choices[0].Delta.ToolCalls) > 0 && chunk.Choices[0].Delta.ToolCalls[0].Index == -1 {
					chunk.Choices[0].Delta.ToolCalls[0].Index = 0
				}
				acc.AddChunk(chunk)
				for i, choice := range chunk.Choices {
					reasoning, ok := choice.Delta.JSON.ExtraFields["reasoning"]
					if ok && reasoning.Raw() != "" {
						reasoningStr := ""
						// Best-effort decode; an empty string is skipped below.
						json.Unmarshal([]byte(reasoning.Raw()), &reasoningStr)
						if reasoningStr != "" {
							eventChan <- ProviderEvent{
								Type:     EventThinkingDelta,
								Thinking: reasoningStr,
							}
						}
					}
					if choice.Delta.Content != "" {
						eventChan <- ProviderEvent{
							Type:    EventContentDelta,
							Content: choice.Delta.Content,
						}
						currentContent += choice.Delta.Content
					} else if len(choice.Delta.ToolCalls) > 0 {
						toolCall := choice.Delta.ToolCalls[0]
						newToolCall := false
						if len(msgToolCalls)-1 >= int(toolCall.Index) { // tool call exists
							existingToolCall := msgToolCalls[toolCall.Index]
							if toolCall.ID != "" && toolCall.ID != existingToolCall.ID {
								found := false
								// Try to find the tool call by ID instead.
								for i, tool := range msgToolCalls {
									if tool.ID == toolCall.ID {
										msgToolCalls[i].Function.Arguments += toolCall.Function.Arguments
										found = true
									}
								}
								if !found {
									newToolCall = true
								}
							} else {
								msgToolCalls[toolCall.Index].Function.Arguments += toolCall.Function.Arguments
							}
						} else {
							newToolCall = true
						}
						if newToolCall {
							if toolCall.ID == "" {
								toolCall.ID = uuid.NewString()
							}
							eventChan <- ProviderEvent{
								Type: EventToolUseStart,
								ToolCall: &message.ToolCall{
									ID:       toolCall.ID,
									Name:     toolCall.Function.Name,
									Finished: false,
								},
							}
							msgToolCalls = append(msgToolCalls, openai.ChatCompletionMessageToolCallUnion{
								ID:   toolCall.ID,
								Type: "function",
								Function: openai.ChatCompletionMessageFunctionToolCallFunction{
									Name:      toolCall.Function.Name,
									Arguments: toolCall.Function.Arguments,
								},
							})
						}
					}
					acc.Choices[i].Message.ToolCalls = slices.Clone(msgToolCalls)
				}
			}

			err := openaiStream.Err()
			if err == nil || errors.Is(err, io.EOF) {
				if len(acc.Choices) == 0 {
					eventChan <- ProviderEvent{
						Type:  EventError,
						Error: fmt.Errorf("received empty streaming response from OpenAI API - check endpoint configuration"),
					}
					return
				}

				resultFinishReason := acc.Choices[0].FinishReason
				if resultFinishReason == "" {
					// If the finish reason is empty, assume the completion succeeded.
					// INFO: OpenRouter sometimes omits the finish reason.
					resultFinishReason = "stop"
				}
				// Stream completed successfully.
				finishReason := o.finishReason(resultFinishReason)
				if len(acc.Choices[0].Message.ToolCalls) > 0 {
					toolCalls = append(toolCalls, o.toolCalls(acc.ChatCompletion)...)
				}
				if len(toolCalls) > 0 {
					finishReason = message.FinishReasonToolUse
				}

				eventChan <- ProviderEvent{
					Type: EventComplete,
					Response: &ProviderResponse{
						Content:      currentContent,
						ToolCalls:    toolCalls,
						Usage:        o.usage(acc.ChatCompletion),
						FinishReason: finishReason,
					},
				}
				close(eventChan)
				return
			}

			// If the stream failed, check whether the call can be retried.
			retry, after, retryErr := o.shouldRetry(attempts, err)
			if retryErr != nil {
				eventChan <- ProviderEvent{Type: EventError, Error: retryErr}
				close(eventChan)
				return
			}
			if retry {
				slog.Warn("Retrying due to rate limit", "attempt", attempts, "max_retries", maxRetries)
				select {
				case <-ctx.Done():
					// Context cancelled mid-retry: report it before shutting down.
					if ctx.Err() != nil {
						eventChan <- ProviderEvent{Type: EventError, Error: ctx.Err()}
					}
					close(eventChan)
					return
				case <-time.After(time.Duration(after) * time.Millisecond):
					continue
				}
			}
			// Not retryable: surface the original error.
			eventChan <- ProviderEvent{Type: EventError, Error: err}
			close(eventChan)
			return
		}
	}()

	return eventChan
}

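// shouldRetry reports whether a failed request may be retried and, if so,
// after how many milliseconds. A 401 re-resolves the API key and rebuilds
// the client; 429 and 500 responses back off exponentially (2s, 4s, 8s, ...
// plus a flat 20% cushion), honoring a Retry-After header when present.
// Any other API error is returned to the caller unchanged.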
func (o *openaiClient) shouldRetry(attempts int, err error) (bool, int64, error) {
	if attempts > maxRetries {
		return false, 0, fmt.Errorf("maximum retry attempts reached for rate limit: %d retries", maxRetries)
	}
	if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) {
		return false, 0, err
	}
	var apiErr *openai.Error
	retryMs := 0
	retryAfterValues := []string{}
	if errors.As(err, &apiErr) {
		// Check for token expiration (401 Unauthorized): re-resolve the API
		// key, rebuild the client, and retry immediately.
		if apiErr.StatusCode == 401 {
			o.providerOptions.apiKey, err = config.Get().Resolve(o.providerOptions.config.APIKey)
			if err != nil {
				return false, 0, fmt.Errorf("failed to resolve API key: %w", err)
			}
			o.client = createOpenAIClient(o.providerOptions)
			return true, 0, nil
		}

		if apiErr.StatusCode != 429 && apiErr.StatusCode != 500 {
			return false, 0, err
		}

		retryAfterValues = apiErr.Response.Header.Values("Retry-After")
	}

	if apiErr != nil {
		slog.Warn("OpenAI API error", "status_code", apiErr.StatusCode, "message", apiErr.Message, "type", apiErr.Type)
		if len(retryAfterValues) > 0 {
			slog.Warn("Retry-After header", "values", retryAfterValues)
		}
	} else {
		slog.Error("OpenAI API error", "error", err.Error(), "attempt", attempts, "max_retries", maxRetries)
	}

	// Exponential backoff with a flat 20% cushion (not randomized jitter).
	backoffMs := 2000 * (1 << (attempts - 1))
	jitterMs := int(float64(backoffMs) * 0.2)
	retryMs = backoffMs + jitterMs
	if len(retryAfterValues) > 0 {
		// Retry-After is expressed in seconds; convert to milliseconds.
		if _, err := fmt.Sscanf(retryAfterValues[0], "%d", &retryMs); err == nil {
			retryMs = retryMs * 1000
		}
	}
	return true, int64(retryMs), nil
}

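// toolCalls extracts the tool calls from the first choice of a completion
// into internal message.ToolCall values.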
func (o *openaiClient) toolCalls(completion openai.ChatCompletion) []message.ToolCall {
	var toolCalls []message.ToolCall

	if len(completion.Choices) > 0 && len(completion.Choices[0].Message.ToolCalls) > 0 {
		for _, call := range completion.Choices[0].Message.ToolCalls {
			toolCall := message.ToolCall{
				ID:       call.ID,
				Name:     call.Function.Name,
				Input:    call.Function.Arguments,
				Type:     "function",
				Finished: true,
			}
			toolCalls = append(toolCalls, toolCall)
		}
	}

	return toolCalls
}

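// usage converts OpenAI token accounting into TokenUsage, treating cached
// prompt tokens as cache reads and subtracting them from the input count.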
func (o *openaiClient) usage(completion openai.ChatCompletion) TokenUsage {
	cachedTokens := completion.Usage.PromptTokensDetails.CachedTokens
	inputTokens := completion.Usage.PromptTokens - cachedTokens

	return TokenUsage{
		InputTokens:         inputTokens,
		OutputTokens:        completion.Usage.CompletionTokens,
		CacheCreationTokens: 0, // OpenAI doesn't provide this directly
		CacheReadTokens:     cachedTokens,
	}
}

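// Model returns the catwalk model currently selected for this client's
// model type.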
func (o *openaiClient) Model() catwalk.Model {
	return o.providerOptions.model(o.providerOptions.modelType)
}