package provider

import (
	"context"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"time"

	"github.com/charmbracelet/crush/internal/config"
	"github.com/charmbracelet/crush/internal/fur/provider"
	"github.com/charmbracelet/crush/internal/llm/tools"
	"github.com/charmbracelet/crush/internal/logging"
	"github.com/charmbracelet/crush/internal/message"
	"github.com/openai/openai-go"
	"github.com/openai/openai-go/option"
	"github.com/openai/openai-go/shared"
)

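// openaiClient is a ProviderClient implementation backed by the OpenAI
// Chat Completions API.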
type openaiClient struct {
	providerOptions providerClientOptions
	client          openai.Client
}

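// OpenAIClient is a ProviderClient specialized for OpenAI-compatible APIs.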
type OpenAIClient ProviderClient

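// newOpenAIClient constructs an OpenAI SDK client from the provider options,
// applying the API key, base URL, and any extra headers when set.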
func newOpenAIClient(opts providerClientOptions) OpenAIClient {
	openaiClientOptions := []option.RequestOption{}
	if opts.apiKey != "" {
		openaiClientOptions = append(openaiClientOptions, option.WithAPIKey(opts.apiKey))
	}
	if opts.baseURL != "" {
		openaiClientOptions = append(openaiClientOptions, option.WithBaseURL(opts.baseURL))
	}
	// Ranging over a nil map is a no-op, so no nil check is needed here.
	for key, value := range opts.extraHeaders {
		openaiClientOptions = append(openaiClientOptions, option.WithHeader(key, value))
	}

	client := openai.NewClient(openaiClientOptions...)
	return &openaiClient{
		providerOptions: opts,
		client:          client,
	}
}

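// convertMessages translates internal messages into the OpenAI chat format.
// The configured system message is always sent first; user messages carry
// text and image parts, assistant messages carry text and tool calls, and
// tool messages carry tool results keyed by tool call ID.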
func (o *openaiClient) convertMessages(messages []message.Message) (openaiMessages []openai.ChatCompletionMessageParamUnion) {
	// Add the system message first
	openaiMessages = append(openaiMessages, openai.SystemMessage(o.providerOptions.systemMessage))

	for _, msg := range messages {
		switch msg.Role {
		case message.User:
			var content []openai.ChatCompletionContentPartUnionParam
			textBlock := openai.ChatCompletionContentPartTextParam{Text: msg.Content().String()}
			content = append(content, openai.ChatCompletionContentPartUnionParam{OfText: &textBlock})
			for _, binaryContent := range msg.BinaryContent() {
				imageURL := openai.ChatCompletionContentPartImageImageURLParam{URL: binaryContent.String(provider.InferenceProviderOpenAI)}
				imageBlock := openai.ChatCompletionContentPartImageParam{ImageURL: imageURL}
				content = append(content, openai.ChatCompletionContentPartUnionParam{OfImageURL: &imageBlock})
			}

			openaiMessages = append(openaiMessages, openai.UserMessage(content))

		case message.Assistant:
			assistantMsg := openai.ChatCompletionAssistantMessageParam{
				Role: "assistant",
			}

			if msg.Content().String() != "" {
				assistantMsg.Content = openai.ChatCompletionAssistantMessageParamContentUnion{
					OfString: openai.String(msg.Content().String()),
				}
			}

			if len(msg.ToolCalls()) > 0 {
				assistantMsg.ToolCalls = make([]openai.ChatCompletionMessageToolCallParam, len(msg.ToolCalls()))
				for i, call := range msg.ToolCalls() {
					assistantMsg.ToolCalls[i] = openai.ChatCompletionMessageToolCallParam{
						ID:   call.ID,
						Type: "function",
						Function: openai.ChatCompletionMessageToolCallFunctionParam{
							Name:      call.Name,
							Arguments: call.Input,
						},
					}
				}
			}

			openaiMessages = append(openaiMessages, openai.ChatCompletionMessageParamUnion{
				OfAssistant: &assistantMsg,
			})

		case message.Tool:
			for _, result := range msg.ToolResults() {
				openaiMessages = append(openaiMessages,
					openai.ToolMessage(result.Content, result.ToolCallID),
				)
			}
		}
	}

	return
}

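// convertTools maps internal tool definitions onto OpenAI function-call tool
// parameters, wrapping each tool's parameters in a JSON Schema object.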
func (o *openaiClient) convertTools(tools []tools.BaseTool) []openai.ChatCompletionToolParam {
	openaiTools := make([]openai.ChatCompletionToolParam, len(tools))

	for i, tool := range tools {
		info := tool.Info()
		openaiTools[i] = openai.ChatCompletionToolParam{
			Function: openai.FunctionDefinitionParam{
				Name:        info.Name,
				Description: openai.String(info.Description),
				Parameters: openai.FunctionParameters{
					"type":       "object",
					"properties": info.Parameters,
					"required":   info.Required,
				},
			},
		}
	}

	return openaiTools
}

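// finishReason maps an OpenAI finish reason onto the internal FinishReason type.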
func (o *openaiClient) finishReason(reason string) message.FinishReason {
	switch reason {
	case "stop":
		return message.FinishReasonEndTurn
	case "length":
		return message.FinishReasonMaxTokens
	case "tool_calls":
		return message.FinishReasonToolUse
	default:
		return message.FinishReasonUnknown
	}
}

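// preparedParams assembles the chat completion request parameters for the
// configured model, applying max-token and reasoning-effort overrides from
// the application config.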
func (o *openaiClient) preparedParams(messages []openai.ChatCompletionMessageParamUnion, tools []openai.ChatCompletionToolParam) openai.ChatCompletionNewParams {
	model := o.providerOptions.model(o.providerOptions.modelType)
	cfg := config.Get()

	modelConfig := cfg.Models.Large
	if o.providerOptions.modelType == config.SmallModel {
		modelConfig = cfg.Models.Small
	}

	// The config may override the model's default reasoning effort.
	reasoningEffort := model.ReasoningEffort
	if modelConfig.ReasoningEffort != "" {
		reasoningEffort = modelConfig.ReasoningEffort
	}

	params := openai.ChatCompletionNewParams{
		Model:    openai.ChatModel(model.ID),
		Messages: messages,
		Tools:    tools,
	}

	maxTokens := model.DefaultMaxTokens
	if modelConfig.MaxTokens > 0 {
		maxTokens = modelConfig.MaxTokens
	}
	// Reasoning models take max_completion_tokens plus a reasoning effort;
	// all other models take the legacy max_tokens parameter.
	if model.CanReason {
		params.MaxCompletionTokens = openai.Int(maxTokens)
		switch reasoningEffort {
		case "low":
			params.ReasoningEffort = shared.ReasoningEffortLow
		case "medium":
			params.ReasoningEffort = shared.ReasoningEffortMedium
		case "high":
			params.ReasoningEffort = shared.ReasoningEffortHigh
		default:
			params.ReasoningEffort = shared.ReasoningEffortMedium
		}
	} else {
		params.MaxTokens = openai.Int(maxTokens)
	}

	return params
}

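// send performs a single non-streaming chat completion request, retrying on
// rate limits and transient server errors.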
func (o *openaiClient) send(ctx context.Context, messages []message.Message, tools []tools.BaseTool) (response *ProviderResponse, err error) {
	params := o.preparedParams(o.convertMessages(messages), o.convertTools(tools))
	cfg := config.Get()
	if cfg.Options.Debug {
		jsonData, _ := json.Marshal(params)
		logging.Debug("Prepared messages", "messages", string(jsonData))
	}
	attempts := 0
	for {
		attempts++
		openaiResponse, err := o.client.Chat.Completions.New(
			ctx,
			params,
		)
		// On error, check whether the request can be retried before giving up.
		if err != nil {
			retry, after, retryErr := o.shouldRetry(attempts, err)
			if retryErr != nil {
				return nil, retryErr
			}
			if retry {
				logging.WarnPersist(fmt.Sprintf("Retrying due to rate limit... attempt %d of %d", attempts, maxRetries), logging.PersistTimeArg, time.Millisecond*time.Duration(after+100))
				select {
				case <-ctx.Done():
					return nil, ctx.Err()
				case <-time.After(time.Duration(after) * time.Millisecond):
					continue
				}
			}
			return nil, err
		}

		// Guard against a response with no choices before indexing into it.
		if len(openaiResponse.Choices) == 0 {
			return nil, errors.New("received an empty response from the OpenAI API")
		}

		content := openaiResponse.Choices[0].Message.Content

		toolCalls := o.toolCalls(*openaiResponse)
		finishReason := o.finishReason(string(openaiResponse.Choices[0].FinishReason))

		if len(toolCalls) > 0 {
			finishReason = message.FinishReasonToolUse
		}

		return &ProviderResponse{
			Content:      content,
			ToolCalls:    toolCalls,
			Usage:        o.usage(*openaiResponse),
			FinishReason: finishReason,
		}, nil
	}
}

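// stream starts a streaming chat completion and returns a channel of
// provider events. Content deltas are emitted as they arrive; the channel
// is closed after a completion, an error, or context cancellation.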
func (o *openaiClient) stream(ctx context.Context, messages []message.Message, tools []tools.BaseTool) <-chan ProviderEvent {
	params := o.preparedParams(o.convertMessages(messages), o.convertTools(tools))
	params.StreamOptions = openai.ChatCompletionStreamOptionsParam{
		IncludeUsage: openai.Bool(true),
	}

	cfg := config.Get()
	if cfg.Options.Debug {
		jsonData, _ := json.Marshal(params)
		logging.Debug("Prepared messages", "messages", string(jsonData))
	}

	attempts := 0
	eventChan := make(chan ProviderEvent)

	go func() {
		for {
			attempts++
			openaiStream := o.client.Chat.Completions.NewStreaming(
				ctx,
				params,
			)

			acc := openai.ChatCompletionAccumulator{}
			currentContent := ""
			toolCalls := make([]message.ToolCall, 0)

			for openaiStream.Next() {
				chunk := openaiStream.Current()
				acc.AddChunk(chunk)

				for _, choice := range chunk.Choices {
					if choice.Delta.Content != "" {
						eventChan <- ProviderEvent{
							Type:    EventContentDelta,
							Content: choice.Delta.Content,
						}
						currentContent += choice.Delta.Content
					}
				}
			}

			err := openaiStream.Err()
			if err == nil || errors.Is(err, io.EOF) {
				// Stream completed successfully. Guard against an empty
				// accumulated response before indexing into the choices.
				finishReason := message.FinishReasonUnknown
				if len(acc.Choices) > 0 {
					finishReason = o.finishReason(string(acc.Choices[0].FinishReason))
					if len(acc.Choices[0].Message.ToolCalls) > 0 {
						toolCalls = append(toolCalls, o.toolCalls(acc.ChatCompletion)...)
					}
				}
				if len(toolCalls) > 0 {
					finishReason = message.FinishReasonToolUse
				}

				eventChan <- ProviderEvent{
					Type: EventComplete,
					Response: &ProviderResponse{
						Content:      currentContent,
						ToolCalls:    toolCalls,
						Usage:        o.usage(acc.ChatCompletion),
						FinishReason: finishReason,
					},
				}
				close(eventChan)
				return
			}

			// On error, check whether the request can be retried before giving up.
			retry, after, retryErr := o.shouldRetry(attempts, err)
			if retryErr != nil {
				eventChan <- ProviderEvent{Type: EventError, Error: retryErr}
				close(eventChan)
				return
			}
			if retry {
				logging.WarnPersist(fmt.Sprintf("Retrying due to rate limit... attempt %d of %d", attempts, maxRetries), logging.PersistTimeArg, time.Millisecond*time.Duration(after+100))
				select {
				case <-ctx.Done():
					// The context was cancelled; surface its error before closing.
					if ctx.Err() != nil {
						eventChan <- ProviderEvent{Type: EventError, Error: ctx.Err()}
					}
					close(eventChan)
					return
				case <-time.After(time.Duration(after) * time.Millisecond):
					continue
				}
			}
			eventChan <- ProviderEvent{Type: EventError, Error: err}
			close(eventChan)
			return
		}
	}()

	return eventChan
}

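// shouldRetry reports whether a failed request should be retried and, if so,
// how many milliseconds to wait before the next attempt.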
func (o *openaiClient) shouldRetry(attempts int, err error) (bool, int64, error) {
	var apierr *openai.Error
	if !errors.As(err, &apierr) {
		return false, 0, err
	}

	// Only rate limits (429) and server errors (500) are retryable.
	if apierr.StatusCode != 429 && apierr.StatusCode != 500 {
		return false, 0, err
	}

	if attempts > maxRetries {
		return false, 0, fmt.Errorf("maximum retry attempts reached for rate limit: %d retries", maxRetries)
	}

	// Default to exponential backoff plus a 20% margin, but prefer the
	// server-provided Retry-After header (in seconds) when present.
	backoffMs := 2000 * (1 << (attempts - 1))
	jitterMs := int(float64(backoffMs) * 0.2)
	retryMs := backoffMs + jitterMs
	retryAfterValues := apierr.Response.Header.Values("Retry-After")
	if len(retryAfterValues) > 0 {
		if _, err := fmt.Sscanf(retryAfterValues[0], "%d", &retryMs); err == nil {
			retryMs = retryMs * 1000
		}
	}
	return true, int64(retryMs), nil
}

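// toolCalls extracts the tool calls from the first choice of a completion.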
func (o *openaiClient) toolCalls(completion openai.ChatCompletion) []message.ToolCall {
	var toolCalls []message.ToolCall

	if len(completion.Choices) > 0 && len(completion.Choices[0].Message.ToolCalls) > 0 {
		for _, call := range completion.Choices[0].Message.ToolCalls {
			toolCall := message.ToolCall{
				ID:       call.ID,
				Name:     call.Function.Name,
				Input:    call.Function.Arguments,
				Type:     "function",
				Finished: true,
			}
			toolCalls = append(toolCalls, toolCall)
		}
	}

	return toolCalls
}

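// usage converts OpenAI token accounting into a TokenUsage, counting cached
// prompt tokens as cache reads and excluding them from input tokens.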
func (o *openaiClient) usage(completion openai.ChatCompletion) TokenUsage {
	cachedTokens := completion.Usage.PromptTokensDetails.CachedTokens
	inputTokens := completion.Usage.PromptTokens - cachedTokens

	return TokenUsage{
		InputTokens:         inputTokens,
		OutputTokens:        completion.Usage.CompletionTokens,
		CacheCreationTokens: 0, // OpenAI doesn't provide this directly
		CacheReadTokens:     cachedTokens,
	}
}

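// Model returns the resolved model configuration for this client.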
func (o *openaiClient) Model() config.Model {
	return o.providerOptions.model(o.providerOptions.modelType)
}