package provider

import (
	"context"
	"errors"
	"fmt"
	"io"
	"log/slog"
	"slices"
	"strings"
	"time"

	"github.com/charmbracelet/catwalk/pkg/catwalk"
	"github.com/charmbracelet/crush/internal/config"
	"github.com/charmbracelet/crush/internal/llm/tools"
	"github.com/charmbracelet/crush/internal/log"
	"github.com/charmbracelet/crush/internal/message"
	"github.com/google/uuid"
	"github.com/openai/openai-go"
	"github.com/openai/openai-go/option"
	"github.com/openai/openai-go/packages/param"
	"github.com/openai/openai-go/shared"
)

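// openaiClient implements ProviderClient on top of the official OpenAI Go
// SDK. It also serves OpenAI-compatible endpoints such as OpenRouter, which
// is why several paths below special-case Anthropic models proxied through
// OpenRouter.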
type openaiClient struct {
	providerOptions providerClientOptions
	client          openai.Client
}

type OpenAIClient ProviderClient

func newOpenAIClient(opts providerClientOptions) OpenAIClient {
	return &openaiClient{
		providerOptions: opts,
		client:          createOpenAIClient(opts),
	}
}

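// createOpenAIClient assembles the SDK request options (API key, base URL,
// debug HTTP client, extra headers, and extra JSON body fields) from the
// provider options and returns a configured client.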
func createOpenAIClient(opts providerClientOptions) openai.Client {
	openaiClientOptions := []option.RequestOption{}
	if opts.apiKey != "" {
		openaiClientOptions = append(openaiClientOptions, option.WithAPIKey(opts.apiKey))
	}
	if opts.baseURL != "" {
		// If the base URL fails to resolve, fall back to the SDK default.
		resolvedBaseURL, err := config.Get().Resolve(opts.baseURL)
		if err == nil {
			openaiClientOptions = append(openaiClientOptions, option.WithBaseURL(resolvedBaseURL))
		}
	}

	if config.Get().Options.Debug {
		// Log HTTP traffic when debug mode is enabled.
		httpClient := log.NewHTTPClient()
		openaiClientOptions = append(openaiClientOptions, option.WithHTTPClient(httpClient))
	}

	for key, value := range opts.extraHeaders {
		openaiClientOptions = append(openaiClientOptions, option.WithHeader(key, value))
	}

	for extraKey, extraValue := range opts.extraBody {
		openaiClientOptions = append(openaiClientOptions, option.WithJSONSet(extraKey, extraValue))
	}

	return openai.NewClient(openaiClientOptions...)
}

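// convertMessages maps internal messages to the OpenAI chat format. The
// system prompt always comes first. For Anthropic models proxied through
// OpenRouter it also injects Anthropic-style "cache_control" breakpoints
// (ephemeral prompt caching) on the system prompt and the most recent
// messages, unless caching is disabled.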
func (o *openaiClient) convertMessages(messages []message.Message) (openaiMessages []openai.ChatCompletionMessageParamUnion) {
	isAnthropicModel := o.providerOptions.config.ID == string(catwalk.InferenceProviderOpenRouter) && strings.HasPrefix(o.Model().ID, "anthropic/")
	// Add the system message first.
	systemMessage := o.providerOptions.systemMessage
	if o.providerOptions.systemPromptPrefix != "" {
		systemMessage = o.providerOptions.systemPromptPrefix + "\n" + systemMessage
	}

	system := openai.SystemMessage(systemMessage)
	if isAnthropicModel && !o.providerOptions.disableCache {
		systemTextBlock := openai.ChatCompletionContentPartTextParam{Text: systemMessage}
		systemTextBlock.SetExtraFields(
			map[string]any{
				"cache_control": map[string]string{
					"type": "ephemeral",
				},
			},
		)
		var content []openai.ChatCompletionContentPartTextParam
		content = append(content, systemTextBlock)
		system = openai.SystemMessage(content)
	}
	openaiMessages = append(openaiMessages, system)

	for i, msg := range messages {
		// Mark only the last two messages as cache breakpoints.
		cache := i > len(messages)-3
		switch msg.Role {
		case message.User:
			var content []openai.ChatCompletionContentPartUnionParam

			textBlock := openai.ChatCompletionContentPartTextParam{Text: msg.Content().String()}
			content = append(content, openai.ChatCompletionContentPartUnionParam{OfText: &textBlock})
			hasBinaryContent := false
			for _, binaryContent := range msg.BinaryContent() {
				hasBinaryContent = true
				imageURL := openai.ChatCompletionContentPartImageImageURLParam{URL: binaryContent.String(catwalk.InferenceProviderOpenAI)}
				imageBlock := openai.ChatCompletionContentPartImageParam{ImageURL: imageURL}

				content = append(content, openai.ChatCompletionContentPartUnionParam{OfImageURL: &imageBlock})
			}
			if cache && !o.providerOptions.disableCache && isAnthropicModel {
				textBlock.SetExtraFields(map[string]any{
					"cache_control": map[string]string{
						"type": "ephemeral",
					},
				})
			}
			if hasBinaryContent || (isAnthropicModel && !o.providerOptions.disableCache) {
				openaiMessages = append(openaiMessages, openai.UserMessage(content))
			} else {
				openaiMessages = append(openaiMessages, openai.UserMessage(msg.Content().String()))
			}

		case message.Assistant:
			assistantMsg := openai.ChatCompletionAssistantMessageParam{
				Role: "assistant",
			}

			hasContent := false
			if msg.Content().String() != "" {
				hasContent = true
				textBlock := openai.ChatCompletionContentPartTextParam{Text: msg.Content().String()}
				if cache && !o.providerOptions.disableCache && isAnthropicModel {
					textBlock.SetExtraFields(map[string]any{
						"cache_control": map[string]string{
							"type": "ephemeral",
						},
					})
				}
				assistantMsg.Content = openai.ChatCompletionAssistantMessageParamContentUnion{
					OfArrayOfContentParts: []openai.ChatCompletionAssistantMessageParamContentArrayOfContentPartUnion{
						{
							OfText: &textBlock,
						},
					},
				}
				if !isAnthropicModel {
					assistantMsg.Content = openai.ChatCompletionAssistantMessageParamContentUnion{
						OfString: param.NewOpt(msg.Content().String()),
					}
				}
			}

			if len(msg.ToolCalls()) > 0 {
				hasContent = true
				assistantMsg.ToolCalls = make([]openai.ChatCompletionMessageToolCallParam, len(msg.ToolCalls()))
				for i, call := range msg.ToolCalls() {
					assistantMsg.ToolCalls[i] = openai.ChatCompletionMessageToolCallParam{
						ID:   call.ID,
						Type: "function",
						Function: openai.ChatCompletionMessageToolCallFunctionParam{
							Name:      call.Name,
							Arguments: call.Input,
						},
					}
				}
			}
			if !hasContent {
				slog.Warn("Skipping assistant message with neither content nor tool calls; this should not happen")
				continue
			}

			openaiMessages = append(openaiMessages, openai.ChatCompletionMessageParamUnion{
				OfAssistant: &assistantMsg,
			})

		case message.Tool:
			for _, result := range msg.ToolResults() {
				openaiMessages = append(openaiMessages,
					openai.ToolMessage(result.Content, result.ToolCallID),
				)
			}
		}
	}

	return
}

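// convertTools maps internal tool definitions to OpenAI function-tool
// parameters, wrapping each tool's parameters in a JSON Schema object.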
func (o *openaiClient) convertTools(tools []tools.BaseTool) []openai.ChatCompletionToolParam {
	openaiTools := make([]openai.ChatCompletionToolParam, len(tools))

	for i, tool := range tools {
		info := tool.Info()
		openaiTools[i] = openai.ChatCompletionToolParam{
			Function: openai.FunctionDefinitionParam{
				Name:        info.Name,
				Description: openai.String(info.Description),
				Parameters: openai.FunctionParameters{
					"type":       "object",
					"properties": info.Parameters,
					"required":   info.Required,
				},
			},
		}
	}

	return openaiTools
}

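// finishReason translates an OpenAI finish reason into the internal
// message.FinishReason; unrecognized values map to FinishReasonUnknown.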
func (o *openaiClient) finishReason(reason string) message.FinishReason {
	switch reason {
	case "stop":
		return message.FinishReasonEndTurn
	case "length":
		return message.FinishReasonMaxTokens
	case "tool_calls":
		return message.FinishReasonToolUse
	default:
		return message.FinishReasonUnknown
	}
}

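// preparedParams builds the chat completion request: model ID, messages,
// tools, and token limits. Reasoning models use MaxCompletionTokens plus a
// reasoning effort; other models use the plain MaxTokens parameter.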
func (o *openaiClient) preparedParams(messages []openai.ChatCompletionMessageParamUnion, tools []openai.ChatCompletionToolParam) openai.ChatCompletionNewParams {
	model := o.providerOptions.model(o.providerOptions.modelType)
	cfg := config.Get()

	modelConfig := cfg.Models[config.SelectedModelTypeLarge]
	if o.providerOptions.modelType == config.SelectedModelTypeSmall {
		modelConfig = cfg.Models[config.SelectedModelTypeSmall]
	}

	reasoningEffort := modelConfig.ReasoningEffort

	params := openai.ChatCompletionNewParams{
		Model:    openai.ChatModel(model.ID),
		Messages: messages,
		Tools:    tools,
	}

	maxTokens := model.DefaultMaxTokens
	if modelConfig.MaxTokens > 0 {
		maxTokens = modelConfig.MaxTokens
	}

	// Override max tokens if set in provider options.
	if o.providerOptions.maxTokens > 0 {
		maxTokens = o.providerOptions.maxTokens
	}
	if model.CanReason {
		params.MaxCompletionTokens = openai.Int(maxTokens)
		switch reasoningEffort {
		case "low":
			params.ReasoningEffort = shared.ReasoningEffortLow
		case "medium":
			params.ReasoningEffort = shared.ReasoningEffortMedium
		case "high":
			params.ReasoningEffort = shared.ReasoningEffortHigh
		default:
			params.ReasoningEffort = shared.ReasoningEffort(reasoningEffort)
		}
	} else {
		params.MaxTokens = openai.Int(maxTokens)
	}

	return params
}

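// send performs a single (non-streaming) chat completion request, retrying
// with backoff when shouldRetry reports the error as transient.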
func (o *openaiClient) send(ctx context.Context, messages []message.Message, tools []tools.BaseTool) (response *ProviderResponse, err error) {
	params := o.preparedParams(o.convertMessages(messages), o.convertTools(tools))
	attempts := 0
	for {
		attempts++
		openaiResponse, err := o.client.Chat.Completions.New(
			ctx,
			params,
		)
		// If there is an error, check whether the call can be retried.
		if err != nil {
			retry, after, retryErr := o.shouldRetry(attempts, err)
			if retryErr != nil {
				return nil, retryErr
			}
			if retry {
				slog.Warn("Retrying OpenAI request", "attempt", attempts, "max_retries", maxRetries)
				select {
				case <-ctx.Done():
					return nil, ctx.Err()
				case <-time.After(time.Duration(after) * time.Millisecond):
					continue
				}
			}
			return nil, err
		}

		if len(openaiResponse.Choices) == 0 {
			return nil, fmt.Errorf("received empty response from OpenAI API - check endpoint configuration")
		}

		content := ""
		if openaiResponse.Choices[0].Message.Content != "" {
			content = openaiResponse.Choices[0].Message.Content
		}

		toolCalls := o.toolCalls(*openaiResponse)
		finishReason := o.finishReason(string(openaiResponse.Choices[0].FinishReason))

		if len(toolCalls) > 0 {
			finishReason = message.FinishReasonToolUse
		}

		return &ProviderResponse{
			Content:      content,
			ToolCalls:    toolCalls,
			Usage:        o.usage(*openaiResponse),
			FinishReason: finishReason,
		}, nil
	}
}

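// stream performs a streaming chat completion request and emits provider
// events (content deltas, tool call starts, completion, errors) on the
// returned channel. It accumulates streamed chunks so the final event
// carries the full response, and retries transient errors with backoff.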
func (o *openaiClient) stream(ctx context.Context, messages []message.Message, tools []tools.BaseTool) <-chan ProviderEvent {
	params := o.preparedParams(o.convertMessages(messages), o.convertTools(tools))
	params.StreamOptions = openai.ChatCompletionStreamOptionsParam{
		IncludeUsage: openai.Bool(true),
	}

	attempts := 0
	eventChan := make(chan ProviderEvent)

	go func() {
		for {
			attempts++
			// Kujtim: sending an empty (non-nil) tools slice breaks Anthropic
			// models on OpenRouter, so normalize it to nil.
			if len(params.Tools) == 0 {
				params.Tools = nil
			}
			openaiStream := o.client.Chat.Completions.NewStreaming(
				ctx,
				params,
			)

			acc := openai.ChatCompletionAccumulator{}
			currentContent := ""
			toolCalls := make([]message.ToolCall, 0)
			var msgToolCalls []openai.ChatCompletionMessageToolCall
			for openaiStream.Next() {
				chunk := openaiStream.Current()
				// Kujtim: OpenRouter's Qwen models send -1 as the tool call index.
				if len(chunk.Choices) > 0 && len(chunk.Choices[0].Delta.ToolCalls) > 0 && chunk.Choices[0].Delta.ToolCalls[0].Index == -1 {
					chunk.Choices[0].Delta.ToolCalls[0].Index = 0
				}
				acc.AddChunk(chunk)
				// Reconcile tool call deltas manually; some providers reuse
				// indices or IDs in ways that break the accumulator when a
				// response contains multiple tool calls.
				for i, choice := range chunk.Choices {
					if choice.Delta.Content != "" {
						eventChan <- ProviderEvent{
							Type:    EventContentDelta,
							Content: choice.Delta.Content,
						}
						currentContent += choice.Delta.Content
					} else if len(choice.Delta.ToolCalls) > 0 {
						toolCall := choice.Delta.ToolCalls[0]
						newToolCall := false
						if len(msgToolCalls)-1 >= int(toolCall.Index) { // tool call exists
							existingToolCall := msgToolCalls[toolCall.Index]
							if toolCall.ID != "" && toolCall.ID != existingToolCall.ID {
								found := false
								// Try to find the tool call by ID instead.
								for i, tool := range msgToolCalls {
									if tool.ID == toolCall.ID {
										msgToolCalls[i].Function.Arguments += toolCall.Function.Arguments
										found = true
									}
								}
								if !found {
									newToolCall = true
								}
							} else {
								msgToolCalls[toolCall.Index].Function.Arguments += toolCall.Function.Arguments
							}
						} else {
							newToolCall = true
						}
						if newToolCall { // new tool call
							if toolCall.ID == "" {
								toolCall.ID = uuid.NewString()
							}
							eventChan <- ProviderEvent{
								Type: EventToolUseStart,
								ToolCall: &message.ToolCall{
									ID:       toolCall.ID,
									Name:     toolCall.Function.Name,
									Finished: false,
								},
							}
							msgToolCalls = append(msgToolCalls, openai.ChatCompletionMessageToolCall{
								ID:   toolCall.ID,
								Type: "function",
								Function: openai.ChatCompletionMessageToolCallFunction{
									Name:      toolCall.Function.Name,
									Arguments: toolCall.Function.Arguments,
								},
							})
						}
					}
					acc.Choices[i].Message.ToolCalls = slices.Clone(msgToolCalls)
				}
			}

			err := openaiStream.Err()
			if err == nil || errors.Is(err, io.EOF) {
				if len(acc.Choices) == 0 {
					eventChan <- ProviderEvent{
						Type:  EventError,
						Error: fmt.Errorf("received empty streaming response from OpenAI API - check endpoint configuration"),
					}
					return
				}

				resultFinishReason := acc.Choices[0].FinishReason
				if resultFinishReason == "" {
					// An empty finish reason is treated as a successful stop;
					// OpenRouter sometimes omits it.
					resultFinishReason = "stop"
				}
				// Stream completed successfully.
				finishReason := o.finishReason(resultFinishReason)
				if len(acc.Choices[0].Message.ToolCalls) > 0 {
					toolCalls = append(toolCalls, o.toolCalls(acc.ChatCompletion)...)
				}
				if len(toolCalls) > 0 {
					finishReason = message.FinishReasonToolUse
				}

				eventChan <- ProviderEvent{
					Type: EventComplete,
					Response: &ProviderResponse{
						Content:      currentContent,
						ToolCalls:    toolCalls,
						Usage:        o.usage(acc.ChatCompletion),
						FinishReason: finishReason,
					},
				}
				close(eventChan)
				return
			}

			// If there is an error, check whether the call can be retried.
			retry, after, retryErr := o.shouldRetry(attempts, err)
			if retryErr != nil {
				eventChan <- ProviderEvent{Type: EventError, Error: retryErr}
				close(eventChan)
				return
			}
			if retry {
				slog.Warn("Retrying OpenAI request", "attempt", attempts, "max_retries", maxRetries)
				select {
				case <-ctx.Done():
					// Context cancelled: surface the cancellation error.
					if ctx.Err() != nil {
						eventChan <- ProviderEvent{Type: EventError, Error: ctx.Err()}
					}
					close(eventChan)
					return
				case <-time.After(time.Duration(after) * time.Millisecond):
					continue
				}
			}
			eventChan <- ProviderEvent{Type: EventError, Error: err}
			close(eventChan)
			return
		}
	}()

	return eventChan
}

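// shouldRetry decides whether an error is transient and, if so, how long to
// wait (in milliseconds) before the next attempt. A 401 triggers an API key
// re-resolution and an immediate retry; otherwise only 429 and 500 responses
// are retried. Backoff doubles per attempt with 20% jitter (attempt 1:
// 2000+400=2400ms, attempt 2: 4800ms, attempt 3: 9600ms, ...), and a
// Retry-After header, when present, overrides the computed delay.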
func (o *openaiClient) shouldRetry(attempts int, err error) (bool, int64, error) {
	if attempts > maxRetries {
		return false, 0, fmt.Errorf("maximum retry attempts reached for rate limit: %d retries", maxRetries)
	}
	if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) {
		return false, 0, err
	}
	var apiErr *openai.Error
	retryMs := 0
	retryAfterValues := []string{}
	if errors.As(err, &apiErr) {
		// Check for token expiration (401 Unauthorized): re-resolve the API
		// key, rebuild the client, and retry immediately.
		if apiErr.StatusCode == 401 {
			o.providerOptions.apiKey, err = config.Get().Resolve(o.providerOptions.config.APIKey)
			if err != nil {
				return false, 0, fmt.Errorf("failed to resolve API key: %w", err)
			}
			o.client = createOpenAIClient(o.providerOptions)
			return true, 0, nil
		}

		if apiErr.StatusCode != 429 && apiErr.StatusCode != 500 {
			return false, 0, err
		}

		retryAfterValues = apiErr.Response.Header.Values("Retry-After")
	}

	if apiErr != nil {
		slog.Warn("OpenAI API error", "status_code", apiErr.StatusCode, "message", apiErr.Message, "type", apiErr.Type)
		if len(retryAfterValues) > 0 {
			slog.Warn("Retry-After header", "values", retryAfterValues)
		}
	} else {
		slog.Error("OpenAI API error", "error", err.Error(), "attempt", attempts, "max_retries", maxRetries)
	}

	// Exponential backoff with 20% jitter, overridden by the Retry-After
	// header (given in seconds) when the server provides one.
	backoffMs := 2000 * (1 << (attempts - 1))
	jitterMs := int(float64(backoffMs) * 0.2)
	retryMs = backoffMs + jitterMs
	if len(retryAfterValues) > 0 {
		if _, err := fmt.Sscanf(retryAfterValues[0], "%d", &retryMs); err == nil {
			retryMs = retryMs * 1000
		}
	}
	return true, int64(retryMs), nil
}

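// toolCalls extracts the tool calls from the first choice of a completed
// response and marks them finished.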
func (o *openaiClient) toolCalls(completion openai.ChatCompletion) []message.ToolCall {
	var toolCalls []message.ToolCall

	if len(completion.Choices) > 0 && len(completion.Choices[0].Message.ToolCalls) > 0 {
		for _, call := range completion.Choices[0].Message.ToolCalls {
			toolCall := message.ToolCall{
				ID:       call.ID,
				Name:     call.Function.Name,
				Input:    call.Function.Arguments,
				Type:     "function",
				Finished: true,
			}
			toolCalls = append(toolCalls, toolCall)
		}
	}

	return toolCalls
}

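// usage converts OpenAI token accounting into the internal TokenUsage;
// cached prompt tokens are reported separately from fresh input tokens.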
func (o *openaiClient) usage(completion openai.ChatCompletion) TokenUsage {
	cachedTokens := completion.Usage.PromptTokensDetails.CachedTokens
	inputTokens := completion.Usage.PromptTokens - cachedTokens

	return TokenUsage{
		InputTokens:         inputTokens,
		OutputTokens:        completion.Usage.CompletionTokens,
		CacheCreationTokens: 0, // OpenAI doesn't provide this directly.
		CacheReadTokens:     cachedTokens,
	}
}

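// Model returns the catwalk model currently selected for this client.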
func (o *openaiClient) Model() catwalk.Model {
	return o.providerOptions.model(o.providerOptions.modelType)
}