package provider

import (
	"context"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"log/slog"
	"strings"
	"time"

	"github.com/charmbracelet/catwalk/pkg/catwalk"
	"github.com/charmbracelet/crush/internal/config"
	"github.com/charmbracelet/crush/internal/llm/tools"
	"github.com/charmbracelet/crush/internal/log"
	"github.com/charmbracelet/crush/internal/message"
	"github.com/google/uuid"
	"github.com/openai/openai-go"
	"github.com/openai/openai-go/option"
	"github.com/openai/openai-go/packages/param"
	"github.com/openai/openai-go/shared"
)

type openaiClient struct {
	providerOptions providerClientOptions
	client          openai.Client
}

type OpenAIClient ProviderClient

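// newOpenAIClient constructs an OpenAI provider client from the given
// options. The same client also backs OpenAI-compatible endpoints such as
// OpenRouter.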
func newOpenAIClient(opts providerClientOptions) OpenAIClient {
	return &openaiClient{
		providerOptions: opts,
		client:          createOpenAIClient(opts),
	}
}

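// createOpenAIClient builds the underlying openai.Client, applying the API
// key, a resolved base URL, a logging HTTP client when debug is enabled, and
// any extra headers or extra JSON body fields from the options.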
func createOpenAIClient(opts providerClientOptions) openai.Client {
	openaiClientOptions := []option.RequestOption{}
	if opts.apiKey != "" {
		openaiClientOptions = append(openaiClientOptions, option.WithAPIKey(opts.apiKey))
	}
	if opts.baseURL != "" {
		resolvedBaseURL, err := config.Get().Resolve(opts.baseURL)
		if err == nil && resolvedBaseURL != "" {
			openaiClientOptions = append(openaiClientOptions, option.WithBaseURL(resolvedBaseURL))
		}
	}

	if config.Get().Options.Debug {
		httpClient := log.NewHTTPClient()
		openaiClientOptions = append(openaiClientOptions, option.WithHTTPClient(httpClient))
	}

	for key, value := range opts.extraHeaders {
		openaiClientOptions = append(openaiClientOptions, option.WithHeader(key, value))
	}

	for extraKey, extraValue := range opts.extraBody {
		openaiClientOptions = append(openaiClientOptions, option.WithJSONSet(extraKey, extraValue))
	}

	return openai.NewClient(openaiClientOptions...)
}

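// convertMessages maps internal messages to the OpenAI chat message union.
// The system prompt is always emitted first. For Anthropic models served via
// OpenRouter it also attaches Anthropic-style "cache_control" breakpoints to
// the system prompt and to the most recent user and assistant messages,
// unless caching is disabled.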
func (o *openaiClient) convertMessages(messages []message.Message) (openaiMessages []openai.ChatCompletionMessageParamUnion) {
	isAnthropicModel := o.providerOptions.config.ID == string(catwalk.InferenceProviderOpenRouter) && strings.HasPrefix(o.Model().ID, "anthropic/")
	// Add the system message first.
	systemMessage := o.providerOptions.systemMessage
	if o.providerOptions.systemPromptPrefix != "" {
		systemMessage = o.providerOptions.systemPromptPrefix + "\n" + systemMessage
	}

	system := openai.SystemMessage(systemMessage)
	if isAnthropicModel && !o.providerOptions.disableCache {
		systemTextBlock := openai.ChatCompletionContentPartTextParam{Text: systemMessage}
		systemTextBlock.SetExtraFields(
			map[string]any{
				"cache_control": map[string]string{
					"type": "ephemeral",
				},
			},
		)
		var content []openai.ChatCompletionContentPartTextParam
		content = append(content, systemTextBlock)
		system = openai.SystemMessage(content)
	}
	openaiMessages = append(openaiMessages, system)

	for i, msg := range messages {
		// Only the last two messages are candidates for cache breakpoints.
		cache := i > len(messages)-3
		switch msg.Role {
		case message.User:
			var content []openai.ChatCompletionContentPartUnionParam

			textBlock := openai.ChatCompletionContentPartTextParam{Text: msg.Content().String()}
			content = append(content, openai.ChatCompletionContentPartUnionParam{OfText: &textBlock})
			hasBinaryContent := false
			for _, binaryContent := range msg.BinaryContent() {
				hasBinaryContent = true
				imageURL := openai.ChatCompletionContentPartImageImageURLParam{URL: binaryContent.String(catwalk.InferenceProviderOpenAI)}
				imageBlock := openai.ChatCompletionContentPartImageParam{ImageURL: imageURL}

				content = append(content, openai.ChatCompletionContentPartUnionParam{OfImageURL: &imageBlock})
			}
			if cache && !o.providerOptions.disableCache && isAnthropicModel {
				textBlock.SetExtraFields(map[string]any{
					"cache_control": map[string]string{
						"type": "ephemeral",
					},
				})
			}
			if hasBinaryContent || (isAnthropicModel && !o.providerOptions.disableCache) {
				openaiMessages = append(openaiMessages, openai.UserMessage(content))
			} else {
				openaiMessages = append(openaiMessages, openai.UserMessage(msg.Content().String()))
			}

		case message.Assistant:
			assistantMsg := openai.ChatCompletionAssistantMessageParam{
				Role: "assistant",
			}

			if len(msg.ToolCalls()) > 0 {
				assistantMsg.ToolCalls = make([]openai.ChatCompletionMessageToolCallParam, len(msg.ToolCalls()))
				for i, call := range msg.ToolCalls() {
					assistantMsg.ToolCalls[i] = openai.ChatCompletionMessageToolCallParam{
						ID:   call.ID,
						Type: "function",
						Function: openai.ChatCompletionMessageToolCallFunctionParam{
							Name:      call.Name,
							Arguments: call.Input,
						},
					}
				}
			}
			if msg.Content().String() != "" {
				assistantMsg.Content = openai.ChatCompletionAssistantMessageParamContentUnion{
					OfString: param.NewOpt(msg.Content().Text),
				}
			}

			if cache && !o.providerOptions.disableCache && isAnthropicModel {
				assistantMsg.SetExtraFields(map[string]any{
					"cache_control": map[string]string{
						"type": "ephemeral",
					},
				})
			}
			openaiMessages = append(openaiMessages, openai.ChatCompletionMessageParamUnion{
				OfAssistant: &assistantMsg,
			})

		case message.Tool:
			for _, result := range msg.ToolResults() {
				openaiMessages = append(openaiMessages,
					openai.ToolMessage(result.Content, result.ToolCallID),
				)
			}
		}
	}

	return
}

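// convertTools maps the internal tool definitions to OpenAI function-tool
// parameters, wrapping each tool's parameters in a JSON Schema object.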
func (o *openaiClient) convertTools(tools []tools.BaseTool) []openai.ChatCompletionToolParam {
	openaiTools := make([]openai.ChatCompletionToolParam, len(tools))

	for i, tool := range tools {
		info := tool.Info()
		openaiTools[i] = openai.ChatCompletionToolParam{
			Function: openai.FunctionDefinitionParam{
				Name:        info.Name,
				Description: openai.String(info.Description),
				Parameters: openai.FunctionParameters{
					"type":       "object",
					"properties": info.Parameters,
					"required":   info.Required,
				},
			},
		}
	}

	return openaiTools
}

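// finishReason translates an OpenAI finish reason string into the internal
// message.FinishReason; unrecognized values map to FinishReasonUnknown.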
func (o *openaiClient) finishReason(reason string) message.FinishReason {
	switch reason {
	case "stop":
		return message.FinishReasonEndTurn
	case "length":
		return message.FinishReasonMaxTokens
	case "tool_calls":
		return message.FinishReasonToolUse
	default:
		return message.FinishReasonUnknown
	}
}

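// preparedParams assembles the ChatCompletionNewParams for a request: model,
// messages, tools, and the effective max-token budget (model default,
// overridden by the model config, then by provider options). For
// reasoning-capable models it also sets the configured reasoning effort.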
func (o *openaiClient) preparedParams(messages []openai.ChatCompletionMessageParamUnion, tools []openai.ChatCompletionToolParam) openai.ChatCompletionNewParams {
	model := o.providerOptions.model(o.providerOptions.modelType)
	cfg := config.Get()

	modelConfig := cfg.Models[config.SelectedModelTypeLarge]
	if o.providerOptions.modelType == config.SelectedModelTypeSmall {
		modelConfig = cfg.Models[config.SelectedModelTypeSmall]
	}

	reasoningEffort := modelConfig.ReasoningEffort

	params := openai.ChatCompletionNewParams{
		Model:    openai.ChatModel(model.ID),
		Messages: messages,
		Tools:    tools,
	}

	maxTokens := model.DefaultMaxTokens
	if modelConfig.MaxTokens > 0 {
		maxTokens = modelConfig.MaxTokens
	}

	// Override max tokens if set in provider options.
	if o.providerOptions.maxTokens > 0 {
		maxTokens = o.providerOptions.maxTokens
	}
	if model.CanReason {
		params.MaxCompletionTokens = openai.Int(maxTokens)
		switch reasoningEffort {
		case "low":
			params.ReasoningEffort = shared.ReasoningEffortLow
		case "medium":
			params.ReasoningEffort = shared.ReasoningEffortMedium
		case "high":
			params.ReasoningEffort = shared.ReasoningEffortHigh
		default:
			// Pass through values such as "minimal" that have no typed constant.
			params.ReasoningEffort = shared.ReasoningEffort(reasoningEffort)
		}
	} else {
		params.MaxTokens = openai.Int(maxTokens)
	}

	return params
}

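// send issues a non-streaming chat completion request, retrying on rate
// limits, transient server errors, and expired credentials via shouldRetry,
// and converts the first choice into a ProviderResponse.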
func (o *openaiClient) send(ctx context.Context, messages []message.Message, tools []tools.BaseTool) (response *ProviderResponse, err error) {
	params := o.preparedParams(o.convertMessages(messages), o.convertTools(tools))
	attempts := 0
	for {
		attempts++
		openaiResponse, err := o.client.Chat.Completions.New(
			ctx,
			params,
		)
		// If there is an error, check whether the call can be retried.
		if err != nil {
			retry, after, retryErr := o.shouldRetry(attempts, err)
			if retryErr != nil {
				return nil, retryErr
			}
			if retry {
				slog.Warn("Retrying due to rate limit", "attempt", attempts, "max_retries", maxRetries, "error", err)
				select {
				case <-ctx.Done():
					return nil, ctx.Err()
				case <-time.After(time.Duration(after) * time.Millisecond):
					continue
				}
			}
			return nil, err
		}

		if len(openaiResponse.Choices) == 0 {
			return nil, fmt.Errorf("received empty response from OpenAI API - check endpoint configuration")
		}

		content := ""
		if openaiResponse.Choices[0].Message.Content != "" {
			content = openaiResponse.Choices[0].Message.Content
		}

		toolCalls := o.toolCalls(*openaiResponse)
		finishReason := o.finishReason(string(openaiResponse.Choices[0].FinishReason))

		if len(toolCalls) > 0 {
			finishReason = message.FinishReasonToolUse
		}

		return &ProviderResponse{
			Content:      content,
			ToolCalls:    toolCalls,
			Usage:        o.usage(*openaiResponse),
			FinishReason: finishReason,
		}, nil
	}
}

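// stream issues a streaming chat completion request and forwards events on
// the returned channel: thinking deltas (decoded from the extra "reasoning"
// field some providers, e.g. OpenRouter, emit), content deltas, tool-call
// starts, and a final EventComplete carrying the accumulated response.
// Retries mirror send.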
func (o *openaiClient) stream(ctx context.Context, messages []message.Message, tools []tools.BaseTool) <-chan ProviderEvent {
	params := o.preparedParams(o.convertMessages(messages), o.convertTools(tools))
	params.StreamOptions = openai.ChatCompletionStreamOptionsParam{
		IncludeUsage: openai.Bool(true),
	}

	attempts := 0
	eventChan := make(chan ProviderEvent)

	go func() {
		for {
			attempts++
			// Kujtim: fixes an issue with anthropic models on openrouter
			if len(params.Tools) == 0 {
				params.Tools = nil
			}
			openaiStream := o.client.Chat.Completions.NewStreaming(
				ctx,
				params,
			)

			acc := openai.ChatCompletionAccumulator{}
			currentContent := ""
			toolCalls := make([]message.ToolCall, 0)
			msgToolCalls := make(map[int64]openai.ChatCompletionMessageToolCall)
			toolMap := make(map[string]openai.ChatCompletionMessageToolCall)
			for openaiStream.Next() {
				chunk := openaiStream.Current()
				// Kujtim: this is an issue with openrouter qwen, it's sending -1 for the tool index
				if len(chunk.Choices) != 0 && len(chunk.Choices[0].Delta.ToolCalls) > 0 && chunk.Choices[0].Delta.ToolCalls[0].Index == -1 {
					chunk.Choices[0].Delta.ToolCalls[0].Index = 0
				}
				acc.AddChunk(chunk)
				for i, choice := range chunk.Choices {
					reasoning, ok := choice.Delta.JSON.ExtraFields["reasoning"]
					if ok && reasoning.Raw() != "" {
						reasoningStr := ""
						// The raw value is a JSON string; values that fail to decode are skipped.
						_ = json.Unmarshal([]byte(reasoning.Raw()), &reasoningStr)
						if reasoningStr != "" {
							eventChan <- ProviderEvent{
								Type:     EventThinkingDelta,
								Thinking: reasoningStr,
							}
						}
					}
					if choice.Delta.Content != "" {
						eventChan <- ProviderEvent{
							Type:    EventContentDelta,
							Content: choice.Delta.Content,
						}
						currentContent += choice.Delta.Content
					} else if len(choice.Delta.ToolCalls) > 0 {
						toolCall := choice.Delta.ToolCalls[0]
						newToolCall := false
						if existingToolCall, ok := msgToolCalls[toolCall.Index]; ok { // tool call exists
							if toolCall.ID != "" && toolCall.ID != existingToolCall.ID {
								found := false
								// Try to find the tool call by ID instead.
								for _, tool := range msgToolCalls {
									if tool.ID == toolCall.ID {
										existingToolCall.Function.Arguments += toolCall.Function.Arguments
										msgToolCalls[toolCall.Index] = existingToolCall
										toolMap[existingToolCall.ID] = existingToolCall
										found = true
									}
								}
								if !found {
									newToolCall = true
								}
							} else {
								existingToolCall.Function.Arguments += toolCall.Function.Arguments
								msgToolCalls[toolCall.Index] = existingToolCall
								toolMap[existingToolCall.ID] = existingToolCall
							}
						} else {
							newToolCall = true
						}
						if newToolCall { // new tool call
							if toolCall.ID == "" {
								toolCall.ID = uuid.NewString()
							}
							eventChan <- ProviderEvent{
								Type: EventToolUseStart,
								ToolCall: &message.ToolCall{
									ID:       toolCall.ID,
									Name:     toolCall.Function.Name,
									Finished: false,
								},
							}
							msgToolCalls[toolCall.Index] = openai.ChatCompletionMessageToolCall{
								ID:   toolCall.ID,
								Type: "function",
								Function: openai.ChatCompletionMessageToolCallFunction{
									Name:      toolCall.Function.Name,
									Arguments: toolCall.Function.Arguments,
								},
							}
							toolMap[toolCall.ID] = msgToolCalls[toolCall.Index]
						}
						// Mirror the deduplicated tool calls into the accumulator.
						accumulated := []openai.ChatCompletionMessageToolCall{}
						for _, tc := range toolMap {
							accumulated = append(accumulated, tc)
						}
						acc.Choices[i].Message.ToolCalls = accumulated
					}
				}
			}

			err := openaiStream.Err()
			if err == nil || errors.Is(err, io.EOF) {
				if len(acc.Choices) == 0 {
					eventChan <- ProviderEvent{
						Type:  EventError,
						Error: fmt.Errorf("received empty streaming response from OpenAI API - check endpoint configuration"),
					}
					close(eventChan)
					return
				}

				resultFinishReason := acc.Choices[0].FinishReason
				if resultFinishReason == "" {
					// If the finish reason is empty, assume a successful completion.
					// INFO: this is happening for openrouter for some reason
					resultFinishReason = "stop"
				}
				// Stream completed successfully.
				finishReason := o.finishReason(resultFinishReason)
				if len(acc.Choices[0].Message.ToolCalls) > 0 {
					toolCalls = append(toolCalls, o.toolCalls(acc.ChatCompletion)...)
				}
				if len(toolCalls) > 0 {
					finishReason = message.FinishReasonToolUse
				}

				eventChan <- ProviderEvent{
					Type: EventComplete,
					Response: &ProviderResponse{
						Content:      currentContent,
						ToolCalls:    toolCalls,
						Usage:        o.usage(acc.ChatCompletion),
						FinishReason: finishReason,
					},
				}
				close(eventChan)
				return
			}

			// If there is an error, check whether the call can be retried.
			retry, after, retryErr := o.shouldRetry(attempts, err)
			if retryErr != nil {
				eventChan <- ProviderEvent{Type: EventError, Error: retryErr}
				close(eventChan)
				return
			}
			if retry {
				slog.Warn("Retrying due to rate limit", "attempt", attempts, "max_retries", maxRetries, "error", err)
				select {
				case <-ctx.Done():
					// Context cancelled: report it before closing the channel.
					if ctx.Err() != nil {
						eventChan <- ProviderEvent{Type: EventError, Error: ctx.Err()}
					}
					close(eventChan)
					return
				case <-time.After(time.Duration(after) * time.Millisecond):
					continue
				}
			}
			eventChan <- ProviderEvent{Type: EventError, Error: err}
			close(eventChan)
			return
		}
	}()

	return eventChan
}

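// shouldRetry reports whether a failed request should be retried and after
// how many milliseconds. A 401 refreshes the API key and retries
// immediately; 429 and 500 responses back off exponentially with a 20%
// margin, honoring a Retry-After header (in seconds) when present. Other
// API errors abort.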
func (o *openaiClient) shouldRetry(attempts int, err error) (bool, int64, error) {
	if attempts > maxRetries {
		return false, 0, fmt.Errorf("maximum retry attempts reached for rate limit: %d retries", maxRetries)
	}
	if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) {
		return false, 0, err
	}
	var apiErr *openai.Error
	retryMs := 0
	retryAfterValues := []string{}
	if errors.As(err, &apiErr) {
		// Check for token expiration (401 Unauthorized): refresh the API key
		// and retry immediately with a new client.
		if apiErr.StatusCode == 401 {
			o.providerOptions.apiKey, err = config.Get().Resolve(o.providerOptions.config.APIKey)
			if err != nil {
				return false, 0, fmt.Errorf("failed to resolve API key: %w", err)
			}
			o.client = createOpenAIClient(o.providerOptions)
			return true, 0, nil
		}

		if apiErr.StatusCode != 429 && apiErr.StatusCode != 500 {
			return false, 0, err
		}

		retryAfterValues = apiErr.Response.Header.Values("Retry-After")
	}

	if apiErr != nil {
		slog.Warn("OpenAI API error", "status_code", apiErr.StatusCode, "message", apiErr.Message, "type", apiErr.Type)
		if len(retryAfterValues) > 0 {
			slog.Warn("Retry-After header", "values", retryAfterValues)
		}
	} else {
		slog.Error("OpenAI API error", "error", err.Error(), "attempt", attempts, "max_retries", maxRetries)
	}

	// Exponential backoff with a 20% margin, overridden by Retry-After (seconds).
	backoffMs := 2000 * (1 << (attempts - 1))
	jitterMs := int(float64(backoffMs) * 0.2)
	retryMs = backoffMs + jitterMs
	if len(retryAfterValues) > 0 {
		if _, err := fmt.Sscanf(retryAfterValues[0], "%d", &retryMs); err == nil {
			retryMs = retryMs * 1000
		}
	}
	return true, int64(retryMs), nil
}

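// toolCalls extracts completed tool calls from the first choice of a
// completion, skipping entries the accumulator left without a function name.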
func (o *openaiClient) toolCalls(completion openai.ChatCompletion) []message.ToolCall {
	var toolCalls []message.ToolCall

	if len(completion.Choices) > 0 && len(completion.Choices[0].Message.ToolCalls) > 0 {
		for _, call := range completion.Choices[0].Message.ToolCalls {
			// The accumulator sometimes produces entries without a name; skip them.
			if call.Function.Name == "" {
				continue
			}
			toolCall := message.ToolCall{
				ID:       call.ID,
				Name:     call.Function.Name,
				Input:    call.Function.Arguments,
				Type:     "function",
				Finished: true,
			}
			toolCalls = append(toolCalls, toolCall)
		}
	}

	return toolCalls
}

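// usage converts OpenAI token accounting to the internal TokenUsage. Cached
// prompt tokens are reported separately, so they are subtracted from the
// input count to avoid double counting.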
func (o *openaiClient) usage(completion openai.ChatCompletion) TokenUsage {
	cachedTokens := completion.Usage.PromptTokensDetails.CachedTokens
	inputTokens := completion.Usage.PromptTokens - cachedTokens

	return TokenUsage{
		InputTokens:         inputTokens,
		OutputTokens:        completion.Usage.CompletionTokens,
		CacheCreationTokens: 0, // OpenAI doesn't provide this directly
		CacheReadTokens:     cachedTokens,
	}
}

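// Model returns the catwalk model selected by the provider options.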
func (o *openaiClient) Model() catwalk.Model {
	return o.providerOptions.model(o.providerOptions.modelType)
}