package provider

import (
	"context"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"log/slog"
	"slices"
	"strings"
	"time"

	"github.com/charmbracelet/catwalk/pkg/catwalk"
	"github.com/charmbracelet/crush/internal/config"
	"github.com/charmbracelet/crush/internal/llm/tools"
	"github.com/charmbracelet/crush/internal/log"
	"github.com/charmbracelet/crush/internal/message"
	"github.com/google/uuid"
	"github.com/openai/openai-go"
	"github.com/openai/openai-go/option"
	"github.com/openai/openai-go/packages/param"
	"github.com/openai/openai-go/shared"
)

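// openaiClient is the ProviderClient implementation for OpenAI-compatible
// chat completion APIs, including proxies such as OpenRouter that speak the
// same wire format.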
type openaiClient struct {
	providerOptions providerClientOptions
	client          openai.Client
}

// OpenAIClient is the exported ProviderClient type backed by openaiClient.
type OpenAIClient ProviderClient

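// newOpenAIClient builds an openaiClient from the given provider options.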
func newOpenAIClient(opts providerClientOptions) OpenAIClient {
	return &openaiClient{
		providerOptions: opts,
		client:          createOpenAIClient(opts),
	}
}

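// createOpenAIClient assembles the underlying openai-go client, applying the
// API key, resolved base URL, debug HTTP logging, and any extra headers or
// JSON body fields from the provider options.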
func createOpenAIClient(opts providerClientOptions) openai.Client {
	openaiClientOptions := []option.RequestOption{}
	if opts.apiKey != "" {
		openaiClientOptions = append(openaiClientOptions, option.WithAPIKey(opts.apiKey))
	}
	if opts.baseURL != "" {
		// Fall back to the SDK's default base URL if resolution fails or
		// resolves to an empty string.
		resolvedBaseURL, err := config.Get().Resolve(opts.baseURL)
		if err == nil && resolvedBaseURL != "" {
			openaiClientOptions = append(openaiClientOptions, option.WithBaseURL(resolvedBaseURL))
		}
	}

	if config.Get().Options.Debug {
		httpClient := log.NewHTTPClient()
		openaiClientOptions = append(openaiClientOptions, option.WithHTTPClient(httpClient))
	}

	for key, value := range opts.extraHeaders {
		openaiClientOptions = append(openaiClientOptions, option.WithHeader(key, value))
	}

	for extraKey, extraValue := range opts.extraBody {
		openaiClientOptions = append(openaiClientOptions, option.WithJSONSet(extraKey, extraValue))
	}

	return openai.NewClient(openaiClientOptions...)
}

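// convertMessages translates the internal message history into the OpenAI
// chat completion wire format. For Anthropic models served via OpenRouter it
// also attaches ephemeral cache_control markers so Anthropic prompt caching
// works through the OpenAI-compatible endpoint.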
func (o *openaiClient) convertMessages(messages []message.Message) (openaiMessages []openai.ChatCompletionMessageParamUnion) {
	isAnthropicModel := o.providerOptions.config.ID == string(catwalk.InferenceProviderOpenRouter) && strings.HasPrefix(o.Model().ID, "anthropic/")
	// Add the system message first.
	systemMessage := o.providerOptions.systemMessage
	if o.providerOptions.systemPromptPrefix != "" {
		systemMessage = o.providerOptions.systemPromptPrefix + "\n" + systemMessage
	}

	system := openai.SystemMessage(systemMessage)
	if isAnthropicModel && !o.providerOptions.disableCache {
		systemTextBlock := openai.ChatCompletionContentPartTextParam{Text: systemMessage}
		systemTextBlock.SetExtraFields(
			map[string]any{
				"cache_control": map[string]string{
					"type": "ephemeral",
				},
			},
		)
		content := []openai.ChatCompletionContentPartTextParam{systemTextBlock}
		system = openai.SystemMessage(content)
	}
	openaiMessages = append(openaiMessages, system)

	for i, msg := range messages {
		// Only the last two messages are cache candidates.
		cache := i > len(messages)-3
		switch msg.Role {
		case message.User:
			var content []openai.ChatCompletionContentPartUnionParam

			textBlock := openai.ChatCompletionContentPartTextParam{Text: msg.Content().String()}
			if cache && !o.providerOptions.disableCache && isAnthropicModel {
				textBlock.SetExtraFields(map[string]any{
					"cache_control": map[string]string{
						"type": "ephemeral",
					},
				})
			}
			content = append(content, openai.ChatCompletionContentPartUnionParam{OfText: &textBlock})
			hasBinaryContent := false
			for _, binaryContent := range msg.BinaryContent() {
				hasBinaryContent = true
				imageURL := openai.ChatCompletionContentPartImageImageURLParam{URL: binaryContent.String(catwalk.InferenceProviderOpenAI)}
				imageBlock := openai.ChatCompletionContentPartImageParam{ImageURL: imageURL}

				content = append(content, openai.ChatCompletionContentPartUnionParam{OfImageURL: &imageBlock})
			}
			// Send structured content parts only when images or cache markers
			// are involved; otherwise a plain string message suffices.
			if hasBinaryContent || (isAnthropicModel && !o.providerOptions.disableCache) {
				openaiMessages = append(openaiMessages, openai.UserMessage(content))
			} else {
				openaiMessages = append(openaiMessages, openai.UserMessage(msg.Content().String()))
			}

		case message.Assistant:
			assistantMsg := openai.ChatCompletionAssistantMessageParam{
				Role: "assistant",
			}

			hasContent := false
			if msg.Content().String() != "" {
				hasContent = true
				if isAnthropicModel {
					// Anthropic models need array-of-parts content so that
					// cache_control extra fields can be attached.
					textBlock := openai.ChatCompletionContentPartTextParam{Text: msg.Content().String()}
					if cache && !o.providerOptions.disableCache {
						textBlock.SetExtraFields(map[string]any{
							"cache_control": map[string]string{
								"type": "ephemeral",
							},
						})
					}
					assistantMsg.Content = openai.ChatCompletionAssistantMessageParamContentUnion{
						OfArrayOfContentParts: []openai.ChatCompletionAssistantMessageParamContentArrayOfContentPartUnion{
							{
								OfText: &textBlock,
							},
						},
					}
				} else {
					assistantMsg.Content = openai.ChatCompletionAssistantMessageParamContentUnion{
						OfString: param.NewOpt(msg.Content().String()),
					}
				}
			}

			if len(msg.ToolCalls()) > 0 {
				hasContent = true
				assistantMsg.ToolCalls = make([]openai.ChatCompletionMessageToolCallParam, len(msg.ToolCalls()))
				for i, call := range msg.ToolCalls() {
					assistantMsg.ToolCalls[i] = openai.ChatCompletionMessageToolCallParam{
						ID:   call.ID,
						Type: "function",
						Function: openai.ChatCompletionMessageToolCallFunctionParam{
							Name:      call.Name,
							Arguments: call.Input,
						},
					}
				}
			}
			// Skip assistant messages with neither text nor tool calls.
			if !hasContent {
				continue
			}

			openaiMessages = append(openaiMessages, openai.ChatCompletionMessageParamUnion{
				OfAssistant: &assistantMsg,
			})

		case message.Tool:
			for _, result := range msg.ToolResults() {
				openaiMessages = append(openaiMessages,
					openai.ToolMessage(result.Content, result.ToolCallID),
				)
			}
		}
	}

	return
}

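// convertTools maps internal tool definitions onto OpenAI function-calling
// tool parameters.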
func (o *openaiClient) convertTools(tools []tools.BaseTool) []openai.ChatCompletionToolParam {
	openaiTools := make([]openai.ChatCompletionToolParam, len(tools))

	for i, tool := range tools {
		info := tool.Info()
		openaiTools[i] = openai.ChatCompletionToolParam{
			Function: openai.FunctionDefinitionParam{
				Name:        info.Name,
				Description: openai.String(info.Description),
				Parameters: openai.FunctionParameters{
					"type":       "object",
					"properties": info.Parameters,
					"required":   info.Required,
				},
			},
		}
	}

	return openaiTools
}

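// finishReason maps an OpenAI finish_reason string onto the internal
// message.FinishReason enum.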
func (o *openaiClient) finishReason(reason string) message.FinishReason {
	switch reason {
	case "stop":
		return message.FinishReasonEndTurn
	case "length":
		return message.FinishReasonMaxTokens
	case "tool_calls":
		return message.FinishReasonToolUse
	default:
		return message.FinishReasonUnknown
	}
}

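// preparedParams assembles the ChatCompletionNewParams shared by send and
// stream: model ID, messages, tools, token limits, and (for reasoning-capable
// models) the reasoning effort.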
func (o *openaiClient) preparedParams(messages []openai.ChatCompletionMessageParamUnion, tools []openai.ChatCompletionToolParam) openai.ChatCompletionNewParams {
	model := o.providerOptions.model(o.providerOptions.modelType)
	cfg := config.Get()

	modelConfig := cfg.Models[config.SelectedModelTypeLarge]
	if o.providerOptions.modelType == config.SelectedModelTypeSmall {
		modelConfig = cfg.Models[config.SelectedModelTypeSmall]
	}

	reasoningEffort := modelConfig.ReasoningEffort

	params := openai.ChatCompletionNewParams{
		Model:    openai.ChatModel(model.ID),
		Messages: messages,
		Tools:    tools,
	}

	maxTokens := model.DefaultMaxTokens
	if modelConfig.MaxTokens > 0 {
		maxTokens = modelConfig.MaxTokens
	}

	// A max tokens value in the provider options overrides the model configuration.
	if o.providerOptions.maxTokens > 0 {
		maxTokens = o.providerOptions.maxTokens
	}
	// Reasoning-capable models take max_completion_tokens and a reasoning
	// effort; other models take the plain max_tokens limit.
	if model.CanReason {
		params.MaxCompletionTokens = openai.Int(maxTokens)
		switch reasoningEffort {
		case "low":
			params.ReasoningEffort = shared.ReasoningEffortLow
		case "medium":
			params.ReasoningEffort = shared.ReasoningEffortMedium
		case "high":
			params.ReasoningEffort = shared.ReasoningEffortHigh
		default:
			// Pass through values the SDK has no constant for (e.g. "minimal").
			params.ReasoningEffort = shared.ReasoningEffort(reasoningEffort)
		}
	} else {
		params.MaxTokens = openai.Int(maxTokens)
	}

	return params
}

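// send performs a single (non-streaming) chat completion request, retrying
// transient failures with backoff via shouldRetry.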
func (o *openaiClient) send(ctx context.Context, messages []message.Message, tools []tools.BaseTool) (response *ProviderResponse, err error) {
	params := o.preparedParams(o.convertMessages(messages), o.convertTools(tools))
	attempts := 0
	for {
		attempts++
		openaiResponse, err := o.client.Chat.Completions.New(
			ctx,
			params,
		)
		// On error, check whether the call can be retried.
		if err != nil {
			retry, after, retryErr := o.shouldRetry(attempts, err)
			if retryErr != nil {
				return nil, retryErr
			}
			if retry {
				slog.Warn("Retrying due to rate limit", "attempt", attempts, "max_retries", maxRetries, "error", err)
				select {
				case <-ctx.Done():
					return nil, ctx.Err()
				case <-time.After(time.Duration(after) * time.Millisecond):
					continue
				}
			}
			return nil, err
		}

		if len(openaiResponse.Choices) == 0 {
			return nil, fmt.Errorf("received empty response from OpenAI API - check endpoint configuration")
		}

		content := openaiResponse.Choices[0].Message.Content

		toolCalls := o.toolCalls(*openaiResponse)
		finishReason := o.finishReason(string(openaiResponse.Choices[0].FinishReason))

		if len(toolCalls) > 0 {
			finishReason = message.FinishReasonToolUse
		}

		return &ProviderResponse{
			Content:      content,
			ToolCalls:    toolCalls,
			Usage:        o.usage(*openaiResponse),
			FinishReason: finishReason,
		}, nil
	}
}

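// stream opens a streaming chat completion and emits ProviderEvents on the
// returned channel, which is closed when the stream completes or fails. A
// minimal consumer sketch (hypothetical caller, not part of this package):
//
//	for event := range client.stream(ctx, msgs, tools) {
//		switch event.Type {
//		case EventThinkingDelta:
//			// reasoning tokens in event.Thinking
//		case EventContentDelta:
//			// text tokens in event.Content
//		case EventToolUseStart:
//			// a tool call began; details in event.ToolCall
//		case EventComplete:
//			// final response in event.Response
//		case EventError:
//			// terminal failure in event.Error
//		}
//	}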
func (o *openaiClient) stream(ctx context.Context, messages []message.Message, tools []tools.BaseTool) <-chan ProviderEvent {
	params := o.preparedParams(o.convertMessages(messages), o.convertTools(tools))
	params.StreamOptions = openai.ChatCompletionStreamOptionsParam{
		IncludeUsage: openai.Bool(true),
	}

	attempts := 0
	eventChan := make(chan ProviderEvent)

	go func() {
		for {
			attempts++
			// Kujtim: fixes an issue with Anthropic models on OpenRouter.
			if len(params.Tools) == 0 {
				params.Tools = nil
			}
			openaiStream := o.client.Chat.Completions.NewStreaming(
				ctx,
				params,
			)

			acc := openai.ChatCompletionAccumulator{}
			currentContent := ""
			toolCalls := make([]message.ToolCall, 0)
			var msgToolCalls []openai.ChatCompletionMessageToolCall
			for openaiStream.Next() {
				chunk := openaiStream.Current()
				if len(chunk.Choices) == 0 {
					continue
				}
				// Kujtim: OpenRouter's Qwen models sometimes send -1 as the tool index.
				if len(chunk.Choices[0].Delta.ToolCalls) > 0 && chunk.Choices[0].Delta.ToolCalls[0].Index == -1 {
					chunk.Choices[0].Delta.ToolCalls[0].Index = 0
				}
				acc.AddChunk(chunk)
				for i, choice := range chunk.Choices {
					reasoning, ok := choice.Delta.JSON.ExtraFields["reasoning"]
					if ok && reasoning.Raw() != "" {
						// Best-effort decode; reasoningStr stays empty on failure.
						reasoningStr := ""
						_ = json.Unmarshal([]byte(reasoning.Raw()), &reasoningStr)
						if reasoningStr != "" {
							eventChan <- ProviderEvent{
								Type:     EventThinkingDelta,
								Thinking: reasoningStr,
							}
						}
					}
					if choice.Delta.Content != "" {
						eventChan <- ProviderEvent{
							Type:    EventContentDelta,
							Content: choice.Delta.Content,
						}
						currentContent += choice.Delta.Content
					} else if len(choice.Delta.ToolCalls) > 0 {
						toolCall := choice.Delta.ToolCalls[0]
						newToolCall := false
						if len(msgToolCalls)-1 >= int(toolCall.Index) { // a tool call already exists at this index
							existingToolCall := msgToolCalls[toolCall.Index]
							if toolCall.ID != "" && toolCall.ID != existingToolCall.ID {
								// The delta carries a different ID: try to find
								// the matching tool call by ID instead.
								found := false
								for i, tool := range msgToolCalls {
									if tool.ID == toolCall.ID {
										msgToolCalls[i].Function.Arguments += toolCall.Function.Arguments
										found = true
									}
								}
								if !found {
									newToolCall = true
								}
							} else {
								msgToolCalls[toolCall.Index].Function.Arguments += toolCall.Function.Arguments
							}
						} else {
							newToolCall = true
						}
						if newToolCall {
							// Some providers omit IDs for streamed tool calls; synthesize one.
							if toolCall.ID == "" {
								toolCall.ID = uuid.NewString()
							}
							eventChan <- ProviderEvent{
								Type: EventToolUseStart,
								ToolCall: &message.ToolCall{
									ID:       toolCall.ID,
									Name:     toolCall.Function.Name,
									Finished: false,
								},
							}
							msgToolCalls = append(msgToolCalls, openai.ChatCompletionMessageToolCall{
								ID:   toolCall.ID,
								Type: "function",
								Function: openai.ChatCompletionMessageToolCallFunction{
									Name:      toolCall.Function.Name,
									Arguments: toolCall.Function.Arguments,
								},
							})
						}
					}
					acc.Choices[i].Message.ToolCalls = slices.Clone(msgToolCalls)
				}
			}

			err := openaiStream.Err()
			if err == nil || errors.Is(err, io.EOF) {
				if len(acc.Choices) == 0 {
					eventChan <- ProviderEvent{
						Type:  EventError,
						Error: fmt.Errorf("received empty streaming response from OpenAI API - check endpoint configuration"),
					}
					close(eventChan)
					return
				}

				resultFinishReason := acc.Choices[0].FinishReason
				if resultFinishReason == "" {
					// An empty finish reason is treated as a successful completion.
					// INFO: OpenRouter omits it for some reason.
					resultFinishReason = "stop"
				}
				// Stream completed successfully.
				finishReason := o.finishReason(resultFinishReason)
				if len(acc.Choices[0].Message.ToolCalls) > 0 {
					toolCalls = append(toolCalls, o.toolCalls(acc.ChatCompletion)...)
				}
				if len(toolCalls) > 0 {
					finishReason = message.FinishReasonToolUse
				}

				eventChan <- ProviderEvent{
					Type: EventComplete,
					Response: &ProviderResponse{
						Content:      currentContent,
						ToolCalls:    toolCalls,
						Usage:        o.usage(acc.ChatCompletion),
						FinishReason: finishReason,
					},
				}
				close(eventChan)
				return
			}

			// On error, check whether the call can be retried.
			retry, after, retryErr := o.shouldRetry(attempts, err)
			if retryErr != nil {
				eventChan <- ProviderEvent{Type: EventError, Error: retryErr}
				close(eventChan)
				return
			}
			if retry {
				slog.Warn("Retrying due to rate limit", "attempt", attempts, "max_retries", maxRetries, "error", err)
				select {
				case <-ctx.Done():
					// Context cancelled mid-retry: surface the cancellation.
					if ctx.Err() != nil {
						eventChan <- ProviderEvent{Type: EventError, Error: ctx.Err()}
					}
					close(eventChan)
					return
				case <-time.After(time.Duration(after) * time.Millisecond):
					continue
				}
			}
			eventChan <- ProviderEvent{Type: EventError, Error: err}
			close(eventChan)
			return
		}
	}()

	return eventChan
}

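// shouldRetry reports whether a failed request should be retried and, if so,
// after how many milliseconds. It refreshes the API key and rebuilds the
// client on 401 responses, honors Retry-After headers, and otherwise applies
// exponential backoff. A non-nil error means give up and surface it.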
func (o *openaiClient) shouldRetry(attempts int, err error) (bool, int64, error) {
	if attempts > maxRetries {
		return false, 0, fmt.Errorf("maximum retry attempts reached for rate limit: %d retries", maxRetries)
	}
	if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) {
		return false, 0, err
	}
	var apiErr *openai.Error
	retryMs := 0
	retryAfterValues := []string{}
	if errors.As(err, &apiErr) {
		// A 401 likely means the token expired: re-resolve the API key,
		// rebuild the client, and retry immediately.
		if apiErr.StatusCode == 401 {
			o.providerOptions.apiKey, err = config.Get().Resolve(o.providerOptions.config.APIKey)
			if err != nil {
				return false, 0, fmt.Errorf("failed to resolve API key: %w", err)
			}
			o.client = createOpenAIClient(o.providerOptions)
			return true, 0, nil
		}

		// Only rate limits (429) and server errors (500) are retryable.
		if apiErr.StatusCode != 429 && apiErr.StatusCode != 500 {
			return false, 0, err
		}

		retryAfterValues = apiErr.Response.Header.Values("Retry-After")
	}

	if apiErr != nil {
		slog.Warn("OpenAI API error", "status_code", apiErr.StatusCode, "message", apiErr.Message, "type", apiErr.Type)
		if len(retryAfterValues) > 0 {
			slog.Warn("Retry-After header", "values", retryAfterValues)
		}
	} else {
		slog.Error("OpenAI API error", "error", err.Error(), "attempt", attempts, "max_retries", maxRetries)
	}

	// Exponential backoff (2s, 4s, 8s, ...) plus a flat 20% margin; a
	// Retry-After header (given in seconds) overrides the computed delay.
	backoffMs := 2000 * (1 << (attempts - 1))
	jitterMs := int(float64(backoffMs) * 0.2)
	retryMs = backoffMs + jitterMs
	if len(retryAfterValues) > 0 {
		if _, err := fmt.Sscanf(retryAfterValues[0], "%d", &retryMs); err == nil {
			retryMs = retryMs * 1000
		}
	}
	return true, int64(retryMs), nil
}

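// toolCalls extracts the tool calls from the first choice of a completion.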
func (o *openaiClient) toolCalls(completion openai.ChatCompletion) []message.ToolCall {
	var toolCalls []message.ToolCall

	if len(completion.Choices) > 0 && len(completion.Choices[0].Message.ToolCalls) > 0 {
		for _, call := range completion.Choices[0].Message.ToolCalls {
			toolCall := message.ToolCall{
				ID:       call.ID,
				Name:     call.Function.Name,
				Input:    call.Function.Arguments,
				Type:     "function",
				Finished: true,
			}
			toolCalls = append(toolCalls, toolCall)
		}
	}

	return toolCalls
}

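// usage converts OpenAI token accounting into the internal TokenUsage shape.
// Cached prompt tokens are reported separately, so they are subtracted from
// the input token count.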
func (o *openaiClient) usage(completion openai.ChatCompletion) TokenUsage {
	cachedTokens := completion.Usage.PromptTokensDetails.CachedTokens
	inputTokens := completion.Usage.PromptTokens - cachedTokens

	return TokenUsage{
		InputTokens:         inputTokens,
		OutputTokens:        completion.Usage.CompletionTokens,
		CacheCreationTokens: 0, // OpenAI doesn't provide this directly
		CacheReadTokens:     cachedTokens,
	}
}

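// Model returns the catwalk model currently selected in the provider options.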
func (o *openaiClient) Model() catwalk.Model {
	return o.providerOptions.model(o.providerOptions.modelType)
}