package provider

import (
	"context"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"log/slog"
	"net/http"
	"strings"
	"time"

	"github.com/charmbracelet/catwalk/pkg/catwalk"
	"github.com/charmbracelet/crush/internal/config"
	"github.com/charmbracelet/crush/internal/llm/tools"
	"github.com/charmbracelet/crush/internal/log"
	"github.com/charmbracelet/crush/internal/message"
	"github.com/google/uuid"
	"github.com/openai/openai-go"
	"github.com/openai/openai-go/option"
	"github.com/openai/openai-go/packages/param"
	"github.com/openai/openai-go/shared"
)

type openaiClient struct {
	providerOptions providerClientOptions
	client          openai.Client
}

type OpenAIClient ProviderClient

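// newOpenAIClient returns a ProviderClient that talks to the OpenAI chat
// completions API (or any OpenAI-compatible endpoint configured via baseURL).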
func newOpenAIClient(opts providerClientOptions) OpenAIClient {
	return &openaiClient{
		providerOptions: opts,
		client:          createOpenAIClient(opts),
	}
}

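// createOpenAIClient builds the underlying SDK client, wiring in the API key,
// the resolved base URL, a debug-logging HTTP client when enabled, and any
// extra headers or JSON body fields from the provider options.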
func createOpenAIClient(opts providerClientOptions) openai.Client {
	openaiClientOptions := []option.RequestOption{}
	if opts.apiKey != "" {
		openaiClientOptions = append(openaiClientOptions, option.WithAPIKey(opts.apiKey))
	}
	if opts.baseURL != "" {
		resolvedBaseURL, err := opts.resolver.ResolveValue(opts.baseURL)
		if err == nil && resolvedBaseURL != "" {
			openaiClientOptions = append(openaiClientOptions, option.WithBaseURL(resolvedBaseURL))
		}
	}

	if opts.cfg.Options.Debug {
		httpClient := log.NewHTTPClient()
		openaiClientOptions = append(openaiClientOptions, option.WithHTTPClient(httpClient))
	}

	for key, value := range opts.extraHeaders {
		openaiClientOptions = append(openaiClientOptions, option.WithHeader(key, value))
	}

	for extraKey, extraValue := range opts.extraBody {
		openaiClientOptions = append(openaiClientOptions, option.WithJSONSet(extraKey, extraValue))
	}

	return openai.NewClient(openaiClientOptions...)
}

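// convertMessages translates internal messages into OpenAI chat messages,
// prepending the system prompt. For Anthropic models served through
// OpenRouter it also attaches ephemeral cache_control markers to the system
// prompt and the most recent messages.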
func (o *openaiClient) convertMessages(messages []message.Message) (openaiMessages []openai.ChatCompletionMessageParamUnion) {
	isAnthropicModel := o.providerOptions.config.ID == string(catwalk.InferenceProviderOpenRouter) && strings.HasPrefix(o.Model().ID, "anthropic/")
	// Add system message first
	systemMessage := o.providerOptions.systemMessage
	if o.providerOptions.systemPromptPrefix != "" {
		systemMessage = o.providerOptions.systemPromptPrefix + "\n" + systemMessage
	}

	system := openai.SystemMessage(systemMessage)
	if isAnthropicModel && !o.providerOptions.disableCache {
		systemTextBlock := openai.ChatCompletionContentPartTextParam{Text: systemMessage}
		systemTextBlock.SetExtraFields(
			map[string]any{
				"cache_control": map[string]string{
					"type": "ephemeral",
				},
			},
		)
		var content []openai.ChatCompletionContentPartTextParam
		content = append(content, systemTextBlock)
		system = openai.SystemMessage(content)
	}
	openaiMessages = append(openaiMessages, system)

	for i, msg := range messages {
		// Mark the last two messages as cacheable.
		cache := i > len(messages)-3
		switch msg.Role {
		case message.User:
			var content []openai.ChatCompletionContentPartUnionParam

			textBlock := openai.ChatCompletionContentPartTextParam{Text: msg.Content().String()}
			content = append(content, openai.ChatCompletionContentPartUnionParam{OfText: &textBlock})
			hasBinaryContent := false
			for _, binaryContent := range msg.BinaryContent() {
				hasBinaryContent = true
				imageURL := openai.ChatCompletionContentPartImageImageURLParam{URL: binaryContent.String(catwalk.InferenceProviderOpenAI)}
				imageBlock := openai.ChatCompletionContentPartImageParam{ImageURL: imageURL}

				content = append(content, openai.ChatCompletionContentPartUnionParam{OfImageURL: &imageBlock})
			}
			if cache && !o.providerOptions.disableCache && isAnthropicModel {
				textBlock.SetExtraFields(map[string]any{
					"cache_control": map[string]string{
						"type": "ephemeral",
					},
				})
			}
			if hasBinaryContent || (isAnthropicModel && !o.providerOptions.disableCache) {
				openaiMessages = append(openaiMessages, openai.UserMessage(content))
			} else {
				openaiMessages = append(openaiMessages, openai.UserMessage(msg.Content().String()))
			}

		case message.Assistant:
			assistantMsg := openai.ChatCompletionAssistantMessageParam{
				Role: "assistant",
			}

			// Only include finished tool calls; interrupted tool calls must not be resent.
			if len(msg.ToolCalls()) > 0 {
				finished := make([]message.ToolCall, 0, len(msg.ToolCalls()))
				for _, call := range msg.ToolCalls() {
					if call.Finished {
						finished = append(finished, call)
					}
				}
				if len(finished) > 0 {
					assistantMsg.ToolCalls = make([]openai.ChatCompletionMessageToolCallParam, len(finished))
					for i, call := range finished {
						assistantMsg.ToolCalls[i] = openai.ChatCompletionMessageToolCallParam{
							ID:   call.ID,
							Type: "function",
							Function: openai.ChatCompletionMessageToolCallFunctionParam{
								Name:      call.Name,
								Arguments: call.Input,
							},
						}
					}
				}
			}
			if msg.Content().String() != "" {
				assistantMsg.Content = openai.ChatCompletionAssistantMessageParamContentUnion{
					OfString: param.NewOpt(msg.Content().Text),
				}
			}

			if cache && !o.providerOptions.disableCache && isAnthropicModel {
				assistantMsg.SetExtraFields(map[string]any{
					"cache_control": map[string]string{
						"type": "ephemeral",
					},
				})
			}
			// Skip empty assistant messages (no content and no finished tool calls)
			if msg.Content().String() == "" && len(assistantMsg.ToolCalls) == 0 {
				continue
			}

			openaiMessages = append(openaiMessages, openai.ChatCompletionMessageParamUnion{
				OfAssistant: &assistantMsg,
			})

		case message.Tool:
			for _, result := range msg.ToolResults() {
				openaiMessages = append(openaiMessages,
					openai.ToolMessage(result.Content, result.ToolCallID),
				)
			}
		}
	}

	return openaiMessages
}

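// convertTools maps internal tool definitions onto OpenAI function
// declarations with JSON Schema parameter objects.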
func (o *openaiClient) convertTools(tools []tools.BaseTool) []openai.ChatCompletionToolParam {
	openaiTools := make([]openai.ChatCompletionToolParam, len(tools))

	for i, tool := range tools {
		info := tool.Info()
		openaiTools[i] = openai.ChatCompletionToolParam{
			Function: openai.FunctionDefinitionParam{
				Name:        info.Name,
				Description: openai.String(info.Description),
				Parameters: openai.FunctionParameters{
					"type":       "object",
					"properties": info.Parameters,
					"required":   info.Required,
				},
			},
		}
	}

	return openaiTools
}

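// finishReason normalizes an API finish reason into the internal enum.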
func (o *openaiClient) finishReason(reason string) message.FinishReason {
	switch reason {
	case "stop":
		return message.FinishReasonEndTurn
	case "length":
		return message.FinishReasonMaxTokens
	case "tool_calls":
		return message.FinishReasonToolUse
	default:
		return message.FinishReasonUnknown
	}
}

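// preparedParams assembles the chat completion request parameters: model,
// messages, tools, the max token budget, and, for models that can reason,
// the configured reasoning effort.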
func (o *openaiClient) preparedParams(messages []openai.ChatCompletionMessageParamUnion, tools []openai.ChatCompletionToolParam) openai.ChatCompletionNewParams {
	model := o.providerOptions.model(o.providerOptions.modelType)
	cfg := o.providerOptions.cfg

	modelConfig := cfg.Models[config.SelectedModelTypeLarge]
	if o.providerOptions.modelType == config.SelectedModelTypeSmall {
		modelConfig = cfg.Models[config.SelectedModelTypeSmall]
	}

	reasoningEffort := modelConfig.ReasoningEffort

	params := openai.ChatCompletionNewParams{
		Model:    openai.ChatModel(model.ID),
		Messages: messages,
		Tools:    tools,
	}

	maxTokens := model.DefaultMaxTokens
	if modelConfig.MaxTokens > 0 {
		maxTokens = modelConfig.MaxTokens
	}

	// Override max tokens if set in provider options
	if o.providerOptions.maxTokens > 0 {
		maxTokens = o.providerOptions.maxTokens
	}
	if model.CanReason {
		params.MaxCompletionTokens = openai.Int(maxTokens)
		switch reasoningEffort {
		case "low":
			params.ReasoningEffort = shared.ReasoningEffortLow
		case "medium":
			params.ReasoningEffort = shared.ReasoningEffortMedium
		case "high":
			params.ReasoningEffort = shared.ReasoningEffortHigh
		case "minimal":
			params.ReasoningEffort = shared.ReasoningEffort("minimal")
		default:
			params.ReasoningEffort = shared.ReasoningEffort(reasoningEffort)
		}
	} else {
		params.MaxTokens = openai.Int(maxTokens)
	}

	return params
}

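// send performs a single non-streaming chat completion request, retrying
// with backoff when shouldRetry deems the error transient.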
func (o *openaiClient) send(ctx context.Context, messages []message.Message, tools []tools.BaseTool) (response *ProviderResponse, err error) {
	params := o.preparedParams(o.convertMessages(messages), o.convertTools(tools))
	attempts := 0
	for {
		attempts++
		openaiResponse, err := o.client.Chat.Completions.New(
			ctx,
			params,
		)
		// If there is an error, check whether the call can be retried.
		if err != nil {
			retry, after, retryErr := o.shouldRetry(attempts, err)
			if retryErr != nil {
				return nil, retryErr
			}
			if retry {
				slog.Warn("Retrying due to rate limit", "attempt", attempts, "max_retries", maxRetries, "error", err)
				select {
				case <-ctx.Done():
					return nil, ctx.Err()
				case <-time.After(time.Duration(after) * time.Millisecond):
					continue
				}
			}
			// Not retryable: surface the original error.
			return nil, err
		}

		if len(openaiResponse.Choices) == 0 {
			return nil, fmt.Errorf("received empty response from OpenAI API - check endpoint configuration")
		}

		content := ""
		if openaiResponse.Choices[0].Message.Content != "" {
			content = openaiResponse.Choices[0].Message.Content
		}

		toolCalls := o.toolCalls(*openaiResponse)
		finishReason := o.finishReason(string(openaiResponse.Choices[0].FinishReason))

		if len(toolCalls) > 0 {
			finishReason = message.FinishReasonToolUse
		}

		return &ProviderResponse{
			Content:      content,
			ToolCalls:    toolCalls,
			Usage:        o.usage(*openaiResponse),
			FinishReason: finishReason,
		}, nil
	}
}

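// stream performs a streaming chat completion request and emits provider
// events (thinking and content deltas, tool-call starts, completion, errors)
// on the returned channel, which is closed once the stream finishes or fails
// permanently.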
func (o *openaiClient) stream(ctx context.Context, messages []message.Message, tools []tools.BaseTool) <-chan ProviderEvent {
	params := o.preparedParams(o.convertMessages(messages), o.convertTools(tools))
	params.StreamOptions = openai.ChatCompletionStreamOptionsParam{
		IncludeUsage: openai.Bool(true),
	}

	attempts := 0
	eventChan := make(chan ProviderEvent)

	go func() {
		for {
			attempts++
			// Kujtim: fixes an issue with Anthropic models on OpenRouter.
			if len(params.Tools) == 0 {
				params.Tools = nil
			}
			openaiStream := o.client.Chat.Completions.NewStreaming(
				ctx,
				params,
			)

			acc := openai.ChatCompletionAccumulator{}
			currentContent := ""
			toolCalls := make([]message.ToolCall, 0)
			msgToolCalls := make(map[int64]openai.ChatCompletionMessageToolCall)
			toolMap := make(map[string]openai.ChatCompletionMessageToolCall)
			toolCallIDMap := make(map[string]string)
			for openaiStream.Next() {
				chunk := openaiStream.Current()
				// Kujtim: works around an OpenRouter Qwen issue where the tool index is sent as -1.
				if len(chunk.Choices) != 0 && len(chunk.Choices[0].Delta.ToolCalls) > 0 && chunk.Choices[0].Delta.ToolCalls[0].Index == -1 {
					chunk.Choices[0].Delta.ToolCalls[0].Index = 0
				}
				acc.AddChunk(chunk)
				for i, choice := range chunk.Choices {
					reasoning, ok := choice.Delta.JSON.ExtraFields["reasoning"]
					if ok && reasoning.Raw() != "" {
						var reasoningStr string
						// The reasoning field is a JSON string; skip it if it fails to decode.
						if err := json.Unmarshal([]byte(reasoning.Raw()), &reasoningStr); err == nil && reasoningStr != "" {
							eventChan <- ProviderEvent{
								Type:     EventThinkingDelta,
								Thinking: reasoningStr,
							}
						}
					}
					if choice.Delta.Content != "" {
						eventChan <- ProviderEvent{
							Type:    EventContentDelta,
							Content: choice.Delta.Content,
						}
						currentContent += choice.Delta.Content
					} else if len(choice.Delta.ToolCalls) > 0 {
						toolCall := choice.Delta.ToolCalls[0]
						// Remap "functions."-prefixed IDs to generated UUIDs, keeping the mapping stable across chunks.
						if strings.HasPrefix(toolCall.ID, "functions.") {
							exID, ok := toolCallIDMap[toolCall.ID]
							if !ok {
								newID := uuid.NewString()
								toolCallIDMap[toolCall.ID] = newID
								toolCall.ID = newID
							} else {
								toolCall.ID = exID
							}
						}
						newToolCall := false
						if existingToolCall, ok := msgToolCalls[toolCall.Index]; ok { // tool call exists
							if toolCall.ID != "" && toolCall.ID != existingToolCall.ID {
								found := false
								// Try to find the tool call by its ID.
								for _, tool := range msgToolCalls {
									if tool.ID == toolCall.ID {
										existingToolCall.Function.Arguments += toolCall.Function.Arguments
										msgToolCalls[toolCall.Index] = existingToolCall
										toolMap[existingToolCall.ID] = existingToolCall
										found = true
									}
								}
								if !found {
									newToolCall = true
								}
							} else {
								existingToolCall.Function.Arguments += toolCall.Function.Arguments
								msgToolCalls[toolCall.Index] = existingToolCall
								toolMap[existingToolCall.ID] = existingToolCall
							}
						} else {
							newToolCall = true
						}
						if newToolCall { // new tool call
							if toolCall.ID == "" {
								toolCall.ID = uuid.NewString()
							}
							eventChan <- ProviderEvent{
								Type: EventToolUseStart,
								ToolCall: &message.ToolCall{
									ID:       toolCall.ID,
									Name:     toolCall.Function.Name,
									Finished: false,
								},
							}
							msgToolCalls[toolCall.Index] = openai.ChatCompletionMessageToolCall{
								ID:   toolCall.ID,
								Type: "function",
								Function: openai.ChatCompletionMessageToolCallFunction{
									Name:      toolCall.Function.Name,
									Arguments: toolCall.Function.Arguments,
								},
							}
							toolMap[toolCall.ID] = msgToolCalls[toolCall.Index]
						}
						toolCalls := []openai.ChatCompletionMessageToolCall{}
						for _, tc := range toolMap {
							toolCalls = append(toolCalls, tc)
						}
						acc.Choices[i].Message.ToolCalls = toolCalls
					}
				}
			}

			err := openaiStream.Err()
			if err == nil || errors.Is(err, io.EOF) {
				if len(acc.Choices) == 0 {
					eventChan <- ProviderEvent{
						Type:  EventError,
						Error: fmt.Errorf("received empty streaming response from OpenAI API - check endpoint configuration"),
					}
					return
				}

				resultFinishReason := acc.Choices[0].FinishReason
				if resultFinishReason == "" {
					// If the finish reason is empty, assume the completion succeeded.
					// INFO: OpenRouter sometimes omits the finish reason.
					resultFinishReason = "stop"
				}
				// Stream completed successfully.
				finishReason := o.finishReason(resultFinishReason)
				if len(acc.Choices[0].Message.ToolCalls) > 0 {
					toolCalls = append(toolCalls, o.toolCalls(acc.ChatCompletion)...)
				}
				if len(toolCalls) > 0 {
					finishReason = message.FinishReasonToolUse
				}

				eventChan <- ProviderEvent{
					Type: EventComplete,
					Response: &ProviderResponse{
						Content:      currentContent,
						ToolCalls:    toolCalls,
						Usage:        o.usage(acc.ChatCompletion),
						FinishReason: finishReason,
					},
				}
				close(eventChan)
				return
			}

			// If there is an error, check whether the call can be retried.
			retry, after, retryErr := o.shouldRetry(attempts, err)
			if retryErr != nil {
				eventChan <- ProviderEvent{Type: EventError, Error: retryErr}
				close(eventChan)
				return
			}
			if retry {
				slog.Warn("Retrying due to rate limit", "attempt", attempts, "max_retries", maxRetries, "error", err)
				select {
				case <-ctx.Done():
					// Context cancelled.
					if ctx.Err() != nil {
						eventChan <- ProviderEvent{Type: EventError, Error: ctx.Err()}
					}
					close(eventChan)
					return
				case <-time.After(time.Duration(after) * time.Millisecond):
					continue
				}
			}
			// Not retryable: surface the original error.
			eventChan <- ProviderEvent{Type: EventError, Error: err}
			close(eventChan)
			return
		}
	}()

	return eventChan
}

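// shouldRetry reports whether a failed request should be retried and how long
// to wait (in milliseconds) first. A 401 triggers re-resolution of the API
// key (it may come from a script); 429 and 500 responses are retried with
// exponential backoff, honoring a Retry-After header when present. Other API
// errors are returned as-is.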
func (o *openaiClient) shouldRetry(attempts int, err error) (bool, int64, error) {
	if attempts > maxRetries {
		return false, 0, fmt.Errorf("maximum retry attempts reached for rate limit: %d retries", maxRetries)
	}
	if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) {
		return false, 0, err
	}
	var apiErr *openai.Error
	retryMs := 0
	retryAfterValues := []string{}
	if errors.As(err, &apiErr) {
		// Check for token expiration (401 Unauthorized)
		if apiErr.StatusCode == http.StatusUnauthorized {
			prev := o.providerOptions.apiKey
			// in case the key comes from a script, we try to re-evaluate it.
			o.providerOptions.apiKey, err = o.providerOptions.resolver.ResolveValue(o.providerOptions.config.APIKey)
			if err != nil {
				return false, 0, fmt.Errorf("failed to resolve API key: %w", err)
			}
			// if it didn't change, do not retry.
			if prev == o.providerOptions.apiKey {
				return false, 0, err
			}
			o.client = createOpenAIClient(o.providerOptions)
			return true, 0, nil
		}

		if apiErr.StatusCode != http.StatusTooManyRequests && apiErr.StatusCode != http.StatusInternalServerError {
			return false, 0, err
		}

		retryAfterValues = apiErr.Response.Header.Values("Retry-After")
	}

	if apiErr != nil {
		slog.Warn("OpenAI API error", "status_code", apiErr.StatusCode, "message", apiErr.Message, "type", apiErr.Type)
		if len(retryAfterValues) > 0 {
			slog.Warn("Retry-After header", "values", retryAfterValues)
		}
	} else {
		slog.Error("OpenAI API error", "error", err.Error(), "attempt", attempts, "max_retries", maxRetries)
	}

	// Exponential backoff starting at 2s, padded by a fixed 20%.
	backoffMs := 2000 * (1 << (attempts - 1))
	jitterMs := int(float64(backoffMs) * 0.2)
	retryMs = backoffMs + jitterMs
	if len(retryAfterValues) > 0 {
		// Retry-After is given in seconds; convert to milliseconds.
		if _, err := fmt.Sscanf(retryAfterValues[0], "%d", &retryMs); err == nil {
			retryMs = retryMs * 1000
		}
	}
	return true, int64(retryMs), nil
}

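// toolCalls extracts the tool calls from the first choice of a completion,
// skipping empty entries the accumulator may produce.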
func (o *openaiClient) toolCalls(completion openai.ChatCompletion) []message.ToolCall {
	var toolCalls []message.ToolCall

	if len(completion.Choices) > 0 && len(completion.Choices[0].Message.ToolCalls) > 0 {
		for _, call := range completion.Choices[0].Message.ToolCalls {
			// The accumulator can emit empty tool-call entries; skip them.
			if call.Function.Name == "" {
				continue
			}
			toolCall := message.ToolCall{
				ID:       call.ID,
				Name:     call.Function.Name,
				Input:    call.Function.Arguments,
				Type:     "function",
				Finished: true,
			}
			toolCalls = append(toolCalls, toolCall)
		}
	}

	return toolCalls
}

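// usage converts the API token counts into a TokenUsage, splitting cached
// prompt tokens out of the input token count.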
func (o *openaiClient) usage(completion openai.ChatCompletion) TokenUsage {
	cachedTokens := completion.Usage.PromptTokensDetails.CachedTokens
	inputTokens := completion.Usage.PromptTokens - cachedTokens

	return TokenUsage{
		InputTokens:         inputTokens,
		OutputTokens:        completion.Usage.CompletionTokens,
		CacheCreationTokens: 0, // OpenAI doesn't provide this directly
		CacheReadTokens:     cachedTokens,
	}
}

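// Model returns the catwalk model this client is configured to use.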
func (o *openaiClient) Model() catwalk.Model {
	return o.providerOptions.model(o.providerOptions.modelType)
}