package provider

import (
	"context"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"log/slog"
	"regexp"
	"strconv"
	"strings"
	"time"

	"github.com/anthropics/anthropic-sdk-go"
	"github.com/anthropics/anthropic-sdk-go/bedrock"
	"github.com/anthropics/anthropic-sdk-go/option"
	"github.com/anthropics/anthropic-sdk-go/vertex"
	"github.com/charmbracelet/catwalk/pkg/catwalk"
	"github.com/charmbracelet/crush/internal/config"
	"github.com/charmbracelet/crush/internal/llm/tools"
	"github.com/charmbracelet/crush/internal/log"
	"github.com/charmbracelet/crush/internal/message"
)

// Pre-compiled regex for parsing context limit errors.
var contextLimitRegex = regexp.MustCompile(`input length and ` + "`max_tokens`" + ` exceed context limit: (\d+) \+ (\d+) > (\d+)`)

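// anthropicClient implements ProviderClient for the Anthropic Messages API,
// including the Bedrock- and Vertex-hosted variants.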
type anthropicClient struct {
	providerOptions   providerClientOptions
	tp                AnthropicClientType
	client            anthropic.Client
	adjustedMaxTokens int // Used when the context limit is hit.
}

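// AnthropicClient is the ProviderClient interface as exposed for
// Anthropic-backed providers.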
type AnthropicClient ProviderClient

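// AnthropicClientType selects which Anthropic-compatible backend the client
// talks to: the first-party API, AWS Bedrock, or Google Vertex AI.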
type AnthropicClientType string

const (
	AnthropicClientTypeNormal  AnthropicClientType = "normal"
	AnthropicClientTypeBedrock AnthropicClientType = "bedrock"
	AnthropicClientTypeVertex  AnthropicClientType = "vertex"
)

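// newAnthropicClient builds an AnthropicClient for the given backend type.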
func newAnthropicClient(opts providerClientOptions, tp AnthropicClientType) AnthropicClient {
	return &anthropicClient{
		providerOptions: opts,
		tp:              tp,
		client:          createAnthropicClient(opts, tp),
	}
}

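// createAnthropicClient assembles the SDK request options (auth, base URL,
// debug HTTP client, backend-specific config, and extra headers/body) and
// constructs the underlying anthropic.Client.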
func createAnthropicClient(opts providerClientOptions, tp AnthropicClientType) anthropic.Client {
	anthropicClientOptions := []option.RequestOption{}

	// Check whether an Authorization header was provided in the extra
	// headers. (Ranging over a nil map is a no-op, so no nil check is needed.)
	hasBearerAuth := false
	for key := range opts.extraHeaders {
		if strings.ToLower(key) == "authorization" {
			hasBearerAuth = true
			break
		}
	}

	isBearerToken := strings.HasPrefix(opts.apiKey, "Bearer ")

	if opts.apiKey != "" && !hasBearerAuth {
		if isBearerToken {
			slog.Debug("API key starts with 'Bearer ', using as Authorization header")
			anthropicClientOptions = append(anthropicClientOptions, option.WithHeader("Authorization", opts.apiKey))
		} else {
			// Use the standard X-Api-Key header.
			anthropicClientOptions = append(anthropicClientOptions, option.WithAPIKey(opts.apiKey))
		}
	} else if hasBearerAuth {
		slog.Debug("Skipping X-Api-Key header because Authorization header is provided")
	}

	if opts.baseURL != "" {
		resolvedBaseURL, err := config.Get().Resolve(opts.baseURL)
		if err == nil && resolvedBaseURL != "" {
			anthropicClientOptions = append(anthropicClientOptions, option.WithBaseURL(resolvedBaseURL))
		}
	}

	if config.Get().Options.Debug {
		httpClient := log.NewHTTPClient()
		anthropicClientOptions = append(anthropicClientOptions, option.WithHTTPClient(httpClient))
	}

	switch tp {
	case AnthropicClientTypeBedrock:
		anthropicClientOptions = append(anthropicClientOptions, bedrock.WithLoadDefaultConfig(context.Background()))
	case AnthropicClientTypeVertex:
		project := opts.extraParams["project"]
		location := opts.extraParams["location"]
		anthropicClientOptions = append(anthropicClientOptions, vertex.WithGoogleAuth(context.Background(), location, project))
	}
	for key, header := range opts.extraHeaders {
		anthropicClientOptions = append(anthropicClientOptions, option.WithHeaderAdd(key, header))
	}
	for key, value := range opts.extraBody {
		anthropicClientOptions = append(anthropicClientOptions, option.WithJSONSet(key, value))
	}
	return anthropic.NewClient(anthropicClientOptions...)
}

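// convertMessages translates the internal message history into Anthropic
// MessageParams. The last two messages are marked with ephemeral cache
// control (unless caching is disabled) so the prompt prefix can be reused.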
func (a *anthropicClient) convertMessages(messages []message.Message) (anthropicMessages []anthropic.MessageParam) {
	for i, msg := range messages {
		// Mark the last two messages as cacheable.
		cache := i > len(messages)-3
		switch msg.Role {
		case message.User:
			content := anthropic.NewTextBlock(msg.Content().String())
			if cache && !a.providerOptions.disableCache {
				content.OfText.CacheControl = anthropic.CacheControlEphemeralParam{
					Type: "ephemeral",
				}
			}
			var contentBlocks []anthropic.ContentBlockParamUnion
			contentBlocks = append(contentBlocks, content)
			for _, binaryContent := range msg.BinaryContent() {
				base64Image := binaryContent.String(catwalk.InferenceProviderAnthropic)
				imageBlock := anthropic.NewImageBlockBase64(binaryContent.MIMEType, base64Image)
				contentBlocks = append(contentBlocks, imageBlock)
			}
			anthropicMessages = append(anthropicMessages, anthropic.NewUserMessage(contentBlocks...))

		case message.Assistant:
			blocks := []anthropic.ContentBlockParamUnion{}

			// Add thinking blocks first if present (required when thinking is
			// enabled with tool use).
			if reasoningContent := msg.ReasoningContent(); reasoningContent.Thinking != "" {
				thinkingBlock := anthropic.NewThinkingBlock(reasoningContent.Signature, reasoningContent.Thinking)
				blocks = append(blocks, thinkingBlock)
			}

			if msg.Content().String() != "" {
				content := anthropic.NewTextBlock(msg.Content().String())
				if cache && !a.providerOptions.disableCache {
					content.OfText.CacheControl = anthropic.CacheControlEphemeralParam{
						Type: "ephemeral",
					}
				}
				blocks = append(blocks, content)
			}

			for _, toolCall := range msg.ToolCalls() {
				if !toolCall.Finished {
					continue
				}
				var inputMap map[string]any
				err := json.Unmarshal([]byte(toolCall.Input), &inputMap)
				if err != nil {
					// Skip tool calls whose input is not valid JSON.
					continue
				}
				blocks = append(blocks, anthropic.NewToolUseBlock(toolCall.ID, inputMap, toolCall.Name))
			}

			if len(blocks) == 0 {
				continue
			}
			anthropicMessages = append(anthropicMessages, anthropic.NewAssistantMessage(blocks...))

		case message.Tool:
			results := make([]anthropic.ContentBlockParamUnion, len(msg.ToolResults()))
			for i, toolResult := range msg.ToolResults() {
				results[i] = anthropic.NewToolResultBlock(toolResult.ToolCallID, toolResult.Content, toolResult.IsError)
			}
			anthropicMessages = append(anthropicMessages, anthropic.NewUserMessage(results...))
		}
	}
	return anthropicMessages
}

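// convertTools translates tool definitions into Anthropic tool params,
// attaching ephemeral cache control to the last tool so the (stable) tool
// list can be prompt-cached.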
func (a *anthropicClient) convertTools(tools []tools.BaseTool) []anthropic.ToolUnionParam {
	if len(tools) == 0 {
		return nil
	}
	anthropicTools := make([]anthropic.ToolUnionParam, len(tools))

	for i, tool := range tools {
		info := tool.Info()
		toolParam := anthropic.ToolParam{
			Name:        info.Name,
			Description: anthropic.String(info.Description),
			InputSchema: anthropic.ToolInputSchemaParam{
				Properties: info.Parameters,
				Required:   info.Required,
			},
		}

		if i == len(tools)-1 && !a.providerOptions.disableCache {
			toolParam.CacheControl = anthropic.CacheControlEphemeralParam{
				Type: "ephemeral",
			}
		}

		anthropicTools[i] = anthropic.ToolUnionParam{OfTool: &toolParam}
	}

	return anthropicTools
}

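// finishReason maps Anthropic stop reasons onto internal finish reasons;
// stop_sequence is treated the same as end_turn.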
func (a *anthropicClient) finishReason(reason string) message.FinishReason {
	switch reason {
	case "end_turn", "stop_sequence":
		return message.FinishReasonEndTurn
	case "max_tokens":
		return message.FinishReasonMaxTokens
	case "tool_use":
		return message.FinishReasonToolUse
	default:
		return message.FinishReasonUnknown
	}
}

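// isThinkingEnabled reports whether extended thinking should be requested:
// the model must support reasoning and the selected model config must have
// thinking turned on.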
func (a *anthropicClient) isThinkingEnabled() bool {
	cfg := config.Get()
	modelConfig := cfg.Models[config.SelectedModelTypeLarge]
	if a.providerOptions.modelType == config.SelectedModelTypeSmall {
		modelConfig = cfg.Models[config.SelectedModelTypeSmall]
	}
	return a.Model().CanReason && modelConfig.Think
}

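// preparedMessages builds the MessageNewParams for a request: it resolves
// max_tokens (model default, config override, provider override, then any
// context-limit adjustment), configures thinking when enabled, and assembles
// the cached system prompt blocks.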
func (a *anthropicClient) preparedMessages(messages []anthropic.MessageParam, tools []anthropic.ToolUnionParam) anthropic.MessageNewParams {
	model := a.providerOptions.model(a.providerOptions.modelType)
	var thinkingParam anthropic.ThinkingConfigParamUnion
	cfg := config.Get()
	modelConfig := cfg.Models[config.SelectedModelTypeLarge]
	if a.providerOptions.modelType == config.SelectedModelTypeSmall {
		modelConfig = cfg.Models[config.SelectedModelTypeSmall]
	}
	temperature := anthropic.Float(0)

	maxTokens := model.DefaultMaxTokens
	if modelConfig.MaxTokens > 0 {
		maxTokens = modelConfig.MaxTokens
	}
	if a.isThinkingEnabled() {
		// Give thinking 80% of the output budget; temperature is pinned to 1
		// while thinking is enabled.
		thinkingParam = anthropic.ThinkingConfigParamOfEnabled(int64(float64(maxTokens) * 0.8))
		temperature = anthropic.Float(1)
	}
	// Override max tokens if set in provider options.
	if a.providerOptions.maxTokens > 0 {
		maxTokens = a.providerOptions.maxTokens
	}

	// Use adjusted max tokens if the context limit was hit on a previous attempt.
	if a.adjustedMaxTokens > 0 {
		maxTokens = int64(a.adjustedMaxTokens)
	}

	systemBlocks := []anthropic.TextBlockParam{}

	// Add the custom system prompt prefix if configured.
	if a.providerOptions.systemPromptPrefix != "" {
		systemBlocks = append(systemBlocks, anthropic.TextBlockParam{
			Text: a.providerOptions.systemPromptPrefix,
		})
	}

	systemBlocks = append(systemBlocks, anthropic.TextBlockParam{
		Text: a.providerOptions.systemMessage,
		CacheControl: anthropic.CacheControlEphemeralParam{
			Type: "ephemeral",
		},
	})

	return anthropic.MessageNewParams{
		Model:       anthropic.Model(model.ID),
		MaxTokens:   maxTokens,
		Temperature: temperature,
		Messages:    messages,
		Tools:       tools,
		Thinking:    thinkingParam,
		System:      systemBlocks,
	}
}

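// send performs a single (non-streaming) Messages request, retrying on rate
// limits, overload, and recoverable context-limit errors.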
func (a *anthropicClient) send(ctx context.Context, messages []message.Message, tools []tools.BaseTool) (response *ProviderResponse, err error) {
	attempts := 0
	for {
		attempts++
		// Prepare messages on each attempt in case max_tokens was adjusted.
		preparedMessages := a.preparedMessages(a.convertMessages(messages), a.convertTools(tools))

		var opts []option.RequestOption
		if a.isThinkingEnabled() {
			opts = append(opts, option.WithHeaderAdd("anthropic-beta", "interleaved-thinking-2025-05-14"))
		}
		anthropicResponse, err := a.client.Messages.New(
			ctx,
			preparedMessages,
			opts...,
		)
		// On error, see whether the call can be retried.
		if err != nil {
			retry, after, retryErr := a.shouldRetry(attempts, err)
			if retryErr != nil {
				return nil, retryErr
			}
			if retry {
				slog.Warn("Retrying due to rate limit", "attempt", attempts, "max_retries", maxRetries, "error", err)
				select {
				case <-ctx.Done():
					return nil, ctx.Err()
				case <-time.After(time.Duration(after) * time.Millisecond):
					continue
				}
			}
			return nil, retryErr
		}

		content := ""
		for _, block := range anthropicResponse.Content {
			if text, ok := block.AsAny().(anthropic.TextBlock); ok {
				content += text.Text
			}
		}

		return &ProviderResponse{
			Content:   content,
			ToolCalls: a.toolCalls(*anthropicResponse),
			Usage:     a.usage(*anthropicResponse),
		}, nil
	}
}

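// stream performs a streaming Messages request and forwards SDK events as
// ProviderEvents on the returned channel, applying the same retry policy as
// send. The channel is closed when the stream ends or a fatal error occurs.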
func (a *anthropicClient) stream(ctx context.Context, messages []message.Message, tools []tools.BaseTool) <-chan ProviderEvent {
	attempts := 0
	eventChan := make(chan ProviderEvent)
	go func() {
		for {
			attempts++
			// Prepare messages on each attempt in case max_tokens was adjusted.
			preparedMessages := a.preparedMessages(a.convertMessages(messages), a.convertTools(tools))

			var opts []option.RequestOption
			if a.isThinkingEnabled() {
				opts = append(opts, option.WithHeaderAdd("anthropic-beta", "interleaved-thinking-2025-05-14"))
			}

			anthropicStream := a.client.Messages.NewStreaming(
				ctx,
				preparedMessages,
				opts...,
			)
			accumulatedMessage := anthropic.Message{}

			currentToolCallID := ""
			for anthropicStream.Next() {
				event := anthropicStream.Current()
				err := accumulatedMessage.Accumulate(event)
				if err != nil {
					slog.Warn("Error accumulating message", "error", err)
					continue
				}

				switch event := event.AsAny().(type) {
				case anthropic.ContentBlockStartEvent:
					switch event.ContentBlock.Type {
					case "text":
						eventChan <- ProviderEvent{Type: EventContentStart}
					case "tool_use":
						currentToolCallID = event.ContentBlock.ID
						eventChan <- ProviderEvent{
							Type: EventToolUseStart,
							ToolCall: &message.ToolCall{
								ID:       event.ContentBlock.ID,
								Name:     event.ContentBlock.Name,
								Finished: false,
							},
						}
					}

				case anthropic.ContentBlockDeltaEvent:
					if event.Delta.Type == "thinking_delta" && event.Delta.Thinking != "" {
						eventChan <- ProviderEvent{
							Type:     EventThinkingDelta,
							Thinking: event.Delta.Thinking,
						}
					} else if event.Delta.Type == "signature_delta" && event.Delta.Signature != "" {
						eventChan <- ProviderEvent{
							Type:      EventSignatureDelta,
							Signature: event.Delta.Signature,
						}
					} else if event.Delta.Type == "text_delta" && event.Delta.Text != "" {
						eventChan <- ProviderEvent{
							Type:    EventContentDelta,
							Content: event.Delta.Text,
						}
					} else if event.Delta.Type == "input_json_delta" {
						if currentToolCallID != "" {
							eventChan <- ProviderEvent{
								Type: EventToolUseDelta,
								ToolCall: &message.ToolCall{
									ID:       currentToolCallID,
									Finished: false,
									Input:    event.Delta.PartialJSON,
								},
							}
						}
					}
				case anthropic.ContentBlockStopEvent:
					if currentToolCallID != "" {
						eventChan <- ProviderEvent{
							Type: EventToolUseStop,
							ToolCall: &message.ToolCall{
								ID: currentToolCallID,
							},
						}
						currentToolCallID = ""
					} else {
						eventChan <- ProviderEvent{Type: EventContentStop}
					}

				case anthropic.MessageStopEvent:
					content := ""
					for _, block := range accumulatedMessage.Content {
						if text, ok := block.AsAny().(anthropic.TextBlock); ok {
							content += text.Text
						}
					}

					eventChan <- ProviderEvent{
						Type: EventComplete,
						Response: &ProviderResponse{
							Content:      content,
							ToolCalls:    a.toolCalls(accumulatedMessage),
							Usage:        a.usage(accumulatedMessage),
							FinishReason: a.finishReason(string(accumulatedMessage.StopReason)),
						},
						Content: content,
					}
				}
			}

			err := anthropicStream.Err()
			if err == nil || errors.Is(err, io.EOF) {
				close(eventChan)
				return
			}

			// On error, see whether the call can be retried.
			retry, after, retryErr := a.shouldRetry(attempts, err)
			if retryErr != nil {
				eventChan <- ProviderEvent{Type: EventError, Error: retryErr}
				close(eventChan)
				return
			}
			if retry {
				slog.Warn("Retrying due to rate limit", "attempt", attempts, "max_retries", maxRetries, "error", err)
				select {
				case <-ctx.Done():
					// Context cancelled mid-retry: surface the error and stop.
					if ctx.Err() != nil {
						eventChan <- ProviderEvent{Type: EventError, Error: ctx.Err()}
					}
					close(eventChan)
					return
				case <-time.After(time.Duration(after) * time.Millisecond):
					continue
				}
			}
			if ctx.Err() != nil {
				eventChan <- ProviderEvent{Type: EventError, Error: ctx.Err()}
			}

			close(eventChan)
			return
		}
	}()
	return eventChan
}

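// shouldRetry decides whether a failed request should be retried and, if so,
// after how many milliseconds. Non-retryable errors are returned unchanged; a
// Retry-After header is honoured when present, and otherwise exponential
// backoff (plus a 20% margin) applies. A parseable context-limit error
// instead shrinks max_tokens and retries immediately.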
func (a *anthropicClient) shouldRetry(attempts int, err error) (bool, int64, error) {
	var apiErr *anthropic.Error
	if !errors.As(err, &apiErr) {
		return false, 0, err
	}

	if attempts > maxRetries {
		return false, 0, fmt.Errorf("maximum retry attempts reached for rate limit: %d retries", maxRetries)
	}

	// Authentication errors are never retryable.
	if apiErr.StatusCode == 401 {
		return false, 0, err
	}

	// Handle context limit exceeded errors (400 Bad Request) by shrinking
	// max_tokens and retrying immediately.
	if apiErr.StatusCode == 400 {
		if adjusted, ok := a.handleContextLimitError(apiErr); ok {
			a.adjustedMaxTokens = adjusted
			slog.Debug("Adjusted max_tokens due to context limit", "new_max_tokens", adjusted)
			return true, 0, nil
		}
	}

	isOverloaded := strings.Contains(apiErr.Error(), "overloaded") || strings.Contains(apiErr.Error(), "rate limit exceeded")
	if apiErr.StatusCode != 429 && apiErr.StatusCode != 529 && !isOverloaded {
		return false, 0, err
	}

	// Exponential backoff plus a 20% margin, overridden by a Retry-After
	// header (in seconds) when the server provides one.
	backoffMs := 2000 * (1 << (attempts - 1))
	jitterMs := int(float64(backoffMs) * 0.2)
	retryMs := backoffMs + jitterMs
	if retryAfterValues := apiErr.Response.Header.Values("Retry-After"); len(retryAfterValues) > 0 {
		if _, err := fmt.Sscanf(retryAfterValues[0], "%d", &retryMs); err == nil {
			retryMs *= 1000
		}
	}
	return true, int64(retryMs), nil
}

// handleContextLimitError parses a context-limit error and returns an
// adjusted max_tokens value that should fit within the model's context window.
func (a *anthropicClient) handleContextLimitError(apiErr *anthropic.Error) (int, bool) {
	// Parse error messages like:
	// "input length and `max_tokens` exceed context limit: 154978 + 50000 > 200000"
	matches := contextLimitRegex.FindStringSubmatch(apiErr.Error())
	if len(matches) != 4 {
		return 0, false
	}

	inputTokens, err1 := strconv.Atoi(matches[1])
	contextLimit, err2 := strconv.Atoi(matches[3])
	if err1 != nil || err2 != nil {
		return 0, false
	}

	// Calculate a safe max_tokens with a buffer of 1000 tokens, but never go
	// below a minimum threshold of 1000.
	safeMaxTokens := max(contextLimit-inputTokens-1000, 1000)

	return safeMaxTokens, true
}

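// toolCalls extracts completed tool-use blocks from an Anthropic message.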
func (a *anthropicClient) toolCalls(msg anthropic.Message) []message.ToolCall {
	var toolCalls []message.ToolCall

	for _, block := range msg.Content {
		switch variant := block.AsAny().(type) {
		case anthropic.ToolUseBlock:
			toolCall := message.ToolCall{
				ID:       variant.ID,
				Name:     variant.Name,
				Input:    string(variant.Input),
				Type:     string(variant.Type),
				Finished: true,
			}
			toolCalls = append(toolCalls, toolCall)
		}
	}

	return toolCalls
}

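// usage maps Anthropic token accounting onto the internal TokenUsage type.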
func (a *anthropicClient) usage(msg anthropic.Message) TokenUsage {
	return TokenUsage{
		InputTokens:         msg.Usage.InputTokens,
		OutputTokens:        msg.Usage.OutputTokens,
		CacheCreationTokens: msg.Usage.CacheCreationInputTokens,
		CacheReadTokens:     msg.Usage.CacheReadInputTokens,
	}
}

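// Model returns the currently selected catwalk model for this client.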
func (a *anthropicClient) Model() catwalk.Model {
	return a.providerOptions.model(a.providerOptions.modelType)
}