package provider

import (
	"context"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"log/slog"
	"regexp"
	"strconv"
	"strings"
	"time"

	"github.com/anthropics/anthropic-sdk-go"
	"github.com/anthropics/anthropic-sdk-go/bedrock"
	"github.com/anthropics/anthropic-sdk-go/option"
	"github.com/charmbracelet/crush/internal/config"
	"github.com/charmbracelet/crush/internal/fur/provider"
	"github.com/charmbracelet/crush/internal/llm/tools"
	"github.com/charmbracelet/crush/internal/message"
)

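// anthropicClient implements the Anthropic provider, talking to the Messages
// API either directly or through AWS Bedrock.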
type anthropicClient struct {
	providerOptions   providerClientOptions
	useBedrock        bool
	client            anthropic.Client
	adjustedMaxTokens int // Used when context limit is hit
}

type AnthropicClient ProviderClient

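// newAnthropicClient builds an anthropicClient for the given options, wiring
// the SDK client for either the Anthropic API or AWS Bedrock.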
func newAnthropicClient(opts providerClientOptions, useBedrock bool) AnthropicClient {
	return &anthropicClient{
		providerOptions: opts,
		useBedrock:      useBedrock,
		client:          createAnthropicClient(opts, useBedrock),
	}
}

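// createAnthropicClient constructs the underlying SDK client, attaching the
// API key when one is configured and loading the default AWS config when
// Bedrock is used.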
func createAnthropicClient(opts providerClientOptions, useBedrock bool) anthropic.Client {
	anthropicClientOptions := []option.RequestOption{}
	if opts.apiKey != "" {
		anthropicClientOptions = append(anthropicClientOptions, option.WithAPIKey(opts.apiKey))
	}
	if useBedrock {
		anthropicClientOptions = append(anthropicClientOptions, bedrock.WithLoadDefaultConfig(context.Background()))
	}
	return anthropic.NewClient(anthropicClientOptions...)
}

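// convertMessages translates internal messages into Anthropic message params.
// The last two messages are marked with ephemeral cache control (unless
// caching is disabled) so the conversation prefix can be served from the
// prompt cache.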
func (a *anthropicClient) convertMessages(messages []message.Message) (anthropicMessages []anthropic.MessageParam) {
	for i, msg := range messages {
		cache := false
		if i > len(messages)-3 {
			cache = true
		}
		switch msg.Role {
		case message.User:
			content := anthropic.NewTextBlock(msg.Content().String())
			if cache && !a.providerOptions.disableCache {
				content.OfText.CacheControl = anthropic.CacheControlEphemeralParam{
					Type: "ephemeral",
				}
			}
			var contentBlocks []anthropic.ContentBlockParamUnion
			contentBlocks = append(contentBlocks, content)
			for _, binaryContent := range msg.BinaryContent() {
				base64Image := binaryContent.String(provider.InferenceProviderAnthropic)
				imageBlock := anthropic.NewImageBlockBase64(binaryContent.MIMEType, base64Image)
				contentBlocks = append(contentBlocks, imageBlock)
			}
			anthropicMessages = append(anthropicMessages, anthropic.NewUserMessage(contentBlocks...))

		case message.Assistant:
			blocks := []anthropic.ContentBlockParamUnion{}

			// Add thinking blocks first if present (required when thinking is enabled with tool use)
			if reasoningContent := msg.ReasoningContent(); reasoningContent.Thinking != "" {
				thinkingBlock := anthropic.NewThinkingBlock(reasoningContent.Signature, reasoningContent.Thinking)
				blocks = append(blocks, thinkingBlock)
			}

			if msg.Content().String() != "" {
				content := anthropic.NewTextBlock(msg.Content().String())
				if cache && !a.providerOptions.disableCache {
					content.OfText.CacheControl = anthropic.CacheControlEphemeralParam{
						Type: "ephemeral",
					}
				}
				blocks = append(blocks, content)
			}

			for _, toolCall := range msg.ToolCalls() {
				var inputMap map[string]any
				err := json.Unmarshal([]byte(toolCall.Input), &inputMap)
				if err != nil {
					continue
				}
				blocks = append(blocks, anthropic.NewToolUseBlock(toolCall.ID, inputMap, toolCall.Name))
			}

			if len(blocks) == 0 {
				slog.Warn("Assistant message has no content blocks; this should not happen")
				continue
			}
			anthropicMessages = append(anthropicMessages, anthropic.NewAssistantMessage(blocks...))

		case message.Tool:
			results := make([]anthropic.ContentBlockParamUnion, len(msg.ToolResults()))
			for i, toolResult := range msg.ToolResults() {
				results[i] = anthropic.NewToolResultBlock(toolResult.ToolCallID, toolResult.Content, toolResult.IsError)
			}
			anthropicMessages = append(anthropicMessages, anthropic.NewUserMessage(results...))
		}
	}
	return
}

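// convertTools translates tool definitions into Anthropic tool params, marking
// the last tool with ephemeral cache control when caching is enabled.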
func (a *anthropicClient) convertTools(tools []tools.BaseTool) []anthropic.ToolUnionParam {
	anthropicTools := make([]anthropic.ToolUnionParam, len(tools))

	for i, tool := range tools {
		info := tool.Info()
		toolParam := anthropic.ToolParam{
			Name:        info.Name,
			Description: anthropic.String(info.Description),
			InputSchema: anthropic.ToolInputSchemaParam{
				Properties: info.Parameters,
				// TODO: figure out how to tell Claude which fields are required.
			},
		}

		if i == len(tools)-1 && !a.providerOptions.disableCache {
			toolParam.CacheControl = anthropic.CacheControlEphemeralParam{
				Type: "ephemeral",
			}
		}

		anthropicTools[i] = anthropic.ToolUnionParam{OfTool: &toolParam}
	}

	return anthropicTools
}

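// finishReason maps Anthropic stop reasons onto provider-agnostic
// message.FinishReason values.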
func (a *anthropicClient) finishReason(reason string) message.FinishReason {
	switch reason {
	case "end_turn":
		return message.FinishReasonEndTurn
	case "max_tokens":
		return message.FinishReasonMaxTokens
	case "tool_use":
		return message.FinishReasonToolUse
	case "stop_sequence":
		return message.FinishReasonEndTurn
	default:
		return message.FinishReasonUnknown
	}
}

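// preparedMessages assembles the MessageNewParams for a request: model,
// max_tokens (honoring per-model config, provider overrides, and any
// context-limit adjustment), optional extended thinking, tools, and the
// cached system prompt.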
func (a *anthropicClient) preparedMessages(messages []anthropic.MessageParam, tools []anthropic.ToolUnionParam) anthropic.MessageNewParams {
	model := a.providerOptions.model(a.providerOptions.modelType)
	var thinkingParam anthropic.ThinkingConfigParamUnion
	cfg := config.Get()
	modelConfig := cfg.Models[config.SelectedModelTypeLarge]
	if a.providerOptions.modelType == config.SelectedModelTypeSmall {
		modelConfig = cfg.Models[config.SelectedModelTypeSmall]
	}
	temperature := anthropic.Float(0)

	maxTokens := model.DefaultMaxTokens
	if modelConfig.MaxTokens > 0 {
		maxTokens = modelConfig.MaxTokens
	}
	if a.Model().CanReason && modelConfig.Think {
		thinkingParam = anthropic.ThinkingConfigParamOfEnabled(int64(float64(maxTokens) * 0.8))
		temperature = anthropic.Float(1)
	}
	// Override max tokens if set in provider options
	if a.providerOptions.maxTokens > 0 {
		maxTokens = a.providerOptions.maxTokens
	}

	// Use adjusted max tokens if context limit was hit
	if a.adjustedMaxTokens > 0 {
		maxTokens = int64(a.adjustedMaxTokens)
	}

	return anthropic.MessageNewParams{
		Model:       anthropic.Model(model.ID),
		MaxTokens:   maxTokens,
		Temperature: temperature,
		Messages:    messages,
		Tools:       tools,
		Thinking:    thinkingParam,
		System: []anthropic.TextBlockParam{
			{
				Text: a.providerOptions.systemMessage,
				CacheControl: anthropic.CacheControlEphemeralParam{
					Type: "ephemeral",
				},
			},
		},
	}
}

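// send performs a single non-streaming completion request, retrying with
// backoff when shouldRetry reports the error as recoverable, and collects the
// text, tool calls, and usage from the response.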
func (a *anthropicClient) send(ctx context.Context, messages []message.Message, tools []tools.BaseTool) (response *ProviderResponse, err error) {
	cfg := config.Get()

	attempts := 0
	for {
		attempts++
		// Prepare messages on each attempt in case max_tokens was adjusted
		preparedMessages := a.preparedMessages(a.convertMessages(messages), a.convertTools(tools))
		if cfg.Options.Debug {
			jsonData, _ := json.Marshal(preparedMessages)
			slog.Debug("Prepared messages", "messages", string(jsonData))
		}

		anthropicResponse, err := a.client.Messages.New(
			ctx,
			preparedMessages,
		)
		// If the call failed, check whether it can be retried.
		if err != nil {
			slog.Error("Error in Anthropic API call", "error", err)
			retry, after, retryErr := a.shouldRetry(attempts, err)
			if retryErr != nil {
				return nil, retryErr
			}
			if retry {
				slog.Warn(fmt.Sprintf("Retrying due to rate limit... attempt %d of %d", attempts, maxRetries))
				select {
				case <-ctx.Done():
					return nil, ctx.Err()
				case <-time.After(time.Duration(after) * time.Millisecond):
					continue
				}
			}
			return nil, err
		}

		content := ""
		for _, block := range anthropicResponse.Content {
			if text, ok := block.AsAny().(anthropic.TextBlock); ok {
				content += text.Text
			}
		}

		return &ProviderResponse{
			Content:   content,
			ToolCalls: a.toolCalls(*anthropicResponse),
			Usage:     a.usage(*anthropicResponse),
		}, nil
	}
}

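// stream performs a streaming completion request and forwards SDK events as
// ProviderEvents on the returned channel, retrying recoverable errors and
// closing the channel when the stream ends or fails for good.
//
// A minimal consumption sketch (hypothetical caller inside this package;
// client, msgs, and tools are placeholders):
//
//	for event := range client.stream(ctx, msgs, tools) {
//		switch event.Type {
//		case EventContentDelta:
//			fmt.Print(event.Content)
//		case EventError:
//			return event.Error
//		}
//	}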
func (a *anthropicClient) stream(ctx context.Context, messages []message.Message, tools []tools.BaseTool) <-chan ProviderEvent {
	cfg := config.Get()
	attempts := 0
	eventChan := make(chan ProviderEvent)
	go func() {
		for {
			attempts++
			// Prepare messages on each attempt in case max_tokens was adjusted
			preparedMessages := a.preparedMessages(a.convertMessages(messages), a.convertTools(tools))
			if cfg.Options.Debug {
				jsonData, _ := json.Marshal(preparedMessages)
				slog.Debug("Prepared messages", "messages", string(jsonData))
			}

			anthropicStream := a.client.Messages.NewStreaming(
				ctx,
				preparedMessages,
				option.WithHeaderAdd("anthropic-beta", "interleaved-thinking-2025-05-14"),
			)
			accumulatedMessage := anthropic.Message{}

			currentToolCallID := ""
			for anthropicStream.Next() {
				event := anthropicStream.Current()
				err := accumulatedMessage.Accumulate(event)
				if err != nil {
					slog.Warn("Error accumulating message", "error", err)
					continue
				}

				switch event := event.AsAny().(type) {
				case anthropic.ContentBlockStartEvent:
					switch event.ContentBlock.Type {
					case "text":
						eventChan <- ProviderEvent{Type: EventContentStart}
					case "tool_use":
						currentToolCallID = event.ContentBlock.ID
						eventChan <- ProviderEvent{
							Type: EventToolUseStart,
							ToolCall: &message.ToolCall{
								ID:       event.ContentBlock.ID,
								Name:     event.ContentBlock.Name,
								Finished: false,
							},
						}
					}

				case anthropic.ContentBlockDeltaEvent:
					if event.Delta.Type == "thinking_delta" && event.Delta.Thinking != "" {
						eventChan <- ProviderEvent{
							Type:     EventThinkingDelta,
							Thinking: event.Delta.Thinking,
						}
					} else if event.Delta.Type == "signature_delta" && event.Delta.Signature != "" {
						eventChan <- ProviderEvent{
							Type:      EventSignatureDelta,
							Signature: event.Delta.Signature,
						}
					} else if event.Delta.Type == "text_delta" && event.Delta.Text != "" {
						eventChan <- ProviderEvent{
							Type:    EventContentDelta,
							Content: event.Delta.Text,
						}
					} else if event.Delta.Type == "input_json_delta" {
						if currentToolCallID != "" {
							eventChan <- ProviderEvent{
								Type: EventToolUseDelta,
								ToolCall: &message.ToolCall{
									ID:       currentToolCallID,
									Finished: false,
									Input:    event.Delta.PartialJSON,
								},
							}
						}
					}
				case anthropic.ContentBlockStopEvent:
					if currentToolCallID != "" {
						eventChan <- ProviderEvent{
							Type: EventToolUseStop,
							ToolCall: &message.ToolCall{
								ID: currentToolCallID,
							},
						}
						currentToolCallID = ""
					} else {
						eventChan <- ProviderEvent{Type: EventContentStop}
					}

				case anthropic.MessageStopEvent:
					content := ""
					for _, block := range accumulatedMessage.Content {
						if text, ok := block.AsAny().(anthropic.TextBlock); ok {
							content += text.Text
						}
					}

					eventChan <- ProviderEvent{
						Type: EventComplete,
						Response: &ProviderResponse{
							Content:      content,
							ToolCalls:    a.toolCalls(accumulatedMessage),
							Usage:        a.usage(accumulatedMessage),
							FinishReason: a.finishReason(string(accumulatedMessage.StopReason)),
						},
						Content: content,
					}
				}
			}

			err := anthropicStream.Err()
			if err == nil || errors.Is(err, io.EOF) {
				close(eventChan)
				return
			}
			// If the call failed, check whether it can be retried.
			retry, after, retryErr := a.shouldRetry(attempts, err)
			if retryErr != nil {
				eventChan <- ProviderEvent{Type: EventError, Error: retryErr}
				close(eventChan)
				return
			}
			if retry {
				slog.Warn(fmt.Sprintf("Retrying due to rate limit... attempt %d of %d", attempts, maxRetries))
				select {
				case <-ctx.Done():
					// context cancelled
					if ctx.Err() != nil {
						eventChan <- ProviderEvent{Type: EventError, Error: ctx.Err()}
					}
					close(eventChan)
					return
				case <-time.After(time.Duration(after) * time.Millisecond):
					continue
				}
			}
			if ctx.Err() != nil {
				eventChan <- ProviderEvent{Type: EventError, Error: ctx.Err()}
			}

			close(eventChan)
			return
		}
	}()
	return eventChan
}

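// shouldRetry decides whether a failed request should be retried and how many
// milliseconds to wait first: a 401 re-resolves the API key and rebuilds the
// client, a 400 reporting a context-limit overflow shrinks max_tokens, and
// 429/529/overloaded responses back off exponentially (or per Retry-After).
// It gives up once maxRetries is exceeded.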
func (a *anthropicClient) shouldRetry(attempts int, err error) (bool, int64, error) {
	var apiErr *anthropic.Error
	if !errors.As(err, &apiErr) {
		return false, 0, err
	}

	if attempts > maxRetries {
		return false, 0, fmt.Errorf("maximum retry attempts reached for rate limit: %d retries", maxRetries)
	}

	if apiErr.StatusCode == 401 {
		a.providerOptions.apiKey, err = config.Get().Resolve(a.providerOptions.config.APIKey)
		if err != nil {
			return false, 0, fmt.Errorf("failed to resolve API key: %w", err)
		}
		a.client = createAnthropicClient(a.providerOptions, a.useBedrock)
		return true, 0, nil
	}

	// Handle context limit exceeded error (400 Bad Request)
	if apiErr.StatusCode == 400 {
		if adjusted, ok := a.handleContextLimitError(apiErr); ok {
			a.adjustedMaxTokens = adjusted
			slog.Debug("Adjusted max_tokens due to context limit", "new_max_tokens", adjusted)
			return true, 0, nil
		}
	}

	isOverloaded := strings.Contains(apiErr.Error(), "overloaded") || strings.Contains(apiErr.Error(), "rate limit exceeded")
	if apiErr.StatusCode != 429 && apiErr.StatusCode != 529 && !isOverloaded {
		return false, 0, err
	}

	// Exponential backoff with a 20% cushion; a Retry-After header (given in
	// seconds), when present and parseable, takes precedence.
	backoffMs := 2000 * (1 << (attempts - 1))
	jitterMs := int(float64(backoffMs) * 0.2)
	retryMs := backoffMs + jitterMs
	if retryAfterValues := apiErr.Response.Header.Values("Retry-After"); len(retryAfterValues) > 0 {
		if _, err := fmt.Sscanf(retryAfterValues[0], "%d", &retryMs); err == nil {
			retryMs *= 1000
		}
	}
	return true, int64(retryMs), nil
}

// handleContextLimitError parses a context-limit error and returns an adjusted
// max_tokens value derived from the reported input length and context limit.
func (a *anthropicClient) handleContextLimitError(apiErr *anthropic.Error) (int, bool) {
	// Parse error messages like: "input length and `max_tokens` exceed context limit: 154978 + 50000 > 200000"
	errorMsg := apiErr.Error()

	re := regexp.MustCompile("input length and `max_tokens` exceed context limit: (\\d+) \\+ (\\d+) > (\\d+)")
	matches := re.FindStringSubmatch(errorMsg)

	if len(matches) != 4 {
		return 0, false
	}

	inputTokens, err1 := strconv.Atoi(matches[1])
	contextLimit, err2 := strconv.Atoi(matches[3])

	if err1 != nil || err2 != nil {
		return 0, false
	}

	// Calculate safe max_tokens with a buffer of 1000 tokens
	safeMaxTokens := contextLimit - inputTokens - 1000

	// Ensure we don't go below a minimum threshold
	safeMaxTokens = max(safeMaxTokens, 1000)

	return safeMaxTokens, true
}

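// toolCalls extracts tool_use blocks from a response as message.ToolCall values.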
func (a *anthropicClient) toolCalls(msg anthropic.Message) []message.ToolCall {
	var toolCalls []message.ToolCall

	for _, block := range msg.Content {
		switch variant := block.AsAny().(type) {
		case anthropic.ToolUseBlock:
			toolCall := message.ToolCall{
				ID:       variant.ID,
				Name:     variant.Name,
				Input:    string(variant.Input),
				Type:     string(variant.Type),
				Finished: true,
			}
			toolCalls = append(toolCalls, toolCall)
		}
	}

	return toolCalls
}

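// usage converts Anthropic usage counters into the provider TokenUsage type.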
func (a *anthropicClient) usage(msg anthropic.Message) TokenUsage {
	return TokenUsage{
		InputTokens:         msg.Usage.InputTokens,
		OutputTokens:        msg.Usage.OutputTokens,
		CacheCreationTokens: msg.Usage.CacheCreationInputTokens,
		CacheReadTokens:     msg.Usage.CacheReadInputTokens,
	}
}

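// Model returns the provider model selected for this client's model type.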
func (a *anthropicClient) Model() provider.Model {
	return a.providerOptions.model(a.providerOptions.modelType)
}