// anthropic.rs

   1use anthropic::{
   2    ANTHROPIC_API_URL, AnthropicError, AnthropicModelMode, ContentDelta, CountTokensRequest, Event,
   3    ResponseContent, ToolResultContent, ToolResultPart, Usage,
   4};
   5use anyhow::Result;
   6use collections::{BTreeMap, HashMap};
   7use futures::{FutureExt, Stream, StreamExt, future::BoxFuture, stream::BoxStream};
   8use gpui::{AnyView, App, AsyncApp, Context, Entity, Task};
   9use http_client::HttpClient;
  10use language_model::{
  11    ApiKeyState, AuthenticateError, ConfigurationViewTargetAgent, EnvVar, IconOrSvg, LanguageModel,
  12    LanguageModelCacheConfiguration, LanguageModelCompletionError, LanguageModelCompletionEvent,
  13    LanguageModelId, LanguageModelName, LanguageModelProvider, LanguageModelProviderId,
  14    LanguageModelProviderName, LanguageModelProviderState, LanguageModelRequest,
  15    LanguageModelToolChoice, LanguageModelToolResultContent, LanguageModelToolUse, MessageContent,
  16    RateLimiter, Role, StopReason, env_var,
  17};
  18use settings::{Settings, SettingsStore};
  19use std::pin::Pin;
  20use std::str::FromStr;
  21use std::sync::{Arc, LazyLock};
  22use strum::IntoEnumIterator;
  23use ui::{ButtonLink, ConfiguredApiCard, List, ListBulletItem, prelude::*};
  24use ui_input::InputField;
  25use util::ResultExt;
  26
  27use crate::provider::util::parse_tool_arguments;
  28
  29pub use settings::AnthropicAvailableModel as AvailableModel;
  30
  31const PROVIDER_ID: LanguageModelProviderId = language_model::ANTHROPIC_PROVIDER_ID;
  32const PROVIDER_NAME: LanguageModelProviderName = language_model::ANTHROPIC_PROVIDER_NAME;
  33
/// User-configurable settings for the Anthropic provider.
#[derive(Default, Clone, Debug, PartialEq)]
pub struct AnthropicSettings {
    /// Base URL for the Anthropic API; when empty, `ANTHROPIC_API_URL` is used.
    pub api_url: String,
    /// Extend Zed's list of Anthropic models.
    pub available_models: Vec<AvailableModel>,
}
  40
/// Language-model provider backed by the Anthropic API.
pub struct AnthropicLanguageModelProvider {
    http_client: Arc<dyn HttpClient>,
    // Shared, observable authentication state (API key), also cloned into
    // each `AnthropicModel` this provider creates.
    state: Entity<State>,
}
  45
/// Environment variable consulted for the API key when none is stored.
const API_KEY_ENV_VAR_NAME: &str = "ANTHROPIC_API_KEY";
static API_KEY_ENV_VAR: LazyLock<EnvVar> = env_var!(API_KEY_ENV_VAR_NAME);
  48
/// Authentication state shared between the provider and its models.
pub struct State {
    // Tracks the API key for the currently-configured API URL.
    api_key_state: ApiKeyState,
}
  52
  53impl State {
  54    fn is_authenticated(&self) -> bool {
  55        self.api_key_state.has_key()
  56    }
  57
  58    fn set_api_key(&mut self, api_key: Option<String>, cx: &mut Context<Self>) -> Task<Result<()>> {
  59        let api_url = AnthropicLanguageModelProvider::api_url(cx);
  60        self.api_key_state
  61            .store(api_url, api_key, |this| &mut this.api_key_state, cx)
  62    }
  63
  64    fn authenticate(&mut self, cx: &mut Context<Self>) -> Task<Result<(), AuthenticateError>> {
  65        let api_url = AnthropicLanguageModelProvider::api_url(cx);
  66        self.api_key_state
  67            .load_if_needed(api_url, |this| &mut this.api_key_state, cx)
  68    }
  69}
  70
  71impl AnthropicLanguageModelProvider {
  72    pub fn new(http_client: Arc<dyn HttpClient>, cx: &mut App) -> Self {
  73        let state = cx.new(|cx| {
  74            cx.observe_global::<SettingsStore>(|this: &mut State, cx| {
  75                let api_url = Self::api_url(cx);
  76                this.api_key_state
  77                    .handle_url_change(api_url, |this| &mut this.api_key_state, cx);
  78                cx.notify();
  79            })
  80            .detach();
  81            State {
  82                api_key_state: ApiKeyState::new(Self::api_url(cx), (*API_KEY_ENV_VAR).clone()),
  83            }
  84        });
  85
  86        Self { http_client, state }
  87    }
  88
  89    fn create_language_model(&self, model: anthropic::Model) -> Arc<dyn LanguageModel> {
  90        Arc::new(AnthropicModel {
  91            id: LanguageModelId::from(model.id().to_string()),
  92            model,
  93            state: self.state.clone(),
  94            http_client: self.http_client.clone(),
  95            request_limiter: RateLimiter::new(4),
  96        })
  97    }
  98
  99    fn settings(cx: &App) -> &AnthropicSettings {
 100        &crate::AllLanguageModelSettings::get_global(cx).anthropic
 101    }
 102
 103    fn api_url(cx: &App) -> SharedString {
 104        let api_url = &Self::settings(cx).api_url;
 105        if api_url.is_empty() {
 106            ANTHROPIC_API_URL.into()
 107        } else {
 108            SharedString::new(api_url.as_str())
 109        }
 110    }
 111}
 112
impl LanguageModelProviderState for AnthropicLanguageModelProvider {
    type ObservableEntity = State;

    /// Expose the authentication state entity so callers can observe changes.
    fn observable_entity(&self) -> Option<Entity<Self::ObservableEntity>> {
        Some(self.state.clone())
    }
}
 120
 121impl LanguageModelProvider for AnthropicLanguageModelProvider {
 122    fn id(&self) -> LanguageModelProviderId {
 123        PROVIDER_ID
 124    }
 125
 126    fn name(&self) -> LanguageModelProviderName {
 127        PROVIDER_NAME
 128    }
 129
 130    fn icon(&self) -> IconOrSvg {
 131        IconOrSvg::Icon(IconName::AiAnthropic)
 132    }
 133
 134    fn default_model(&self, _cx: &App) -> Option<Arc<dyn LanguageModel>> {
 135        Some(self.create_language_model(anthropic::Model::default()))
 136    }
 137
 138    fn default_fast_model(&self, _cx: &App) -> Option<Arc<dyn LanguageModel>> {
 139        Some(self.create_language_model(anthropic::Model::default_fast()))
 140    }
 141
 142    fn recommended_models(&self, _cx: &App) -> Vec<Arc<dyn LanguageModel>> {
 143        [anthropic::Model::ClaudeSonnet4_6]
 144            .into_iter()
 145            .map(|model| self.create_language_model(model))
 146            .collect()
 147    }
 148
 149    fn provided_models(&self, cx: &App) -> Vec<Arc<dyn LanguageModel>> {
 150        let mut models = BTreeMap::default();
 151
 152        // Add base models from anthropic::Model::iter()
 153        for model in anthropic::Model::iter() {
 154            if !matches!(model, anthropic::Model::Custom { .. }) {
 155                models.insert(model.id().to_string(), model);
 156            }
 157        }
 158
 159        // Override with available models from settings
 160        for model in &AnthropicLanguageModelProvider::settings(cx).available_models {
 161            models.insert(
 162                model.name.clone(),
 163                anthropic::Model::Custom {
 164                    name: model.name.clone(),
 165                    display_name: model.display_name.clone(),
 166                    max_tokens: model.max_tokens,
 167                    tool_override: model.tool_override.clone(),
 168                    cache_configuration: model.cache_configuration.as_ref().map(|config| {
 169                        anthropic::AnthropicModelCacheConfiguration {
 170                            max_cache_anchors: config.max_cache_anchors,
 171                            should_speculate: config.should_speculate,
 172                            min_total_token: config.min_total_token,
 173                        }
 174                    }),
 175                    max_output_tokens: model.max_output_tokens,
 176                    default_temperature: model.default_temperature,
 177                    extra_beta_headers: model.extra_beta_headers.clone(),
 178                    mode: model.mode.unwrap_or_default().into(),
 179                },
 180            );
 181        }
 182
 183        models
 184            .into_values()
 185            .map(|model| self.create_language_model(model))
 186            .collect()
 187    }
 188
 189    fn is_authenticated(&self, cx: &App) -> bool {
 190        self.state.read(cx).is_authenticated()
 191    }
 192
 193    fn authenticate(&self, cx: &mut App) -> Task<Result<(), AuthenticateError>> {
 194        self.state.update(cx, |state, cx| state.authenticate(cx))
 195    }
 196
 197    fn configuration_view(
 198        &self,
 199        target_agent: ConfigurationViewTargetAgent,
 200        window: &mut Window,
 201        cx: &mut App,
 202    ) -> AnyView {
 203        cx.new(|cx| ConfigurationView::new(self.state.clone(), target_agent, window, cx))
 204            .into()
 205    }
 206
 207    fn reset_credentials(&self, cx: &mut App) -> Task<Result<()>> {
 208        self.state
 209            .update(cx, |state, cx| state.set_api_key(None, cx))
 210    }
 211}
 212
/// A single Anthropic model exposed through the `LanguageModel` trait.
pub struct AnthropicModel {
    id: LanguageModelId,
    model: anthropic::Model,
    // Shared provider authentication state.
    state: Entity<State>,
    http_client: Arc<dyn HttpClient>,
    // Caps the number of concurrent completion requests.
    request_limiter: RateLimiter,
}
 220
 221fn to_anthropic_content(content: MessageContent) -> Option<anthropic::RequestContent> {
 222    match content {
 223        MessageContent::Text(text) => {
 224            let text = if text.chars().last().is_some_and(|c| c.is_whitespace()) {
 225                text.trim_end().to_string()
 226            } else {
 227                text
 228            };
 229            if !text.is_empty() {
 230                Some(anthropic::RequestContent::Text {
 231                    text,
 232                    cache_control: None,
 233                })
 234            } else {
 235                None
 236            }
 237        }
 238        MessageContent::Thinking {
 239            text: thinking,
 240            signature,
 241        } => {
 242            if let Some(signature) = signature
 243                && !thinking.is_empty()
 244            {
 245                Some(anthropic::RequestContent::Thinking {
 246                    thinking,
 247                    signature,
 248                    cache_control: None,
 249                })
 250            } else {
 251                None
 252            }
 253        }
 254        MessageContent::RedactedThinking(data) => {
 255            if !data.is_empty() {
 256                Some(anthropic::RequestContent::RedactedThinking { data })
 257            } else {
 258                None
 259            }
 260        }
 261        MessageContent::Image(image) => Some(anthropic::RequestContent::Image {
 262            source: anthropic::ImageSource {
 263                source_type: "base64".to_string(),
 264                media_type: "image/png".to_string(),
 265                data: image.source.to_string(),
 266            },
 267            cache_control: None,
 268        }),
 269        MessageContent::ToolUse(tool_use) => Some(anthropic::RequestContent::ToolUse {
 270            id: tool_use.id.to_string(),
 271            name: tool_use.name.to_string(),
 272            input: tool_use.input,
 273            cache_control: None,
 274        }),
 275        MessageContent::ToolResult(tool_result) => Some(anthropic::RequestContent::ToolResult {
 276            tool_use_id: tool_result.tool_use_id.to_string(),
 277            is_error: tool_result.is_error,
 278            content: match tool_result.content {
 279                LanguageModelToolResultContent::Text(text) => {
 280                    ToolResultContent::Plain(text.to_string())
 281                }
 282                LanguageModelToolResultContent::Image(image) => {
 283                    ToolResultContent::Multipart(vec![ToolResultPart::Image {
 284                        source: anthropic::ImageSource {
 285                            source_type: "base64".to_string(),
 286                            media_type: "image/png".to_string(),
 287                            data: image.source.to_string(),
 288                        },
 289                    }])
 290                }
 291            },
 292            cache_control: None,
 293        }),
 294    }
 295}
 296
 297/// Convert a LanguageModelRequest to an Anthropic CountTokensRequest.
 298pub fn into_anthropic_count_tokens_request(
 299    request: LanguageModelRequest,
 300    model: String,
 301    mode: AnthropicModelMode,
 302) -> CountTokensRequest {
 303    let mut new_messages: Vec<anthropic::Message> = Vec::new();
 304    let mut system_message = String::new();
 305
 306    for message in request.messages {
 307        if message.contents_empty() {
 308            continue;
 309        }
 310
 311        match message.role {
 312            Role::User | Role::Assistant => {
 313                let anthropic_message_content: Vec<anthropic::RequestContent> = message
 314                    .content
 315                    .into_iter()
 316                    .filter_map(to_anthropic_content)
 317                    .collect();
 318                let anthropic_role = match message.role {
 319                    Role::User => anthropic::Role::User,
 320                    Role::Assistant => anthropic::Role::Assistant,
 321                    Role::System => unreachable!("System role should never occur here"),
 322                };
 323                if anthropic_message_content.is_empty() {
 324                    continue;
 325                }
 326
 327                if let Some(last_message) = new_messages.last_mut()
 328                    && last_message.role == anthropic_role
 329                {
 330                    last_message.content.extend(anthropic_message_content);
 331                    continue;
 332                }
 333
 334                new_messages.push(anthropic::Message {
 335                    role: anthropic_role,
 336                    content: anthropic_message_content,
 337                });
 338            }
 339            Role::System => {
 340                if !system_message.is_empty() {
 341                    system_message.push_str("\n\n");
 342                }
 343                system_message.push_str(&message.string_contents());
 344            }
 345        }
 346    }
 347
 348    CountTokensRequest {
 349        model,
 350        messages: new_messages,
 351        system: if system_message.is_empty() {
 352            None
 353        } else {
 354            Some(anthropic::StringOrContents::String(system_message))
 355        },
 356        thinking: if request.thinking_allowed {
 357            match mode {
 358                AnthropicModelMode::Thinking { budget_tokens } => {
 359                    Some(anthropic::Thinking::Enabled { budget_tokens })
 360                }
 361                AnthropicModelMode::AdaptiveThinking => Some(anthropic::Thinking::Adaptive),
 362                AnthropicModelMode::Default => None,
 363            }
 364        } else {
 365            None
 366        },
 367        tools: request
 368            .tools
 369            .into_iter()
 370            .map(|tool| anthropic::Tool {
 371                name: tool.name,
 372                description: tool.description,
 373                input_schema: tool.input_schema,
 374                eager_input_streaming: tool.use_input_streaming,
 375            })
 376            .collect(),
 377        tool_choice: request.tool_choice.map(|choice| match choice {
 378            LanguageModelToolChoice::Auto => anthropic::ToolChoice::Auto,
 379            LanguageModelToolChoice::Any => anthropic::ToolChoice::Any,
 380            LanguageModelToolChoice::None => anthropic::ToolChoice::None,
 381        }),
 382    }
 383}
 384
 385/// Estimate tokens using tiktoken. Used as a fallback when the API is unavailable,
 386/// or by providers (like Zed Cloud) that don't have direct Anthropic API access.
 387pub fn count_anthropic_tokens_with_tiktoken(request: LanguageModelRequest) -> Result<u64> {
 388    let messages = request.messages;
 389    let mut tokens_from_images = 0;
 390    let mut string_messages = Vec::with_capacity(messages.len());
 391
 392    for message in messages {
 393        let mut string_contents = String::new();
 394
 395        for content in message.content {
 396            match content {
 397                MessageContent::Text(text) => {
 398                    string_contents.push_str(&text);
 399                }
 400                MessageContent::Thinking { .. } => {
 401                    // Thinking blocks are not included in the input token count.
 402                }
 403                MessageContent::RedactedThinking(_) => {
 404                    // Thinking blocks are not included in the input token count.
 405                }
 406                MessageContent::Image(image) => {
 407                    tokens_from_images += image.estimate_tokens();
 408                }
 409                MessageContent::ToolUse(_tool_use) => {
 410                    // TODO: Estimate token usage from tool uses.
 411                }
 412                MessageContent::ToolResult(tool_result) => match &tool_result.content {
 413                    LanguageModelToolResultContent::Text(text) => {
 414                        string_contents.push_str(text);
 415                    }
 416                    LanguageModelToolResultContent::Image(image) => {
 417                        tokens_from_images += image.estimate_tokens();
 418                    }
 419                },
 420            }
 421        }
 422
 423        if !string_contents.is_empty() {
 424            string_messages.push(tiktoken_rs::ChatCompletionRequestMessage {
 425                role: match message.role {
 426                    Role::User => "user".into(),
 427                    Role::Assistant => "assistant".into(),
 428                    Role::System => "system".into(),
 429                },
 430                content: Some(string_contents),
 431                name: None,
 432                function_call: None,
 433            });
 434        }
 435    }
 436
 437    // Tiktoken doesn't yet support these models, so we manually use the
 438    // same tokenizer as GPT-4.
 439    tiktoken_rs::num_tokens_from_messages("gpt-4", &string_messages)
 440        .map(|tokens| (tokens + tokens_from_images) as u64)
 441}
 442
 443impl AnthropicModel {
 444    fn stream_completion(
 445        &self,
 446        request: anthropic::Request,
 447        cx: &AsyncApp,
 448    ) -> BoxFuture<
 449        'static,
 450        Result<
 451            BoxStream<'static, Result<anthropic::Event, AnthropicError>>,
 452            LanguageModelCompletionError,
 453        >,
 454    > {
 455        let http_client = self.http_client.clone();
 456
 457        let (api_key, api_url) = self.state.read_with(cx, |state, cx| {
 458            let api_url = AnthropicLanguageModelProvider::api_url(cx);
 459            (state.api_key_state.key(&api_url), api_url)
 460        });
 461
 462        let beta_headers = self.model.beta_headers();
 463
 464        async move {
 465            let Some(api_key) = api_key else {
 466                return Err(LanguageModelCompletionError::NoApiKey {
 467                    provider: PROVIDER_NAME,
 468                });
 469            };
 470            let request = anthropic::stream_completion(
 471                http_client.as_ref(),
 472                &api_url,
 473                &api_key,
 474                request,
 475                beta_headers,
 476            );
 477            request.await.map_err(Into::into)
 478        }
 479        .boxed()
 480    }
 481}
 482
 483impl LanguageModel for AnthropicModel {
 484    fn id(&self) -> LanguageModelId {
 485        self.id.clone()
 486    }
 487
 488    fn name(&self) -> LanguageModelName {
 489        LanguageModelName::from(self.model.display_name().to_string())
 490    }
 491
 492    fn provider_id(&self) -> LanguageModelProviderId {
 493        PROVIDER_ID
 494    }
 495
 496    fn provider_name(&self) -> LanguageModelProviderName {
 497        PROVIDER_NAME
 498    }
 499
 500    fn supports_tools(&self) -> bool {
 501        true
 502    }
 503
 504    fn supports_images(&self) -> bool {
 505        true
 506    }
 507
 508    fn supports_streaming_tools(&self) -> bool {
 509        true
 510    }
 511
 512    fn supports_tool_choice(&self, choice: LanguageModelToolChoice) -> bool {
 513        match choice {
 514            LanguageModelToolChoice::Auto
 515            | LanguageModelToolChoice::Any
 516            | LanguageModelToolChoice::None => true,
 517        }
 518    }
 519
 520    fn supports_thinking(&self) -> bool {
 521        self.model.supports_thinking()
 522    }
 523
 524    fn supported_effort_levels(&self) -> Vec<language_model::LanguageModelEffortLevel> {
 525        if self.model.supports_adaptive_thinking() {
 526            vec![
 527                language_model::LanguageModelEffortLevel {
 528                    name: "Low".into(),
 529                    value: "low".into(),
 530                    is_default: false,
 531                },
 532                language_model::LanguageModelEffortLevel {
 533                    name: "Medium".into(),
 534                    value: "medium".into(),
 535                    is_default: false,
 536                },
 537                language_model::LanguageModelEffortLevel {
 538                    name: "High".into(),
 539                    value: "high".into(),
 540                    is_default: true,
 541                },
 542                language_model::LanguageModelEffortLevel {
 543                    name: "Max".into(),
 544                    value: "max".into(),
 545                    is_default: false,
 546                },
 547            ]
 548        } else {
 549            Vec::new()
 550        }
 551    }
 552
 553    fn telemetry_id(&self) -> String {
 554        format!("anthropic/{}", self.model.id())
 555    }
 556
 557    fn api_key(&self, cx: &App) -> Option<String> {
 558        self.state.read_with(cx, |state, cx| {
 559            let api_url = AnthropicLanguageModelProvider::api_url(cx);
 560            state.api_key_state.key(&api_url).map(|key| key.to_string())
 561        })
 562    }
 563
 564    fn max_token_count(&self) -> u64 {
 565        self.model.max_token_count()
 566    }
 567
 568    fn max_output_tokens(&self) -> Option<u64> {
 569        Some(self.model.max_output_tokens())
 570    }
 571
 572    fn count_tokens(
 573        &self,
 574        request: LanguageModelRequest,
 575        cx: &App,
 576    ) -> BoxFuture<'static, Result<u64>> {
 577        let http_client = self.http_client.clone();
 578        let model_id = self.model.request_id().to_string();
 579        let mode = self.model.mode();
 580
 581        let (api_key, api_url) = self.state.read_with(cx, |state, cx| {
 582            let api_url = AnthropicLanguageModelProvider::api_url(cx);
 583            (
 584                state.api_key_state.key(&api_url).map(|k| k.to_string()),
 585                api_url.to_string(),
 586            )
 587        });
 588
 589        async move {
 590            // If no API key, fall back to tiktoken estimation
 591            let Some(api_key) = api_key else {
 592                return count_anthropic_tokens_with_tiktoken(request);
 593            };
 594
 595            let count_request =
 596                into_anthropic_count_tokens_request(request.clone(), model_id, mode);
 597
 598            match anthropic::count_tokens(http_client.as_ref(), &api_url, &api_key, count_request)
 599                .await
 600            {
 601                Ok(response) => Ok(response.input_tokens),
 602                Err(err) => {
 603                    log::error!(
 604                        "Anthropic count_tokens API failed, falling back to tiktoken: {err:?}"
 605                    );
 606                    count_anthropic_tokens_with_tiktoken(request)
 607                }
 608            }
 609        }
 610        .boxed()
 611    }
 612
 613    fn stream_completion(
 614        &self,
 615        request: LanguageModelRequest,
 616        cx: &AsyncApp,
 617    ) -> BoxFuture<
 618        'static,
 619        Result<
 620            BoxStream<'static, Result<LanguageModelCompletionEvent, LanguageModelCompletionError>>,
 621            LanguageModelCompletionError,
 622        >,
 623    > {
 624        let request = into_anthropic(
 625            request,
 626            self.model.request_id().into(),
 627            self.model.default_temperature(),
 628            self.model.max_output_tokens(),
 629            self.model.mode(),
 630        );
 631        let request = self.stream_completion(request, cx);
 632        let future = self.request_limiter.stream(async move {
 633            let response = request.await?;
 634            Ok(AnthropicEventMapper::new().map_stream(response))
 635        });
 636        async move { Ok(future.await?.boxed()) }.boxed()
 637    }
 638
 639    fn cache_configuration(&self) -> Option<LanguageModelCacheConfiguration> {
 640        self.model
 641            .cache_configuration()
 642            .map(|config| LanguageModelCacheConfiguration {
 643                max_cache_anchors: config.max_cache_anchors,
 644                should_speculate: config.should_speculate,
 645                min_total_token: config.min_total_token,
 646            })
 647    }
 648}
 649
/// Convert a `LanguageModelRequest` into an Anthropic completion request.
///
/// System messages are concatenated into the `system` prompt, empty messages
/// are dropped, adjacent messages with the same role are merged, and a message
/// flagged `cache` gets an ephemeral cache anchor on its last cacheable
/// content segment.
pub fn into_anthropic(
    request: LanguageModelRequest,
    model: String,
    default_temperature: f32,
    max_output_tokens: u64,
    mode: AnthropicModelMode,
) -> anthropic::Request {
    let mut new_messages: Vec<anthropic::Message> = Vec::new();
    let mut system_message = String::new();

    for message in request.messages {
        // Skip messages with no content at all.
        if message.contents_empty() {
            continue;
        }

        match message.role {
            Role::User | Role::Assistant => {
                let mut anthropic_message_content: Vec<anthropic::RequestContent> = message
                    .content
                    .into_iter()
                    .filter_map(to_anthropic_content)
                    .collect();
                let anthropic_role = match message.role {
                    Role::User => anthropic::Role::User,
                    Role::Assistant => anthropic::Role::Assistant,
                    Role::System => unreachable!("System role should never occur here"),
                };
                // Conversion may have dropped everything (e.g. whitespace-only
                // text or unsigned thinking blocks).
                if anthropic_message_content.is_empty() {
                    continue;
                }

                // Merge into the previous message when roles match; merged
                // messages keep the earlier message's cache markers.
                if let Some(last_message) = new_messages.last_mut()
                    && last_message.role == anthropic_role
                {
                    last_message.content.extend(anthropic_message_content);
                    continue;
                }

                // Mark the last segment of the message as cached
                if message.cache {
                    let cache_control_value = Some(anthropic::CacheControl {
                        cache_type: anthropic::CacheControlType::Ephemeral,
                    });
                    // Walk backwards to find the last segment that accepts a
                    // cache anchor.
                    for message_content in anthropic_message_content.iter_mut().rev() {
                        match message_content {
                            anthropic::RequestContent::RedactedThinking { .. } => {
                                // Caching is not possible, fallback to next message
                            }
                            anthropic::RequestContent::Text { cache_control, .. }
                            | anthropic::RequestContent::Thinking { cache_control, .. }
                            | anthropic::RequestContent::Image { cache_control, .. }
                            | anthropic::RequestContent::ToolUse { cache_control, .. }
                            | anthropic::RequestContent::ToolResult { cache_control, .. } => {
                                *cache_control = cache_control_value;
                                break;
                            }
                        }
                    }
                }

                new_messages.push(anthropic::Message {
                    role: anthropic_role,
                    content: anthropic_message_content,
                });
            }
            Role::System => {
                // Fold all system messages into a single system prompt.
                if !system_message.is_empty() {
                    system_message.push_str("\n\n");
                }
                system_message.push_str(&message.string_contents());
            }
        }
    }

    anthropic::Request {
        model,
        messages: new_messages,
        max_tokens: max_output_tokens,
        system: if system_message.is_empty() {
            None
        } else {
            Some(anthropic::StringOrContents::String(system_message))
        },
        thinking: if request.thinking_allowed {
            match mode {
                AnthropicModelMode::Thinking { budget_tokens } => {
                    Some(anthropic::Thinking::Enabled { budget_tokens })
                }
                AnthropicModelMode::AdaptiveThinking => Some(anthropic::Thinking::Adaptive),
                AnthropicModelMode::Default => None,
            }
        } else {
            None
        },
        tools: request
            .tools
            .into_iter()
            .map(|tool| anthropic::Tool {
                name: tool.name,
                description: tool.description,
                input_schema: tool.input_schema,
                eager_input_streaming: tool.use_input_streaming,
            })
            .collect(),
        tool_choice: request.tool_choice.map(|choice| match choice {
            LanguageModelToolChoice::Auto => anthropic::ToolChoice::Auto,
            LanguageModelToolChoice::Any => anthropic::ToolChoice::Any,
            LanguageModelToolChoice::None => anthropic::ToolChoice::None,
        }),
        metadata: None,
        // Thinking effort is only meaningful in adaptive-thinking mode;
        // unrecognized effort strings are silently ignored.
        output_config: if request.thinking_allowed
            && matches!(mode, AnthropicModelMode::AdaptiveThinking)
        {
            request.thinking_effort.as_deref().and_then(|effort| {
                let effort = match effort {
                    "low" => Some(anthropic::Effort::Low),
                    "medium" => Some(anthropic::Effort::Medium),
                    "high" => Some(anthropic::Effort::High),
                    "max" => Some(anthropic::Effort::Max),
                    _ => None,
                };
                effort.map(|effort| anthropic::OutputConfig {
                    effort: Some(effort),
                })
            })
        } else {
            None
        },
        stop_sequences: Vec::new(),
        speed: request.speed.map(From::from),
        // An explicit request temperature wins over the model default.
        temperature: request.temperature.or(Some(default_temperature)),
        top_k: None,
        top_p: None,
    }
}
 785
/// Accumulates streaming Anthropic `Event`s and maps them into
/// `LanguageModelCompletionEvent`s.
pub struct AnthropicEventMapper {
    // Tool uses under construction, keyed by content-block index.
    tool_uses_by_index: HashMap<usize, RawToolUse>,
    usage: Usage,
    stop_reason: StopReason,
}
 791
 792impl AnthropicEventMapper {
    /// Create a mapper with empty state; the stop reason defaults to
    /// `EndTurn` until the stream reports otherwise.
    pub fn new() -> Self {
        Self {
            tool_uses_by_index: HashMap::default(),
            usage: Usage::default(),
            stop_reason: StopReason::EndTurn,
        }
    }
 800
    /// Map a stream of raw Anthropic events into completion events, passing
    /// stream errors through as completion errors. Consumes the mapper, which
    /// carries per-stream state across events.
    pub fn map_stream(
        mut self,
        events: Pin<Box<dyn Send + Stream<Item = Result<Event, AnthropicError>>>>,
    ) -> impl Stream<Item = Result<LanguageModelCompletionEvent, LanguageModelCompletionError>>
    {
        events.flat_map(move |event| {
            futures::stream::iter(match event {
                Ok(event) => self.map_event(event),
                Err(error) => vec![Err(error.into())],
            })
        })
    }
 813
 814    pub fn map_event(
 815        &mut self,
 816        event: Event,
 817    ) -> Vec<Result<LanguageModelCompletionEvent, LanguageModelCompletionError>> {
 818        match event {
 819            Event::ContentBlockStart {
 820                index,
 821                content_block,
 822            } => match content_block {
 823                ResponseContent::Text { text } => {
 824                    vec![Ok(LanguageModelCompletionEvent::Text(text))]
 825                }
 826                ResponseContent::Thinking { thinking } => {
 827                    vec![Ok(LanguageModelCompletionEvent::Thinking {
 828                        text: thinking,
 829                        signature: None,
 830                    })]
 831                }
 832                ResponseContent::RedactedThinking { data } => {
 833                    vec![Ok(LanguageModelCompletionEvent::RedactedThinking { data })]
 834                }
 835                ResponseContent::ToolUse { id, name, .. } => {
 836                    self.tool_uses_by_index.insert(
 837                        index,
 838                        RawToolUse {
 839                            id,
 840                            name,
 841                            input_json: String::new(),
 842                        },
 843                    );
 844                    Vec::new()
 845                }
 846            },
 847            Event::ContentBlockDelta { index, delta } => match delta {
 848                ContentDelta::TextDelta { text } => {
 849                    vec![Ok(LanguageModelCompletionEvent::Text(text))]
 850                }
 851                ContentDelta::ThinkingDelta { thinking } => {
 852                    vec![Ok(LanguageModelCompletionEvent::Thinking {
 853                        text: thinking,
 854                        signature: None,
 855                    })]
 856                }
 857                ContentDelta::SignatureDelta { signature } => {
 858                    vec![Ok(LanguageModelCompletionEvent::Thinking {
 859                        text: "".to_string(),
 860                        signature: Some(signature),
 861                    })]
 862                }
 863                ContentDelta::InputJsonDelta { partial_json } => {
 864                    if let Some(tool_use) = self.tool_uses_by_index.get_mut(&index) {
 865                        tool_use.input_json.push_str(&partial_json);
 866
 867                        // Try to convert invalid (incomplete) JSON into
 868                        // valid JSON that serde can accept, e.g. by closing
 869                        // unclosed delimiters. This way, we can update the
 870                        // UI with whatever has been streamed back so far.
 871                        if let Ok(input) = serde_json::Value::from_str(
 872                            &partial_json_fixer::fix_json(&tool_use.input_json),
 873                        ) {
 874                            return vec![Ok(LanguageModelCompletionEvent::ToolUse(
 875                                LanguageModelToolUse {
 876                                    id: tool_use.id.clone().into(),
 877                                    name: tool_use.name.clone().into(),
 878                                    is_input_complete: false,
 879                                    raw_input: tool_use.input_json.clone(),
 880                                    input,
 881                                    thought_signature: None,
 882                                },
 883                            ))];
 884                        }
 885                    }
 886                    vec![]
 887                }
 888            },
 889            Event::ContentBlockStop { index } => {
 890                if let Some(tool_use) = self.tool_uses_by_index.remove(&index) {
 891                    let input_json = tool_use.input_json.trim();
 892                    let event_result = match parse_tool_arguments(input_json) {
 893                        Ok(input) => Ok(LanguageModelCompletionEvent::ToolUse(
 894                            LanguageModelToolUse {
 895                                id: tool_use.id.into(),
 896                                name: tool_use.name.into(),
 897                                is_input_complete: true,
 898                                input,
 899                                raw_input: tool_use.input_json.clone(),
 900                                thought_signature: None,
 901                            },
 902                        )),
 903                        Err(json_parse_err) => {
 904                            Ok(LanguageModelCompletionEvent::ToolUseJsonParseError {
 905                                id: tool_use.id.into(),
 906                                tool_name: tool_use.name.into(),
 907                                raw_input: input_json.into(),
 908                                json_parse_error: json_parse_err.to_string(),
 909                            })
 910                        }
 911                    };
 912
 913                    vec![event_result]
 914                } else {
 915                    Vec::new()
 916                }
 917            }
 918            Event::MessageStart { message } => {
 919                update_usage(&mut self.usage, &message.usage);
 920                vec![
 921                    Ok(LanguageModelCompletionEvent::UsageUpdate(convert_usage(
 922                        &self.usage,
 923                    ))),
 924                    Ok(LanguageModelCompletionEvent::StartMessage {
 925                        message_id: message.id,
 926                    }),
 927                ]
 928            }
 929            Event::MessageDelta { delta, usage } => {
 930                update_usage(&mut self.usage, &usage);
 931                if let Some(stop_reason) = delta.stop_reason.as_deref() {
 932                    self.stop_reason = match stop_reason {
 933                        "end_turn" => StopReason::EndTurn,
 934                        "max_tokens" => StopReason::MaxTokens,
 935                        "tool_use" => StopReason::ToolUse,
 936                        "refusal" => StopReason::Refusal,
 937                        _ => {
 938                            log::error!("Unexpected anthropic stop_reason: {stop_reason}");
 939                            StopReason::EndTurn
 940                        }
 941                    };
 942                }
 943                vec![Ok(LanguageModelCompletionEvent::UsageUpdate(
 944                    convert_usage(&self.usage),
 945                ))]
 946            }
 947            Event::MessageStop => {
 948                vec![Ok(LanguageModelCompletionEvent::Stop(self.stop_reason))]
 949            }
 950            Event::Error { error } => {
 951                vec![Err(error.into())]
 952            }
 953            _ => Vec::new(),
 954        }
 955    }
 956}
 957
/// A tool invocation whose input JSON is still being streamed in.
struct RawToolUse {
    id: String,
    name: String,
    // Accumulated (possibly incomplete) JSON for the tool call's input.
    input_json: String,
}
 963
 964/// Updates usage data by preferring counts from `new`.
 965fn update_usage(usage: &mut Usage, new: &Usage) {
 966    if let Some(input_tokens) = new.input_tokens {
 967        usage.input_tokens = Some(input_tokens);
 968    }
 969    if let Some(output_tokens) = new.output_tokens {
 970        usage.output_tokens = Some(output_tokens);
 971    }
 972    if let Some(cache_creation_input_tokens) = new.cache_creation_input_tokens {
 973        usage.cache_creation_input_tokens = Some(cache_creation_input_tokens);
 974    }
 975    if let Some(cache_read_input_tokens) = new.cache_read_input_tokens {
 976        usage.cache_read_input_tokens = Some(cache_read_input_tokens);
 977    }
 978}
 979
 980fn convert_usage(usage: &Usage) -> language_model::TokenUsage {
 981    language_model::TokenUsage {
 982        input_tokens: usage.input_tokens.unwrap_or(0),
 983        output_tokens: usage.output_tokens.unwrap_or(0),
 984        cache_creation_input_tokens: usage.cache_creation_input_tokens.unwrap_or(0),
 985        cache_read_input_tokens: usage.cache_read_input_tokens.unwrap_or(0),
 986    }
 987}
 988
/// Settings UI for the Anthropic provider: prompts for an API key when none
/// is configured, and shows/reset controls once one is.
struct ConfigurationView {
    // Single-line input where the user pastes their API key.
    api_key_editor: Entity<InputField>,
    // Shared provider state holding credentials and authentication status.
    state: Entity<State>,
    // `Some` while credentials are being loaded; cleared when done so the
    // view stops showing the loading placeholder.
    load_credentials_task: Option<Task<()>>,
    // Which agent this configuration is for; affects the onboarding copy.
    target_agent: ConfigurationViewTargetAgent,
}
 995
 996impl ConfigurationView {
 997    const PLACEHOLDER_TEXT: &'static str = "sk-ant-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx";
 998
 999    fn new(
1000        state: Entity<State>,
1001        target_agent: ConfigurationViewTargetAgent,
1002        window: &mut Window,
1003        cx: &mut Context<Self>,
1004    ) -> Self {
1005        cx.observe(&state, |_, _, cx| {
1006            cx.notify();
1007        })
1008        .detach();
1009
1010        let load_credentials_task = Some(cx.spawn({
1011            let state = state.clone();
1012            async move |this, cx| {
1013                let task = state.update(cx, |state, cx| state.authenticate(cx));
1014                // We don't log an error, because "not signed in" is also an error.
1015                let _ = task.await;
1016                this.update(cx, |this, cx| {
1017                    this.load_credentials_task = None;
1018                    cx.notify();
1019                })
1020                .log_err();
1021            }
1022        }));
1023
1024        Self {
1025            api_key_editor: cx.new(|cx| InputField::new(window, cx, Self::PLACEHOLDER_TEXT)),
1026            state,
1027            load_credentials_task,
1028            target_agent,
1029        }
1030    }
1031
1032    fn save_api_key(&mut self, _: &menu::Confirm, window: &mut Window, cx: &mut Context<Self>) {
1033        let api_key = self.api_key_editor.read(cx).text(cx);
1034        if api_key.is_empty() {
1035            return;
1036        }
1037
1038        // url changes can cause the editor to be displayed again
1039        self.api_key_editor
1040            .update(cx, |editor, cx| editor.set_text("", window, cx));
1041
1042        let state = self.state.clone();
1043        cx.spawn_in(window, async move |_, cx| {
1044            state
1045                .update(cx, |state, cx| state.set_api_key(Some(api_key), cx))
1046                .await
1047        })
1048        .detach_and_log_err(cx);
1049    }
1050
1051    fn reset_api_key(&mut self, window: &mut Window, cx: &mut Context<Self>) {
1052        self.api_key_editor
1053            .update(cx, |editor, cx| editor.set_text("", window, cx));
1054
1055        let state = self.state.clone();
1056        cx.spawn_in(window, async move |_, cx| {
1057            state
1058                .update(cx, |state, cx| state.set_api_key(None, cx))
1059                .await
1060        })
1061        .detach_and_log_err(cx);
1062    }
1063
1064    fn should_render_editor(&self, cx: &mut Context<Self>) -> bool {
1065        !self.state.read(cx).is_authenticated()
1066    }
1067}
1068
impl Render for ConfigurationView {
    fn render(&mut self, _: &mut Window, cx: &mut Context<Self>) -> impl IntoElement {
        // Keys supplied via the environment variable cannot be reset from
        // the UI, so the configured card is disabled in that case below.
        let env_var_set = self.state.read(cx).api_key_state.is_from_env_var();
        let configured_card_label = if env_var_set {
            format!("API key set in {API_KEY_ENV_VAR_NAME} environment variable")
        } else {
            // Mention the API URL only when it differs from the default.
            let api_url = AnthropicLanguageModelProvider::api_url(cx);
            if api_url == ANTHROPIC_API_URL {
                "API key configured".to_string()
            } else {
                format!("API key configured for {}", api_url)
            }
        };

        if self.load_credentials_task.is_some() {
            // Credentials are still loading; show a placeholder.
            div()
                .child(Label::new("Loading credentials..."))
                .into_any_element()
        } else if self.should_render_editor(cx) {
            // No key configured: show onboarding instructions plus the input.
            v_flex()
                .size_full()
                .on_action(cx.listener(Self::save_api_key))
                .child(Label::new(format!("To use {}, you need to add an API key. Follow these steps:", match &self.target_agent {
                    ConfigurationViewTargetAgent::ZedAgent => "Zed's agent with Anthropic".into(),
                    ConfigurationViewTargetAgent::Other(agent) => agent.clone(),
                })))
                .child(
                    List::new()
                        .child(
                            ListBulletItem::new("")
                                .child(Label::new("Create one by visiting"))
                                .child(ButtonLink::new("Anthropic's settings", "https://console.anthropic.com/settings/keys"))
                        )
                        .child(
                            ListBulletItem::new("Paste your API key below and hit enter to start using the agent")
                        )
                )
                .child(self.api_key_editor.clone())
                .child(
                    Label::new(
                        format!("You can also set the {API_KEY_ENV_VAR_NAME} environment variable and restart Zed."),
                    )
                    .size(LabelSize::Small)
                    .color(Color::Muted)
                    .mt_0p5(),
                )
                .into_any_element()
        } else {
            // Key already configured: show the card; clicking resets the key
            // unless it came from the environment variable.
            ConfiguredApiCard::new(configured_card_label)
                .disabled(env_var_set)
                .on_click(cx.listener(|this, _, window, cx| this.reset_api_key(window, cx)))
                .when(env_var_set, |this| {
                    this.tooltip_label(format!(
                    "To reset your API key, unset the {API_KEY_ENV_VAR_NAME} environment variable."
                ))
                })
                .into_any_element()
        }
    }
}
1129
#[cfg(test)]
mod tests {
    use super::*;
    use anthropic::AnthropicModelMode;
    use language_model::{LanguageModelRequestMessage, MessageContent};

    /// Verifies that only the final content segment of a cached message is
    /// marked with ephemeral cache control.
    #[test]
    fn test_cache_control_only_on_last_segment() {
        let request = LanguageModelRequest {
            messages: vec![LanguageModelRequestMessage {
                role: Role::User,
                content: vec![
                    MessageContent::Text("Some prompt".to_string()),
                    MessageContent::Image(language_model::LanguageModelImage::empty()),
                    MessageContent::Image(language_model::LanguageModelImage::empty()),
                    MessageContent::Image(language_model::LanguageModelImage::empty()),
                    MessageContent::Image(language_model::LanguageModelImage::empty()),
                ],
                cache: true,
                reasoning_details: None,
            }],
            thread_id: None,
            prompt_id: None,
            intent: None,
            stop: vec![],
            temperature: None,
            tools: vec![],
            tool_choice: None,
            thinking_allowed: true,
            thinking_effort: None,
            speed: None,
        };

        let anthropic_request = into_anthropic(
            request,
            "claude-3-5-sonnet".to_string(),
            0.7,
            4096,
            AnthropicModelMode::Default,
        );

        assert_eq!(anthropic_request.messages.len(), 1);

        let message = &anthropic_request.messages[0];
        assert_eq!(message.content.len(), 5);

        assert!(matches!(
            message.content[0],
            anthropic::RequestContent::Text {
                cache_control: None,
                ..
            }
        ));
        // Every intermediate image (indices 1 through 3) must NOT carry
        // cache control. The range was previously `1..3`, which silently
        // skipped `content[3]`.
        for i in 1..4 {
            assert!(matches!(
                message.content[i],
                anthropic::RequestContent::Image {
                    cache_control: None,
                    ..
                }
            ));
        }

        assert!(matches!(
            message.content[4],
            anthropic::RequestContent::Image {
                cache_control: Some(anthropic::CacheControl {
                    cache_type: anthropic::CacheControlType::Ephemeral,
                }),
                ..
            }
        ));
    }

    /// Builds a two-message request (user text + assistant with the given
    /// content) and converts it with extended thinking enabled.
    fn request_with_assistant_content(
        assistant_content: Vec<MessageContent>,
    ) -> anthropic::Request {
        let mut request = LanguageModelRequest {
            messages: vec![LanguageModelRequestMessage {
                role: Role::User,
                content: vec![MessageContent::Text("Hello".to_string())],
                cache: false,
                reasoning_details: None,
            }],
            thinking_effort: None,
            thread_id: None,
            prompt_id: None,
            intent: None,
            stop: vec![],
            temperature: None,
            tools: vec![],
            tool_choice: None,
            thinking_allowed: true,
            speed: None,
        };
        request.messages.push(LanguageModelRequestMessage {
            role: Role::Assistant,
            content: assistant_content,
            cache: false,
            reasoning_details: None,
        });
        into_anthropic(
            request,
            "claude-sonnet-4-5".to_string(),
            1.0,
            16000,
            AnthropicModelMode::Thinking {
                budget_tokens: Some(10000),
            },
        )
    }

    /// Thinking blocks without a signature (e.g. cancelled mid-think) must be
    /// removed from the outgoing request.
    #[test]
    fn test_unsigned_thinking_blocks_stripped() {
        let result = request_with_assistant_content(vec![
            MessageContent::Thinking {
                text: "Cancelled mid-think, no signature".to_string(),
                signature: None,
            },
            MessageContent::Text("Some response text".to_string()),
        ]);

        let assistant_message = result
            .messages
            .iter()
            .find(|m| m.role == anthropic::Role::Assistant)
            .expect("assistant message should still exist");

        assert_eq!(
            assistant_message.content.len(),
            1,
            "Only the text content should remain; unsigned thinking block should be stripped"
        );
        assert!(matches!(
            &assistant_message.content[0],
            anthropic::RequestContent::Text { text, .. } if text == "Some response text"
        ));
    }

    /// Thinking blocks that carry a signature are valid and must be sent back
    /// to the API unchanged.
    #[test]
    fn test_signed_thinking_blocks_preserved() {
        let result = request_with_assistant_content(vec![
            MessageContent::Thinking {
                text: "Completed thinking".to_string(),
                signature: Some("valid-signature".to_string()),
            },
            MessageContent::Text("Response".to_string()),
        ]);

        let assistant_message = result
            .messages
            .iter()
            .find(|m| m.role == anthropic::Role::Assistant)
            .expect("assistant message should exist");

        assert_eq!(
            assistant_message.content.len(),
            2,
            "Both the signed thinking block and text should be preserved"
        );
        assert!(matches!(
            &assistant_message.content[0],
            anthropic::RequestContent::Thinking { thinking, signature, .. }
                if thinking == "Completed thinking" && signature == "valid-signature"
        ));
    }

    /// If stripping unsigned thinking leaves a message with no content at all,
    /// the entire message must be dropped from the request.
    #[test]
    fn test_only_unsigned_thinking_block_omits_entire_message() {
        let result = request_with_assistant_content(vec![MessageContent::Thinking {
            text: "Cancelled before any text or signature".to_string(),
            signature: None,
        }]);

        let assistant_messages: Vec<_> = result
            .messages
            .iter()
            .filter(|m| m.role == anthropic::Role::Assistant)
            .collect();

        assert_eq!(
            assistant_messages.len(),
            0,
            "An assistant message whose only content was an unsigned thinking block \
             should be omitted entirely"
        );
    }
}