cloud.rs

use anthropic::{AnthropicModelMode, parse_prompt_too_long};
use anyhow::{Result, anyhow};
use client::{Client, UserStore, zed_urls};
use collections::BTreeMap;
use feature_flags::{FeatureFlagAppExt, LlmClosedBetaFeatureFlag};
use futures::{
    AsyncBufReadExt, FutureExt, Stream, StreamExt, future::BoxFuture, stream::BoxStream,
};
use gpui::{
    AnyElement, AnyView, App, AsyncApp, Context, Entity, SemanticVersion, Subscription, Task,
};
use http_client::{AsyncBody, HttpClient, Method, Response, StatusCode};
use language_model::{
    AuthenticateError, CloudModel, LanguageModel, LanguageModelCacheConfiguration,
    LanguageModelCompletionError, LanguageModelId, LanguageModelKnownError, LanguageModelName,
    LanguageModelProviderId, LanguageModelProviderName, LanguageModelProviderState,
    LanguageModelProviderTosView, LanguageModelRequest, LanguageModelToolChoice,
    LanguageModelToolSchemaFormat, ModelRequestLimitReachedError, RateLimiter, RequestUsage,
    ZED_CLOUD_PROVIDER_ID,
};
use language_model::{
    LanguageModelCompletionEvent, LanguageModelProvider, LlmApiToken, PaymentRequiredError,
    RefreshLlmTokenListener,
};
use proto::Plan;
use release_channel::AppVersion;
use schemars::JsonSchema;
use serde::{Deserialize, Serialize, de::DeserializeOwned};
use settings::{Settings, SettingsStore};
use smol::Timer;
use smol::io::{AsyncReadExt, BufReader};
use std::pin::Pin;
use std::str::FromStr as _;
use std::{
    sync::{Arc, LazyLock},
    time::Duration,
};
use strum::IntoEnumIterator;
use thiserror::Error;
use ui::{TintColor, prelude::*};
use zed_llm_client::{
    CLIENT_SUPPORTS_STATUS_MESSAGES_HEADER_NAME, CURRENT_PLAN_HEADER_NAME, CompletionBody,
    CompletionRequestStatus, CountTokensBody, CountTokensResponse, EXPIRED_LLM_TOKEN_HEADER_NAME,
    MODEL_REQUESTS_RESOURCE_HEADER_VALUE, SERVER_SUPPORTS_STATUS_MESSAGES_HEADER_NAME,
    SUBSCRIPTION_LIMIT_RESOURCE_HEADER_NAME, TOOL_USE_LIMIT_REACHED_HEADER_NAME,
    ZED_VERSION_HEADER_NAME,
};

use crate::AllLanguageModelSettings;
use crate::provider::anthropic::{AnthropicEventMapper, count_anthropic_tokens, into_anthropic};
use crate::provider::google::{GoogleEventMapper, into_google};
use crate::provider::open_ai::{OpenAiEventMapper, count_open_ai_tokens, into_open_ai};

pub const PROVIDER_NAME: &str = "Zed";

const ZED_CLOUD_PROVIDER_ADDITIONAL_MODELS_JSON: Option<&str> =
    option_env!("ZED_CLOUD_PROVIDER_ADDITIONAL_MODELS_JSON");

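/// Returns extra models baked in at compile time via the
/// `ZED_CLOUD_PROVIDER_ADDITIONAL_MODELS_JSON` environment variable, which is
/// expected to hold a JSON array of [`AvailableModel`] entries. A hypothetical
/// value (model name and limits are illustrative, not real models):
///
/// ```json
/// [{"provider": "anthropic", "name": "claude-example", "max_tokens": 200000}]
/// ```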
fn zed_cloud_provider_additional_models() -> &'static [AvailableModel] {
    static ADDITIONAL_MODELS: LazyLock<Vec<AvailableModel>> = LazyLock::new(|| {
        ZED_CLOUD_PROVIDER_ADDITIONAL_MODELS_JSON
            .map(|json| {
                serde_json::from_str(json)
                    .expect("ZED_CLOUD_PROVIDER_ADDITIONAL_MODELS_JSON must be valid JSON")
            })
            .unwrap_or_default()
    });
    ADDITIONAL_MODELS.as_slice()
}

#[derive(Default, Clone, Debug, PartialEq)]
pub struct ZedDotDevSettings {
    pub available_models: Vec<AvailableModel>,
}

#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema)]
#[serde(rename_all = "lowercase")]
pub enum AvailableProvider {
    Anthropic,
    OpenAi,
    Google,
}

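/// A model made available through settings (or the closed-beta JSON). A
/// hypothetical settings entry might look like (values are illustrative):
///
/// ```json
/// {
///   "provider": "anthropic",
///   "name": "claude-example",
///   "display_name": "Claude Example",
///   "max_tokens": 200000,
///   "default_temperature": 1.0
/// }
/// ```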
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema)]
pub struct AvailableModel {
    /// The provider of the language model.
    pub provider: AvailableProvider,
    /// The model's name in the provider's API, e.g. `claude-3-5-sonnet-20240620`.
    pub name: String,
    /// The name displayed in the UI, such as in the assistant panel model dropdown menu.
    pub display_name: Option<String>,
    /// The size of the context window, indicating the maximum number of tokens the model can process.
    pub max_tokens: usize,
    /// The maximum number of output tokens allowed by the model.
    pub max_output_tokens: Option<u32>,
    /// The maximum number of completion tokens allowed by the model (`o1-*` models only).
    pub max_completion_tokens: Option<u32>,
    /// Override this model with a different Anthropic model for tool calls.
    pub tool_override: Option<String>,
    /// Indicates whether this custom model supports caching.
    pub cache_configuration: Option<LanguageModelCacheConfiguration>,
    /// The default temperature to use for this model.
    pub default_temperature: Option<f32>,
    /// Any extra beta headers to provide when using the model.
    #[serde(default)]
    pub extra_beta_headers: Vec<String>,
    /// The model's mode (e.g. thinking).
    pub mode: Option<ModelMode>,
}

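/// The reasoning mode for a model. With serde's internal tagging
/// (`tag = "type"`, lowercase), a thinking entry deserializes from JSON like
/// `{"type": "thinking", "budget_tokens": 4096}` (budget value illustrative).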
#[derive(Default, Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema)]
#[serde(tag = "type", rename_all = "lowercase")]
pub enum ModelMode {
    #[default]
    Default,
    Thinking {
        /// The maximum number of tokens to use for reasoning. Must be lower than the model's `max_output_tokens`.
        budget_tokens: Option<u32>,
    },
}

impl From<ModelMode> for AnthropicModelMode {
    fn from(value: ModelMode) -> Self {
        match value {
            ModelMode::Default => AnthropicModelMode::Default,
            ModelMode::Thinking { budget_tokens } => AnthropicModelMode::Thinking { budget_tokens },
        }
    }
}

pub struct CloudLanguageModelProvider {
    client: Arc<Client>,
    state: gpui::Entity<State>,
    _maintain_client_status: Task<()>,
}

pub struct State {
    client: Arc<Client>,
    llm_api_token: LlmApiToken,
    user_store: Entity<UserStore>,
    status: client::Status,
    accept_terms: Option<Task<Result<()>>>,
    _settings_subscription: Subscription,
    _llm_token_subscription: Subscription,
}

impl State {
    fn new(
        client: Arc<Client>,
        user_store: Entity<UserStore>,
        status: client::Status,
        cx: &mut Context<Self>,
    ) -> Self {
        let refresh_llm_token_listener = RefreshLlmTokenListener::global(cx);

        Self {
            client: client.clone(),
            llm_api_token: LlmApiToken::default(),
            user_store,
            status,
            accept_terms: None,
            _settings_subscription: cx.observe_global::<SettingsStore>(|_, cx| {
                cx.notify();
            }),
            _llm_token_subscription: cx.subscribe(
                &refresh_llm_token_listener,
                |this, _listener, _event, cx| {
                    let client = this.client.clone();
                    let llm_api_token = this.llm_api_token.clone();
                    cx.spawn(async move |_this, _cx| {
                        llm_api_token.refresh(&client).await?;
                        anyhow::Ok(())
                    })
                    .detach_and_log_err(cx);
                },
            ),
        }
    }

    fn is_signed_out(&self) -> bool {
        self.status.is_signed_out()
    }

    fn authenticate(&self, cx: &mut Context<Self>) -> Task<Result<()>> {
        let client = self.client.clone();
        cx.spawn(async move |state, cx| {
            client
                .authenticate_and_connect(true, &cx)
                .await
                .into_response()?;
            state.update(cx, |_, cx| cx.notify())
        })
    }

    fn has_accepted_terms_of_service(&self, cx: &App) -> bool {
        self.user_store
            .read(cx)
            .current_user_has_accepted_terms()
            .unwrap_or(false)
    }

    fn accept_terms_of_service(&mut self, cx: &mut Context<Self>) {
        let user_store = self.user_store.clone();
        self.accept_terms = Some(cx.spawn(async move |this, cx| {
            let _ = user_store
                .update(cx, |store, cx| store.accept_terms_of_service(cx))?
                .await;
            this.update(cx, |this, cx| {
                this.accept_terms = None;
                cx.notify()
            })
        }));
    }
}

impl CloudLanguageModelProvider {
    pub fn new(user_store: Entity<UserStore>, client: Arc<Client>, cx: &mut App) -> Self {
        let mut status_rx = client.status();
        let status = *status_rx.borrow();

        let state = cx.new(|cx| State::new(client.clone(), user_store.clone(), status, cx));

        let state_ref = state.downgrade();
        let maintain_client_status = cx.spawn(async move |cx| {
            while let Some(status) = status_rx.next().await {
                if let Some(this) = state_ref.upgrade() {
                    _ = this.update(cx, |this, cx| {
                        if this.status != status {
                            this.status = status;
                            cx.notify();
                        }
                    });
                } else {
                    break;
                }
            }
        });

        Self {
            client,
            state: state.clone(),
            _maintain_client_status: maintain_client_status,
        }
    }

    fn create_language_model(
        &self,
        model: CloudModel,
        llm_api_token: LlmApiToken,
    ) -> Arc<dyn LanguageModel> {
        Arc::new(CloudLanguageModel {
            id: LanguageModelId::from(model.id().to_string()),
            model,
            llm_api_token: llm_api_token.clone(),
            client: self.client.clone(),
            request_limiter: RateLimiter::new(4),
        })
    }
}

impl LanguageModelProviderState for CloudLanguageModelProvider {
    type ObservableEntity = State;

    fn observable_entity(&self) -> Option<gpui::Entity<Self::ObservableEntity>> {
        Some(self.state.clone())
    }
}

impl LanguageModelProvider for CloudLanguageModelProvider {
    fn id(&self) -> LanguageModelProviderId {
        LanguageModelProviderId(ZED_CLOUD_PROVIDER_ID.into())
    }

    fn name(&self) -> LanguageModelProviderName {
        LanguageModelProviderName(PROVIDER_NAME.into())
    }

    fn icon(&self) -> IconName {
        IconName::AiZed
    }

    fn default_model(&self, cx: &App) -> Option<Arc<dyn LanguageModel>> {
        let llm_api_token = self.state.read(cx).llm_api_token.clone();
        let model = CloudModel::Anthropic(anthropic::Model::ClaudeSonnet4);
        Some(self.create_language_model(model, llm_api_token))
    }

    fn default_fast_model(&self, cx: &App) -> Option<Arc<dyn LanguageModel>> {
        let llm_api_token = self.state.read(cx).llm_api_token.clone();
        let model = CloudModel::Anthropic(anthropic::Model::Claude3_5Sonnet);
        Some(self.create_language_model(model, llm_api_token))
    }

    fn recommended_models(&self, cx: &App) -> Vec<Arc<dyn LanguageModel>> {
        let llm_api_token = self.state.read(cx).llm_api_token.clone();
        [
            CloudModel::Anthropic(anthropic::Model::ClaudeSonnet4),
            CloudModel::Anthropic(anthropic::Model::ClaudeSonnet4Thinking),
        ]
        .into_iter()
        .map(|model| self.create_language_model(model, llm_api_token.clone()))
        .collect()
    }

    fn provided_models(&self, cx: &App) -> Vec<Arc<dyn LanguageModel>> {
        let mut models = BTreeMap::default();

        if cx.is_staff() {
            for model in anthropic::Model::iter() {
                if !matches!(model, anthropic::Model::Custom { .. }) {
                    models.insert(model.id().to_string(), CloudModel::Anthropic(model));
                }
            }
            for model in open_ai::Model::iter() {
                if !matches!(model, open_ai::Model::Custom { .. }) {
                    models.insert(model.id().to_string(), CloudModel::OpenAi(model));
                }
            }
            for model in google_ai::Model::iter() {
                if !matches!(model, google_ai::Model::Custom { .. }) {
                    models.insert(model.id().to_string(), CloudModel::Google(model));
                }
            }
        } else {
            models.insert(
                anthropic::Model::Claude3_5Sonnet.id().to_string(),
                CloudModel::Anthropic(anthropic::Model::Claude3_5Sonnet),
            );
            models.insert(
                anthropic::Model::Claude3_7Sonnet.id().to_string(),
                CloudModel::Anthropic(anthropic::Model::Claude3_7Sonnet),
            );
            models.insert(
                anthropic::Model::Claude3_7SonnetThinking.id().to_string(),
                CloudModel::Anthropic(anthropic::Model::Claude3_7SonnetThinking),
            );
            models.insert(
                anthropic::Model::ClaudeSonnet4.id().to_string(),
                CloudModel::Anthropic(anthropic::Model::ClaudeSonnet4),
            );
            models.insert(
                anthropic::Model::ClaudeSonnet4Thinking.id().to_string(),
                CloudModel::Anthropic(anthropic::Model::ClaudeSonnet4Thinking),
            );
        }

        let llm_closed_beta_models = if cx.has_flag::<LlmClosedBetaFeatureFlag>() {
            zed_cloud_provider_additional_models()
        } else {
            &[]
        };

        // Override the built-in set with models from settings and, when the
        // closed-beta flag is enabled, the compile-time additional models.
        // Entries with the same id replace the built-in ones.
        for model in AllLanguageModelSettings::get_global(cx)
            .zed_dot_dev
            .available_models
            .iter()
            .chain(llm_closed_beta_models)
            .cloned()
        {
            let model = match model.provider {
                AvailableProvider::Anthropic => CloudModel::Anthropic(anthropic::Model::Custom {
                    name: model.name.clone(),
                    display_name: model.display_name.clone(),
                    max_tokens: model.max_tokens,
                    tool_override: model.tool_override.clone(),
                    cache_configuration: model.cache_configuration.as_ref().map(|config| {
                        anthropic::AnthropicModelCacheConfiguration {
                            max_cache_anchors: config.max_cache_anchors,
                            should_speculate: config.should_speculate,
                            min_total_token: config.min_total_token,
                        }
                    }),
                    default_temperature: model.default_temperature,
                    max_output_tokens: model.max_output_tokens,
                    extra_beta_headers: model.extra_beta_headers.clone(),
                    mode: model.mode.unwrap_or_default().into(),
                }),
                AvailableProvider::OpenAi => CloudModel::OpenAi(open_ai::Model::Custom {
                    name: model.name.clone(),
                    display_name: model.display_name.clone(),
                    max_tokens: model.max_tokens,
                    max_output_tokens: model.max_output_tokens,
                    max_completion_tokens: model.max_completion_tokens,
                }),
                AvailableProvider::Google => CloudModel::Google(google_ai::Model::Custom {
                    name: model.name.clone(),
                    display_name: model.display_name.clone(),
                    max_tokens: model.max_tokens,
                }),
            };
            models.insert(model.id().to_string(), model.clone());
        }

        let llm_api_token = self.state.read(cx).llm_api_token.clone();
        models
            .into_values()
            .map(|model| self.create_language_model(model, llm_api_token.clone()))
            .collect()
    }

    fn is_authenticated(&self, cx: &App) -> bool {
        !self.state.read(cx).is_signed_out()
    }

    fn authenticate(&self, _cx: &mut App) -> Task<Result<(), AuthenticateError>> {
        Task::ready(Ok(()))
    }

    fn configuration_view(&self, _: &mut Window, cx: &mut App) -> AnyView {
        cx.new(|_| ConfigurationView {
            state: self.state.clone(),
        })
        .into()
    }

    fn must_accept_terms(&self, cx: &App) -> bool {
        !self.state.read(cx).has_accepted_terms_of_service(cx)
    }

    fn render_accept_terms(
        &self,
        view: LanguageModelProviderTosView,
        cx: &mut App,
    ) -> Option<AnyElement> {
        render_accept_terms(self.state.clone(), view, cx)
    }

    fn reset_credentials(&self, _cx: &mut App) -> Task<Result<()>> {
        Task::ready(Ok(()))
    }
}

fn render_accept_terms(
    state: Entity<State>,
    view_kind: LanguageModelProviderTosView,
    cx: &mut App,
) -> Option<AnyElement> {
    if state.read(cx).has_accepted_terms_of_service(cx) {
        return None;
    }

    let accept_terms_disabled = state.read(cx).accept_terms.is_some();

    let thread_fresh_start = matches!(view_kind, LanguageModelProviderTosView::ThreadFreshStart);
    let thread_empty_state = matches!(view_kind, LanguageModelProviderTosView::ThreadtEmptyState);

    let terms_button = Button::new("terms_of_service", "Terms of Service")
        .style(ButtonStyle::Subtle)
        .icon(IconName::ArrowUpRight)
        .icon_color(Color::Muted)
        .icon_size(IconSize::XSmall)
        .when(thread_empty_state, |this| this.label_size(LabelSize::Small))
        .on_click(move |_, _window, cx| cx.open_url("https://zed.dev/terms-of-service"));

    let button_container = h_flex().child(
        Button::new("accept_terms", "I accept the Terms of Service")
            .when(!thread_empty_state, |this| {
                this.full_width()
                    .style(ButtonStyle::Tinted(TintColor::Accent))
                    .icon(IconName::Check)
                    .icon_position(IconPosition::Start)
                    .icon_size(IconSize::Small)
            })
            .when(thread_empty_state, |this| {
                this.style(ButtonStyle::Tinted(TintColor::Warning))
                    .label_size(LabelSize::Small)
            })
            .disabled(accept_terms_disabled)
            .on_click({
                let state = state.downgrade();
                move |_, _window, cx| {
                    state
                        .update(cx, |state, cx| state.accept_terms_of_service(cx))
                        .ok();
                }
            }),
    );

    let form = if thread_empty_state {
        h_flex()
            .w_full()
            .flex_wrap()
            .justify_between()
            .child(
                h_flex()
                    .child(
                        Label::new("To start using Zed AI, please read and accept the")
                            .size(LabelSize::Small),
                    )
                    .child(terms_button),
            )
            .child(button_container)
    } else {
        v_flex()
            .w_full()
            .gap_2()
            .child(
                h_flex()
                    .flex_wrap()
                    .when(thread_fresh_start, |this| this.justify_center())
                    .child(Label::new(
                        "To start using Zed AI, please read and accept the",
                    ))
                    .child(terms_button),
            )
            .child({
                match view_kind {
                    LanguageModelProviderTosView::PromptEditorPopup => {
                        button_container.w_full().justify_end()
                    }
                    LanguageModelProviderTosView::Configuration => {
                        button_container.w_full().justify_start()
                    }
                    LanguageModelProviderTosView::ThreadFreshStart => {
                        button_container.w_full().justify_center()
                    }
                    LanguageModelProviderTosView::ThreadtEmptyState => div().w_0(),
                }
            })
    };

    Some(form.into_any())
}

pub struct CloudLanguageModel {
    id: LanguageModelId,
    model: CloudModel,
    llm_api_token: LlmApiToken,
    client: Arc<Client>,
    request_limiter: RateLimiter,
}

struct PerformLlmCompletionResponse {
    response: Response<AsyncBody>,
    usage: Option<RequestUsage>,
    tool_use_limit_reached: bool,
    includes_status_messages: bool,
}

impl CloudLanguageModel {
    const MAX_RETRIES: usize = 3;

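    /// Sends a completion request to the cloud endpoint, retrying up to
    /// [`Self::MAX_RETRIES`] times: an expired-token response triggers a token
    /// refresh, and 5xx responses are retried with exponential backoff
    /// (starting at one second and doubling per attempt). Payment-required and
    /// plan-limit responses are mapped to typed errors instead of retrying.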
    async fn perform_llm_completion(
        client: Arc<Client>,
        llm_api_token: LlmApiToken,
        app_version: Option<SemanticVersion>,
        body: CompletionBody,
    ) -> Result<PerformLlmCompletionResponse> {
        let http_client = &client.http_client();

        let mut token = llm_api_token.acquire(&client).await?;
        let mut retries_remaining = Self::MAX_RETRIES;
        let mut retry_delay = Duration::from_secs(1);

        loop {
            let request_builder = http_client::Request::builder()
                .method(Method::POST)
                .uri(http_client.build_zed_llm_url("/completions", &[])?.as_ref());
            let request_builder = if let Some(app_version) = app_version {
                request_builder.header(ZED_VERSION_HEADER_NAME, app_version.to_string())
            } else {
                request_builder
            };

            let request = request_builder
                .header("Content-Type", "application/json")
                .header("Authorization", format!("Bearer {token}"))
                .header(CLIENT_SUPPORTS_STATUS_MESSAGES_HEADER_NAME, "true")
                .body(serde_json::to_string(&body)?.into())?;
            let mut response = http_client.send(request).await?;
            let status = response.status();
            if status.is_success() {
                let includes_status_messages = response
                    .headers()
                    .get(SERVER_SUPPORTS_STATUS_MESSAGES_HEADER_NAME)
                    .is_some();

                let tool_use_limit_reached = response
                    .headers()
                    .get(TOOL_USE_LIMIT_REACHED_HEADER_NAME)
                    .is_some();

                let usage = if includes_status_messages {
                    None
                } else {
                    RequestUsage::from_headers(response.headers()).ok()
                };

                return Ok(PerformLlmCompletionResponse {
                    response,
                    usage,
                    includes_status_messages,
                    tool_use_limit_reached,
                });
            } else if response
                .headers()
                .get(EXPIRED_LLM_TOKEN_HEADER_NAME)
                .is_some()
            {
                // Guard against underflow if the server keeps reporting an
                // expired token; give up once the retry budget is spent.
                if retries_remaining == 0 {
                    anyhow::bail!(
                        "cloud language model completion failed after {} retries: LLM token expired",
                        Self::MAX_RETRIES
                    );
                }
                retries_remaining -= 1;
                token = llm_api_token.refresh(&client).await?;
            } else if status == StatusCode::FORBIDDEN
                && response
                    .headers()
                    .get(SUBSCRIPTION_LIMIT_RESOURCE_HEADER_NAME)
                    .is_some()
            {
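                // Note: `MODEL_REQUESTS_RESOURCE_HEADER_VALUE` is a `const`, so
                // this `if let` pattern compares the header value against the
                // constant rather than binding a new variable.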
                if let Some(MODEL_REQUESTS_RESOURCE_HEADER_VALUE) = response
                    .headers()
                    .get(SUBSCRIPTION_LIMIT_RESOURCE_HEADER_NAME)
                    .and_then(|resource| resource.to_str().ok())
                {
                    if let Some(plan) = response
                        .headers()
                        .get(CURRENT_PLAN_HEADER_NAME)
                        .and_then(|plan| plan.to_str().ok())
                        .and_then(|plan| zed_llm_client::Plan::from_str(plan).ok())
                    {
                        let plan = match plan {
                            zed_llm_client::Plan::ZedFree => Plan::Free,
                            zed_llm_client::Plan::ZedPro => Plan::ZedPro,
                            zed_llm_client::Plan::ZedProTrial => Plan::ZedProTrial,
                        };
                        return Err(anyhow!(ModelRequestLimitReachedError { plan }));
                    }
                }

                anyhow::bail!("Forbidden");
            } else if status.is_server_error() {
                // If we encounter an error in the 500 range, retry after a delay.
                // We've seen at least these in the wild from API providers:
                // * 500 Internal Server Error
                // * 502 Bad Gateway
                // * 529 Service Overloaded

                if retries_remaining == 0 {
                    let mut body = String::new();
                    response.body_mut().read_to_string(&mut body).await?;
                    anyhow::bail!(
                        "cloud language model completion failed after {} retries with status {status}: {body}",
                        Self::MAX_RETRIES
                    );
                }

                Timer::after(retry_delay).await;

                retries_remaining -= 1;
                retry_delay *= 2; // If it fails again, wait longer.
            } else if status == StatusCode::PAYMENT_REQUIRED {
                return Err(anyhow!(PaymentRequiredError));
            } else {
                let mut body = String::new();
                response.body_mut().read_to_string(&mut body).await?;
                return Err(anyhow!(ApiError { status, body }));
            }
        }
    }
}

#[derive(Debug, Error)]
#[error("cloud language model request failed with status {status}: {body}")]
struct ApiError {
    status: StatusCode,
    body: String,
}

impl LanguageModel for CloudLanguageModel {
    fn id(&self) -> LanguageModelId {
        self.id.clone()
    }

    fn name(&self) -> LanguageModelName {
        LanguageModelName::from(self.model.display_name().to_string())
    }

    fn provider_id(&self) -> LanguageModelProviderId {
        LanguageModelProviderId(ZED_CLOUD_PROVIDER_ID.into())
    }

    fn provider_name(&self) -> LanguageModelProviderName {
        LanguageModelProviderName(PROVIDER_NAME.into())
    }

    fn supports_tools(&self) -> bool {
        match self.model {
            CloudModel::Anthropic(_) => true,
            CloudModel::Google(_) => true,
            CloudModel::OpenAi(_) => true,
        }
    }

    fn supports_images(&self) -> bool {
        match self.model {
            CloudModel::Anthropic(_) => true,
            CloudModel::Google(_) => true,
            CloudModel::OpenAi(_) => false,
        }
    }

    fn supports_tool_choice(&self, choice: LanguageModelToolChoice) -> bool {
        match choice {
            LanguageModelToolChoice::Auto
            | LanguageModelToolChoice::Any
            | LanguageModelToolChoice::None => true,
        }
    }

    fn telemetry_id(&self) -> String {
        format!("zed.dev/{}", self.model.id())
    }

    fn tool_input_format(&self) -> LanguageModelToolSchemaFormat {
        self.model.tool_input_format()
    }

    fn max_token_count(&self) -> usize {
        self.model.max_token_count()
    }

    fn cache_configuration(&self) -> Option<LanguageModelCacheConfiguration> {
        match &self.model {
            CloudModel::Anthropic(model) => {
                model
                    .cache_configuration()
                    .map(|cache| LanguageModelCacheConfiguration {
                        max_cache_anchors: cache.max_cache_anchors,
                        should_speculate: cache.should_speculate,
                        min_total_token: cache.min_total_token,
                    })
            }
            CloudModel::OpenAi(_) | CloudModel::Google(_) => None,
        }
    }

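    /// Anthropic and OpenAI token counts are estimated locally via the
    /// provider-specific helpers; Google models require a round-trip to the
    /// cloud `/count_tokens` endpoint.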
    fn count_tokens(
        &self,
        request: LanguageModelRequest,
        cx: &App,
    ) -> BoxFuture<'static, Result<usize>> {
        match self.model.clone() {
            CloudModel::Anthropic(_) => count_anthropic_tokens(request, cx),
            CloudModel::OpenAi(model) => count_open_ai_tokens(request, model, cx),
            CloudModel::Google(model) => {
                let client = self.client.clone();
                let llm_api_token = self.llm_api_token.clone();
                let model_id = model.id().to_string();
                let generate_content_request = into_google(request, model_id.clone());
                async move {
                    let http_client = &client.http_client();
                    let token = llm_api_token.acquire(&client).await?;

                    let request_body = CountTokensBody {
                        provider: zed_llm_client::LanguageModelProvider::Google,
                        model: model_id,
                        provider_request: serde_json::to_value(&google_ai::CountTokensRequest {
                            generate_content_request,
                        })?,
                    };
                    let request = http_client::Request::builder()
                        .method(Method::POST)
                        .uri(
                            http_client
                                .build_zed_llm_url("/count_tokens", &[])?
                                .as_ref(),
                        )
                        .header("Content-Type", "application/json")
                        .header("Authorization", format!("Bearer {token}"))
                        .body(serde_json::to_string(&request_body)?.into())?;
                    let mut response = http_client.send(request).await?;
                    let status = response.status();
                    let mut response_body = String::new();
                    response
                        .body_mut()
                        .read_to_string(&mut response_body)
                        .await?;

                    if status.is_success() {
                        let response_body: CountTokensResponse =
                            serde_json::from_str(&response_body)?;

                        Ok(response_body.tokens)
                    } else {
                        Err(anyhow!(ApiError {
                            status,
                            body: response_body
                        }))
                    }
                }
                .boxed()
            }
        }
    }

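    /// Each provider arm follows the same shape: translate the request into
    /// the provider's wire format, POST it to `/completions`, then feed the
    /// response lines through the provider's event mapper, with synthetic
    /// usage and tool-use-limit status events appended to the stream.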
    fn stream_completion(
        &self,
        request: LanguageModelRequest,
        cx: &AsyncApp,
    ) -> BoxFuture<
        'static,
        Result<
            BoxStream<'static, Result<LanguageModelCompletionEvent, LanguageModelCompletionError>>,
        >,
    > {
        let thread_id = request.thread_id.clone();
        let prompt_id = request.prompt_id.clone();
        let mode = request.mode;
        let app_version = cx.update(|cx| AppVersion::global(cx)).ok();
        match &self.model {
            CloudModel::Anthropic(model) => {
                let request = into_anthropic(
                    request,
                    model.request_id().into(),
                    model.default_temperature(),
                    model.max_output_tokens(),
                    model.mode(),
                );
                let client = self.client.clone();
                let llm_api_token = self.llm_api_token.clone();
                let future = self.request_limiter.stream(async move {
                    let PerformLlmCompletionResponse {
                        response,
                        usage,
                        includes_status_messages,
                        tool_use_limit_reached,
                    } = Self::perform_llm_completion(
                        client.clone(),
                        llm_api_token,
                        app_version,
                        CompletionBody {
                            thread_id,
                            prompt_id,
                            mode,
                            provider: zed_llm_client::LanguageModelProvider::Anthropic,
                            model: request.model.clone(),
                            provider_request: serde_json::to_value(&request)?,
                        },
                    )
                    .await
                    .map_err(|err| match err.downcast::<ApiError>() {
                        Ok(api_err) => {
                            if api_err.status == StatusCode::BAD_REQUEST {
                                if let Some(tokens) = parse_prompt_too_long(&api_err.body) {
                                    return anyhow!(
                                        LanguageModelKnownError::ContextWindowLimitExceeded {
                                            tokens
                                        }
                                    );
                                }
                            }
                            anyhow!(api_err)
                        }
                        Err(err) => anyhow!(err),
                    })?;

                    let mut mapper = AnthropicEventMapper::new();
                    Ok(map_cloud_completion_events(
                        Box::pin(
                            response_lines(response, includes_status_messages)
                                .chain(usage_updated_event(usage))
                                .chain(tool_use_limit_reached_event(tool_use_limit_reached)),
                        ),
                        move |event| mapper.map_event(event),
                    ))
                });
                async move { Ok(future.await?.boxed()) }.boxed()
            }
            CloudModel::OpenAi(model) => {
                let client = self.client.clone();
                let request = into_open_ai(request, model, model.max_output_tokens());
                let llm_api_token = self.llm_api_token.clone();
                let future = self.request_limiter.stream(async move {
                    let PerformLlmCompletionResponse {
                        response,
                        usage,
                        includes_status_messages,
                        tool_use_limit_reached,
                    } = Self::perform_llm_completion(
                        client.clone(),
                        llm_api_token,
                        app_version,
                        CompletionBody {
                            thread_id,
                            prompt_id,
                            mode,
                            provider: zed_llm_client::LanguageModelProvider::OpenAi,
                            model: request.model.clone(),
                            provider_request: serde_json::to_value(&request)?,
                        },
                    )
                    .await?;

                    let mut mapper = OpenAiEventMapper::new();
                    Ok(map_cloud_completion_events(
                        Box::pin(
                            response_lines(response, includes_status_messages)
                                .chain(usage_updated_event(usage))
                                .chain(tool_use_limit_reached_event(tool_use_limit_reached)),
                        ),
                        move |event| mapper.map_event(event),
                    ))
                });
                async move { Ok(future.await?.boxed()) }.boxed()
            }
            CloudModel::Google(model) => {
                let client = self.client.clone();
                let request = into_google(request, model.id().into());
                let llm_api_token = self.llm_api_token.clone();
                let future = self.request_limiter.stream(async move {
                    let PerformLlmCompletionResponse {
                        response,
                        usage,
                        includes_status_messages,
                        tool_use_limit_reached,
                    } = Self::perform_llm_completion(
                        client.clone(),
                        llm_api_token,
                        app_version,
                        CompletionBody {
                            thread_id,
                            prompt_id,
                            mode,
                            provider: zed_llm_client::LanguageModelProvider::Google,
                            model: request.model.model_id.clone(),
                            provider_request: serde_json::to_value(&request)?,
                        },
                    )
                    .await?;

                    let mut mapper = GoogleEventMapper::new();
                    Ok(map_cloud_completion_events(
                        Box::pin(
                            response_lines(response, includes_status_messages)
                                .chain(usage_updated_event(usage))
                                .chain(tool_use_limit_reached_event(tool_use_limit_reached)),
                        ),
                        move |event| mapper.map_event(event),
                    ))
                });
                async move { Ok(future.await?.boxed()) }.boxed()
            }
        }
    }
}

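/// A single line of the streaming response body. Serde's default externally
/// tagged representation applies, so on the wire a status line looks like
/// `{"status": {...}}` and a provider event like `{"event": {...}}`.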
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "snake_case")]
pub enum CloudCompletionEvent<T> {
    Status(CompletionRequestStatus),
    Event(T),
}

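/// Flattens a stream of [`CloudCompletionEvent`]s into completion events:
/// status messages pass through as `StatusUpdate`s, provider events are
/// expanded by `map_callback` (which may yield zero or more events each), and
/// stream errors surface as `LanguageModelCompletionError::Other`.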
fn map_cloud_completion_events<T, F>(
    stream: Pin<Box<dyn Stream<Item = Result<CloudCompletionEvent<T>>> + Send>>,
    mut map_callback: F,
) -> BoxStream<'static, Result<LanguageModelCompletionEvent, LanguageModelCompletionError>>
where
    T: DeserializeOwned + 'static,
    F: FnMut(T) -> Vec<Result<LanguageModelCompletionEvent, LanguageModelCompletionError>>
        + Send
        + 'static,
{
    stream
        .flat_map(move |event| {
            futures::stream::iter(match event {
                Err(error) => {
                    vec![Err(LanguageModelCompletionError::Other(error))]
                }
                Ok(CloudCompletionEvent::Status(event)) => {
                    vec![Ok(LanguageModelCompletionEvent::StatusUpdate(event))]
                }
                Ok(CloudCompletionEvent::Event(event)) => map_callback(event),
            })
        })
        .boxed()
}

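/// Emits at most one synthetic `UsageUpdated` status event, for servers that
/// report usage via response headers rather than status messages.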
fn usage_updated_event<T>(
    usage: Option<RequestUsage>,
) -> impl Stream<Item = Result<CloudCompletionEvent<T>>> {
    futures::stream::iter(usage.map(|usage| {
        Ok(CloudCompletionEvent::Status(
            CompletionRequestStatus::UsageUpdated {
                amount: usage.amount as usize,
                limit: usage.limit,
            },
        ))
    }))
}

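/// Emits a synthetic `ToolUseLimitReached` status event when the server
/// signalled the limit via a response header.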
fn tool_use_limit_reached_event<T>(
    tool_use_limit_reached: bool,
) -> impl Stream<Item = Result<CloudCompletionEvent<T>>> {
    futures::stream::iter(tool_use_limit_reached.then(|| {
        Ok(CloudCompletionEvent::Status(
            CompletionRequestStatus::ToolUseLimitReached,
        ))
    }))
}

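/// Parses the streaming response body as newline-delimited JSON: one tagged
/// [`CloudCompletionEvent`] per line when the server supports status messages,
/// otherwise one bare provider event per line, wrapped in
/// `CloudCompletionEvent::Event`.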
fn response_lines<T: DeserializeOwned>(
    response: Response<AsyncBody>,
    includes_status_messages: bool,
) -> impl Stream<Item = Result<CloudCompletionEvent<T>>> {
    futures::stream::try_unfold(
        (String::new(), BufReader::new(response.into_body())),
        move |(mut line, mut body)| async move {
            match body.read_line(&mut line).await {
                Ok(0) => Ok(None),
                Ok(_) => {
                    let event = if includes_status_messages {
                        serde_json::from_str::<CloudCompletionEvent<T>>(&line)?
                    } else {
                        CloudCompletionEvent::Event(serde_json::from_str::<T>(&line)?)
                    };

                    line.clear();
                    Ok(Some((event, (line, body))))
                }
                Err(e) => Err(e.into()),
            }
        },
    )
}

struct ConfigurationView {
    state: gpui::Entity<State>,
}

impl ConfigurationView {
    fn authenticate(&mut self, cx: &mut Context<Self>) {
        self.state.update(cx, |state, cx| {
            state.authenticate(cx).detach_and_log_err(cx);
        });
        cx.notify();
    }
}

impl Render for ConfigurationView {
    fn render(&mut self, _: &mut Window, cx: &mut Context<Self>) -> impl IntoElement {
        const ZED_PRICING_URL: &str = "https://zed.dev/pricing";

        let is_connected = !self.state.read(cx).is_signed_out();
        let user_store = self.state.read(cx).user_store.read(cx);
        let plan = user_store.current_plan();
        let subscription_period = user_store.subscription_period();
        let eligible_for_trial = user_store.trial_started_at().is_none();
        let has_accepted_terms = self.state.read(cx).has_accepted_terms_of_service(cx);

        let is_pro = plan == Some(proto::Plan::ZedPro);
        let subscription_text = match (plan, subscription_period) {
            (Some(proto::Plan::ZedPro), Some(_)) => {
                "You have access to Zed's hosted LLMs through your Zed Pro subscription."
            }
            (Some(proto::Plan::ZedProTrial), Some(_)) => {
                "You have access to Zed's hosted LLMs through your Zed Pro trial."
            }
            (Some(proto::Plan::Free), Some(_)) => {
                "You have basic access to Zed's hosted LLMs through your Zed Free subscription."
            }
            _ => {
                if eligible_for_trial {
                    "Subscribe for access to Zed's hosted LLMs. Start with a 14-day free trial."
                } else {
                    "Subscribe for access to Zed's hosted LLMs."
                }
            }
        };
        let manage_subscription_buttons = if is_pro {
            h_flex().child(
                Button::new("manage_settings", "Manage Subscription")
                    .style(ButtonStyle::Tinted(TintColor::Accent))
                    .on_click(cx.listener(|_, _, _, cx| cx.open_url(&zed_urls::account_url(cx)))),
            )
        } else {
            h_flex()
                .gap_2()
                .child(
                    Button::new("learn_more", "Learn more")
                        .style(ButtonStyle::Subtle)
                        .on_click(cx.listener(|_, _, _, cx| cx.open_url(ZED_PRICING_URL))),
                )
                .child(
                    Button::new("upgrade", "Upgrade")
                        .style(ButtonStyle::Subtle)
                        .color(Color::Accent)
                        .on_click(
                            cx.listener(|_, _, _, cx| cx.open_url(&zed_urls::account_url(cx))),
                        ),
                )
        };

        if is_connected {
            v_flex()
                .gap_3()
                .w_full()
                .children(render_accept_terms(
                    self.state.clone(),
                    LanguageModelProviderTosView::Configuration,
                    cx,
                ))
                .when(has_accepted_terms, |this| {
                    this.child(subscription_text)
                        .child(manage_subscription_buttons)
                })
        } else {
            v_flex()
                .gap_2()
                .child(Label::new("Use Zed AI to access hosted language models."))
                .child(
                    Button::new("sign_in", "Sign In")
                        .icon_color(Color::Muted)
                        .icon(IconName::Github)
                        .icon_position(IconPosition::Start)
                        .on_click(cx.listener(move |this, _, _, cx| this.authenticate(cx))),
                )
        }
    }
}