cloud.rs

use anthropic::AnthropicModelMode;
use anyhow::{Context as _, Result, anyhow};
use chrono::{DateTime, Utc};
use client::{Client, ModelRequestUsage, UserStore, zed_urls};
use futures::{
    AsyncBufReadExt, FutureExt, Stream, StreamExt, future::BoxFuture, stream::BoxStream,
};
use google_ai::GoogleModelMode;
use gpui::{
    AnyElement, AnyView, App, AsyncApp, Context, Entity, SemanticVersion, Subscription, Task,
};
use http_client::http::{HeaderMap, HeaderValue};
use http_client::{AsyncBody, HttpClient, Method, Response, StatusCode};
use language_model::{
    AuthenticateError, LanguageModel, LanguageModelCacheConfiguration,
    LanguageModelCompletionError, LanguageModelCompletionEvent, LanguageModelId, LanguageModelName,
    LanguageModelProvider, LanguageModelProviderId, LanguageModelProviderName,
    LanguageModelProviderState, LanguageModelProviderTosView, LanguageModelRequest,
    LanguageModelToolChoice, LanguageModelToolSchemaFormat, LlmApiToken,
    ModelRequestLimitReachedError, PaymentRequiredError, RateLimiter, RefreshLlmTokenListener,
};
use proto::Plan;
use release_channel::AppVersion;
use schemars::JsonSchema;
use serde::{Deserialize, Serialize, de::DeserializeOwned};
use settings::SettingsStore;
use smol::io::{AsyncReadExt, BufReader};
use std::pin::Pin;
use std::str::FromStr as _;
use std::sync::Arc;
use std::time::Duration;
use thiserror::Error;
use ui::{TintColor, prelude::*};
use util::{ResultExt as _, maybe};
use zed_llm_client::{
    CLIENT_SUPPORTS_STATUS_MESSAGES_HEADER_NAME, CURRENT_PLAN_HEADER_NAME, CompletionBody,
    CompletionRequestStatus, CountTokensBody, CountTokensResponse, EXPIRED_LLM_TOKEN_HEADER_NAME,
    ListModelsResponse, MODEL_REQUESTS_RESOURCE_HEADER_VALUE,
    SERVER_SUPPORTS_STATUS_MESSAGES_HEADER_NAME, SUBSCRIPTION_LIMIT_RESOURCE_HEADER_NAME,
    TOOL_USE_LIMIT_REACHED_HEADER_NAME, ZED_VERSION_HEADER_NAME,
};

use crate::provider::anthropic::{AnthropicEventMapper, count_anthropic_tokens, into_anthropic};
use crate::provider::google::{GoogleEventMapper, into_google};
use crate::provider::open_ai::{OpenAiEventMapper, count_open_ai_tokens, into_open_ai};

const PROVIDER_ID: LanguageModelProviderId = language_model::ZED_CLOUD_PROVIDER_ID;
const PROVIDER_NAME: LanguageModelProviderName = language_model::ZED_CLOUD_PROVIDER_NAME;

#[derive(Default, Clone, Debug, PartialEq)]
pub struct ZedDotDevSettings {
    pub available_models: Vec<AvailableModel>,
}

#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema)]
#[serde(rename_all = "lowercase")]
pub enum AvailableProvider {
    Anthropic,
    OpenAi,
    Google,
}

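/// A custom model entry for `ZedDotDevSettings::available_models`.
///
/// An illustrative JSON value (a sketch based on this struct's serde
/// attributes; the surrounding settings key is not defined in this file):
///
/// ```json
/// {
///   "provider": "anthropic",
///   "name": "claude-3-5-sonnet-20240620",
///   "display_name": "Claude 3.5 Sonnet",
///   "max_tokens": 200000,
///   "mode": { "type": "thinking", "budget_tokens": 4096 }
/// }
/// ```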
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema)]
pub struct AvailableModel {
    /// The provider of the language model.
    pub provider: AvailableProvider,
    /// The model's name in the provider's API, e.g. `claude-3-5-sonnet-20240620`.
    pub name: String,
    /// The name displayed in the UI, such as in the assistant panel model dropdown menu.
    pub display_name: Option<String>,
    /// The size of the context window, indicating the maximum number of tokens the model can process.
    pub max_tokens: usize,
    /// The maximum number of output tokens allowed by the model.
    pub max_output_tokens: Option<u64>,
    /// The maximum number of completion tokens allowed by the model (o1-* only).
    pub max_completion_tokens: Option<u64>,
    /// Override this model with a different Anthropic model for tool calls.
    pub tool_override: Option<String>,
    /// Indicates whether this custom model supports caching.
    pub cache_configuration: Option<LanguageModelCacheConfiguration>,
    /// The default temperature to use for this model.
    pub default_temperature: Option<f32>,
    /// Any extra beta headers to provide when using the model.
    #[serde(default)]
    pub extra_beta_headers: Vec<String>,
    /// The model's mode (e.g. thinking).
    pub mode: Option<ModelMode>,
}

#[derive(Default, Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema)]
#[serde(tag = "type", rename_all = "lowercase")]
pub enum ModelMode {
    #[default]
    Default,
    Thinking {
        /// The maximum number of tokens to use for reasoning. Must be lower than the model's `max_output_tokens`.
        budget_tokens: Option<u32>,
    },
}
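
// With the internally-tagged serde representation above, a thinking mode
// round-trips as `{"type":"thinking","budget_tokens":4096}` and the default
// mode as `{"type":"default"}`.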

impl From<ModelMode> for AnthropicModelMode {
    fn from(value: ModelMode) -> Self {
        match value {
            ModelMode::Default => AnthropicModelMode::Default,
            ModelMode::Thinking { budget_tokens } => AnthropicModelMode::Thinking { budget_tokens },
        }
    }
}

pub struct CloudLanguageModelProvider {
    client: Arc<Client>,
    state: gpui::Entity<State>,
    _maintain_client_status: Task<()>,
}

pub struct State {
    client: Arc<Client>,
    llm_api_token: LlmApiToken,
    user_store: Entity<UserStore>,
    status: client::Status,
    accept_terms_of_service_task: Option<Task<Result<()>>>,
    models: Vec<Arc<zed_llm_client::LanguageModel>>,
    default_model: Option<Arc<zed_llm_client::LanguageModel>>,
    default_fast_model: Option<Arc<zed_llm_client::LanguageModel>>,
    recommended_models: Vec<Arc<zed_llm_client::LanguageModel>>,
    _fetch_models_task: Task<()>,
    _settings_subscription: Subscription,
    _llm_token_subscription: Subscription,
}

impl State {
    fn new(
        client: Arc<Client>,
        user_store: Entity<UserStore>,
        status: client::Status,
        cx: &mut Context<Self>,
    ) -> Self {
        let refresh_llm_token_listener = RefreshLlmTokenListener::global(cx);

        Self {
            client: client.clone(),
            llm_api_token: LlmApiToken::default(),
            user_store,
            status,
            accept_terms_of_service_task: None,
            models: Vec::new(),
            default_model: None,
            default_fast_model: None,
            recommended_models: Vec::new(),
            _fetch_models_task: cx.spawn(async move |this, cx| {
                maybe!(async move {
                    let (client, llm_api_token) = this
                        .read_with(cx, |this, _cx| (client.clone(), this.llm_api_token.clone()))?;

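                    // Wait until the client reports a connected status; the
                    // model list is served by the authenticated LLM API.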
                    loop {
                        let status = this.read_with(cx, |this, _cx| this.status)?;
                        if matches!(status, client::Status::Connected { .. }) {
                            break;
                        }

                        cx.background_executor()
                            .timer(Duration::from_millis(100))
                            .await;
                    }

                    let response = Self::fetch_models(client, llm_api_token).await?;
                    this.update(cx, |this, cx| {
                        this.update_models(response, cx);
                    })
                })
                .await
                .context("failed to fetch Zed models")
                .log_err();
            }),
            _settings_subscription: cx.observe_global::<SettingsStore>(|_, cx| {
                cx.notify();
            }),
            _llm_token_subscription: cx.subscribe(
                &refresh_llm_token_listener,
                move |this, _listener, _event, cx| {
                    let client = this.client.clone();
                    let llm_api_token = this.llm_api_token.clone();
                    cx.spawn(async move |this, cx| {
                        llm_api_token.refresh(&client).await?;
                        let response = Self::fetch_models(client, llm_api_token).await?;
                        this.update(cx, |this, cx| {
                            this.update_models(response, cx);
                        })
                    })
                    .detach_and_log_err(cx);
                },
            ),
        }
    }

    fn is_signed_out(&self) -> bool {
        self.status.is_signed_out()
    }

    fn authenticate(&self, cx: &mut Context<Self>) -> Task<Result<()>> {
        let client = self.client.clone();
        cx.spawn(async move |state, cx| {
            client
                .authenticate_and_connect(true, &cx)
                .await
                .into_response()?;
            state.update(cx, |_, cx| cx.notify())
        })
    }

    fn has_accepted_terms_of_service(&self, cx: &App) -> bool {
        self.user_store
            .read(cx)
            .current_user_has_accepted_terms()
            .unwrap_or(false)
    }

    fn accept_terms_of_service(&mut self, cx: &mut Context<Self>) {
        let user_store = self.user_store.clone();
        self.accept_terms_of_service_task = Some(cx.spawn(async move |this, cx| {
            let _ = user_store
                .update(cx, |store, cx| store.accept_terms_of_service(cx))?
                .await;
            this.update(cx, |this, cx| {
                this.accept_terms_of_service_task = None;
                cx.notify()
            })
        }));
    }

    fn update_models(&mut self, response: ListModelsResponse, cx: &mut Context<Self>) {
        let mut models = Vec::new();

        for model in response.models {
            models.push(Arc::new(model.clone()));

            // Right now we represent thinking variants of models as separate models on the client,
            // so we need to insert variants for any model that supports thinking.
            if model.supports_thinking {
                models.push(Arc::new(zed_llm_client::LanguageModel {
                    id: zed_llm_client::LanguageModelId(format!("{}-thinking", model.id).into()),
                    display_name: format!("{} Thinking", model.display_name),
                    ..model
                }));
            }
        }

        self.default_model = models
            .iter()
            .find(|model| model.id == response.default_model)
            .cloned();
        self.default_fast_model = models
            .iter()
            .find(|model| model.id == response.default_fast_model)
            .cloned();
        self.recommended_models = response
            .recommended_models
            .iter()
            .filter_map(|id| models.iter().find(|model| &model.id == id))
            .cloned()
            .collect();
        self.models = models;
        cx.notify();
    }

    async fn fetch_models(
        client: Arc<Client>,
        llm_api_token: LlmApiToken,
    ) -> Result<ListModelsResponse> {
        let http_client = &client.http_client();
        let token = llm_api_token.acquire(&client).await?;

        let request = http_client::Request::builder()
            .method(Method::GET)
            .uri(http_client.build_zed_llm_url("/models", &[])?.as_ref())
            .header("Authorization", format!("Bearer {token}"))
            .body(AsyncBody::empty())?;
        let mut response = http_client
            .send(request)
            .await
            .context("failed to send list models request")?;

        let mut body = String::new();
        response.body_mut().read_to_string(&mut body).await?;
        if response.status().is_success() {
            Ok(serde_json::from_str(&body)?)
        } else {
            anyhow::bail!(
                "error listing models.\nStatus: {:?}\nBody: {body}",
                response.status(),
            )
        }
    }
}

impl CloudLanguageModelProvider {
    pub fn new(user_store: Entity<UserStore>, client: Arc<Client>, cx: &mut App) -> Self {
        let mut status_rx = client.status();
        let status = *status_rx.borrow();

        let state = cx.new(|cx| State::new(client.clone(), user_store.clone(), status, cx));

        let state_ref = state.downgrade();
        let maintain_client_status = cx.spawn(async move |cx| {
            while let Some(status) = status_rx.next().await {
                if let Some(this) = state_ref.upgrade() {
                    _ = this.update(cx, |this, cx| {
                        if this.status != status {
                            this.status = status;
                            cx.notify();
                        }
                    });
                } else {
                    break;
                }
            }
        });

        Self {
            client,
            state: state.clone(),
            _maintain_client_status: maintain_client_status,
        }
    }

    fn create_language_model(
        &self,
        model: Arc<zed_llm_client::LanguageModel>,
        llm_api_token: LlmApiToken,
    ) -> Arc<dyn LanguageModel> {
        Arc::new(CloudLanguageModel {
            id: LanguageModelId(SharedString::from(model.id.0.clone())),
            model,
            llm_api_token: llm_api_token.clone(),
            client: self.client.clone(),
            request_limiter: RateLimiter::new(4),
        })
    }
}

impl LanguageModelProviderState for CloudLanguageModelProvider {
    type ObservableEntity = State;

    fn observable_entity(&self) -> Option<gpui::Entity<Self::ObservableEntity>> {
        Some(self.state.clone())
    }
}

impl LanguageModelProvider for CloudLanguageModelProvider {
    fn id(&self) -> LanguageModelProviderId {
        PROVIDER_ID
    }

    fn name(&self) -> LanguageModelProviderName {
        PROVIDER_NAME
    }

    fn icon(&self) -> IconName {
        IconName::AiZed
    }

    fn default_model(&self, cx: &App) -> Option<Arc<dyn LanguageModel>> {
        let default_model = self.state.read(cx).default_model.clone()?;
        let llm_api_token = self.state.read(cx).llm_api_token.clone();
        Some(self.create_language_model(default_model, llm_api_token))
    }

    fn default_fast_model(&self, cx: &App) -> Option<Arc<dyn LanguageModel>> {
        let default_fast_model = self.state.read(cx).default_fast_model.clone()?;
        let llm_api_token = self.state.read(cx).llm_api_token.clone();
        Some(self.create_language_model(default_fast_model, llm_api_token))
    }

    fn recommended_models(&self, cx: &App) -> Vec<Arc<dyn LanguageModel>> {
        let llm_api_token = self.state.read(cx).llm_api_token.clone();
        self.state
            .read(cx)
            .recommended_models
            .iter()
            .cloned()
            .map(|model| self.create_language_model(model, llm_api_token.clone()))
            .collect()
    }

    fn provided_models(&self, cx: &App) -> Vec<Arc<dyn LanguageModel>> {
        let llm_api_token = self.state.read(cx).llm_api_token.clone();
        self.state
            .read(cx)
            .models
            .iter()
            .cloned()
            .map(|model| self.create_language_model(model, llm_api_token.clone()))
            .collect()
    }

    fn is_authenticated(&self, cx: &App) -> bool {
        let state = self.state.read(cx);
        !state.is_signed_out() && state.has_accepted_terms_of_service(cx)
    }

    fn authenticate(&self, _cx: &mut App) -> Task<Result<(), AuthenticateError>> {
        Task::ready(Ok(()))
    }

    fn configuration_view(&self, _: &mut Window, cx: &mut App) -> AnyView {
        cx.new(|_| ConfigurationView::new(self.state.clone()))
            .into()
    }

    fn must_accept_terms(&self, cx: &App) -> bool {
        !self.state.read(cx).has_accepted_terms_of_service(cx)
    }

    fn render_accept_terms(
        &self,
        view: LanguageModelProviderTosView,
        cx: &mut App,
    ) -> Option<AnyElement> {
        let state = self.state.read(cx);
        if state.has_accepted_terms_of_service(cx) {
            return None;
        }
        Some(
            render_accept_terms(view, state.accept_terms_of_service_task.is_some(), {
                let state = self.state.clone();
                move |_window, cx| {
                    state.update(cx, |state, cx| state.accept_terms_of_service(cx));
                }
            })
            .into_any_element(),
        )
    }

    fn reset_credentials(&self, _cx: &mut App) -> Task<Result<()>> {
        Task::ready(Ok(()))
    }
}

fn render_accept_terms(
    view_kind: LanguageModelProviderTosView,
    accept_terms_of_service_in_progress: bool,
    accept_terms_callback: impl Fn(&mut Window, &mut App) + 'static,
) -> impl IntoElement {
    let thread_fresh_start = matches!(view_kind, LanguageModelProviderTosView::ThreadFreshStart);
    let thread_empty_state = matches!(view_kind, LanguageModelProviderTosView::ThreadEmptyState);

    let terms_button = Button::new("terms_of_service", "Terms of Service")
        .style(ButtonStyle::Subtle)
        .icon(IconName::ArrowUpRight)
        .icon_color(Color::Muted)
        .icon_size(IconSize::XSmall)
        .when(thread_empty_state, |this| this.label_size(LabelSize::Small))
        .on_click(move |_, _window, cx| cx.open_url("https://zed.dev/terms-of-service"));

    let button_container = h_flex().child(
        Button::new("accept_terms", "I accept the Terms of Service")
            .when(!thread_empty_state, |this| {
                this.full_width()
                    .style(ButtonStyle::Tinted(TintColor::Accent))
                    .icon(IconName::Check)
                    .icon_position(IconPosition::Start)
                    .icon_size(IconSize::Small)
            })
            .when(thread_empty_state, |this| {
                this.style(ButtonStyle::Tinted(TintColor::Warning))
                    .label_size(LabelSize::Small)
            })
            .disabled(accept_terms_of_service_in_progress)
            .on_click(move |_, window, cx| (accept_terms_callback)(window, cx)),
    );

    if thread_empty_state {
        h_flex()
            .w_full()
            .flex_wrap()
            .justify_between()
            .child(
                h_flex()
                    .child(
                        Label::new("To start using Zed AI, please read and accept the")
                            .size(LabelSize::Small),
                    )
                    .child(terms_button),
            )
            .child(button_container)
    } else {
        v_flex()
            .w_full()
            .gap_2()
            .child(
                h_flex()
                    .flex_wrap()
                    .when(thread_fresh_start, |this| this.justify_center())
                    .child(Label::new(
                        "To start using Zed AI, please read and accept the",
                    ))
                    .child(terms_button),
            )
            .child({
                match view_kind {
                    LanguageModelProviderTosView::PromptEditorPopup => {
                        button_container.w_full().justify_end()
                    }
                    LanguageModelProviderTosView::Configuration => {
                        button_container.w_full().justify_start()
                    }
                    LanguageModelProviderTosView::ThreadFreshStart => {
                        button_container.w_full().justify_center()
                    }
                    LanguageModelProviderTosView::ThreadEmptyState => div().w_0(),
                }
            })
    }
}

pub struct CloudLanguageModel {
    id: LanguageModelId,
    model: Arc<zed_llm_client::LanguageModel>,
    llm_api_token: LlmApiToken,
    client: Arc<Client>,
    request_limiter: RateLimiter,
}

struct PerformLlmCompletionResponse {
    response: Response<AsyncBody>,
    usage: Option<ModelRequestUsage>,
    tool_use_limit_reached: bool,
    includes_status_messages: bool,
}

impl CloudLanguageModel {
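    /// Sends a completion request to the cloud LLM API, retrying once with a
    /// refreshed token if the server reports the current one expired, and
    /// translating plan-limit and payment responses into dedicated errors.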
    async fn perform_llm_completion(
        client: Arc<Client>,
        llm_api_token: LlmApiToken,
        app_version: Option<SemanticVersion>,
        body: CompletionBody,
    ) -> Result<PerformLlmCompletionResponse> {
        let http_client = &client.http_client();

        let mut token = llm_api_token.acquire(&client).await?;
        let mut refreshed_token = false;

        loop {
            let request_builder = http_client::Request::builder()
                .method(Method::POST)
                .uri(http_client.build_zed_llm_url("/completions", &[])?.as_ref());
            let request_builder = if let Some(app_version) = app_version {
                request_builder.header(ZED_VERSION_HEADER_NAME, app_version.to_string())
            } else {
                request_builder
            };

            let request = request_builder
                .header("Content-Type", "application/json")
                .header("Authorization", format!("Bearer {token}"))
                .header(CLIENT_SUPPORTS_STATUS_MESSAGES_HEADER_NAME, "true")
                .body(serde_json::to_string(&body)?.into())?;
            let mut response = http_client.send(request).await?;
            let status = response.status();
            if status.is_success() {
                let includes_status_messages = response
                    .headers()
                    .get(SERVER_SUPPORTS_STATUS_MESSAGES_HEADER_NAME)
                    .is_some();

                let tool_use_limit_reached = response
                    .headers()
                    .get(TOOL_USE_LIMIT_REACHED_HEADER_NAME)
                    .is_some();

                let usage = if includes_status_messages {
                    None
                } else {
                    ModelRequestUsage::from_headers(response.headers()).ok()
                };

                return Ok(PerformLlmCompletionResponse {
                    response,
                    usage,
                    includes_status_messages,
                    tool_use_limit_reached,
                });
            }

            if !refreshed_token
                && response
                    .headers()
                    .get(EXPIRED_LLM_TOKEN_HEADER_NAME)
                    .is_some()
            {
                token = llm_api_token.refresh(&client).await?;
                refreshed_token = true;
                continue;
            }

            if status == StatusCode::FORBIDDEN
                && response
                    .headers()
                    .get(SUBSCRIPTION_LIMIT_RESOURCE_HEADER_NAME)
                    .is_some()
            {
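                // Note: `MODEL_REQUESTS_RESOURCE_HEADER_VALUE` acts as a
                // constant pattern here, so this branch only matches when the
                // header's value equals that constant.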
                if let Some(MODEL_REQUESTS_RESOURCE_HEADER_VALUE) = response
                    .headers()
                    .get(SUBSCRIPTION_LIMIT_RESOURCE_HEADER_NAME)
                    .and_then(|resource| resource.to_str().ok())
                {
                    if let Some(plan) = response
                        .headers()
                        .get(CURRENT_PLAN_HEADER_NAME)
                        .and_then(|plan| plan.to_str().ok())
                        .and_then(|plan| zed_llm_client::Plan::from_str(plan).ok())
                    {
                        let plan = match plan {
                            zed_llm_client::Plan::ZedFree => Plan::Free,
                            zed_llm_client::Plan::ZedPro => Plan::ZedPro,
                            zed_llm_client::Plan::ZedProTrial => Plan::ZedProTrial,
                        };
                        return Err(anyhow!(ModelRequestLimitReachedError { plan }));
                    }
                }
            } else if status == StatusCode::PAYMENT_REQUIRED {
                return Err(anyhow!(PaymentRequiredError));
            }

            let mut body = String::new();
            let headers = response.headers().clone();
            response.body_mut().read_to_string(&mut body).await?;
            return Err(anyhow!(ApiError {
                status,
                body,
                headers
            }));
        }
    }
}

#[derive(Debug, Error)]
#[error("cloud language model request failed with status {status}: {body}")]
struct ApiError {
    status: StatusCode,
    body: String,
    headers: HeaderMap<HeaderValue>,
}

/// Represents error responses from Zed's cloud API.
///
/// Example JSON for an upstream HTTP error:
/// ```json
/// {
///   "code": "upstream_http_error",
///   "message": "Received an error from the Anthropic API: upstream connect error or disconnect/reset before headers, reset reason: connection timeout",
///   "upstream_status": 503
/// }
/// ```
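///
/// The upstream status can also be embedded in the code itself, as in:
/// ```json
/// {
///   "code": "upstream_http_429",
///   "message": "Upstream Anthropic rate limit exceeded.",
///   "retry_after": 30.5
/// }
/// ```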
#[derive(Debug, serde::Deserialize)]
struct CloudApiError {
    code: String,
    message: String,
    #[serde(default)]
    #[serde(deserialize_with = "deserialize_optional_status_code")]
    upstream_status: Option<StatusCode>,
    #[serde(default)]
    retry_after: Option<f64>,
}

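/// Deserializes an optional numeric status code, discarding values that are
/// not valid HTTP status codes instead of failing the whole error payload.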
fn deserialize_optional_status_code<'de, D>(deserializer: D) -> Result<Option<StatusCode>, D::Error>
where
    D: serde::Deserializer<'de>,
{
    let opt: Option<u16> = Option::deserialize(deserializer)?;
    Ok(opt.and_then(|code| StatusCode::from_u16(code).ok()))
}

impl From<ApiError> for LanguageModelCompletionError {
    fn from(error: ApiError) -> Self {
        if let Ok(cloud_error) = serde_json::from_str::<CloudApiError>(&error.body) {
            if cloud_error.code.starts_with("upstream_http_") {
                let status = if let Some(status) = cloud_error.upstream_status {
                    status
                } else if cloud_error.code.ends_with("_error") {
                    error.status
                } else {
                    // The status may be embedded in the code itself (e.g.
                    // "upstream_http_429"); parse it, falling back to the
                    // response's own status.
                    cloud_error
                        .code
                        .strip_prefix("upstream_http_")
                        .and_then(|code_str| code_str.parse::<u16>().ok())
                        .and_then(|code| StatusCode::from_u16(code).ok())
                        .unwrap_or(error.status)
                };

                return LanguageModelCompletionError::UpstreamProviderError {
                    message: cloud_error.message,
                    status,
                    retry_after: cloud_error.retry_after.map(Duration::from_secs_f64),
                };
            }
        }

        let retry_after = None;
        LanguageModelCompletionError::from_http_status(
            PROVIDER_NAME,
            error.status,
            error.body,
            retry_after,
        )
    }
}

impl LanguageModel for CloudLanguageModel {
    fn id(&self) -> LanguageModelId {
        self.id.clone()
    }

    fn name(&self) -> LanguageModelName {
        LanguageModelName::from(self.model.display_name.clone())
    }

    fn provider_id(&self) -> LanguageModelProviderId {
        PROVIDER_ID
    }

    fn provider_name(&self) -> LanguageModelProviderName {
        PROVIDER_NAME
    }

    fn upstream_provider_id(&self) -> LanguageModelProviderId {
        use zed_llm_client::LanguageModelProvider::*;
        match self.model.provider {
            Anthropic => language_model::ANTHROPIC_PROVIDER_ID,
            OpenAi => language_model::OPEN_AI_PROVIDER_ID,
            Google => language_model::GOOGLE_PROVIDER_ID,
        }
    }

    fn upstream_provider_name(&self) -> LanguageModelProviderName {
        use zed_llm_client::LanguageModelProvider::*;
        match self.model.provider {
            Anthropic => language_model::ANTHROPIC_PROVIDER_NAME,
            OpenAi => language_model::OPEN_AI_PROVIDER_NAME,
            Google => language_model::GOOGLE_PROVIDER_NAME,
        }
    }

    fn supports_tools(&self) -> bool {
        self.model.supports_tools
    }

    fn supports_images(&self) -> bool {
        self.model.supports_images
    }

    fn supports_tool_choice(&self, choice: LanguageModelToolChoice) -> bool {
        match choice {
            LanguageModelToolChoice::Auto
            | LanguageModelToolChoice::Any
            | LanguageModelToolChoice::None => true,
        }
    }

    fn supports_burn_mode(&self) -> bool {
        self.model.supports_max_mode
    }

    fn telemetry_id(&self) -> String {
        format!("zed.dev/{}", self.model.id)
    }

    fn tool_input_format(&self) -> LanguageModelToolSchemaFormat {
        match self.model.provider {
            zed_llm_client::LanguageModelProvider::Anthropic
            | zed_llm_client::LanguageModelProvider::OpenAi => {
                LanguageModelToolSchemaFormat::JsonSchema
            }
            zed_llm_client::LanguageModelProvider::Google => {
                LanguageModelToolSchemaFormat::JsonSchemaSubset
            }
        }
    }

    fn max_token_count(&self) -> u64 {
        self.model.max_token_count as u64
    }

    fn max_token_count_in_burn_mode(&self) -> Option<u64> {
        self.model
            .max_token_count_in_max_mode
            .filter(|_| self.model.supports_max_mode)
            .map(|max_token_count| max_token_count as u64)
    }

    fn cache_configuration(&self) -> Option<LanguageModelCacheConfiguration> {
        match &self.model.provider {
            zed_llm_client::LanguageModelProvider::Anthropic => {
                Some(LanguageModelCacheConfiguration {
                    min_total_token: 2_048,
                    should_speculate: true,
                    max_cache_anchors: 4,
                })
            }
            zed_llm_client::LanguageModelProvider::OpenAi
            | zed_llm_client::LanguageModelProvider::Google => None,
        }
    }

    fn count_tokens(
        &self,
        request: LanguageModelRequest,
        cx: &App,
    ) -> BoxFuture<'static, Result<u64>> {
        match self.model.provider {
            zed_llm_client::LanguageModelProvider::Anthropic => count_anthropic_tokens(request, cx),
            zed_llm_client::LanguageModelProvider::OpenAi => {
                let model = match open_ai::Model::from_id(&self.model.id.0) {
                    Ok(model) => model,
                    Err(err) => return async move { Err(anyhow!(err)) }.boxed(),
                };
                count_open_ai_tokens(request, model, cx)
            }
            zed_llm_client::LanguageModelProvider::Google => {
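                // Token counting for Google models is delegated to the cloud
                // /count_tokens endpoint instead of a local tokenizer.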
                let client = self.client.clone();
                let llm_api_token = self.llm_api_token.clone();
                let model_id = self.model.id.to_string();
                let generate_content_request =
                    into_google(request, model_id.clone(), GoogleModelMode::Default);
                async move {
                    let http_client = &client.http_client();
                    let token = llm_api_token.acquire(&client).await?;

                    let request_body = CountTokensBody {
                        provider: zed_llm_client::LanguageModelProvider::Google,
                        model: model_id,
                        provider_request: serde_json::to_value(&google_ai::CountTokensRequest {
                            generate_content_request,
                        })?,
                    };
                    let request = http_client::Request::builder()
                        .method(Method::POST)
                        .uri(
                            http_client
                                .build_zed_llm_url("/count_tokens", &[])?
                                .as_ref(),
                        )
                        .header("Content-Type", "application/json")
                        .header("Authorization", format!("Bearer {token}"))
                        .body(serde_json::to_string(&request_body)?.into())?;
                    let mut response = http_client.send(request).await?;
                    let status = response.status();
                    let headers = response.headers().clone();
                    let mut response_body = String::new();
                    response
                        .body_mut()
                        .read_to_string(&mut response_body)
                        .await?;

                    if status.is_success() {
                        let response_body: CountTokensResponse =
                            serde_json::from_str(&response_body)?;

                        Ok(response_body.tokens as u64)
                    } else {
                        Err(anyhow!(ApiError {
                            status,
                            body: response_body,
                            headers
                        }))
                    }
                }
                .boxed()
            }
        }
    }

    fn stream_completion(
        &self,
        request: LanguageModelRequest,
        cx: &AsyncApp,
    ) -> BoxFuture<
        'static,
        Result<
            BoxStream<'static, Result<LanguageModelCompletionEvent, LanguageModelCompletionError>>,
            LanguageModelCompletionError,
        >,
    > {
        let thread_id = request.thread_id.clone();
        let prompt_id = request.prompt_id.clone();
        let intent = request.intent;
        let mode = request.mode;
        let app_version = cx.update(|cx| AppVersion::global(cx)).ok();
        let thinking_allowed = request.thinking_allowed;
        match self.model.provider {
            zed_llm_client::LanguageModelProvider::Anthropic => {
                let request = into_anthropic(
                    request,
                    self.model.id.to_string(),
                    1.0,
                    self.model.max_output_tokens as u64,
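                    // Thinking variants are synthesized client-side in
                    // `update_models` with a "-thinking" id suffix; the
                    // reasoning budget below is a fixed client-side default.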
                    if thinking_allowed && self.model.id.0.ends_with("-thinking") {
                        AnthropicModelMode::Thinking {
                            budget_tokens: Some(4_096),
                        }
                    } else {
                        AnthropicModelMode::Default
                    },
                );
                let client = self.client.clone();
                let llm_api_token = self.llm_api_token.clone();
                let future = self.request_limiter.stream(async move {
                    let PerformLlmCompletionResponse {
                        response,
                        usage,
                        includes_status_messages,
                        tool_use_limit_reached,
                    } = Self::perform_llm_completion(
                        client.clone(),
                        llm_api_token,
                        app_version,
                        CompletionBody {
                            thread_id,
                            prompt_id,
                            intent,
                            mode,
                            provider: zed_llm_client::LanguageModelProvider::Anthropic,
                            model: request.model.clone(),
                            provider_request: serde_json::to_value(&request)
                                .map_err(|e| anyhow!(e))?,
                        },
                    )
                    .await
                    .map_err(|err| match err.downcast::<ApiError>() {
                        Ok(api_err) => anyhow!(LanguageModelCompletionError::from(api_err)),
                        Err(err) => anyhow!(err),
                    })?;

                    let mut mapper = AnthropicEventMapper::new();
                    Ok(map_cloud_completion_events(
                        Box::pin(
                            response_lines(response, includes_status_messages)
                                .chain(usage_updated_event(usage))
                                .chain(tool_use_limit_reached_event(tool_use_limit_reached)),
                        ),
                        move |event| mapper.map_event(event),
                    ))
                });
                async move { Ok(future.await?.boxed()) }.boxed()
            }
            zed_llm_client::LanguageModelProvider::OpenAi => {
                let client = self.client.clone();
                let model = match open_ai::Model::from_id(&self.model.id.0) {
                    Ok(model) => model,
                    Err(err) => return async move { Err(anyhow!(err).into()) }.boxed(),
                };
                let request = into_open_ai(
                    request,
                    model.id(),
                    model.supports_parallel_tool_calls(),
                    None,
                );
                let llm_api_token = self.llm_api_token.clone();
                let future = self.request_limiter.stream(async move {
                    let PerformLlmCompletionResponse {
                        response,
                        usage,
                        includes_status_messages,
                        tool_use_limit_reached,
                    } = Self::perform_llm_completion(
                        client.clone(),
                        llm_api_token,
                        app_version,
                        CompletionBody {
                            thread_id,
                            prompt_id,
                            intent,
                            mode,
                            provider: zed_llm_client::LanguageModelProvider::OpenAi,
                            model: request.model.clone(),
                            provider_request: serde_json::to_value(&request)
                                .map_err(|e| anyhow!(e))?,
                        },
                    )
                    .await?;

                    let mut mapper = OpenAiEventMapper::new();
                    Ok(map_cloud_completion_events(
                        Box::pin(
                            response_lines(response, includes_status_messages)
                                .chain(usage_updated_event(usage))
                                .chain(tool_use_limit_reached_event(tool_use_limit_reached)),
                        ),
                        move |event| mapper.map_event(event),
                    ))
                });
                async move { Ok(future.await?.boxed()) }.boxed()
            }
            zed_llm_client::LanguageModelProvider::Google => {
                let client = self.client.clone();
                let request =
                    into_google(request, self.model.id.to_string(), GoogleModelMode::Default);
                let llm_api_token = self.llm_api_token.clone();
                let future = self.request_limiter.stream(async move {
                    let PerformLlmCompletionResponse {
                        response,
                        usage,
                        includes_status_messages,
                        tool_use_limit_reached,
                    } = Self::perform_llm_completion(
                        client.clone(),
                        llm_api_token,
                        app_version,
                        CompletionBody {
                            thread_id,
                            prompt_id,
                            intent,
                            mode,
                            provider: zed_llm_client::LanguageModelProvider::Google,
                            model: request.model.model_id.clone(),
                            provider_request: serde_json::to_value(&request)
                                .map_err(|e| anyhow!(e))?,
                        },
                    )
                    .await?;

                    let mut mapper = GoogleEventMapper::new();
                    Ok(map_cloud_completion_events(
                        Box::pin(
                            response_lines(response, includes_status_messages)
                                .chain(usage_updated_event(usage))
                                .chain(tool_use_limit_reached_event(tool_use_limit_reached)),
                        ),
                        move |event| mapper.map_event(event),
                    ))
                });
                async move { Ok(future.await?.boxed()) }.boxed()
            }
        }
    }
}

#[derive(Serialize, Deserialize)]
#[serde(rename_all = "snake_case")]
pub enum CloudCompletionEvent<T> {
    Status(CompletionRequestStatus),
    Event(T),
}

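/// Flattens a stream of cloud completion events into language-model events:
/// status updates pass through unchanged, while provider-specific events are
/// expanded by `map_callback`.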
fn map_cloud_completion_events<T, F>(
    stream: Pin<Box<dyn Stream<Item = Result<CloudCompletionEvent<T>>> + Send>>,
    mut map_callback: F,
) -> BoxStream<'static, Result<LanguageModelCompletionEvent, LanguageModelCompletionError>>
where
    T: DeserializeOwned + 'static,
    F: FnMut(T) -> Vec<Result<LanguageModelCompletionEvent, LanguageModelCompletionError>>
        + Send
        + 'static,
{
    stream
        .flat_map(move |event| {
            futures::stream::iter(match event {
                Err(error) => {
                    vec![Err(LanguageModelCompletionError::from(error))]
                }
                Ok(CloudCompletionEvent::Status(event)) => {
                    vec![Ok(LanguageModelCompletionEvent::StatusUpdate(event))]
                }
                Ok(CloudCompletionEvent::Event(event)) => map_callback(event),
            })
        })
        .boxed()
}

fn usage_updated_event<T>(
    usage: Option<ModelRequestUsage>,
) -> impl Stream<Item = Result<CloudCompletionEvent<T>>> {
    futures::stream::iter(usage.map(|usage| {
        Ok(CloudCompletionEvent::Status(
            CompletionRequestStatus::UsageUpdated {
                amount: usage.amount as usize,
                limit: usage.limit,
            },
        ))
    }))
}

fn tool_use_limit_reached_event<T>(
    tool_use_limit_reached: bool,
) -> impl Stream<Item = Result<CloudCompletionEvent<T>>> {
    futures::stream::iter(tool_use_limit_reached.then(|| {
        Ok(CloudCompletionEvent::Status(
            CompletionRequestStatus::ToolUseLimitReached,
        ))
    }))
}

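/// Parses the response body as newline-delimited JSON. When the server has
/// advertised status-message support, each line is a tagged
/// `CloudCompletionEvent`; otherwise every line is a bare provider event.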
fn response_lines<T: DeserializeOwned>(
    response: Response<AsyncBody>,
    includes_status_messages: bool,
) -> impl Stream<Item = Result<CloudCompletionEvent<T>>> {
    futures::stream::try_unfold(
        (String::new(), BufReader::new(response.into_body())),
        move |(mut line, mut body)| async move {
            match body.read_line(&mut line).await {
                Ok(0) => Ok(None),
                Ok(_) => {
                    let event = if includes_status_messages {
                        serde_json::from_str::<CloudCompletionEvent<T>>(&line)?
                    } else {
                        CloudCompletionEvent::Event(serde_json::from_str::<T>(&line)?)
                    };

                    line.clear();
                    Ok(Some((event, (line, body))))
                }
                Err(e) => Err(e.into()),
            }
        },
    )
}

#[derive(IntoElement, RegisterComponent)]
struct ZedAiConfiguration {
    is_connected: bool,
    plan: Option<proto::Plan>,
    subscription_period: Option<(DateTime<Utc>, DateTime<Utc>)>,
    eligible_for_trial: bool,
    has_accepted_terms_of_service: bool,
    accept_terms_of_service_in_progress: bool,
    accept_terms_of_service_callback: Arc<dyn Fn(&mut Window, &mut App) + Send + Sync>,
    sign_in_callback: Arc<dyn Fn(&mut Window, &mut App) + Send + Sync>,
}

impl RenderOnce for ZedAiConfiguration {
    fn render(self, _window: &mut Window, _cx: &mut App) -> impl IntoElement {
        const ZED_PRICING_URL: &str = "https://zed.dev/pricing";

        let is_pro = self.plan == Some(proto::Plan::ZedPro);
        let subscription_text = match (self.plan, self.subscription_period) {
            (Some(proto::Plan::ZedPro), Some(_)) => {
                "You have access to Zed's hosted LLMs through your Zed Pro subscription."
            }
            (Some(proto::Plan::ZedProTrial), Some(_)) => {
                "You have access to Zed's hosted LLMs through your Zed Pro trial."
            }
            (Some(proto::Plan::Free), Some(_)) => {
                "You have basic access to Zed's hosted LLMs through your Zed Free subscription."
            }
            _ => {
                if self.eligible_for_trial {
                    "Subscribe for access to Zed's hosted LLMs. Start with a 14-day free trial."
                } else {
                    "Subscribe for access to Zed's hosted LLMs."
                }
            }
        };
        let manage_subscription_buttons = if is_pro {
            h_flex().child(
                Button::new("manage_settings", "Manage Subscription")
                    .style(ButtonStyle::Tinted(TintColor::Accent))
                    .on_click(|_, _, cx| cx.open_url(&zed_urls::account_url(cx))),
            )
        } else {
            h_flex()
                .gap_2()
                .child(
                    Button::new("learn_more", "Learn more")
                        .style(ButtonStyle::Subtle)
                        .on_click(|_, _, cx| cx.open_url(ZED_PRICING_URL)),
                )
                .child(
                    Button::new(
                        "upgrade",
                        if self.plan.is_none() && self.eligible_for_trial {
                            "Start Trial"
                        } else {
                            "Upgrade"
                        },
                    )
                    .style(ButtonStyle::Subtle)
                    .color(Color::Accent)
                    .on_click(|_, _, cx| cx.open_url(&zed_urls::account_url(cx))),
                )
        };

        if self.is_connected {
            v_flex()
                .gap_3()
                .w_full()
                .when(!self.has_accepted_terms_of_service, |this| {
                    this.child(render_accept_terms(
                        LanguageModelProviderTosView::Configuration,
                        self.accept_terms_of_service_in_progress,
                        {
                            let callback = self.accept_terms_of_service_callback.clone();
                            move |window, cx| (callback)(window, cx)
                        },
                    ))
                })
                .when(self.has_accepted_terms_of_service, |this| {
                    this.child(subscription_text)
                        .child(manage_subscription_buttons)
                })
        } else {
            v_flex()
                .gap_2()
                .child(Label::new("Use Zed AI to access hosted language models."))
                .child(
                    Button::new("sign_in", "Sign In")
                        .icon_color(Color::Muted)
                        .icon(IconName::Github)
                        .icon_position(IconPosition::Start)
                        .on_click({
                            let callback = self.sign_in_callback.clone();
                            move |_, window, cx| (callback)(window, cx)
                        }),
                )
        }
    }
}

struct ConfigurationView {
    state: Entity<State>,
    accept_terms_of_service_callback: Arc<dyn Fn(&mut Window, &mut App) + Send + Sync>,
    sign_in_callback: Arc<dyn Fn(&mut Window, &mut App) + Send + Sync>,
}

impl ConfigurationView {
    fn new(state: Entity<State>) -> Self {
        let accept_terms_of_service_callback = Arc::new({
            let state = state.clone();
            move |_window: &mut Window, cx: &mut App| {
                state.update(cx, |state, cx| {
                    state.accept_terms_of_service(cx);
                });
            }
        });

        let sign_in_callback = Arc::new({
            let state = state.clone();
            move |_window: &mut Window, cx: &mut App| {
                state.update(cx, |state, cx| {
                    state.authenticate(cx).detach_and_log_err(cx);
                });
            }
        });

        Self {
            state,
            accept_terms_of_service_callback,
            sign_in_callback,
        }
    }
}

impl Render for ConfigurationView {
    fn render(&mut self, _: &mut Window, cx: &mut Context<Self>) -> impl IntoElement {
        let state = self.state.read(cx);
        let user_store = state.user_store.read(cx);

        ZedAiConfiguration {
            is_connected: !state.is_signed_out(),
            plan: user_store.current_plan(),
            subscription_period: user_store.subscription_period(),
            eligible_for_trial: user_store.trial_started_at().is_none(),
            has_accepted_terms_of_service: state.has_accepted_terms_of_service(cx),
            accept_terms_of_service_in_progress: state.accept_terms_of_service_task.is_some(),
            accept_terms_of_service_callback: self.accept_terms_of_service_callback.clone(),
            sign_in_callback: self.sign_in_callback.clone(),
        }
    }
}

impl Component for ZedAiConfiguration {
    fn scope() -> ComponentScope {
        ComponentScope::Agent
    }

    fn preview(_window: &mut Window, _cx: &mut App) -> Option<AnyElement> {
        fn configuration(
            is_connected: bool,
            plan: Option<proto::Plan>,
            eligible_for_trial: bool,
            has_accepted_terms_of_service: bool,
        ) -> AnyElement {
            ZedAiConfiguration {
                is_connected,
                plan,
                subscription_period: plan
                    .is_some()
                    .then(|| (Utc::now(), Utc::now() + chrono::Duration::days(7))),
                eligible_for_trial,
                has_accepted_terms_of_service,
                accept_terms_of_service_in_progress: false,
                accept_terms_of_service_callback: Arc::new(|_, _| {}),
                sign_in_callback: Arc::new(|_, _| {}),
            }
            .into_any_element()
        }

        Some(
            v_flex()
                .p_4()
                .gap_4()
                .children(vec![
                    single_example("Not connected", configuration(false, None, false, true)),
                    single_example(
                        "Accept Terms of Service",
                        configuration(true, None, true, false),
                    ),
                    single_example(
                        "No Plan - Not eligible for trial",
                        configuration(true, None, false, true),
                    ),
                    single_example(
                        "No Plan - Eligible for trial",
                        configuration(true, None, true, true),
                    ),
                    single_example(
                        "Free Plan",
                        configuration(true, Some(proto::Plan::Free), true, true),
                    ),
                    single_example(
                        "Zed Pro Trial Plan",
                        configuration(true, Some(proto::Plan::ZedProTrial), true, true),
                    ),
                    single_example(
                        "Zed Pro Plan",
                        configuration(true, Some(proto::Plan::ZedPro), true, true),
                    ),
                ])
                .into_any_element(),
        )
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use http_client::http::{HeaderMap, StatusCode};
    use language_model::LanguageModelCompletionError;

    #[test]
    fn test_api_error_conversion_with_upstream_http_error() {
        // An upstream_http_error body carrying upstream_status 503 should map to UpstreamProviderError
        let error_body = r#"{"code":"upstream_http_error","message":"Received an error from the Anthropic API: upstream connect error or disconnect/reset before headers, reset reason: connection timeout","upstream_status":503}"#;

        let api_error = ApiError {
            status: StatusCode::INTERNAL_SERVER_ERROR,
            body: error_body.to_string(),
            headers: HeaderMap::new(),
        };

        let completion_error: LanguageModelCompletionError = api_error.into();

        match completion_error {
            LanguageModelCompletionError::UpstreamProviderError { message, .. } => {
                assert_eq!(
                    message,
                    "Received an error from the Anthropic API: upstream connect error or disconnect/reset before headers, reset reason: connection timeout"
                );
            }
            _ => panic!(
                "Expected UpstreamProviderError for upstream 503, got: {:?}",
                completion_error
            ),
        }

        // upstream_http_error with upstream_status 500 should likewise map to UpstreamProviderError
        let error_body = r#"{"code":"upstream_http_error","message":"Received an error from the OpenAI API: internal server error","upstream_status":500}"#;

        let api_error = ApiError {
            status: StatusCode::INTERNAL_SERVER_ERROR,
            body: error_body.to_string(),
            headers: HeaderMap::new(),
        };

        let completion_error: LanguageModelCompletionError = api_error.into();

        match completion_error {
            LanguageModelCompletionError::UpstreamProviderError { message, .. } => {
                assert_eq!(
                    message,
                    "Received an error from the OpenAI API: internal server error"
                );
            }
            _ => panic!(
                "Expected UpstreamProviderError for upstream 500, got: {:?}",
                completion_error
            ),
        }

        // upstream_http_error with upstream_status 429 should map to UpstreamProviderError too
        let error_body = r#"{"code":"upstream_http_error","message":"Received an error from the Google API: rate limit exceeded","upstream_status":429}"#;

        let api_error = ApiError {
            status: StatusCode::INTERNAL_SERVER_ERROR,
            body: error_body.to_string(),
            headers: HeaderMap::new(),
        };

        let completion_error: LanguageModelCompletionError = api_error.into();

        match completion_error {
            LanguageModelCompletionError::UpstreamProviderError { message, .. } => {
                assert_eq!(
                    message,
                    "Received an error from the Google API: rate limit exceeded"
                );
            }
            _ => panic!(
                "Expected UpstreamProviderError for upstream 429, got: {:?}",
                completion_error
            ),
        }

        // A plain 500 without an upstream error payload should map to ApiInternalServerError attributed to Zed
        let error_body = "Regular internal server error";

        let api_error = ApiError {
            status: StatusCode::INTERNAL_SERVER_ERROR,
            body: error_body.to_string(),
            headers: HeaderMap::new(),
        };

        let completion_error: LanguageModelCompletionError = api_error.into();

        match completion_error {
            LanguageModelCompletionError::ApiInternalServerError { provider, message } => {
                assert_eq!(provider, PROVIDER_NAME);
                assert_eq!(message, "Regular internal server error");
            }
            _ => panic!(
                "Expected ApiInternalServerError for regular 500, got: {:?}",
                completion_error
            ),
        }

        // upstream_http_429 format should be converted to UpstreamProviderError
        let error_body = r#"{"code":"upstream_http_429","message":"Upstream Anthropic rate limit exceeded.","retry_after":30.5}"#;

        let api_error = ApiError {
            status: StatusCode::INTERNAL_SERVER_ERROR,
            body: error_body.to_string(),
            headers: HeaderMap::new(),
        };

        let completion_error: LanguageModelCompletionError = api_error.into();

        match completion_error {
            LanguageModelCompletionError::UpstreamProviderError {
                message,
                status,
                retry_after,
            } => {
                assert_eq!(message, "Upstream Anthropic rate limit exceeded.");
                assert_eq!(status, StatusCode::TOO_MANY_REQUESTS);
                assert_eq!(retry_after, Some(Duration::from_secs_f64(30.5)));
            }
            _ => panic!(
                "Expected UpstreamProviderError for upstream_http_429, got: {:?}",
                completion_error
            ),
        }

        // Invalid JSON in error body should fall back to regular error handling
        let error_body = "Not JSON at all";

        let api_error = ApiError {
            status: StatusCode::INTERNAL_SERVER_ERROR,
            body: error_body.to_string(),
            headers: HeaderMap::new(),
        };

        let completion_error: LanguageModelCompletionError = api_error.into();

        match completion_error {
            LanguageModelCompletionError::ApiInternalServerError { provider, .. } => {
                assert_eq!(provider, PROVIDER_NAME);
            }
            _ => panic!(
                "Expected ApiInternalServerError for invalid JSON, got: {:?}",
                completion_error
            ),
        }
    }
}