cloud.rs

use ai_onboarding::YoungAccountBanner;
use anthropic::AnthropicModelMode;
use anyhow::{Context as _, Result, anyhow};
use client::{
    Client, NeedsLlmTokenRefresh, RefreshLlmTokenListener, UserStore, global_llm_token, zed_urls,
};
use cloud_api_types::{OrganizationId, Plan};
use cloud_llm_client::{
    CLIENT_SUPPORTS_STATUS_MESSAGES_HEADER_NAME, CLIENT_SUPPORTS_STATUS_STREAM_ENDED_HEADER_NAME,
    CLIENT_SUPPORTS_X_AI_HEADER_NAME, CompletionBody, CompletionEvent, CompletionRequestStatus,
    CountTokensBody, CountTokensResponse, ListModelsResponse,
    SERVER_SUPPORTS_STATUS_MESSAGES_HEADER_NAME, ZED_VERSION_HEADER_NAME,
};
use futures::{
    AsyncBufReadExt, FutureExt, Stream, StreamExt,
    future::BoxFuture,
    stream::{self, BoxStream},
};
use google_ai::GoogleModelMode;
use gpui::{AnyElement, AnyView, App, AsyncApp, Context, Entity, Subscription, Task};
use http_client::http::{HeaderMap, HeaderValue};
use http_client::{AsyncBody, HttpClient, HttpRequestExt, Method, Response, StatusCode};
use language_model::{
    ANTHROPIC_PROVIDER_ID, ANTHROPIC_PROVIDER_NAME, AuthenticateError, GOOGLE_PROVIDER_ID,
    GOOGLE_PROVIDER_NAME, IconOrSvg, LanguageModel, LanguageModelCacheConfiguration,
    LanguageModelCompletionError, LanguageModelCompletionEvent, LanguageModelEffortLevel,
    LanguageModelId, LanguageModelName, LanguageModelProvider, LanguageModelProviderId,
    LanguageModelProviderName, LanguageModelProviderState, LanguageModelRequest,
    LanguageModelToolChoice, LanguageModelToolSchemaFormat, LlmApiToken, OPEN_AI_PROVIDER_ID,
    OPEN_AI_PROVIDER_NAME, PaymentRequiredError, RateLimiter, X_AI_PROVIDER_ID, X_AI_PROVIDER_NAME,
    ZED_CLOUD_PROVIDER_ID, ZED_CLOUD_PROVIDER_NAME,
};
use release_channel::AppVersion;
use schemars::JsonSchema;
use semver::Version;
use serde::{Deserialize, Serialize, de::DeserializeOwned};
use settings::SettingsStore;
pub use settings::ZedDotDevAvailableModel as AvailableModel;
pub use settings::ZedDotDevAvailableProvider as AvailableProvider;
use smol::io::{AsyncReadExt, BufReader};
use std::collections::VecDeque;
use std::pin::Pin;
use std::str::FromStr;
use std::sync::Arc;
use std::task::Poll;
use std::time::Duration;
use thiserror::Error;
use ui::{TintColor, prelude::*};

use crate::provider::anthropic::{
    AnthropicEventMapper, count_anthropic_tokens_with_tiktoken, into_anthropic,
};
use crate::provider::google::{GoogleEventMapper, into_google};
use crate::provider::open_ai::{
    OpenAiEventMapper, OpenAiResponseEventMapper, count_open_ai_tokens, into_open_ai,
    into_open_ai_response,
};
use crate::provider::x_ai::count_xai_tokens;
const PROVIDER_ID: LanguageModelProviderId = ZED_CLOUD_PROVIDER_ID;
const PROVIDER_NAME: LanguageModelProviderName = ZED_CLOUD_PROVIDER_NAME;

#[derive(Default, Clone, Debug, PartialEq)]
pub struct ZedDotDevSettings {
    pub available_models: Vec<AvailableModel>,
}

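/// Selects between a model's default behavior and extended "thinking" mode.
///
/// A hypothetical settings snippet enabling thinking mode (the field names
/// follow this enum's serde representation; the token budget is illustrative):
///
/// ```json
/// { "type": "thinking", "budget_tokens": 4096 }
/// ```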
#[derive(Default, Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema)]
#[serde(tag = "type", rename_all = "lowercase")]
pub enum ModelMode {
    #[default]
    Default,
    Thinking {
        /// The maximum number of tokens to use for reasoning. Must be lower than the model's `max_output_tokens`.
        budget_tokens: Option<u32>,
    },
}

impl From<ModelMode> for AnthropicModelMode {
    fn from(value: ModelMode) -> Self {
        match value {
            ModelMode::Default => AnthropicModelMode::Default,
            ModelMode::Thinking { budget_tokens } => AnthropicModelMode::Thinking { budget_tokens },
        }
    }
}

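/// The language model provider backed by Zed's hosted LLM service.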
pub struct CloudLanguageModelProvider {
    client: Arc<Client>,
    state: Entity<State>,
    _maintain_client_status: Task<()>,
}

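/// Observable state for the cloud provider: connection status, the LLM API
/// token, and the model lists fetched from the server.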
pub struct State {
    client: Arc<Client>,
    llm_api_token: LlmApiToken,
    user_store: Entity<UserStore>,
    status: client::Status,
    models: Vec<Arc<cloud_llm_client::LanguageModel>>,
    default_model: Option<Arc<cloud_llm_client::LanguageModel>>,
    default_fast_model: Option<Arc<cloud_llm_client::LanguageModel>>,
    recommended_models: Vec<Arc<cloud_llm_client::LanguageModel>>,
    _user_store_subscription: Subscription,
    _settings_subscription: Subscription,
    _llm_token_subscription: Subscription,
}

impl State {
    fn new(
        client: Arc<Client>,
        user_store: Entity<UserStore>,
        status: client::Status,
        cx: &mut Context<Self>,
    ) -> Self {
        let refresh_llm_token_listener = RefreshLlmTokenListener::global(cx);
        let llm_api_token = global_llm_token(cx);
        Self {
            client: client.clone(),
            llm_api_token,
            user_store: user_store.clone(),
            status,
            models: Vec::new(),
            default_model: None,
            default_fast_model: None,
            recommended_models: Vec::new(),
            _user_store_subscription: cx.subscribe(
                &user_store,
                move |this, _user_store, event, cx| match event {
                    client::user::Event::PrivateUserInfoUpdated => {
                        let status = *client.status().borrow();
                        if status.is_signed_out() {
                            return;
                        }

                        let client = this.client.clone();
                        let llm_api_token = this.llm_api_token.clone();
                        let organization_id = this
                            .user_store
                            .read(cx)
                            .current_organization()
                            .map(|organization| organization.id.clone());
                        cx.spawn(async move |this, cx| {
                            let response =
                                Self::fetch_models(client, llm_api_token, organization_id).await?;
                            this.update(cx, |this, cx| this.update_models(response, cx))
                        })
                        .detach_and_log_err(cx);
                    }
                    _ => {}
                },
            ),
            _settings_subscription: cx.observe_global::<SettingsStore>(|_, cx| {
                cx.notify();
            }),
            _llm_token_subscription: cx.subscribe(
                &refresh_llm_token_listener,
                move |this, _listener, _event, cx| {
                    let client = this.client.clone();
                    let llm_api_token = this.llm_api_token.clone();
                    let organization_id = this
                        .user_store
                        .read(cx)
                        .current_organization()
                        .map(|organization| organization.id.clone());
                    cx.spawn(async move |this, cx| {
                        let response =
                            Self::fetch_models(client, llm_api_token, organization_id).await?;
                        this.update(cx, |this, cx| {
                            this.update_models(response, cx);
                        })
                    })
                    .detach_and_log_err(cx);
                },
            ),
        }
    }

    fn is_signed_out(&self, cx: &App) -> bool {
        self.user_store.read(cx).current_user().is_none()
    }

    fn authenticate(&self, cx: &mut Context<Self>) -> Task<Result<()>> {
        let client = self.client.clone();
        cx.spawn(async move |state, cx| {
            client.sign_in_with_optional_connect(true, cx).await?;
            state.update(cx, |_, cx| cx.notify())
        })
    }

    fn update_models(&mut self, response: ListModelsResponse, cx: &mut Context<Self>) {
        let mut models = Vec::new();

        for model in response.models {
            models.push(Arc::new(model));
        }

        self.default_model = models
            .iter()
            .find(|model| {
                response
                    .default_model
                    .as_ref()
                    .is_some_and(|default_model_id| &model.id == default_model_id)
            })
            .cloned();
        self.default_fast_model = models
            .iter()
            .find(|model| {
                response
                    .default_fast_model
                    .as_ref()
                    .is_some_and(|default_fast_model_id| &model.id == default_fast_model_id)
            })
            .cloned();
        self.recommended_models = response
            .recommended_models
            .iter()
            .filter_map(|id| models.iter().find(|model| &model.id == id))
            .cloned()
            .collect();
        self.models = models;
        cx.notify();
    }

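    /// Fetches the list of available models from the cloud LLM API's
    /// `/models` endpoint, authenticating with the LLM token.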
    async fn fetch_models(
        client: Arc<Client>,
        llm_api_token: LlmApiToken,
        organization_id: Option<OrganizationId>,
    ) -> Result<ListModelsResponse> {
        let http_client = &client.http_client();
        let token = client
            .acquire_llm_token(&llm_api_token, organization_id)
            .await?;

        let request = http_client::Request::builder()
            .method(Method::GET)
            .header(CLIENT_SUPPORTS_X_AI_HEADER_NAME, "true")
            .uri(http_client.build_zed_llm_url("/models", &[])?.as_ref())
            .header("Authorization", format!("Bearer {token}"))
            .body(AsyncBody::empty())?;
        let mut response = http_client
            .send(request)
            .await
            .context("failed to send list models request")?;

        let mut body = String::new();
        response.body_mut().read_to_string(&mut body).await?;

        if response.status().is_success() {
            Ok(serde_json::from_str(&body)?)
        } else {
            anyhow::bail!(
                "error listing models.\nStatus: {:?}\nBody: {body}",
                response.status(),
            );
        }
    }
}

impl CloudLanguageModelProvider {
    pub fn new(user_store: Entity<UserStore>, client: Arc<Client>, cx: &mut App) -> Self {
        let mut status_rx = client.status();
        let status = *status_rx.borrow();

        let state = cx.new(|cx| State::new(client.clone(), user_store.clone(), status, cx));

        let state_ref = state.downgrade();
        let maintain_client_status = cx.spawn(async move |cx| {
            while let Some(status) = status_rx.next().await {
                if let Some(this) = state_ref.upgrade() {
                    _ = this.update(cx, |this, cx| {
                        if this.status != status {
                            this.status = status;
                            cx.notify();
                        }
                    });
                } else {
                    break;
                }
            }
        });

        Self {
            client,
            state,
            _maintain_client_status: maintain_client_status,
        }
    }

    fn create_language_model(
        &self,
        model: Arc<cloud_llm_client::LanguageModel>,
        llm_api_token: LlmApiToken,
        user_store: Entity<UserStore>,
    ) -> Arc<dyn LanguageModel> {
        Arc::new(CloudLanguageModel {
            id: LanguageModelId(SharedString::from(model.id.0.clone())),
            model,
            llm_api_token,
            user_store,
            client: self.client.clone(),
            request_limiter: RateLimiter::new(4),
        })
    }
}

impl LanguageModelProviderState for CloudLanguageModelProvider {
    type ObservableEntity = State;

    fn observable_entity(&self) -> Option<Entity<Self::ObservableEntity>> {
        Some(self.state.clone())
    }
}

impl LanguageModelProvider for CloudLanguageModelProvider {
    fn id(&self) -> LanguageModelProviderId {
        PROVIDER_ID
    }

    fn name(&self) -> LanguageModelProviderName {
        PROVIDER_NAME
    }

    fn icon(&self) -> IconOrSvg {
        IconOrSvg::Icon(IconName::AiZed)
    }

    fn default_model(&self, cx: &App) -> Option<Arc<dyn LanguageModel>> {
        let state = self.state.read(cx);
        let default_model = state.default_model.clone()?;
        let llm_api_token = state.llm_api_token.clone();
        let user_store = state.user_store.clone();
        Some(self.create_language_model(default_model, llm_api_token, user_store))
    }

    fn default_fast_model(&self, cx: &App) -> Option<Arc<dyn LanguageModel>> {
        let state = self.state.read(cx);
        let default_fast_model = state.default_fast_model.clone()?;
        let llm_api_token = state.llm_api_token.clone();
        let user_store = state.user_store.clone();
        Some(self.create_language_model(default_fast_model, llm_api_token, user_store))
    }

    fn recommended_models(&self, cx: &App) -> Vec<Arc<dyn LanguageModel>> {
        let state = self.state.read(cx);
        let llm_api_token = state.llm_api_token.clone();
        let user_store = state.user_store.clone();
        state
            .recommended_models
            .iter()
            .cloned()
            .map(|model| {
                self.create_language_model(model, llm_api_token.clone(), user_store.clone())
            })
            .collect()
    }

    fn provided_models(&self, cx: &App) -> Vec<Arc<dyn LanguageModel>> {
        let state = self.state.read(cx);
        let llm_api_token = state.llm_api_token.clone();
        let user_store = state.user_store.clone();
        state
            .models
            .iter()
            .cloned()
            .map(|model| {
                self.create_language_model(model, llm_api_token.clone(), user_store.clone())
            })
            .collect()
    }

    fn is_authenticated(&self, cx: &App) -> bool {
        let state = self.state.read(cx);
        !state.is_signed_out(cx)
    }

    fn authenticate(&self, _cx: &mut App) -> Task<Result<(), AuthenticateError>> {
        Task::ready(Ok(()))
    }

    fn configuration_view(
        &self,
        _target_agent: language_model::ConfigurationViewTargetAgent,
        _: &mut Window,
        cx: &mut App,
    ) -> AnyView {
        cx.new(|_| ConfigurationView::new(self.state.clone())).into()
    }

    fn reset_credentials(&self, _cx: &mut App) -> Task<Result<()>> {
        Task::ready(Ok(()))
    }
}

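/// A language model served through Zed's cloud LLM service.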
pub struct CloudLanguageModel {
    id: LanguageModelId,
    model: Arc<cloud_llm_client::LanguageModel>,
    llm_api_token: LlmApiToken,
    user_store: Entity<UserStore>,
    client: Arc<Client>,
    request_limiter: RateLimiter,
}

struct PerformLlmCompletionResponse {
    response: Response<AsyncBody>,
    includes_status_messages: bool,
}

impl CloudLanguageModel {
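    /// Sends a completion request to the cloud `/completions` endpoint,
    /// refreshing the LLM token and retrying once if the server indicates the
    /// token needs a refresh.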
    async fn perform_llm_completion(
        client: Arc<Client>,
        llm_api_token: LlmApiToken,
        organization_id: Option<OrganizationId>,
        app_version: Option<Version>,
        body: CompletionBody,
    ) -> Result<PerformLlmCompletionResponse> {
        let http_client = &client.http_client();

        let mut token = client
            .acquire_llm_token(&llm_api_token, organization_id.clone())
            .await?;
        let mut refreshed_token = false;

        loop {
            let request = http_client::Request::builder()
                .method(Method::POST)
                .uri(http_client.build_zed_llm_url("/completions", &[])?.as_ref())
                .when_some(app_version.as_ref(), |builder, app_version| {
                    builder.header(ZED_VERSION_HEADER_NAME, app_version.to_string())
                })
                .header("Content-Type", "application/json")
                .header("Authorization", format!("Bearer {token}"))
                .header(CLIENT_SUPPORTS_STATUS_MESSAGES_HEADER_NAME, "true")
                .header(CLIENT_SUPPORTS_STATUS_STREAM_ENDED_HEADER_NAME, "true")
                .body(serde_json::to_string(&body)?.into())?;

            let mut response = http_client.send(request).await?;
            let status = response.status();
            if status.is_success() {
                let includes_status_messages = response
                    .headers()
                    .get(SERVER_SUPPORTS_STATUS_MESSAGES_HEADER_NAME)
                    .is_some();

                return Ok(PerformLlmCompletionResponse {
                    response,
                    includes_status_messages,
                });
            }

            if !refreshed_token && response.needs_llm_token_refresh() {
                token = client
                    .refresh_llm_token(&llm_api_token, organization_id.clone())
                    .await?;
                refreshed_token = true;
                continue;
            }

            if status == StatusCode::PAYMENT_REQUIRED {
                return Err(anyhow!(PaymentRequiredError));
            }

            let mut body = String::new();
            let headers = response.headers().clone();
            response.body_mut().read_to_string(&mut body).await?;
            return Err(anyhow!(ApiError {
                status,
                body,
                headers
            }));
        }
    }
}

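/// A non-success HTTP response from the cloud API, carried as an error.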
#[derive(Debug, Error)]
#[error("cloud language model request failed with status {status}: {body}")]
struct ApiError {
    status: StatusCode,
    body: String,
    headers: HeaderMap<HeaderValue>,
}

/// Represents error responses from Zed's cloud API.
///
/// Example JSON for an upstream HTTP error:
/// ```json
/// {
///   "code": "upstream_http_error",
///   "message": "Received an error from the Anthropic API: upstream connect error or disconnect/reset before headers, reset reason: connection timeout",
///   "upstream_status": 503
/// }
/// ```
#[derive(Debug, serde::Deserialize)]
struct CloudApiError {
    code: String,
    message: String,
    #[serde(default)]
    #[serde(deserialize_with = "deserialize_optional_status_code")]
    upstream_status: Option<StatusCode>,
    #[serde(default)]
    retry_after: Option<f64>,
}

fn deserialize_optional_status_code<'de, D>(deserializer: D) -> Result<Option<StatusCode>, D::Error>
where
    D: serde::Deserializer<'de>,
{
    let opt: Option<u16> = Option::deserialize(deserializer)?;
    Ok(opt.and_then(|code| StatusCode::from_u16(code).ok()))
}

impl From<ApiError> for LanguageModelCompletionError {
    fn from(error: ApiError) -> Self {
        if let Ok(cloud_error) = serde_json::from_str::<CloudApiError>(&error.body) {
            if cloud_error.code.starts_with("upstream_http_") {
                let status = if let Some(status) = cloud_error.upstream_status {
                    status
                } else if cloud_error.code.ends_with("_error") {
                    error.status
                } else {
                    // The status code may be embedded in the code string itself
                    // (e.g. "upstream_http_429"); if it can't be parsed out,
                    // fall back to the response's own status.
                    cloud_error
                        .code
                        .strip_prefix("upstream_http_")
                        .and_then(|code_str| code_str.parse::<u16>().ok())
                        .and_then(|code| StatusCode::from_u16(code).ok())
                        .unwrap_or(error.status)
                };

                return LanguageModelCompletionError::UpstreamProviderError {
                    message: cloud_error.message,
                    status,
                    retry_after: cloud_error.retry_after.map(Duration::from_secs_f64),
                };
            }

            return LanguageModelCompletionError::from_http_status(
                PROVIDER_NAME,
                error.status,
                cloud_error.message,
                None,
            );
        }

        let retry_after = None;
        LanguageModelCompletionError::from_http_status(
            PROVIDER_NAME,
            error.status,
            error.body,
            retry_after,
        )
    }
}

impl LanguageModel for CloudLanguageModel {
    fn id(&self) -> LanguageModelId {
        self.id.clone()
    }

    fn name(&self) -> LanguageModelName {
        LanguageModelName::from(self.model.display_name.clone())
    }

    fn provider_id(&self) -> LanguageModelProviderId {
        PROVIDER_ID
    }

    fn provider_name(&self) -> LanguageModelProviderName {
        PROVIDER_NAME
    }

    fn upstream_provider_id(&self) -> LanguageModelProviderId {
        use cloud_llm_client::LanguageModelProvider::*;
        match self.model.provider {
            Anthropic => ANTHROPIC_PROVIDER_ID,
            OpenAi => OPEN_AI_PROVIDER_ID,
            Google => GOOGLE_PROVIDER_ID,
            XAi => X_AI_PROVIDER_ID,
        }
    }

    fn upstream_provider_name(&self) -> LanguageModelProviderName {
        use cloud_llm_client::LanguageModelProvider::*;
        match self.model.provider {
            Anthropic => ANTHROPIC_PROVIDER_NAME,
            OpenAi => OPEN_AI_PROVIDER_NAME,
            Google => GOOGLE_PROVIDER_NAME,
            XAi => X_AI_PROVIDER_NAME,
        }
    }

    fn is_latest(&self) -> bool {
        self.model.is_latest
    }

    fn supports_tools(&self) -> bool {
        self.model.supports_tools
    }

    fn supports_images(&self) -> bool {
        self.model.supports_images
    }

    fn supports_thinking(&self) -> bool {
        self.model.supports_thinking
    }

    fn supports_fast_mode(&self) -> bool {
        self.model.supports_fast_mode
    }

    fn supported_effort_levels(&self) -> Vec<LanguageModelEffortLevel> {
        self.model
            .supported_effort_levels
            .iter()
            .map(|effort_level| LanguageModelEffortLevel {
                name: effort_level.name.clone().into(),
                value: effort_level.value.clone().into(),
                is_default: effort_level.is_default.unwrap_or(false),
            })
            .collect()
    }

    fn supports_streaming_tools(&self) -> bool {
        self.model.supports_streaming_tools
    }

    fn supports_tool_choice(&self, choice: LanguageModelToolChoice) -> bool {
        match choice {
            LanguageModelToolChoice::Auto
            | LanguageModelToolChoice::Any
            | LanguageModelToolChoice::None => true,
        }
    }

    fn supports_split_token_display(&self) -> bool {
        use cloud_llm_client::LanguageModelProvider::*;
        matches!(self.model.provider, OpenAi | XAi)
    }

    fn telemetry_id(&self) -> String {
        format!("zed.dev/{}", self.model.id)
    }

    fn tool_input_format(&self) -> LanguageModelToolSchemaFormat {
        match self.model.provider {
            cloud_llm_client::LanguageModelProvider::Anthropic
            | cloud_llm_client::LanguageModelProvider::OpenAi => {
                LanguageModelToolSchemaFormat::JsonSchema
            }
            cloud_llm_client::LanguageModelProvider::Google
            | cloud_llm_client::LanguageModelProvider::XAi => {
                LanguageModelToolSchemaFormat::JsonSchemaSubset
            }
        }
    }

    fn max_token_count(&self) -> u64 {
        self.model.max_token_count as u64
    }

    fn max_output_tokens(&self) -> Option<u64> {
        Some(self.model.max_output_tokens as u64)
    }

    fn cache_configuration(&self) -> Option<LanguageModelCacheConfiguration> {
        match &self.model.provider {
            cloud_llm_client::LanguageModelProvider::Anthropic => {
                Some(LanguageModelCacheConfiguration {
                    min_total_token: 2_048,
                    should_speculate: true,
                    max_cache_anchors: 4,
                })
            }
            cloud_llm_client::LanguageModelProvider::OpenAi
            | cloud_llm_client::LanguageModelProvider::XAi
            | cloud_llm_client::LanguageModelProvider::Google => None,
        }
    }

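    /// Counts tokens for a request. Anthropic, OpenAI, and xAI counts are
    /// computed client-side with tokenizer heuristics; Google counts go
    /// through the cloud `/count_tokens` endpoint.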
    fn count_tokens(
        &self,
        request: LanguageModelRequest,
        cx: &App,
    ) -> BoxFuture<'static, Result<u64>> {
        match self.model.provider {
            cloud_llm_client::LanguageModelProvider::Anthropic => cx
                .background_spawn(async move { count_anthropic_tokens_with_tiktoken(request) })
                .boxed(),
            cloud_llm_client::LanguageModelProvider::OpenAi => {
                let model = match open_ai::Model::from_id(&self.model.id.0) {
                    Ok(model) => model,
                    Err(err) => return async move { Err(anyhow!(err)) }.boxed(),
                };
                count_open_ai_tokens(request, model, cx)
            }
            cloud_llm_client::LanguageModelProvider::XAi => {
                let model = match x_ai::Model::from_id(&self.model.id.0) {
                    Ok(model) => model,
                    Err(err) => return async move { Err(anyhow!(err)) }.boxed(),
                };
                count_xai_tokens(request, model, cx)
            }
            cloud_llm_client::LanguageModelProvider::Google => {
                let client = self.client.clone();
                let llm_api_token = self.llm_api_token.clone();
                let organization_id = self
                    .user_store
                    .read(cx)
                    .current_organization()
                    .map(|organization| organization.id.clone());
                let model_id = self.model.id.to_string();
                let generate_content_request =
                    into_google(request, model_id.clone(), GoogleModelMode::Default);
                async move {
                    let http_client = &client.http_client();
                    let token = client
                        .acquire_llm_token(&llm_api_token, organization_id)
                        .await?;

                    let request_body = CountTokensBody {
                        provider: cloud_llm_client::LanguageModelProvider::Google,
                        model: model_id,
                        provider_request: serde_json::to_value(&google_ai::CountTokensRequest {
                            generate_content_request,
                        })?,
                    };
                    let request = http_client::Request::builder()
                        .method(Method::POST)
                        .uri(
                            http_client
                                .build_zed_llm_url("/count_tokens", &[])?
                                .as_ref(),
                        )
                        .header("Content-Type", "application/json")
                        .header("Authorization", format!("Bearer {token}"))
                        .body(serde_json::to_string(&request_body)?.into())?;
                    let mut response = http_client.send(request).await?;
                    let status = response.status();
                    let headers = response.headers().clone();
                    let mut response_body = String::new();
                    response
                        .body_mut()
                        .read_to_string(&mut response_body)
                        .await?;

                    if status.is_success() {
                        let response_body: CountTokensResponse =
                            serde_json::from_str(&response_body)?;

                        Ok(response_body.tokens as u64)
                    } else {
                        Err(anyhow!(ApiError {
                            status,
                            body: response_body,
                            headers
                        }))
                    }
                }
                .boxed()
            }
        }
    }

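    /// Streams a completion by translating the request into the upstream
    /// provider's wire format, posting it to the cloud `/completions`
    /// endpoint, and mapping the newline-delimited JSON events that come back.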
    fn stream_completion(
        &self,
        request: LanguageModelRequest,
        cx: &AsyncApp,
    ) -> BoxFuture<
        'static,
        Result<
            BoxStream<'static, Result<LanguageModelCompletionEvent, LanguageModelCompletionError>>,
            LanguageModelCompletionError,
        >,
    > {
        let thread_id = request.thread_id.clone();
        let prompt_id = request.prompt_id.clone();
        let app_version = cx.update(|cx| AppVersion::global(cx)).ok();
        let user_store = self.user_store.clone();
        let organization_id = cx
            .update(|cx| {
                user_store
                    .read(cx)
                    .current_organization()
                    .map(|organization| organization.id.clone())
            })
            .ok()
            .flatten();
        let thinking_allowed = request.thinking_allowed;
        let enable_thinking = thinking_allowed && self.model.supports_thinking;
        let provider_name = provider_name(&self.model.provider);
        match self.model.provider {
            cloud_llm_client::LanguageModelProvider::Anthropic => {
                let effort = request
                    .thinking_effort
                    .as_ref()
                    .and_then(|effort| anthropic::Effort::from_str(effort).ok());

                let mut request = into_anthropic(
                    request,
                    self.model.id.to_string(),
                    1.0,
                    self.model.max_output_tokens as u64,
                    if enable_thinking {
                        AnthropicModelMode::Thinking {
                            budget_tokens: Some(4_096),
                        }
                    } else {
                        AnthropicModelMode::Default
                    },
                );

                if enable_thinking && effort.is_some() {
                    request.thinking = Some(anthropic::Thinking::Adaptive);
                    request.output_config = Some(anthropic::OutputConfig { effort });
                }

                let client = self.client.clone();
                let llm_api_token = self.llm_api_token.clone();
                let organization_id = organization_id.clone();
                let future = self.request_limiter.stream(async move {
                    let PerformLlmCompletionResponse {
                        response,
                        includes_status_messages,
                    } = Self::perform_llm_completion(
                        client.clone(),
                        llm_api_token,
                        organization_id,
                        app_version,
                        CompletionBody {
                            thread_id,
                            prompt_id,
                            provider: cloud_llm_client::LanguageModelProvider::Anthropic,
                            model: request.model.clone(),
                            provider_request: serde_json::to_value(&request)
                                .map_err(|e| anyhow!(e))?,
                        },
                    )
                    .await
                    .map_err(|err| match err.downcast::<ApiError>() {
                        Ok(api_err) => anyhow!(LanguageModelCompletionError::from(api_err)),
                        Err(err) => anyhow!(err),
                    })?;

                    let mut mapper = AnthropicEventMapper::new();
                    Ok(map_cloud_completion_events(
                        Box::pin(response_lines(response, includes_status_messages)),
                        &provider_name,
                        move |event| mapper.map_event(event),
                    ))
                });
                async move { Ok(future.await?.boxed()) }.boxed()
            }
            cloud_llm_client::LanguageModelProvider::OpenAi => {
                let client = self.client.clone();
                let llm_api_token = self.llm_api_token.clone();
                let organization_id = organization_id.clone();
                let effort = request
                    .thinking_effort
                    .as_ref()
                    .and_then(|effort| open_ai::ReasoningEffort::from_str(effort).ok());

                let mut request = into_open_ai_response(
                    request,
                    &self.model.id.0,
                    self.model.supports_parallel_tool_calls,
                    true,
                    None,
                    None,
                );

                if enable_thinking && let Some(effort) = effort {
                    request.reasoning = Some(open_ai::responses::ReasoningConfig {
                        effort,
                        summary: Some(open_ai::responses::ReasoningSummaryMode::Auto),
                    });
                }

                let future = self.request_limiter.stream(async move {
                    let PerformLlmCompletionResponse {
                        response,
                        includes_status_messages,
                    } = Self::perform_llm_completion(
                        client.clone(),
                        llm_api_token,
                        organization_id,
                        app_version,
                        CompletionBody {
                            thread_id,
                            prompt_id,
                            provider: cloud_llm_client::LanguageModelProvider::OpenAi,
                            model: request.model.clone(),
                            provider_request: serde_json::to_value(&request)
                                .map_err(|e| anyhow!(e))?,
                        },
                    )
                    .await?;

                    let mut mapper = OpenAiResponseEventMapper::new();
                    Ok(map_cloud_completion_events(
                        Box::pin(response_lines(response, includes_status_messages)),
                        &provider_name,
                        move |event| mapper.map_event(event),
                    ))
                });
                async move { Ok(future.await?.boxed()) }.boxed()
            }
            cloud_llm_client::LanguageModelProvider::XAi => {
                let client = self.client.clone();
                let request = into_open_ai(
                    request,
                    &self.model.id.0,
                    self.model.supports_parallel_tool_calls,
                    false,
                    None,
                    None,
                );
                let llm_api_token = self.llm_api_token.clone();
                let organization_id = organization_id.clone();
                let future = self.request_limiter.stream(async move {
                    let PerformLlmCompletionResponse {
                        response,
                        includes_status_messages,
                    } = Self::perform_llm_completion(
                        client.clone(),
                        llm_api_token,
                        organization_id,
                        app_version,
                        CompletionBody {
                            thread_id,
                            prompt_id,
                            provider: cloud_llm_client::LanguageModelProvider::XAi,
                            model: request.model.clone(),
                            provider_request: serde_json::to_value(&request)
                                .map_err(|e| anyhow!(e))?,
                        },
                    )
                    .await?;

                    let mut mapper = OpenAiEventMapper::new();
                    Ok(map_cloud_completion_events(
                        Box::pin(response_lines(response, includes_status_messages)),
                        &provider_name,
                        move |event| mapper.map_event(event),
                    ))
                });
                async move { Ok(future.await?.boxed()) }.boxed()
            }
            cloud_llm_client::LanguageModelProvider::Google => {
                let client = self.client.clone();
                let request =
                    into_google(request, self.model.id.to_string(), GoogleModelMode::Default);
                let llm_api_token = self.llm_api_token.clone();
                let future = self.request_limiter.stream(async move {
                    let PerformLlmCompletionResponse {
                        response,
                        includes_status_messages,
                    } = Self::perform_llm_completion(
                        client.clone(),
                        llm_api_token,
                        organization_id,
                        app_version,
                        CompletionBody {
                            thread_id,
                            prompt_id,
                            provider: cloud_llm_client::LanguageModelProvider::Google,
                            model: request.model.model_id.clone(),
                            provider_request: serde_json::to_value(&request)
                                .map_err(|e| anyhow!(e))?,
                        },
                    )
                    .await?;

                    let mut mapper = GoogleEventMapper::new();
                    Ok(map_cloud_completion_events(
                        Box::pin(response_lines(response, includes_status_messages)),
                        &provider_name,
                        move |event| mapper.map_event(event),
                    ))
                });
                async move { Ok(future.await?.boxed()) }.boxed()
            }
        }
    }
}

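/// Adapts a stream of cloud completion events into
/// `LanguageModelCompletionEvent`s: status messages are translated, provider
/// events are flattened through `map_callback`, and if the stream finishes
/// without a `StreamEnded` status a `StreamEndedUnexpectedly` error is
/// emitted.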
fn map_cloud_completion_events<T, F>(
    stream: Pin<Box<dyn Stream<Item = Result<CompletionEvent<T>>> + Send>>,
    provider: &LanguageModelProviderName,
    mut map_callback: F,
) -> BoxStream<'static, Result<LanguageModelCompletionEvent, LanguageModelCompletionError>>
where
    T: DeserializeOwned + 'static,
    F: FnMut(T) -> Vec<Result<LanguageModelCompletionEvent, LanguageModelCompletionError>>
        + Send
        + 'static,
{
    let provider = provider.clone();
    let mut stream = stream.fuse();

    let mut saw_stream_ended = false;

    let mut done = false;
    let mut pending = VecDeque::new();

    stream::poll_fn(move |cx| {
        loop {
            if let Some(item) = pending.pop_front() {
                return Poll::Ready(Some(item));
            }

            if done {
                return Poll::Ready(None);
            }

            match stream.poll_next_unpin(cx) {
                Poll::Ready(Some(event)) => {
                    let items = match event {
                        Err(error) => {
                            vec![Err(LanguageModelCompletionError::from(error))]
                        }
                        Ok(CompletionEvent::Status(CompletionRequestStatus::StreamEnded)) => {
                            saw_stream_ended = true;
                            vec![]
                        }
                        Ok(CompletionEvent::Status(status)) => {
                            LanguageModelCompletionEvent::from_completion_request_status(
                                status,
                                provider.clone(),
                            )
                            .transpose()
                            .map(|event| vec![event])
                            .unwrap_or_default()
                        }
                        Ok(CompletionEvent::Event(event)) => map_callback(event),
                    };
                    pending.extend(items);
                }
                Poll::Ready(None) => {
                    done = true;

                    if !saw_stream_ended {
                        return Poll::Ready(Some(Err(
                            LanguageModelCompletionError::StreamEndedUnexpectedly {
                                provider: provider.clone(),
                            },
                        )));
                    }
                }
                Poll::Pending => return Poll::Pending,
            }
        }
    })
    .boxed()
}

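/// Returns the display name of the upstream provider backing a cloud model.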
fn provider_name(provider: &cloud_llm_client::LanguageModelProvider) -> LanguageModelProviderName {
    match provider {
        cloud_llm_client::LanguageModelProvider::Anthropic => ANTHROPIC_PROVIDER_NAME,
        cloud_llm_client::LanguageModelProvider::OpenAi => OPEN_AI_PROVIDER_NAME,
        cloud_llm_client::LanguageModelProvider::Google => GOOGLE_PROVIDER_NAME,
        cloud_llm_client::LanguageModelProvider::XAi => X_AI_PROVIDER_NAME,
    }
}

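/// Reads the response body line by line, parsing each line as a JSON-encoded
/// completion event (wrapped in a status envelope when the server advertises
/// status-message support).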
fn response_lines<T: DeserializeOwned>(
    response: Response<AsyncBody>,
    includes_status_messages: bool,
) -> impl Stream<Item = Result<CompletionEvent<T>>> {
    futures::stream::try_unfold(
        (String::new(), BufReader::new(response.into_body())),
        move |(mut line, mut body)| async move {
            match body.read_line(&mut line).await {
                Ok(0) => Ok(None),
                Ok(_) => {
                    let event = if includes_status_messages {
                        serde_json::from_str::<CompletionEvent<T>>(&line)?
                    } else {
                        CompletionEvent::Event(serde_json::from_str::<T>(&line)?)
                    };

                    line.clear();
                    Ok(Some((event, (line, body))))
                }
                Err(e) => Err(e.into()),
            }
        },
    )
}

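/// The Zed AI configuration/onboarding UI: shows the current plan and the
/// appropriate sign-in, trial, or subscription actions.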
#[derive(IntoElement, RegisterComponent)]
struct ZedAiConfiguration {
    is_connected: bool,
    plan: Option<Plan>,
    eligible_for_trial: bool,
    account_too_young: bool,
    sign_in_callback: Arc<dyn Fn(&mut Window, &mut App) + Send + Sync>,
}

impl RenderOnce for ZedAiConfiguration {
    fn render(self, _window: &mut Window, _cx: &mut App) -> impl IntoElement {
        let (subscription_text, has_paid_plan) = match self.plan {
            Some(Plan::ZedPro) => (
                "You have access to Zed's hosted models through your Pro subscription.",
                true,
            ),
            Some(Plan::ZedProTrial) => (
                "You have access to Zed's hosted models through your Pro trial.",
                false,
            ),
            Some(Plan::ZedStudent) => (
                "You have access to Zed's hosted models through your Student subscription.",
                true,
            ),
            Some(Plan::ZedBusiness) => (
                "You have access to Zed's hosted models through your Organization.",
                true,
            ),
            Some(Plan::ZedFree) | None => (
                if self.eligible_for_trial {
                    "Subscribe for access to Zed's hosted models. Start with a 14-day free trial."
                } else {
                    "Subscribe for access to Zed's hosted models."
                },
                false,
            ),
        };

        let manage_subscription_buttons = if has_paid_plan {
            Button::new("manage_settings", "Manage Subscription")
                .full_width()
                .label_size(LabelSize::Small)
                .style(ButtonStyle::Tinted(TintColor::Accent))
                .on_click(|_, _, cx| cx.open_url(&zed_urls::account_url(cx)))
                .into_any_element()
        } else if self.plan.is_none() || self.eligible_for_trial {
            Button::new("start_trial", "Start 14-day Free Pro Trial")
                .full_width()
                .style(ui::ButtonStyle::Tinted(ui::TintColor::Accent))
                .on_click(|_, _, cx| cx.open_url(&zed_urls::start_trial_url(cx)))
                .into_any_element()
        } else {
            Button::new("upgrade", "Upgrade to Pro")
                .full_width()
                .style(ui::ButtonStyle::Tinted(ui::TintColor::Accent))
                .on_click(|_, _, cx| cx.open_url(&zed_urls::upgrade_to_zed_pro_url(cx)))
                .into_any_element()
        };

        if !self.is_connected {
            return v_flex()
                .gap_2()
                .child(Label::new("Sign in to have access to Zed's complete agentic experience with hosted models."))
                .child(
                    Button::new("sign_in", "Sign In to use Zed AI")
                        .start_icon(Icon::new(IconName::Github).size(IconSize::Small).color(Color::Muted))
                        .full_width()
                        .on_click({
                            let callback = self.sign_in_callback.clone();
                            move |_, window, cx| (callback)(window, cx)
                        }),
                );
        }

        v_flex().gap_2().w_full().map(|this| {
            if self.account_too_young {
                this.child(YoungAccountBanner).child(
                    Button::new("upgrade", "Upgrade to Pro")
                        .style(ui::ButtonStyle::Tinted(ui::TintColor::Accent))
                        .full_width()
                        .on_click(|_, _, cx| cx.open_url(&zed_urls::upgrade_to_zed_pro_url(cx))),
                )
            } else {
                this.text_sm()
                    .child(subscription_text)
                    .child(manage_subscription_buttons)
            }
        })
    }
}

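/// The configuration view registered with the language model provider; it
/// forwards sign-in clicks to `State::authenticate`.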
struct ConfigurationView {
    state: Entity<State>,
    sign_in_callback: Arc<dyn Fn(&mut Window, &mut App) + Send + Sync>,
}

impl ConfigurationView {
    fn new(state: Entity<State>) -> Self {
        let sign_in_callback = Arc::new({
            let state = state.clone();
            move |_window: &mut Window, cx: &mut App| {
                state.update(cx, |state, cx| {
                    state.authenticate(cx).detach_and_log_err(cx);
                });
            }
        });

        Self {
            state,
            sign_in_callback,
        }
    }
}

impl Render for ConfigurationView {
    fn render(&mut self, _: &mut Window, cx: &mut Context<Self>) -> impl IntoElement {
        let state = self.state.read(cx);
        let user_store = state.user_store.read(cx);

        ZedAiConfiguration {
            is_connected: !state.is_signed_out(cx),
            plan: user_store.plan(),
            eligible_for_trial: user_store.trial_started_at().is_none(),
            account_too_young: user_store.account_too_young(),
            sign_in_callback: self.sign_in_callback.clone(),
        }
    }
}

impl Component for ZedAiConfiguration {
    fn name() -> &'static str {
        "AI Configuration Content"
    }

    fn sort_name() -> &'static str {
        "AI Configuration Content"
    }

    fn scope() -> ComponentScope {
        ComponentScope::Onboarding
    }

    fn preview(_window: &mut Window, _cx: &mut App) -> Option<AnyElement> {
        fn configuration(
            is_connected: bool,
            plan: Option<Plan>,
            eligible_for_trial: bool,
            account_too_young: bool,
        ) -> AnyElement {
            ZedAiConfiguration {
                is_connected,
                plan,
                eligible_for_trial,
                account_too_young,
                sign_in_callback: Arc::new(|_, _| {}),
            }
            .into_any_element()
        }

        Some(
            v_flex()
                .p_4()
                .gap_4()
                .children(vec![
                    single_example("Not connected", configuration(false, None, false, false)),
                    single_example(
                        "Accept Terms of Service",
                        configuration(true, None, true, false),
                    ),
                    single_example(
                        "No Plan - Not eligible for trial",
                        configuration(true, None, false, false),
                    ),
                    single_example(
                        "No Plan - Eligible for trial",
                        configuration(true, None, true, false),
                    ),
                    single_example(
                        "Free Plan",
                        configuration(true, Some(Plan::ZedFree), true, false),
                    ),
                    single_example(
                        "Zed Pro Trial Plan",
                        configuration(true, Some(Plan::ZedProTrial), true, false),
                    ),
                    single_example(
                        "Zed Pro Plan",
                        configuration(true, Some(Plan::ZedPro), true, false),
                    ),
                ])
                .into_any_element(),
        )
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use http_client::http::{HeaderMap, StatusCode};
    use language_model::LanguageModelCompletionError;

    #[test]
    fn test_api_error_conversion_with_upstream_http_error() {
        // upstream_http_error with a 503 upstream status should become UpstreamProviderError
        let error_body = r#"{"code":"upstream_http_error","message":"Received an error from the Anthropic API: upstream connect error or disconnect/reset before headers, reset reason: connection timeout","upstream_status":503}"#;

        let api_error = ApiError {
            status: StatusCode::INTERNAL_SERVER_ERROR,
            body: error_body.to_string(),
            headers: HeaderMap::new(),
        };

        let completion_error: LanguageModelCompletionError = api_error.into();

        match completion_error {
            LanguageModelCompletionError::UpstreamProviderError { message, .. } => {
                assert_eq!(
                    message,
                    "Received an error from the Anthropic API: upstream connect error or disconnect/reset before headers, reset reason: connection timeout"
                );
            }
            _ => panic!(
                "Expected UpstreamProviderError for upstream 503, got: {:?}",
                completion_error
            ),
        }

        // upstream_http_error with a 500 upstream status should become UpstreamProviderError
        let error_body = r#"{"code":"upstream_http_error","message":"Received an error from the OpenAI API: internal server error","upstream_status":500}"#;

        let api_error = ApiError {
            status: StatusCode::INTERNAL_SERVER_ERROR,
            body: error_body.to_string(),
            headers: HeaderMap::new(),
        };

        let completion_error: LanguageModelCompletionError = api_error.into();

        match completion_error {
            LanguageModelCompletionError::UpstreamProviderError { message, .. } => {
                assert_eq!(
                    message,
                    "Received an error from the OpenAI API: internal server error"
                );
            }
            _ => panic!(
                "Expected UpstreamProviderError for upstream 500, got: {:?}",
                completion_error
            ),
        }

        // upstream_http_error with a 429 upstream status should become UpstreamProviderError
        let error_body = r#"{"code":"upstream_http_error","message":"Received an error from the Google API: rate limit exceeded","upstream_status":429}"#;

        let api_error = ApiError {
            status: StatusCode::INTERNAL_SERVER_ERROR,
            body: error_body.to_string(),
            headers: HeaderMap::new(),
        };

        let completion_error: LanguageModelCompletionError = api_error.into();

        match completion_error {
            LanguageModelCompletionError::UpstreamProviderError { message, .. } => {
                assert_eq!(
                    message,
                    "Received an error from the Google API: rate limit exceeded"
                );
            }
            _ => panic!(
                "Expected UpstreamProviderError for upstream 429, got: {:?}",
                completion_error
            ),
        }

        // Regular 500 error without upstream_http_error should remain ApiInternalServerError for Zed
        let error_body = "Regular internal server error";

        let api_error = ApiError {
            status: StatusCode::INTERNAL_SERVER_ERROR,
            body: error_body.to_string(),
            headers: HeaderMap::new(),
        };

        let completion_error: LanguageModelCompletionError = api_error.into();

        match completion_error {
            LanguageModelCompletionError::ApiInternalServerError { provider, message } => {
                assert_eq!(provider, PROVIDER_NAME);
                assert_eq!(message, "Regular internal server error");
            }
            _ => panic!(
                "Expected ApiInternalServerError for regular 500, got: {:?}",
                completion_error
            ),
        }

        // upstream_http_429 format should be converted to UpstreamProviderError
        let error_body = r#"{"code":"upstream_http_429","message":"Upstream Anthropic rate limit exceeded.","retry_after":30.5}"#;

        let api_error = ApiError {
            status: StatusCode::INTERNAL_SERVER_ERROR,
            body: error_body.to_string(),
            headers: HeaderMap::new(),
        };

        let completion_error: LanguageModelCompletionError = api_error.into();

        match completion_error {
            LanguageModelCompletionError::UpstreamProviderError {
                message,
                status,
                retry_after,
            } => {
                assert_eq!(message, "Upstream Anthropic rate limit exceeded.");
                assert_eq!(status, StatusCode::TOO_MANY_REQUESTS);
                assert_eq!(retry_after, Some(Duration::from_secs_f64(30.5)));
            }
            _ => panic!(
                "Expected UpstreamProviderError for upstream_http_429, got: {:?}",
                completion_error
            ),
        }

        // Invalid JSON in error body should fall back to regular error handling
        let error_body = "Not JSON at all";

        let api_error = ApiError {
            status: StatusCode::INTERNAL_SERVER_ERROR,
            body: error_body.to_string(),
            headers: HeaderMap::new(),
        };

        let completion_error: LanguageModelCompletionError = api_error.into();

        match completion_error {
            LanguageModelCompletionError::ApiInternalServerError { provider, .. } => {
                assert_eq!(provider, PROVIDER_NAME);
            }
            _ => panic!(
                "Expected ApiInternalServerError for invalid JSON, got: {:?}",
                completion_error
            ),
        }
    }
}