cloud.rs

use ai_onboarding::YoungAccountBanner;
use anthropic::AnthropicModelMode;
use anyhow::{Context as _, Result, anyhow};
use chrono::{DateTime, Utc};
use client::{Client, UserStore, zed_urls};
use cloud_api_types::Plan;
use cloud_llm_client::{
    CLIENT_SUPPORTS_STATUS_MESSAGES_HEADER_NAME, CLIENT_SUPPORTS_X_AI_HEADER_NAME, CompletionBody,
    CompletionEvent, CountTokensBody, CountTokensResponse, ListModelsResponse,
    SERVER_SUPPORTS_STATUS_MESSAGES_HEADER_NAME, ZED_VERSION_HEADER_NAME,
};
use feature_flags::{CloudThinkingToggleFeatureFlag, FeatureFlagAppExt as _};
use futures::{
    AsyncBufReadExt, FutureExt, Stream, StreamExt, future::BoxFuture, stream::BoxStream,
};
use google_ai::GoogleModelMode;
use gpui::{AnyElement, AnyView, App, AsyncApp, Context, Entity, Subscription, Task};
use http_client::http::{HeaderMap, HeaderValue};
use http_client::{AsyncBody, HttpClient, HttpRequestExt, Method, Response, StatusCode};
use language_model::{
    AuthenticateError, IconOrSvg, LanguageModel, LanguageModelCacheConfiguration,
    LanguageModelCompletionError, LanguageModelCompletionEvent, LanguageModelEffortLevel,
    LanguageModelId, LanguageModelName, LanguageModelProvider, LanguageModelProviderId,
    LanguageModelProviderName, LanguageModelProviderState, LanguageModelRequest,
    LanguageModelToolChoice, LanguageModelToolSchemaFormat, LlmApiToken, NeedsLlmTokenRefresh,
    PaymentRequiredError, RateLimiter, RefreshLlmTokenListener,
};
use release_channel::AppVersion;
use schemars::JsonSchema;
use semver::Version;
use serde::{Deserialize, Serialize, de::DeserializeOwned};
use settings::SettingsStore;
pub use settings::ZedDotDevAvailableModel as AvailableModel;
pub use settings::ZedDotDevAvailableProvider as AvailableProvider;
use smol::io::{AsyncReadExt, BufReader};
use std::pin::Pin;
use std::sync::Arc;
use std::time::Duration;
use thiserror::Error;
use ui::{TintColor, prelude::*};
use util::{ResultExt as _, maybe};

use crate::provider::anthropic::{
    AnthropicEventMapper, count_anthropic_tokens_with_tiktoken, into_anthropic,
};
use crate::provider::google::{GoogleEventMapper, into_google};
use crate::provider::open_ai::{
    OpenAiEventMapper, OpenAiResponseEventMapper, count_open_ai_tokens, into_open_ai,
    into_open_ai_response,
};
use crate::provider::x_ai::count_xai_tokens;

const PROVIDER_ID: LanguageModelProviderId = language_model::ZED_CLOUD_PROVIDER_ID;
const PROVIDER_NAME: LanguageModelProviderName = language_model::ZED_CLOUD_PROVIDER_NAME;

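/// User-configured settings for the zed.dev provider; currently just the set
/// of additional available models.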
#[derive(Default, Clone, Debug, PartialEq)]
pub struct ZedDotDevSettings {
    pub available_models: Vec<AvailableModel>,
}

#[derive(Default, Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema)]
#[serde(tag = "type", rename_all = "lowercase")]
pub enum ModelMode {
    #[default]
    Default,
    Thinking {
        /// The maximum number of tokens to use for reasoning. Must be lower than the model's `max_output_tokens`.
        budget_tokens: Option<u32>,
    },
}

impl From<ModelMode> for AnthropicModelMode {
    fn from(value: ModelMode) -> Self {
        match value {
            ModelMode::Default => AnthropicModelMode::Default,
            ModelMode::Thinking { budget_tokens } => AnthropicModelMode::Thinking { budget_tokens },
        }
    }
}

pub struct CloudLanguageModelProvider {
    client: Arc<Client>,
    state: Entity<State>,
    _maintain_client_status: Task<()>,
}

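/// Shared state for the provider: the authenticated client, the LLM API token,
/// and the model lists fetched from the server.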
pub struct State {
    client: Arc<Client>,
    llm_api_token: LlmApiToken,
    user_store: Entity<UserStore>,
    status: client::Status,
    models: Vec<Arc<cloud_llm_client::LanguageModel>>,
    default_model: Option<Arc<cloud_llm_client::LanguageModel>>,
    default_fast_model: Option<Arc<cloud_llm_client::LanguageModel>>,
    recommended_models: Vec<Arc<cloud_llm_client::LanguageModel>>,
    _fetch_models_task: Task<()>,
    _settings_subscription: Subscription,
    _llm_token_subscription: Subscription,
}

impl State {
    fn new(
        client: Arc<Client>,
        user_store: Entity<UserStore>,
        status: client::Status,
        cx: &mut Context<Self>,
    ) -> Self {
        let refresh_llm_token_listener = RefreshLlmTokenListener::global(cx);
        let mut current_user = user_store.read(cx).watch_current_user();
        Self {
            client: client.clone(),
            llm_api_token: LlmApiToken::default(),
            user_store,
            status,
            models: Vec::new(),
            default_model: None,
            default_fast_model: None,
            recommended_models: Vec::new(),
            _fetch_models_task: cx.spawn(async move |this, cx| {
                maybe!(async move {
                    let (client, llm_api_token) = this
                        .read_with(cx, |this, _cx| (client.clone(), this.llm_api_token.clone()))?;

                    // Wait until a user is signed in before fetching the model list.
                    while current_user.borrow().is_none() {
                        current_user.next().await;
                    }

                    let response =
                        Self::fetch_models(client.clone(), llm_api_token.clone()).await?;
                    this.update(cx, |this, cx| this.update_models(response, cx))?;
                    anyhow::Ok(())
                })
                .await
                .context("failed to fetch Zed models")
                .log_err();
            }),
            _settings_subscription: cx.observe_global::<SettingsStore>(|_, cx| {
                cx.notify();
            }),
            _llm_token_subscription: cx.subscribe(
                &refresh_llm_token_listener,
                move |this, _listener, _event, cx| {
                    let client = this.client.clone();
                    let llm_api_token = this.llm_api_token.clone();
                    cx.spawn(async move |this, cx| {
                        llm_api_token.refresh(&client).await?;
                        let response = Self::fetch_models(client, llm_api_token).await?;
                        this.update(cx, |this, cx| {
                            this.update_models(response, cx);
                        })
                    })
                    .detach_and_log_err(cx);
                },
            ),
        }
    }

    fn is_signed_out(&self, cx: &App) -> bool {
        self.user_store.read(cx).current_user().is_none()
    }

    fn authenticate(&self, cx: &mut Context<Self>) -> Task<Result<()>> {
        let client = self.client.clone();
        cx.spawn(async move |state, cx| {
            client.sign_in_with_optional_connect(true, cx).await?;
            state.update(cx, |_, cx| cx.notify())
        })
    }

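    /// Replaces the model list from the server's response, inserting
    /// `-thinking` variants when the thinking toggle feature flag is disabled,
    /// and resolving the default, fast, and recommended models.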
    fn update_models(&mut self, response: ListModelsResponse, cx: &mut Context<Self>) {
        let is_thinking_toggle_enabled = cx.has_flag::<CloudThinkingToggleFeatureFlag>();

        let mut models = Vec::new();

        for model in response.models {
            models.push(Arc::new(model.clone()));

            if !is_thinking_toggle_enabled {
                // Right now we represent thinking variants of models as separate models on the client,
                // so we need to insert variants for any model that supports thinking.
                if model.supports_thinking {
                    models.push(Arc::new(cloud_llm_client::LanguageModel {
                        id: cloud_llm_client::LanguageModelId(
                            format!("{}-thinking", model.id).into(),
                        ),
                        display_name: format!("{} Thinking", model.display_name),
                        ..model
                    }));
                }
            }
        }

        self.default_model = models
            .iter()
            .find(|model| {
                response
                    .default_model
                    .as_ref()
                    .is_some_and(|default_model_id| &model.id == default_model_id)
            })
            .cloned();
        self.default_fast_model = models
            .iter()
            .find(|model| {
                response
                    .default_fast_model
                    .as_ref()
                    .is_some_and(|default_fast_model_id| &model.id == default_fast_model_id)
            })
            .cloned();
        self.recommended_models = response
            .recommended_models
            .iter()
            .filter_map(|id| models.iter().find(|model| &model.id == id))
            .cloned()
            .collect();
        self.models = models;
        cx.notify();
    }

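    /// Fetches the list of available models from the cloud LLM API,
    /// authenticating with the current LLM token.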
    async fn fetch_models(
        client: Arc<Client>,
        llm_api_token: LlmApiToken,
    ) -> Result<ListModelsResponse> {
        let http_client = &client.http_client();
        let token = llm_api_token.acquire(&client).await?;

        let request = http_client::Request::builder()
            .method(Method::GET)
            .uri(http_client.build_zed_llm_url("/models", &[])?.as_ref())
            .header(CLIENT_SUPPORTS_X_AI_HEADER_NAME, "true")
            .header("Authorization", format!("Bearer {token}"))
            .body(AsyncBody::empty())?;
        let mut response = http_client
            .send(request)
            .await
            .context("failed to send list models request")?;

        let mut body = String::new();
        response.body_mut().read_to_string(&mut body).await?;

        if response.status().is_success() {
            Ok(serde_json::from_str(&body)?)
        } else {
            anyhow::bail!(
                "error listing models.\nStatus: {:?}\nBody: {body}",
                response.status(),
            );
        }
    }
}

impl CloudLanguageModelProvider {
    pub fn new(user_store: Entity<UserStore>, client: Arc<Client>, cx: &mut App) -> Self {
        let mut status_rx = client.status();
        let status = *status_rx.borrow();

        let state = cx.new(|cx| State::new(client.clone(), user_store.clone(), status, cx));

        let state_ref = state.downgrade();
        let maintain_client_status = cx.spawn(async move |cx| {
            while let Some(status) = status_rx.next().await {
                if let Some(this) = state_ref.upgrade() {
                    _ = this.update(cx, |this, cx| {
                        if this.status != status {
                            this.status = status;
                            cx.notify();
                        }
                    });
                } else {
                    break;
                }
            }
        });

        Self {
            client,
            state,
            _maintain_client_status: maintain_client_status,
        }
    }

    fn create_language_model(
        &self,
        model: Arc<cloud_llm_client::LanguageModel>,
        llm_api_token: LlmApiToken,
    ) -> Arc<dyn LanguageModel> {
        Arc::new(CloudLanguageModel {
            id: LanguageModelId(SharedString::from(model.id.0.clone())),
            model,
            llm_api_token,
            client: self.client.clone(),
            request_limiter: RateLimiter::new(4),
        })
    }
}

impl LanguageModelProviderState for CloudLanguageModelProvider {
    type ObservableEntity = State;

    fn observable_entity(&self) -> Option<Entity<Self::ObservableEntity>> {
        Some(self.state.clone())
    }
}

impl LanguageModelProvider for CloudLanguageModelProvider {
    fn id(&self) -> LanguageModelProviderId {
        PROVIDER_ID
    }

    fn name(&self) -> LanguageModelProviderName {
        PROVIDER_NAME
    }

    fn icon(&self) -> IconOrSvg {
        IconOrSvg::Icon(IconName::AiZed)
    }

    fn default_model(&self, cx: &App) -> Option<Arc<dyn LanguageModel>> {
        let default_model = self.state.read(cx).default_model.clone()?;
        let llm_api_token = self.state.read(cx).llm_api_token.clone();
        Some(self.create_language_model(default_model, llm_api_token))
    }

    fn default_fast_model(&self, cx: &App) -> Option<Arc<dyn LanguageModel>> {
        let default_fast_model = self.state.read(cx).default_fast_model.clone()?;
        let llm_api_token = self.state.read(cx).llm_api_token.clone();
        Some(self.create_language_model(default_fast_model, llm_api_token))
    }

    fn recommended_models(&self, cx: &App) -> Vec<Arc<dyn LanguageModel>> {
        let llm_api_token = self.state.read(cx).llm_api_token.clone();
        self.state
            .read(cx)
            .recommended_models
            .iter()
            .cloned()
            .map(|model| self.create_language_model(model, llm_api_token.clone()))
            .collect()
    }

    fn provided_models(&self, cx: &App) -> Vec<Arc<dyn LanguageModel>> {
        let llm_api_token = self.state.read(cx).llm_api_token.clone();
        self.state
            .read(cx)
            .models
            .iter()
            .cloned()
            .map(|model| self.create_language_model(model, llm_api_token.clone()))
            .collect()
    }

    fn is_authenticated(&self, cx: &App) -> bool {
        let state = self.state.read(cx);
        !state.is_signed_out(cx)
    }

    fn authenticate(&self, _cx: &mut App) -> Task<Result<(), AuthenticateError>> {
        Task::ready(Ok(()))
    }

    fn configuration_view(
        &self,
        _target_agent: language_model::ConfigurationViewTargetAgent,
        _: &mut Window,
        cx: &mut App,
    ) -> AnyView {
        cx.new(|_| ConfigurationView::new(self.state.clone())).into()
    }

    fn reset_credentials(&self, _cx: &mut App) -> Task<Result<()>> {
        Task::ready(Ok(()))
    }
}

pub struct CloudLanguageModel {
    id: LanguageModelId,
    model: Arc<cloud_llm_client::LanguageModel>,
    llm_api_token: LlmApiToken,
    client: Arc<Client>,
    request_limiter: RateLimiter,
}

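/// A successful response from the completions endpoint, along with whether the
/// server indicated it will interleave status messages into the event stream.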
struct PerformLlmCompletionResponse {
    response: Response<AsyncBody>,
    includes_status_messages: bool,
}

impl CloudLanguageModel {
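    /// Sends a completion request to the cloud LLM API, refreshing the LLM
    /// token and retrying once if the server reports that it has expired.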
    async fn perform_llm_completion(
        client: Arc<Client>,
        llm_api_token: LlmApiToken,
        app_version: Option<Version>,
        body: CompletionBody,
    ) -> Result<PerformLlmCompletionResponse> {
        let http_client = &client.http_client();

        let mut token = llm_api_token.acquire(&client).await?;
        let mut refreshed_token = false;

        loop {
            let request = http_client::Request::builder()
                .method(Method::POST)
                .uri(http_client.build_zed_llm_url("/completions", &[])?.as_ref())
                .when_some(app_version.as_ref(), |builder, app_version| {
                    builder.header(ZED_VERSION_HEADER_NAME, app_version.to_string())
                })
                .header("Content-Type", "application/json")
                .header("Authorization", format!("Bearer {token}"))
                .header(CLIENT_SUPPORTS_STATUS_MESSAGES_HEADER_NAME, "true")
                .body(serde_json::to_string(&body)?.into())?;

            let mut response = http_client.send(request).await?;
            let status = response.status();
            if status.is_success() {
                let includes_status_messages = response
                    .headers()
                    .get(SERVER_SUPPORTS_STATUS_MESSAGES_HEADER_NAME)
                    .is_some();

                return Ok(PerformLlmCompletionResponse {
                    response,
                    includes_status_messages,
                });
            }

            if !refreshed_token && response.needs_llm_token_refresh() {
                token = llm_api_token.refresh(&client).await?;
                refreshed_token = true;
                continue;
            }

            if status == StatusCode::PAYMENT_REQUIRED {
                return Err(anyhow!(PaymentRequiredError));
            }

            let mut body = String::new();
            let headers = response.headers().clone();
            response.body_mut().read_to_string(&mut body).await?;
            return Err(anyhow!(ApiError {
                status,
                body,
                headers
            }));
        }
    }
}

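/// A non-success HTTP response from the cloud LLM API.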
#[derive(Debug, Error)]
#[error("cloud language model request failed with status {status}: {body}")]
struct ApiError {
    status: StatusCode,
    body: String,
    headers: HeaderMap<HeaderValue>,
}

/// Represents error responses from Zed's cloud API.
///
/// Example JSON for an upstream HTTP error:
/// ```json
/// {
///   "code": "upstream_http_error",
///   "message": "Received an error from the Anthropic API: upstream connect error or disconnect/reset before headers, reset reason: connection timeout",
///   "upstream_status": 503
/// }
/// ```
#[derive(Debug, serde::Deserialize)]
struct CloudApiError {
    code: String,
    message: String,
    #[serde(default)]
    #[serde(deserialize_with = "deserialize_optional_status_code")]
    upstream_status: Option<StatusCode>,
    #[serde(default)]
    retry_after: Option<f64>,
}

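/// Deserializes an optional numeric status code, mapping values that are not
/// valid HTTP status codes to `None` instead of failing deserialization.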
fn deserialize_optional_status_code<'de, D>(deserializer: D) -> Result<Option<StatusCode>, D::Error>
where
    D: serde::Deserializer<'de>,
{
    let opt: Option<u16> = Option::deserialize(deserializer)?;
    Ok(opt.and_then(|code| StatusCode::from_u16(code).ok()))
}

impl From<ApiError> for LanguageModelCompletionError {
    fn from(error: ApiError) -> Self {
        if let Ok(cloud_error) = serde_json::from_str::<CloudApiError>(&error.body) {
            if cloud_error.code.starts_with("upstream_http_") {
                let status = if let Some(status) = cloud_error.upstream_status {
                    status
                } else if cloud_error.code.ends_with("_error") {
                    error.status
                } else {
                    // The status code may be embedded in the code string itself
                    // (e.g. "upstream_http_429"); parse it out, falling back to
                    // the response status if it isn't a valid status code.
                    cloud_error
                        .code
                        .strip_prefix("upstream_http_")
                        .and_then(|code_str| code_str.parse::<u16>().ok())
                        .and_then(|code| StatusCode::from_u16(code).ok())
                        .unwrap_or(error.status)
                };

                return LanguageModelCompletionError::UpstreamProviderError {
                    message: cloud_error.message,
                    status,
                    retry_after: cloud_error.retry_after.map(Duration::from_secs_f64),
                };
            }

            return LanguageModelCompletionError::from_http_status(
                PROVIDER_NAME,
                error.status,
                cloud_error.message,
                None,
            );
        }

        let retry_after = None;
        LanguageModelCompletionError::from_http_status(
            PROVIDER_NAME,
            error.status,
            error.body,
            retry_after,
        )
    }
}

impl LanguageModel for CloudLanguageModel {
    fn id(&self) -> LanguageModelId {
        self.id.clone()
    }

    fn name(&self) -> LanguageModelName {
        LanguageModelName::from(self.model.display_name.clone())
    }

    fn provider_id(&self) -> LanguageModelProviderId {
        PROVIDER_ID
    }

    fn provider_name(&self) -> LanguageModelProviderName {
        PROVIDER_NAME
    }

    fn upstream_provider_id(&self) -> LanguageModelProviderId {
        use cloud_llm_client::LanguageModelProvider::*;
        match self.model.provider {
            Anthropic => language_model::ANTHROPIC_PROVIDER_ID,
            OpenAi => language_model::OPEN_AI_PROVIDER_ID,
            Google => language_model::GOOGLE_PROVIDER_ID,
            XAi => language_model::X_AI_PROVIDER_ID,
        }
    }

    fn upstream_provider_name(&self) -> LanguageModelProviderName {
        use cloud_llm_client::LanguageModelProvider::*;
        match self.model.provider {
            Anthropic => language_model::ANTHROPIC_PROVIDER_NAME,
            OpenAi => language_model::OPEN_AI_PROVIDER_NAME,
            Google => language_model::GOOGLE_PROVIDER_NAME,
            XAi => language_model::X_AI_PROVIDER_NAME,
        }
    }

    fn supports_tools(&self) -> bool {
        self.model.supports_tools
    }

    fn supports_images(&self) -> bool {
        self.model.supports_images
    }

    fn supports_thinking(&self) -> bool {
        self.model.supports_thinking
    }

    fn supported_effort_levels(&self) -> Vec<LanguageModelEffortLevel> {
        self.model
            .supported_effort_levels
            .iter()
            .map(|effort_level| LanguageModelEffortLevel {
                name: effort_level.name.clone().into(),
                value: effort_level.value.clone().into(),
            })
            .collect()
    }

    fn supports_streaming_tools(&self) -> bool {
        self.model.supports_streaming_tools
    }

    fn supports_tool_choice(&self, choice: LanguageModelToolChoice) -> bool {
        match choice {
            LanguageModelToolChoice::Auto
            | LanguageModelToolChoice::Any
            | LanguageModelToolChoice::None => true,
        }
    }

    fn supports_split_token_display(&self) -> bool {
        use cloud_llm_client::LanguageModelProvider::*;
        matches!(self.model.provider, OpenAi)
    }

    fn telemetry_id(&self) -> String {
        format!("zed.dev/{}", self.model.id)
    }

    fn tool_input_format(&self) -> LanguageModelToolSchemaFormat {
        match self.model.provider {
            cloud_llm_client::LanguageModelProvider::Anthropic
            | cloud_llm_client::LanguageModelProvider::OpenAi
            | cloud_llm_client::LanguageModelProvider::XAi => {
                LanguageModelToolSchemaFormat::JsonSchema
            }
            cloud_llm_client::LanguageModelProvider::Google => {
                LanguageModelToolSchemaFormat::JsonSchemaSubset
            }
        }
    }

    fn max_token_count(&self) -> u64 {
        self.model.max_token_count as u64
    }

    fn max_output_tokens(&self) -> Option<u64> {
        Some(self.model.max_output_tokens as u64)
    }

    fn cache_configuration(&self) -> Option<LanguageModelCacheConfiguration> {
        match &self.model.provider {
            cloud_llm_client::LanguageModelProvider::Anthropic => {
                Some(LanguageModelCacheConfiguration {
                    min_total_token: 2_048,
                    should_speculate: true,
                    max_cache_anchors: 4,
                })
            }
            cloud_llm_client::LanguageModelProvider::OpenAi
            | cloud_llm_client::LanguageModelProvider::XAi
            | cloud_llm_client::LanguageModelProvider::Google => None,
        }
    }

    fn count_tokens(
        &self,
        request: LanguageModelRequest,
        cx: &App,
    ) -> BoxFuture<'static, Result<u64>> {
        match self.model.provider {
            cloud_llm_client::LanguageModelProvider::Anthropic => cx
                .background_spawn(async move { count_anthropic_tokens_with_tiktoken(request) })
                .boxed(),
            cloud_llm_client::LanguageModelProvider::OpenAi => {
                let model = match open_ai::Model::from_id(&self.model.id.0) {
                    Ok(model) => model,
                    Err(err) => return async move { Err(anyhow!(err)) }.boxed(),
                };
                count_open_ai_tokens(request, model, cx)
            }
            cloud_llm_client::LanguageModelProvider::XAi => {
                let model = match x_ai::Model::from_id(&self.model.id.0) {
                    Ok(model) => model,
                    Err(err) => return async move { Err(anyhow!(err)) }.boxed(),
                };
                count_xai_tokens(request, model, cx)
            }
            cloud_llm_client::LanguageModelProvider::Google => {
                // For Google models, token counts are requested from the cloud
                // API's /count_tokens endpoint rather than computed locally.
                let client = self.client.clone();
                let llm_api_token = self.llm_api_token.clone();
                let model_id = self.model.id.to_string();
                let generate_content_request =
                    into_google(request, model_id.clone(), GoogleModelMode::Default);
                async move {
                    let http_client = &client.http_client();
                    let token = llm_api_token.acquire(&client).await?;

                    let request_body = CountTokensBody {
                        provider: cloud_llm_client::LanguageModelProvider::Google,
                        model: model_id,
                        provider_request: serde_json::to_value(&google_ai::CountTokensRequest {
                            generate_content_request,
                        })?,
                    };
                    let request = http_client::Request::builder()
                        .method(Method::POST)
                        .uri(
                            http_client
                                .build_zed_llm_url("/count_tokens", &[])?
                                .as_ref(),
                        )
                        .header("Content-Type", "application/json")
                        .header("Authorization", format!("Bearer {token}"))
                        .body(serde_json::to_string(&request_body)?.into())?;
                    let mut response = http_client.send(request).await?;
                    let status = response.status();
                    let headers = response.headers().clone();
                    let mut response_body = String::new();
                    response
                        .body_mut()
                        .read_to_string(&mut response_body)
                        .await?;

                    if status.is_success() {
                        let response_body: CountTokensResponse =
                            serde_json::from_str(&response_body)?;

                        Ok(response_body.tokens as u64)
                    } else {
                        Err(anyhow!(ApiError {
                            status,
                            body: response_body,
                            headers
                        }))
                    }
                }
                .boxed()
            }
        }
    }

    fn stream_completion(
        &self,
        request: LanguageModelRequest,
        cx: &AsyncApp,
    ) -> BoxFuture<
        'static,
        Result<
            BoxStream<'static, Result<LanguageModelCompletionEvent, LanguageModelCompletionError>>,
            LanguageModelCompletionError,
        >,
    > {
        let thread_id = request.thread_id.clone();
        let prompt_id = request.prompt_id.clone();
        let intent = request.intent;
        // `AsyncApp::update` returns a `Result`, so fall back gracefully if the
        // app has been dropped.
        let app_version = cx.update(|cx| AppVersion::global(cx)).ok();
        let thinking_allowed = request.thinking_allowed;
        let is_thinking_toggle_enabled = cx
            .update(|cx| cx.has_flag::<CloudThinkingToggleFeatureFlag>())
            .unwrap_or(false);
        let enable_thinking = if is_thinking_toggle_enabled {
            thinking_allowed && self.model.supports_thinking
        } else {
            // Without the toggle, thinking variants are separate models whose
            // ids end in "-thinking" (see `State::update_models`).
            thinking_allowed && self.model.id.0.ends_with("-thinking")
        };
        let provider_name = provider_name(&self.model.provider);
        match self.model.provider {
            cloud_llm_client::LanguageModelProvider::Anthropic => {
                let request = into_anthropic(
                    request,
                    self.model.id.to_string(),
                    1.0,
                    self.model.max_output_tokens as u64,
                    if enable_thinking {
                        AnthropicModelMode::Thinking {
                            budget_tokens: Some(4_096),
                        }
                    } else {
                        AnthropicModelMode::Default
                    },
                );
                let client = self.client.clone();
                let llm_api_token = self.llm_api_token.clone();
                let future = self.request_limiter.stream(async move {
                    let PerformLlmCompletionResponse {
                        response,
                        includes_status_messages,
                    } = Self::perform_llm_completion(
                        client.clone(),
                        llm_api_token,
                        app_version,
                        CompletionBody {
                            thread_id,
                            prompt_id,
                            intent,
                            provider: cloud_llm_client::LanguageModelProvider::Anthropic,
                            model: request.model.clone(),
                            provider_request: serde_json::to_value(&request)
                                .map_err(|e| anyhow!(e))?,
                        },
                    )
                    .await
                    .map_err(|err| match err.downcast::<ApiError>() {
                        Ok(api_err) => anyhow!(LanguageModelCompletionError::from(api_err)),
                        Err(err) => anyhow!(err),
                    })?;

                    let mut mapper = AnthropicEventMapper::new();
                    Ok(map_cloud_completion_events(
                        Box::pin(response_lines(response, includes_status_messages)),
                        &provider_name,
                        move |event| mapper.map_event(event),
                    ))
                });
                async move { Ok(future.await?.boxed()) }.boxed()
            }
            cloud_llm_client::LanguageModelProvider::OpenAi => {
                let client = self.client.clone();
                let llm_api_token = self.llm_api_token.clone();

                let request = into_open_ai_response(
                    request,
                    &self.model.id.0,
                    self.model.supports_parallel_tool_calls,
                    true,
                    None,
                    None,
                );
                let future = self.request_limiter.stream(async move {
                    let PerformLlmCompletionResponse {
                        response,
                        includes_status_messages,
                    } = Self::perform_llm_completion(
                        client.clone(),
                        llm_api_token,
                        app_version,
                        CompletionBody {
                            thread_id,
                            prompt_id,
                            intent,
                            provider: cloud_llm_client::LanguageModelProvider::OpenAi,
                            model: request.model.clone(),
                            provider_request: serde_json::to_value(&request)
                                .map_err(|e| anyhow!(e))?,
                        },
                    )
                    .await?;

                    let mut mapper = OpenAiResponseEventMapper::new();
                    Ok(map_cloud_completion_events(
                        Box::pin(response_lines(response, includes_status_messages)),
                        &provider_name,
                        move |event| mapper.map_event(event),
                    ))
                });
                async move { Ok(future.await?.boxed()) }.boxed()
            }
            cloud_llm_client::LanguageModelProvider::XAi => {
                let client = self.client.clone();
                let request = into_open_ai(
                    request,
                    &self.model.id.0,
                    self.model.supports_parallel_tool_calls,
                    false,
                    None,
                    None,
                );
                let llm_api_token = self.llm_api_token.clone();
                let future = self.request_limiter.stream(async move {
                    let PerformLlmCompletionResponse {
                        response,
                        includes_status_messages,
                    } = Self::perform_llm_completion(
                        client.clone(),
                        llm_api_token,
                        app_version,
                        CompletionBody {
                            thread_id,
                            prompt_id,
                            intent,
                            provider: cloud_llm_client::LanguageModelProvider::XAi,
                            model: request.model.clone(),
                            provider_request: serde_json::to_value(&request)
                                .map_err(|e| anyhow!(e))?,
                        },
                    )
                    .await?;

                    let mut mapper = OpenAiEventMapper::new();
                    Ok(map_cloud_completion_events(
                        Box::pin(response_lines(response, includes_status_messages)),
                        &provider_name,
                        move |event| mapper.map_event(event),
                    ))
                });
                async move { Ok(future.await?.boxed()) }.boxed()
            }
            cloud_llm_client::LanguageModelProvider::Google => {
                let client = self.client.clone();
                let request =
                    into_google(request, self.model.id.to_string(), GoogleModelMode::Default);
                let llm_api_token = self.llm_api_token.clone();
                let future = self.request_limiter.stream(async move {
                    let PerformLlmCompletionResponse {
                        response,
                        includes_status_messages,
                    } = Self::perform_llm_completion(
                        client.clone(),
                        llm_api_token,
                        app_version,
                        CompletionBody {
                            thread_id,
                            prompt_id,
                            intent,
                            provider: cloud_llm_client::LanguageModelProvider::Google,
                            model: request.model.model_id.clone(),
                            provider_request: serde_json::to_value(&request)
                                .map_err(|e| anyhow!(e))?,
                        },
                    )
                    .await?;

                    let mut mapper = GoogleEventMapper::new();
                    Ok(map_cloud_completion_events(
                        Box::pin(response_lines(response, includes_status_messages)),
                        &provider_name,
                        move |event| mapper.map_event(event),
                    ))
                });
                async move { Ok(future.await?.boxed()) }.boxed()
            }
        }
    }
}

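/// Adapts a stream of raw cloud completion events into language model
/// completion events, converting status messages directly and delegating
/// provider-specific events to `map_callback`.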
fn map_cloud_completion_events<T, F>(
    stream: Pin<Box<dyn Stream<Item = Result<CompletionEvent<T>>> + Send>>,
    provider: &LanguageModelProviderName,
    mut map_callback: F,
) -> BoxStream<'static, Result<LanguageModelCompletionEvent, LanguageModelCompletionError>>
where
    T: DeserializeOwned + 'static,
    F: FnMut(T) -> Vec<Result<LanguageModelCompletionEvent, LanguageModelCompletionError>>
        + Send
        + 'static,
{
    let provider = provider.clone();
    stream
        .flat_map(move |event| {
            futures::stream::iter(match event {
                Err(error) => {
                    vec![Err(LanguageModelCompletionError::from(error))]
                }
                Ok(CompletionEvent::Status(event)) => {
                    vec![
                        LanguageModelCompletionEvent::from_completion_request_status(
                            event,
                            provider.clone(),
                        ),
                    ]
                }
                Ok(CompletionEvent::Event(event)) => map_callback(event),
            })
        })
        .boxed()
}

fn provider_name(provider: &cloud_llm_client::LanguageModelProvider) -> LanguageModelProviderName {
    match provider {
        cloud_llm_client::LanguageModelProvider::Anthropic => {
            language_model::ANTHROPIC_PROVIDER_NAME
        }
        cloud_llm_client::LanguageModelProvider::OpenAi => language_model::OPEN_AI_PROVIDER_NAME,
        cloud_llm_client::LanguageModelProvider::Google => language_model::GOOGLE_PROVIDER_NAME,
        cloud_llm_client::LanguageModelProvider::XAi => language_model::X_AI_PROVIDER_NAME,
    }
}

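/// Parses the streaming response body as newline-delimited JSON, yielding one
/// `CompletionEvent` per line. When the server doesn't interleave status
/// messages, each line is deserialized as a bare provider event.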
fn response_lines<T: DeserializeOwned>(
    response: Response<AsyncBody>,
    includes_status_messages: bool,
) -> impl Stream<Item = Result<CompletionEvent<T>>> {
    futures::stream::try_unfold(
        (String::new(), BufReader::new(response.into_body())),
        move |(mut line, mut body)| async move {
            match body.read_line(&mut line).await {
                Ok(0) => Ok(None),
                Ok(_) => {
                    let event = if includes_status_messages {
                        serde_json::from_str::<CompletionEvent<T>>(&line)?
                    } else {
                        CompletionEvent::Event(serde_json::from_str::<T>(&line)?)
                    };

                    line.clear();
                    Ok(Some((event, (line, body))))
                }
                Err(e) => Err(e.into()),
            }
        },
    )
}

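/// The body of the Zed AI configuration view, rendered according to the user's
/// connection state, plan, and trial eligibility.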
#[derive(IntoElement, RegisterComponent)]
struct ZedAiConfiguration {
    is_connected: bool,
    plan: Option<Plan>,
    subscription_period: Option<(DateTime<Utc>, DateTime<Utc>)>,
    eligible_for_trial: bool,
    account_too_young: bool,
    sign_in_callback: Arc<dyn Fn(&mut Window, &mut App) + Send + Sync>,
}

impl RenderOnce for ZedAiConfiguration {
    fn render(self, _window: &mut Window, _cx: &mut App) -> impl IntoElement {
        let is_pro = self.plan.is_some_and(|plan| plan == Plan::ZedPro);
        let subscription_text = match (self.plan, self.subscription_period) {
            (Some(Plan::ZedPro), Some(_)) => {
                "You have access to Zed's hosted models through your Pro subscription."
            }
            (Some(Plan::ZedProTrial), Some(_)) => {
                "You have access to Zed's hosted models through your Pro trial."
            }
            (Some(Plan::ZedFree), Some(_)) => {
                if self.eligible_for_trial {
                    "Subscribe for access to Zed's hosted models. Start with a 14-day free trial."
                } else {
                    "Subscribe for access to Zed's hosted models."
                }
            }
            _ => {
                if self.eligible_for_trial {
                    "Subscribe for access to Zed's hosted models. Start with a 14-day free trial."
                } else {
                    "Subscribe for access to Zed's hosted models."
                }
            }
        };

        let manage_subscription_button = if is_pro {
            Button::new("manage_settings", "Manage Subscription")
                .full_width()
                .style(ButtonStyle::Tinted(TintColor::Accent))
                .on_click(|_, _, cx| cx.open_url(&zed_urls::account_url(cx)))
                .into_any_element()
        } else if self.plan.is_none() || self.eligible_for_trial {
            Button::new("start_trial", "Start 14-day Free Pro Trial")
                .full_width()
                .style(ButtonStyle::Tinted(TintColor::Accent))
                .on_click(|_, _, cx| cx.open_url(&zed_urls::start_trial_url(cx)))
                .into_any_element()
        } else {
            Button::new("upgrade", "Upgrade to Pro")
                .full_width()
                .style(ButtonStyle::Tinted(TintColor::Accent))
                .on_click(|_, _, cx| cx.open_url(&zed_urls::upgrade_to_zed_pro_url(cx)))
                .into_any_element()
        };

        if !self.is_connected {
            return v_flex()
                .gap_2()
                .child(Label::new("Sign in to access Zed's complete agentic experience with hosted models."))
                .child(
                    Button::new("sign_in", "Sign In to use Zed AI")
                        .icon_color(Color::Muted)
                        .icon(IconName::Github)
                        .icon_size(IconSize::Small)
                        .icon_position(IconPosition::Start)
                        .full_width()
                        .on_click({
                            let callback = self.sign_in_callback.clone();
                            move |_, window, cx| (callback)(window, cx)
                        }),
                );
        }

        v_flex().gap_2().w_full().map(|this| {
            if self.account_too_young {
                this.child(YoungAccountBanner).child(
                    Button::new("upgrade", "Upgrade to Pro")
                        .style(ButtonStyle::Tinted(TintColor::Accent))
                        .full_width()
                        .on_click(|_, _, cx| cx.open_url(&zed_urls::upgrade_to_zed_pro_url(cx))),
                )
            } else {
                this.text_sm()
                    .child(subscription_text)
                    .child(manage_subscription_button)
            }
        })
    }
}

struct ConfigurationView {
    state: Entity<State>,
    sign_in_callback: Arc<dyn Fn(&mut Window, &mut App) + Send + Sync>,
}

impl ConfigurationView {
    fn new(state: Entity<State>) -> Self {
        let sign_in_callback = Arc::new({
            let state = state.clone();
            move |_window: &mut Window, cx: &mut App| {
                state.update(cx, |state, cx| {
                    state.authenticate(cx).detach_and_log_err(cx);
                });
            }
        });

        Self {
            state,
            sign_in_callback,
        }
    }
}

impl Render for ConfigurationView {
    fn render(&mut self, _: &mut Window, cx: &mut Context<Self>) -> impl IntoElement {
        let state = self.state.read(cx);
        let user_store = state.user_store.read(cx);

        ZedAiConfiguration {
            is_connected: !state.is_signed_out(cx),
            plan: user_store.plan(),
            subscription_period: user_store.subscription_period(),
            eligible_for_trial: user_store.trial_started_at().is_none(),
            account_too_young: user_store.account_too_young(),
            sign_in_callback: self.sign_in_callback.clone(),
        }
    }
}

impl Component for ZedAiConfiguration {
    fn name() -> &'static str {
        "AI Configuration Content"
    }

    fn sort_name() -> &'static str {
        "AI Configuration Content"
    }

    fn scope() -> ComponentScope {
        ComponentScope::Onboarding
    }

    fn preview(_window: &mut Window, _cx: &mut App) -> Option<AnyElement> {
        fn configuration(
            is_connected: bool,
            plan: Option<Plan>,
            eligible_for_trial: bool,
            account_too_young: bool,
        ) -> AnyElement {
            ZedAiConfiguration {
                is_connected,
                plan,
                subscription_period: plan
                    .is_some()
                    .then(|| (Utc::now(), Utc::now() + chrono::Duration::days(7))),
                eligible_for_trial,
                account_too_young,
                sign_in_callback: Arc::new(|_, _| {}),
            }
            .into_any_element()
        }

        Some(
            v_flex()
                .p_4()
                .gap_4()
                .children(vec![
                    single_example("Not connected", configuration(false, None, false, false)),
                    single_example(
                        "Accept Terms of Service",
                        configuration(true, None, true, false),
                    ),
                    single_example(
                        "No Plan - Not eligible for trial",
                        configuration(true, None, false, false),
                    ),
                    single_example(
                        "No Plan - Eligible for trial",
                        configuration(true, None, true, false),
                    ),
                    single_example(
                        "Free Plan",
                        configuration(true, Some(Plan::ZedFree), true, false),
                    ),
                    single_example(
                        "Zed Pro Trial Plan",
                        configuration(true, Some(Plan::ZedProTrial), true, false),
                    ),
                    single_example(
                        "Zed Pro Plan",
                        configuration(true, Some(Plan::ZedPro), true, false),
                    ),
                ])
                .into_any_element(),
        )
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use http_client::http::{HeaderMap, StatusCode};
    use language_model::LanguageModelCompletionError;

    #[test]
    fn test_api_error_conversion_with_upstream_http_error() {
        // upstream_http_error with a 503 upstream status should surface as UpstreamProviderError
        let error_body = r#"{"code":"upstream_http_error","message":"Received an error from the Anthropic API: upstream connect error or disconnect/reset before headers, reset reason: connection timeout","upstream_status":503}"#;

        let api_error = ApiError {
            status: StatusCode::INTERNAL_SERVER_ERROR,
            body: error_body.to_string(),
            headers: HeaderMap::new(),
        };

        let completion_error: LanguageModelCompletionError = api_error.into();

        match completion_error {
            LanguageModelCompletionError::UpstreamProviderError { message, .. } => {
                assert_eq!(
                    message,
                    "Received an error from the Anthropic API: upstream connect error or disconnect/reset before headers, reset reason: connection timeout"
                );
            }
            _ => panic!(
                "Expected UpstreamProviderError for upstream 503, got: {:?}",
                completion_error
            ),
        }

        // upstream_http_error with a 500 upstream status should surface as UpstreamProviderError
        let error_body = r#"{"code":"upstream_http_error","message":"Received an error from the OpenAI API: internal server error","upstream_status":500}"#;

        let api_error = ApiError {
            status: StatusCode::INTERNAL_SERVER_ERROR,
            body: error_body.to_string(),
            headers: HeaderMap::new(),
        };

        let completion_error: LanguageModelCompletionError = api_error.into();

        match completion_error {
            LanguageModelCompletionError::UpstreamProviderError { message, .. } => {
                assert_eq!(
                    message,
                    "Received an error from the OpenAI API: internal server error"
                );
            }
            _ => panic!(
                "Expected UpstreamProviderError for upstream 500, got: {:?}",
                completion_error
            ),
        }

        // upstream_http_error with a 429 upstream status should surface as UpstreamProviderError
        let error_body = r#"{"code":"upstream_http_error","message":"Received an error from the Google API: rate limit exceeded","upstream_status":429}"#;

        let api_error = ApiError {
            status: StatusCode::INTERNAL_SERVER_ERROR,
            body: error_body.to_string(),
            headers: HeaderMap::new(),
        };

        let completion_error: LanguageModelCompletionError = api_error.into();

        match completion_error {
            LanguageModelCompletionError::UpstreamProviderError { message, .. } => {
                assert_eq!(
                    message,
                    "Received an error from the Google API: rate limit exceeded"
                );
            }
            _ => panic!(
                "Expected UpstreamProviderError for upstream 429, got: {:?}",
                completion_error
            ),
        }

        // A regular 500 error without an upstream_http_error code should map to
        // ApiInternalServerError, attributed to Zed itself
        let error_body = "Regular internal server error";

        let api_error = ApiError {
            status: StatusCode::INTERNAL_SERVER_ERROR,
            body: error_body.to_string(),
            headers: HeaderMap::new(),
        };

        let completion_error: LanguageModelCompletionError = api_error.into();

        match completion_error {
            LanguageModelCompletionError::ApiInternalServerError { provider, message } => {
                assert_eq!(provider, PROVIDER_NAME);
                assert_eq!(message, "Regular internal server error");
            }
            _ => panic!(
                "Expected ApiInternalServerError for regular 500, got: {:?}",
                completion_error
            ),
        }

        // The upstream_http_429 code format should be converted to UpstreamProviderError
        let error_body = r#"{"code":"upstream_http_429","message":"Upstream Anthropic rate limit exceeded.","retry_after":30.5}"#;

        let api_error = ApiError {
            status: StatusCode::INTERNAL_SERVER_ERROR,
            body: error_body.to_string(),
            headers: HeaderMap::new(),
        };

        let completion_error: LanguageModelCompletionError = api_error.into();

        match completion_error {
            LanguageModelCompletionError::UpstreamProviderError {
                message,
                status,
                retry_after,
            } => {
                assert_eq!(message, "Upstream Anthropic rate limit exceeded.");
                assert_eq!(status, StatusCode::TOO_MANY_REQUESTS);
                assert_eq!(retry_after, Some(Duration::from_secs_f64(30.5)));
            }
            _ => panic!(
                "Expected UpstreamProviderError for upstream_http_429, got: {:?}",
                completion_error
            ),
        }

        // Invalid JSON in the error body should fall back to regular error handling
        let error_body = "Not JSON at all";

        let api_error = ApiError {
            status: StatusCode::INTERNAL_SERVER_ERROR,
            body: error_body.to_string(),
            headers: HeaderMap::new(),
        };

        let completion_error: LanguageModelCompletionError = api_error.into();

        match completion_error {
            LanguageModelCompletionError::ApiInternalServerError { provider, .. } => {
                assert_eq!(provider, PROVIDER_NAME);
            }
            _ => panic!(
                "Expected ApiInternalServerError for invalid JSON, got: {:?}",
                completion_error
            ),
        }
    }
}