cloud.rs

use ai_onboarding::YoungAccountBanner;
use anthropic::AnthropicModelMode;
use anyhow::{Context as _, Result, anyhow};
use chrono::{DateTime, Utc};
use client::{Client, UserStore, zed_urls};
use cloud_api_types::Plan;
use cloud_llm_client::{
    CLIENT_SUPPORTS_STATUS_MESSAGES_HEADER_NAME, CLIENT_SUPPORTS_X_AI_HEADER_NAME, CompletionBody,
    CompletionEvent, CountTokensBody, CountTokensResponse, ListModelsResponse,
    SERVER_SUPPORTS_STATUS_MESSAGES_HEADER_NAME, ZED_VERSION_HEADER_NAME,
};
use feature_flags::{CloudThinkingToggleFeatureFlag, FeatureFlagAppExt as _};
use futures::{
    AsyncBufReadExt, FutureExt, Stream, StreamExt, future::BoxFuture, stream::BoxStream,
};
use google_ai::GoogleModelMode;
use gpui::{AnyElement, AnyView, App, AsyncApp, Context, Entity, Subscription, Task};
use http_client::http::{HeaderMap, HeaderValue};
use http_client::{AsyncBody, HttpClient, HttpRequestExt, Method, Response, StatusCode};
use language_model::{
    AuthenticateError, IconOrSvg, LanguageModel, LanguageModelCacheConfiguration,
    LanguageModelCompletionError, LanguageModelCompletionEvent, LanguageModelEffortLevel,
    LanguageModelId, LanguageModelName, LanguageModelProvider, LanguageModelProviderId,
    LanguageModelProviderName, LanguageModelProviderState, LanguageModelRequest,
    LanguageModelToolChoice, LanguageModelToolSchemaFormat, LlmApiToken, NeedsLlmTokenRefresh,
    PaymentRequiredError, RateLimiter, RefreshLlmTokenListener,
};
use release_channel::AppVersion;
use schemars::JsonSchema;
use semver::Version;
use serde::{Deserialize, Serialize, de::DeserializeOwned};
use settings::SettingsStore;
pub use settings::ZedDotDevAvailableModel as AvailableModel;
pub use settings::ZedDotDevAvailableProvider as AvailableProvider;
use smol::io::{AsyncReadExt, BufReader};
use std::pin::Pin;
use std::str::FromStr;
use std::sync::Arc;
use std::time::Duration;
use thiserror::Error;
use ui::{TintColor, prelude::*};
use util::{ResultExt as _, maybe};

use crate::provider::anthropic::{
    AnthropicEventMapper, count_anthropic_tokens_with_tiktoken, into_anthropic,
};
use crate::provider::google::{GoogleEventMapper, into_google};
use crate::provider::open_ai::{
    OpenAiEventMapper, OpenAiResponseEventMapper, count_open_ai_tokens, into_open_ai,
    into_open_ai_response,
};
use crate::provider::x_ai::count_xai_tokens;

const PROVIDER_ID: LanguageModelProviderId = language_model::ZED_CLOUD_PROVIDER_ID;
const PROVIDER_NAME: LanguageModelProviderName = language_model::ZED_CLOUD_PROVIDER_NAME;

#[derive(Default, Clone, Debug, PartialEq)]
pub struct ZedDotDevSettings {
    pub available_models: Vec<AvailableModel>,
}

#[derive(Default, Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema)]
#[serde(tag = "type", rename_all = "lowercase")]
pub enum ModelMode {
    #[default]
    Default,
    Thinking {
        /// The maximum number of tokens to use for reasoning. Must be lower than the model's `max_output_tokens`.
        budget_tokens: Option<u32>,
    },
}

impl From<ModelMode> for AnthropicModelMode {
    fn from(value: ModelMode) -> Self {
        match value {
            ModelMode::Default => AnthropicModelMode::Default,
            ModelMode::Thinking { budget_tokens } => AnthropicModelMode::Thinking { budget_tokens },
        }
    }
}

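/// Language model provider backed by Zed's cloud API, which proxies requests
/// to Anthropic, OpenAI, Google, and xAI models on behalf of the signed-in
/// user.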
pub struct CloudLanguageModelProvider {
    client: Arc<Client>,
    state: Entity<State>,
    _maintain_client_status: Task<()>,
}

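/// Observable provider state: the connected client, the LLM API token, and
/// the model list most recently fetched from the cloud API.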
pub struct State {
    client: Arc<Client>,
    llm_api_token: LlmApiToken,
    user_store: Entity<UserStore>,
    status: client::Status,
    models: Vec<Arc<cloud_llm_client::LanguageModel>>,
    default_model: Option<Arc<cloud_llm_client::LanguageModel>>,
    default_fast_model: Option<Arc<cloud_llm_client::LanguageModel>>,
    recommended_models: Vec<Arc<cloud_llm_client::LanguageModel>>,
    _fetch_models_task: Task<()>,
    _settings_subscription: Subscription,
    _llm_token_subscription: Subscription,
}

impl State {
    fn new(
        client: Arc<Client>,
        user_store: Entity<UserStore>,
        status: client::Status,
        cx: &mut Context<Self>,
    ) -> Self {
        let refresh_llm_token_listener = RefreshLlmTokenListener::global(cx);
        let mut current_user = user_store.read(cx).watch_current_user();
        Self {
            client: client.clone(),
            llm_api_token: LlmApiToken::default(),
            user_store,
            status,
            models: Vec::new(),
            default_model: None,
            default_fast_model: None,
            recommended_models: Vec::new(),
            _fetch_models_task: cx.spawn(async move |this, cx| {
                maybe!(async move {
                    let (client, llm_api_token) = this
                        .read_with(cx, |this, _cx| (client.clone(), this.llm_api_token.clone()))?;

                    while current_user.borrow().is_none() {
                        current_user.next().await;
                    }

                    let response =
                        Self::fetch_models(client.clone(), llm_api_token.clone()).await?;
                    this.update(cx, |this, cx| this.update_models(response, cx))?;
                    anyhow::Ok(())
                })
                .await
                .context("failed to fetch Zed models")
                .log_err();
            }),
            _settings_subscription: cx.observe_global::<SettingsStore>(|_, cx| {
                cx.notify();
            }),
            _llm_token_subscription: cx.subscribe(
                &refresh_llm_token_listener,
                move |this, _listener, _event, cx| {
                    let client = this.client.clone();
                    let llm_api_token = this.llm_api_token.clone();
                    cx.spawn(async move |this, cx| {
                        llm_api_token.refresh(&client).await?;
                        let response = Self::fetch_models(client, llm_api_token).await?;
                        this.update(cx, |this, cx| {
                            this.update_models(response, cx);
                        })
                    })
                    .detach_and_log_err(cx);
                },
            ),
        }
    }

    fn is_signed_out(&self, cx: &App) -> bool {
        self.user_store.read(cx).current_user().is_none()
    }

    fn authenticate(&self, cx: &mut Context<Self>) -> Task<Result<()>> {
        let client = self.client.clone();
        cx.spawn(async move |state, cx| {
            client.sign_in_with_optional_connect(true, cx).await?;
            state.update(cx, |_, cx| cx.notify())
        })
    }

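    /// Rebuilds the model list from a `ListModelsResponse`. Unless the
    /// thinking-toggle feature flag is enabled, each model that supports
    /// thinking also gets a separate `<id>-thinking` variant, and the
    /// default, fast, and recommended models are re-resolved against the
    /// expanded list.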
    fn update_models(&mut self, response: ListModelsResponse, cx: &mut Context<Self>) {
        let is_thinking_toggle_enabled = cx.has_flag::<CloudThinkingToggleFeatureFlag>();

        let mut models = Vec::new();

        for model in response.models {
            models.push(Arc::new(model.clone()));

            if !is_thinking_toggle_enabled {
                // Right now we represent thinking variants of models as separate models on the client,
                // so we need to insert variants for any model that supports thinking.
                if model.supports_thinking {
                    models.push(Arc::new(cloud_llm_client::LanguageModel {
                        id: cloud_llm_client::LanguageModelId(
                            format!("{}-thinking", model.id).into(),
                        ),
                        display_name: format!("{} Thinking", model.display_name),
                        ..model
                    }));
                }
            }
        }

        self.default_model = models
            .iter()
            .find(|model| {
                response
                    .default_model
                    .as_ref()
                    .is_some_and(|default_model_id| &model.id == default_model_id)
            })
            .cloned();
        self.default_fast_model = models
            .iter()
            .find(|model| {
                response
                    .default_fast_model
                    .as_ref()
                    .is_some_and(|default_fast_model_id| &model.id == default_fast_model_id)
            })
            .cloned();
        self.recommended_models = response
            .recommended_models
            .iter()
            .filter_map(|id| models.iter().find(|model| &model.id == id))
            .cloned()
            .collect();
        self.models = models;
        cx.notify();
    }

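    /// Fetches the list of available models from the cloud API's `/models`
    /// endpoint, authenticating with the current LLM API token.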
    async fn fetch_models(
        client: Arc<Client>,
        llm_api_token: LlmApiToken,
    ) -> Result<ListModelsResponse> {
        let http_client = &client.http_client();
        let token = llm_api_token.acquire(&client).await?;

        let request = http_client::Request::builder()
            .method(Method::GET)
            .header(CLIENT_SUPPORTS_X_AI_HEADER_NAME, "true")
            .uri(http_client.build_zed_llm_url("/models", &[])?.as_ref())
            .header("Authorization", format!("Bearer {token}"))
            .body(AsyncBody::empty())?;
        let mut response = http_client
            .send(request)
            .await
            .context("failed to send list models request")?;

        if response.status().is_success() {
            let mut body = String::new();
            response.body_mut().read_to_string(&mut body).await?;
            Ok(serde_json::from_str(&body)?)
        } else {
            let mut body = String::new();
            response.body_mut().read_to_string(&mut body).await?;
            anyhow::bail!(
                "error listing models.\nStatus: {:?}\nBody: {body}",
                response.status(),
            );
        }
    }
}

impl CloudLanguageModelProvider {
    pub fn new(user_store: Entity<UserStore>, client: Arc<Client>, cx: &mut App) -> Self {
        let mut status_rx = client.status();
        let status = *status_rx.borrow();

        let state = cx.new(|cx| State::new(client.clone(), user_store.clone(), status, cx));

        let state_ref = state.downgrade();
        let maintain_client_status = cx.spawn(async move |cx| {
            while let Some(status) = status_rx.next().await {
                if let Some(this) = state_ref.upgrade() {
                    _ = this.update(cx, |this, cx| {
                        if this.status != status {
                            this.status = status;
                            cx.notify();
                        }
                    });
                } else {
                    break;
                }
            }
        });

        Self {
            client,
            state,
            _maintain_client_status: maintain_client_status,
        }
    }

    fn create_language_model(
        &self,
        model: Arc<cloud_llm_client::LanguageModel>,
        llm_api_token: LlmApiToken,
    ) -> Arc<dyn LanguageModel> {
        Arc::new(CloudLanguageModel {
            id: LanguageModelId(SharedString::from(model.id.0.clone())),
            model,
            llm_api_token,
            client: self.client.clone(),
            request_limiter: RateLimiter::new(4),
        })
    }
}

impl LanguageModelProviderState for CloudLanguageModelProvider {
    type ObservableEntity = State;

    fn observable_entity(&self) -> Option<Entity<Self::ObservableEntity>> {
        Some(self.state.clone())
    }
}

impl LanguageModelProvider for CloudLanguageModelProvider {
    fn id(&self) -> LanguageModelProviderId {
        PROVIDER_ID
    }

    fn name(&self) -> LanguageModelProviderName {
        PROVIDER_NAME
    }

    fn icon(&self) -> IconOrSvg {
        IconOrSvg::Icon(IconName::AiZed)
    }

    fn default_model(&self, cx: &App) -> Option<Arc<dyn LanguageModel>> {
        let default_model = self.state.read(cx).default_model.clone()?;
        let llm_api_token = self.state.read(cx).llm_api_token.clone();
        Some(self.create_language_model(default_model, llm_api_token))
    }

    fn default_fast_model(&self, cx: &App) -> Option<Arc<dyn LanguageModel>> {
        let default_fast_model = self.state.read(cx).default_fast_model.clone()?;
        let llm_api_token = self.state.read(cx).llm_api_token.clone();
        Some(self.create_language_model(default_fast_model, llm_api_token))
    }

    fn recommended_models(&self, cx: &App) -> Vec<Arc<dyn LanguageModel>> {
        let llm_api_token = self.state.read(cx).llm_api_token.clone();
        self.state
            .read(cx)
            .recommended_models
            .iter()
            .cloned()
            .map(|model| self.create_language_model(model, llm_api_token.clone()))
            .collect()
    }

    fn provided_models(&self, cx: &App) -> Vec<Arc<dyn LanguageModel>> {
        let llm_api_token = self.state.read(cx).llm_api_token.clone();
        self.state
            .read(cx)
            .models
            .iter()
            .cloned()
            .map(|model| self.create_language_model(model, llm_api_token.clone()))
            .collect()
    }

    fn is_authenticated(&self, cx: &App) -> bool {
        let state = self.state.read(cx);
        !state.is_signed_out(cx)
    }

    fn authenticate(&self, _cx: &mut App) -> Task<Result<(), AuthenticateError>> {
        Task::ready(Ok(()))
    }

    fn configuration_view(
        &self,
        _target_agent: language_model::ConfigurationViewTargetAgent,
        _: &mut Window,
        cx: &mut App,
    ) -> AnyView {
        cx.new(|_| ConfigurationView::new(self.state.clone())).into()
    }

    fn reset_credentials(&self, _cx: &mut App) -> Task<Result<()>> {
        Task::ready(Ok(()))
    }
}

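/// A single cloud-hosted model. Completion requests are proxied through Zed's
/// servers to the upstream provider, rate-limited per model instance.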
pub struct CloudLanguageModel {
    id: LanguageModelId,
    model: Arc<cloud_llm_client::LanguageModel>,
    llm_api_token: LlmApiToken,
    client: Arc<Client>,
    request_limiter: RateLimiter,
}

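/// A successful `/completions` response, along with whether the server
/// indicated it will interleave status messages into the event stream.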
struct PerformLlmCompletionResponse {
    response: Response<AsyncBody>,
    includes_status_messages: bool,
}

impl CloudLanguageModel {
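    /// Sends a completion request to the cloud API's `/completions` endpoint.
    /// If the server rejects the request because the LLM token has expired,
    /// the token is refreshed and the request is retried once.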
    async fn perform_llm_completion(
        client: Arc<Client>,
        llm_api_token: LlmApiToken,
        app_version: Option<Version>,
        body: CompletionBody,
    ) -> Result<PerformLlmCompletionResponse> {
        let http_client = &client.http_client();

        let mut token = llm_api_token.acquire(&client).await?;
        let mut refreshed_token = false;

        loop {
            let request = http_client::Request::builder()
                .method(Method::POST)
                .uri(http_client.build_zed_llm_url("/completions", &[])?.as_ref())
                .when_some(app_version.as_ref(), |builder, app_version| {
                    builder.header(ZED_VERSION_HEADER_NAME, app_version.to_string())
                })
                .header("Content-Type", "application/json")
                .header("Authorization", format!("Bearer {token}"))
                .header(CLIENT_SUPPORTS_STATUS_MESSAGES_HEADER_NAME, "true")
                .body(serde_json::to_string(&body)?.into())?;

            let mut response = http_client.send(request).await?;
            let status = response.status();
            if status.is_success() {
                let includes_status_messages = response
                    .headers()
                    .get(SERVER_SUPPORTS_STATUS_MESSAGES_HEADER_NAME)
                    .is_some();

                return Ok(PerformLlmCompletionResponse {
                    response,
                    includes_status_messages,
                });
            }

            if !refreshed_token && response.needs_llm_token_refresh() {
                token = llm_api_token.refresh(&client).await?;
                refreshed_token = true;
                continue;
            }

            if status == StatusCode::PAYMENT_REQUIRED {
                return Err(anyhow!(PaymentRequiredError));
            }

            let mut body = String::new();
            let headers = response.headers().clone();
            response.body_mut().read_to_string(&mut body).await?;
            return Err(anyhow!(ApiError {
                status,
                body,
                headers
            }));
        }
    }
}

#[derive(Debug, Error)]
#[error("cloud language model request failed with status {status}: {body}")]
struct ApiError {
    status: StatusCode,
    body: String,
    headers: HeaderMap<HeaderValue>,
}

/// Represents error responses from Zed's cloud API.
///
/// Example JSON for an upstream HTTP error:
/// ```json
/// {
///   "code": "upstream_http_error",
///   "message": "Received an error from the Anthropic API: upstream connect error or disconnect/reset before headers, reset reason: connection timeout",
///   "upstream_status": 503
/// }
/// ```
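///
/// The status may instead be appended to the code, with an optional
/// `retry_after` hint in seconds; this example mirrors the test case at the
/// bottom of this file:
/// ```json
/// {
///   "code": "upstream_http_429",
///   "message": "Upstream Anthropic rate limit exceeded.",
///   "retry_after": 30.5
/// }
/// ```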
#[derive(Debug, serde::Deserialize)]
struct CloudApiError {
    code: String,
    message: String,
    #[serde(default)]
    #[serde(deserialize_with = "deserialize_optional_status_code")]
    upstream_status: Option<StatusCode>,
    #[serde(default)]
    retry_after: Option<f64>,
}

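/// Deserializes an optional numeric status code, mapping values that are not
/// valid HTTP status codes to `None` rather than failing deserialization.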
fn deserialize_optional_status_code<'de, D>(deserializer: D) -> Result<Option<StatusCode>, D::Error>
where
    D: serde::Deserializer<'de>,
{
    let opt: Option<u16> = Option::deserialize(deserializer)?;
    Ok(opt.and_then(|code| StatusCode::from_u16(code).ok()))
}

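/// Maps an `ApiError` from the cloud API onto `LanguageModelCompletionError`.
/// Bodies that parse as a `CloudApiError` with an `upstream_http_*` code are
/// attributed to the upstream provider; anything else is attributed to Zed
/// via `from_http_status`.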
impl From<ApiError> for LanguageModelCompletionError {
    fn from(error: ApiError) -> Self {
        if let Ok(cloud_error) = serde_json::from_str::<CloudApiError>(&error.body) {
            if cloud_error.code.starts_with("upstream_http_") {
                let status = if let Some(status) = cloud_error.upstream_status {
                    status
                } else if cloud_error.code.ends_with("_error") {
                    error.status
                } else {
                    // If the code itself carries a status (e.g. "upstream_http_429"),
                    // parse it; otherwise fall back to the response's own status.
                    cloud_error
                        .code
                        .strip_prefix("upstream_http_")
                        .and_then(|code_str| code_str.parse::<u16>().ok())
                        .and_then(|code| StatusCode::from_u16(code).ok())
                        .unwrap_or(error.status)
                };

                return LanguageModelCompletionError::UpstreamProviderError {
                    message: cloud_error.message,
                    status,
                    retry_after: cloud_error.retry_after.map(Duration::from_secs_f64),
                };
            }

            return LanguageModelCompletionError::from_http_status(
                PROVIDER_NAME,
                error.status,
                cloud_error.message,
                None,
            );
        }

        let retry_after = None;
        LanguageModelCompletionError::from_http_status(
            PROVIDER_NAME,
            error.status,
            error.body,
            retry_after,
        )
    }
}

impl LanguageModel for CloudLanguageModel {
    fn id(&self) -> LanguageModelId {
        self.id.clone()
    }

    fn name(&self) -> LanguageModelName {
        LanguageModelName::from(self.model.display_name.clone())
    }

    fn provider_id(&self) -> LanguageModelProviderId {
        PROVIDER_ID
    }

    fn provider_name(&self) -> LanguageModelProviderName {
        PROVIDER_NAME
    }

    fn upstream_provider_id(&self) -> LanguageModelProviderId {
        use cloud_llm_client::LanguageModelProvider::*;
        match self.model.provider {
            Anthropic => language_model::ANTHROPIC_PROVIDER_ID,
            OpenAi => language_model::OPEN_AI_PROVIDER_ID,
            Google => language_model::GOOGLE_PROVIDER_ID,
            XAi => language_model::X_AI_PROVIDER_ID,
        }
    }

    fn upstream_provider_name(&self) -> LanguageModelProviderName {
        use cloud_llm_client::LanguageModelProvider::*;
        match self.model.provider {
            Anthropic => language_model::ANTHROPIC_PROVIDER_NAME,
            OpenAi => language_model::OPEN_AI_PROVIDER_NAME,
            Google => language_model::GOOGLE_PROVIDER_NAME,
            XAi => language_model::X_AI_PROVIDER_NAME,
        }
    }

    fn supports_tools(&self) -> bool {
        self.model.supports_tools
    }

    fn supports_images(&self) -> bool {
        self.model.supports_images
    }

    fn supports_thinking(&self) -> bool {
        self.model.supports_thinking
    }

    fn supported_effort_levels(&self) -> Vec<LanguageModelEffortLevel> {
        self.model
            .supported_effort_levels
            .iter()
            .map(|effort_level| LanguageModelEffortLevel {
                name: effort_level.name.clone().into(),
                value: effort_level.value.clone().into(),
                is_default: effort_level.is_default.unwrap_or(false),
            })
            .collect()
    }

    fn supports_streaming_tools(&self) -> bool {
        self.model.supports_streaming_tools
    }

    fn supports_tool_choice(&self, choice: LanguageModelToolChoice) -> bool {
        match choice {
            LanguageModelToolChoice::Auto
            | LanguageModelToolChoice::Any
            | LanguageModelToolChoice::None => true,
        }
    }

    fn supports_split_token_display(&self) -> bool {
        use cloud_llm_client::LanguageModelProvider::*;
        matches!(self.model.provider, OpenAi)
    }

    fn telemetry_id(&self) -> String {
        format!("zed.dev/{}", self.model.id)
    }

    fn tool_input_format(&self) -> LanguageModelToolSchemaFormat {
        match self.model.provider {
            cloud_llm_client::LanguageModelProvider::Anthropic
            | cloud_llm_client::LanguageModelProvider::OpenAi
            | cloud_llm_client::LanguageModelProvider::XAi => {
                LanguageModelToolSchemaFormat::JsonSchema
            }
            cloud_llm_client::LanguageModelProvider::Google => {
                LanguageModelToolSchemaFormat::JsonSchemaSubset
            }
        }
    }

    fn max_token_count(&self) -> u64 {
        self.model.max_token_count as u64
    }

    fn max_output_tokens(&self) -> Option<u64> {
        Some(self.model.max_output_tokens as u64)
    }

    fn cache_configuration(&self) -> Option<LanguageModelCacheConfiguration> {
        match &self.model.provider {
            cloud_llm_client::LanguageModelProvider::Anthropic => {
                Some(LanguageModelCacheConfiguration {
                    min_total_token: 2_048,
                    should_speculate: true,
                    max_cache_anchors: 4,
                })
            }
            cloud_llm_client::LanguageModelProvider::OpenAi
            | cloud_llm_client::LanguageModelProvider::XAi
            | cloud_llm_client::LanguageModelProvider::Google => None,
        }
    }

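    /// Counts the tokens in `request`. Anthropic, OpenAI, and xAI models are
    /// counted locally with tokenizer approximations, while Google models are
    /// counted server-side via the cloud API's `/count_tokens` endpoint.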
    fn count_tokens(
        &self,
        request: LanguageModelRequest,
        cx: &App,
    ) -> BoxFuture<'static, Result<u64>> {
        match self.model.provider {
            cloud_llm_client::LanguageModelProvider::Anthropic => cx
                .background_spawn(async move { count_anthropic_tokens_with_tiktoken(request) })
                .boxed(),
            cloud_llm_client::LanguageModelProvider::OpenAi => {
                let model = match open_ai::Model::from_id(&self.model.id.0) {
                    Ok(model) => model,
                    Err(err) => return async move { Err(anyhow!(err)) }.boxed(),
                };
                count_open_ai_tokens(request, model, cx)
            }
            cloud_llm_client::LanguageModelProvider::XAi => {
                let model = match x_ai::Model::from_id(&self.model.id.0) {
                    Ok(model) => model,
                    Err(err) => return async move { Err(anyhow!(err)) }.boxed(),
                };
                count_xai_tokens(request, model, cx)
            }
            cloud_llm_client::LanguageModelProvider::Google => {
                let client = self.client.clone();
                let llm_api_token = self.llm_api_token.clone();
                let model_id = self.model.id.to_string();
                let generate_content_request =
                    into_google(request, model_id.clone(), GoogleModelMode::Default);
                async move {
                    let http_client = &client.http_client();
                    let token = llm_api_token.acquire(&client).await?;

                    let request_body = CountTokensBody {
                        provider: cloud_llm_client::LanguageModelProvider::Google,
                        model: model_id,
                        provider_request: serde_json::to_value(&google_ai::CountTokensRequest {
                            generate_content_request,
                        })?,
                    };
                    let request = http_client::Request::builder()
                        .method(Method::POST)
                        .uri(
                            http_client
                                .build_zed_llm_url("/count_tokens", &[])?
                                .as_ref(),
                        )
                        .header("Content-Type", "application/json")
                        .header("Authorization", format!("Bearer {token}"))
                        .body(serde_json::to_string(&request_body)?.into())?;
                    let mut response = http_client.send(request).await?;
                    let status = response.status();
                    let headers = response.headers().clone();
                    let mut response_body = String::new();
                    response
                        .body_mut()
                        .read_to_string(&mut response_body)
                        .await?;

                    if status.is_success() {
                        let response_body: CountTokensResponse =
                            serde_json::from_str(&response_body)?;

                        Ok(response_body.tokens as u64)
                    } else {
                        Err(anyhow!(ApiError {
                            status,
                            body: response_body,
                            headers
                        }))
                    }
                }
                .boxed()
            }
        }
    }

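    /// Streams a completion through Zed's cloud API, translating the request
    /// into the upstream provider's wire format and mapping the streamed
    /// events back into `LanguageModelCompletionEvent`s.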
    fn stream_completion(
        &self,
        request: LanguageModelRequest,
        cx: &AsyncApp,
    ) -> BoxFuture<
        'static,
        Result<
            BoxStream<'static, Result<LanguageModelCompletionEvent, LanguageModelCompletionError>>,
            LanguageModelCompletionError,
        >,
    > {
        let thread_id = request.thread_id.clone();
        let prompt_id = request.prompt_id.clone();
        let intent = request.intent;
        let app_version = cx.update(|cx| AppVersion::global(cx)).ok();
        let thinking_allowed = request.thinking_allowed;
        let is_thinking_toggle_enabled = cx
            .update(|cx| cx.has_flag::<CloudThinkingToggleFeatureFlag>())
            .unwrap_or(false);
        let enable_thinking = if is_thinking_toggle_enabled {
            thinking_allowed && self.model.supports_thinking
        } else {
            thinking_allowed && self.model.id.0.ends_with("-thinking")
        };
        let effort = request
            .thinking_effort
            .as_ref()
            .and_then(|effort| anthropic::Effort::from_str(effort).ok());
        let provider_name = provider_name(&self.model.provider);
        match self.model.provider {
            cloud_llm_client::LanguageModelProvider::Anthropic => {
                let mut request = into_anthropic(
                    request,
                    self.model.id.to_string(),
                    1.0,
                    self.model.max_output_tokens as u64,
                    if enable_thinking {
                        AnthropicModelMode::Thinking {
                            budget_tokens: Some(4_096),
                        }
                    } else {
                        AnthropicModelMode::Default
                    },
                );

                if enable_thinking && effort.is_some() {
                    request.thinking = Some(anthropic::Thinking::Adaptive);
                    request.output_config = Some(anthropic::OutputConfig { effort });
                }

                let client = self.client.clone();
                let llm_api_token = self.llm_api_token.clone();
                let future = self.request_limiter.stream(async move {
                    let PerformLlmCompletionResponse {
                        response,
                        includes_status_messages,
                    } = Self::perform_llm_completion(
                        client.clone(),
                        llm_api_token,
                        app_version,
                        CompletionBody {
                            thread_id,
                            prompt_id,
                            intent,
                            provider: cloud_llm_client::LanguageModelProvider::Anthropic,
                            model: request.model.clone(),
                            provider_request: serde_json::to_value(&request)
                                .map_err(|e| anyhow!(e))?,
                        },
                    )
                    .await
                    .map_err(|err| match err.downcast::<ApiError>() {
                        Ok(api_err) => anyhow!(LanguageModelCompletionError::from(api_err)),
                        Err(err) => anyhow!(err),
                    })?;

                    let mut mapper = AnthropicEventMapper::new();
                    Ok(map_cloud_completion_events(
                        Box::pin(response_lines(response, includes_status_messages)),
                        &provider_name,
                        move |event| mapper.map_event(event),
                    ))
                });
                async move { Ok(future.await?.boxed()) }.boxed()
            }
            cloud_llm_client::LanguageModelProvider::OpenAi => {
                let client = self.client.clone();
                let llm_api_token = self.llm_api_token.clone();

                let request = into_open_ai_response(
                    request,
                    &self.model.id.0,
                    self.model.supports_parallel_tool_calls,
                    true,
                    None,
                    None,
                );
                let future = self.request_limiter.stream(async move {
                    let PerformLlmCompletionResponse {
                        response,
                        includes_status_messages,
                    } = Self::perform_llm_completion(
                        client.clone(),
                        llm_api_token,
                        app_version,
                        CompletionBody {
                            thread_id,
                            prompt_id,
                            intent,
                            provider: cloud_llm_client::LanguageModelProvider::OpenAi,
                            model: request.model.clone(),
                            provider_request: serde_json::to_value(&request)
                                .map_err(|e| anyhow!(e))?,
                        },
                    )
                    .await?;

                    let mut mapper = OpenAiResponseEventMapper::new();
                    Ok(map_cloud_completion_events(
                        Box::pin(response_lines(response, includes_status_messages)),
                        &provider_name,
                        move |event| mapper.map_event(event),
                    ))
                });
                async move { Ok(future.await?.boxed()) }.boxed()
            }
            cloud_llm_client::LanguageModelProvider::XAi => {
                let client = self.client.clone();
                let request = into_open_ai(
                    request,
                    &self.model.id.0,
                    self.model.supports_parallel_tool_calls,
                    false,
                    None,
                    None,
                );
                let llm_api_token = self.llm_api_token.clone();
                let future = self.request_limiter.stream(async move {
                    let PerformLlmCompletionResponse {
                        response,
                        includes_status_messages,
                    } = Self::perform_llm_completion(
                        client.clone(),
                        llm_api_token,
                        app_version,
                        CompletionBody {
                            thread_id,
                            prompt_id,
                            intent,
                            provider: cloud_llm_client::LanguageModelProvider::XAi,
                            model: request.model.clone(),
                            provider_request: serde_json::to_value(&request)
                                .map_err(|e| anyhow!(e))?,
                        },
                    )
                    .await?;

                    let mut mapper = OpenAiEventMapper::new();
                    Ok(map_cloud_completion_events(
                        Box::pin(response_lines(response, includes_status_messages)),
                        &provider_name,
                        move |event| mapper.map_event(event),
                    ))
                });
                async move { Ok(future.await?.boxed()) }.boxed()
            }
            cloud_llm_client::LanguageModelProvider::Google => {
                let client = self.client.clone();
                let request =
                    into_google(request, self.model.id.to_string(), GoogleModelMode::Default);
                let llm_api_token = self.llm_api_token.clone();
                let future = self.request_limiter.stream(async move {
                    let PerformLlmCompletionResponse {
                        response,
                        includes_status_messages,
                    } = Self::perform_llm_completion(
                        client.clone(),
                        llm_api_token,
                        app_version,
                        CompletionBody {
                            thread_id,
                            prompt_id,
                            intent,
                            provider: cloud_llm_client::LanguageModelProvider::Google,
                            model: request.model.model_id.clone(),
                            provider_request: serde_json::to_value(&request)
                                .map_err(|e| anyhow!(e))?,
                        },
                    )
                    .await?;

                    let mut mapper = GoogleEventMapper::new();
                    Ok(map_cloud_completion_events(
                        Box::pin(response_lines(response, includes_status_messages)),
                        &provider_name,
                        move |event| mapper.map_event(event),
                    ))
                });
                async move { Ok(future.await?.boxed()) }.boxed()
            }
        }
    }
}

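/// Adapts a stream of raw cloud completion events into
/// `LanguageModelCompletionEvent`s: transport errors and server status
/// messages are handled here, while provider-specific events are delegated
/// to `map_callback`.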
fn map_cloud_completion_events<T, F>(
    stream: Pin<Box<dyn Stream<Item = Result<CompletionEvent<T>>> + Send>>,
    provider: &LanguageModelProviderName,
    mut map_callback: F,
) -> BoxStream<'static, Result<LanguageModelCompletionEvent, LanguageModelCompletionError>>
where
    T: DeserializeOwned + 'static,
    F: FnMut(T) -> Vec<Result<LanguageModelCompletionEvent, LanguageModelCompletionError>>
        + Send
        + 'static,
{
    let provider = provider.clone();
    stream
        .flat_map(move |event| {
            futures::stream::iter(match event {
                Err(error) => {
                    vec![Err(LanguageModelCompletionError::from(error))]
                }
                Ok(CompletionEvent::Status(event)) => {
                    vec![
                        LanguageModelCompletionEvent::from_completion_request_status(
                            event,
                            provider.clone(),
                        ),
                    ]
                }
                Ok(CompletionEvent::Event(event)) => map_callback(event),
            })
        })
        .boxed()
}

fn provider_name(provider: &cloud_llm_client::LanguageModelProvider) -> LanguageModelProviderName {
    match provider {
        cloud_llm_client::LanguageModelProvider::Anthropic => {
            language_model::ANTHROPIC_PROVIDER_NAME
        }
        cloud_llm_client::LanguageModelProvider::OpenAi => language_model::OPEN_AI_PROVIDER_NAME,
        cloud_llm_client::LanguageModelProvider::Google => language_model::GOOGLE_PROVIDER_NAME,
        cloud_llm_client::LanguageModelProvider::XAi => language_model::X_AI_PROVIDER_NAME,
    }
}

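/// Parses a streaming response body as newline-delimited JSON. When the
/// server supports status messages, each line is a `CompletionEvent<T>`
/// envelope; otherwise each line is a bare provider event `T`.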
fn response_lines<T: DeserializeOwned>(
    response: Response<AsyncBody>,
    includes_status_messages: bool,
) -> impl Stream<Item = Result<CompletionEvent<T>>> {
    futures::stream::try_unfold(
        (String::new(), BufReader::new(response.into_body())),
        move |(mut line, mut body)| async move {
            match body.read_line(&mut line).await {
                Ok(0) => Ok(None),
                Ok(_) => {
                    let event = if includes_status_messages {
                        serde_json::from_str::<CompletionEvent<T>>(&line)?
                    } else {
                        CompletionEvent::Event(serde_json::from_str::<T>(&line)?)
                    };

                    line.clear();
                    Ok(Some((event, (line, body))))
                }
                Err(e) => Err(e.into()),
            }
        },
    )
}

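/// The Zed AI configuration panel, showing sign-in, trial, and subscription
/// state for the current user.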
#[derive(IntoElement, RegisterComponent)]
struct ZedAiConfiguration {
    is_connected: bool,
    plan: Option<Plan>,
    subscription_period: Option<(DateTime<Utc>, DateTime<Utc>)>,
    eligible_for_trial: bool,
    account_too_young: bool,
    sign_in_callback: Arc<dyn Fn(&mut Window, &mut App) + Send + Sync>,
}

impl RenderOnce for ZedAiConfiguration {
    fn render(self, _window: &mut Window, _cx: &mut App) -> impl IntoElement {
        let is_pro = self.plan.is_some_and(|plan| plan == Plan::ZedPro);
        let subscription_text = match (self.plan, self.subscription_period) {
            (Some(Plan::ZedPro), Some(_)) => {
                "You have access to Zed's hosted models through your Pro subscription."
            }
            (Some(Plan::ZedProTrial), Some(_)) => {
                "You have access to Zed's hosted models through your Pro trial."
            }
            _ => {
                if self.eligible_for_trial {
                    "Subscribe for access to Zed's hosted models. Start with a 14-day free trial."
                } else {
                    "Subscribe for access to Zed's hosted models."
                }
            }
        };

        let manage_subscription_button = if is_pro {
            Button::new("manage_settings", "Manage Subscription")
                .full_width()
                .style(ButtonStyle::Tinted(TintColor::Accent))
                .on_click(|_, _, cx| cx.open_url(&zed_urls::account_url(cx)))
                .into_any_element()
        } else if self.plan.is_none() || self.eligible_for_trial {
            Button::new("start_trial", "Start 14-day Free Pro Trial")
                .full_width()
                .style(ButtonStyle::Tinted(TintColor::Accent))
                .on_click(|_, _, cx| cx.open_url(&zed_urls::start_trial_url(cx)))
                .into_any_element()
        } else {
            Button::new("upgrade", "Upgrade to Pro")
                .full_width()
                .style(ButtonStyle::Tinted(TintColor::Accent))
                .on_click(|_, _, cx| cx.open_url(&zed_urls::upgrade_to_zed_pro_url(cx)))
                .into_any_element()
        };

        if !self.is_connected {
            return v_flex()
                .gap_2()
                .child(Label::new(
                    "Sign in to access Zed's complete agentic experience with hosted models.",
                ))
                .child(
                    Button::new("sign_in", "Sign In to use Zed AI")
                        .icon_color(Color::Muted)
                        .icon(IconName::Github)
                        .icon_size(IconSize::Small)
                        .icon_position(IconPosition::Start)
                        .full_width()
                        .on_click({
                            let callback = self.sign_in_callback.clone();
                            move |_, window, cx| (callback)(window, cx)
                        }),
                );
        }

        v_flex().gap_2().w_full().map(|this| {
            if self.account_too_young {
                this.child(YoungAccountBanner).child(
                    Button::new("upgrade", "Upgrade to Pro")
                        .style(ButtonStyle::Tinted(TintColor::Accent))
                        .full_width()
                        .on_click(|_, _, cx| cx.open_url(&zed_urls::upgrade_to_zed_pro_url(cx))),
                )
            } else {
                this.text_sm()
                    .child(subscription_text)
                    .child(manage_subscription_button)
            }
        })
    }
}

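/// Entity that renders `ZedAiConfiguration` from the current provider state
/// and wires up the sign-in callback.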
struct ConfigurationView {
    state: Entity<State>,
    sign_in_callback: Arc<dyn Fn(&mut Window, &mut App) + Send + Sync>,
}

impl ConfigurationView {
    fn new(state: Entity<State>) -> Self {
        let sign_in_callback = Arc::new({
            let state = state.clone();
            move |_window: &mut Window, cx: &mut App| {
                state.update(cx, |state, cx| {
                    state.authenticate(cx).detach_and_log_err(cx);
                });
            }
        });

        Self {
            state,
            sign_in_callback,
        }
    }
}

impl Render for ConfigurationView {
    fn render(&mut self, _: &mut Window, cx: &mut Context<Self>) -> impl IntoElement {
        let state = self.state.read(cx);
        let user_store = state.user_store.read(cx);

        ZedAiConfiguration {
            is_connected: !state.is_signed_out(cx),
            plan: user_store.plan(),
            subscription_period: user_store.subscription_period(),
            eligible_for_trial: user_store.trial_started_at().is_none(),
            account_too_young: user_store.account_too_young(),
            sign_in_callback: self.sign_in_callback.clone(),
        }
    }
}

impl Component for ZedAiConfiguration {
    fn name() -> &'static str {
        "AI Configuration Content"
    }

    fn sort_name() -> &'static str {
        "AI Configuration Content"
    }

    fn scope() -> ComponentScope {
        ComponentScope::Onboarding
    }

    fn preview(_window: &mut Window, _cx: &mut App) -> Option<AnyElement> {
        fn configuration(
            is_connected: bool,
            plan: Option<Plan>,
            eligible_for_trial: bool,
            account_too_young: bool,
        ) -> AnyElement {
            ZedAiConfiguration {
                is_connected,
                plan,
                subscription_period: plan
                    .is_some()
                    .then(|| (Utc::now(), Utc::now() + chrono::Duration::days(7))),
                eligible_for_trial,
                account_too_young,
                sign_in_callback: Arc::new(|_, _| {}),
            }
            .into_any_element()
        }

        Some(
            v_flex()
                .p_4()
                .gap_4()
                .children(vec![
                    single_example("Not connected", configuration(false, None, false, false)),
                    single_example(
                        "Accept Terms of Service",
                        configuration(true, None, true, false),
                    ),
                    single_example(
                        "No Plan - Not eligible for trial",
                        configuration(true, None, false, false),
                    ),
                    single_example(
                        "No Plan - Eligible for trial",
                        configuration(true, None, true, false),
                    ),
                    single_example(
                        "Free Plan",
                        configuration(true, Some(Plan::ZedFree), true, false),
                    ),
                    single_example(
                        "Zed Pro Trial Plan",
                        configuration(true, Some(Plan::ZedProTrial), true, false),
                    ),
                    single_example(
                        "Zed Pro Plan",
                        configuration(true, Some(Plan::ZedPro), true, false),
                    ),
                ])
                .into_any_element(),
        )
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use http_client::http::{HeaderMap, StatusCode};
    use language_model::LanguageModelCompletionError;

    #[test]
    fn test_api_error_conversion_with_upstream_http_error() {
        // An upstream_http_error with a 503 upstream status should surface as
        // an UpstreamProviderError.
        let error_body = r#"{"code":"upstream_http_error","message":"Received an error from the Anthropic API: upstream connect error or disconnect/reset before headers, reset reason: connection timeout","upstream_status":503}"#;

        let api_error = ApiError {
            status: StatusCode::INTERNAL_SERVER_ERROR,
            body: error_body.to_string(),
            headers: HeaderMap::new(),
        };

        let completion_error: LanguageModelCompletionError = api_error.into();

        match completion_error {
            LanguageModelCompletionError::UpstreamProviderError { message, .. } => {
                assert_eq!(
                    message,
                    "Received an error from the Anthropic API: upstream connect error or disconnect/reset before headers, reset reason: connection timeout"
                );
            }
            _ => panic!(
                "Expected UpstreamProviderError for upstream 503, got: {:?}",
                completion_error
            ),
        }

        // An upstream_http_error with a 500 upstream status should also
        // surface as an UpstreamProviderError.
        let error_body = r#"{"code":"upstream_http_error","message":"Received an error from the OpenAI API: internal server error","upstream_status":500}"#;

        let api_error = ApiError {
            status: StatusCode::INTERNAL_SERVER_ERROR,
            body: error_body.to_string(),
            headers: HeaderMap::new(),
        };

        let completion_error: LanguageModelCompletionError = api_error.into();

        match completion_error {
            LanguageModelCompletionError::UpstreamProviderError { message, .. } => {
                assert_eq!(
                    message,
                    "Received an error from the OpenAI API: internal server error"
                );
            }
            _ => panic!(
                "Expected UpstreamProviderError for upstream 500, got: {:?}",
                completion_error
            ),
        }

        // An upstream_http_error with a 429 upstream status should also
        // surface as an UpstreamProviderError.
        let error_body = r#"{"code":"upstream_http_error","message":"Received an error from the Google API: rate limit exceeded","upstream_status":429}"#;

        let api_error = ApiError {
            status: StatusCode::INTERNAL_SERVER_ERROR,
            body: error_body.to_string(),
            headers: HeaderMap::new(),
        };

        let completion_error: LanguageModelCompletionError = api_error.into();

        match completion_error {
            LanguageModelCompletionError::UpstreamProviderError { message, .. } => {
                assert_eq!(
                    message,
                    "Received an error from the Google API: rate limit exceeded"
                );
            }
            _ => panic!(
                "Expected UpstreamProviderError for upstream 429, got: {:?}",
                completion_error
            ),
        }

        // A plain 500 without an upstream_http_error body should map to
        // ApiInternalServerError attributed to Zed.
        let error_body = "Regular internal server error";

        let api_error = ApiError {
            status: StatusCode::INTERNAL_SERVER_ERROR,
            body: error_body.to_string(),
            headers: HeaderMap::new(),
        };

        let completion_error: LanguageModelCompletionError = api_error.into();

        match completion_error {
            LanguageModelCompletionError::ApiInternalServerError { provider, message } => {
                assert_eq!(provider, PROVIDER_NAME);
                assert_eq!(message, "Regular internal server error");
            }
            _ => panic!(
                "Expected ApiInternalServerError for regular 500, got: {:?}",
                completion_error
            ),
        }

        // The upstream_http_429 code format should convert to an
        // UpstreamProviderError, parsing the status out of the code and
        // preserving retry_after.
        let error_body = r#"{"code":"upstream_http_429","message":"Upstream Anthropic rate limit exceeded.","retry_after":30.5}"#;

        let api_error = ApiError {
            status: StatusCode::INTERNAL_SERVER_ERROR,
            body: error_body.to_string(),
            headers: HeaderMap::new(),
        };

        let completion_error: LanguageModelCompletionError = api_error.into();

        match completion_error {
            LanguageModelCompletionError::UpstreamProviderError {
                message,
                status,
                retry_after,
            } => {
                assert_eq!(message, "Upstream Anthropic rate limit exceeded.");
                assert_eq!(status, StatusCode::TOO_MANY_REQUESTS);
                assert_eq!(retry_after, Some(Duration::from_secs_f64(30.5)));
            }
            _ => panic!(
                "Expected UpstreamProviderError for upstream_http_429, got: {:?}",
                completion_error
            ),
        }

        // Invalid JSON in the error body should fall back to regular
        // status-based error handling.
        let error_body = "Not JSON at all";

        let api_error = ApiError {
            status: StatusCode::INTERNAL_SERVER_ERROR,
            body: error_body.to_string(),
            headers: HeaderMap::new(),
        };

        let completion_error: LanguageModelCompletionError = api_error.into();

        match completion_error {
            LanguageModelCompletionError::ApiInternalServerError { provider, .. } => {
                assert_eq!(provider, PROVIDER_NAME);
            }
            _ => panic!(
                "Expected ApiInternalServerError for invalid JSON, got: {:?}",
                completion_error
            ),
        }
    }
}
1347}