cloud.rs

use ai_onboarding::YoungAccountBanner;
use anthropic::AnthropicModelMode;
use anyhow::{Context as _, Result, anyhow};
use chrono::{DateTime, Utc};
use client::{Client, UserStore, zed_urls};
use cloud_llm_client::{
    CLIENT_SUPPORTS_STATUS_MESSAGES_HEADER_NAME, CLIENT_SUPPORTS_X_AI_HEADER_NAME, CompletionBody,
    CompletionEvent, CountTokensBody, CountTokensResponse, ListModelsResponse, Plan, PlanV2,
    SERVER_SUPPORTS_STATUS_MESSAGES_HEADER_NAME, ZED_VERSION_HEADER_NAME,
};
use feature_flags::{CloudThinkingToggleFeatureFlag, FeatureFlagAppExt as _};
use futures::{
    AsyncBufReadExt, FutureExt, Stream, StreamExt, future::BoxFuture, stream::BoxStream,
};
use google_ai::GoogleModelMode;
use gpui::{AnyElement, AnyView, App, AsyncApp, Context, Entity, Subscription, Task};
use http_client::http::{HeaderMap, HeaderValue};
use http_client::{AsyncBody, HttpClient, HttpRequestExt, Method, Response, StatusCode};
use language_model::{
    AuthenticateError, IconOrSvg, LanguageModel, LanguageModelCacheConfiguration,
    LanguageModelCompletionError, LanguageModelCompletionEvent, LanguageModelId, LanguageModelName,
    LanguageModelProvider, LanguageModelProviderId, LanguageModelProviderName,
    LanguageModelProviderState, LanguageModelRequest, LanguageModelToolChoice,
    LanguageModelToolSchemaFormat, LlmApiToken, NeedsLlmTokenRefresh, PaymentRequiredError,
    RateLimiter, RefreshLlmTokenListener,
};
use release_channel::AppVersion;
use schemars::JsonSchema;
use semver::Version;
use serde::{Deserialize, Serialize, de::DeserializeOwned};
use settings::SettingsStore;
pub use settings::ZedDotDevAvailableModel as AvailableModel;
pub use settings::ZedDotDevAvailableProvider as AvailableProvider;
use smol::io::{AsyncReadExt, BufReader};
use std::pin::Pin;
use std::sync::Arc;
use std::time::Duration;
use thiserror::Error;
use ui::{TintColor, prelude::*};
use util::{ResultExt as _, maybe};

use crate::provider::anthropic::{
    AnthropicEventMapper, count_anthropic_tokens_with_tiktoken, into_anthropic,
};
use crate::provider::google::{GoogleEventMapper, into_google};
use crate::provider::open_ai::{
    OpenAiEventMapper, OpenAiResponseEventMapper, count_open_ai_tokens, into_open_ai,
    into_open_ai_response,
};
use crate::provider::x_ai::count_xai_tokens;

const PROVIDER_ID: LanguageModelProviderId = language_model::ZED_CLOUD_PROVIDER_ID;
const PROVIDER_NAME: LanguageModelProviderName = language_model::ZED_CLOUD_PROVIDER_NAME;

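/// Settings for the Zed-hosted provider: the extra models a user has made
/// available through their settings file.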
#[derive(Default, Clone, Debug, PartialEq)]
pub struct ZedDotDevSettings {
    pub available_models: Vec<AvailableModel>,
}
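
/// Whether a model runs in its default mode or with extended thinking
/// enabled.
///
/// With the `serde` attributes below, a thinking variant round-trips through
/// JSON as follows (a sketch of the shape; 4096 is an arbitrary budget, not a
/// recommendation):
///
/// ```json
/// { "type": "thinking", "budget_tokens": 4096 }
/// ```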
#[derive(Default, Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema)]
#[serde(tag = "type", rename_all = "lowercase")]
pub enum ModelMode {
    #[default]
    Default,
    Thinking {
        /// The maximum number of tokens to use for reasoning. Must be lower than the model's `max_output_tokens`.
        budget_tokens: Option<u32>,
    },
}

impl From<ModelMode> for AnthropicModelMode {
    fn from(value: ModelMode) -> Self {
        match value {
            ModelMode::Default => AnthropicModelMode::Default,
            ModelMode::Thinking { budget_tokens } => AnthropicModelMode::Thinking { budget_tokens },
        }
    }
}

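/// The Zed-hosted language model provider. It fetches the available model
/// list from the cloud LLM service and proxies completion requests to
/// upstream providers on the user's behalf.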
pub struct CloudLanguageModelProvider {
    client: Arc<Client>,
    state: Entity<State>,
    _maintain_client_status: Task<()>,
}

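/// Observable provider state: the signed-in client, the cached model list,
/// and the subscriptions that keep both in sync with the server.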
pub struct State {
    client: Arc<Client>,
    llm_api_token: LlmApiToken,
    user_store: Entity<UserStore>,
    status: client::Status,
    models: Vec<Arc<cloud_llm_client::LanguageModel>>,
    default_model: Option<Arc<cloud_llm_client::LanguageModel>>,
    default_fast_model: Option<Arc<cloud_llm_client::LanguageModel>>,
    recommended_models: Vec<Arc<cloud_llm_client::LanguageModel>>,
    _fetch_models_task: Task<()>,
    _settings_subscription: Subscription,
    _llm_token_subscription: Subscription,
}

impl State {
    fn new(
        client: Arc<Client>,
        user_store: Entity<UserStore>,
        status: client::Status,
        cx: &mut Context<Self>,
    ) -> Self {
        let refresh_llm_token_listener = RefreshLlmTokenListener::global(cx);
        let mut current_user = user_store.read(cx).watch_current_user();
        Self {
            client: client.clone(),
            llm_api_token: LlmApiToken::default(),
            user_store,
            status,
            models: Vec::new(),
            default_model: None,
            default_fast_model: None,
            recommended_models: Vec::new(),
            _fetch_models_task: cx.spawn(async move |this, cx| {
                maybe!(async move {
                    let (client, llm_api_token) = this
                        .read_with(cx, |this, _cx| (client.clone(), this.llm_api_token.clone()))?;

                    while current_user.borrow().is_none() {
                        current_user.next().await;
                    }

                    let response =
                        Self::fetch_models(client.clone(), llm_api_token.clone()).await?;
                    this.update(cx, |this, cx| this.update_models(response, cx))?;
                    anyhow::Ok(())
                })
                .await
                .context("failed to fetch Zed models")
                .log_err();
            }),
            _settings_subscription: cx.observe_global::<SettingsStore>(|_, cx| {
                cx.notify();
            }),
            _llm_token_subscription: cx.subscribe(
                &refresh_llm_token_listener,
                move |this, _listener, _event, cx| {
                    let client = this.client.clone();
                    let llm_api_token = this.llm_api_token.clone();
                    cx.spawn(async move |this, cx| {
                        llm_api_token.refresh(&client).await?;
                        let response = Self::fetch_models(client, llm_api_token).await?;
                        this.update(cx, |this, cx| {
                            this.update_models(response, cx);
                        })
                    })
                    .detach_and_log_err(cx);
                },
            ),
        }
    }

    fn is_signed_out(&self, cx: &App) -> bool {
        self.user_store.read(cx).current_user().is_none()
    }

    fn authenticate(&self, cx: &mut Context<Self>) -> Task<Result<()>> {
        let client = self.client.clone();
        cx.spawn(async move |state, cx| {
            client.sign_in_with_optional_connect(true, cx).await?;
            state.update(cx, |_, cx| cx.notify())
        })
    }

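    /// Rebuilds the cached model list from a `ListModelsResponse`, resolving
    /// the default, fast, and recommended models against the (possibly
    /// thinking-variant-expanded) list.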
    fn update_models(&mut self, response: ListModelsResponse, cx: &mut Context<Self>) {
        let is_thinking_toggle_enabled = cx.has_flag::<CloudThinkingToggleFeatureFlag>();

        let mut models = Vec::new();

        for model in response.models {
            models.push(Arc::new(model.clone()));

            if !is_thinking_toggle_enabled {
                // Right now we represent thinking variants of models as separate models on the client,
                // so we need to insert variants for any model that supports thinking.
                if model.supports_thinking {
                    models.push(Arc::new(cloud_llm_client::LanguageModel {
                        id: cloud_llm_client::LanguageModelId(
                            format!("{}-thinking", model.id).into(),
                        ),
                        display_name: format!("{} Thinking", model.display_name),
                        ..model
                    }));
                }
            }
        }

        self.default_model = models
            .iter()
            .find(|model| {
                response
                    .default_model
                    .as_ref()
                    .is_some_and(|default_model_id| &model.id == default_model_id)
            })
            .cloned();
        self.default_fast_model = models
            .iter()
            .find(|model| {
                response
                    .default_fast_model
                    .as_ref()
                    .is_some_and(|default_fast_model_id| &model.id == default_fast_model_id)
            })
            .cloned();
        self.recommended_models = response
            .recommended_models
            .iter()
            .filter_map(|id| models.iter().find(|model| &model.id == id))
            .cloned()
            .collect();
        self.models = models;
        cx.notify();
    }

    async fn fetch_models(
        client: Arc<Client>,
        llm_api_token: LlmApiToken,
    ) -> Result<ListModelsResponse> {
        let http_client = &client.http_client();
        let token = llm_api_token.acquire(&client).await?;

        let request = http_client::Request::builder()
            .method(Method::GET)
            .header(CLIENT_SUPPORTS_X_AI_HEADER_NAME, "true")
            .uri(http_client.build_zed_llm_url("/models", &[])?.as_ref())
            .header("Authorization", format!("Bearer {token}"))
            .body(AsyncBody::empty())?;
        let mut response = http_client
            .send(request)
            .await
            .context("failed to send list models request")?;

        if response.status().is_success() {
            let mut body = String::new();
            response.body_mut().read_to_string(&mut body).await?;
            Ok(serde_json::from_str(&body)?)
        } else {
            let mut body = String::new();
            response.body_mut().read_to_string(&mut body).await?;
            anyhow::bail!(
                "error listing models.\nStatus: {:?}\nBody: {body}",
                response.status(),
            );
        }
    }
}

impl CloudLanguageModelProvider {
    pub fn new(user_store: Entity<UserStore>, client: Arc<Client>, cx: &mut App) -> Self {
        let mut status_rx = client.status();
        let status = *status_rx.borrow();

        let state = cx.new(|cx| State::new(client.clone(), user_store.clone(), status, cx));

        let state_ref = state.downgrade();
        let maintain_client_status = cx.spawn(async move |cx| {
            while let Some(status) = status_rx.next().await {
                if let Some(this) = state_ref.upgrade() {
                    _ = this.update(cx, |this, cx| {
                        if this.status != status {
                            this.status = status;
                            cx.notify();
                        }
                    });
                } else {
                    break;
                }
            }
        });

        Self {
            client,
            state,
            _maintain_client_status: maintain_client_status,
        }
    }

    fn create_language_model(
        &self,
        model: Arc<cloud_llm_client::LanguageModel>,
        llm_api_token: LlmApiToken,
    ) -> Arc<dyn LanguageModel> {
        Arc::new(CloudLanguageModel {
            id: LanguageModelId(SharedString::from(model.id.0.clone())),
            model,
            llm_api_token,
            client: self.client.clone(),
            request_limiter: RateLimiter::new(4),
        })
    }
}

impl LanguageModelProviderState for CloudLanguageModelProvider {
    type ObservableEntity = State;

    fn observable_entity(&self) -> Option<Entity<Self::ObservableEntity>> {
        Some(self.state.clone())
    }
}

impl LanguageModelProvider for CloudLanguageModelProvider {
    fn id(&self) -> LanguageModelProviderId {
        PROVIDER_ID
    }

    fn name(&self) -> LanguageModelProviderName {
        PROVIDER_NAME
    }

    fn icon(&self) -> IconOrSvg {
        IconOrSvg::Icon(IconName::AiZed)
    }

    fn default_model(&self, cx: &App) -> Option<Arc<dyn LanguageModel>> {
        let default_model = self.state.read(cx).default_model.clone()?;
        let llm_api_token = self.state.read(cx).llm_api_token.clone();
        Some(self.create_language_model(default_model, llm_api_token))
    }

    fn default_fast_model(&self, cx: &App) -> Option<Arc<dyn LanguageModel>> {
        let default_fast_model = self.state.read(cx).default_fast_model.clone()?;
        let llm_api_token = self.state.read(cx).llm_api_token.clone();
        Some(self.create_language_model(default_fast_model, llm_api_token))
    }

    fn recommended_models(&self, cx: &App) -> Vec<Arc<dyn LanguageModel>> {
        let llm_api_token = self.state.read(cx).llm_api_token.clone();
        self.state
            .read(cx)
            .recommended_models
            .iter()
            .cloned()
            .map(|model| self.create_language_model(model, llm_api_token.clone()))
            .collect()
    }

    fn provided_models(&self, cx: &App) -> Vec<Arc<dyn LanguageModel>> {
        let llm_api_token = self.state.read(cx).llm_api_token.clone();
        self.state
            .read(cx)
            .models
            .iter()
            .cloned()
            .map(|model| self.create_language_model(model, llm_api_token.clone()))
            .collect()
    }

    fn is_authenticated(&self, cx: &App) -> bool {
        let state = self.state.read(cx);
        !state.is_signed_out(cx)
    }

    fn authenticate(&self, _cx: &mut App) -> Task<Result<(), AuthenticateError>> {
        Task::ready(Ok(()))
    }

    fn configuration_view(
        &self,
        _target_agent: language_model::ConfigurationViewTargetAgent,
        _: &mut Window,
        cx: &mut App,
    ) -> AnyView {
        cx.new(|_| ConfigurationView::new(self.state.clone()))
            .into()
    }

    fn reset_credentials(&self, _cx: &mut App) -> Task<Result<()>> {
        Task::ready(Ok(()))
    }
}

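/// A single Zed-hosted model. Requests are sent to the cloud LLM service,
/// which forwards them to the model's upstream provider.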
pub struct CloudLanguageModel {
    id: LanguageModelId,
    model: Arc<cloud_llm_client::LanguageModel>,
    llm_api_token: LlmApiToken,
    client: Arc<Client>,
    request_limiter: RateLimiter,
}

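/// A successful completion response, along with whether the server opted in
/// to interleaving status messages with the event stream.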
struct PerformLlmCompletionResponse {
    response: Response<AsyncBody>,
    includes_status_messages: bool,
}

impl CloudLanguageModel {
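    /// Sends a completion request to the cloud LLM service, refreshing the
    /// LLM token and retrying once if the server reports it has expired.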
    async fn perform_llm_completion(
        client: Arc<Client>,
        llm_api_token: LlmApiToken,
        app_version: Option<Version>,
        body: CompletionBody,
    ) -> Result<PerformLlmCompletionResponse> {
        let http_client = &client.http_client();

        let mut token = llm_api_token.acquire(&client).await?;
        let mut refreshed_token = false;

        loop {
            let request = http_client::Request::builder()
                .method(Method::POST)
                .uri(http_client.build_zed_llm_url("/completions", &[])?.as_ref())
                .when_some(app_version.as_ref(), |builder, app_version| {
                    builder.header(ZED_VERSION_HEADER_NAME, app_version.to_string())
                })
                .header("Content-Type", "application/json")
                .header("Authorization", format!("Bearer {token}"))
                .header(CLIENT_SUPPORTS_STATUS_MESSAGES_HEADER_NAME, "true")
                .body(serde_json::to_string(&body)?.into())?;

            let mut response = http_client.send(request).await?;
            let status = response.status();
            if status.is_success() {
                let includes_status_messages = response
                    .headers()
                    .get(SERVER_SUPPORTS_STATUS_MESSAGES_HEADER_NAME)
                    .is_some();

                return Ok(PerformLlmCompletionResponse {
                    response,
                    includes_status_messages,
                });
            }

            if !refreshed_token && response.needs_llm_token_refresh() {
                token = llm_api_token.refresh(&client).await?;
                refreshed_token = true;
                continue;
            }

            if status == StatusCode::PAYMENT_REQUIRED {
                return Err(anyhow!(PaymentRequiredError));
            }

            let mut body = String::new();
            let headers = response.headers().clone();
            response.body_mut().read_to_string(&mut body).await?;
            return Err(anyhow!(ApiError {
                status,
                body,
                headers
            }));
        }
    }
}

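/// A non-success HTTP response from the cloud LLM service, kept verbatim so
/// it can be mapped to a `LanguageModelCompletionError` downstream.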
#[derive(Debug, Error)]
#[error("cloud language model request failed with status {status}: {body}")]
struct ApiError {
    status: StatusCode,
    body: String,
    headers: HeaderMap<HeaderValue>,
}

/// Represents error responses from Zed's cloud API.
///
/// Example JSON for an upstream HTTP error:
/// ```json
/// {
///   "code": "upstream_http_error",
///   "message": "Received an error from the Anthropic API: upstream connect error or disconnect/reset before headers, reset reason: connection timeout",
///   "upstream_status": 503
/// }
/// ```
#[derive(Debug, serde::Deserialize)]
struct CloudApiError {
    code: String,
    message: String,
    #[serde(default)]
    #[serde(deserialize_with = "deserialize_optional_status_code")]
    upstream_status: Option<StatusCode>,
    #[serde(default)]
    retry_after: Option<f64>,
}

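/// Deserializes an optional numeric status code, dropping values that are not
/// valid HTTP status codes instead of failing deserialization.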
fn deserialize_optional_status_code<'de, D>(deserializer: D) -> Result<Option<StatusCode>, D::Error>
where
    D: serde::Deserializer<'de>,
{
    let opt: Option<u16> = Option::deserialize(deserializer)?;
    Ok(opt.and_then(|code| StatusCode::from_u16(code).ok()))
}

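// Attribute `upstream_http_*` errors to the upstream provider; everything
// else is mapped from the response's own status and attributed to Zed.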
impl From<ApiError> for LanguageModelCompletionError {
    fn from(error: ApiError) -> Self {
        if let Ok(cloud_error) = serde_json::from_str::<CloudApiError>(&error.body) {
            if cloud_error.code.starts_with("upstream_http_") {
                let status = if let Some(status) = cloud_error.upstream_status {
                    status
                } else if cloud_error.code.ends_with("_error") {
                    error.status
                } else {
                    // The status code may instead be embedded in the code string
                    // (e.g. "upstream_http_429"); parse it out, falling back to
                    // the response's own status.
                    cloud_error
                        .code
                        .strip_prefix("upstream_http_")
                        .and_then(|code_str| code_str.parse::<u16>().ok())
                        .and_then(|code| StatusCode::from_u16(code).ok())
                        .unwrap_or(error.status)
                };

                return LanguageModelCompletionError::UpstreamProviderError {
                    message: cloud_error.message,
                    status,
                    retry_after: cloud_error.retry_after.map(Duration::from_secs_f64),
                };
            }

            return LanguageModelCompletionError::from_http_status(
                PROVIDER_NAME,
                error.status,
                cloud_error.message,
                None,
            );
        }

        let retry_after = None;
        LanguageModelCompletionError::from_http_status(
            PROVIDER_NAME,
            error.status,
            error.body,
            retry_after,
        )
    }
}

impl LanguageModel for CloudLanguageModel {
    fn id(&self) -> LanguageModelId {
        self.id.clone()
    }

    fn name(&self) -> LanguageModelName {
        LanguageModelName::from(self.model.display_name.clone())
    }

    fn provider_id(&self) -> LanguageModelProviderId {
        PROVIDER_ID
    }

    fn provider_name(&self) -> LanguageModelProviderName {
        PROVIDER_NAME
    }

    fn upstream_provider_id(&self) -> LanguageModelProviderId {
        use cloud_llm_client::LanguageModelProvider::*;
        match self.model.provider {
            Anthropic => language_model::ANTHROPIC_PROVIDER_ID,
            OpenAi => language_model::OPEN_AI_PROVIDER_ID,
            Google => language_model::GOOGLE_PROVIDER_ID,
            XAi => language_model::X_AI_PROVIDER_ID,
        }
    }

    fn upstream_provider_name(&self) -> LanguageModelProviderName {
        use cloud_llm_client::LanguageModelProvider::*;
        match self.model.provider {
            Anthropic => language_model::ANTHROPIC_PROVIDER_NAME,
            OpenAi => language_model::OPEN_AI_PROVIDER_NAME,
            Google => language_model::GOOGLE_PROVIDER_NAME,
            XAi => language_model::X_AI_PROVIDER_NAME,
        }
    }

    fn supports_tools(&self) -> bool {
        self.model.supports_tools
    }

    fn supports_images(&self) -> bool {
        self.model.supports_images
    }

    fn supports_thinking(&self) -> bool {
        self.model.supports_thinking
    }

    fn supports_streaming_tools(&self) -> bool {
        self.model.supports_streaming_tools
    }

    fn supports_tool_choice(&self, choice: LanguageModelToolChoice) -> bool {
        match choice {
            LanguageModelToolChoice::Auto
            | LanguageModelToolChoice::Any
            | LanguageModelToolChoice::None => true,
        }
    }

    fn supports_split_token_display(&self) -> bool {
        use cloud_llm_client::LanguageModelProvider::*;
        matches!(self.model.provider, OpenAi)
    }

    fn telemetry_id(&self) -> String {
        format!("zed.dev/{}", self.model.id)
    }

    fn tool_input_format(&self) -> LanguageModelToolSchemaFormat {
        match self.model.provider {
            cloud_llm_client::LanguageModelProvider::Anthropic
            | cloud_llm_client::LanguageModelProvider::OpenAi
            | cloud_llm_client::LanguageModelProvider::XAi => {
                LanguageModelToolSchemaFormat::JsonSchema
            }
            cloud_llm_client::LanguageModelProvider::Google => {
                LanguageModelToolSchemaFormat::JsonSchemaSubset
            }
        }
    }

    fn max_token_count(&self) -> u64 {
        self.model.max_token_count as u64
    }

    fn max_output_tokens(&self) -> Option<u64> {
        Some(self.model.max_output_tokens as u64)
    }

    fn cache_configuration(&self) -> Option<LanguageModelCacheConfiguration> {
        match &self.model.provider {
            cloud_llm_client::LanguageModelProvider::Anthropic => {
                Some(LanguageModelCacheConfiguration {
                    min_total_token: 2_048,
                    should_speculate: true,
                    max_cache_anchors: 4,
                })
            }
            cloud_llm_client::LanguageModelProvider::OpenAi
            | cloud_llm_client::LanguageModelProvider::XAi
            | cloud_llm_client::LanguageModelProvider::Google => None,
        }
    }

    fn count_tokens(
        &self,
        request: LanguageModelRequest,
        cx: &App,
    ) -> BoxFuture<'static, Result<u64>> {
        match self.model.provider {
            cloud_llm_client::LanguageModelProvider::Anthropic => cx
                .background_spawn(async move { count_anthropic_tokens_with_tiktoken(request) })
                .boxed(),
            cloud_llm_client::LanguageModelProvider::OpenAi => {
                let model = match open_ai::Model::from_id(&self.model.id.0) {
                    Ok(model) => model,
                    Err(err) => return async move { Err(anyhow!(err)) }.boxed(),
                };
                count_open_ai_tokens(request, model, cx)
            }
            cloud_llm_client::LanguageModelProvider::XAi => {
                let model = match x_ai::Model::from_id(&self.model.id.0) {
                    Ok(model) => model,
                    Err(err) => return async move { Err(anyhow!(err)) }.boxed(),
                };
                count_xai_tokens(request, model, cx)
            }
            cloud_llm_client::LanguageModelProvider::Google => {
                let client = self.client.clone();
                let llm_api_token = self.llm_api_token.clone();
                let model_id = self.model.id.to_string();
                let generate_content_request =
                    into_google(request, model_id.clone(), GoogleModelMode::Default);
                async move {
                    let http_client = &client.http_client();
                    let token = llm_api_token.acquire(&client).await?;

                    let request_body = CountTokensBody {
                        provider: cloud_llm_client::LanguageModelProvider::Google,
                        model: model_id,
                        provider_request: serde_json::to_value(&google_ai::CountTokensRequest {
                            generate_content_request,
                        })?,
                    };
                    let request = http_client::Request::builder()
                        .method(Method::POST)
                        .uri(
                            http_client
                                .build_zed_llm_url("/count_tokens", &[])?
                                .as_ref(),
                        )
                        .header("Content-Type", "application/json")
                        .header("Authorization", format!("Bearer {token}"))
                        .body(serde_json::to_string(&request_body)?.into())?;
                    let mut response = http_client.send(request).await?;
                    let status = response.status();
                    let headers = response.headers().clone();
                    let mut response_body = String::new();
                    response
                        .body_mut()
                        .read_to_string(&mut response_body)
                        .await?;

                    if status.is_success() {
                        let response_body: CountTokensResponse =
                            serde_json::from_str(&response_body)?;

                        Ok(response_body.tokens as u64)
                    } else {
                        Err(anyhow!(ApiError {
                            status,
                            body: response_body,
                            headers
                        }))
                    }
                }
                .boxed()
            }
        }
    }

    fn stream_completion(
        &self,
        request: LanguageModelRequest,
        cx: &AsyncApp,
    ) -> BoxFuture<
        'static,
        Result<
            BoxStream<'static, Result<LanguageModelCompletionEvent, LanguageModelCompletionError>>,
            LanguageModelCompletionError,
        >,
    > {
        let thread_id = request.thread_id.clone();
        let prompt_id = request.prompt_id.clone();
        let intent = request.intent;
        let app_version = cx.update(|cx| AppVersion::global(cx)).ok();
        let thinking_allowed = request.thinking_allowed;
        let is_thinking_toggle_enabled = cx
            .update(|cx| cx.has_flag::<CloudThinkingToggleFeatureFlag>())
            .unwrap_or(false);
        let enable_thinking = if is_thinking_toggle_enabled {
            thinking_allowed && self.model.supports_thinking
        } else {
            thinking_allowed && self.model.id.0.ends_with("-thinking")
        };
        let provider_name = provider_name(&self.model.provider);
        match self.model.provider {
            cloud_llm_client::LanguageModelProvider::Anthropic => {
                let request = into_anthropic(
                    request,
                    self.model.id.to_string(),
                    1.0,
                    self.model.max_output_tokens as u64,
                    if enable_thinking {
                        AnthropicModelMode::Thinking {
                            budget_tokens: Some(4_096),
                        }
                    } else {
                        AnthropicModelMode::Default
                    },
                );
                let client = self.client.clone();
                let llm_api_token = self.llm_api_token.clone();
                let future = self.request_limiter.stream(async move {
                    let PerformLlmCompletionResponse {
                        response,
                        includes_status_messages,
                    } = Self::perform_llm_completion(
                        client.clone(),
                        llm_api_token,
                        app_version,
                        CompletionBody {
                            thread_id,
                            prompt_id,
                            intent,
                            provider: cloud_llm_client::LanguageModelProvider::Anthropic,
                            model: request.model.clone(),
                            provider_request: serde_json::to_value(&request)
                                .map_err(|e| anyhow!(e))?,
                        },
                    )
                    .await
                    .map_err(|err| match err.downcast::<ApiError>() {
                        Ok(api_err) => anyhow!(LanguageModelCompletionError::from(api_err)),
                        Err(err) => anyhow!(err),
                    })?;

                    let mut mapper = AnthropicEventMapper::new();
                    Ok(map_cloud_completion_events(
                        Box::pin(response_lines(response, includes_status_messages)),
                        &provider_name,
                        move |event| mapper.map_event(event),
                    ))
                });
                async move { Ok(future.await?.boxed()) }.boxed()
            }
            cloud_llm_client::LanguageModelProvider::OpenAi => {
                let client = self.client.clone();
                let llm_api_token = self.llm_api_token.clone();

                let request = into_open_ai_response(
                    request,
                    &self.model.id.0,
                    self.model.supports_parallel_tool_calls,
                    true,
                    None,
                    None,
                );
                let future = self.request_limiter.stream(async move {
                    let PerformLlmCompletionResponse {
                        response,
                        includes_status_messages,
                    } = Self::perform_llm_completion(
                        client.clone(),
                        llm_api_token,
                        app_version,
                        CompletionBody {
                            thread_id,
                            prompt_id,
                            intent,
                            provider: cloud_llm_client::LanguageModelProvider::OpenAi,
                            model: request.model.clone(),
                            provider_request: serde_json::to_value(&request)
                                .map_err(|e| anyhow!(e))?,
                        },
                    )
                    .await?;

                    let mut mapper = OpenAiResponseEventMapper::new();
                    Ok(map_cloud_completion_events(
                        Box::pin(response_lines(response, includes_status_messages)),
                        &provider_name,
                        move |event| mapper.map_event(event),
                    ))
                });
                async move { Ok(future.await?.boxed()) }.boxed()
            }
            cloud_llm_client::LanguageModelProvider::XAi => {
                let client = self.client.clone();
                let request = into_open_ai(
                    request,
                    &self.model.id.0,
                    self.model.supports_parallel_tool_calls,
                    false,
                    None,
                    None,
                );
                let llm_api_token = self.llm_api_token.clone();
                let future = self.request_limiter.stream(async move {
                    let PerformLlmCompletionResponse {
                        response,
                        includes_status_messages,
                    } = Self::perform_llm_completion(
                        client.clone(),
                        llm_api_token,
                        app_version,
                        CompletionBody {
                            thread_id,
                            prompt_id,
                            intent,
                            provider: cloud_llm_client::LanguageModelProvider::XAi,
                            model: request.model.clone(),
                            provider_request: serde_json::to_value(&request)
                                .map_err(|e| anyhow!(e))?,
                        },
                    )
                    .await?;

                    let mut mapper = OpenAiEventMapper::new();
                    Ok(map_cloud_completion_events(
                        Box::pin(response_lines(response, includes_status_messages)),
                        &provider_name,
                        move |event| mapper.map_event(event),
                    ))
                });
                async move { Ok(future.await?.boxed()) }.boxed()
            }
            cloud_llm_client::LanguageModelProvider::Google => {
                let client = self.client.clone();
                let request =
                    into_google(request, self.model.id.to_string(), GoogleModelMode::Default);
                let llm_api_token = self.llm_api_token.clone();
                let future = self.request_limiter.stream(async move {
                    let PerformLlmCompletionResponse {
                        response,
                        includes_status_messages,
                    } = Self::perform_llm_completion(
                        client.clone(),
                        llm_api_token,
                        app_version,
                        CompletionBody {
                            thread_id,
                            prompt_id,
                            intent,
                            provider: cloud_llm_client::LanguageModelProvider::Google,
                            model: request.model.model_id.clone(),
                            provider_request: serde_json::to_value(&request)
                                .map_err(|e| anyhow!(e))?,
                        },
                    )
                    .await?;

                    let mut mapper = GoogleEventMapper::new();
                    Ok(map_cloud_completion_events(
                        Box::pin(response_lines(response, includes_status_messages)),
                        &provider_name,
                        move |event| mapper.map_event(event),
                    ))
                });
                async move { Ok(future.await?.boxed()) }.boxed()
            }
        }
    }
}

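/// Adapts a raw stream of cloud `CompletionEvent`s into completion events:
/// transport errors and status messages are handled uniformly here, while
/// provider-specific events are translated by `map_callback`.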
fn map_cloud_completion_events<T, F>(
    stream: Pin<Box<dyn Stream<Item = Result<CompletionEvent<T>>> + Send>>,
    provider: &LanguageModelProviderName,
    mut map_callback: F,
) -> BoxStream<'static, Result<LanguageModelCompletionEvent, LanguageModelCompletionError>>
where
    T: DeserializeOwned + 'static,
    F: FnMut(T) -> Vec<Result<LanguageModelCompletionEvent, LanguageModelCompletionError>>
        + Send
        + 'static,
{
    let provider = provider.clone();
    stream
        .flat_map(move |event| {
            futures::stream::iter(match event {
                Err(error) => {
                    vec![Err(LanguageModelCompletionError::from(error))]
                }
                Ok(CompletionEvent::Status(event)) => {
                    vec![
                        LanguageModelCompletionEvent::from_completion_request_status(
                            event,
                            provider.clone(),
                        ),
                    ]
                }
                Ok(CompletionEvent::Event(event)) => map_callback(event),
            })
        })
        .boxed()
}

fn provider_name(provider: &cloud_llm_client::LanguageModelProvider) -> LanguageModelProviderName {
    match provider {
        cloud_llm_client::LanguageModelProvider::Anthropic => {
            language_model::ANTHROPIC_PROVIDER_NAME
        }
        cloud_llm_client::LanguageModelProvider::OpenAi => language_model::OPEN_AI_PROVIDER_NAME,
        cloud_llm_client::LanguageModelProvider::Google => language_model::GOOGLE_PROVIDER_NAME,
        cloud_llm_client::LanguageModelProvider::XAi => language_model::X_AI_PROVIDER_NAME,
    }
}

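/// Parses a streaming response body as newline-delimited JSON. When the
/// server interleaves status messages, each line is already a tagged
/// `CompletionEvent<T>`; otherwise every line is a bare provider event and
/// gets wrapped in `CompletionEvent::Event`. As a sketch, a provider event
/// line on the wire might look like this (hypothetical payload, not an exact
/// schema):
///
/// ```json
/// {"type":"content_block_delta","delta":{"text":"Hello"}}
/// ```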
fn response_lines<T: DeserializeOwned>(
    response: Response<AsyncBody>,
    includes_status_messages: bool,
) -> impl Stream<Item = Result<CompletionEvent<T>>> {
    futures::stream::try_unfold(
        (String::new(), BufReader::new(response.into_body())),
        move |(mut line, mut body)| async move {
            match body.read_line(&mut line).await {
                Ok(0) => Ok(None),
                Ok(_) => {
                    let event = if includes_status_messages {
                        serde_json::from_str::<CompletionEvent<T>>(&line)?
                    } else {
                        CompletionEvent::Event(serde_json::from_str::<T>(&line)?)
                    };

                    line.clear();
                    Ok(Some((event, (line, body))))
                }
                Err(e) => Err(e.into()),
            }
        },
    )
}

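/// The Zed AI configuration panel, rendered from a snapshot of the user's
/// connection state, plan, and trial eligibility.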
#[derive(IntoElement, RegisterComponent)]
struct ZedAiConfiguration {
    is_connected: bool,
    plan: Option<Plan>,
    subscription_period: Option<(DateTime<Utc>, DateTime<Utc>)>,
    eligible_for_trial: bool,
    account_too_young: bool,
    sign_in_callback: Arc<dyn Fn(&mut Window, &mut App) + Send + Sync>,
}

impl RenderOnce for ZedAiConfiguration {
    fn render(self, _window: &mut Window, _cx: &mut App) -> impl IntoElement {
        let is_pro = self
            .plan
            .is_some_and(|plan| plan == Plan::V2(PlanV2::ZedPro));
        let subscription_text = match (self.plan, self.subscription_period) {
            (Some(Plan::V2(PlanV2::ZedPro)), Some(_)) => {
                "You have access to Zed's hosted models through your Pro subscription."
            }
            (Some(Plan::V2(PlanV2::ZedProTrial)), Some(_)) => {
                "You have access to Zed's hosted models through your Pro trial."
            }
            (Some(Plan::V2(PlanV2::ZedFree)), Some(_)) => {
                if self.eligible_for_trial {
                    "Subscribe for access to Zed's hosted models. Start with a 14-day free trial."
                } else {
                    "Subscribe for access to Zed's hosted models."
                }
            }
            _ => {
                if self.eligible_for_trial {
                    "Subscribe for access to Zed's hosted models. Start with a 14-day free trial."
                } else {
                    "Subscribe for access to Zed's hosted models."
                }
            }
        };

        let manage_subscription_button = if is_pro {
            Button::new("manage_settings", "Manage Subscription")
                .full_width()
                .style(ButtonStyle::Tinted(TintColor::Accent))
                .on_click(|_, _, cx| cx.open_url(&zed_urls::account_url(cx)))
                .into_any_element()
        } else if self.plan.is_none() || self.eligible_for_trial {
            Button::new("start_trial", "Start 14-day Free Pro Trial")
                .full_width()
                .style(ButtonStyle::Tinted(TintColor::Accent))
                .on_click(|_, _, cx| cx.open_url(&zed_urls::start_trial_url(cx)))
                .into_any_element()
        } else {
            Button::new("upgrade", "Upgrade to Pro")
                .full_width()
                .style(ButtonStyle::Tinted(TintColor::Accent))
                .on_click(|_, _, cx| cx.open_url(&zed_urls::upgrade_to_zed_pro_url(cx)))
                .into_any_element()
        };

        if !self.is_connected {
            return v_flex()
                .gap_2()
                .child(Label::new("Sign in to access Zed's complete agentic experience with hosted models."))
                .child(
                    Button::new("sign_in", "Sign In to use Zed AI")
                        .icon_color(Color::Muted)
                        .icon(IconName::Github)
                        .icon_size(IconSize::Small)
                        .icon_position(IconPosition::Start)
                        .full_width()
                        .on_click({
                            let callback = self.sign_in_callback.clone();
                            move |_, window, cx| (callback)(window, cx)
                        }),
                );
        }

        v_flex().gap_2().w_full().map(|this| {
            if self.account_too_young {
                this.child(YoungAccountBanner).child(
                    Button::new("upgrade", "Upgrade to Pro")
                        .style(ButtonStyle::Tinted(TintColor::Accent))
                        .full_width()
                        .on_click(|_, _, cx| cx.open_url(&zed_urls::upgrade_to_zed_pro_url(cx))),
                )
            } else {
                this.text_sm()
                    .child(subscription_text)
                    .child(manage_subscription_button)
            }
        })
    }
}

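/// Entity-backed wrapper that renders `ZedAiConfiguration` from live `State`
/// and wires up the sign-in action.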
struct ConfigurationView {
    state: Entity<State>,
    sign_in_callback: Arc<dyn Fn(&mut Window, &mut App) + Send + Sync>,
}

impl ConfigurationView {
    fn new(state: Entity<State>) -> Self {
        let sign_in_callback = Arc::new({
            let state = state.clone();
            move |_window: &mut Window, cx: &mut App| {
                state.update(cx, |state, cx| {
                    state.authenticate(cx).detach_and_log_err(cx);
                });
            }
        });

        Self {
            state,
            sign_in_callback,
        }
    }
}

impl Render for ConfigurationView {
    fn render(&mut self, _: &mut Window, cx: &mut Context<Self>) -> impl IntoElement {
        let state = self.state.read(cx);
        let user_store = state.user_store.read(cx);

        ZedAiConfiguration {
            is_connected: !state.is_signed_out(cx),
            plan: user_store.plan(),
            subscription_period: user_store.subscription_period(),
            eligible_for_trial: user_store.trial_started_at().is_none(),
            account_too_young: user_store.account_too_young(),
            sign_in_callback: self.sign_in_callback.clone(),
        }
    }
}

impl Component for ZedAiConfiguration {
    fn name() -> &'static str {
        "AI Configuration Content"
    }

    fn sort_name() -> &'static str {
        "AI Configuration Content"
    }

    fn scope() -> ComponentScope {
        ComponentScope::Onboarding
    }

    fn preview(_window: &mut Window, _cx: &mut App) -> Option<AnyElement> {
        fn configuration(
            is_connected: bool,
            plan: Option<Plan>,
            eligible_for_trial: bool,
            account_too_young: bool,
        ) -> AnyElement {
            ZedAiConfiguration {
                is_connected,
                plan,
                subscription_period: plan
                    .is_some()
                    .then(|| (Utc::now(), Utc::now() + chrono::Duration::days(7))),
                eligible_for_trial,
                account_too_young,
                sign_in_callback: Arc::new(|_, _| {}),
            }
            .into_any_element()
        }

        Some(
            v_flex()
                .p_4()
                .gap_4()
                .children(vec![
                    single_example("Not connected", configuration(false, None, false, false)),
                    single_example(
                        "Accept Terms of Service",
                        configuration(true, None, true, false),
                    ),
                    single_example(
                        "No Plan - Not eligible for trial",
                        configuration(true, None, false, false),
                    ),
                    single_example(
                        "No Plan - Eligible for trial",
                        configuration(true, None, true, false),
                    ),
                    single_example(
                        "Free Plan",
                        configuration(true, Some(Plan::V2(PlanV2::ZedFree)), true, false),
                    ),
                    single_example(
                        "Zed Pro Trial Plan",
                        configuration(true, Some(Plan::V2(PlanV2::ZedProTrial)), true, false),
                    ),
                    single_example(
                        "Zed Pro Plan",
                        configuration(true, Some(Plan::V2(PlanV2::ZedPro)), true, false),
                    ),
                ])
                .into_any_element(),
        )
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use http_client::http::{HeaderMap, StatusCode};
    use language_model::LanguageModelCompletionError;

    #[test]
    fn test_api_error_conversion_with_upstream_http_error() {
        // upstream_http_error with an upstream 503 should become UpstreamProviderError
        let error_body = r#"{"code":"upstream_http_error","message":"Received an error from the Anthropic API: upstream connect error or disconnect/reset before headers, reset reason: connection timeout","upstream_status":503}"#;

        let api_error = ApiError {
            status: StatusCode::INTERNAL_SERVER_ERROR,
            body: error_body.to_string(),
            headers: HeaderMap::new(),
        };

        let completion_error: LanguageModelCompletionError = api_error.into();

        match completion_error {
            LanguageModelCompletionError::UpstreamProviderError { message, .. } => {
                assert_eq!(
                    message,
                    "Received an error from the Anthropic API: upstream connect error or disconnect/reset before headers, reset reason: connection timeout"
                );
            }
            _ => panic!(
                "Expected UpstreamProviderError for upstream 503, got: {:?}",
                completion_error
            ),
        }

        // upstream_http_error with an upstream 500 should also become UpstreamProviderError
        let error_body = r#"{"code":"upstream_http_error","message":"Received an error from the OpenAI API: internal server error","upstream_status":500}"#;

        let api_error = ApiError {
            status: StatusCode::INTERNAL_SERVER_ERROR,
            body: error_body.to_string(),
            headers: HeaderMap::new(),
        };

        let completion_error: LanguageModelCompletionError = api_error.into();

        match completion_error {
            LanguageModelCompletionError::UpstreamProviderError { message, .. } => {
                assert_eq!(
                    message,
                    "Received an error from the OpenAI API: internal server error"
                );
            }
            _ => panic!(
                "Expected UpstreamProviderError for upstream 500, got: {:?}",
                completion_error
            ),
        }

        // upstream_http_error with an upstream 429 should become UpstreamProviderError
        let error_body = r#"{"code":"upstream_http_error","message":"Received an error from the Google API: rate limit exceeded","upstream_status":429}"#;

        let api_error = ApiError {
            status: StatusCode::INTERNAL_SERVER_ERROR,
            body: error_body.to_string(),
            headers: HeaderMap::new(),
        };

        let completion_error: LanguageModelCompletionError = api_error.into();

        match completion_error {
            LanguageModelCompletionError::UpstreamProviderError { message, .. } => {
                assert_eq!(
                    message,
                    "Received an error from the Google API: rate limit exceeded"
                );
            }
            _ => panic!(
                "Expected UpstreamProviderError for upstream 429, got: {:?}",
                completion_error
            ),
        }

        // Regular 500 error without upstream_http_error should remain ApiInternalServerError for Zed
        let error_body = "Regular internal server error";

        let api_error = ApiError {
            status: StatusCode::INTERNAL_SERVER_ERROR,
            body: error_body.to_string(),
            headers: HeaderMap::new(),
        };

        let completion_error: LanguageModelCompletionError = api_error.into();

        match completion_error {
            LanguageModelCompletionError::ApiInternalServerError { provider, message } => {
                assert_eq!(provider, PROVIDER_NAME);
                assert_eq!(message, "Regular internal server error");
            }
            _ => panic!(
                "Expected ApiInternalServerError for regular 500, got: {:?}",
                completion_error
            ),
        }

        // upstream_http_429 format should be converted to UpstreamProviderError
        let error_body = r#"{"code":"upstream_http_429","message":"Upstream Anthropic rate limit exceeded.","retry_after":30.5}"#;

        let api_error = ApiError {
            status: StatusCode::INTERNAL_SERVER_ERROR,
            body: error_body.to_string(),
            headers: HeaderMap::new(),
        };

        let completion_error: LanguageModelCompletionError = api_error.into();

        match completion_error {
            LanguageModelCompletionError::UpstreamProviderError {
                message,
                status,
                retry_after,
            } => {
                assert_eq!(message, "Upstream Anthropic rate limit exceeded.");
                assert_eq!(status, StatusCode::TOO_MANY_REQUESTS);
                assert_eq!(retry_after, Some(Duration::from_secs_f64(30.5)));
            }
            _ => panic!(
                "Expected UpstreamProviderError for upstream_http_429, got: {:?}",
                completion_error
            ),
        }

        // Invalid JSON in error body should fall back to regular error handling
        let error_body = "Not JSON at all";

        let api_error = ApiError {
            status: StatusCode::INTERNAL_SERVER_ERROR,
            body: error_body.to_string(),
            headers: HeaderMap::new(),
        };

        let completion_error: LanguageModelCompletionError = api_error.into();

        match completion_error {
            LanguageModelCompletionError::ApiInternalServerError { provider, .. } => {
                assert_eq!(provider, PROVIDER_NAME);
            }
            _ => panic!(
                "Expected ApiInternalServerError for invalid JSON, got: {:?}",
                completion_error
            ),
        }
    }
}