use ai_onboarding::YoungAccountBanner;
use anthropic::AnthropicModelMode;
use anyhow::{Context as _, Result, anyhow};
use chrono::{DateTime, Utc};
use client::{Client, UserStore, zed_urls};
use cloud_api_types::{OrganizationId, Plan};
use cloud_llm_client::{
    CLIENT_SUPPORTS_STATUS_MESSAGES_HEADER_NAME, CLIENT_SUPPORTS_STATUS_STREAM_ENDED_HEADER_NAME,
    CLIENT_SUPPORTS_X_AI_HEADER_NAME, CompletionBody, CompletionEvent, CompletionRequestStatus,
    CountTokensBody, CountTokensResponse, ListModelsResponse,
    SERVER_SUPPORTS_STATUS_MESSAGES_HEADER_NAME, ZED_VERSION_HEADER_NAME,
};
use futures::{
    AsyncBufReadExt, FutureExt, Stream, StreamExt,
    future::BoxFuture,
    stream::{self, BoxStream},
};
use google_ai::GoogleModelMode;
use gpui::{AnyElement, AnyView, App, AsyncApp, Context, Entity, Subscription, Task};
use http_client::http::{HeaderMap, HeaderValue};
use http_client::{AsyncBody, HttpClient, HttpRequestExt, Method, Response, StatusCode};
use language_model::{
    AuthenticateError, IconOrSvg, LanguageModel, LanguageModelCacheConfiguration,
    LanguageModelCompletionError, LanguageModelCompletionEvent, LanguageModelEffortLevel,
    LanguageModelId, LanguageModelName, LanguageModelProvider, LanguageModelProviderId,
    LanguageModelProviderName, LanguageModelProviderState, LanguageModelRequest,
    LanguageModelToolChoice, LanguageModelToolSchemaFormat, LlmApiToken, NeedsLlmTokenRefresh,
    PaymentRequiredError, RateLimiter, RefreshLlmTokenListener,
};
use release_channel::AppVersion;
use schemars::JsonSchema;
use semver::Version;
use serde::{Deserialize, Serialize, de::DeserializeOwned};
use settings::SettingsStore;
pub use settings::ZedDotDevAvailableModel as AvailableModel;
pub use settings::ZedDotDevAvailableProvider as AvailableProvider;
use smol::io::{AsyncReadExt, BufReader};
use std::collections::VecDeque;
use std::pin::Pin;
use std::str::FromStr;
use std::sync::Arc;
use std::task::Poll;
use std::time::Duration;
use thiserror::Error;
use ui::{TintColor, prelude::*};

use crate::provider::anthropic::{
    AnthropicEventMapper, count_anthropic_tokens_with_tiktoken, into_anthropic,
};
use crate::provider::google::{GoogleEventMapper, into_google};
use crate::provider::open_ai::{
    OpenAiEventMapper, OpenAiResponseEventMapper, count_open_ai_tokens, into_open_ai,
    into_open_ai_response,
};
use crate::provider::x_ai::count_xai_tokens;

const PROVIDER_ID: LanguageModelProviderId = language_model::ZED_CLOUD_PROVIDER_ID;
const PROVIDER_NAME: LanguageModelProviderName = language_model::ZED_CLOUD_PROVIDER_NAME;

#[derive(Default, Clone, Debug, PartialEq)]
pub struct ZedDotDevSettings {
    pub available_models: Vec<AvailableModel>,
}

#[derive(Default, Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema)]
#[serde(tag = "type", rename_all = "lowercase")]
pub enum ModelMode {
    #[default]
    Default,
    Thinking {
        /// The maximum number of tokens to use for reasoning. Must be lower than the model's
        /// `max_output_tokens`.
        budget_tokens: Option<u32>,
    },
}

impl From<ModelMode> for AnthropicModelMode {
    fn from(value: ModelMode) -> Self {
        match value {
            ModelMode::Default => AnthropicModelMode::Default,
            ModelMode::Thinking { budget_tokens } => AnthropicModelMode::Thinking { budget_tokens },
        }
    }
}
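
// A quick illustration (written as a standalone test, not shipped behavior) of the JSON shape
// produced by the serde attributes on `ModelMode` above: the variant is selected by a lowercase
// `type` tag, with variant fields inlined alongside it.
#[cfg(test)]
mod model_mode_serde_example {
    use super::*;

    #[test]
    fn model_mode_round_trips_through_tagged_json() {
        let mode: ModelMode =
            serde_json::from_str(r#"{"type":"thinking","budget_tokens":4096}"#).unwrap();
        assert!(matches!(
            mode,
            ModelMode::Thinking {
                budget_tokens: Some(4096)
            }
        ));

        // Unit variants serialize to just the tag.
        let json = serde_json::to_string(&ModelMode::Default).unwrap();
        assert_eq!(json, r#"{"type":"default"}"#);
    }
}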

pub struct CloudLanguageModelProvider {
    client: Arc<Client>,
    state: Entity<State>,
    _maintain_client_status: Task<()>,
}

pub struct State {
    client: Arc<Client>,
    llm_api_token: LlmApiToken,
    user_store: Entity<UserStore>,
    status: client::Status,
    models: Vec<Arc<cloud_llm_client::LanguageModel>>,
    default_model: Option<Arc<cloud_llm_client::LanguageModel>>,
    default_fast_model: Option<Arc<cloud_llm_client::LanguageModel>>,
    recommended_models: Vec<Arc<cloud_llm_client::LanguageModel>>,
    _user_store_subscription: Subscription,
    _settings_subscription: Subscription,
    _llm_token_subscription: Subscription,
}

impl State {
    fn new(
        client: Arc<Client>,
        user_store: Entity<UserStore>,
        status: client::Status,
        cx: &mut Context<Self>,
    ) -> Self {
        let refresh_llm_token_listener = RefreshLlmTokenListener::global(cx);
        let llm_api_token = LlmApiToken::global(cx);
        Self {
            client: client.clone(),
            llm_api_token,
            user_store: user_store.clone(),
            status,
            models: Vec::new(),
            default_model: None,
            default_fast_model: None,
            recommended_models: Vec::new(),
            _user_store_subscription: cx.subscribe(
                &user_store,
                move |this, _user_store, event, cx| match event {
                    client::user::Event::PrivateUserInfoUpdated => {
                        let status = *client.status().borrow();
                        if status.is_signed_out() {
                            return;
                        }

                        let client = this.client.clone();
                        let llm_api_token = this.llm_api_token.clone();
                        let organization_id = this
                            .user_store
                            .read(cx)
                            .current_organization()
                            .map(|organization| organization.id.clone());
                        cx.spawn(async move |this, cx| {
                            let response =
                                Self::fetch_models(client, llm_api_token, organization_id).await?;
                            this.update(cx, |this, cx| this.update_models(response, cx))
                        })
                        .detach_and_log_err(cx);
                    }
                    _ => {}
                },
            ),
            _settings_subscription: cx.observe_global::<SettingsStore>(|_, cx| {
                cx.notify();
            }),
            _llm_token_subscription: cx.subscribe(
                &refresh_llm_token_listener,
                move |this, _listener, _event, cx| {
                    let client = this.client.clone();
                    let llm_api_token = this.llm_api_token.clone();
                    let organization_id = this
                        .user_store
                        .read(cx)
                        .current_organization()
                        .map(|o| o.id.clone());
                    cx.spawn(async move |this, cx| {
                        let response =
                            Self::fetch_models(client, llm_api_token, organization_id).await?;
                        this.update(cx, |this, cx| {
                            this.update_models(response, cx);
                        })
                    })
                    .detach_and_log_err(cx);
                },
            ),
        }
    }

    fn is_signed_out(&self, cx: &App) -> bool {
        self.user_store.read(cx).current_user().is_none()
    }

    fn authenticate(&self, cx: &mut Context<Self>) -> Task<Result<()>> {
        let client = self.client.clone();
        cx.spawn(async move |state, cx| {
            client.sign_in_with_optional_connect(true, cx).await?;
            state.update(cx, |_, cx| cx.notify())
        })
    }

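    /// Rebuilds the model list from a `ListModelsResponse`, resolving the
    /// default, fast, and recommended models against the fetched set.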
    fn update_models(&mut self, response: ListModelsResponse, cx: &mut Context<Self>) {
        let models: Vec<_> = response.models.into_iter().map(Arc::new).collect();

        self.default_model = models
            .iter()
            .find(|model| {
                response
                    .default_model
                    .as_ref()
                    .is_some_and(|default_model_id| &model.id == default_model_id)
            })
            .cloned();
        self.default_fast_model = models
            .iter()
            .find(|model| {
                response
                    .default_fast_model
                    .as_ref()
                    .is_some_and(|default_fast_model_id| &model.id == default_fast_model_id)
            })
            .cloned();
        self.recommended_models = response
            .recommended_models
            .iter()
            .filter_map(|id| models.iter().find(|model| &model.id == id))
            .cloned()
            .collect();
        self.models = models;
        cx.notify();
    }

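    /// Fetches the available models (plus the default, fast, and recommended
    /// model ids) from the Zed LLM service, authenticating with a freshly
    /// acquired LLM token.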
    async fn fetch_models(
        client: Arc<Client>,
        llm_api_token: LlmApiToken,
        organization_id: Option<OrganizationId>,
    ) -> Result<ListModelsResponse> {
        let http_client = &client.http_client();
        let token = llm_api_token.acquire(&client, organization_id).await?;

        let request = http_client::Request::builder()
            .method(Method::GET)
            .header(CLIENT_SUPPORTS_X_AI_HEADER_NAME, "true")
            .uri(http_client.build_zed_llm_url("/models", &[])?.as_ref())
            .header("Authorization", format!("Bearer {token}"))
            .body(AsyncBody::empty())?;
        let mut response = http_client
            .send(request)
            .await
            .context("failed to send list models request")?;

        let mut body = String::new();
        response.body_mut().read_to_string(&mut body).await?;
        if response.status().is_success() {
            Ok(serde_json::from_str(&body)?)
        } else {
            anyhow::bail!(
                "error listing models.\nStatus: {:?}\nBody: {body}",
                response.status(),
            );
        }
    }
}

impl CloudLanguageModelProvider {
    pub fn new(user_store: Entity<UserStore>, client: Arc<Client>, cx: &mut App) -> Self {
        let mut status_rx = client.status();
        let status = *status_rx.borrow();

        let state = cx.new(|cx| State::new(client.clone(), user_store.clone(), status, cx));

        let state_ref = state.downgrade();
        let maintain_client_status = cx.spawn(async move |cx| {
            while let Some(status) = status_rx.next().await {
                if let Some(this) = state_ref.upgrade() {
                    _ = this.update(cx, |this, cx| {
                        if this.status != status {
                            this.status = status;
                            cx.notify();
                        }
                    });
                } else {
                    break;
                }
            }
        });

        Self {
            client,
            state,
            _maintain_client_status: maintain_client_status,
        }
    }

    fn create_language_model(
        &self,
        model: Arc<cloud_llm_client::LanguageModel>,
        llm_api_token: LlmApiToken,
        user_store: Entity<UserStore>,
    ) -> Arc<dyn LanguageModel> {
        Arc::new(CloudLanguageModel {
            id: LanguageModelId(SharedString::from(model.id.0.clone())),
            model,
            llm_api_token,
            user_store,
            client: self.client.clone(),
            request_limiter: RateLimiter::new(4),
        })
    }
}

impl LanguageModelProviderState for CloudLanguageModelProvider {
    type ObservableEntity = State;

    fn observable_entity(&self) -> Option<Entity<Self::ObservableEntity>> {
        Some(self.state.clone())
    }
}

impl LanguageModelProvider for CloudLanguageModelProvider {
    fn id(&self) -> LanguageModelProviderId {
        PROVIDER_ID
    }

    fn name(&self) -> LanguageModelProviderName {
        PROVIDER_NAME
    }

    fn icon(&self) -> IconOrSvg {
        IconOrSvg::Icon(IconName::AiZed)
    }

    fn default_model(&self, cx: &App) -> Option<Arc<dyn LanguageModel>> {
        let state = self.state.read(cx);
        let default_model = state.default_model.clone()?;
        let llm_api_token = state.llm_api_token.clone();
        let user_store = state.user_store.clone();
        Some(self.create_language_model(default_model, llm_api_token, user_store))
    }

    fn default_fast_model(&self, cx: &App) -> Option<Arc<dyn LanguageModel>> {
        let state = self.state.read(cx);
        let default_fast_model = state.default_fast_model.clone()?;
        let llm_api_token = state.llm_api_token.clone();
        let user_store = state.user_store.clone();
        Some(self.create_language_model(default_fast_model, llm_api_token, user_store))
    }

    fn recommended_models(&self, cx: &App) -> Vec<Arc<dyn LanguageModel>> {
        let state = self.state.read(cx);
        let llm_api_token = state.llm_api_token.clone();
        let user_store = state.user_store.clone();
        state
            .recommended_models
            .iter()
            .cloned()
            .map(|model| {
                self.create_language_model(model, llm_api_token.clone(), user_store.clone())
            })
            .collect()
    }

    fn provided_models(&self, cx: &App) -> Vec<Arc<dyn LanguageModel>> {
        let state = self.state.read(cx);
        let llm_api_token = state.llm_api_token.clone();
        let user_store = state.user_store.clone();
        state
            .models
            .iter()
            .cloned()
            .map(|model| {
                self.create_language_model(model, llm_api_token.clone(), user_store.clone())
            })
            .collect()
    }

    fn is_authenticated(&self, cx: &App) -> bool {
        let state = self.state.read(cx);
        !state.is_signed_out(cx)
    }

    fn authenticate(&self, _cx: &mut App) -> Task<Result<(), AuthenticateError>> {
        Task::ready(Ok(()))
    }

    fn configuration_view(
        &self,
        _target_agent: language_model::ConfigurationViewTargetAgent,
        _: &mut Window,
        cx: &mut App,
    ) -> AnyView {
        cx.new(|_| ConfigurationView::new(self.state.clone()))
            .into()
    }

    fn reset_credentials(&self, _cx: &mut App) -> Task<Result<()>> {
        Task::ready(Ok(()))
    }
}

pub struct CloudLanguageModel {
    id: LanguageModelId,
    model: Arc<cloud_llm_client::LanguageModel>,
    llm_api_token: LlmApiToken,
    user_store: Entity<UserStore>,
    client: Arc<Client>,
    request_limiter: RateLimiter,
}

struct PerformLlmCompletionResponse {
    response: Response<AsyncBody>,
    includes_status_messages: bool,
}

impl CloudLanguageModel {
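    /// Sends a completion request to the Zed LLM service, refreshing the LLM
    /// token once if the server indicates a refresh is needed, and mapping
    /// payment-required responses to `PaymentRequiredError`.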
    async fn perform_llm_completion(
        client: Arc<Client>,
        llm_api_token: LlmApiToken,
        organization_id: Option<OrganizationId>,
        app_version: Option<Version>,
        body: CompletionBody,
    ) -> Result<PerformLlmCompletionResponse> {
        let http_client = &client.http_client();

        let mut token = llm_api_token
            .acquire(&client, organization_id.clone())
            .await?;
        let mut refreshed_token = false;

        loop {
            let request = http_client::Request::builder()
                .method(Method::POST)
                .uri(http_client.build_zed_llm_url("/completions", &[])?.as_ref())
                .when_some(app_version.as_ref(), |builder, app_version| {
                    builder.header(ZED_VERSION_HEADER_NAME, app_version.to_string())
                })
                .header("Content-Type", "application/json")
                .header("Authorization", format!("Bearer {token}"))
                .header(CLIENT_SUPPORTS_STATUS_MESSAGES_HEADER_NAME, "true")
                .header(CLIENT_SUPPORTS_STATUS_STREAM_ENDED_HEADER_NAME, "true")
                .body(serde_json::to_string(&body)?.into())?;

            let mut response = http_client.send(request).await?;
            let status = response.status();
            if status.is_success() {
                let includes_status_messages = response
                    .headers()
                    .get(SERVER_SUPPORTS_STATUS_MESSAGES_HEADER_NAME)
                    .is_some();

                return Ok(PerformLlmCompletionResponse {
                    response,
                    includes_status_messages,
                });
            }

            if !refreshed_token && response.needs_llm_token_refresh() {
                token = llm_api_token
                    .refresh(&client, organization_id.clone())
                    .await?;
                refreshed_token = true;
                continue;
            }

            if status == StatusCode::PAYMENT_REQUIRED {
                return Err(anyhow!(PaymentRequiredError));
            }

            let mut body = String::new();
            let headers = response.headers().clone();
            response.body_mut().read_to_string(&mut body).await?;
            return Err(anyhow!(ApiError {
                status,
                body,
                headers
            }));
        }
    }
}

#[derive(Debug, Error)]
#[error("cloud language model request failed with status {status}: {body}")]
struct ApiError {
    status: StatusCode,
    body: String,
    headers: HeaderMap<HeaderValue>,
}

/// Represents error responses from Zed's cloud API.
///
/// Example JSON for an upstream HTTP error:
/// ```json
/// {
///     "code": "upstream_http_error",
///     "message": "Received an error from the Anthropic API: upstream connect error or disconnect/reset before headers, reset reason: connection timeout",
///     "upstream_status": 503
/// }
/// ```
#[derive(Debug, serde::Deserialize)]
struct CloudApiError {
    code: String,
    message: String,
    #[serde(default)]
    #[serde(deserialize_with = "deserialize_optional_status_code")]
    upstream_status: Option<StatusCode>,
    #[serde(default)]
    retry_after: Option<f64>,
}

fn deserialize_optional_status_code<'de, D>(deserializer: D) -> Result<Option<StatusCode>, D::Error>
where
    D: serde::Deserializer<'de>,
{
    let opt: Option<u16> = Option::deserialize(deserializer)?;
    Ok(opt.and_then(|code| StatusCode::from_u16(code).ok()))
}
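
// A minimal sketch of how `deserialize_optional_status_code` behaves, written as a standalone
// test against a hypothetical `Probe` struct (not part of the API): valid u16 codes become
// `StatusCode`s, while out-of-range codes and missing fields quietly become `None`.
#[cfg(test)]
mod status_code_deserialization_example {
    use super::*;

    #[derive(serde::Deserialize)]
    struct Probe {
        #[serde(default, deserialize_with = "deserialize_optional_status_code")]
        upstream_status: Option<StatusCode>,
    }

    #[test]
    fn out_of_range_codes_deserialize_to_none() {
        let valid: Probe = serde_json::from_str(r#"{"upstream_status":503}"#).unwrap();
        assert_eq!(valid.upstream_status, Some(StatusCode::SERVICE_UNAVAILABLE));

        // 99 is below the valid HTTP status range, so `StatusCode::from_u16` fails and the
        // deserializer maps it to `None` instead of returning an error.
        let out_of_range: Probe = serde_json::from_str(r#"{"upstream_status":99}"#).unwrap();
        assert_eq!(out_of_range.upstream_status, None);

        // A missing field falls back to the serde default, which is also `None`.
        let missing: Probe = serde_json::from_str("{}").unwrap();
        assert_eq!(missing.upstream_status, None);
    }
}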

impl From<ApiError> for LanguageModelCompletionError {
    fn from(error: ApiError) -> Self {
        if let Ok(cloud_error) = serde_json::from_str::<CloudApiError>(&error.body) {
            if cloud_error.code.starts_with("upstream_http_") {
                let status = if let Some(status) = cloud_error.upstream_status {
                    status
                } else if cloud_error.code.ends_with("_error") {
                    error.status
                } else {
                    // The JSON didn't carry an upstream status, so if the code string
                    // embeds one (e.g. "upstream_http_429"), use that; otherwise fall
                    // back to the status of the response itself.
                    cloud_error
                        .code
                        .strip_prefix("upstream_http_")
                        .and_then(|code_str| code_str.parse::<u16>().ok())
                        .and_then(|code| StatusCode::from_u16(code).ok())
                        .unwrap_or(error.status)
                };

                return LanguageModelCompletionError::UpstreamProviderError {
                    message: cloud_error.message,
                    status,
                    retry_after: cloud_error.retry_after.map(Duration::from_secs_f64),
                };
            }

            return LanguageModelCompletionError::from_http_status(
                PROVIDER_NAME,
                error.status,
                cloud_error.message,
                None,
            );
        }

        LanguageModelCompletionError::from_http_status(
            PROVIDER_NAME,
            error.status,
            error.body,
            None,
        )
    }
}

impl LanguageModel for CloudLanguageModel {
    fn id(&self) -> LanguageModelId {
        self.id.clone()
    }

    fn name(&self) -> LanguageModelName {
        LanguageModelName::from(self.model.display_name.clone())
    }

    fn provider_id(&self) -> LanguageModelProviderId {
        PROVIDER_ID
    }

    fn provider_name(&self) -> LanguageModelProviderName {
        PROVIDER_NAME
    }

    fn upstream_provider_id(&self) -> LanguageModelProviderId {
        use cloud_llm_client::LanguageModelProvider::*;
        match self.model.provider {
            Anthropic => language_model::ANTHROPIC_PROVIDER_ID,
            OpenAi => language_model::OPEN_AI_PROVIDER_ID,
            Google => language_model::GOOGLE_PROVIDER_ID,
            XAi => language_model::X_AI_PROVIDER_ID,
        }
    }

    fn upstream_provider_name(&self) -> LanguageModelProviderName {
        use cloud_llm_client::LanguageModelProvider::*;
        match self.model.provider {
            Anthropic => language_model::ANTHROPIC_PROVIDER_NAME,
            OpenAi => language_model::OPEN_AI_PROVIDER_NAME,
            Google => language_model::GOOGLE_PROVIDER_NAME,
            XAi => language_model::X_AI_PROVIDER_NAME,
        }
    }

    fn is_latest(&self) -> bool {
        self.model.is_latest
    }

    fn supports_tools(&self) -> bool {
        self.model.supports_tools
    }

    fn supports_images(&self) -> bool {
        self.model.supports_images
    }

    fn supports_thinking(&self) -> bool {
        self.model.supports_thinking
    }

    fn supports_fast_mode(&self) -> bool {
        self.model.supports_fast_mode
    }

    fn supported_effort_levels(&self) -> Vec<LanguageModelEffortLevel> {
        self.model
            .supported_effort_levels
            .iter()
            .map(|effort_level| LanguageModelEffortLevel {
                name: effort_level.name.clone().into(),
                value: effort_level.value.clone().into(),
                is_default: effort_level.is_default.unwrap_or(false),
            })
            .collect()
    }

    fn supports_streaming_tools(&self) -> bool {
        self.model.supports_streaming_tools
    }

    fn supports_tool_choice(&self, choice: LanguageModelToolChoice) -> bool {
        match choice {
            LanguageModelToolChoice::Auto
            | LanguageModelToolChoice::Any
            | LanguageModelToolChoice::None => true,
        }
    }

    fn supports_split_token_display(&self) -> bool {
        use cloud_llm_client::LanguageModelProvider::*;
        matches!(self.model.provider, OpenAi)
    }

    fn telemetry_id(&self) -> String {
        format!("zed.dev/{}", self.model.id)
    }

    fn tool_input_format(&self) -> LanguageModelToolSchemaFormat {
        match self.model.provider {
            cloud_llm_client::LanguageModelProvider::Anthropic
            | cloud_llm_client::LanguageModelProvider::OpenAi
            | cloud_llm_client::LanguageModelProvider::XAi => {
                LanguageModelToolSchemaFormat::JsonSchema
            }
            cloud_llm_client::LanguageModelProvider::Google => {
                LanguageModelToolSchemaFormat::JsonSchemaSubset
            }
        }
    }

    fn max_token_count(&self) -> u64 {
        self.model.max_token_count as u64
    }

    fn max_output_tokens(&self) -> Option<u64> {
        Some(self.model.max_output_tokens as u64)
    }

    fn cache_configuration(&self) -> Option<LanguageModelCacheConfiguration> {
        match &self.model.provider {
            cloud_llm_client::LanguageModelProvider::Anthropic => {
                Some(LanguageModelCacheConfiguration {
                    min_total_token: 2_048,
                    should_speculate: true,
                    max_cache_anchors: 4,
                })
            }
            cloud_llm_client::LanguageModelProvider::OpenAi
            | cloud_llm_client::LanguageModelProvider::XAi
            | cloud_llm_client::LanguageModelProvider::Google => None,
        }
    }

    fn count_tokens(
        &self,
        request: LanguageModelRequest,
        cx: &App,
    ) -> BoxFuture<'static, Result<u64>> {
        match self.model.provider {
            cloud_llm_client::LanguageModelProvider::Anthropic => cx
                .background_spawn(async move { count_anthropic_tokens_with_tiktoken(request) })
                .boxed(),
            cloud_llm_client::LanguageModelProvider::OpenAi => {
                let model = match open_ai::Model::from_id(&self.model.id.0) {
                    Ok(model) => model,
                    Err(err) => return async move { Err(anyhow!(err)) }.boxed(),
                };
                count_open_ai_tokens(request, model, cx)
            }
            cloud_llm_client::LanguageModelProvider::XAi => {
                let model = match x_ai::Model::from_id(&self.model.id.0) {
                    Ok(model) => model,
                    Err(err) => return async move { Err(anyhow!(err)) }.boxed(),
                };
                count_xai_tokens(request, model, cx)
            }
            cloud_llm_client::LanguageModelProvider::Google => {
                let client = self.client.clone();
                let llm_api_token = self.llm_api_token.clone();
                let organization_id = self
                    .user_store
                    .read(cx)
                    .current_organization()
                    .map(|o| o.id.clone());
                let model_id = self.model.id.to_string();
                let generate_content_request =
                    into_google(request, model_id.clone(), GoogleModelMode::Default);
                async move {
                    let http_client = &client.http_client();
                    let token = llm_api_token.acquire(&client, organization_id).await?;

                    let request_body = CountTokensBody {
                        provider: cloud_llm_client::LanguageModelProvider::Google,
                        model: model_id,
                        provider_request: serde_json::to_value(&google_ai::CountTokensRequest {
                            generate_content_request,
                        })?,
                    };
                    let request = http_client::Request::builder()
                        .method(Method::POST)
                        .uri(
                            http_client
                                .build_zed_llm_url("/count_tokens", &[])?
                                .as_ref(),
                        )
                        .header("Content-Type", "application/json")
                        .header("Authorization", format!("Bearer {token}"))
                        .body(serde_json::to_string(&request_body)?.into())?;
                    let mut response = http_client.send(request).await?;
                    let status = response.status();
                    let headers = response.headers().clone();
                    let mut response_body = String::new();
                    response
                        .body_mut()
                        .read_to_string(&mut response_body)
                        .await?;

                    if status.is_success() {
                        let response_body: CountTokensResponse =
                            serde_json::from_str(&response_body)?;

                        Ok(response_body.tokens as u64)
                    } else {
                        Err(anyhow!(ApiError {
                            status,
                            body: response_body,
                            headers
                        }))
                    }
                }
                .boxed()
            }
        }
    }

    fn stream_completion(
        &self,
        request: LanguageModelRequest,
        cx: &AsyncApp,
    ) -> BoxFuture<
        'static,
        Result<
            BoxStream<'static, Result<LanguageModelCompletionEvent, LanguageModelCompletionError>>,
            LanguageModelCompletionError,
        >,
    > {
        let thread_id = request.thread_id.clone();
        let prompt_id = request.prompt_id.clone();
        let intent = request.intent;
        // `AsyncApp::update` is fallible, so fall back to `None` rather than
        // wrapping the `Result` itself in `Some`.
        let app_version = cx.update(|cx| AppVersion::global(cx)).ok();
        let user_store = self.user_store.clone();
        let organization_id = cx
            .update(|cx| {
                user_store
                    .read(cx)
                    .current_organization()
                    .map(|o| o.id.clone())
            })
            .ok()
            .flatten();
        let enable_thinking = request.thinking_allowed && self.model.supports_thinking;
        let provider_name = provider_name(&self.model.provider);
        match self.model.provider {
            cloud_llm_client::LanguageModelProvider::Anthropic => {
                let effort = request
                    .thinking_effort
                    .as_ref()
                    .and_then(|effort| anthropic::Effort::from_str(effort).ok());

                let mut request = into_anthropic(
                    request,
                    self.model.id.to_string(),
                    1.0,
                    self.model.max_output_tokens as u64,
                    if enable_thinking {
                        AnthropicModelMode::Thinking {
                            budget_tokens: Some(4_096),
                        }
                    } else {
                        AnthropicModelMode::Default
                    },
                );

                if enable_thinking && effort.is_some() {
                    request.thinking = Some(anthropic::Thinking::Adaptive);
                    request.output_config = Some(anthropic::OutputConfig { effort });
                }

                let client = self.client.clone();
                let llm_api_token = self.llm_api_token.clone();
                let organization_id = organization_id.clone();
                let future = self.request_limiter.stream(async move {
                    let PerformLlmCompletionResponse {
                        response,
                        includes_status_messages,
                    } = Self::perform_llm_completion(
                        client.clone(),
                        llm_api_token,
                        organization_id,
                        app_version,
                        CompletionBody {
                            thread_id,
                            prompt_id,
                            intent,
                            provider: cloud_llm_client::LanguageModelProvider::Anthropic,
                            model: request.model.clone(),
                            provider_request: serde_json::to_value(&request)
                                .map_err(|e| anyhow!(e))?,
                        },
                    )
                    .await
                    .map_err(|err| match err.downcast::<ApiError>() {
                        Ok(api_err) => anyhow!(LanguageModelCompletionError::from(api_err)),
                        Err(err) => anyhow!(err),
                    })?;

                    let mut mapper = AnthropicEventMapper::new();
                    Ok(map_cloud_completion_events(
                        Box::pin(response_lines(response, includes_status_messages)),
                        &provider_name,
                        move |event| mapper.map_event(event),
                    ))
                });
                async move { Ok(future.await?.boxed()) }.boxed()
            }
            cloud_llm_client::LanguageModelProvider::OpenAi => {
                let client = self.client.clone();
                let llm_api_token = self.llm_api_token.clone();
                let organization_id = organization_id.clone();
                let effort = request
                    .thinking_effort
                    .as_ref()
                    .and_then(|effort| open_ai::ReasoningEffort::from_str(effort).ok());

                let mut request = into_open_ai_response(
                    request,
                    &self.model.id.0,
                    self.model.supports_parallel_tool_calls,
                    true,
                    None,
                    None,
                );

                if enable_thinking && let Some(effort) = effort {
                    request.reasoning = Some(open_ai::responses::ReasoningConfig {
                        effort,
                        summary: Some(open_ai::responses::ReasoningSummaryMode::Auto),
                    });
                }

                let future = self.request_limiter.stream(async move {
                    let PerformLlmCompletionResponse {
                        response,
                        includes_status_messages,
                    } = Self::perform_llm_completion(
                        client.clone(),
                        llm_api_token,
                        organization_id,
                        app_version,
                        CompletionBody {
                            thread_id,
                            prompt_id,
                            intent,
                            provider: cloud_llm_client::LanguageModelProvider::OpenAi,
                            model: request.model.clone(),
                            provider_request: serde_json::to_value(&request)
                                .map_err(|e| anyhow!(e))?,
                        },
                    )
                    .await?;

                    let mut mapper = OpenAiResponseEventMapper::new();
                    Ok(map_cloud_completion_events(
                        Box::pin(response_lines(response, includes_status_messages)),
                        &provider_name,
                        move |event| mapper.map_event(event),
                    ))
                });
                async move { Ok(future.await?.boxed()) }.boxed()
            }
            cloud_llm_client::LanguageModelProvider::XAi => {
                let client = self.client.clone();
                let request = into_open_ai(
                    request,
                    &self.model.id.0,
                    self.model.supports_parallel_tool_calls,
                    false,
                    None,
                    None,
                );
                let llm_api_token = self.llm_api_token.clone();
                let organization_id = organization_id.clone();
                let future = self.request_limiter.stream(async move {
                    let PerformLlmCompletionResponse {
                        response,
                        includes_status_messages,
                    } = Self::perform_llm_completion(
                        client.clone(),
                        llm_api_token,
                        organization_id,
                        app_version,
                        CompletionBody {
                            thread_id,
                            prompt_id,
                            intent,
                            provider: cloud_llm_client::LanguageModelProvider::XAi,
                            model: request.model.clone(),
                            provider_request: serde_json::to_value(&request)
                                .map_err(|e| anyhow!(e))?,
                        },
                    )
                    .await?;

                    let mut mapper = OpenAiEventMapper::new();
                    Ok(map_cloud_completion_events(
                        Box::pin(response_lines(response, includes_status_messages)),
                        &provider_name,
                        move |event| mapper.map_event(event),
                    ))
                });
                async move { Ok(future.await?.boxed()) }.boxed()
            }
            cloud_llm_client::LanguageModelProvider::Google => {
                let client = self.client.clone();
                let request =
                    into_google(request, self.model.id.to_string(), GoogleModelMode::Default);
                let llm_api_token = self.llm_api_token.clone();
                let future = self.request_limiter.stream(async move {
                    let PerformLlmCompletionResponse {
                        response,
                        includes_status_messages,
                    } = Self::perform_llm_completion(
                        client.clone(),
                        llm_api_token,
                        organization_id,
                        app_version,
                        CompletionBody {
                            thread_id,
                            prompt_id,
                            intent,
                            provider: cloud_llm_client::LanguageModelProvider::Google,
                            model: request.model.model_id.clone(),
                            provider_request: serde_json::to_value(&request)
                                .map_err(|e| anyhow!(e))?,
                        },
                    )
                    .await?;

                    let mut mapper = GoogleEventMapper::new();
                    Ok(map_cloud_completion_events(
                        Box::pin(response_lines(response, includes_status_messages)),
                        &provider_name,
                        move |event| mapper.map_event(event),
                    ))
                });
                async move { Ok(future.await?.boxed()) }.boxed()
            }
        }
    }
}

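/// Adapts the raw cloud completion stream into `LanguageModelCompletionEvent`s:
/// each upstream event is expanded through `map_callback`, status messages are
/// translated, and if the stream finishes without an explicit `StreamEnded`
/// status a `StreamEndedUnexpectedly` error is appended.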
fn map_cloud_completion_events<T, F>(
    stream: Pin<Box<dyn Stream<Item = Result<CompletionEvent<T>>> + Send>>,
    provider: &LanguageModelProviderName,
    mut map_callback: F,
) -> BoxStream<'static, Result<LanguageModelCompletionEvent, LanguageModelCompletionError>>
where
    T: DeserializeOwned + 'static,
    F: FnMut(T) -> Vec<Result<LanguageModelCompletionEvent, LanguageModelCompletionError>>
        + Send
        + 'static,
{
    let provider = provider.clone();
    let mut stream = stream.fuse();

    let mut saw_stream_ended = false;
    let mut done = false;
    let mut pending = VecDeque::new();

    stream::poll_fn(move |cx| {
        loop {
            if let Some(item) = pending.pop_front() {
                return Poll::Ready(Some(item));
            }

            if done {
                return Poll::Ready(None);
            }

            match stream.poll_next_unpin(cx) {
                Poll::Ready(Some(event)) => {
                    let items = match event {
                        Err(error) => {
                            vec![Err(LanguageModelCompletionError::from(error))]
                        }
                        Ok(CompletionEvent::Status(CompletionRequestStatus::StreamEnded)) => {
                            saw_stream_ended = true;
                            vec![]
                        }
                        Ok(CompletionEvent::Status(status)) => {
                            LanguageModelCompletionEvent::from_completion_request_status(
                                status,
                                provider.clone(),
                            )
                            .transpose()
                            .map(|event| vec![event])
                            .unwrap_or_default()
                        }
                        Ok(CompletionEvent::Event(event)) => map_callback(event),
                    };
                    pending.extend(items);
                }
                Poll::Ready(None) => {
                    done = true;

                    if !saw_stream_ended {
                        return Poll::Ready(Some(Err(
                            LanguageModelCompletionError::StreamEndedUnexpectedly {
                                provider: provider.clone(),
                            },
                        )));
                    }
                }
                Poll::Pending => return Poll::Pending,
            }
        }
    })
    .boxed()
}

fn provider_name(provider: &cloud_llm_client::LanguageModelProvider) -> LanguageModelProviderName {
    match provider {
        cloud_llm_client::LanguageModelProvider::Anthropic => {
            language_model::ANTHROPIC_PROVIDER_NAME
        }
        cloud_llm_client::LanguageModelProvider::OpenAi => language_model::OPEN_AI_PROVIDER_NAME,
        cloud_llm_client::LanguageModelProvider::Google => language_model::GOOGLE_PROVIDER_NAME,
        cloud_llm_client::LanguageModelProvider::XAi => language_model::X_AI_PROVIDER_NAME,
    }
}

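/// Decodes a streaming response body as newline-delimited JSON. When the
/// server has confirmed support for status messages, each line is a full
/// `CompletionEvent` envelope; otherwise each line is a bare provider event.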
fn response_lines<T: DeserializeOwned>(
    response: Response<AsyncBody>,
    includes_status_messages: bool,
) -> impl Stream<Item = Result<CompletionEvent<T>>> {
    futures::stream::try_unfold(
        (String::new(), BufReader::new(response.into_body())),
        move |(mut line, mut body)| async move {
            match body.read_line(&mut line).await {
                Ok(0) => Ok(None),
                Ok(_) => {
                    let event = if includes_status_messages {
                        serde_json::from_str::<CompletionEvent<T>>(&line)?
                    } else {
                        CompletionEvent::Event(serde_json::from_str::<T>(&line)?)
                    };

                    line.clear();
                    Ok(Some((event, (line, body))))
                }
                Err(e) => Err(e.into()),
            }
        },
    )
}
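
// A minimal sketch of the line protocol above, exercised as a standalone test. The
// `serde_json::Value` payload and the in-memory body are illustrative assumptions; real callers
// decode provider-specific event types from a network body. (`AsyncBody: From<String>` matches
// how request bodies are built elsewhere in this file.)
#[cfg(test)]
mod response_lines_example {
    use super::*;

    #[test]
    fn plain_lines_are_wrapped_as_provider_events() {
        let response = Response::builder()
            .body(AsyncBody::from("{\"value\":1}\n{\"value\":2}\n".to_string()))
            .unwrap();

        // Without status-message support, every line is a bare provider event.
        let events = smol::block_on(
            response_lines::<serde_json::Value>(response, false).collect::<Vec<_>>(),
        );
        assert_eq!(events.len(), 2);
        assert!(matches!(events[0], Ok(CompletionEvent::Event(_))));
        assert!(matches!(events[1], Ok(CompletionEvent::Event(_))));
    }
}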

#[derive(IntoElement, RegisterComponent)]
struct ZedAiConfiguration {
    is_connected: bool,
    plan: Option<Plan>,
    subscription_period: Option<(DateTime<Utc>, DateTime<Utc>)>,
    eligible_for_trial: bool,
    account_too_young: bool,
    sign_in_callback: Arc<dyn Fn(&mut Window, &mut App) + Send + Sync>,
}

impl RenderOnce for ZedAiConfiguration {
    fn render(self, _window: &mut Window, _cx: &mut App) -> impl IntoElement {
        let is_pro = self.plan.is_some_and(|plan| plan == Plan::ZedPro);
        let subscription_text = match (self.plan, self.subscription_period) {
            (Some(Plan::ZedPro), Some(_)) => {
                "You have access to Zed's hosted models through your Pro subscription."
            }
            (Some(Plan::ZedProTrial), Some(_)) => {
                "You have access to Zed's hosted models through your Pro trial."
            }
            _ => {
                if self.eligible_for_trial {
                    "Subscribe for access to Zed's hosted models. Start with a 14-day free trial."
                } else {
                    "Subscribe for access to Zed's hosted models."
                }
            }
        };

        let manage_subscription_button = if is_pro {
            Button::new("manage_settings", "Manage Subscription")
                .full_width()
                .style(ButtonStyle::Tinted(TintColor::Accent))
                .on_click(|_, _, cx| cx.open_url(&zed_urls::account_url(cx)))
                .into_any_element()
        } else if self.plan.is_none() || self.eligible_for_trial {
            Button::new("start_trial", "Start 14-day Free Pro Trial")
                .full_width()
                .style(ButtonStyle::Tinted(TintColor::Accent))
                .on_click(|_, _, cx| cx.open_url(&zed_urls::start_trial_url(cx)))
                .into_any_element()
        } else {
            Button::new("upgrade", "Upgrade to Pro")
                .full_width()
                .style(ButtonStyle::Tinted(TintColor::Accent))
                .on_click(|_, _, cx| cx.open_url(&zed_urls::upgrade_to_zed_pro_url(cx)))
                .into_any_element()
        };

        if !self.is_connected {
            return v_flex()
                .gap_2()
                .child(Label::new(
                    "Sign in to access Zed's complete agentic experience with hosted models.",
                ))
                .child(
                    Button::new("sign_in", "Sign In to use Zed AI")
                        .icon_color(Color::Muted)
                        .icon(IconName::Github)
                        .icon_size(IconSize::Small)
                        .icon_position(IconPosition::Start)
                        .full_width()
                        .on_click({
                            let callback = self.sign_in_callback.clone();
                            move |_, window, cx| (callback)(window, cx)
                        }),
                );
        }

        v_flex().gap_2().w_full().map(|this| {
            if self.account_too_young {
                this.child(YoungAccountBanner).child(
                    Button::new("upgrade", "Upgrade to Pro")
                        .style(ButtonStyle::Tinted(TintColor::Accent))
                        .full_width()
                        .on_click(|_, _, cx| cx.open_url(&zed_urls::upgrade_to_zed_pro_url(cx))),
                )
            } else {
                this.text_sm()
                    .child(subscription_text)
                    .child(manage_subscription_button)
            }
        })
    }
}

struct ConfigurationView {
    state: Entity<State>,
    sign_in_callback: Arc<dyn Fn(&mut Window, &mut App) + Send + Sync>,
}

impl ConfigurationView {
    fn new(state: Entity<State>) -> Self {
        let sign_in_callback = Arc::new({
            let state = state.clone();
            move |_window: &mut Window, cx: &mut App| {
                state.update(cx, |state, cx| {
                    state.authenticate(cx).detach_and_log_err(cx);
                });
            }
        });

        Self {
            state,
            sign_in_callback,
        }
    }
}

impl Render for ConfigurationView {
    fn render(&mut self, _: &mut Window, cx: &mut Context<Self>) -> impl IntoElement {
        let state = self.state.read(cx);
        let user_store = state.user_store.read(cx);

        ZedAiConfiguration {
            is_connected: !state.is_signed_out(cx),
            plan: user_store.plan(),
            subscription_period: user_store.subscription_period(),
            eligible_for_trial: user_store.trial_started_at().is_none(),
            account_too_young: user_store.account_too_young(),
            sign_in_callback: self.sign_in_callback.clone(),
        }
    }
}

impl Component for ZedAiConfiguration {
    fn name() -> &'static str {
        "AI Configuration Content"
    }

    fn sort_name() -> &'static str {
        "AI Configuration Content"
    }

    fn scope() -> ComponentScope {
        ComponentScope::Onboarding
    }

    fn preview(_window: &mut Window, _cx: &mut App) -> Option<AnyElement> {
        fn configuration(
            is_connected: bool,
            plan: Option<Plan>,
            eligible_for_trial: bool,
            account_too_young: bool,
        ) -> AnyElement {
            ZedAiConfiguration {
                is_connected,
                plan,
                subscription_period: plan
                    .is_some()
                    .then(|| (Utc::now(), Utc::now() + chrono::Duration::days(7))),
                eligible_for_trial,
                account_too_young,
                sign_in_callback: Arc::new(|_, _| {}),
            }
            .into_any_element()
        }

        Some(
            v_flex()
                .p_4()
                .gap_4()
                .children(vec![
                    single_example("Not connected", configuration(false, None, false, false)),
                    single_example(
                        "No Plan - Not eligible for trial",
                        configuration(true, None, false, false),
                    ),
                    single_example(
                        "No Plan - Eligible for trial",
                        configuration(true, None, true, false),
                    ),
                    single_example(
                        "Free Plan",
                        configuration(true, Some(Plan::ZedFree), true, false),
                    ),
                    single_example(
                        "Zed Pro Trial Plan",
                        configuration(true, Some(Plan::ZedProTrial), true, false),
                    ),
                    single_example(
                        "Zed Pro Plan",
                        configuration(true, Some(Plan::ZedPro), true, false),
                    ),
                ])
                .into_any_element(),
        )
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use http_client::http::{HeaderMap, StatusCode};
    use language_model::LanguageModelCompletionError;

    #[test]
    fn test_api_error_conversion_with_upstream_http_error() {
        // upstream_http_error with an explicit upstream 503 should surface as
        // UpstreamProviderError carrying the upstream message and status
        let error_body = r#"{"code":"upstream_http_error","message":"Received an error from the Anthropic API: upstream connect error or disconnect/reset before headers, reset reason: connection timeout","upstream_status":503}"#;

        let api_error = ApiError {
            status: StatusCode::INTERNAL_SERVER_ERROR,
            body: error_body.to_string(),
            headers: HeaderMap::new(),
        };

        let completion_error: LanguageModelCompletionError = api_error.into();

        match completion_error {
            LanguageModelCompletionError::UpstreamProviderError { message, .. } => {
                assert_eq!(
                    message,
                    "Received an error from the Anthropic API: upstream connect error or disconnect/reset before headers, reset reason: connection timeout"
                );
            }
            _ => panic!(
                "Expected UpstreamProviderError for upstream 503, got: {:?}",
                completion_error
            ),
        }

        // upstream_http_error with upstream 500 should likewise surface as
        // UpstreamProviderError rather than Zed's own internal server error
        let error_body = r#"{"code":"upstream_http_error","message":"Received an error from the OpenAI API: internal server error","upstream_status":500}"#;

        let api_error = ApiError {
            status: StatusCode::INTERNAL_SERVER_ERROR,
            body: error_body.to_string(),
            headers: HeaderMap::new(),
        };

        let completion_error: LanguageModelCompletionError = api_error.into();

        match completion_error {
            LanguageModelCompletionError::UpstreamProviderError { message, .. } => {
                assert_eq!(
                    message,
                    "Received an error from the OpenAI API: internal server error"
                );
            }
            _ => panic!(
                "Expected UpstreamProviderError for upstream 500, got: {:?}",
                completion_error
            ),
        }

        // upstream_http_error with upstream 429 should surface as
        // UpstreamProviderError with the upstream status attached
        let error_body = r#"{"code":"upstream_http_error","message":"Received an error from the Google API: rate limit exceeded","upstream_status":429}"#;

        let api_error = ApiError {
            status: StatusCode::INTERNAL_SERVER_ERROR,
            body: error_body.to_string(),
            headers: HeaderMap::new(),
        };

        let completion_error: LanguageModelCompletionError = api_error.into();

        match completion_error {
            LanguageModelCompletionError::UpstreamProviderError { message, .. } => {
                assert_eq!(
                    message,
                    "Received an error from the Google API: rate limit exceeded"
                );
            }
            _ => panic!(
                "Expected UpstreamProviderError for upstream 429, got: {:?}",
                completion_error
            ),
        }

        // Regular 500 error without upstream_http_error should remain ApiInternalServerError for Zed
        let error_body = "Regular internal server error";

        let api_error = ApiError {
            status: StatusCode::INTERNAL_SERVER_ERROR,
            body: error_body.to_string(),
            headers: HeaderMap::new(),
        };

        let completion_error: LanguageModelCompletionError = api_error.into();

        match completion_error {
            LanguageModelCompletionError::ApiInternalServerError { provider, message } => {
                assert_eq!(provider, PROVIDER_NAME);
                assert_eq!(message, "Regular internal server error");
            }
            _ => panic!(
                "Expected ApiInternalServerError for regular 500, got: {:?}",
                completion_error
            ),
        }

        // upstream_http_429 format should be converted to UpstreamProviderError
        let error_body = r#"{"code":"upstream_http_429","message":"Upstream Anthropic rate limit exceeded.","retry_after":30.5}"#;

        let api_error = ApiError {
            status: StatusCode::INTERNAL_SERVER_ERROR,
            body: error_body.to_string(),
            headers: HeaderMap::new(),
        };

        let completion_error: LanguageModelCompletionError = api_error.into();

        match completion_error {
            LanguageModelCompletionError::UpstreamProviderError {
                message,
                status,
                retry_after,
            } => {
                assert_eq!(message, "Upstream Anthropic rate limit exceeded.");
                assert_eq!(status, StatusCode::TOO_MANY_REQUESTS);
                assert_eq!(retry_after, Some(Duration::from_secs_f64(30.5)));
            }
            _ => panic!(
                "Expected UpstreamProviderError for upstream_http_429, got: {:?}",
                completion_error
            ),
        }

        // Invalid JSON in error body should fall back to regular error handling
        let error_body = "Not JSON at all";

        let api_error = ApiError {
            status: StatusCode::INTERNAL_SERVER_ERROR,
            body: error_body.to_string(),
            headers: HeaderMap::new(),
        };

        let completion_error: LanguageModelCompletionError = api_error.into();

        match completion_error {
            LanguageModelCompletionError::ApiInternalServerError { provider, .. } => {
                assert_eq!(provider, PROVIDER_NAME);
            }
            _ => panic!(
                "Expected ApiInternalServerError for invalid JSON, got: {:?}",
                completion_error
            ),
        }
    }
}