use ai_onboarding::YoungAccountBanner;
use anthropic::AnthropicModelMode;
use anyhow::{Context as _, Result, anyhow};
use chrono::{DateTime, Utc};
use client::{Client, ModelRequestUsage, UserStore, zed_urls};
use cloud_llm_client::{
    CLIENT_SUPPORTS_STATUS_MESSAGES_HEADER_NAME, CLIENT_SUPPORTS_X_AI_HEADER_NAME,
    CURRENT_PLAN_HEADER_NAME, CompletionBody, CompletionEvent, CompletionRequestStatus,
    CountTokensBody, CountTokensResponse, EXPIRED_LLM_TOKEN_HEADER_NAME, ListModelsResponse,
    MODEL_REQUESTS_RESOURCE_HEADER_VALUE, Plan, PlanV1, PlanV2,
    SERVER_SUPPORTS_STATUS_MESSAGES_HEADER_NAME, SUBSCRIPTION_LIMIT_RESOURCE_HEADER_NAME,
    TOOL_USE_LIMIT_REACHED_HEADER_NAME, ZED_VERSION_HEADER_NAME,
};
use futures::{
    AsyncBufReadExt, FutureExt, Stream, StreamExt, future::BoxFuture, stream::BoxStream,
};
use google_ai::GoogleModelMode;
use gpui::{AnyElement, AnyView, App, AsyncApp, Context, Entity, Subscription, Task};
use http_client::http::{HeaderMap, HeaderValue};
use http_client::{AsyncBody, HttpClient, HttpRequestExt, Method, Response, StatusCode};
use language_model::{
    AuthenticateError, LanguageModel, LanguageModelCacheConfiguration,
    LanguageModelCompletionError, LanguageModelCompletionEvent, LanguageModelId, LanguageModelName,
    LanguageModelProvider, LanguageModelProviderId, LanguageModelProviderName,
    LanguageModelProviderState, LanguageModelRequest, LanguageModelToolChoice,
    LanguageModelToolSchemaFormat, LlmApiToken, ModelRequestLimitReachedError,
    PaymentRequiredError, RateLimiter, RefreshLlmTokenListener,
};
use release_channel::AppVersion;
use schemars::JsonSchema;
use semver::Version;
use serde::{Deserialize, Serialize, de::DeserializeOwned};
use settings::SettingsStore;
pub use settings::ZedDotDevAvailableModel as AvailableModel;
pub use settings::ZedDotDevAvailableProvider as AvailableProvider;
use smol::io::{AsyncReadExt, BufReader};
use std::pin::Pin;
use std::str::FromStr as _;
use std::sync::Arc;
use std::time::Duration;
use thiserror::Error;
use ui::{TintColor, prelude::*};
use util::{ResultExt as _, maybe};

use crate::provider::anthropic::{AnthropicEventMapper, count_anthropic_tokens, into_anthropic};
use crate::provider::google::{GoogleEventMapper, into_google};
use crate::provider::open_ai::{OpenAiEventMapper, count_open_ai_tokens, into_open_ai};
use crate::provider::x_ai::count_xai_tokens;

const PROVIDER_ID: LanguageModelProviderId = language_model::ZED_CLOUD_PROVIDER_ID;
const PROVIDER_NAME: LanguageModelProviderName = language_model::ZED_CLOUD_PROVIDER_NAME;

#[derive(Default, Clone, Debug, PartialEq)]
pub struct ZedDotDevSettings {
    pub available_models: Vec<AvailableModel>,
}
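
/// Selects between a model's default behavior and its extended "thinking" mode,
/// carrying an optional budget for reasoning tokens.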
#[derive(Default, Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema)]
#[serde(tag = "type", rename_all = "lowercase")]
pub enum ModelMode {
    #[default]
    Default,
    Thinking {
        /// The maximum number of tokens to use for reasoning. Must be lower than the model's `max_output_tokens`.
        budget_tokens: Option<u32>,
    },
}

impl From<ModelMode> for AnthropicModelMode {
    fn from(value: ModelMode) -> Self {
        match value {
            ModelMode::Default => AnthropicModelMode::Default,
            ModelMode::Thinking { budget_tokens } => AnthropicModelMode::Thinking { budget_tokens },
        }
    }
}

pub struct CloudLanguageModelProvider {
    client: Arc<Client>,
    state: Entity<State>,
    _maintain_client_status: Task<()>,
}

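/// Observable state shared by the cloud provider: the authenticated client,
/// the cached LLM API token, and the model lists fetched from the server.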
pub struct State {
    client: Arc<Client>,
    llm_api_token: LlmApiToken,
    user_store: Entity<UserStore>,
    status: client::Status,
    models: Vec<Arc<cloud_llm_client::LanguageModel>>,
    default_model: Option<Arc<cloud_llm_client::LanguageModel>>,
    default_fast_model: Option<Arc<cloud_llm_client::LanguageModel>>,
    recommended_models: Vec<Arc<cloud_llm_client::LanguageModel>>,
    _fetch_models_task: Task<()>,
    _settings_subscription: Subscription,
    _llm_token_subscription: Subscription,
}

impl State {
    fn new(
        client: Arc<Client>,
        user_store: Entity<UserStore>,
        status: client::Status,
        cx: &mut Context<Self>,
    ) -> Self {
        let refresh_llm_token_listener = RefreshLlmTokenListener::global(cx);
        let mut current_user = user_store.read(cx).watch_current_user();
        Self {
            client: client.clone(),
            llm_api_token: LlmApiToken::default(),
            user_store,
            status,
            models: Vec::new(),
            default_model: None,
            default_fast_model: None,
            recommended_models: Vec::new(),
            _fetch_models_task: cx.spawn(async move |this, cx| {
                maybe!(async move {
                    let (client, llm_api_token) = this
                        .read_with(cx, |this, _cx| (client.clone(), this.llm_api_token.clone()))?;

                    while current_user.borrow().is_none() {
                        current_user.next().await;
                    }

                    let response =
                        Self::fetch_models(client.clone(), llm_api_token.clone()).await?;
                    this.update(cx, |this, cx| this.update_models(response, cx))?;
                    anyhow::Ok(())
                })
                .await
                .context("failed to fetch Zed models")
                .log_err();
            }),
            _settings_subscription: cx.observe_global::<SettingsStore>(|_, cx| {
                cx.notify();
            }),
            _llm_token_subscription: cx.subscribe(
                &refresh_llm_token_listener,
                move |this, _listener, _event, cx| {
                    let client = this.client.clone();
                    let llm_api_token = this.llm_api_token.clone();
                    cx.spawn(async move |this, cx| {
                        llm_api_token.refresh(&client).await?;
                        let response = Self::fetch_models(client, llm_api_token).await?;
                        this.update(cx, |this, cx| {
                            this.update_models(response, cx);
                        })
                    })
                    .detach_and_log_err(cx);
                },
            ),
        }
    }

    fn is_signed_out(&self, cx: &App) -> bool {
        self.user_store.read(cx).current_user().is_none()
    }

    fn authenticate(&self, cx: &mut Context<Self>) -> Task<Result<()>> {
        let client = self.client.clone();
        cx.spawn(async move |state, cx| {
            client.sign_in_with_optional_connect(true, cx).await?;
            state.update(cx, |_, cx| cx.notify())
        })
    }
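
    /// Replaces the cached model lists from a `ListModelsResponse`, adding a
    /// synthetic "-thinking" variant for each model that supports thinking and
    /// resolving the default, fast, and recommended models.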
    fn update_models(&mut self, response: ListModelsResponse, cx: &mut Context<Self>) {
        let mut models = Vec::new();

        for model in response.models {
            models.push(Arc::new(model.clone()));

            // Right now we represent thinking variants of models as separate models on the client,
            // so we need to insert variants for any model that supports thinking.
            if model.supports_thinking {
                models.push(Arc::new(cloud_llm_client::LanguageModel {
                    id: cloud_llm_client::LanguageModelId(format!("{}-thinking", model.id).into()),
                    display_name: format!("{} Thinking", model.display_name),
                    ..model
                }));
            }
        }

        self.default_model = models
            .iter()
            .find(|model| {
                response
                    .default_model
                    .as_ref()
                    .is_some_and(|default_model_id| &model.id == default_model_id)
            })
            .cloned();
        self.default_fast_model = models
            .iter()
            .find(|model| {
                response
                    .default_fast_model
                    .as_ref()
                    .is_some_and(|default_fast_model_id| &model.id == default_fast_model_id)
            })
            .cloned();
        self.recommended_models = response
            .recommended_models
            .iter()
            .filter_map(|id| models.iter().find(|model| &model.id == id))
            .cloned()
            .collect();
        self.models = models;
        cx.notify();
    }

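    /// Fetches the list of available models from the cloud LLM service,
    /// authenticating with the current LLM API token.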
    async fn fetch_models(
        client: Arc<Client>,
        llm_api_token: LlmApiToken,
    ) -> Result<ListModelsResponse> {
        let http_client = &client.http_client();
        let token = llm_api_token.acquire(&client).await?;

        let request = http_client::Request::builder()
            .method(Method::GET)
            .header(CLIENT_SUPPORTS_X_AI_HEADER_NAME, "true")
            .uri(http_client.build_zed_llm_url("/models", &[])?.as_ref())
            .header("Authorization", format!("Bearer {token}"))
            .body(AsyncBody::empty())?;
        let mut response = http_client
            .send(request)
            .await
            .context("failed to send list models request")?;

        if response.status().is_success() {
            let mut body = String::new();
            response.body_mut().read_to_string(&mut body).await?;
            Ok(serde_json::from_str(&body)?)
        } else {
            let mut body = String::new();
            response.body_mut().read_to_string(&mut body).await?;
            anyhow::bail!(
                "error listing models.\nStatus: {:?}\nBody: {body}",
                response.status(),
            );
        }
    }
}

impl CloudLanguageModelProvider {
    pub fn new(user_store: Entity<UserStore>, client: Arc<Client>, cx: &mut App) -> Self {
        let mut status_rx = client.status();
        let status = *status_rx.borrow();

        let state = cx.new(|cx| State::new(client.clone(), user_store.clone(), status, cx));

        let state_ref = state.downgrade();
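        // Keep this provider's cached connection status in sync with the client,
        // notifying observers whenever it changes.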
        let maintain_client_status = cx.spawn(async move |cx| {
            while let Some(status) = status_rx.next().await {
                if let Some(this) = state_ref.upgrade() {
                    _ = this.update(cx, |this, cx| {
                        if this.status != status {
                            this.status = status;
                            cx.notify();
                        }
                    });
                } else {
                    break;
                }
            }
        });

        Self {
            client,
            state,
            _maintain_client_status: maintain_client_status,
        }
    }

    fn create_language_model(
        &self,
        model: Arc<cloud_llm_client::LanguageModel>,
        llm_api_token: LlmApiToken,
    ) -> Arc<dyn LanguageModel> {
        Arc::new(CloudLanguageModel {
            id: LanguageModelId(SharedString::from(model.id.0.clone())),
            model,
            llm_api_token,
            client: self.client.clone(),
            request_limiter: RateLimiter::new(4),
        })
    }
}

impl LanguageModelProviderState for CloudLanguageModelProvider {
    type ObservableEntity = State;

    fn observable_entity(&self) -> Option<Entity<Self::ObservableEntity>> {
        Some(self.state.clone())
    }
}

impl LanguageModelProvider for CloudLanguageModelProvider {
    fn id(&self) -> LanguageModelProviderId {
        PROVIDER_ID
    }

    fn name(&self) -> LanguageModelProviderName {
        PROVIDER_NAME
    }

    fn icon(&self) -> IconName {
        IconName::AiZed
    }

    fn default_model(&self, cx: &App) -> Option<Arc<dyn LanguageModel>> {
        let default_model = self.state.read(cx).default_model.clone()?;
        let llm_api_token = self.state.read(cx).llm_api_token.clone();
        Some(self.create_language_model(default_model, llm_api_token))
    }

    fn default_fast_model(&self, cx: &App) -> Option<Arc<dyn LanguageModel>> {
        let default_fast_model = self.state.read(cx).default_fast_model.clone()?;
        let llm_api_token = self.state.read(cx).llm_api_token.clone();
        Some(self.create_language_model(default_fast_model, llm_api_token))
    }

    fn recommended_models(&self, cx: &App) -> Vec<Arc<dyn LanguageModel>> {
        let llm_api_token = self.state.read(cx).llm_api_token.clone();
        self.state
            .read(cx)
            .recommended_models
            .iter()
            .cloned()
            .map(|model| self.create_language_model(model, llm_api_token.clone()))
            .collect()
    }

    fn provided_models(&self, cx: &App) -> Vec<Arc<dyn LanguageModel>> {
        let llm_api_token = self.state.read(cx).llm_api_token.clone();
        self.state
            .read(cx)
            .models
            .iter()
            .cloned()
            .map(|model| self.create_language_model(model, llm_api_token.clone()))
            .collect()
    }

    fn is_authenticated(&self, cx: &App) -> bool {
        let state = self.state.read(cx);
        !state.is_signed_out(cx)
    }

    fn authenticate(&self, _cx: &mut App) -> Task<Result<(), AuthenticateError>> {
        Task::ready(Ok(()))
    }

    fn configuration_view(
        &self,
        _target_agent: language_model::ConfigurationViewTargetAgent,
        _: &mut Window,
        cx: &mut App,
    ) -> AnyView {
        cx.new(|_| ConfigurationView::new(self.state.clone()))
            .into()
    }

    fn reset_credentials(&self, _cx: &mut App) -> Task<Result<()>> {
        Task::ready(Ok(()))
    }
}

pub struct CloudLanguageModel {
    id: LanguageModelId,
    model: Arc<cloud_llm_client::LanguageModel>,
    llm_api_token: LlmApiToken,
    client: Arc<Client>,
    request_limiter: RateLimiter,
}

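/// A successful response from the cloud completions endpoint, along with
/// metadata extracted from its response headers.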
struct PerformLlmCompletionResponse {
    response: Response<AsyncBody>,
    usage: Option<ModelRequestUsage>,
    tool_use_limit_reached: bool,
    includes_status_messages: bool,
}

impl CloudLanguageModel {
    async fn perform_llm_completion(
        client: Arc<Client>,
        llm_api_token: LlmApiToken,
        app_version: Option<Version>,
        body: CompletionBody,
    ) -> Result<PerformLlmCompletionResponse> {
        let http_client = &client.http_client();

        let mut token = llm_api_token.acquire(&client).await?;
        let mut refreshed_token = false;

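        // Issue the request, refreshing the LLM token and retrying once if the
        // server reports that the current token has expired.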
        loop {
            let request = http_client::Request::builder()
                .method(Method::POST)
                .uri(http_client.build_zed_llm_url("/completions", &[])?.as_ref())
                .when_some(app_version.as_ref(), |builder, app_version| {
                    builder.header(ZED_VERSION_HEADER_NAME, app_version.to_string())
                })
                .header("Content-Type", "application/json")
                .header("Authorization", format!("Bearer {token}"))
                .header(CLIENT_SUPPORTS_STATUS_MESSAGES_HEADER_NAME, "true")
                .body(serde_json::to_string(&body)?.into())?;

            let mut response = http_client.send(request).await?;
            let status = response.status();
            if status.is_success() {
                let includes_status_messages = response
                    .headers()
                    .get(SERVER_SUPPORTS_STATUS_MESSAGES_HEADER_NAME)
                    .is_some();

                let tool_use_limit_reached = response
                    .headers()
                    .get(TOOL_USE_LIMIT_REACHED_HEADER_NAME)
                    .is_some();

                let usage = if includes_status_messages {
                    None
                } else {
                    ModelRequestUsage::from_headers(response.headers()).ok()
                };

                return Ok(PerformLlmCompletionResponse {
                    response,
                    usage,
                    includes_status_messages,
                    tool_use_limit_reached,
                });
            }

            if !refreshed_token
                && response
                    .headers()
                    .get(EXPIRED_LLM_TOKEN_HEADER_NAME)
                    .is_some()
            {
                token = llm_api_token.refresh(&client).await?;
                refreshed_token = true;
                continue;
            }

            if status == StatusCode::FORBIDDEN
                && response
                    .headers()
                    .get(SUBSCRIPTION_LIMIT_RESOURCE_HEADER_NAME)
                    .is_some()
            {
                if let Some(MODEL_REQUESTS_RESOURCE_HEADER_VALUE) = response
                    .headers()
                    .get(SUBSCRIPTION_LIMIT_RESOURCE_HEADER_NAME)
                    .and_then(|resource| resource.to_str().ok())
                    && let Some(plan) = response
                        .headers()
                        .get(CURRENT_PLAN_HEADER_NAME)
                        .and_then(|plan| plan.to_str().ok())
                        .and_then(|plan| cloud_llm_client::PlanV1::from_str(plan).ok())
                        .map(Plan::V1)
                {
                    return Err(anyhow!(ModelRequestLimitReachedError { plan }));
                }
            } else if status == StatusCode::PAYMENT_REQUIRED {
                return Err(anyhow!(PaymentRequiredError));
            }

            let mut body = String::new();
            let headers = response.headers().clone();
            response.body_mut().read_to_string(&mut body).await?;
            return Err(anyhow!(ApiError {
                status,
                body,
                headers
            }));
        }
    }
}

#[derive(Debug, Error)]
#[error("cloud language model request failed with status {status}: {body}")]
struct ApiError {
    status: StatusCode,
    body: String,
    headers: HeaderMap<HeaderValue>,
}

/// Represents error responses from Zed's cloud API.
///
/// Example JSON for an upstream HTTP error:
/// ```json
/// {
///   "code": "upstream_http_error",
///   "message": "Received an error from the Anthropic API: upstream connect error or disconnect/reset before headers, reset reason: connection timeout",
///   "upstream_status": 503
/// }
/// ```
#[derive(Debug, serde::Deserialize)]
struct CloudApiError {
    code: String,
    message: String,
    #[serde(default)]
    #[serde(deserialize_with = "deserialize_optional_status_code")]
    upstream_status: Option<StatusCode>,
    #[serde(default)]
    retry_after: Option<f64>,
}

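/// Deserializes an optional numeric status code, discarding values that are not
/// valid HTTP status codes.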
fn deserialize_optional_status_code<'de, D>(deserializer: D) -> Result<Option<StatusCode>, D::Error>
where
    D: serde::Deserializer<'de>,
{
    let opt: Option<u16> = Option::deserialize(deserializer)?;
    Ok(opt.and_then(|code| StatusCode::from_u16(code).ok()))
}

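// Translate errors from the cloud API into `LanguageModelCompletionError`s,
// surfacing upstream provider failures (`upstream_http_*` codes) with their
// original status and retry-after hint where available.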
impl From<ApiError> for LanguageModelCompletionError {
    fn from(error: ApiError) -> Self {
        if let Ok(cloud_error) = serde_json::from_str::<CloudApiError>(&error.body) {
            if cloud_error.code.starts_with("upstream_http_") {
                let status = if let Some(status) = cloud_error.upstream_status {
                    status
                } else if cloud_error.code.ends_with("_error") {
                    error.status
                } else {
                    // The upstream status may be embedded in the code itself
                    // (e.g. "upstream_http_429"); parse it, falling back to the
                    // status of our own response.
                    cloud_error
                        .code
                        .strip_prefix("upstream_http_")
                        .and_then(|code_str| code_str.parse::<u16>().ok())
                        .and_then(|code| StatusCode::from_u16(code).ok())
                        .unwrap_or(error.status)
                };

                return LanguageModelCompletionError::UpstreamProviderError {
                    message: cloud_error.message,
                    status,
                    retry_after: cloud_error.retry_after.map(Duration::from_secs_f64),
                };
            }

            return LanguageModelCompletionError::from_http_status(
                PROVIDER_NAME,
                error.status,
                cloud_error.message,
                None,
            );
        }

        let retry_after = None;
        LanguageModelCompletionError::from_http_status(
            PROVIDER_NAME,
            error.status,
            error.body,
            retry_after,
        )
    }
}

impl LanguageModel for CloudLanguageModel {
    fn id(&self) -> LanguageModelId {
        self.id.clone()
    }

    fn name(&self) -> LanguageModelName {
        LanguageModelName::from(self.model.display_name.clone())
    }

    fn provider_id(&self) -> LanguageModelProviderId {
        PROVIDER_ID
    }

    fn provider_name(&self) -> LanguageModelProviderName {
        PROVIDER_NAME
    }

    fn upstream_provider_id(&self) -> LanguageModelProviderId {
        use cloud_llm_client::LanguageModelProvider::*;
        match self.model.provider {
            Anthropic => language_model::ANTHROPIC_PROVIDER_ID,
            OpenAi => language_model::OPEN_AI_PROVIDER_ID,
            Google => language_model::GOOGLE_PROVIDER_ID,
            XAi => language_model::X_AI_PROVIDER_ID,
        }
    }

    fn upstream_provider_name(&self) -> LanguageModelProviderName {
        use cloud_llm_client::LanguageModelProvider::*;
        match self.model.provider {
            Anthropic => language_model::ANTHROPIC_PROVIDER_NAME,
            OpenAi => language_model::OPEN_AI_PROVIDER_NAME,
            Google => language_model::GOOGLE_PROVIDER_NAME,
            XAi => language_model::X_AI_PROVIDER_NAME,
        }
    }

    fn supports_tools(&self) -> bool {
        self.model.supports_tools
    }

    fn supports_images(&self) -> bool {
        self.model.supports_images
    }

    fn supports_streaming_tools(&self) -> bool {
        self.model.supports_streaming_tools
    }

    fn supports_tool_choice(&self, choice: LanguageModelToolChoice) -> bool {
        match choice {
            LanguageModelToolChoice::Auto
            | LanguageModelToolChoice::Any
            | LanguageModelToolChoice::None => true,
        }
    }

    fn supports_burn_mode(&self) -> bool {
        self.model.supports_max_mode
    }

    fn telemetry_id(&self) -> String {
        format!("zed.dev/{}", self.model.id)
    }

    fn tool_input_format(&self) -> LanguageModelToolSchemaFormat {
        match self.model.provider {
            cloud_llm_client::LanguageModelProvider::Anthropic
            | cloud_llm_client::LanguageModelProvider::OpenAi
            | cloud_llm_client::LanguageModelProvider::XAi => {
                LanguageModelToolSchemaFormat::JsonSchema
            }
            cloud_llm_client::LanguageModelProvider::Google => {
                LanguageModelToolSchemaFormat::JsonSchemaSubset
            }
        }
    }

    fn max_token_count(&self) -> u64 {
        self.model.max_token_count as u64
    }

    fn max_token_count_in_burn_mode(&self) -> Option<u64> {
        self.model
            .max_token_count_in_max_mode
            .filter(|_| self.model.supports_max_mode)
            .map(|max_token_count| max_token_count as u64)
    }

    fn cache_configuration(&self) -> Option<LanguageModelCacheConfiguration> {
        match &self.model.provider {
            cloud_llm_client::LanguageModelProvider::Anthropic => {
                Some(LanguageModelCacheConfiguration {
                    min_total_token: 2_048,
                    should_speculate: true,
                    max_cache_anchors: 4,
                })
            }
            cloud_llm_client::LanguageModelProvider::OpenAi
            | cloud_llm_client::LanguageModelProvider::XAi
            | cloud_llm_client::LanguageModelProvider::Google => None,
        }
    }

    fn count_tokens(
        &self,
        request: LanguageModelRequest,
        cx: &App,
    ) -> BoxFuture<'static, Result<u64>> {
        match self.model.provider {
            cloud_llm_client::LanguageModelProvider::Anthropic => {
                count_anthropic_tokens(request, cx)
            }
            cloud_llm_client::LanguageModelProvider::OpenAi => {
                let model = match open_ai::Model::from_id(&self.model.id.0) {
                    Ok(model) => model,
                    Err(err) => return async move { Err(anyhow!(err)) }.boxed(),
                };
                count_open_ai_tokens(request, model, cx)
            }
            cloud_llm_client::LanguageModelProvider::XAi => {
                let model = match x_ai::Model::from_id(&self.model.id.0) {
                    Ok(model) => model,
                    Err(err) => return async move { Err(anyhow!(err)) }.boxed(),
                };
                count_xai_tokens(request, model, cx)
            }
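            // Google models are tokenized server-side: forward the converted
            // request to the cloud `/count_tokens` endpoint and return its count.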
            cloud_llm_client::LanguageModelProvider::Google => {
                let client = self.client.clone();
                let llm_api_token = self.llm_api_token.clone();
                let model_id = self.model.id.to_string();
                let generate_content_request =
                    into_google(request, model_id.clone(), GoogleModelMode::Default);
                async move {
                    let http_client = &client.http_client();
                    let token = llm_api_token.acquire(&client).await?;

                    let request_body = CountTokensBody {
                        provider: cloud_llm_client::LanguageModelProvider::Google,
                        model: model_id,
                        provider_request: serde_json::to_value(&google_ai::CountTokensRequest {
                            generate_content_request,
                        })?,
                    };
                    let request = http_client::Request::builder()
                        .method(Method::POST)
                        .uri(
                            http_client
                                .build_zed_llm_url("/count_tokens", &[])?
                                .as_ref(),
                        )
                        .header("Content-Type", "application/json")
                        .header("Authorization", format!("Bearer {token}"))
                        .body(serde_json::to_string(&request_body)?.into())?;
                    let mut response = http_client.send(request).await?;
                    let status = response.status();
                    let headers = response.headers().clone();
                    let mut response_body = String::new();
                    response
                        .body_mut()
                        .read_to_string(&mut response_body)
                        .await?;

                    if status.is_success() {
                        let response_body: CountTokensResponse =
                            serde_json::from_str(&response_body)?;

                        Ok(response_body.tokens as u64)
                    } else {
                        Err(anyhow!(ApiError {
                            status,
                            body: response_body,
                            headers
                        }))
                    }
                }
                .boxed()
            }
        }
    }

    fn stream_completion(
        &self,
        request: LanguageModelRequest,
        cx: &AsyncApp,
    ) -> BoxFuture<
        'static,
        Result<
            BoxStream<'static, Result<LanguageModelCompletionEvent, LanguageModelCompletionError>>,
            LanguageModelCompletionError,
        >,
    > {
        let thread_id = request.thread_id.clone();
        let prompt_id = request.prompt_id.clone();
        let intent = request.intent;
        let mode = request.mode;
        let app_version = cx.update(|cx| AppVersion::global(cx)).ok();
        let thinking_allowed = request.thinking_allowed;
        let provider_name = provider_name(&self.model.provider);
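        // Each arm converts the request into the upstream provider's wire format,
        // sends it through the cloud completions endpoint, and maps the streamed
        // events back into `LanguageModelCompletionEvent`s.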
        match self.model.provider {
            cloud_llm_client::LanguageModelProvider::Anthropic => {
                let request = into_anthropic(
                    request,
                    self.model.id.to_string(),
                    1.0,
                    self.model.max_output_tokens as u64,
                    if thinking_allowed && self.model.id.0.ends_with("-thinking") {
                        AnthropicModelMode::Thinking {
                            budget_tokens: Some(4_096),
                        }
                    } else {
                        AnthropicModelMode::Default
                    },
                );
                let client = self.client.clone();
                let llm_api_token = self.llm_api_token.clone();
                let future = self.request_limiter.stream(async move {
                    let PerformLlmCompletionResponse {
                        response,
                        usage,
                        includes_status_messages,
                        tool_use_limit_reached,
                    } = Self::perform_llm_completion(
                        client.clone(),
                        llm_api_token,
                        app_version,
                        CompletionBody {
                            thread_id,
                            prompt_id,
                            intent,
                            mode,
                            provider: cloud_llm_client::LanguageModelProvider::Anthropic,
                            model: request.model.clone(),
                            provider_request: serde_json::to_value(&request)
                                .map_err(|e| anyhow!(e))?,
                        },
                    )
                    .await
                    .map_err(|err| match err.downcast::<ApiError>() {
                        Ok(api_err) => anyhow!(LanguageModelCompletionError::from(api_err)),
                        Err(err) => anyhow!(err),
                    })?;

                    let mut mapper = AnthropicEventMapper::new();
                    Ok(map_cloud_completion_events(
                        Box::pin(
                            response_lines(response, includes_status_messages)
                                .chain(usage_updated_event(usage))
                                .chain(tool_use_limit_reached_event(tool_use_limit_reached)),
                        ),
                        &provider_name,
                        move |event| mapper.map_event(event),
                    ))
                });
                async move { Ok(future.await?.boxed()) }.boxed()
            }
            cloud_llm_client::LanguageModelProvider::OpenAi => {
                let client = self.client.clone();
                let request = into_open_ai(
                    request,
                    &self.model.id.0,
                    self.model.supports_parallel_tool_calls,
                    true,
                    None,
                    None,
                );
                let llm_api_token = self.llm_api_token.clone();
                let future = self.request_limiter.stream(async move {
                    let PerformLlmCompletionResponse {
                        response,
                        usage,
                        includes_status_messages,
                        tool_use_limit_reached,
                    } = Self::perform_llm_completion(
                        client.clone(),
                        llm_api_token,
                        app_version,
                        CompletionBody {
                            thread_id,
                            prompt_id,
                            intent,
                            mode,
                            provider: cloud_llm_client::LanguageModelProvider::OpenAi,
                            model: request.model.clone(),
                            provider_request: serde_json::to_value(&request)
                                .map_err(|e| anyhow!(e))?,
                        },
                    )
                    .await?;

                    let mut mapper = OpenAiEventMapper::new();
                    Ok(map_cloud_completion_events(
                        Box::pin(
                            response_lines(response, includes_status_messages)
                                .chain(usage_updated_event(usage))
                                .chain(tool_use_limit_reached_event(tool_use_limit_reached)),
                        ),
                        &provider_name,
                        move |event| mapper.map_event(event),
                    ))
                });
                async move { Ok(future.await?.boxed()) }.boxed()
            }
            cloud_llm_client::LanguageModelProvider::XAi => {
                let client = self.client.clone();
                let request = into_open_ai(
                    request,
                    &self.model.id.0,
                    self.model.supports_parallel_tool_calls,
                    false,
                    None,
                    None,
                );
                let llm_api_token = self.llm_api_token.clone();
                let future = self.request_limiter.stream(async move {
                    let PerformLlmCompletionResponse {
                        response,
                        usage,
                        includes_status_messages,
                        tool_use_limit_reached,
                    } = Self::perform_llm_completion(
                        client.clone(),
                        llm_api_token,
                        app_version,
                        CompletionBody {
                            thread_id,
                            prompt_id,
                            intent,
                            mode,
                            provider: cloud_llm_client::LanguageModelProvider::XAi,
                            model: request.model.clone(),
                            provider_request: serde_json::to_value(&request)
                                .map_err(|e| anyhow!(e))?,
                        },
                    )
                    .await?;

                    let mut mapper = OpenAiEventMapper::new();
                    Ok(map_cloud_completion_events(
                        Box::pin(
                            response_lines(response, includes_status_messages)
                                .chain(usage_updated_event(usage))
                                .chain(tool_use_limit_reached_event(tool_use_limit_reached)),
                        ),
                        &provider_name,
                        move |event| mapper.map_event(event),
                    ))
                });
                async move { Ok(future.await?.boxed()) }.boxed()
            }
            cloud_llm_client::LanguageModelProvider::Google => {
                let client = self.client.clone();
                let request =
                    into_google(request, self.model.id.to_string(), GoogleModelMode::Default);
                let llm_api_token = self.llm_api_token.clone();
                let future = self.request_limiter.stream(async move {
                    let PerformLlmCompletionResponse {
                        response,
                        usage,
                        includes_status_messages,
                        tool_use_limit_reached,
                    } = Self::perform_llm_completion(
                        client.clone(),
                        llm_api_token,
                        app_version,
                        CompletionBody {
                            thread_id,
                            prompt_id,
                            intent,
                            mode,
                            provider: cloud_llm_client::LanguageModelProvider::Google,
                            model: request.model.model_id.clone(),
                            provider_request: serde_json::to_value(&request)
                                .map_err(|e| anyhow!(e))?,
                        },
                    )
                    .await?;

                    let mut mapper = GoogleEventMapper::new();
                    Ok(map_cloud_completion_events(
                        Box::pin(
                            response_lines(response, includes_status_messages)
                                .chain(usage_updated_event(usage))
                                .chain(tool_use_limit_reached_event(tool_use_limit_reached)),
                        ),
                        &provider_name,
                        move |event| mapper.map_event(event),
                    ))
                });
                async move { Ok(future.await?.boxed()) }.boxed()
            }
        }
    }
}

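/// Adapts a raw stream of cloud completion events into language model completion
/// events: errors are converted, status events are handled generically, and
/// provider-specific events are passed to `map_callback`.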
fn map_cloud_completion_events<T, F>(
    stream: Pin<Box<dyn Stream<Item = Result<CompletionEvent<T>>> + Send>>,
    provider: &LanguageModelProviderName,
    mut map_callback: F,
) -> BoxStream<'static, Result<LanguageModelCompletionEvent, LanguageModelCompletionError>>
where
    T: DeserializeOwned + 'static,
    F: FnMut(T) -> Vec<Result<LanguageModelCompletionEvent, LanguageModelCompletionError>>
        + Send
        + 'static,
{
    let provider = provider.clone();
    stream
        .flat_map(move |event| {
            futures::stream::iter(match event {
                Err(error) => {
                    vec![Err(LanguageModelCompletionError::from(error))]
                }
                Ok(CompletionEvent::Status(event)) => {
                    vec![
                        LanguageModelCompletionEvent::from_completion_request_status(
                            event,
                            provider.clone(),
                        ),
                    ]
                }
                Ok(CompletionEvent::Event(event)) => map_callback(event),
            })
        })
        .boxed()
}

fn provider_name(provider: &cloud_llm_client::LanguageModelProvider) -> LanguageModelProviderName {
    match provider {
        cloud_llm_client::LanguageModelProvider::Anthropic => {
            language_model::ANTHROPIC_PROVIDER_NAME
        }
        cloud_llm_client::LanguageModelProvider::OpenAi => language_model::OPEN_AI_PROVIDER_NAME,
        cloud_llm_client::LanguageModelProvider::Google => language_model::GOOGLE_PROVIDER_NAME,
        cloud_llm_client::LanguageModelProvider::XAi => language_model::X_AI_PROVIDER_NAME,
    }
}

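/// Emits a single `UsageUpdated` status event when the completion response
/// carried usage information in its headers, and nothing otherwise.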
fn usage_updated_event<T>(
    usage: Option<ModelRequestUsage>,
) -> impl Stream<Item = Result<CompletionEvent<T>>> {
    futures::stream::iter(usage.map(|usage| {
        Ok(CompletionEvent::Status(
            CompletionRequestStatus::UsageUpdated {
                amount: usage.amount as usize,
                limit: usage.limit,
            },
        ))
    }))
}

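/// Emits a single `ToolUseLimitReached` status event when the corresponding
/// response header was present.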
fn tool_use_limit_reached_event<T>(
    tool_use_limit_reached: bool,
) -> impl Stream<Item = Result<CompletionEvent<T>>> {
    futures::stream::iter(tool_use_limit_reached.then(|| {
        Ok(CompletionEvent::Status(
            CompletionRequestStatus::ToolUseLimitReached,
        ))
    }))
}

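/// Reads the streaming response body line by line, parsing each line as a JSON
/// event. When the server supports status messages, lines are full
/// `CompletionEvent`s; otherwise each line is a bare provider event.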
fn response_lines<T: DeserializeOwned>(
    response: Response<AsyncBody>,
    includes_status_messages: bool,
) -> impl Stream<Item = Result<CompletionEvent<T>>> {
    futures::stream::try_unfold(
        (String::new(), BufReader::new(response.into_body())),
        move |(mut line, mut body)| async move {
            match body.read_line(&mut line).await {
                Ok(0) => Ok(None),
                Ok(_) => {
                    let event = if includes_status_messages {
                        serde_json::from_str::<CompletionEvent<T>>(&line)?
                    } else {
                        CompletionEvent::Event(serde_json::from_str::<T>(&line)?)
                    };

                    line.clear();
                    Ok(Some((event, (line, body))))
                }
                Err(e) => Err(e.into()),
            }
        },
    )
}

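/// The content rendered by the Zed AI configuration view: connection state, the
/// user's current plan, and the appropriate sign-in or subscription actions.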
#[derive(IntoElement, RegisterComponent)]
struct ZedAiConfiguration {
    is_connected: bool,
    plan: Option<Plan>,
    subscription_period: Option<(DateTime<Utc>, DateTime<Utc>)>,
    eligible_for_trial: bool,
    account_too_young: bool,
    sign_in_callback: Arc<dyn Fn(&mut Window, &mut App) + Send + Sync>,
}

impl RenderOnce for ZedAiConfiguration {
    fn render(self, _window: &mut Window, _cx: &mut App) -> impl IntoElement {
        let is_pro = self.plan.is_some_and(|plan| {
            matches!(plan, Plan::V1(PlanV1::ZedPro) | Plan::V2(PlanV2::ZedPro))
        });
        let subscription_text = match (self.plan, self.subscription_period) {
            (Some(Plan::V1(PlanV1::ZedPro) | Plan::V2(PlanV2::ZedPro)), Some(_)) => {
                "You have access to Zed's hosted models through your Pro subscription."
            }
            (Some(Plan::V1(PlanV1::ZedProTrial) | Plan::V2(PlanV2::ZedProTrial)), Some(_)) => {
                "You have access to Zed's hosted models through your Pro trial."
            }
            (Some(Plan::V1(PlanV1::ZedFree)), Some(_)) => {
                "You have basic access to Zed's hosted models through the Free plan."
            }
            (Some(Plan::V2(PlanV2::ZedFree)), Some(_)) => {
                if self.eligible_for_trial {
                    "Subscribe for access to Zed's hosted models. Start with a 14-day free trial."
                } else {
                    "Subscribe for access to Zed's hosted models."
                }
            }
            _ => {
                if self.eligible_for_trial {
                    "Subscribe for access to Zed's hosted models. Start with a 14-day free trial."
                } else {
                    "Subscribe for access to Zed's hosted models."
                }
            }
        };

        let manage_subscription_buttons = if is_pro {
            Button::new("manage_settings", "Manage Subscription")
                .full_width()
                .style(ButtonStyle::Tinted(TintColor::Accent))
                .on_click(|_, _, cx| cx.open_url(&zed_urls::account_url(cx)))
                .into_any_element()
        } else if self.plan.is_none() || self.eligible_for_trial {
            Button::new("start_trial", "Start 14-day Free Pro Trial")
                .full_width()
                .style(ui::ButtonStyle::Tinted(ui::TintColor::Accent))
                .on_click(|_, _, cx| cx.open_url(&zed_urls::start_trial_url(cx)))
                .into_any_element()
        } else {
            Button::new("upgrade", "Upgrade to Pro")
                .full_width()
                .style(ui::ButtonStyle::Tinted(ui::TintColor::Accent))
                .on_click(|_, _, cx| cx.open_url(&zed_urls::upgrade_to_zed_pro_url(cx)))
                .into_any_element()
        };

        if !self.is_connected {
            return v_flex()
                .gap_2()
                .child(Label::new("Sign in to have access to Zed's complete agentic experience with hosted models."))
                .child(
                    Button::new("sign_in", "Sign In to use Zed AI")
                        .icon_color(Color::Muted)
                        .icon(IconName::Github)
                        .icon_size(IconSize::Small)
                        .icon_position(IconPosition::Start)
                        .full_width()
                        .on_click({
                            let callback = self.sign_in_callback.clone();
                            move |_, window, cx| (callback)(window, cx)
                        }),
                );
        }

        v_flex().gap_2().w_full().map(|this| {
            if self.account_too_young {
                this.child(YoungAccountBanner).child(
                    Button::new("upgrade", "Upgrade to Pro")
                        .style(ui::ButtonStyle::Tinted(ui::TintColor::Accent))
                        .full_width()
                        .on_click(|_, _, cx| cx.open_url(&zed_urls::upgrade_to_zed_pro_url(cx))),
                )
            } else {
                this.text_sm()
                    .child(subscription_text)
                    .child(manage_subscription_buttons)
            }
        })
    }
}

struct ConfigurationView {
    state: Entity<State>,
    sign_in_callback: Arc<dyn Fn(&mut Window, &mut App) + Send + Sync>,
}

impl ConfigurationView {
    fn new(state: Entity<State>) -> Self {
        let sign_in_callback = Arc::new({
            let state = state.clone();
            move |_window: &mut Window, cx: &mut App| {
                state.update(cx, |state, cx| {
                    state.authenticate(cx).detach_and_log_err(cx);
                });
            }
        });

        Self {
            state,
            sign_in_callback,
        }
    }
}

impl Render for ConfigurationView {
    fn render(&mut self, _: &mut Window, cx: &mut Context<Self>) -> impl IntoElement {
        let state = self.state.read(cx);
        let user_store = state.user_store.read(cx);

        ZedAiConfiguration {
            is_connected: !state.is_signed_out(cx),
            plan: user_store.plan(),
            subscription_period: user_store.subscription_period(),
            eligible_for_trial: user_store.trial_started_at().is_none(),
            account_too_young: user_store.account_too_young(),
            sign_in_callback: self.sign_in_callback.clone(),
        }
    }
}

impl Component for ZedAiConfiguration {
    fn name() -> &'static str {
        "AI Configuration Content"
    }

    fn sort_name() -> &'static str {
        "AI Configuration Content"
    }

    fn scope() -> ComponentScope {
        ComponentScope::Onboarding
    }

    fn preview(_window: &mut Window, _cx: &mut App) -> Option<AnyElement> {
        fn configuration(
            is_connected: bool,
            plan: Option<Plan>,
            eligible_for_trial: bool,
            account_too_young: bool,
        ) -> AnyElement {
            ZedAiConfiguration {
                is_connected,
                plan,
                subscription_period: plan
                    .is_some()
                    .then(|| (Utc::now(), Utc::now() + chrono::Duration::days(7))),
                eligible_for_trial,
                account_too_young,
                sign_in_callback: Arc::new(|_, _| {}),
            }
            .into_any_element()
        }

        Some(
            v_flex()
                .p_4()
                .gap_4()
                .children(vec![
                    single_example("Not connected", configuration(false, None, false, false)),
                    single_example(
                        "Accept Terms of Service",
                        configuration(true, None, true, false),
                    ),
                    single_example(
                        "No Plan - Not eligible for trial",
                        configuration(true, None, false, false),
                    ),
                    single_example(
                        "No Plan - Eligible for trial",
                        configuration(true, None, true, false),
                    ),
                    single_example(
                        "Free Plan",
                        configuration(true, Some(Plan::V1(PlanV1::ZedFree)), true, false),
                    ),
                    single_example(
                        "Zed Pro Trial Plan",
                        configuration(true, Some(Plan::V1(PlanV1::ZedProTrial)), true, false),
                    ),
                    single_example(
                        "Zed Pro Plan",
                        configuration(true, Some(Plan::V1(PlanV1::ZedPro)), true, false),
                    ),
                ])
                .into_any_element(),
        )
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use http_client::http::{HeaderMap, StatusCode};
    use language_model::LanguageModelCompletionError;

    #[test]
    fn test_api_error_conversion_with_upstream_http_error() {
        // upstream_http_error with a 503 upstream status should surface as UpstreamProviderError
        let error_body = r#"{"code":"upstream_http_error","message":"Received an error from the Anthropic API: upstream connect error or disconnect/reset before headers, reset reason: connection timeout","upstream_status":503}"#;

        let api_error = ApiError {
            status: StatusCode::INTERNAL_SERVER_ERROR,
            body: error_body.to_string(),
            headers: HeaderMap::new(),
        };

        let completion_error: LanguageModelCompletionError = api_error.into();

        match completion_error {
            LanguageModelCompletionError::UpstreamProviderError { message, .. } => {
                assert_eq!(
                    message,
                    "Received an error from the Anthropic API: upstream connect error or disconnect/reset before headers, reset reason: connection timeout"
                );
            }
            _ => panic!(
                "Expected UpstreamProviderError for upstream 503, got: {:?}",
                completion_error
            ),
        }

        // upstream_http_error with a 500 upstream status should also surface as UpstreamProviderError
        let error_body = r#"{"code":"upstream_http_error","message":"Received an error from the OpenAI API: internal server error","upstream_status":500}"#;

        let api_error = ApiError {
            status: StatusCode::INTERNAL_SERVER_ERROR,
            body: error_body.to_string(),
            headers: HeaderMap::new(),
        };

        let completion_error: LanguageModelCompletionError = api_error.into();

        match completion_error {
            LanguageModelCompletionError::UpstreamProviderError { message, .. } => {
                assert_eq!(
                    message,
                    "Received an error from the OpenAI API: internal server error"
                );
            }
            _ => panic!(
                "Expected UpstreamProviderError for upstream 500, got: {:?}",
                completion_error
            ),
        }

        // upstream_http_error with a 429 upstream status should also surface as UpstreamProviderError
        let error_body = r#"{"code":"upstream_http_error","message":"Received an error from the Google API: rate limit exceeded","upstream_status":429}"#;

        let api_error = ApiError {
            status: StatusCode::INTERNAL_SERVER_ERROR,
            body: error_body.to_string(),
            headers: HeaderMap::new(),
        };

        let completion_error: LanguageModelCompletionError = api_error.into();

        match completion_error {
            LanguageModelCompletionError::UpstreamProviderError { message, .. } => {
                assert_eq!(
                    message,
                    "Received an error from the Google API: rate limit exceeded"
                );
            }
            _ => panic!(
                "Expected UpstreamProviderError for upstream 429, got: {:?}",
                completion_error
            ),
        }

        // Regular 500 error without upstream_http_error should remain ApiInternalServerError for Zed
        let error_body = "Regular internal server error";

        let api_error = ApiError {
            status: StatusCode::INTERNAL_SERVER_ERROR,
            body: error_body.to_string(),
            headers: HeaderMap::new(),
        };

        let completion_error: LanguageModelCompletionError = api_error.into();

        match completion_error {
            LanguageModelCompletionError::ApiInternalServerError { provider, message } => {
                assert_eq!(provider, PROVIDER_NAME);
                assert_eq!(message, "Regular internal server error");
            }
            _ => panic!(
                "Expected ApiInternalServerError for regular 500, got: {:?}",
                completion_error
            ),
        }

        // upstream_http_429 format should be converted to UpstreamProviderError
        let error_body = r#"{"code":"upstream_http_429","message":"Upstream Anthropic rate limit exceeded.","retry_after":30.5}"#;

        let api_error = ApiError {
            status: StatusCode::INTERNAL_SERVER_ERROR,
            body: error_body.to_string(),
            headers: HeaderMap::new(),
        };

        let completion_error: LanguageModelCompletionError = api_error.into();

        match completion_error {
            LanguageModelCompletionError::UpstreamProviderError {
                message,
                status,
                retry_after,
            } => {
                assert_eq!(message, "Upstream Anthropic rate limit exceeded.");
                assert_eq!(status, StatusCode::TOO_MANY_REQUESTS);
                assert_eq!(retry_after, Some(Duration::from_secs_f64(30.5)));
            }
            _ => panic!(
                "Expected UpstreamProviderError for upstream_http_429, got: {:?}",
                completion_error
            ),
        }

        // Invalid JSON in error body should fall back to regular error handling
        let error_body = "Not JSON at all";

        let api_error = ApiError {
            status: StatusCode::INTERNAL_SERVER_ERROR,
            body: error_body.to_string(),
            headers: HeaderMap::new(),
        };

        let completion_error: LanguageModelCompletionError = api_error.into();

        match completion_error {
            LanguageModelCompletionError::ApiInternalServerError { provider, .. } => {
                assert_eq!(provider, PROVIDER_NAME);
            }
            _ => panic!(
                "Expected ApiInternalServerError for invalid JSON, got: {:?}",
                completion_error
            ),
        }
    }
}