use ai_onboarding::YoungAccountBanner;
use anthropic::AnthropicModelMode;
use anyhow::{Context as _, Result, anyhow};
use chrono::{DateTime, Utc};
use client::{Client, UserStore, zed_urls};
use cloud_api_types::Plan;
use cloud_llm_client::{
    CLIENT_SUPPORTS_STATUS_MESSAGES_HEADER_NAME, CLIENT_SUPPORTS_X_AI_HEADER_NAME, CompletionBody,
    CompletionEvent, CountTokensBody, CountTokensResponse, ListModelsResponse,
    SERVER_SUPPORTS_STATUS_MESSAGES_HEADER_NAME, ZED_VERSION_HEADER_NAME,
};
use feature_flags::{CloudThinkingEffortFeatureFlag, FeatureFlagAppExt as _};
use futures::{
    AsyncBufReadExt, FutureExt, Stream, StreamExt, future::BoxFuture, stream::BoxStream,
};
use google_ai::GoogleModelMode;
use gpui::{AnyElement, AnyView, App, AsyncApp, Context, Entity, Subscription, Task};
use http_client::http::{HeaderMap, HeaderValue};
use http_client::{AsyncBody, HttpClient, HttpRequestExt, Method, Response, StatusCode};
use language_model::{
    AuthenticateError, IconOrSvg, LanguageModel, LanguageModelCacheConfiguration,
    LanguageModelCompletionError, LanguageModelCompletionEvent, LanguageModelEffortLevel,
    LanguageModelId, LanguageModelName, LanguageModelProvider, LanguageModelProviderId,
    LanguageModelProviderName, LanguageModelProviderState, LanguageModelRequest,
    LanguageModelToolChoice, LanguageModelToolSchemaFormat, LlmApiToken, NeedsLlmTokenRefresh,
    PaymentRequiredError, RateLimiter, RefreshLlmTokenListener,
};
use release_channel::AppVersion;
use schemars::JsonSchema;
use semver::Version;
use serde::{Deserialize, Serialize, de::DeserializeOwned};
use settings::SettingsStore;
pub use settings::ZedDotDevAvailableModel as AvailableModel;
pub use settings::ZedDotDevAvailableProvider as AvailableProvider;
use smol::io::{AsyncReadExt, BufReader};
use std::pin::Pin;
use std::str::FromStr;
use std::sync::Arc;
use std::time::Duration;
use thiserror::Error;
use ui::{TintColor, prelude::*};
use util::{ResultExt as _, maybe};

use crate::provider::anthropic::{
    AnthropicEventMapper, count_anthropic_tokens_with_tiktoken, into_anthropic,
};
use crate::provider::google::{GoogleEventMapper, into_google};
use crate::provider::open_ai::{
    OpenAiEventMapper, OpenAiResponseEventMapper, count_open_ai_tokens, into_open_ai,
    into_open_ai_response,
};
use crate::provider::x_ai::count_xai_tokens;

const PROVIDER_ID: LanguageModelProviderId = language_model::ZED_CLOUD_PROVIDER_ID;
const PROVIDER_NAME: LanguageModelProviderName = language_model::ZED_CLOUD_PROVIDER_NAME;

#[derive(Default, Clone, Debug, PartialEq)]
pub struct ZedDotDevSettings {
    pub available_models: Vec<AvailableModel>,
}
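
/// Selects how a hosted model runs: the provider's default behavior, or
/// extended "thinking" with an optional reasoning-token budget.
///
/// With `#[serde(tag = "type", rename_all = "lowercase")]`, a value round-trips
/// as, for example, `{"type":"default"}` or `{"type":"thinking","budget_tokens":4096}`.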
#[derive(Default, Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema)]
#[serde(tag = "type", rename_all = "lowercase")]
pub enum ModelMode {
    #[default]
    Default,
    Thinking {
        /// The maximum number of tokens to use for reasoning. Must be lower than the model's `max_output_tokens`.
        budget_tokens: Option<u32>,
    },
}

impl From<ModelMode> for AnthropicModelMode {
    fn from(value: ModelMode) -> Self {
        match value {
            ModelMode::Default => AnthropicModelMode::Default,
            ModelMode::Thinking { budget_tokens } => AnthropicModelMode::Thinking { budget_tokens },
        }
    }
}

pub struct CloudLanguageModelProvider {
    client: Arc<Client>,
    state: Entity<State>,
    _maintain_client_status: Task<()>,
}

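/// Shared state for the Zed cloud provider: the connected [`Client`], the LLM
/// API token, and the model lists most recently fetched from the server.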
pub struct State {
    client: Arc<Client>,
    llm_api_token: LlmApiToken,
    user_store: Entity<UserStore>,
    status: client::Status,
    models: Vec<Arc<cloud_llm_client::LanguageModel>>,
    default_model: Option<Arc<cloud_llm_client::LanguageModel>>,
    default_fast_model: Option<Arc<cloud_llm_client::LanguageModel>>,
    recommended_models: Vec<Arc<cloud_llm_client::LanguageModel>>,
    _fetch_models_task: Task<()>,
    _settings_subscription: Subscription,
    _llm_token_subscription: Subscription,
}

impl State {
    fn new(
        client: Arc<Client>,
        user_store: Entity<UserStore>,
        status: client::Status,
        cx: &mut Context<Self>,
    ) -> Self {
        let refresh_llm_token_listener = RefreshLlmTokenListener::global(cx);
        let mut current_user = user_store.read(cx).watch_current_user();
        Self {
            client: client.clone(),
            llm_api_token: LlmApiToken::default(),
            user_store,
            status,
            models: Vec::new(),
            default_model: None,
            default_fast_model: None,
            recommended_models: Vec::new(),
            _fetch_models_task: cx.spawn(async move |this, cx| {
                maybe!(async move {
                    let (client, llm_api_token) = this
                        .read_with(cx, |this, _cx| (client.clone(), this.llm_api_token.clone()))?;

                    while current_user.borrow().is_none() {
                        current_user.next().await;
                    }

                    let response =
                        Self::fetch_models(client.clone(), llm_api_token.clone()).await?;
                    this.update(cx, |this, cx| this.update_models(response, cx))?;
                    anyhow::Ok(())
                })
                .await
                .context("failed to fetch Zed models")
                .log_err();
            }),
            _settings_subscription: cx.observe_global::<SettingsStore>(|_, cx| {
                cx.notify();
            }),
            _llm_token_subscription: cx.subscribe(
                &refresh_llm_token_listener,
                move |this, _listener, _event, cx| {
                    let client = this.client.clone();
                    let llm_api_token = this.llm_api_token.clone();
                    cx.spawn(async move |this, cx| {
                        llm_api_token.refresh(&client).await?;
                        let response = Self::fetch_models(client, llm_api_token).await?;
                        this.update(cx, |this, cx| {
                            this.update_models(response, cx);
                        })
                    })
                    .detach_and_log_err(cx);
                },
            ),
        }
    }

    fn is_signed_out(&self, cx: &App) -> bool {
        self.user_store.read(cx).current_user().is_none()
    }

    fn authenticate(&self, cx: &mut Context<Self>) -> Task<Result<()>> {
        let client = self.client.clone();
        cx.spawn(async move |state, cx| {
            client.sign_in_with_optional_connect(true, cx).await?;
            state.update(cx, |_, cx| cx.notify())
        })
    }

    fn update_models(&mut self, response: ListModelsResponse, cx: &mut Context<Self>) {
        let is_thinking_effort_enabled = cx.has_flag::<CloudThinkingEffortFeatureFlag>();

        let mut models = Vec::new();

        for model in response.models {
            models.push(Arc::new(model.clone()));

            if !is_thinking_effort_enabled {
                // Right now we represent thinking variants of models as separate models on the client,
                // so we need to insert variants for any model that supports thinking.
                if model.supports_thinking {
                    models.push(Arc::new(cloud_llm_client::LanguageModel {
                        id: cloud_llm_client::LanguageModelId(
                            format!("{}-thinking", model.id).into(),
                        ),
                        display_name: format!("{} Thinking", model.display_name),
                        ..model
                    }));
                }
            }
        }

        self.default_model = models
            .iter()
            .find(|model| {
                response
                    .default_model
                    .as_ref()
                    .is_some_and(|default_model_id| &model.id == default_model_id)
            })
            .cloned();
        self.default_fast_model = models
            .iter()
            .find(|model| {
                response
                    .default_fast_model
                    .as_ref()
                    .is_some_and(|default_fast_model_id| &model.id == default_fast_model_id)
            })
            .cloned();
        self.recommended_models = response
            .recommended_models
            .iter()
            .filter_map(|id| models.iter().find(|model| &model.id == id))
            .cloned()
            .collect();
        self.models = models;
        cx.notify();
    }

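    /// Fetches the list of available models from the Zed LLM API
    /// (`GET /models`), authenticating with the current LLM token.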
    async fn fetch_models(
        client: Arc<Client>,
        llm_api_token: LlmApiToken,
    ) -> Result<ListModelsResponse> {
        let http_client = &client.http_client();
        let token = llm_api_token.acquire(&client).await?;

        let request = http_client::Request::builder()
            .method(Method::GET)
            .header(CLIENT_SUPPORTS_X_AI_HEADER_NAME, "true")
            .uri(http_client.build_zed_llm_url("/models", &[])?.as_ref())
            .header("Authorization", format!("Bearer {token}"))
            .body(AsyncBody::empty())?;
        let mut response = http_client
            .send(request)
            .await
            .context("failed to send list models request")?;

        if response.status().is_success() {
            let mut body = String::new();
            response.body_mut().read_to_string(&mut body).await?;
            Ok(serde_json::from_str(&body)?)
        } else {
            let mut body = String::new();
            response.body_mut().read_to_string(&mut body).await?;
            anyhow::bail!(
                "error listing models.\nStatus: {:?}\nBody: {body}",
                response.status(),
            );
        }
    }
}

impl CloudLanguageModelProvider {
    pub fn new(user_store: Entity<UserStore>, client: Arc<Client>, cx: &mut App) -> Self {
        let mut status_rx = client.status();
        let status = *status_rx.borrow();

        let state = cx.new(|cx| State::new(client.clone(), user_store.clone(), status, cx));

        let state_ref = state.downgrade();
        let maintain_client_status = cx.spawn(async move |cx| {
            while let Some(status) = status_rx.next().await {
                if let Some(this) = state_ref.upgrade() {
                    _ = this.update(cx, |this, cx| {
                        if this.status != status {
                            this.status = status;
                            cx.notify();
                        }
                    });
                } else {
                    break;
                }
            }
        });

        Self {
            client,
            state,
            _maintain_client_status: maintain_client_status,
        }
    }

    fn create_language_model(
        &self,
        model: Arc<cloud_llm_client::LanguageModel>,
        llm_api_token: LlmApiToken,
    ) -> Arc<dyn LanguageModel> {
        Arc::new(CloudLanguageModel {
            id: LanguageModelId(SharedString::from(model.id.0.clone())),
            model,
            llm_api_token,
            client: self.client.clone(),
            request_limiter: RateLimiter::new(4),
        })
    }
}

impl LanguageModelProviderState for CloudLanguageModelProvider {
    type ObservableEntity = State;

    fn observable_entity(&self) -> Option<Entity<Self::ObservableEntity>> {
        Some(self.state.clone())
    }
}

impl LanguageModelProvider for CloudLanguageModelProvider {
    fn id(&self) -> LanguageModelProviderId {
        PROVIDER_ID
    }

    fn name(&self) -> LanguageModelProviderName {
        PROVIDER_NAME
    }

    fn icon(&self) -> IconOrSvg {
        IconOrSvg::Icon(IconName::AiZed)
    }

    fn default_model(&self, cx: &App) -> Option<Arc<dyn LanguageModel>> {
        let default_model = self.state.read(cx).default_model.clone()?;
        let llm_api_token = self.state.read(cx).llm_api_token.clone();
        Some(self.create_language_model(default_model, llm_api_token))
    }

    fn default_fast_model(&self, cx: &App) -> Option<Arc<dyn LanguageModel>> {
        let default_fast_model = self.state.read(cx).default_fast_model.clone()?;
        let llm_api_token = self.state.read(cx).llm_api_token.clone();
        Some(self.create_language_model(default_fast_model, llm_api_token))
    }

    fn recommended_models(&self, cx: &App) -> Vec<Arc<dyn LanguageModel>> {
        let llm_api_token = self.state.read(cx).llm_api_token.clone();
        self.state
            .read(cx)
            .recommended_models
            .iter()
            .cloned()
            .map(|model| self.create_language_model(model, llm_api_token.clone()))
            .collect()
    }

    fn provided_models(&self, cx: &App) -> Vec<Arc<dyn LanguageModel>> {
        let llm_api_token = self.state.read(cx).llm_api_token.clone();
        self.state
            .read(cx)
            .models
            .iter()
            .cloned()
            .map(|model| self.create_language_model(model, llm_api_token.clone()))
            .collect()
    }

    fn is_authenticated(&self, cx: &App) -> bool {
        let state = self.state.read(cx);
        !state.is_signed_out(cx)
    }

    fn authenticate(&self, _cx: &mut App) -> Task<Result<(), AuthenticateError>> {
        Task::ready(Ok(()))
    }

    fn configuration_view(
        &self,
        _target_agent: language_model::ConfigurationViewTargetAgent,
        _: &mut Window,
        cx: &mut App,
    ) -> AnyView {
        cx.new(|_| ConfigurationView::new(self.state.clone()))
            .into()
    }

    fn reset_credentials(&self, _cx: &mut App) -> Task<Result<()>> {
        Task::ready(Ok(()))
    }
}

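/// A single Zed-hosted model. Completion requests are funneled through
/// `request_limiter`, which bounds the number of concurrent in-flight requests.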
pub struct CloudLanguageModel {
    id: LanguageModelId,
    model: Arc<cloud_llm_client::LanguageModel>,
    llm_api_token: LlmApiToken,
    client: Arc<Client>,
    request_limiter: RateLimiter,
}

struct PerformLlmCompletionResponse {
    response: Response<AsyncBody>,
    includes_status_messages: bool,
}

impl CloudLanguageModel {
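    /// Sends a completion request to the Zed LLM API (`POST /completions`).
    ///
    /// If the server indicates that the LLM token needs refreshing, the token
    /// is refreshed and the request is retried once before giving up.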
    async fn perform_llm_completion(
        client: Arc<Client>,
        llm_api_token: LlmApiToken,
        app_version: Option<Version>,
        body: CompletionBody,
    ) -> Result<PerformLlmCompletionResponse> {
        let http_client = &client.http_client();

        let mut token = llm_api_token.acquire(&client).await?;
        let mut refreshed_token = false;

        loop {
            let request = http_client::Request::builder()
                .method(Method::POST)
                .uri(http_client.build_zed_llm_url("/completions", &[])?.as_ref())
                .when_some(app_version.as_ref(), |builder, app_version| {
                    builder.header(ZED_VERSION_HEADER_NAME, app_version.to_string())
                })
                .header("Content-Type", "application/json")
                .header("Authorization", format!("Bearer {token}"))
                .header(CLIENT_SUPPORTS_STATUS_MESSAGES_HEADER_NAME, "true")
                .body(serde_json::to_string(&body)?.into())?;

            let mut response = http_client.send(request).await?;
            let status = response.status();
            if status.is_success() {
                let includes_status_messages = response
                    .headers()
                    .get(SERVER_SUPPORTS_STATUS_MESSAGES_HEADER_NAME)
                    .is_some();

                return Ok(PerformLlmCompletionResponse {
                    response,
                    includes_status_messages,
                });
            }

            if !refreshed_token && response.needs_llm_token_refresh() {
                token = llm_api_token.refresh(&client).await?;
                refreshed_token = true;
                continue;
            }

            if status == StatusCode::PAYMENT_REQUIRED {
                return Err(anyhow!(PaymentRequiredError));
            }

            let mut body = String::new();
            let headers = response.headers().clone();
            response.body_mut().read_to_string(&mut body).await?;
            return Err(anyhow!(ApiError {
                status,
                body,
                headers
            }));
        }
    }
}

#[derive(Debug, Error)]
#[error("cloud language model request failed with status {status}: {body}")]
struct ApiError {
    status: StatusCode,
    body: String,
    headers: HeaderMap<HeaderValue>,
}

/// Represents error responses from Zed's cloud API.
///
/// Example JSON for an upstream HTTP error:
/// ```json
/// {
///   "code": "upstream_http_error",
///   "message": "Received an error from the Anthropic API: upstream connect error or disconnect/reset before headers, reset reason: connection timeout",
///   "upstream_status": 503
/// }
/// ```
#[derive(Debug, serde::Deserialize)]
struct CloudApiError {
    code: String,
    message: String,
    #[serde(default)]
    #[serde(deserialize_with = "deserialize_optional_status_code")]
    upstream_status: Option<StatusCode>,
    #[serde(default)]
    retry_after: Option<f64>,
}

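/// Deserializes an optional numeric HTTP status code, mapping values that are
/// not valid status codes (e.g. `0`) to `None` instead of failing.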
fn deserialize_optional_status_code<'de, D>(deserializer: D) -> Result<Option<StatusCode>, D::Error>
where
    D: serde::Deserializer<'de>,
{
    let opt: Option<u16> = Option::deserialize(deserializer)?;
    Ok(opt.and_then(|code| StatusCode::from_u16(code).ok()))
}

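// Maps an `ApiError` to a `LanguageModelCompletionError`. Errors whose bodies
// parse as `CloudApiError` with an `upstream_http_*` code are surfaced as
// upstream provider errors; everything else is classified by HTTP status.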
impl From<ApiError> for LanguageModelCompletionError {
    fn from(error: ApiError) -> Self {
        if let Ok(cloud_error) = serde_json::from_str::<CloudApiError>(&error.body) {
            if cloud_error.code.starts_with("upstream_http_") {
                let status = if let Some(status) = cloud_error.upstream_status {
                    status
                } else if cloud_error.code.ends_with("_error") {
                    error.status
                } else {
                    // The status code may be embedded in the code string itself
                    // (e.g. "upstream_http_429"); parse it out, falling back to
                    // the response's own status code.
                    cloud_error
                        .code
                        .strip_prefix("upstream_http_")
                        .and_then(|code_str| code_str.parse::<u16>().ok())
                        .and_then(|code| StatusCode::from_u16(code).ok())
                        .unwrap_or(error.status)
                };

                return LanguageModelCompletionError::UpstreamProviderError {
                    message: cloud_error.message,
                    status,
                    retry_after: cloud_error.retry_after.map(Duration::from_secs_f64),
                };
            }

            return LanguageModelCompletionError::from_http_status(
                PROVIDER_NAME,
                error.status,
                cloud_error.message,
                None,
            );
        }

        let retry_after = None;
        LanguageModelCompletionError::from_http_status(
            PROVIDER_NAME,
            error.status,
            error.body,
            retry_after,
        )
    }
}

impl LanguageModel for CloudLanguageModel {
    fn id(&self) -> LanguageModelId {
        self.id.clone()
    }

    fn name(&self) -> LanguageModelName {
        LanguageModelName::from(self.model.display_name.clone())
    }

    fn provider_id(&self) -> LanguageModelProviderId {
        PROVIDER_ID
    }

    fn provider_name(&self) -> LanguageModelProviderName {
        PROVIDER_NAME
    }

    fn upstream_provider_id(&self) -> LanguageModelProviderId {
        use cloud_llm_client::LanguageModelProvider::*;
        match self.model.provider {
            Anthropic => language_model::ANTHROPIC_PROVIDER_ID,
            OpenAi => language_model::OPEN_AI_PROVIDER_ID,
            Google => language_model::GOOGLE_PROVIDER_ID,
            XAi => language_model::X_AI_PROVIDER_ID,
        }
    }

    fn upstream_provider_name(&self) -> LanguageModelProviderName {
        use cloud_llm_client::LanguageModelProvider::*;
        match self.model.provider {
            Anthropic => language_model::ANTHROPIC_PROVIDER_NAME,
            OpenAi => language_model::OPEN_AI_PROVIDER_NAME,
            Google => language_model::GOOGLE_PROVIDER_NAME,
            XAi => language_model::X_AI_PROVIDER_NAME,
        }
    }

    fn is_latest(&self) -> bool {
        self.model.is_latest
    }

    fn supports_tools(&self) -> bool {
        self.model.supports_tools
    }

    fn supports_images(&self) -> bool {
        self.model.supports_images
    }

    fn supports_thinking(&self) -> bool {
        self.model.supports_thinking
    }

    fn supported_effort_levels(&self) -> Vec<LanguageModelEffortLevel> {
        self.model
            .supported_effort_levels
            .iter()
            .map(|effort_level| LanguageModelEffortLevel {
                name: effort_level.name.clone().into(),
                value: effort_level.value.clone().into(),
                is_default: effort_level.is_default.unwrap_or(false),
            })
            .collect()
    }

    fn supports_streaming_tools(&self) -> bool {
        self.model.supports_streaming_tools
    }

    fn supports_tool_choice(&self, choice: LanguageModelToolChoice) -> bool {
        match choice {
            LanguageModelToolChoice::Auto
            | LanguageModelToolChoice::Any
            | LanguageModelToolChoice::None => true,
        }
    }

    fn supports_split_token_display(&self) -> bool {
        use cloud_llm_client::LanguageModelProvider::*;
        matches!(self.model.provider, OpenAi)
    }

    fn telemetry_id(&self) -> String {
        format!("zed.dev/{}", self.model.id)
    }

    fn tool_input_format(&self) -> LanguageModelToolSchemaFormat {
        match self.model.provider {
            cloud_llm_client::LanguageModelProvider::Anthropic
            | cloud_llm_client::LanguageModelProvider::OpenAi
            | cloud_llm_client::LanguageModelProvider::XAi => {
                LanguageModelToolSchemaFormat::JsonSchema
            }
            cloud_llm_client::LanguageModelProvider::Google => {
                LanguageModelToolSchemaFormat::JsonSchemaSubset
            }
        }
    }

    fn max_token_count(&self) -> u64 {
        self.model.max_token_count as u64
    }

    fn max_output_tokens(&self) -> Option<u64> {
        Some(self.model.max_output_tokens as u64)
    }

    fn cache_configuration(&self) -> Option<LanguageModelCacheConfiguration> {
        match &self.model.provider {
            cloud_llm_client::LanguageModelProvider::Anthropic => {
                Some(LanguageModelCacheConfiguration {
                    min_total_token: 2_048,
                    should_speculate: true,
                    max_cache_anchors: 4,
                })
            }
            cloud_llm_client::LanguageModelProvider::OpenAi
            | cloud_llm_client::LanguageModelProvider::XAi
            | cloud_llm_client::LanguageModelProvider::Google => None,
        }
    }

    fn count_tokens(
        &self,
        request: LanguageModelRequest,
        cx: &App,
    ) -> BoxFuture<'static, Result<u64>> {
        match self.model.provider {
            cloud_llm_client::LanguageModelProvider::Anthropic => cx
                .background_spawn(async move { count_anthropic_tokens_with_tiktoken(request) })
                .boxed(),
            cloud_llm_client::LanguageModelProvider::OpenAi => {
                let model = match open_ai::Model::from_id(&self.model.id.0) {
                    Ok(model) => model,
                    Err(err) => return async move { Err(anyhow!(err)) }.boxed(),
                };
                count_open_ai_tokens(request, model, cx)
            }
            cloud_llm_client::LanguageModelProvider::XAi => {
                let model = match x_ai::Model::from_id(&self.model.id.0) {
                    Ok(model) => model,
                    Err(err) => return async move { Err(anyhow!(err)) }.boxed(),
                };
                count_xai_tokens(request, model, cx)
            }
            cloud_llm_client::LanguageModelProvider::Google => {
                let client = self.client.clone();
                let llm_api_token = self.llm_api_token.clone();
                let model_id = self.model.id.to_string();
                let generate_content_request =
                    into_google(request, model_id.clone(), GoogleModelMode::Default);
                async move {
                    let http_client = &client.http_client();
                    let token = llm_api_token.acquire(&client).await?;

                    let request_body = CountTokensBody {
                        provider: cloud_llm_client::LanguageModelProvider::Google,
                        model: model_id,
                        provider_request: serde_json::to_value(&google_ai::CountTokensRequest {
                            generate_content_request,
                        })?,
                    };
                    let request = http_client::Request::builder()
                        .method(Method::POST)
                        .uri(
                            http_client
                                .build_zed_llm_url("/count_tokens", &[])?
                                .as_ref(),
                        )
                        .header("Content-Type", "application/json")
                        .header("Authorization", format!("Bearer {token}"))
                        .body(serde_json::to_string(&request_body)?.into())?;
                    let mut response = http_client.send(request).await?;
                    let status = response.status();
                    let headers = response.headers().clone();
                    let mut response_body = String::new();
                    response
                        .body_mut()
                        .read_to_string(&mut response_body)
                        .await?;

                    if status.is_success() {
                        let response_body: CountTokensResponse =
                            serde_json::from_str(&response_body)?;

                        Ok(response_body.tokens as u64)
                    } else {
                        Err(anyhow!(ApiError {
                            status,
                            body: response_body,
                            headers
                        }))
                    }
                }
                .boxed()
            }
        }
    }

    fn stream_completion(
        &self,
        request: LanguageModelRequest,
        cx: &AsyncApp,
    ) -> BoxFuture<
        'static,
        Result<
            BoxStream<'static, Result<LanguageModelCompletionEvent, LanguageModelCompletionError>>,
            LanguageModelCompletionError,
        >,
    > {
        let thread_id = request.thread_id.clone();
        let prompt_id = request.prompt_id.clone();
        let intent = request.intent;
        // `AsyncApp::update` returns a `Result`; treat failures as "unavailable".
        let app_version = cx.update(|cx| AppVersion::global(cx)).ok();
        let thinking_allowed = request.thinking_allowed;
        let is_thinking_effort_enabled = cx
            .update(|cx| cx.has_flag::<CloudThinkingEffortFeatureFlag>())
            .unwrap_or(false);
        let enable_thinking = if is_thinking_effort_enabled {
            thinking_allowed && self.model.supports_thinking
        } else {
            thinking_allowed && self.model.id.0.ends_with("-thinking")
        };
        let provider_name = provider_name(&self.model.provider);
        match self.model.provider {
            cloud_llm_client::LanguageModelProvider::Anthropic => {
                let effort = request
                    .thinking_effort
                    .as_ref()
                    .and_then(|effort| anthropic::Effort::from_str(effort).ok());

                let mut request = into_anthropic(
                    request,
                    self.model.id.to_string(),
                    1.0,
                    self.model.max_output_tokens as u64,
                    if enable_thinking {
                        AnthropicModelMode::Thinking {
                            budget_tokens: Some(4_096),
                        }
                    } else {
                        AnthropicModelMode::Default
                    },
                );

                if enable_thinking && effort.is_some() {
                    request.thinking = Some(anthropic::Thinking::Adaptive);
                    request.output_config = Some(anthropic::OutputConfig { effort });
                }

                let client = self.client.clone();
                let llm_api_token = self.llm_api_token.clone();
                let future = self.request_limiter.stream(async move {
                    let PerformLlmCompletionResponse {
                        response,
                        includes_status_messages,
                    } = Self::perform_llm_completion(
                        client.clone(),
                        llm_api_token,
                        app_version,
                        CompletionBody {
                            thread_id,
                            prompt_id,
                            intent,
                            provider: cloud_llm_client::LanguageModelProvider::Anthropic,
                            model: request.model.clone(),
                            provider_request: serde_json::to_value(&request)
                                .map_err(|e| anyhow!(e))?,
                        },
                    )
                    .await
                    .map_err(|err| match err.downcast::<ApiError>() {
                        Ok(api_err) => anyhow!(LanguageModelCompletionError::from(api_err)),
                        Err(err) => anyhow!(err),
                    })?;

                    let mut mapper = AnthropicEventMapper::new();
                    Ok(map_cloud_completion_events(
                        Box::pin(response_lines(response, includes_status_messages)),
                        &provider_name,
                        move |event| mapper.map_event(event),
                    ))
                });
                async move { Ok(future.await?.boxed()) }.boxed()
            }
            cloud_llm_client::LanguageModelProvider::OpenAi => {
                let client = self.client.clone();
                let llm_api_token = self.llm_api_token.clone();
                let effort = request
                    .thinking_effort
                    .as_ref()
                    .and_then(|effort| open_ai::ReasoningEffort::from_str(effort).ok());

                let mut request = into_open_ai_response(
                    request,
                    &self.model.id.0,
                    self.model.supports_parallel_tool_calls,
                    true,
                    None,
                    None,
                );

                if enable_thinking && let Some(effort) = effort {
                    request.reasoning = Some(open_ai::responses::ReasoningConfig { effort });
                }

                let future = self.request_limiter.stream(async move {
                    let PerformLlmCompletionResponse {
                        response,
                        includes_status_messages,
                    } = Self::perform_llm_completion(
                        client.clone(),
                        llm_api_token,
                        app_version,
                        CompletionBody {
                            thread_id,
                            prompt_id,
                            intent,
                            provider: cloud_llm_client::LanguageModelProvider::OpenAi,
                            model: request.model.clone(),
                            provider_request: serde_json::to_value(&request)
                                .map_err(|e| anyhow!(e))?,
                        },
                    )
                    .await?;

                    let mut mapper = OpenAiResponseEventMapper::new();
                    Ok(map_cloud_completion_events(
                        Box::pin(response_lines(response, includes_status_messages)),
                        &provider_name,
                        move |event| mapper.map_event(event),
                    ))
                });
                async move { Ok(future.await?.boxed()) }.boxed()
            }
            cloud_llm_client::LanguageModelProvider::XAi => {
                let client = self.client.clone();
                let request = into_open_ai(
                    request,
                    &self.model.id.0,
                    self.model.supports_parallel_tool_calls,
                    false,
                    None,
                    None,
                );
                let llm_api_token = self.llm_api_token.clone();
                let future = self.request_limiter.stream(async move {
                    let PerformLlmCompletionResponse {
                        response,
                        includes_status_messages,
                    } = Self::perform_llm_completion(
                        client.clone(),
                        llm_api_token,
                        app_version,
                        CompletionBody {
                            thread_id,
                            prompt_id,
                            intent,
                            provider: cloud_llm_client::LanguageModelProvider::XAi,
                            model: request.model.clone(),
                            provider_request: serde_json::to_value(&request)
                                .map_err(|e| anyhow!(e))?,
                        },
                    )
                    .await?;

                    let mut mapper = OpenAiEventMapper::new();
                    Ok(map_cloud_completion_events(
                        Box::pin(response_lines(response, includes_status_messages)),
                        &provider_name,
                        move |event| mapper.map_event(event),
                    ))
                });
                async move { Ok(future.await?.boxed()) }.boxed()
            }
            cloud_llm_client::LanguageModelProvider::Google => {
                let client = self.client.clone();
                let request =
                    into_google(request, self.model.id.to_string(), GoogleModelMode::Default);
                let llm_api_token = self.llm_api_token.clone();
                let future = self.request_limiter.stream(async move {
                    let PerformLlmCompletionResponse {
                        response,
                        includes_status_messages,
                    } = Self::perform_llm_completion(
                        client.clone(),
                        llm_api_token,
                        app_version,
                        CompletionBody {
                            thread_id,
                            prompt_id,
                            intent,
                            provider: cloud_llm_client::LanguageModelProvider::Google,
                            model: request.model.model_id.clone(),
                            provider_request: serde_json::to_value(&request)
                                .map_err(|e| anyhow!(e))?,
                        },
                    )
                    .await?;

                    let mut mapper = GoogleEventMapper::new();
                    Ok(map_cloud_completion_events(
                        Box::pin(response_lines(response, includes_status_messages)),
                        &provider_name,
                        move |event| mapper.map_event(event),
                    ))
                });
                async move { Ok(future.await?.boxed()) }.boxed()
            }
        }
    }
}

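/// Adapts a stream of cloud completion events into `LanguageModelCompletionEvent`s:
/// status messages from the Zed server are handled here, while provider-specific
/// events are forwarded to `map_callback` (one of the per-provider event mappers).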
fn map_cloud_completion_events<T, F>(
    stream: Pin<Box<dyn Stream<Item = Result<CompletionEvent<T>>> + Send>>,
    provider: &LanguageModelProviderName,
    mut map_callback: F,
) -> BoxStream<'static, Result<LanguageModelCompletionEvent, LanguageModelCompletionError>>
where
    T: DeserializeOwned + 'static,
    F: FnMut(T) -> Vec<Result<LanguageModelCompletionEvent, LanguageModelCompletionError>>
        + Send
        + 'static,
{
    let provider = provider.clone();
    stream
        .flat_map(move |event| {
            futures::stream::iter(match event {
                Err(error) => {
                    vec![Err(LanguageModelCompletionError::from(error))]
                }
                Ok(CompletionEvent::Status(event)) => {
                    vec![
                        LanguageModelCompletionEvent::from_completion_request_status(
                            event,
                            provider.clone(),
                        ),
                    ]
                }
                Ok(CompletionEvent::Event(event)) => map_callback(event),
            })
        })
        .boxed()
}

fn provider_name(provider: &cloud_llm_client::LanguageModelProvider) -> LanguageModelProviderName {
    match provider {
        cloud_llm_client::LanguageModelProvider::Anthropic => {
            language_model::ANTHROPIC_PROVIDER_NAME
        }
        cloud_llm_client::LanguageModelProvider::OpenAi => language_model::OPEN_AI_PROVIDER_NAME,
        cloud_llm_client::LanguageModelProvider::Google => language_model::GOOGLE_PROVIDER_NAME,
        cloud_llm_client::LanguageModelProvider::XAi => language_model::X_AI_PROVIDER_NAME,
    }
}

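/// Parses the streamed response body as newline-delimited JSON, one event per
/// line. When the server advertised status-message support, each line is a
/// `CompletionEvent` envelope; otherwise each line is a bare provider event.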
fn response_lines<T: DeserializeOwned>(
    response: Response<AsyncBody>,
    includes_status_messages: bool,
) -> impl Stream<Item = Result<CompletionEvent<T>>> {
    futures::stream::try_unfold(
        (String::new(), BufReader::new(response.into_body())),
        move |(mut line, mut body)| async move {
            match body.read_line(&mut line).await {
                Ok(0) => Ok(None),
                Ok(_) => {
                    let event = if includes_status_messages {
                        serde_json::from_str::<CompletionEvent<T>>(&line)?
                    } else {
                        CompletionEvent::Event(serde_json::from_str::<T>(&line)?)
                    };

                    line.clear();
                    Ok(Some((event, (line, body))))
                }
                Err(e) => Err(e.into()),
            }
        },
    )
}

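/// Renders the Zed AI configuration panel: a sign-in prompt while disconnected,
/// otherwise the current subscription status with trial/upgrade/manage buttons.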
#[derive(IntoElement, RegisterComponent)]
struct ZedAiConfiguration {
    is_connected: bool,
    plan: Option<Plan>,
    subscription_period: Option<(DateTime<Utc>, DateTime<Utc>)>,
    eligible_for_trial: bool,
    account_too_young: bool,
    sign_in_callback: Arc<dyn Fn(&mut Window, &mut App) + Send + Sync>,
}

impl RenderOnce for ZedAiConfiguration {
    fn render(self, _window: &mut Window, _cx: &mut App) -> impl IntoElement {
        let is_pro = self.plan.is_some_and(|plan| plan == Plan::ZedPro);
        let subscription_text = match (self.plan, self.subscription_period) {
            (Some(Plan::ZedPro), Some(_)) => {
                "You have access to Zed's hosted models through your Pro subscription."
            }
            (Some(Plan::ZedProTrial), Some(_)) => {
                "You have access to Zed's hosted models through your Pro trial."
            }
            // Free plans and accounts without an active subscription period get
            // the same subscribe prompt.
            _ => {
                if self.eligible_for_trial {
                    "Subscribe for access to Zed's hosted models. Start with a 14-day free trial."
                } else {
                    "Subscribe for access to Zed's hosted models."
                }
            }
        };

        let manage_subscription_button = if is_pro {
            Button::new("manage_settings", "Manage Subscription")
                .full_width()
                .style(ButtonStyle::Tinted(TintColor::Accent))
                .on_click(|_, _, cx| cx.open_url(&zed_urls::account_url(cx)))
                .into_any_element()
        } else if self.plan.is_none() || self.eligible_for_trial {
            Button::new("start_trial", "Start 14-day Free Pro Trial")
                .full_width()
                .style(ButtonStyle::Tinted(TintColor::Accent))
                .on_click(|_, _, cx| cx.open_url(&zed_urls::start_trial_url(cx)))
                .into_any_element()
        } else {
            Button::new("upgrade", "Upgrade to Pro")
                .full_width()
                .style(ButtonStyle::Tinted(TintColor::Accent))
                .on_click(|_, _, cx| cx.open_url(&zed_urls::upgrade_to_zed_pro_url(cx)))
                .into_any_element()
        };

        if !self.is_connected {
            return v_flex()
                .gap_2()
                .child(Label::new(
                    "Sign in to have access to Zed's complete agentic experience with hosted models.",
                ))
                .child(
                    Button::new("sign_in", "Sign In to use Zed AI")
                        .icon_color(Color::Muted)
                        .icon(IconName::Github)
                        .icon_size(IconSize::Small)
                        .icon_position(IconPosition::Start)
                        .full_width()
                        .on_click({
                            let callback = self.sign_in_callback.clone();
                            move |_, window, cx| (callback)(window, cx)
                        }),
                );
        }

        v_flex().gap_2().w_full().map(|this| {
            if self.account_too_young {
                this.child(YoungAccountBanner).child(
                    Button::new("upgrade", "Upgrade to Pro")
                        .style(ButtonStyle::Tinted(TintColor::Accent))
                        .full_width()
                        .on_click(|_, _, cx| cx.open_url(&zed_urls::upgrade_to_zed_pro_url(cx))),
                )
            } else {
                this.text_sm()
                    .child(subscription_text)
                    .child(manage_subscription_button)
            }
        })
    }
}

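/// The configuration view registered with the provider; it reads the current
/// user/plan state and delegates rendering to [`ZedAiConfiguration`].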
struct ConfigurationView {
    state: Entity<State>,
    sign_in_callback: Arc<dyn Fn(&mut Window, &mut App) + Send + Sync>,
}

impl ConfigurationView {
    fn new(state: Entity<State>) -> Self {
        let sign_in_callback = Arc::new({
            let state = state.clone();
            move |_window: &mut Window, cx: &mut App| {
                state.update(cx, |state, cx| {
                    state.authenticate(cx).detach_and_log_err(cx);
                });
            }
        });

        Self {
            state,
            sign_in_callback,
        }
    }
}

impl Render for ConfigurationView {
    fn render(&mut self, _: &mut Window, cx: &mut Context<Self>) -> impl IntoElement {
        let state = self.state.read(cx);
        let user_store = state.user_store.read(cx);

        ZedAiConfiguration {
            is_connected: !state.is_signed_out(cx),
            plan: user_store.plan(),
            subscription_period: user_store.subscription_period(),
            eligible_for_trial: user_store.trial_started_at().is_none(),
            account_too_young: user_store.account_too_young(),
            sign_in_callback: self.sign_in_callback.clone(),
        }
    }
}

impl Component for ZedAiConfiguration {
    fn name() -> &'static str {
        "AI Configuration Content"
    }

    fn sort_name() -> &'static str {
        "AI Configuration Content"
    }

    fn scope() -> ComponentScope {
        ComponentScope::Onboarding
    }

    fn preview(_window: &mut Window, _cx: &mut App) -> Option<AnyElement> {
        fn configuration(
            is_connected: bool,
            plan: Option<Plan>,
            eligible_for_trial: bool,
            account_too_young: bool,
        ) -> AnyElement {
            ZedAiConfiguration {
                is_connected,
                plan,
                subscription_period: plan
                    .is_some()
                    .then(|| (Utc::now(), Utc::now() + chrono::Duration::days(7))),
                eligible_for_trial,
                account_too_young,
                sign_in_callback: Arc::new(|_, _| {}),
            }
            .into_any_element()
        }

        Some(
            v_flex()
                .p_4()
                .gap_4()
                .children(vec![
                    single_example("Not connected", configuration(false, None, false, false)),
                    single_example(
                        "Accept Terms of Service",
                        configuration(true, None, true, false),
                    ),
                    single_example(
                        "No Plan - Not eligible for trial",
                        configuration(true, None, false, false),
                    ),
                    single_example(
                        "No Plan - Eligible for trial",
                        configuration(true, None, true, false),
                    ),
                    single_example(
                        "Free Plan",
                        configuration(true, Some(Plan::ZedFree), true, false),
                    ),
                    single_example(
                        "Zed Pro Trial Plan",
                        configuration(true, Some(Plan::ZedProTrial), true, false),
                    ),
                    single_example(
                        "Zed Pro Plan",
                        configuration(true, Some(Plan::ZedPro), true, false),
                    ),
                ])
                .into_any_element(),
        )
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use http_client::http::{HeaderMap, StatusCode};
    use language_model::LanguageModelCompletionError;

    #[test]
    fn test_api_error_conversion_with_upstream_http_error() {
        // upstream_http_error with a 503 upstream status should surface as an
        // UpstreamProviderError carrying that status.
        let error_body = r#"{"code":"upstream_http_error","message":"Received an error from the Anthropic API: upstream connect error or disconnect/reset before headers, reset reason: connection timeout","upstream_status":503}"#;

        let api_error = ApiError {
            status: StatusCode::INTERNAL_SERVER_ERROR,
            body: error_body.to_string(),
            headers: HeaderMap::new(),
        };

        let completion_error: LanguageModelCompletionError = api_error.into();

        match completion_error {
            LanguageModelCompletionError::UpstreamProviderError { message, .. } => {
                assert_eq!(
                    message,
                    "Received an error from the Anthropic API: upstream connect error or disconnect/reset before headers, reset reason: connection timeout"
                );
            }
            _ => panic!(
                "Expected UpstreamProviderError for upstream 503, got: {:?}",
                completion_error
            ),
        }

        // upstream_http_error with a 500 upstream status should also surface as
        // an UpstreamProviderError.
        let error_body = r#"{"code":"upstream_http_error","message":"Received an error from the OpenAI API: internal server error","upstream_status":500}"#;

        let api_error = ApiError {
            status: StatusCode::INTERNAL_SERVER_ERROR,
            body: error_body.to_string(),
            headers: HeaderMap::new(),
        };

        let completion_error: LanguageModelCompletionError = api_error.into();

        match completion_error {
            LanguageModelCompletionError::UpstreamProviderError { message, .. } => {
                assert_eq!(
                    message,
                    "Received an error from the OpenAI API: internal server error"
                );
            }
            _ => panic!(
                "Expected UpstreamProviderError for upstream 500, got: {:?}",
                completion_error
            ),
        }

        // upstream_http_error with a 429 upstream status should surface as an
        // UpstreamProviderError as well.
        let error_body = r#"{"code":"upstream_http_error","message":"Received an error from the Google API: rate limit exceeded","upstream_status":429}"#;

        let api_error = ApiError {
            status: StatusCode::INTERNAL_SERVER_ERROR,
            body: error_body.to_string(),
            headers: HeaderMap::new(),
        };

        let completion_error: LanguageModelCompletionError = api_error.into();

        match completion_error {
            LanguageModelCompletionError::UpstreamProviderError { message, .. } => {
                assert_eq!(
                    message,
                    "Received an error from the Google API: rate limit exceeded"
                );
            }
            _ => panic!(
                "Expected UpstreamProviderError for upstream 429, got: {:?}",
                completion_error
            ),
        }

        // A plain 500 error without an upstream_http_error code should map to
        // ApiInternalServerError attributed to Zed itself.
        let error_body = "Regular internal server error";

        let api_error = ApiError {
            status: StatusCode::INTERNAL_SERVER_ERROR,
            body: error_body.to_string(),
            headers: HeaderMap::new(),
        };

        let completion_error: LanguageModelCompletionError = api_error.into();

        match completion_error {
            LanguageModelCompletionError::ApiInternalServerError { provider, message } => {
                assert_eq!(provider, PROVIDER_NAME);
                assert_eq!(message, "Regular internal server error");
            }
            _ => panic!(
                "Expected ApiInternalServerError for regular 500, got: {:?}",
                completion_error
            ),
        }

        // The upstream_http_429 code format should be converted to UpstreamProviderError
        let error_body = r#"{"code":"upstream_http_429","message":"Upstream Anthropic rate limit exceeded.","retry_after":30.5}"#;

        let api_error = ApiError {
            status: StatusCode::INTERNAL_SERVER_ERROR,
            body: error_body.to_string(),
            headers: HeaderMap::new(),
        };

        let completion_error: LanguageModelCompletionError = api_error.into();

        match completion_error {
            LanguageModelCompletionError::UpstreamProviderError {
                message,
                status,
                retry_after,
            } => {
                assert_eq!(message, "Upstream Anthropic rate limit exceeded.");
                assert_eq!(status, StatusCode::TOO_MANY_REQUESTS);
                assert_eq!(retry_after, Some(Duration::from_secs_f64(30.5)));
            }
            _ => panic!(
                "Expected UpstreamProviderError for upstream_http_429, got: {:?}",
                completion_error
            ),
        }

        // Invalid JSON in the error body should fall back to regular error handling
        let error_body = "Not JSON at all";

        let api_error = ApiError {
            status: StatusCode::INTERNAL_SERVER_ERROR,
            body: error_body.to_string(),
            headers: HeaderMap::new(),
        };

        let completion_error: LanguageModelCompletionError = api_error.into();

        match completion_error {
            LanguageModelCompletionError::ApiInternalServerError { provider, .. } => {
                assert_eq!(provider, PROVIDER_NAME);
            }
            _ => panic!(
                "Expected ApiInternalServerError for invalid JSON, got: {:?}",
                completion_error
            ),
        }
    }
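
    // A minimal added check for `deserialize_optional_status_code`: invalid or
    // missing status codes should deserialize to `None` rather than erroring.
    // The `Probe` struct is a hypothetical stand-in for `CloudApiError`.
    #[test]
    fn test_deserialize_optional_status_code() {
        #[derive(serde::Deserialize)]
        struct Probe {
            #[serde(default, deserialize_with = "deserialize_optional_status_code")]
            upstream_status: Option<StatusCode>,
        }

        let probe: Probe = serde_json::from_str(r#"{"upstream_status":503}"#).unwrap();
        assert_eq!(probe.upstream_status, Some(StatusCode::SERVICE_UNAVAILABLE));

        // 0 is not a valid HTTP status code; it should be dropped, not rejected.
        let probe: Probe = serde_json::from_str(r#"{"upstream_status":0}"#).unwrap();
        assert_eq!(probe.upstream_status, None);

        let probe: Probe = serde_json::from_str("{}").unwrap();
        assert_eq!(probe.upstream_status, None);
    }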
}