1use ai_onboarding::YoungAccountBanner;
2use anthropic::AnthropicModelMode;
3use anyhow::{Context as _, Result, anyhow};
4use chrono::{DateTime, Utc};
5use client::{Client, ModelRequestUsage, UserStore, zed_urls};
6use cloud_llm_client::{
7 CLIENT_SUPPORTS_STATUS_MESSAGES_HEADER_NAME, CLIENT_SUPPORTS_X_AI_HEADER_NAME,
8 CURRENT_PLAN_HEADER_NAME, CompletionBody, CompletionEvent, CompletionRequestStatus,
9 CountTokensBody, CountTokensResponse, EXPIRED_LLM_TOKEN_HEADER_NAME, ListModelsResponse,
10 MODEL_REQUESTS_RESOURCE_HEADER_VALUE, Plan, PlanV1, PlanV2,
11 SERVER_SUPPORTS_STATUS_MESSAGES_HEADER_NAME, SUBSCRIPTION_LIMIT_RESOURCE_HEADER_NAME,
12 TOOL_USE_LIMIT_REACHED_HEADER_NAME, ZED_VERSION_HEADER_NAME,
13};
14use feature_flags::{BillingV2FeatureFlag, FeatureFlagAppExt};
15use futures::{
16 AsyncBufReadExt, FutureExt, Stream, StreamExt, future::BoxFuture, stream::BoxStream,
17};
18use google_ai::GoogleModelMode;
19use gpui::{
20 AnyElement, AnyView, App, AsyncApp, Context, Entity, SemanticVersion, Subscription, Task,
21};
22use http_client::http::{HeaderMap, HeaderValue};
23use http_client::{AsyncBody, HttpClient, HttpRequestExt, Method, Response, StatusCode};
24use language_model::{
25 AuthenticateError, LanguageModel, LanguageModelCacheConfiguration,
26 LanguageModelCompletionError, LanguageModelCompletionEvent, LanguageModelId, LanguageModelName,
27 LanguageModelProvider, LanguageModelProviderId, LanguageModelProviderName,
28 LanguageModelProviderState, LanguageModelRequest, LanguageModelToolChoice,
29 LanguageModelToolSchemaFormat, LlmApiToken, ModelRequestLimitReachedError,
30 PaymentRequiredError, RateLimiter, RefreshLlmTokenListener,
31};
32use release_channel::AppVersion;
33use schemars::JsonSchema;
34use serde::{Deserialize, Serialize, de::DeserializeOwned};
35use settings::SettingsStore;
36pub use settings::ZedDotDevAvailableModel as AvailableModel;
37pub use settings::ZedDotDevAvailableProvider as AvailableProvider;
38use smol::io::{AsyncReadExt, BufReader};
39use std::pin::Pin;
40use std::str::FromStr as _;
41use std::sync::Arc;
42use std::time::Duration;
43use thiserror::Error;
44use ui::{TintColor, prelude::*};
45use util::{ResultExt as _, maybe};
46
47use crate::provider::anthropic::{AnthropicEventMapper, count_anthropic_tokens, into_anthropic};
48use crate::provider::google::{GoogleEventMapper, into_google};
49use crate::provider::open_ai::{OpenAiEventMapper, count_open_ai_tokens, into_open_ai};
50use crate::provider::x_ai::count_xai_tokens;
51
52const PROVIDER_ID: LanguageModelProviderId = language_model::ZED_CLOUD_PROVIDER_ID;
53const PROVIDER_NAME: LanguageModelProviderName = language_model::ZED_CLOUD_PROVIDER_NAME;
54
55#[derive(Default, Clone, Debug, PartialEq)]
56pub struct ZedDotDevSettings {
57 pub available_models: Vec<AvailableModel>,
}

#[derive(Default, Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema)]
60#[serde(tag = "type", rename_all = "lowercase")]
61pub enum ModelMode {
62 #[default]
63 Default,
64 Thinking {
65 /// The maximum number of tokens to use for reasoning. Must be lower than the model's `max_output_tokens`.
66 budget_tokens: Option<u32>,
67 },
68}
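
// A rough sketch of how `ModelMode` serializes with the internally tagged,
// lowercase representation declared above (example values only, not taken
// from any real settings file):
//
//   { "type": "default" }
//   { "type": "thinking", "budget_tokens": 4096 }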
69
70impl From<ModelMode> for AnthropicModelMode {
71 fn from(value: ModelMode) -> Self {
72 match value {
73 ModelMode::Default => AnthropicModelMode::Default,
74 ModelMode::Thinking { budget_tokens } => AnthropicModelMode::Thinking { budget_tokens },
75 }
76 }
77}
78
79pub struct CloudLanguageModelProvider {
80 client: Arc<Client>,
81 state: gpui::Entity<State>,
82 _maintain_client_status: Task<()>,
83}
84
85pub struct State {
86 client: Arc<Client>,
87 llm_api_token: LlmApiToken,
88 user_store: Entity<UserStore>,
89 status: client::Status,
90 models: Vec<Arc<cloud_llm_client::LanguageModel>>,
91 default_model: Option<Arc<cloud_llm_client::LanguageModel>>,
92 default_fast_model: Option<Arc<cloud_llm_client::LanguageModel>>,
93 recommended_models: Vec<Arc<cloud_llm_client::LanguageModel>>,
94 _fetch_models_task: Task<()>,
95 _settings_subscription: Subscription,
96 _llm_token_subscription: Subscription,
97}
98
99impl State {
100 fn new(
101 client: Arc<Client>,
102 user_store: Entity<UserStore>,
103 status: client::Status,
104 cx: &mut Context<Self>,
105 ) -> Self {
106 let refresh_llm_token_listener = RefreshLlmTokenListener::global(cx);
107 let mut current_user = user_store.read(cx).watch_current_user();
108 Self {
109 client: client.clone(),
110 llm_api_token: LlmApiToken::default(),
111 user_store,
112 status,
113 models: Vec::new(),
114 default_model: None,
115 default_fast_model: None,
116 recommended_models: Vec::new(),
117 _fetch_models_task: cx.spawn(async move |this, cx| {
118 maybe!(async move {
119 let (client, llm_api_token) = this
120 .read_with(cx, |this, _cx| (client.clone(), this.llm_api_token.clone()))?;
121
122 while current_user.borrow().is_none() {
123 current_user.next().await;
124 }
125
126 let response =
127 Self::fetch_models(client.clone(), llm_api_token.clone()).await?;
128 this.update(cx, |this, cx| this.update_models(response, cx))?;
129 anyhow::Ok(())
130 })
131 .await
132 .context("failed to fetch Zed models")
133 .log_err();
134 }),
135 _settings_subscription: cx.observe_global::<SettingsStore>(|_, cx| {
136 cx.notify();
137 }),
138 _llm_token_subscription: cx.subscribe(
139 &refresh_llm_token_listener,
140 move |this, _listener, _event, cx| {
141 let client = this.client.clone();
142 let llm_api_token = this.llm_api_token.clone();
143 cx.spawn(async move |this, cx| {
144 llm_api_token.refresh(&client).await?;
145 let response = Self::fetch_models(client, llm_api_token).await?;
146 this.update(cx, |this, cx| {
147 this.update_models(response, cx);
148 })
149 })
150 .detach_and_log_err(cx);
151 },
152 ),
153 }
154 }
155
156 fn is_signed_out(&self, cx: &App) -> bool {
157 self.user_store.read(cx).current_user().is_none()
158 }
159
160 fn authenticate(&self, cx: &mut Context<Self>) -> Task<Result<()>> {
161 let client = self.client.clone();
162 cx.spawn(async move |state, cx| {
163 client.sign_in_with_optional_connect(true, cx).await?;
164 state.update(cx, |_, cx| cx.notify())
165 })
    }

    fn update_models(&mut self, response: ListModelsResponse, cx: &mut Context<Self>) {
168 let mut models = Vec::new();
169
170 for model in response.models {
171 models.push(Arc::new(model.clone()));
172
173 // Right now we represent thinking variants of models as separate models on the client,
174 // so we need to insert variants for any model that supports thinking.
175 if model.supports_thinking {
176 models.push(Arc::new(cloud_llm_client::LanguageModel {
177 id: cloud_llm_client::LanguageModelId(format!("{}-thinking", model.id).into()),
178 display_name: format!("{} Thinking", model.display_name),
179 ..model
180 }));
181 }
182 }
183
184 self.default_model = models
185 .iter()
186 .find(|model| {
187 response
188 .default_model
189 .as_ref()
190 .is_some_and(|default_model_id| &model.id == default_model_id)
191 })
192 .cloned();
193 self.default_fast_model = models
194 .iter()
195 .find(|model| {
196 response
197 .default_fast_model
198 .as_ref()
199 .is_some_and(|default_fast_model_id| &model.id == default_fast_model_id)
200 })
201 .cloned();
202 self.recommended_models = response
203 .recommended_models
204 .iter()
205 .filter_map(|id| models.iter().find(|model| &model.id == id))
206 .cloned()
207 .collect();
208 self.models = models;
209 cx.notify();
210 }
211
212 async fn fetch_models(
213 client: Arc<Client>,
214 llm_api_token: LlmApiToken,
215 ) -> Result<ListModelsResponse> {
216 let http_client = &client.http_client();
217 let token = llm_api_token.acquire(&client).await?;
218
219 let request = http_client::Request::builder()
220 .method(Method::GET)
221 .header(CLIENT_SUPPORTS_X_AI_HEADER_NAME, "true")
222 .uri(http_client.build_zed_llm_url("/models", &[])?.as_ref())
223 .header("Authorization", format!("Bearer {token}"))
224 .body(AsyncBody::empty())?;
225 let mut response = http_client
226 .send(request)
227 .await
228 .context("failed to send list models request")?;
229
230 if response.status().is_success() {
231 let mut body = String::new();
232 response.body_mut().read_to_string(&mut body).await?;
233 Ok(serde_json::from_str(&body)?)
234 } else {
235 let mut body = String::new();
236 response.body_mut().read_to_string(&mut body).await?;
237 anyhow::bail!(
238 "error listing models.\nStatus: {:?}\nBody: {body}",
239 response.status(),
240 );
241 }
242 }
243}
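
// Based on how `update_models` consumes it, the `/models` payload fetched in
// `fetch_models` looks roughly like the sketch below. Field names come from
// the code above; the values and exact wire format are illustrative only.
//
//   {
//     "models": [
//       { "id": "...", "display_name": "...", "supports_thinking": true, ... }
//     ],
//     "default_model": "...",
//     "default_fast_model": "...",
//     "recommended_models": ["..."]
//   }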
244
245impl CloudLanguageModelProvider {
246 pub fn new(user_store: Entity<UserStore>, client: Arc<Client>, cx: &mut App) -> Self {
247 let mut status_rx = client.status();
248 let status = *status_rx.borrow();
249
250 let state = cx.new(|cx| State::new(client.clone(), user_store.clone(), status, cx));
251
252 let state_ref = state.downgrade();
253 let maintain_client_status = cx.spawn(async move |cx| {
254 while let Some(status) = status_rx.next().await {
255 if let Some(this) = state_ref.upgrade() {
256 _ = this.update(cx, |this, cx| {
257 if this.status != status {
258 this.status = status;
259 cx.notify();
260 }
261 });
262 } else {
263 break;
264 }
265 }
266 });
267
268 Self {
269 client,
270 state,
271 _maintain_client_status: maintain_client_status,
272 }
273 }
274
275 fn create_language_model(
276 &self,
277 model: Arc<cloud_llm_client::LanguageModel>,
278 llm_api_token: LlmApiToken,
279 ) -> Arc<dyn LanguageModel> {
280 Arc::new(CloudLanguageModel {
281 id: LanguageModelId(SharedString::from(model.id.0.clone())),
282 model,
283 llm_api_token,
284 client: self.client.clone(),
285 request_limiter: RateLimiter::new(4),
286 })
287 }
288}
289
290impl LanguageModelProviderState for CloudLanguageModelProvider {
291 type ObservableEntity = State;
292
293 fn observable_entity(&self) -> Option<gpui::Entity<Self::ObservableEntity>> {
294 Some(self.state.clone())
295 }
296}
297
298impl LanguageModelProvider for CloudLanguageModelProvider {
299 fn id(&self) -> LanguageModelProviderId {
300 PROVIDER_ID
301 }
302
303 fn name(&self) -> LanguageModelProviderName {
304 PROVIDER_NAME
305 }
306
307 fn icon(&self) -> IconName {
308 IconName::AiZed
309 }
310
311 fn default_model(&self, cx: &App) -> Option<Arc<dyn LanguageModel>> {
312 let default_model = self.state.read(cx).default_model.clone()?;
313 let llm_api_token = self.state.read(cx).llm_api_token.clone();
314 Some(self.create_language_model(default_model, llm_api_token))
315 }
316
317 fn default_fast_model(&self, cx: &App) -> Option<Arc<dyn LanguageModel>> {
318 let default_fast_model = self.state.read(cx).default_fast_model.clone()?;
319 let llm_api_token = self.state.read(cx).llm_api_token.clone();
320 Some(self.create_language_model(default_fast_model, llm_api_token))
321 }
322
323 fn recommended_models(&self, cx: &App) -> Vec<Arc<dyn LanguageModel>> {
324 let llm_api_token = self.state.read(cx).llm_api_token.clone();
325 self.state
326 .read(cx)
327 .recommended_models
328 .iter()
329 .cloned()
330 .map(|model| self.create_language_model(model, llm_api_token.clone()))
331 .collect()
332 }
333
334 fn provided_models(&self, cx: &App) -> Vec<Arc<dyn LanguageModel>> {
335 let llm_api_token = self.state.read(cx).llm_api_token.clone();
336 self.state
337 .read(cx)
338 .models
339 .iter()
340 .cloned()
341 .map(|model| self.create_language_model(model, llm_api_token.clone()))
342 .collect()
343 }
344
345 fn is_authenticated(&self, cx: &App) -> bool {
346 let state = self.state.read(cx);
347 !state.is_signed_out(cx)
348 }
349
350 fn authenticate(&self, _cx: &mut App) -> Task<Result<(), AuthenticateError>> {
351 Task::ready(Ok(()))
352 }
353
354 fn configuration_view(
355 &self,
356 _target_agent: language_model::ConfigurationViewTargetAgent,
357 _: &mut Window,
358 cx: &mut App,
359 ) -> AnyView {
360 cx.new(|_| ConfigurationView::new(self.state.clone()))
361 .into()
362 }
363
364 fn reset_credentials(&self, _cx: &mut App) -> Task<Result<()>> {
365 Task::ready(Ok(()))
366 }
367}
368
369pub struct CloudLanguageModel {
370 id: LanguageModelId,
371 model: Arc<cloud_llm_client::LanguageModel>,
372 llm_api_token: LlmApiToken,
373 client: Arc<Client>,
374 request_limiter: RateLimiter,
375}
376
377struct PerformLlmCompletionResponse {
378 response: Response<AsyncBody>,
379 usage: Option<ModelRequestUsage>,
380 tool_use_limit_reached: bool,
381 includes_status_messages: bool,
382}
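
// All three flags above are derived from response headers in
// `perform_llm_completion` below: `includes_status_messages` and
// `tool_use_limit_reached` mirror the presence of their marker headers, and
// `usage` is read from the usage headers only when the server does not stream
// status messages (the streamed `UsageUpdated` status presumably supersedes
// the headers in that case).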
383
384impl CloudLanguageModel {
385 async fn perform_llm_completion(
386 client: Arc<Client>,
387 llm_api_token: LlmApiToken,
388 app_version: Option<SemanticVersion>,
389 body: CompletionBody,
390 ) -> Result<PerformLlmCompletionResponse> {
391 let http_client = &client.http_client();
392
393 let mut token = llm_api_token.acquire(&client).await?;
394 let mut refreshed_token = false;
395
396 loop {
397 let request = http_client::Request::builder()
398 .method(Method::POST)
399 .uri(http_client.build_zed_llm_url("/completions", &[])?.as_ref())
400 .when_some(app_version, |builder, app_version| {
401 builder.header(ZED_VERSION_HEADER_NAME, app_version.to_string())
402 })
403 .header("Content-Type", "application/json")
404 .header("Authorization", format!("Bearer {token}"))
405 .header(CLIENT_SUPPORTS_STATUS_MESSAGES_HEADER_NAME, "true")
406 .body(serde_json::to_string(&body)?.into())?;
407
408 let mut response = http_client.send(request).await?;
409 let status = response.status();
410 if status.is_success() {
411 let includes_status_messages = response
412 .headers()
413 .get(SERVER_SUPPORTS_STATUS_MESSAGES_HEADER_NAME)
414 .is_some();
415
416 let tool_use_limit_reached = response
417 .headers()
418 .get(TOOL_USE_LIMIT_REACHED_HEADER_NAME)
419 .is_some();
420
421 let usage = if includes_status_messages {
422 None
423 } else {
424 ModelRequestUsage::from_headers(response.headers()).ok()
425 };
426
427 return Ok(PerformLlmCompletionResponse {
428 response,
429 usage,
430 includes_status_messages,
431 tool_use_limit_reached,
432 });
433 }
434
435 if !refreshed_token
436 && response
437 .headers()
438 .get(EXPIRED_LLM_TOKEN_HEADER_NAME)
439 .is_some()
440 {
441 token = llm_api_token.refresh(&client).await?;
442 refreshed_token = true;
443 continue;
444 }
445
446 if status == StatusCode::FORBIDDEN
447 && response
448 .headers()
449 .get(SUBSCRIPTION_LIMIT_RESOURCE_HEADER_NAME)
450 .is_some()
451 {
452 if let Some(MODEL_REQUESTS_RESOURCE_HEADER_VALUE) = response
453 .headers()
454 .get(SUBSCRIPTION_LIMIT_RESOURCE_HEADER_NAME)
455 .and_then(|resource| resource.to_str().ok())
456 && let Some(plan) = response
457 .headers()
458 .get(CURRENT_PLAN_HEADER_NAME)
459 .and_then(|plan| plan.to_str().ok())
460 .and_then(|plan| cloud_llm_client::PlanV1::from_str(plan).ok())
461 .map(Plan::V1)
462 {
463 return Err(anyhow!(ModelRequestLimitReachedError { plan }));
464 }
465 } else if status == StatusCode::PAYMENT_REQUIRED {
466 return Err(anyhow!(PaymentRequiredError));
467 }
468
469 let mut body = String::new();
470 let headers = response.headers().clone();
471 response.body_mut().read_to_string(&mut body).await?;
472 return Err(anyhow!(ApiError {
473 status,
474 body,
475 headers
476 }));
477 }
478 }
479}
480
481#[derive(Debug, Error)]
482#[error("cloud language model request failed with status {status}: {body}")]
483struct ApiError {
484 status: StatusCode,
485 body: String,
486 headers: HeaderMap<HeaderValue>,
487}
488
489/// Represents error responses from Zed's cloud API.
490///
491/// Example JSON for an upstream HTTP error:
492/// ```json
493/// {
494/// "code": "upstream_http_error",
495/// "message": "Received an error from the Anthropic API: upstream connect error or disconnect/reset before headers, reset reason: connection timeout",
496/// "upstream_status": 503
497/// }
498/// ```
499#[derive(Debug, serde::Deserialize)]
500struct CloudApiError {
501 code: String,
502 message: String,
503 #[serde(default)]
504 #[serde(deserialize_with = "deserialize_optional_status_code")]
505 upstream_status: Option<StatusCode>,
506 #[serde(default)]
507 retry_after: Option<f64>,
508}
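
// A rate-limited upstream error can also arrive without an `upstream_status`
// field, carrying the status in the code itself plus a retry hint. An
// illustrative payload (mirroring the shape exercised in the tests below):
//
//   { "code": "upstream_http_429", "message": "Upstream Anthropic rate limit exceeded.", "retry_after": 30.5 }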
509
510fn deserialize_optional_status_code<'de, D>(deserializer: D) -> Result<Option<StatusCode>, D::Error>
511where
512 D: serde::Deserializer<'de>,
513{
514 let opt: Option<u16> = Option::deserialize(deserializer)?;
515 Ok(opt.and_then(|code| StatusCode::from_u16(code).ok()))
516}
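
// A minimal sketch of how the deserializer above behaves, assuming a bare JSON
// number (or null) in `upstream_status`:
//
//   503            -> Some(StatusCode::SERVICE_UNAVAILABLE)
//   1000           -> None (not a valid HTTP status code)
//   null / absent  -> None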
517
518impl From<ApiError> for LanguageModelCompletionError {
519 fn from(error: ApiError) -> Self {
520 if let Ok(cloud_error) = serde_json::from_str::<CloudApiError>(&error.body) {
521 if cloud_error.code.starts_with("upstream_http_") {
522 let status = if let Some(status) = cloud_error.upstream_status {
523 status
524 } else if cloud_error.code.ends_with("_error") {
525 error.status
526 } else {
                    // The status code may be embedded in the error code itself
                    // (e.g. "upstream_http_429"); if so, use that, otherwise fall
                    // back to the status of the response we received.
529 cloud_error
530 .code
531 .strip_prefix("upstream_http_")
532 .and_then(|code_str| code_str.parse::<u16>().ok())
533 .and_then(|code| StatusCode::from_u16(code).ok())
534 .unwrap_or(error.status)
535 };
536
537 return LanguageModelCompletionError::UpstreamProviderError {
538 message: cloud_error.message,
539 status,
540 retry_after: cloud_error.retry_after.map(Duration::from_secs_f64),
541 };
542 }
543
544 return LanguageModelCompletionError::from_http_status(
545 PROVIDER_NAME,
546 error.status,
547 cloud_error.message,
548 None,
549 );
550 }
551
552 let retry_after = None;
553 LanguageModelCompletionError::from_http_status(
554 PROVIDER_NAME,
555 error.status,
556 error.body,
557 retry_after,
558 )
559 }
560}
561
562impl LanguageModel for CloudLanguageModel {
563 fn id(&self) -> LanguageModelId {
564 self.id.clone()
565 }
566
567 fn name(&self) -> LanguageModelName {
568 LanguageModelName::from(self.model.display_name.clone())
569 }
570
571 fn provider_id(&self) -> LanguageModelProviderId {
572 PROVIDER_ID
573 }
574
575 fn provider_name(&self) -> LanguageModelProviderName {
576 PROVIDER_NAME
577 }
578
579 fn upstream_provider_id(&self) -> LanguageModelProviderId {
580 use cloud_llm_client::LanguageModelProvider::*;
581 match self.model.provider {
582 Anthropic => language_model::ANTHROPIC_PROVIDER_ID,
583 OpenAi => language_model::OPEN_AI_PROVIDER_ID,
584 Google => language_model::GOOGLE_PROVIDER_ID,
585 XAi => language_model::X_AI_PROVIDER_ID,
586 }
587 }
588
589 fn upstream_provider_name(&self) -> LanguageModelProviderName {
590 use cloud_llm_client::LanguageModelProvider::*;
591 match self.model.provider {
592 Anthropic => language_model::ANTHROPIC_PROVIDER_NAME,
593 OpenAi => language_model::OPEN_AI_PROVIDER_NAME,
594 Google => language_model::GOOGLE_PROVIDER_NAME,
595 XAi => language_model::X_AI_PROVIDER_NAME,
596 }
597 }
598
599 fn supports_tools(&self) -> bool {
600 self.model.supports_tools
601 }
602
603 fn supports_images(&self) -> bool {
604 self.model.supports_images
605 }
606
607 fn supports_tool_choice(&self, choice: LanguageModelToolChoice) -> bool {
608 match choice {
609 LanguageModelToolChoice::Auto
610 | LanguageModelToolChoice::Any
611 | LanguageModelToolChoice::None => true,
612 }
613 }
614
615 fn supports_burn_mode(&self) -> bool {
616 self.model.supports_max_mode
617 }
618
619 fn telemetry_id(&self) -> String {
620 format!("zed.dev/{}", self.model.id)
621 }
622
623 fn tool_input_format(&self) -> LanguageModelToolSchemaFormat {
624 match self.model.provider {
625 cloud_llm_client::LanguageModelProvider::Anthropic
626 | cloud_llm_client::LanguageModelProvider::OpenAi
627 | cloud_llm_client::LanguageModelProvider::XAi => {
628 LanguageModelToolSchemaFormat::JsonSchema
629 }
630 cloud_llm_client::LanguageModelProvider::Google => {
631 LanguageModelToolSchemaFormat::JsonSchemaSubset
632 }
633 }
634 }
635
636 fn max_token_count(&self) -> u64 {
637 self.model.max_token_count as u64
638 }
639
640 fn max_token_count_in_burn_mode(&self) -> Option<u64> {
641 self.model
642 .max_token_count_in_max_mode
643 .filter(|_| self.model.supports_max_mode)
644 .map(|max_token_count| max_token_count as u64)
645 }
646
647 fn cache_configuration(&self) -> Option<LanguageModelCacheConfiguration> {
648 match &self.model.provider {
649 cloud_llm_client::LanguageModelProvider::Anthropic => {
650 Some(LanguageModelCacheConfiguration {
651 min_total_token: 2_048,
652 should_speculate: true,
653 max_cache_anchors: 4,
654 })
655 }
656 cloud_llm_client::LanguageModelProvider::OpenAi
657 | cloud_llm_client::LanguageModelProvider::XAi
658 | cloud_llm_client::LanguageModelProvider::Google => None,
659 }
660 }
661
662 fn count_tokens(
663 &self,
664 request: LanguageModelRequest,
665 cx: &App,
666 ) -> BoxFuture<'static, Result<u64>> {
667 match self.model.provider {
668 cloud_llm_client::LanguageModelProvider::Anthropic => {
669 count_anthropic_tokens(request, cx)
670 }
671 cloud_llm_client::LanguageModelProvider::OpenAi => {
672 let model = match open_ai::Model::from_id(&self.model.id.0) {
673 Ok(model) => model,
674 Err(err) => return async move { Err(anyhow!(err)) }.boxed(),
675 };
676 count_open_ai_tokens(request, model, cx)
677 }
678 cloud_llm_client::LanguageModelProvider::XAi => {
679 let model = match x_ai::Model::from_id(&self.model.id.0) {
680 Ok(model) => model,
681 Err(err) => return async move { Err(anyhow!(err)) }.boxed(),
682 };
683 count_xai_tokens(request, model, cx)
684 }
685 cloud_llm_client::LanguageModelProvider::Google => {
686 let client = self.client.clone();
687 let llm_api_token = self.llm_api_token.clone();
688 let model_id = self.model.id.to_string();
689 let generate_content_request =
690 into_google(request, model_id.clone(), GoogleModelMode::Default);
691 async move {
692 let http_client = &client.http_client();
693 let token = llm_api_token.acquire(&client).await?;
694
695 let request_body = CountTokensBody {
696 provider: cloud_llm_client::LanguageModelProvider::Google,
697 model: model_id,
698 provider_request: serde_json::to_value(&google_ai::CountTokensRequest {
699 generate_content_request,
700 })?,
701 };
702 let request = http_client::Request::builder()
703 .method(Method::POST)
704 .uri(
705 http_client
706 .build_zed_llm_url("/count_tokens", &[])?
707 .as_ref(),
708 )
709 .header("Content-Type", "application/json")
710 .header("Authorization", format!("Bearer {token}"))
711 .body(serde_json::to_string(&request_body)?.into())?;
712 let mut response = http_client.send(request).await?;
713 let status = response.status();
714 let headers = response.headers().clone();
715 let mut response_body = String::new();
716 response
717 .body_mut()
718 .read_to_string(&mut response_body)
719 .await?;
720
721 if status.is_success() {
722 let response_body: CountTokensResponse =
723 serde_json::from_str(&response_body)?;
724
725 Ok(response_body.tokens as u64)
726 } else {
727 Err(anyhow!(ApiError {
728 status,
729 body: response_body,
730 headers
731 }))
732 }
733 }
734 .boxed()
735 }
736 }
737 }
738
739 fn stream_completion(
740 &self,
741 request: LanguageModelRequest,
742 cx: &AsyncApp,
743 ) -> BoxFuture<
744 'static,
745 Result<
746 BoxStream<'static, Result<LanguageModelCompletionEvent, LanguageModelCompletionError>>,
747 LanguageModelCompletionError,
748 >,
749 > {
750 let thread_id = request.thread_id.clone();
751 let prompt_id = request.prompt_id.clone();
752 let intent = request.intent;
753 let mode = request.mode;
754 let app_version = cx.update(|cx| AppVersion::global(cx)).ok();
755 let thinking_allowed = request.thinking_allowed;
756 match self.model.provider {
757 cloud_llm_client::LanguageModelProvider::Anthropic => {
758 let request = into_anthropic(
759 request,
760 self.model.id.to_string(),
761 1.0,
762 self.model.max_output_tokens as u64,
763 if thinking_allowed && self.model.id.0.ends_with("-thinking") {
764 AnthropicModelMode::Thinking {
765 budget_tokens: Some(4_096),
766 }
767 } else {
768 AnthropicModelMode::Default
769 },
770 );
771 let client = self.client.clone();
772 let llm_api_token = self.llm_api_token.clone();
773 let future = self.request_limiter.stream(async move {
774 let PerformLlmCompletionResponse {
775 response,
776 usage,
777 includes_status_messages,
778 tool_use_limit_reached,
779 } = Self::perform_llm_completion(
780 client.clone(),
781 llm_api_token,
782 app_version,
783 CompletionBody {
784 thread_id,
785 prompt_id,
786 intent,
787 mode,
788 provider: cloud_llm_client::LanguageModelProvider::Anthropic,
789 model: request.model.clone(),
790 provider_request: serde_json::to_value(&request)
791 .map_err(|e| anyhow!(e))?,
792 },
793 )
794 .await
795 .map_err(|err| match err.downcast::<ApiError>() {
796 Ok(api_err) => anyhow!(LanguageModelCompletionError::from(api_err)),
797 Err(err) => anyhow!(err),
798 })?;
799
800 let mut mapper = AnthropicEventMapper::new();
801 Ok(map_cloud_completion_events(
802 Box::pin(
803 response_lines(response, includes_status_messages)
804 .chain(usage_updated_event(usage))
805 .chain(tool_use_limit_reached_event(tool_use_limit_reached)),
806 ),
807 move |event| mapper.map_event(event),
808 ))
809 });
810 async move { Ok(future.await?.boxed()) }.boxed()
811 }
812 cloud_llm_client::LanguageModelProvider::OpenAi => {
813 let client = self.client.clone();
814 let model = match open_ai::Model::from_id(&self.model.id.0) {
815 Ok(model) => model,
816 Err(err) => return async move { Err(anyhow!(err).into()) }.boxed(),
817 };
818 let request = into_open_ai(
819 request,
820 model.id(),
821 model.supports_parallel_tool_calls(),
822 model.supports_prompt_cache_key(),
823 None,
824 None,
825 );
826 let llm_api_token = self.llm_api_token.clone();
827 let future = self.request_limiter.stream(async move {
828 let PerformLlmCompletionResponse {
829 response,
830 usage,
831 includes_status_messages,
832 tool_use_limit_reached,
833 } = Self::perform_llm_completion(
834 client.clone(),
835 llm_api_token,
836 app_version,
837 CompletionBody {
838 thread_id,
839 prompt_id,
840 intent,
841 mode,
842 provider: cloud_llm_client::LanguageModelProvider::OpenAi,
843 model: request.model.clone(),
844 provider_request: serde_json::to_value(&request)
845 .map_err(|e| anyhow!(e))?,
846 },
847 )
848 .await?;
849
850 let mut mapper = OpenAiEventMapper::new();
851 Ok(map_cloud_completion_events(
852 Box::pin(
853 response_lines(response, includes_status_messages)
854 .chain(usage_updated_event(usage))
855 .chain(tool_use_limit_reached_event(tool_use_limit_reached)),
856 ),
857 move |event| mapper.map_event(event),
858 ))
859 });
860 async move { Ok(future.await?.boxed()) }.boxed()
861 }
862 cloud_llm_client::LanguageModelProvider::XAi => {
863 let client = self.client.clone();
864 let model = match x_ai::Model::from_id(&self.model.id.0) {
865 Ok(model) => model,
866 Err(err) => return async move { Err(anyhow!(err).into()) }.boxed(),
867 };
868 let request = into_open_ai(
869 request,
870 model.id(),
871 model.supports_parallel_tool_calls(),
872 model.supports_prompt_cache_key(),
873 None,
874 None,
875 );
876 let llm_api_token = self.llm_api_token.clone();
877 let future = self.request_limiter.stream(async move {
878 let PerformLlmCompletionResponse {
879 response,
880 usage,
881 includes_status_messages,
882 tool_use_limit_reached,
883 } = Self::perform_llm_completion(
884 client.clone(),
885 llm_api_token,
886 app_version,
887 CompletionBody {
888 thread_id,
889 prompt_id,
890 intent,
891 mode,
892 provider: cloud_llm_client::LanguageModelProvider::XAi,
893 model: request.model.clone(),
894 provider_request: serde_json::to_value(&request)
895 .map_err(|e| anyhow!(e))?,
896 },
897 )
898 .await?;
899
900 let mut mapper = OpenAiEventMapper::new();
901 Ok(map_cloud_completion_events(
902 Box::pin(
903 response_lines(response, includes_status_messages)
904 .chain(usage_updated_event(usage))
905 .chain(tool_use_limit_reached_event(tool_use_limit_reached)),
906 ),
907 move |event| mapper.map_event(event),
908 ))
909 });
910 async move { Ok(future.await?.boxed()) }.boxed()
911 }
912 cloud_llm_client::LanguageModelProvider::Google => {
913 let client = self.client.clone();
914 let request =
915 into_google(request, self.model.id.to_string(), GoogleModelMode::Default);
916 let llm_api_token = self.llm_api_token.clone();
917 let future = self.request_limiter.stream(async move {
918 let PerformLlmCompletionResponse {
919 response,
920 usage,
921 includes_status_messages,
922 tool_use_limit_reached,
923 } = Self::perform_llm_completion(
924 client.clone(),
925 llm_api_token,
926 app_version,
927 CompletionBody {
928 thread_id,
929 prompt_id,
930 intent,
931 mode,
932 provider: cloud_llm_client::LanguageModelProvider::Google,
933 model: request.model.model_id.clone(),
934 provider_request: serde_json::to_value(&request)
935 .map_err(|e| anyhow!(e))?,
936 },
937 )
938 .await?;
939
940 let mut mapper = GoogleEventMapper::new();
941 Ok(map_cloud_completion_events(
942 Box::pin(
943 response_lines(response, includes_status_messages)
944 .chain(usage_updated_event(usage))
945 .chain(tool_use_limit_reached_event(tool_use_limit_reached)),
946 ),
947 move |event| mapper.map_event(event),
948 ))
949 });
950 async move { Ok(future.await?.boxed()) }.boxed()
951 }
952 }
953 }
954}
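
// Note: the "-thinking" id suffix checked in `stream_completion` above is a
// client-side convention established in `State::update_models`, which
// registers a synthetic "<id>-thinking" variant for every model that reports
// `supports_thinking`.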
955
956fn map_cloud_completion_events<T, F>(
957 stream: Pin<Box<dyn Stream<Item = Result<CompletionEvent<T>>> + Send>>,
958 mut map_callback: F,
959) -> BoxStream<'static, Result<LanguageModelCompletionEvent, LanguageModelCompletionError>>
960where
961 T: DeserializeOwned + 'static,
962 F: FnMut(T) -> Vec<Result<LanguageModelCompletionEvent, LanguageModelCompletionError>>
963 + Send
964 + 'static,
965{
966 stream
967 .flat_map(move |event| {
968 futures::stream::iter(match event {
969 Err(error) => {
970 vec![Err(LanguageModelCompletionError::from(error))]
971 }
972 Ok(CompletionEvent::Status(event)) => {
973 vec![Ok(LanguageModelCompletionEvent::StatusUpdate(event))]
974 }
975 Ok(CompletionEvent::Event(event)) => map_callback(event),
976 })
977 })
978 .boxed()
979}
980
981fn usage_updated_event<T>(
982 usage: Option<ModelRequestUsage>,
983) -> impl Stream<Item = Result<CompletionEvent<T>>> {
984 futures::stream::iter(usage.map(|usage| {
985 Ok(CompletionEvent::Status(
986 CompletionRequestStatus::UsageUpdated {
987 amount: usage.amount as usize,
988 limit: usage.limit,
989 },
990 ))
991 }))
992}
993
994fn tool_use_limit_reached_event<T>(
995 tool_use_limit_reached: bool,
996) -> impl Stream<Item = Result<CompletionEvent<T>>> {
997 futures::stream::iter(tool_use_limit_reached.then(|| {
998 Ok(CompletionEvent::Status(
999 CompletionRequestStatus::ToolUseLimitReached,
1000 ))
1001 }))
1002}
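
// Each helper above yields at most one synthetic status event; both are
// chained onto the end of the provider event stream in `stream_completion`.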
1003
1004fn response_lines<T: DeserializeOwned>(
1005 response: Response<AsyncBody>,
1006 includes_status_messages: bool,
1007) -> impl Stream<Item = Result<CompletionEvent<T>>> {
1008 futures::stream::try_unfold(
1009 (String::new(), BufReader::new(response.into_body())),
1010 move |(mut line, mut body)| async move {
1011 match body.read_line(&mut line).await {
1012 Ok(0) => Ok(None),
1013 Ok(_) => {
1014 let event = if includes_status_messages {
1015 serde_json::from_str::<CompletionEvent<T>>(&line)?
1016 } else {
1017 CompletionEvent::Event(serde_json::from_str::<T>(&line)?)
1018 };
1019
1020 line.clear();
1021 Ok(Some((event, (line, body))))
1022 }
1023 Err(e) => Err(e.into()),
1024 }
1025 },
1026 )
1027}
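
// `response_lines` assumes newline-delimited JSON: each line of the body is a
// single JSON object. When the server advertised status-message support, a
// line deserializes as a `CompletionEvent<T>` (a provider event or a status
// update); otherwise every line is treated as a raw provider event `T`.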
1028
1029#[derive(IntoElement, RegisterComponent)]
1030struct ZedAiConfiguration {
1031 is_connected: bool,
1032 plan: Option<Plan>,
1033 subscription_period: Option<(DateTime<Utc>, DateTime<Utc>)>,
1034 eligible_for_trial: bool,
1035 account_too_young: bool,
1036 sign_in_callback: Arc<dyn Fn(&mut Window, &mut App) + Send + Sync>,
1037}
1038
1039impl RenderOnce for ZedAiConfiguration {
1040 fn render(self, _window: &mut Window, cx: &mut App) -> impl IntoElement {
1041 let is_pro = self.plan.is_some_and(|plan| {
1042 matches!(plan, Plan::V1(PlanV1::ZedPro) | Plan::V2(PlanV2::ZedPro))
1043 });
1044 let is_free_v2 = self
1045 .plan
1046 .is_some_and(|plan| plan == Plan::V2(PlanV2::ZedFree));
1047 let subscription_text = match (self.plan, self.subscription_period) {
1048 (Some(Plan::V1(PlanV1::ZedPro) | Plan::V2(PlanV2::ZedPro)), Some(_)) => {
1049 "You have access to Zed's hosted models through your Pro subscription."
1050 }
1051 (Some(Plan::V1(PlanV1::ZedProTrial) | Plan::V2(PlanV2::ZedProTrial)), Some(_)) => {
1052 "You have access to Zed's hosted models through your Pro trial."
1053 }
1054 (Some(Plan::V1(PlanV1::ZedFree)), Some(_)) => {
1055 "You have basic access to Zed's hosted models through the Free plan."
1056 }
1057 (Some(Plan::V2(PlanV2::ZedFree)), Some(_)) => {
1058 if self.eligible_for_trial {
                    "Subscribe for access to Zed's hosted models. Start with a 14-day free trial."
1060 } else {
1061 "Subscribe for access to Zed's hosted models."
1062 }
1063 }
1064 _ => {
1065 if self.eligible_for_trial {
                    "Subscribe for access to Zed's hosted models. Start with a 14-day free trial."
1067 } else {
1068 "Subscribe for access to Zed's hosted models."
1069 }
1070 }
1071 };
1072
1073 let manage_subscription_buttons = if is_pro {
1074 Button::new("manage_settings", "Manage Subscription")
1075 .full_width()
1076 .style(ButtonStyle::Tinted(TintColor::Accent))
1077 .on_click(|_, _, cx| cx.open_url(&zed_urls::account_url(cx)))
1078 .into_any_element()
1079 } else if self.plan.is_none() || self.eligible_for_trial {
1080 Button::new("start_trial", "Start 14-day Free Pro Trial")
1081 .full_width()
1082 .style(ui::ButtonStyle::Tinted(ui::TintColor::Accent))
1083 .on_click(|_, _, cx| cx.open_url(&zed_urls::start_trial_url(cx)))
1084 .into_any_element()
1085 } else {
1086 Button::new("upgrade", "Upgrade to Pro")
1087 .full_width()
1088 .style(ui::ButtonStyle::Tinted(ui::TintColor::Accent))
1089 .on_click(|_, _, cx| cx.open_url(&zed_urls::upgrade_to_zed_pro_url(cx)))
1090 .into_any_element()
1091 };
1092
1093 if !self.is_connected {
1094 return v_flex()
1095 .gap_2()
            .child(Label::new("Sign in to access Zed's complete agentic experience with hosted models."))
1097 .child(
1098 Button::new("sign_in", "Sign In to use Zed AI")
1099 .icon_color(Color::Muted)
1100 .icon(IconName::Github)
1101 .icon_size(IconSize::Small)
1102 .icon_position(IconPosition::Start)
1103 .full_width()
1104 .on_click({
1105 let callback = self.sign_in_callback.clone();
1106 move |_, window, cx| (callback)(window, cx)
1107 }),
1108 );
1109 }
1110
1111 v_flex().gap_2().w_full().map(|this| {
1112 if self.account_too_young {
1113 this.child(YoungAccountBanner::new(
1114 is_free_v2 || cx.has_flag::<BillingV2FeatureFlag>(),
1115 ))
1116 .child(
1117 Button::new("upgrade", "Upgrade to Pro")
1118 .style(ui::ButtonStyle::Tinted(ui::TintColor::Accent))
1119 .full_width()
1120 .on_click(|_, _, cx| cx.open_url(&zed_urls::upgrade_to_zed_pro_url(cx))),
1121 )
1122 } else {
1123 this.text_sm()
1124 .child(subscription_text)
1125 .child(manage_subscription_buttons)
1126 }
1127 })
1128 }
1129}
1130
1131struct ConfigurationView {
1132 state: Entity<State>,
1133 sign_in_callback: Arc<dyn Fn(&mut Window, &mut App) + Send + Sync>,
1134}
1135
1136impl ConfigurationView {
1137 fn new(state: Entity<State>) -> Self {
1138 let sign_in_callback = Arc::new({
1139 let state = state.clone();
1140 move |_window: &mut Window, cx: &mut App| {
1141 state.update(cx, |state, cx| {
1142 state.authenticate(cx).detach_and_log_err(cx);
1143 });
1144 }
1145 });
1146
1147 Self {
1148 state,
1149 sign_in_callback,
1150 }
1151 }
1152}
1153
1154impl Render for ConfigurationView {
1155 fn render(&mut self, _: &mut Window, cx: &mut Context<Self>) -> impl IntoElement {
1156 let state = self.state.read(cx);
1157 let user_store = state.user_store.read(cx);
1158
1159 ZedAiConfiguration {
1160 is_connected: !state.is_signed_out(cx),
1161 plan: user_store.plan(),
1162 subscription_period: user_store.subscription_period(),
1163 eligible_for_trial: user_store.trial_started_at().is_none(),
1164 account_too_young: user_store.account_too_young(),
1165 sign_in_callback: self.sign_in_callback.clone(),
1166 }
1167 }
1168}
1169
1170impl Component for ZedAiConfiguration {
1171 fn name() -> &'static str {
1172 "AI Configuration Content"
1173 }
1174
1175 fn sort_name() -> &'static str {
1176 "AI Configuration Content"
1177 }
1178
1179 fn scope() -> ComponentScope {
1180 ComponentScope::Onboarding
1181 }
1182
1183 fn preview(_window: &mut Window, _cx: &mut App) -> Option<AnyElement> {
1184 fn configuration(
1185 is_connected: bool,
1186 plan: Option<Plan>,
1187 eligible_for_trial: bool,
1188 account_too_young: bool,
1189 ) -> AnyElement {
1190 ZedAiConfiguration {
1191 is_connected,
1192 plan,
1193 subscription_period: plan
1194 .is_some()
1195 .then(|| (Utc::now(), Utc::now() + chrono::Duration::days(7))),
1196 eligible_for_trial,
1197 account_too_young,
1198 sign_in_callback: Arc::new(|_, _| {}),
1199 }
1200 .into_any_element()
1201 }
1202
1203 Some(
1204 v_flex()
1205 .p_4()
1206 .gap_4()
1207 .children(vec![
1208 single_example("Not connected", configuration(false, None, false, false)),
1209 single_example(
1210 "Accept Terms of Service",
1211 configuration(true, None, true, false),
1212 ),
1213 single_example(
1214 "No Plan - Not eligible for trial",
1215 configuration(true, None, false, false),
1216 ),
1217 single_example(
1218 "No Plan - Eligible for trial",
1219 configuration(true, None, true, false),
1220 ),
1221 single_example(
1222 "Free Plan",
1223 configuration(true, Some(Plan::V1(PlanV1::ZedFree)), true, false),
1224 ),
1225 single_example(
1226 "Zed Pro Trial Plan",
1227 configuration(true, Some(Plan::V1(PlanV1::ZedProTrial)), true, false),
1228 ),
1229 single_example(
1230 "Zed Pro Plan",
1231 configuration(true, Some(Plan::V1(PlanV1::ZedPro)), true, false),
1232 ),
1233 ])
1234 .into_any_element(),
1235 )
1236 }
1237}
1238
1239#[cfg(test)]
1240mod tests {
1241 use super::*;
1242 use http_client::http::{HeaderMap, StatusCode};
1243 use language_model::LanguageModelCompletionError;
1244
1245 #[test]
1246 fn test_api_error_conversion_with_upstream_http_error() {
        // upstream_http_error with an upstream 503 should surface as UpstreamProviderError
1248 let error_body = r#"{"code":"upstream_http_error","message":"Received an error from the Anthropic API: upstream connect error or disconnect/reset before headers, reset reason: connection timeout","upstream_status":503}"#;
1249
1250 let api_error = ApiError {
1251 status: StatusCode::INTERNAL_SERVER_ERROR,
1252 body: error_body.to_string(),
1253 headers: HeaderMap::new(),
1254 };
1255
1256 let completion_error: LanguageModelCompletionError = api_error.into();
1257
1258 match completion_error {
1259 LanguageModelCompletionError::UpstreamProviderError { message, .. } => {
1260 assert_eq!(
1261 message,
1262 "Received an error from the Anthropic API: upstream connect error or disconnect/reset before headers, reset reason: connection timeout"
1263 );
1264 }
1265 _ => panic!(
1266 "Expected UpstreamProviderError for upstream 503, got: {:?}",
1267 completion_error
1268 ),
1269 }
1270
        // upstream_http_error with an upstream 500 should also surface as UpstreamProviderError
1272 let error_body = r#"{"code":"upstream_http_error","message":"Received an error from the OpenAI API: internal server error","upstream_status":500}"#;
1273
1274 let api_error = ApiError {
1275 status: StatusCode::INTERNAL_SERVER_ERROR,
1276 body: error_body.to_string(),
1277 headers: HeaderMap::new(),
1278 };
1279
1280 let completion_error: LanguageModelCompletionError = api_error.into();
1281
1282 match completion_error {
1283 LanguageModelCompletionError::UpstreamProviderError { message, .. } => {
1284 assert_eq!(
1285 message,
1286 "Received an error from the OpenAI API: internal server error"
1287 );
1288 }
1289 _ => panic!(
1290 "Expected UpstreamProviderError for upstream 500, got: {:?}",
1291 completion_error
1292 ),
1293 }
1294
        // upstream_http_error with an upstream 429 should likewise surface as UpstreamProviderError
1296 let error_body = r#"{"code":"upstream_http_error","message":"Received an error from the Google API: rate limit exceeded","upstream_status":429}"#;
1297
1298 let api_error = ApiError {
1299 status: StatusCode::INTERNAL_SERVER_ERROR,
1300 body: error_body.to_string(),
1301 headers: HeaderMap::new(),
1302 };
1303
1304 let completion_error: LanguageModelCompletionError = api_error.into();
1305
1306 match completion_error {
1307 LanguageModelCompletionError::UpstreamProviderError { message, .. } => {
1308 assert_eq!(
1309 message,
1310 "Received an error from the Google API: rate limit exceeded"
1311 );
1312 }
1313 _ => panic!(
1314 "Expected UpstreamProviderError for upstream 429, got: {:?}",
1315 completion_error
1316 ),
1317 }
1318
        // A regular 500 without an upstream_http_error body should map to ApiInternalServerError for the Zed provider
1320 let error_body = "Regular internal server error";
1321
1322 let api_error = ApiError {
1323 status: StatusCode::INTERNAL_SERVER_ERROR,
1324 body: error_body.to_string(),
1325 headers: HeaderMap::new(),
1326 };
1327
1328 let completion_error: LanguageModelCompletionError = api_error.into();
1329
1330 match completion_error {
1331 LanguageModelCompletionError::ApiInternalServerError { provider, message } => {
1332 assert_eq!(provider, PROVIDER_NAME);
1333 assert_eq!(message, "Regular internal server error");
1334 }
1335 _ => panic!(
1336 "Expected ApiInternalServerError for regular 500, got: {:?}",
1337 completion_error
1338 ),
1339 }
1340
1341 // upstream_http_429 format should be converted to UpstreamProviderError
1342 let error_body = r#"{"code":"upstream_http_429","message":"Upstream Anthropic rate limit exceeded.","retry_after":30.5}"#;
1343
1344 let api_error = ApiError {
1345 status: StatusCode::INTERNAL_SERVER_ERROR,
1346 body: error_body.to_string(),
1347 headers: HeaderMap::new(),
1348 };
1349
1350 let completion_error: LanguageModelCompletionError = api_error.into();
1351
1352 match completion_error {
1353 LanguageModelCompletionError::UpstreamProviderError {
1354 message,
1355 status,
1356 retry_after,
1357 } => {
1358 assert_eq!(message, "Upstream Anthropic rate limit exceeded.");
1359 assert_eq!(status, StatusCode::TOO_MANY_REQUESTS);
1360 assert_eq!(retry_after, Some(Duration::from_secs_f64(30.5)));
1361 }
1362 _ => panic!(
1363 "Expected UpstreamProviderError for upstream_http_429, got: {:?}",
1364 completion_error
1365 ),
1366 }
1367
1368 // Invalid JSON in error body should fall back to regular error handling
1369 let error_body = "Not JSON at all";
1370
1371 let api_error = ApiError {
1372 status: StatusCode::INTERNAL_SERVER_ERROR,
1373 body: error_body.to_string(),
1374 headers: HeaderMap::new(),
1375 };
1376
1377 let completion_error: LanguageModelCompletionError = api_error.into();
1378
1379 match completion_error {
1380 LanguageModelCompletionError::ApiInternalServerError { provider, .. } => {
1381 assert_eq!(provider, PROVIDER_NAME);
1382 }
1383 _ => panic!(
1384 "Expected ApiInternalServerError for invalid JSON, got: {:?}",
1385 completion_error
1386 ),
1387 }
1388 }
1389}