use ai_onboarding::YoungAccountBanner;
use anthropic::AnthropicModelMode;
use anyhow::{Context as _, Result, anyhow};
use chrono::{DateTime, Utc};
use client::{Client, ModelRequestUsage, UserStore, zed_urls};
use cloud_llm_client::{
    CLIENT_SUPPORTS_STATUS_MESSAGES_HEADER_NAME, CURRENT_PLAN_HEADER_NAME, CompletionBody,
    CompletionEvent, CompletionRequestStatus, CountTokensBody, CountTokensResponse,
    EXPIRED_LLM_TOKEN_HEADER_NAME, ListModelsResponse, MODEL_REQUESTS_RESOURCE_HEADER_VALUE, Plan,
    PlanV1, PlanV2, SERVER_SUPPORTS_STATUS_MESSAGES_HEADER_NAME,
    SUBSCRIPTION_LIMIT_RESOURCE_HEADER_NAME, TOOL_USE_LIMIT_REACHED_HEADER_NAME,
    ZED_VERSION_HEADER_NAME,
};
use futures::{
    AsyncBufReadExt, FutureExt, Stream, StreamExt, future::BoxFuture, stream::BoxStream,
};
use google_ai::GoogleModelMode;
use gpui::{
    AnyElement, AnyView, App, AsyncApp, Context, Entity, SemanticVersion, Subscription, Task,
};
use http_client::http::{HeaderMap, HeaderValue};
use http_client::{AsyncBody, HttpClient, Method, Response, StatusCode};
use language_model::{
    AuthenticateError, LanguageModel, LanguageModelCacheConfiguration,
    LanguageModelCompletionError, LanguageModelCompletionEvent, LanguageModelId, LanguageModelName,
    LanguageModelProvider, LanguageModelProviderId, LanguageModelProviderName,
    LanguageModelProviderState, LanguageModelRequest, LanguageModelToolChoice,
    LanguageModelToolSchemaFormat, LlmApiToken, ModelRequestLimitReachedError,
    PaymentRequiredError, RateLimiter, RefreshLlmTokenListener,
};
use release_channel::AppVersion;
use schemars::JsonSchema;
use serde::{Deserialize, Serialize, de::DeserializeOwned};
use settings::SettingsStore;
use smol::io::{AsyncReadExt, BufReader};
use std::pin::Pin;
use std::str::FromStr as _;
use std::sync::Arc;
use std::time::Duration;
use thiserror::Error;
use ui::{TintColor, prelude::*};
use util::{ResultExt as _, maybe};

use crate::provider::anthropic::{AnthropicEventMapper, count_anthropic_tokens, into_anthropic};
use crate::provider::google::{GoogleEventMapper, into_google};
use crate::provider::open_ai::{OpenAiEventMapper, count_open_ai_tokens, into_open_ai};

const PROVIDER_ID: LanguageModelProviderId = language_model::ZED_CLOUD_PROVIDER_ID;
const PROVIDER_NAME: LanguageModelProviderName = language_model::ZED_CLOUD_PROVIDER_NAME;

#[derive(Default, Clone, Debug, PartialEq)]
pub struct ZedDotDevSettings {
    pub available_models: Vec<AvailableModel>,
}

#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema)]
#[serde(rename_all = "lowercase")]
pub enum AvailableProvider {
    Anthropic,
    OpenAi,
    Google,
}

#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema)]
pub struct AvailableModel {
    /// The provider of the language model.
    pub provider: AvailableProvider,
    /// The model's name in the provider's API, e.g. `claude-3-5-sonnet-20240620`.
    pub name: String,
    /// The name displayed in the UI, such as in the assistant panel model dropdown menu.
    pub display_name: Option<String>,
    /// The size of the context window, indicating the maximum number of tokens the model can process.
    pub max_tokens: usize,
    /// The maximum number of output tokens allowed by the model.
    pub max_output_tokens: Option<u64>,
    /// The maximum number of completion tokens allowed by the model (o1-* only)
    pub max_completion_tokens: Option<u64>,
    /// Override this model with a different Anthropic model for tool calls.
    pub tool_override: Option<String>,
    /// Indicates whether this custom model supports caching.
    pub cache_configuration: Option<LanguageModelCacheConfiguration>,
    /// The default temperature to use for this model.
    pub default_temperature: Option<f32>,
    /// Any extra beta headers to provide when using the model.
    #[serde(default)]
    pub extra_beta_headers: Vec<String>,
    /// The model's mode (e.g. thinking)
    pub mode: Option<ModelMode>,
}

#[derive(Default, Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema)]
#[serde(tag = "type", rename_all = "lowercase")]
pub enum ModelMode {
    #[default]
    Default,
    Thinking {
        /// The maximum number of tokens to use for reasoning. Must be lower than the model's `max_output_tokens`.
        budget_tokens: Option<u32>,
    },
}
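
// For illustration only: a sketch of the JSON these settings types
// deserialize from, assembled from the serde attributes above (the enclosing
// settings key path is not defined in this file):
//
// {
//   "available_models": [
//     {
//       "provider": "anthropic",
//       "name": "claude-3-5-sonnet-20240620",
//       "display_name": "Claude 3.5 Sonnet (Custom)",
//       "max_tokens": 200000,
//       "mode": { "type": "thinking", "budget_tokens": 4096 }
//     }
//   ]
// }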

impl From<ModelMode> for AnthropicModelMode {
    fn from(value: ModelMode) -> Self {
        match value {
            ModelMode::Default => AnthropicModelMode::Default,
            ModelMode::Thinking { budget_tokens } => AnthropicModelMode::Thinking { budget_tokens },
        }
    }
}

pub struct CloudLanguageModelProvider {
    client: Arc<Client>,
    state: gpui::Entity<State>,
    _maintain_client_status: Task<()>,
}

pub struct State {
    client: Arc<Client>,
    llm_api_token: LlmApiToken,
    user_store: Entity<UserStore>,
    status: client::Status,
    models: Vec<Arc<cloud_llm_client::LanguageModel>>,
    default_model: Option<Arc<cloud_llm_client::LanguageModel>>,
    default_fast_model: Option<Arc<cloud_llm_client::LanguageModel>>,
    recommended_models: Vec<Arc<cloud_llm_client::LanguageModel>>,
    _fetch_models_task: Task<()>,
    _settings_subscription: Subscription,
    _llm_token_subscription: Subscription,
}

impl State {
    fn new(
        client: Arc<Client>,
        user_store: Entity<UserStore>,
        status: client::Status,
        cx: &mut Context<Self>,
    ) -> Self {
        let refresh_llm_token_listener = RefreshLlmTokenListener::global(cx);
        let mut current_user = user_store.read(cx).watch_current_user();
        Self {
            client: client.clone(),
            llm_api_token: LlmApiToken::default(),
            user_store,
            status,
            models: Vec::new(),
            default_model: None,
            default_fast_model: None,
            recommended_models: Vec::new(),
            _fetch_models_task: cx.spawn(async move |this, cx| {
                maybe!(async move {
                    let (client, llm_api_token) = this
                        .read_with(cx, |this, _cx| (client.clone(), this.llm_api_token.clone()))?;

                    while current_user.borrow().is_none() {
                        current_user.next().await;
                    }

                    let response =
                        Self::fetch_models(client.clone(), llm_api_token.clone()).await?;
                    this.update(cx, |this, cx| this.update_models(response, cx))?;
                    anyhow::Ok(())
                })
                .await
                .context("failed to fetch Zed models")
                .log_err();
            }),
            _settings_subscription: cx.observe_global::<SettingsStore>(|_, cx| {
                cx.notify();
            }),
            _llm_token_subscription: cx.subscribe(
                &refresh_llm_token_listener,
                move |this, _listener, _event, cx| {
                    let client = this.client.clone();
                    let llm_api_token = this.llm_api_token.clone();
                    cx.spawn(async move |this, cx| {
                        llm_api_token.refresh(&client).await?;
                        let response = Self::fetch_models(client, llm_api_token).await?;
                        this.update(cx, |this, cx| {
                            this.update_models(response, cx);
                        })
                    })
                    .detach_and_log_err(cx);
                },
            ),
        }
    }

    fn is_signed_out(&self, cx: &App) -> bool {
        self.user_store.read(cx).current_user().is_none()
    }

    fn authenticate(&self, cx: &mut Context<Self>) -> Task<Result<()>> {
        let client = self.client.clone();
        cx.spawn(async move |state, cx| {
            client.sign_in_with_optional_connect(true, cx).await?;
            state.update(cx, |_, cx| cx.notify())
        })
    }

    fn update_models(&mut self, response: ListModelsResponse, cx: &mut Context<Self>) {
        let mut models = Vec::new();

        for model in response.models {
            models.push(Arc::new(model.clone()));

            // Right now we represent thinking variants of models as separate models on the client,
            // so we need to insert variants for any model that supports thinking.
            if model.supports_thinking {
                models.push(Arc::new(cloud_llm_client::LanguageModel {
                    id: cloud_llm_client::LanguageModelId(format!("{}-thinking", model.id).into()),
                    display_name: format!("{} Thinking", model.display_name),
                    ..model
                }));
            }
        }

        self.default_model = models
            .iter()
            .find(|model| model.id == response.default_model)
            .cloned();
        self.default_fast_model = models
            .iter()
            .find(|model| model.id == response.default_fast_model)
            .cloned();
        self.recommended_models = response
            .recommended_models
            .iter()
            .filter_map(|id| models.iter().find(|model| &model.id == id))
            .cloned()
            .collect();
        self.models = models;
        cx.notify();
    }

    async fn fetch_models(
        client: Arc<Client>,
        llm_api_token: LlmApiToken,
    ) -> Result<ListModelsResponse> {
        let http_client = &client.http_client();
        let token = llm_api_token.acquire(&client).await?;

        let request = http_client::Request::builder()
            .method(Method::GET)
            .uri(http_client.build_zed_llm_url("/models", &[])?.as_ref())
            .header("Authorization", format!("Bearer {token}"))
            .body(AsyncBody::empty())?;
        let mut response = http_client
            .send(request)
            .await
            .context("failed to send list models request")?;

        if response.status().is_success() {
            let mut body = String::new();
            response.body_mut().read_to_string(&mut body).await?;
            Ok(serde_json::from_str(&body)?)
        } else {
            let mut body = String::new();
            response.body_mut().read_to_string(&mut body).await?;
            anyhow::bail!(
                "error listing models.\nStatus: {:?}\nBody: {body}",
                response.status(),
            );
        }
    }
}

impl CloudLanguageModelProvider {
    pub fn new(user_store: Entity<UserStore>, client: Arc<Client>, cx: &mut App) -> Self {
        let mut status_rx = client.status();
        let status = *status_rx.borrow();

        let state = cx.new(|cx| State::new(client.clone(), user_store.clone(), status, cx));

        let state_ref = state.downgrade();
        let maintain_client_status = cx.spawn(async move |cx| {
            while let Some(status) = status_rx.next().await {
                if let Some(this) = state_ref.upgrade() {
                    _ = this.update(cx, |this, cx| {
                        if this.status != status {
                            this.status = status;
                            cx.notify();
                        }
                    });
                } else {
                    break;
                }
            }
        });

        Self {
            client,
            state,
            _maintain_client_status: maintain_client_status,
        }
    }

    fn create_language_model(
        &self,
        model: Arc<cloud_llm_client::LanguageModel>,
        llm_api_token: LlmApiToken,
    ) -> Arc<dyn LanguageModel> {
        Arc::new(CloudLanguageModel {
            id: LanguageModelId(SharedString::from(model.id.0.clone())),
            model,
            llm_api_token,
            client: self.client.clone(),
            request_limiter: RateLimiter::new(4),
        })
    }
}

impl LanguageModelProviderState for CloudLanguageModelProvider {
    type ObservableEntity = State;

    fn observable_entity(&self) -> Option<gpui::Entity<Self::ObservableEntity>> {
        Some(self.state.clone())
    }
}

impl LanguageModelProvider for CloudLanguageModelProvider {
    fn id(&self) -> LanguageModelProviderId {
        PROVIDER_ID
    }

    fn name(&self) -> LanguageModelProviderName {
        PROVIDER_NAME
    }

    fn icon(&self) -> IconName {
        IconName::AiZed
    }

    fn default_model(&self, cx: &App) -> Option<Arc<dyn LanguageModel>> {
        let default_model = self.state.read(cx).default_model.clone()?;
        let llm_api_token = self.state.read(cx).llm_api_token.clone();
        Some(self.create_language_model(default_model, llm_api_token))
    }

    fn default_fast_model(&self, cx: &App) -> Option<Arc<dyn LanguageModel>> {
        let default_fast_model = self.state.read(cx).default_fast_model.clone()?;
        let llm_api_token = self.state.read(cx).llm_api_token.clone();
        Some(self.create_language_model(default_fast_model, llm_api_token))
    }

    fn recommended_models(&self, cx: &App) -> Vec<Arc<dyn LanguageModel>> {
        let llm_api_token = self.state.read(cx).llm_api_token.clone();
        self.state
            .read(cx)
            .recommended_models
            .iter()
            .cloned()
            .map(|model| self.create_language_model(model, llm_api_token.clone()))
            .collect()
    }

    fn provided_models(&self, cx: &App) -> Vec<Arc<dyn LanguageModel>> {
        let llm_api_token = self.state.read(cx).llm_api_token.clone();
        self.state
            .read(cx)
            .models
            .iter()
            .cloned()
            .map(|model| self.create_language_model(model, llm_api_token.clone()))
            .collect()
    }

    fn is_authenticated(&self, cx: &App) -> bool {
        let state = self.state.read(cx);
        !state.is_signed_out(cx)
    }

    fn authenticate(&self, _cx: &mut App) -> Task<Result<(), AuthenticateError>> {
        Task::ready(Ok(()))
    }

    fn configuration_view(
        &self,
        _target_agent: language_model::ConfigurationViewTargetAgent,
        _: &mut Window,
        cx: &mut App,
    ) -> AnyView {
        cx.new(|_| ConfigurationView::new(self.state.clone()))
            .into()
    }

    fn reset_credentials(&self, _cx: &mut App) -> Task<Result<()>> {
        Task::ready(Ok(()))
    }
}

pub struct CloudLanguageModel {
    id: LanguageModelId,
    model: Arc<cloud_llm_client::LanguageModel>,
    llm_api_token: LlmApiToken,
    client: Arc<Client>,
    request_limiter: RateLimiter,
}

struct PerformLlmCompletionResponse {
    response: Response<AsyncBody>,
    usage: Option<ModelRequestUsage>,
    tool_use_limit_reached: bool,
    includes_status_messages: bool,
}

impl CloudLanguageModel {
    /// Sends a completion request to the cloud LLM service, refreshing an
    /// expired LLM token and retrying once if necessary.
    async fn perform_llm_completion(
        client: Arc<Client>,
        llm_api_token: LlmApiToken,
        app_version: Option<SemanticVersion>,
        body: CompletionBody,
    ) -> Result<PerformLlmCompletionResponse> {
        let http_client = &client.http_client();

        let mut token = llm_api_token.acquire(&client).await?;
        let mut refreshed_token = false;

        loop {
            let request_builder = http_client::Request::builder()
                .method(Method::POST)
                .uri(http_client.build_zed_llm_url("/completions", &[])?.as_ref());
            let request_builder = if let Some(app_version) = app_version {
                request_builder.header(ZED_VERSION_HEADER_NAME, app_version.to_string())
            } else {
                request_builder
            };

            let request = request_builder
                .header("Content-Type", "application/json")
                .header("Authorization", format!("Bearer {token}"))
                .header(CLIENT_SUPPORTS_STATUS_MESSAGES_HEADER_NAME, "true")
                .body(serde_json::to_string(&body)?.into())?;
            let mut response = http_client.send(request).await?;
            let status = response.status();
            if status.is_success() {
                let includes_status_messages = response
                    .headers()
                    .get(SERVER_SUPPORTS_STATUS_MESSAGES_HEADER_NAME)
                    .is_some();

                let tool_use_limit_reached = response
                    .headers()
                    .get(TOOL_USE_LIMIT_REACHED_HEADER_NAME)
                    .is_some();

                let usage = if includes_status_messages {
                    None
                } else {
                    ModelRequestUsage::from_headers(response.headers()).ok()
                };

                return Ok(PerformLlmCompletionResponse {
                    response,
                    usage,
                    includes_status_messages,
                    tool_use_limit_reached,
                });
            }

            if !refreshed_token
                && response
                    .headers()
                    .get(EXPIRED_LLM_TOKEN_HEADER_NAME)
                    .is_some()
            {
                token = llm_api_token.refresh(&client).await?;
                refreshed_token = true;
                continue;
            }

            if status == StatusCode::FORBIDDEN
                && response
                    .headers()
                    .get(SUBSCRIPTION_LIMIT_RESOURCE_HEADER_NAME)
                    .is_some()
            {
                if let Some(MODEL_REQUESTS_RESOURCE_HEADER_VALUE) = response
                    .headers()
                    .get(SUBSCRIPTION_LIMIT_RESOURCE_HEADER_NAME)
                    .and_then(|resource| resource.to_str().ok())
                    && let Some(plan) = response
                        .headers()
                        .get(CURRENT_PLAN_HEADER_NAME)
                        .and_then(|plan| plan.to_str().ok())
                        .and_then(|plan| cloud_llm_client::PlanV1::from_str(plan).ok())
                        .map(Plan::V1)
                {
                    return Err(anyhow!(ModelRequestLimitReachedError { plan }));
                }
            } else if status == StatusCode::PAYMENT_REQUIRED {
                return Err(anyhow!(PaymentRequiredError));
            }

            let mut body = String::new();
            let headers = response.headers().clone();
            response.body_mut().read_to_string(&mut body).await?;
            return Err(anyhow!(ApiError {
                status,
                body,
                headers
            }));
        }
    }
}

#[derive(Debug, Error)]
#[error("cloud language model request failed with status {status}: {body}")]
struct ApiError {
    status: StatusCode,
    body: String,
    headers: HeaderMap<HeaderValue>,
}

/// Represents error responses from Zed's cloud API.
///
/// Example JSON for an upstream HTTP error:
/// ```json
/// {
///     "code": "upstream_http_error",
///     "message": "Received an error from the Anthropic API: upstream connect error or disconnect/reset before headers, reset reason: connection timeout",
///     "upstream_status": 503
/// }
/// ```
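///
/// Some error codes embed the upstream status in the code string and carry a
/// `retry_after` hint instead. A sketch of that shape, mirroring the
/// `upstream_http_429` case exercised in the tests below:
/// ```json
/// {
///     "code": "upstream_http_429",
///     "message": "Upstream Anthropic rate limit exceeded.",
///     "retry_after": 30.5
/// }
/// ```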
#[derive(Debug, serde::Deserialize)]
struct CloudApiError {
    code: String,
    message: String,
    #[serde(default)]
    #[serde(deserialize_with = "deserialize_optional_status_code")]
    upstream_status: Option<StatusCode>,
    #[serde(default)]
    retry_after: Option<f64>,
}

fn deserialize_optional_status_code<'de, D>(deserializer: D) -> Result<Option<StatusCode>, D::Error>
where
    D: serde::Deserializer<'de>,
{
    let opt: Option<u16> = Option::deserialize(deserializer)?;
    Ok(opt.and_then(|code| StatusCode::from_u16(code).ok()))
}

impl From<ApiError> for LanguageModelCompletionError {
    fn from(error: ApiError) -> Self {
        if let Ok(cloud_error) = serde_json::from_str::<CloudApiError>(&error.body)
            && cloud_error.code.starts_with("upstream_http_")
        {
            let status = if let Some(status) = cloud_error.upstream_status {
                status
            } else if cloud_error.code.ends_with("_error") {
                error.status
            } else {
                // The code string itself may embed a status code (e.g. "upstream_http_429");
                // use it if it parses, and fall back to the response status otherwise.
                cloud_error
                    .code
                    .strip_prefix("upstream_http_")
                    .and_then(|code_str| code_str.parse::<u16>().ok())
                    .and_then(|code| StatusCode::from_u16(code).ok())
                    .unwrap_or(error.status)
            };

            return LanguageModelCompletionError::UpstreamProviderError {
                message: cloud_error.message,
                status,
                retry_after: cloud_error.retry_after.map(Duration::from_secs_f64),
            };
        }

        let retry_after = None;
        LanguageModelCompletionError::from_http_status(
            PROVIDER_NAME,
            error.status,
            error.body,
            retry_after,
        )
    }
}

impl LanguageModel for CloudLanguageModel {
    fn id(&self) -> LanguageModelId {
        self.id.clone()
    }

    fn name(&self) -> LanguageModelName {
        LanguageModelName::from(self.model.display_name.clone())
    }

    fn provider_id(&self) -> LanguageModelProviderId {
        PROVIDER_ID
    }

    fn provider_name(&self) -> LanguageModelProviderName {
        PROVIDER_NAME
    }

    fn upstream_provider_id(&self) -> LanguageModelProviderId {
        use cloud_llm_client::LanguageModelProvider::*;
        match self.model.provider {
            Anthropic => language_model::ANTHROPIC_PROVIDER_ID,
            OpenAi => language_model::OPEN_AI_PROVIDER_ID,
            Google => language_model::GOOGLE_PROVIDER_ID,
        }
    }

    fn upstream_provider_name(&self) -> LanguageModelProviderName {
        use cloud_llm_client::LanguageModelProvider::*;
        match self.model.provider {
            Anthropic => language_model::ANTHROPIC_PROVIDER_NAME,
            OpenAi => language_model::OPEN_AI_PROVIDER_NAME,
            Google => language_model::GOOGLE_PROVIDER_NAME,
        }
    }

    fn supports_tools(&self) -> bool {
        self.model.supports_tools
    }

    fn supports_images(&self) -> bool {
        self.model.supports_images
    }

    fn supports_tool_choice(&self, choice: LanguageModelToolChoice) -> bool {
        match choice {
            LanguageModelToolChoice::Auto
            | LanguageModelToolChoice::Any
            | LanguageModelToolChoice::None => true,
        }
    }

    fn supports_burn_mode(&self) -> bool {
        self.model.supports_max_mode
    }

    fn telemetry_id(&self) -> String {
        format!("zed.dev/{}", self.model.id)
    }

    fn tool_input_format(&self) -> LanguageModelToolSchemaFormat {
        match self.model.provider {
            cloud_llm_client::LanguageModelProvider::Anthropic
            | cloud_llm_client::LanguageModelProvider::OpenAi => {
                LanguageModelToolSchemaFormat::JsonSchema
            }
            cloud_llm_client::LanguageModelProvider::Google => {
                LanguageModelToolSchemaFormat::JsonSchemaSubset
            }
        }
    }

    fn max_token_count(&self) -> u64 {
        self.model.max_token_count as u64
    }

    fn max_token_count_in_burn_mode(&self) -> Option<u64> {
        self.model
            .max_token_count_in_max_mode
            .filter(|_| self.model.supports_max_mode)
            .map(|max_token_count| max_token_count as u64)
    }

    fn cache_configuration(&self) -> Option<LanguageModelCacheConfiguration> {
        match &self.model.provider {
            cloud_llm_client::LanguageModelProvider::Anthropic => {
                Some(LanguageModelCacheConfiguration {
                    min_total_token: 2_048,
                    should_speculate: true,
                    max_cache_anchors: 4,
                })
            }
            cloud_llm_client::LanguageModelProvider::OpenAi
            | cloud_llm_client::LanguageModelProvider::Google => None,
        }
    }

    fn count_tokens(
        &self,
        request: LanguageModelRequest,
        cx: &App,
    ) -> BoxFuture<'static, Result<u64>> {
        match self.model.provider {
            cloud_llm_client::LanguageModelProvider::Anthropic => {
                count_anthropic_tokens(request, cx)
            }
            cloud_llm_client::LanguageModelProvider::OpenAi => {
                let model = match open_ai::Model::from_id(&self.model.id.0) {
                    Ok(model) => model,
                    Err(err) => return async move { Err(anyhow!(err)) }.boxed(),
                };
                count_open_ai_tokens(request, model, cx)
            }
            cloud_llm_client::LanguageModelProvider::Google => {
                let client = self.client.clone();
                let llm_api_token = self.llm_api_token.clone();
                let model_id = self.model.id.to_string();
                let generate_content_request =
                    into_google(request, model_id.clone(), GoogleModelMode::Default);
                async move {
                    let http_client = &client.http_client();
                    let token = llm_api_token.acquire(&client).await?;

                    let request_body = CountTokensBody {
                        provider: cloud_llm_client::LanguageModelProvider::Google,
                        model: model_id,
                        provider_request: serde_json::to_value(&google_ai::CountTokensRequest {
                            generate_content_request,
                        })?,
                    };
                    let request = http_client::Request::builder()
                        .method(Method::POST)
                        .uri(
                            http_client
                                .build_zed_llm_url("/count_tokens", &[])?
                                .as_ref(),
                        )
                        .header("Content-Type", "application/json")
                        .header("Authorization", format!("Bearer {token}"))
                        .body(serde_json::to_string(&request_body)?.into())?;
                    let mut response = http_client.send(request).await?;
                    let status = response.status();
                    let headers = response.headers().clone();
                    let mut response_body = String::new();
                    response
                        .body_mut()
                        .read_to_string(&mut response_body)
                        .await?;

                    if status.is_success() {
                        let response_body: CountTokensResponse =
                            serde_json::from_str(&response_body)?;

                        Ok(response_body.tokens as u64)
                    } else {
                        Err(anyhow!(ApiError {
                            status,
                            body: response_body,
                            headers
                        }))
                    }
                }
                .boxed()
            }
        }
    }

    fn stream_completion(
        &self,
        request: LanguageModelRequest,
        cx: &AsyncApp,
    ) -> BoxFuture<
        'static,
        Result<
            BoxStream<'static, Result<LanguageModelCompletionEvent, LanguageModelCompletionError>>,
            LanguageModelCompletionError,
        >,
    > {
        let thread_id = request.thread_id.clone();
        let prompt_id = request.prompt_id.clone();
        let intent = request.intent;
        let mode = request.mode;
        let app_version = cx.update(|cx| AppVersion::global(cx)).ok();
        let thinking_allowed = request.thinking_allowed;
        match self.model.provider {
            cloud_llm_client::LanguageModelProvider::Anthropic => {
                let request = into_anthropic(
                    request,
                    self.model.id.to_string(),
                    1.0,
                    self.model.max_output_tokens as u64,
                    if thinking_allowed && self.model.id.0.ends_with("-thinking") {
                        AnthropicModelMode::Thinking {
                            budget_tokens: Some(4_096),
                        }
                    } else {
                        AnthropicModelMode::Default
                    },
                );
                let client = self.client.clone();
                let llm_api_token = self.llm_api_token.clone();
                let future = self.request_limiter.stream(async move {
                    let PerformLlmCompletionResponse {
                        response,
                        usage,
                        includes_status_messages,
                        tool_use_limit_reached,
                    } = Self::perform_llm_completion(
                        client.clone(),
                        llm_api_token,
                        app_version,
                        CompletionBody {
                            thread_id,
                            prompt_id,
                            intent,
                            mode,
                            provider: cloud_llm_client::LanguageModelProvider::Anthropic,
                            model: request.model.clone(),
                            provider_request: serde_json::to_value(&request)
                                .map_err(|e| anyhow!(e))?,
                        },
                    )
                    .await
                    .map_err(|err| match err.downcast::<ApiError>() {
                        Ok(api_err) => anyhow!(LanguageModelCompletionError::from(api_err)),
                        Err(err) => anyhow!(err),
                    })?;

                    let mut mapper = AnthropicEventMapper::new();
                    Ok(map_cloud_completion_events(
                        Box::pin(
                            response_lines(response, includes_status_messages)
                                .chain(usage_updated_event(usage))
                                .chain(tool_use_limit_reached_event(tool_use_limit_reached)),
                        ),
                        move |event| mapper.map_event(event),
                    ))
                });
                async move { Ok(future.await?.boxed()) }.boxed()
            }
            cloud_llm_client::LanguageModelProvider::OpenAi => {
                let client = self.client.clone();
                let model = match open_ai::Model::from_id(&self.model.id.0) {
                    Ok(model) => model,
                    Err(err) => return async move { Err(anyhow!(err).into()) }.boxed(),
                };
                let request = into_open_ai(
                    request,
                    model.id(),
                    model.supports_parallel_tool_calls(),
                    model.supports_prompt_cache_key(),
                    None,
                    None,
                );
                let llm_api_token = self.llm_api_token.clone();
                let future = self.request_limiter.stream(async move {
                    let PerformLlmCompletionResponse {
                        response,
                        usage,
                        includes_status_messages,
                        tool_use_limit_reached,
                    } = Self::perform_llm_completion(
                        client.clone(),
                        llm_api_token,
                        app_version,
                        CompletionBody {
                            thread_id,
                            prompt_id,
                            intent,
                            mode,
                            provider: cloud_llm_client::LanguageModelProvider::OpenAi,
                            model: request.model.clone(),
                            provider_request: serde_json::to_value(&request)
                                .map_err(|e| anyhow!(e))?,
                        },
                    )
                    .await?;

                    let mut mapper = OpenAiEventMapper::new();
                    Ok(map_cloud_completion_events(
                        Box::pin(
                            response_lines(response, includes_status_messages)
                                .chain(usage_updated_event(usage))
                                .chain(tool_use_limit_reached_event(tool_use_limit_reached)),
                        ),
                        move |event| mapper.map_event(event),
                    ))
                });
                async move { Ok(future.await?.boxed()) }.boxed()
            }
            cloud_llm_client::LanguageModelProvider::Google => {
                let client = self.client.clone();
                let request =
                    into_google(request, self.model.id.to_string(), GoogleModelMode::Default);
                let llm_api_token = self.llm_api_token.clone();
                let future = self.request_limiter.stream(async move {
                    let PerformLlmCompletionResponse {
                        response,
                        usage,
                        includes_status_messages,
                        tool_use_limit_reached,
                    } = Self::perform_llm_completion(
                        client.clone(),
                        llm_api_token,
                        app_version,
                        CompletionBody {
                            thread_id,
                            prompt_id,
                            intent,
                            mode,
                            provider: cloud_llm_client::LanguageModelProvider::Google,
                            model: request.model.model_id.clone(),
                            provider_request: serde_json::to_value(&request)
                                .map_err(|e| anyhow!(e))?,
                        },
                    )
                    .await?;

                    let mut mapper = GoogleEventMapper::new();
                    Ok(map_cloud_completion_events(
                        Box::pin(
                            response_lines(response, includes_status_messages)
                                .chain(usage_updated_event(usage))
                                .chain(tool_use_limit_reached_event(tool_use_limit_reached)),
                        ),
                        move |event| mapper.map_event(event),
                    ))
                });
                async move { Ok(future.await?.boxed()) }.boxed()
            }
        }
    }
}

fn map_cloud_completion_events<T, F>(
    stream: Pin<Box<dyn Stream<Item = Result<CompletionEvent<T>>> + Send>>,
    mut map_callback: F,
) -> BoxStream<'static, Result<LanguageModelCompletionEvent, LanguageModelCompletionError>>
where
    T: DeserializeOwned + 'static,
    F: FnMut(T) -> Vec<Result<LanguageModelCompletionEvent, LanguageModelCompletionError>>
        + Send
        + 'static,
{
    stream
        .flat_map(move |event| {
            futures::stream::iter(match event {
                Err(error) => {
                    vec![Err(LanguageModelCompletionError::from(error))]
                }
                Ok(CompletionEvent::Status(event)) => {
                    vec![Ok(LanguageModelCompletionEvent::StatusUpdate(event))]
                }
                Ok(CompletionEvent::Event(event)) => map_callback(event),
            })
        })
        .boxed()
}

fn usage_updated_event<T>(
    usage: Option<ModelRequestUsage>,
) -> impl Stream<Item = Result<CompletionEvent<T>>> {
    futures::stream::iter(usage.map(|usage| {
        Ok(CompletionEvent::Status(
            CompletionRequestStatus::UsageUpdated {
                amount: usage.amount as usize,
                limit: usage.limit,
            },
        ))
    }))
}

fn tool_use_limit_reached_event<T>(
    tool_use_limit_reached: bool,
) -> impl Stream<Item = Result<CompletionEvent<T>>> {
    futures::stream::iter(tool_use_limit_reached.then(|| {
        Ok(CompletionEvent::Status(
            CompletionRequestStatus::ToolUseLimitReached,
        ))
    }))
}

/// Streams the newline-delimited JSON response body, deserializing each line
/// as a `CompletionEvent<T>` when the server supports status messages, or as
/// a bare provider event `T` otherwise.
fn response_lines<T: DeserializeOwned>(
    response: Response<AsyncBody>,
    includes_status_messages: bool,
) -> impl Stream<Item = Result<CompletionEvent<T>>> {
    futures::stream::try_unfold(
        (String::new(), BufReader::new(response.into_body())),
        move |(mut line, mut body)| async move {
            match body.read_line(&mut line).await {
                Ok(0) => Ok(None),
                Ok(_) => {
                    let event = if includes_status_messages {
                        serde_json::from_str::<CompletionEvent<T>>(&line)?
                    } else {
                        CompletionEvent::Event(serde_json::from_str::<T>(&line)?)
                    };

                    line.clear();
                    Ok(Some((event, (line, body))))
                }
                Err(e) => Err(e.into()),
            }
        },
    )
}

#[derive(IntoElement, RegisterComponent)]
struct ZedAiConfiguration {
    is_connected: bool,
    plan: Option<Plan>,
    subscription_period: Option<(DateTime<Utc>, DateTime<Utc>)>,
    eligible_for_trial: bool,
    account_too_young: bool,
    sign_in_callback: Arc<dyn Fn(&mut Window, &mut App) + Send + Sync>,
}

impl RenderOnce for ZedAiConfiguration {
    fn render(self, _window: &mut Window, _cx: &mut App) -> impl IntoElement {
        let young_account_banner = YoungAccountBanner;

        let is_pro = self.plan.is_some_and(|plan| {
            matches!(plan, Plan::V1(PlanV1::ZedPro) | Plan::V2(PlanV2::ZedPro))
        });
        let subscription_text = match (self.plan, self.subscription_period) {
            (Some(Plan::V1(PlanV1::ZedPro) | Plan::V2(PlanV2::ZedPro)), Some(_)) => {
                "You have access to Zed's hosted models through your Pro subscription."
            }
            (Some(Plan::V1(PlanV1::ZedProTrial) | Plan::V2(PlanV2::ZedProTrial)), Some(_)) => {
                "You have access to Zed's hosted models through your Pro trial."
            }
            (Some(Plan::V1(PlanV1::ZedFree) | Plan::V2(PlanV2::ZedFree)), Some(_)) => {
                "You have basic access to Zed's hosted models through the Free plan."
            }
            _ => {
                if self.eligible_for_trial {
1014 "Subscribe for access to Zed's hosted models. Start with a 14 day free trial."
                } else {
                    "Subscribe for access to Zed's hosted models."
                }
            }
        };

        let manage_subscription_button = if is_pro {
            Button::new("manage_settings", "Manage Subscription")
                .full_width()
                .style(ButtonStyle::Tinted(TintColor::Accent))
                .on_click(|_, _, cx| cx.open_url(&zed_urls::account_url(cx)))
                .into_any_element()
        } else if self.plan.is_none() || self.eligible_for_trial {
            Button::new("start_trial", "Start 14-day Free Pro Trial")
                .full_width()
                .style(ButtonStyle::Tinted(TintColor::Accent))
                .on_click(|_, _, cx| cx.open_url(&zed_urls::start_trial_url(cx)))
                .into_any_element()
        } else {
            Button::new("upgrade", "Upgrade to Pro")
                .full_width()
                .style(ButtonStyle::Tinted(TintColor::Accent))
                .on_click(|_, _, cx| cx.open_url(&zed_urls::upgrade_to_zed_pro_url(cx)))
                .into_any_element()
        };

        if !self.is_connected {
            return v_flex()
                .gap_2()
                .child(Label::new("Sign in to have access to Zed's complete agentic experience with hosted models."))
                .child(
                    Button::new("sign_in", "Sign In to use Zed AI")
                        .icon_color(Color::Muted)
                        .icon(IconName::Github)
                        .icon_size(IconSize::Small)
                        .icon_position(IconPosition::Start)
                        .full_width()
                        .on_click({
                            let callback = self.sign_in_callback.clone();
                            move |_, window, cx| (callback)(window, cx)
                        }),
                );
        }

        v_flex().gap_2().w_full().map(|this| {
            if self.account_too_young {
                this.child(young_account_banner).child(
                    Button::new("upgrade", "Upgrade to Pro")
                        .style(ButtonStyle::Tinted(TintColor::Accent))
                        .full_width()
                        .on_click(|_, _, cx| cx.open_url(&zed_urls::upgrade_to_zed_pro_url(cx))),
                )
            } else {
                this.text_sm()
                    .child(subscription_text)
                    .child(manage_subscription_button)
            }
        })
    }
}

struct ConfigurationView {
    state: Entity<State>,
    sign_in_callback: Arc<dyn Fn(&mut Window, &mut App) + Send + Sync>,
}

impl ConfigurationView {
    fn new(state: Entity<State>) -> Self {
        let sign_in_callback = Arc::new({
            let state = state.clone();
            move |_window: &mut Window, cx: &mut App| {
                state.update(cx, |state, cx| {
                    state.authenticate(cx).detach_and_log_err(cx);
                });
            }
        });

        Self {
            state,
            sign_in_callback,
        }
    }
}

impl Render for ConfigurationView {
    fn render(&mut self, _: &mut Window, cx: &mut Context<Self>) -> impl IntoElement {
        let state = self.state.read(cx);
        let user_store = state.user_store.read(cx);

        ZedAiConfiguration {
            is_connected: !state.is_signed_out(cx),
            plan: user_store.plan(),
            subscription_period: user_store.subscription_period(),
            eligible_for_trial: user_store.trial_started_at().is_none(),
            account_too_young: user_store.account_too_young(),
            sign_in_callback: self.sign_in_callback.clone(),
        }
    }
}

impl Component for ZedAiConfiguration {
    fn name() -> &'static str {
        "AI Configuration Content"
    }

    fn sort_name() -> &'static str {
        "AI Configuration Content"
    }

    fn scope() -> ComponentScope {
        ComponentScope::Onboarding
    }

    fn preview(_window: &mut Window, _cx: &mut App) -> Option<AnyElement> {
        fn configuration(
            is_connected: bool,
            plan: Option<Plan>,
            eligible_for_trial: bool,
            account_too_young: bool,
        ) -> AnyElement {
            ZedAiConfiguration {
                is_connected,
                plan,
                subscription_period: plan
                    .is_some()
                    .then(|| (Utc::now(), Utc::now() + chrono::Duration::days(7))),
                eligible_for_trial,
                account_too_young,
                sign_in_callback: Arc::new(|_, _| {}),
            }
            .into_any_element()
        }

        Some(
            v_flex()
                .p_4()
                .gap_4()
                .children(vec![
1153 single_example("Not connected", configuration(false, None, false, false)),
1154 single_example(
1155 "Accept Terms of Service",
1156 configuration(true, None, true, false),
1157 ),
                    single_example(
                        "No Plan - Not eligible for trial",
                        configuration(true, None, false, false),
                    ),
                    single_example(
                        "No Plan - Eligible for trial",
                        configuration(true, None, true, false),
                    ),
                    single_example(
                        "Free Plan",
                        configuration(true, Some(Plan::V1(PlanV1::ZedFree)), true, false),
                    ),
                    single_example(
                        "Zed Pro Trial Plan",
                        configuration(true, Some(Plan::V1(PlanV1::ZedProTrial)), true, false),
                    ),
                    single_example(
                        "Zed Pro Plan",
                        configuration(true, Some(Plan::V1(PlanV1::ZedPro)), true, false),
                    ),
                ])
                .into_any_element(),
        )
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use http_client::http::{HeaderMap, StatusCode};
    use language_model::LanguageModelCompletionError;

    #[test]
    fn test_api_error_conversion_with_upstream_http_error() {
        // upstream_http_error with a 503 upstream status should become UpstreamProviderError
        let error_body = r#"{"code":"upstream_http_error","message":"Received an error from the Anthropic API: upstream connect error or disconnect/reset before headers, reset reason: connection timeout","upstream_status":503}"#;

        let api_error = ApiError {
            status: StatusCode::INTERNAL_SERVER_ERROR,
            body: error_body.to_string(),
            headers: HeaderMap::new(),
        };

        let completion_error: LanguageModelCompletionError = api_error.into();

        match completion_error {
            LanguageModelCompletionError::UpstreamProviderError { message, .. } => {
                assert_eq!(
                    message,
                    "Received an error from the Anthropic API: upstream connect error or disconnect/reset before headers, reset reason: connection timeout"
                );
            }
            _ => panic!(
                "Expected UpstreamProviderError for upstream 503, got: {:?}",
                completion_error
            ),
        }

        // upstream_http_error with a 500 upstream status should also become UpstreamProviderError
        let error_body = r#"{"code":"upstream_http_error","message":"Received an error from the OpenAI API: internal server error","upstream_status":500}"#;

        let api_error = ApiError {
            status: StatusCode::INTERNAL_SERVER_ERROR,
            body: error_body.to_string(),
            headers: HeaderMap::new(),
        };

        let completion_error: LanguageModelCompletionError = api_error.into();

        match completion_error {
            LanguageModelCompletionError::UpstreamProviderError { message, .. } => {
                assert_eq!(
                    message,
                    "Received an error from the OpenAI API: internal server error"
                );
            }
            _ => panic!(
                "Expected UpstreamProviderError for upstream 500, got: {:?}",
                completion_error
            ),
        }

        // upstream_http_error with a 429 upstream status should become UpstreamProviderError as well
        let error_body = r#"{"code":"upstream_http_error","message":"Received an error from the Google API: rate limit exceeded","upstream_status":429}"#;

        let api_error = ApiError {
            status: StatusCode::INTERNAL_SERVER_ERROR,
            body: error_body.to_string(),
            headers: HeaderMap::new(),
        };

        let completion_error: LanguageModelCompletionError = api_error.into();

        match completion_error {
            LanguageModelCompletionError::UpstreamProviderError { message, .. } => {
                assert_eq!(
                    message,
                    "Received an error from the Google API: rate limit exceeded"
                );
            }
            _ => panic!(
                "Expected UpstreamProviderError for upstream 429, got: {:?}",
                completion_error
            ),
        }

        // A plain 500 without an upstream_http_* code should map to ApiInternalServerError attributed to Zed
        let error_body = "Regular internal server error";

        let api_error = ApiError {
            status: StatusCode::INTERNAL_SERVER_ERROR,
            body: error_body.to_string(),
            headers: HeaderMap::new(),
        };

        let completion_error: LanguageModelCompletionError = api_error.into();

        match completion_error {
            LanguageModelCompletionError::ApiInternalServerError { provider, message } => {
                assert_eq!(provider, PROVIDER_NAME);
                assert_eq!(message, "Regular internal server error");
            }
            _ => panic!(
                "Expected ApiInternalServerError for regular 500, got: {:?}",
                completion_error
            ),
        }

        // upstream_http_429 format should be converted to UpstreamProviderError
        let error_body = r#"{"code":"upstream_http_429","message":"Upstream Anthropic rate limit exceeded.","retry_after":30.5}"#;

        let api_error = ApiError {
            status: StatusCode::INTERNAL_SERVER_ERROR,
            body: error_body.to_string(),
            headers: HeaderMap::new(),
        };

        let completion_error: LanguageModelCompletionError = api_error.into();

        match completion_error {
            LanguageModelCompletionError::UpstreamProviderError {
                message,
                status,
                retry_after,
            } => {
                assert_eq!(message, "Upstream Anthropic rate limit exceeded.");
                assert_eq!(status, StatusCode::TOO_MANY_REQUESTS);
                assert_eq!(retry_after, Some(Duration::from_secs_f64(30.5)));
            }
            _ => panic!(
                "Expected UpstreamProviderError for upstream_http_429, got: {:?}",
                completion_error
            ),
        }

        // Invalid JSON in error body should fall back to regular error handling
        let error_body = "Not JSON at all";

        let api_error = ApiError {
            status: StatusCode::INTERNAL_SERVER_ERROR,
            body: error_body.to_string(),
            headers: HeaderMap::new(),
        };

        let completion_error: LanguageModelCompletionError = api_error.into();

        match completion_error {
            LanguageModelCompletionError::ApiInternalServerError { provider, .. } => {
                assert_eq!(provider, PROVIDER_NAME);
            }
            _ => panic!(
                "Expected ApiInternalServerError for invalid JSON, got: {:?}",
                completion_error
            ),
        }
    }
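
    // A supplementary sketch (not from the original file) exercising
    // `deserialize_optional_status_code` through `CloudApiError`; the JSON
    // payloads below are hypothetical.
    #[test]
    fn test_deserialize_optional_status_code() {
        // A valid numeric status deserializes to `Some(StatusCode)`.
        let error: CloudApiError = serde_json::from_str(
            r#"{"code":"upstream_http_error","message":"err","upstream_status":503}"#,
        )
        .unwrap();
        assert_eq!(error.upstream_status, Some(StatusCode::SERVICE_UNAVAILABLE));

        // A number outside the valid HTTP status range becomes `None` instead
        // of failing deserialization of the whole payload.
        let error: CloudApiError = serde_json::from_str(
            r#"{"code":"upstream_http_error","message":"err","upstream_status":1000}"#,
        )
        .unwrap();
        assert_eq!(error.upstream_status, None);

        // Omitting the field entirely also yields `None` via `#[serde(default)]`.
        let error: CloudApiError =
            serde_json::from_str(r#"{"code":"upstream_http_error","message":"err"}"#).unwrap();
        assert_eq!(error.upstream_status, None);
    }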
}