use anthropic::{AnthropicModelMode, parse_prompt_too_long};
use anyhow::{Result, anyhow};
use client::{Client, UserStore, zed_urls};
use collections::BTreeMap;
use feature_flags::{FeatureFlagAppExt, LlmClosedBetaFeatureFlag};
use futures::{
    AsyncBufReadExt, FutureExt, Stream, StreamExt, future::BoxFuture, stream::BoxStream,
};
use gpui::{
    AnyElement, AnyView, App, AsyncApp, Context, Entity, SemanticVersion, Subscription, Task,
};
use http_client::{AsyncBody, HttpClient, Method, Response, StatusCode};
use language_model::{
    AuthenticateError, CloudModel, LanguageModel, LanguageModelCacheConfiguration,
    LanguageModelCompletionError, LanguageModelId, LanguageModelKnownError, LanguageModelName,
    LanguageModelProviderId, LanguageModelProviderName, LanguageModelProviderState,
    LanguageModelProviderTosView, LanguageModelRequest, LanguageModelToolChoice,
    LanguageModelToolSchemaFormat, ModelRequestLimitReachedError, RateLimiter, RequestUsage,
    ZED_CLOUD_PROVIDER_ID,
};
use language_model::{
    LanguageModelAvailability, LanguageModelCompletionEvent, LanguageModelProvider, LlmApiToken,
    PaymentRequiredError, RefreshLlmTokenListener,
};
use proto::Plan;
use release_channel::AppVersion;
use schemars::JsonSchema;
use serde::{Deserialize, Serialize, de::DeserializeOwned};
use settings::{Settings, SettingsStore};
use smol::Timer;
use smol::io::{AsyncReadExt, BufReader};
use std::pin::Pin;
use std::str::FromStr as _;
use std::{
    sync::{Arc, LazyLock},
    time::Duration,
};
use strum::IntoEnumIterator;
use thiserror::Error;
use ui::{TintColor, prelude::*};
use zed_llm_client::{
    CLIENT_SUPPORTS_STATUS_MESSAGES_HEADER_NAME, CURRENT_PLAN_HEADER_NAME, CompletionBody,
    CompletionRequestStatus, CountTokensBody, CountTokensResponse, EXPIRED_LLM_TOKEN_HEADER_NAME,
    MODEL_REQUESTS_RESOURCE_HEADER_VALUE, SERVER_SUPPORTS_STATUS_MESSAGES_HEADER_NAME,
    SUBSCRIPTION_LIMIT_RESOURCE_HEADER_NAME, TOOL_USE_LIMIT_REACHED_HEADER_NAME,
    ZED_VERSION_HEADER_NAME,
};

use crate::AllLanguageModelSettings;
use crate::provider::anthropic::{AnthropicEventMapper, count_anthropic_tokens, into_anthropic};
use crate::provider::google::{GoogleEventMapper, into_google};
use crate::provider::open_ai::{OpenAiEventMapper, count_open_ai_tokens, into_open_ai};

pub const PROVIDER_NAME: &str = "Zed";

const ZED_CLOUD_PROVIDER_ADDITIONAL_MODELS_JSON: Option<&str> =
    option_env!("ZED_CLOUD_PROVIDER_ADDITIONAL_MODELS_JSON");

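/// Additional models for the Zed cloud provider, read at compile time from the
/// `ZED_CLOUD_PROVIDER_ADDITIONAL_MODELS_JSON` environment variable. The value
/// is a JSON array of [`AvailableModel`] entries; a minimal sketch with
/// illustrative values (not a real model list):
///
/// ```json
/// [{"provider": "anthropic", "name": "claude-3-5-sonnet-20240620", "max_tokens": 200000}]
/// ```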
fn zed_cloud_provider_additional_models() -> &'static [AvailableModel] {
    static ADDITIONAL_MODELS: LazyLock<Vec<AvailableModel>> = LazyLock::new(|| {
        ZED_CLOUD_PROVIDER_ADDITIONAL_MODELS_JSON
            .map(|json| serde_json::from_str(json).unwrap())
            .unwrap_or_default()
    });
    ADDITIONAL_MODELS.as_slice()
}

#[derive(Default, Clone, Debug, PartialEq)]
pub struct ZedDotDevSettings {
    pub available_models: Vec<AvailableModel>,
}

#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema)]
#[serde(rename_all = "lowercase")]
pub enum AvailableProvider {
    Anthropic,
    OpenAi,
    Google,
}

#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema)]
pub struct AvailableModel {
    /// The provider of the language model.
    pub provider: AvailableProvider,
    /// The model's name in the provider's API, e.g. `claude-3-5-sonnet-20240620`.
    pub name: String,
    /// The name displayed in the UI, such as in the assistant panel model dropdown menu.
    pub display_name: Option<String>,
    /// The size of the context window, indicating the maximum number of tokens the model can process.
    pub max_tokens: usize,
    /// The maximum number of output tokens allowed by the model.
    pub max_output_tokens: Option<u32>,
    /// The maximum number of completion tokens allowed by the model (o1-* only).
    pub max_completion_tokens: Option<u32>,
    /// Override this model with a different Anthropic model for tool calls.
    pub tool_override: Option<String>,
    /// Indicates whether this custom model supports caching.
    pub cache_configuration: Option<LanguageModelCacheConfiguration>,
    /// The default temperature to use for this model.
    pub default_temperature: Option<f32>,
    /// Any extra beta headers to provide when using the model.
    #[serde(default)]
    pub extra_beta_headers: Vec<String>,
    /// The model's mode (e.g. thinking).
    pub mode: Option<ModelMode>,
}

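/// The runtime mode for a model. With `#[serde(tag = "type", rename_all = "lowercase")]`,
/// the thinking variant is spelled in settings JSON as, e.g. (the budget value
/// here is illustrative): `{"type": "thinking", "budget_tokens": 4096}`.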
#[derive(Default, Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema)]
#[serde(tag = "type", rename_all = "lowercase")]
pub enum ModelMode {
    #[default]
    Default,
    Thinking {
        /// The maximum number of tokens to use for reasoning. Must be lower than the model's `max_output_tokens`.
        budget_tokens: Option<u32>,
    },
}

impl From<ModelMode> for AnthropicModelMode {
    fn from(value: ModelMode) -> Self {
        match value {
            ModelMode::Default => AnthropicModelMode::Default,
            ModelMode::Thinking { budget_tokens } => AnthropicModelMode::Thinking { budget_tokens },
        }
    }
}

pub struct CloudLanguageModelProvider {
    client: Arc<Client>,
    state: gpui::Entity<State>,
    _maintain_client_status: Task<()>,
}

pub struct State {
    client: Arc<Client>,
    llm_api_token: LlmApiToken,
    user_store: Entity<UserStore>,
    status: client::Status,
    accept_terms: Option<Task<Result<()>>>,
    _settings_subscription: Subscription,
    _llm_token_subscription: Subscription,
}

impl State {
    fn new(
        client: Arc<Client>,
        user_store: Entity<UserStore>,
        status: client::Status,
        cx: &mut Context<Self>,
    ) -> Self {
        let refresh_llm_token_listener = RefreshLlmTokenListener::global(cx);

        Self {
            client: client.clone(),
            llm_api_token: LlmApiToken::default(),
            user_store,
            status,
            accept_terms: None,
            _settings_subscription: cx.observe_global::<SettingsStore>(|_, cx| {
                cx.notify();
            }),
            _llm_token_subscription: cx.subscribe(
                &refresh_llm_token_listener,
                |this, _listener, _event, cx| {
                    let client = this.client.clone();
                    let llm_api_token = this.llm_api_token.clone();
                    cx.spawn(async move |_this, _cx| {
                        llm_api_token.refresh(&client).await?;
                        anyhow::Ok(())
                    })
                    .detach_and_log_err(cx);
                },
            ),
        }
    }

    fn is_signed_out(&self) -> bool {
        self.status.is_signed_out()
    }

    fn authenticate(&self, cx: &mut Context<Self>) -> Task<Result<()>> {
        let client = self.client.clone();
        cx.spawn(async move |state, cx| {
            client
                .authenticate_and_connect(true, &cx)
                .await
                .into_response()?;
            state.update(cx, |_, cx| cx.notify())
        })
    }

    fn has_accepted_terms_of_service(&self, cx: &App) -> bool {
        self.user_store
            .read(cx)
            .current_user_has_accepted_terms()
            .unwrap_or(false)
    }

    fn accept_terms_of_service(&mut self, cx: &mut Context<Self>) {
        let user_store = self.user_store.clone();
        self.accept_terms = Some(cx.spawn(async move |this, cx| {
            let _ = user_store
                .update(cx, |store, cx| store.accept_terms_of_service(cx))?
                .await;
            this.update(cx, |this, cx| {
                this.accept_terms = None;
                cx.notify()
            })
        }));
    }
}

impl CloudLanguageModelProvider {
    pub fn new(user_store: Entity<UserStore>, client: Arc<Client>, cx: &mut App) -> Self {
        let mut status_rx = client.status();
        let status = *status_rx.borrow();

        let state = cx.new(|cx| State::new(client.clone(), user_store.clone(), status, cx));

        let state_ref = state.downgrade();
        let maintain_client_status = cx.spawn(async move |cx| {
            while let Some(status) = status_rx.next().await {
                if let Some(this) = state_ref.upgrade() {
                    _ = this.update(cx, |this, cx| {
                        if this.status != status {
                            this.status = status;
                            cx.notify();
                        }
                    });
                } else {
                    break;
                }
            }
        });

        Self {
            client,
            state: state.clone(),
            _maintain_client_status: maintain_client_status,
        }
    }

    fn create_language_model(
        &self,
        model: CloudModel,
        llm_api_token: LlmApiToken,
    ) -> Arc<dyn LanguageModel> {
        Arc::new(CloudLanguageModel {
            id: LanguageModelId::from(model.id().to_string()),
            model,
            llm_api_token: llm_api_token.clone(),
            client: self.client.clone(),
            request_limiter: RateLimiter::new(4),
        })
    }
}

impl LanguageModelProviderState for CloudLanguageModelProvider {
    type ObservableEntity = State;

    fn observable_entity(&self) -> Option<gpui::Entity<Self::ObservableEntity>> {
        Some(self.state.clone())
    }
}

impl LanguageModelProvider for CloudLanguageModelProvider {
    fn id(&self) -> LanguageModelProviderId {
        LanguageModelProviderId(ZED_CLOUD_PROVIDER_ID.into())
    }

    fn name(&self) -> LanguageModelProviderName {
        LanguageModelProviderName(PROVIDER_NAME.into())
    }

    fn icon(&self) -> IconName {
        IconName::AiZed
    }

    fn default_model(&self, cx: &App) -> Option<Arc<dyn LanguageModel>> {
        let llm_api_token = self.state.read(cx).llm_api_token.clone();
        let model = CloudModel::Anthropic(anthropic::Model::Claude3_7Sonnet);
        Some(self.create_language_model(model, llm_api_token))
    }

    fn default_fast_model(&self, cx: &App) -> Option<Arc<dyn LanguageModel>> {
        let llm_api_token = self.state.read(cx).llm_api_token.clone();
        let model = CloudModel::Anthropic(anthropic::Model::Claude3_5Sonnet);
        Some(self.create_language_model(model, llm_api_token))
    }

    fn recommended_models(&self, cx: &App) -> Vec<Arc<dyn LanguageModel>> {
        let llm_api_token = self.state.read(cx).llm_api_token.clone();
        [
            CloudModel::Anthropic(anthropic::Model::Claude3_7Sonnet),
            CloudModel::Anthropic(anthropic::Model::Claude3_7SonnetThinking),
        ]
        .into_iter()
        .map(|model| self.create_language_model(model, llm_api_token.clone()))
        .collect()
    }

    fn provided_models(&self, cx: &App) -> Vec<Arc<dyn LanguageModel>> {
        let mut models = BTreeMap::default();

        if cx.is_staff() {
            for model in anthropic::Model::iter() {
                if !matches!(model, anthropic::Model::Custom { .. }) {
                    models.insert(model.id().to_string(), CloudModel::Anthropic(model));
                }
            }
            for model in open_ai::Model::iter() {
                if !matches!(model, open_ai::Model::Custom { .. }) {
                    models.insert(model.id().to_string(), CloudModel::OpenAi(model));
                }
            }
            for model in google_ai::Model::iter() {
                if !matches!(model, google_ai::Model::Custom { .. }) {
                    models.insert(model.id().to_string(), CloudModel::Google(model));
                }
            }
        } else {
            models.insert(
                anthropic::Model::Claude3_5Sonnet.id().to_string(),
                CloudModel::Anthropic(anthropic::Model::Claude3_5Sonnet),
            );
            models.insert(
                anthropic::Model::Claude3_7Sonnet.id().to_string(),
                CloudModel::Anthropic(anthropic::Model::Claude3_7Sonnet),
            );
            models.insert(
                anthropic::Model::Claude3_7SonnetThinking.id().to_string(),
                CloudModel::Anthropic(anthropic::Model::Claude3_7SonnetThinking),
            );
        }

        let llm_closed_beta_models = if cx.has_flag::<LlmClosedBetaFeatureFlag>() {
            zed_cloud_provider_additional_models()
        } else {
            &[]
        };

        // Override with available models from settings
        for model in AllLanguageModelSettings::get_global(cx)
            .zed_dot_dev
            .available_models
            .iter()
            .chain(llm_closed_beta_models)
            .cloned()
        {
            let model = match model.provider {
                AvailableProvider::Anthropic => CloudModel::Anthropic(anthropic::Model::Custom {
                    name: model.name.clone(),
                    display_name: model.display_name.clone(),
                    max_tokens: model.max_tokens,
                    tool_override: model.tool_override.clone(),
                    cache_configuration: model.cache_configuration.as_ref().map(|config| {
                        anthropic::AnthropicModelCacheConfiguration {
                            max_cache_anchors: config.max_cache_anchors,
                            should_speculate: config.should_speculate,
                            min_total_token: config.min_total_token,
                        }
                    }),
                    default_temperature: model.default_temperature,
                    max_output_tokens: model.max_output_tokens,
                    extra_beta_headers: model.extra_beta_headers.clone(),
                    mode: model.mode.unwrap_or_default().into(),
                }),
                AvailableProvider::OpenAi => CloudModel::OpenAi(open_ai::Model::Custom {
                    name: model.name.clone(),
                    display_name: model.display_name.clone(),
                    max_tokens: model.max_tokens,
                    max_output_tokens: model.max_output_tokens,
                    max_completion_tokens: model.max_completion_tokens,
                }),
                AvailableProvider::Google => CloudModel::Google(google_ai::Model::Custom {
                    name: model.name.clone(),
                    display_name: model.display_name.clone(),
                    max_tokens: model.max_tokens,
                }),
            };
            models.insert(model.id().to_string(), model.clone());
        }

        let llm_api_token = self.state.read(cx).llm_api_token.clone();
        models
            .into_values()
            .map(|model| self.create_language_model(model, llm_api_token.clone()))
            .collect()
    }

    fn is_authenticated(&self, cx: &App) -> bool {
        !self.state.read(cx).is_signed_out()
    }

    fn authenticate(&self, _cx: &mut App) -> Task<Result<(), AuthenticateError>> {
        Task::ready(Ok(()))
    }

    fn configuration_view(&self, _: &mut Window, cx: &mut App) -> AnyView {
        cx.new(|_| ConfigurationView {
            state: self.state.clone(),
        })
        .into()
    }

    fn must_accept_terms(&self, cx: &App) -> bool {
        !self.state.read(cx).has_accepted_terms_of_service(cx)
    }

    fn render_accept_terms(
        &self,
        view: LanguageModelProviderTosView,
        cx: &mut App,
    ) -> Option<AnyElement> {
        render_accept_terms(self.state.clone(), view, cx)
    }

    fn reset_credentials(&self, _cx: &mut App) -> Task<Result<()>> {
        Task::ready(Ok(()))
    }
}

fn render_accept_terms(
    state: Entity<State>,
    view_kind: LanguageModelProviderTosView,
    cx: &mut App,
) -> Option<AnyElement> {
    if state.read(cx).has_accepted_terms_of_service(cx) {
        return None;
    }

    let accept_terms_disabled = state.read(cx).accept_terms.is_some();

    let thread_fresh_start = matches!(view_kind, LanguageModelProviderTosView::ThreadFreshStart);
    let thread_empty_state = matches!(view_kind, LanguageModelProviderTosView::ThreadtEmptyState);

    let terms_button = Button::new("terms_of_service", "Terms of Service")
        .style(ButtonStyle::Subtle)
        .icon(IconName::ArrowUpRight)
        .icon_color(Color::Muted)
        .icon_size(IconSize::XSmall)
        .when(thread_empty_state, |this| this.label_size(LabelSize::Small))
        .on_click(move |_, _window, cx| cx.open_url("https://zed.dev/terms-of-service"));

    let button_container = h_flex().child(
        Button::new("accept_terms", "I accept the Terms of Service")
            .when(!thread_empty_state, |this| {
                this.full_width()
                    .style(ButtonStyle::Tinted(TintColor::Accent))
                    .icon(IconName::Check)
                    .icon_position(IconPosition::Start)
                    .icon_size(IconSize::Small)
            })
            .when(thread_empty_state, |this| {
                this.style(ButtonStyle::Tinted(TintColor::Warning))
                    .label_size(LabelSize::Small)
            })
            .disabled(accept_terms_disabled)
            .on_click({
                let state = state.downgrade();
                move |_, _window, cx| {
                    state
                        .update(cx, |state, cx| state.accept_terms_of_service(cx))
                        .ok();
                }
            }),
    );

    let form = if thread_empty_state {
        h_flex()
            .w_full()
            .flex_wrap()
            .justify_between()
            .child(
                h_flex()
                    .child(
                        Label::new("To start using Zed AI, please read and accept the")
                            .size(LabelSize::Small),
                    )
                    .child(terms_button),
            )
            .child(button_container)
    } else {
        v_flex()
            .w_full()
            .gap_2()
            .child(
                h_flex()
                    .flex_wrap()
                    .when(thread_fresh_start, |this| this.justify_center())
                    .child(Label::new(
                        "To start using Zed AI, please read and accept the",
                    ))
                    .child(terms_button),
            )
            .child({
                match view_kind {
                    LanguageModelProviderTosView::PromptEditorPopup => {
                        button_container.w_full().justify_end()
                    }
                    LanguageModelProviderTosView::Configuration => {
                        button_container.w_full().justify_start()
                    }
                    LanguageModelProviderTosView::ThreadFreshStart => {
                        button_container.w_full().justify_center()
                    }
                    LanguageModelProviderTosView::ThreadtEmptyState => div().w_0(),
                }
            })
    };

    Some(form.into_any())
}

pub struct CloudLanguageModel {
    id: LanguageModelId,
    model: CloudModel,
    llm_api_token: LlmApiToken,
    client: Arc<Client>,
    request_limiter: RateLimiter,
}

struct PerformLlmCompletionResponse {
    response: Response<AsyncBody>,
    usage: Option<RequestUsage>,
    tool_use_limit_reached: bool,
    includes_status_messages: bool,
}

impl CloudLanguageModel {
    const MAX_RETRIES: usize = 3;

    async fn perform_llm_completion(
        client: Arc<Client>,
        llm_api_token: LlmApiToken,
        app_version: Option<SemanticVersion>,
        body: CompletionBody,
    ) -> Result<PerformLlmCompletionResponse> {
        let http_client = &client.http_client();

        let mut token = llm_api_token.acquire(&client).await?;
        let mut retries_remaining = Self::MAX_RETRIES;
        let mut retry_delay = Duration::from_secs(1);

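        // Retry policy: an expired LLM token is refreshed and the request is
        // retried; 5xx responses are retried with exponential backoff (1s,
        // 2s, 4s for MAX_RETRIES = 3); all other failures return immediately.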
        loop {
            let request_builder = http_client::Request::builder()
                .method(Method::POST)
                .uri(http_client.build_zed_llm_url("/completions", &[])?.as_ref());
            let request_builder = if let Some(app_version) = app_version {
                request_builder.header(ZED_VERSION_HEADER_NAME, app_version.to_string())
            } else {
                request_builder
            };

            let request = request_builder
                .header("Content-Type", "application/json")
                .header("Authorization", format!("Bearer {token}"))
                .header(CLIENT_SUPPORTS_STATUS_MESSAGES_HEADER_NAME, "true")
                .body(serde_json::to_string(&body)?.into())?;
            let mut response = http_client.send(request).await?;
            let status = response.status();
            if status.is_success() {
                let includes_status_messages = response
                    .headers()
                    .get(SERVER_SUPPORTS_STATUS_MESSAGES_HEADER_NAME)
                    .is_some();

                let tool_use_limit_reached = response
                    .headers()
                    .get(TOOL_USE_LIMIT_REACHED_HEADER_NAME)
                    .is_some();

                let usage = if includes_status_messages {
                    None
                } else {
                    RequestUsage::from_headers(response.headers()).ok()
                };

                return Ok(PerformLlmCompletionResponse {
                    response,
                    usage,
                    includes_status_messages,
                    tool_use_limit_reached,
                });
            } else if response
                .headers()
                .get(EXPIRED_LLM_TOKEN_HEADER_NAME)
                .is_some()
            {
                // Guard against underflow if the token is still reported as
                // expired after every refresh attempt.
                anyhow::ensure!(
                    retries_remaining > 0,
                    "LLM token remained expired after {} refresh attempts",
                    Self::MAX_RETRIES
                );
                retries_remaining -= 1;
                token = llm_api_token.refresh(&client).await?;
            } else if status == StatusCode::FORBIDDEN
                && response
                    .headers()
                    .get(SUBSCRIPTION_LIMIT_RESOURCE_HEADER_NAME)
                    .is_some()
            {
                if let Some(MODEL_REQUESTS_RESOURCE_HEADER_VALUE) = response
                    .headers()
                    .get(SUBSCRIPTION_LIMIT_RESOURCE_HEADER_NAME)
                    .and_then(|resource| resource.to_str().ok())
                {
                    if let Some(plan) = response
                        .headers()
                        .get(CURRENT_PLAN_HEADER_NAME)
                        .and_then(|plan| plan.to_str().ok())
                        .and_then(|plan| zed_llm_client::Plan::from_str(plan).ok())
                    {
                        let plan = match plan {
                            zed_llm_client::Plan::ZedFree => Plan::Free,
                            zed_llm_client::Plan::ZedPro => Plan::ZedPro,
                            zed_llm_client::Plan::ZedProTrial => Plan::ZedProTrial,
                        };
                        return Err(anyhow!(ModelRequestLimitReachedError { plan }));
                    }
                }

                return Err(anyhow!("Forbidden"));
            } else if status.as_u16() >= 500 && status.as_u16() < 600 {
                // If we encounter an error in the 500 range, retry after a delay.
                // We've seen at least these in the wild from API providers:
                // * 500 Internal Server Error
                // * 502 Bad Gateway
                // * 529 Service Overloaded

                if retries_remaining == 0 {
                    let mut body = String::new();
                    response.body_mut().read_to_string(&mut body).await?;
                    return Err(anyhow!(
                        "cloud language model completion failed after {} retries with status {status}: {body}",
                        Self::MAX_RETRIES
                    ));
                }

                Timer::after(retry_delay).await;

                retries_remaining -= 1;
                retry_delay *= 2; // If it fails again, wait longer.
            } else if status == StatusCode::PAYMENT_REQUIRED {
                return Err(anyhow!(PaymentRequiredError));
            } else {
                let mut body = String::new();
                response.body_mut().read_to_string(&mut body).await?;
                return Err(anyhow!(ApiError { status, body }));
            }
        }
    }
}

#[derive(Debug, Error)]
#[error("cloud language model request failed with status {status}: {body}")]
struct ApiError {
    status: StatusCode,
    body: String,
}

impl LanguageModel for CloudLanguageModel {
    fn id(&self) -> LanguageModelId {
        self.id.clone()
    }

    fn name(&self) -> LanguageModelName {
        LanguageModelName::from(self.model.display_name().to_string())
    }

    fn provider_id(&self) -> LanguageModelProviderId {
        LanguageModelProviderId(ZED_CLOUD_PROVIDER_ID.into())
    }

    fn provider_name(&self) -> LanguageModelProviderName {
        LanguageModelProviderName(PROVIDER_NAME.into())
    }

    fn supports_tools(&self) -> bool {
        match self.model {
            CloudModel::Anthropic(_) => true,
            CloudModel::Google(_) => true,
            CloudModel::OpenAi(_) => true,
        }
    }

    fn supports_images(&self) -> bool {
        match self.model {
            CloudModel::Anthropic(_) => true,
            CloudModel::Google(_) => true,
            CloudModel::OpenAi(_) => false,
        }
    }

    fn supports_tool_choice(&self, choice: LanguageModelToolChoice) -> bool {
        match choice {
            LanguageModelToolChoice::Auto
            | LanguageModelToolChoice::Any
            | LanguageModelToolChoice::None => true,
        }
    }

    fn telemetry_id(&self) -> String {
        format!("zed.dev/{}", self.model.id())
    }

    fn availability(&self) -> LanguageModelAvailability {
        self.model.availability()
    }

    fn tool_input_format(&self) -> LanguageModelToolSchemaFormat {
        self.model.tool_input_format()
    }

    fn max_token_count(&self) -> usize {
        self.model.max_token_count()
    }

    fn cache_configuration(&self) -> Option<LanguageModelCacheConfiguration> {
        match &self.model {
            CloudModel::Anthropic(model) => {
                model
                    .cache_configuration()
                    .map(|cache| LanguageModelCacheConfiguration {
                        max_cache_anchors: cache.max_cache_anchors,
                        should_speculate: cache.should_speculate,
                        min_total_token: cache.min_total_token,
                    })
            }
            CloudModel::OpenAi(_) | CloudModel::Google(_) => None,
        }
    }

    fn count_tokens(
        &self,
        request: LanguageModelRequest,
        cx: &App,
    ) -> BoxFuture<'static, Result<usize>> {
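        // Anthropic and OpenAI token counts are computed locally; for Google
        // models the count is delegated to the Zed LLM service's
        // `/count_tokens` endpoint, which performs the count server-side.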
        match self.model.clone() {
            CloudModel::Anthropic(_) => count_anthropic_tokens(request, cx),
            CloudModel::OpenAi(model) => count_open_ai_tokens(request, model, cx),
            CloudModel::Google(model) => {
                let client = self.client.clone();
                let llm_api_token = self.llm_api_token.clone();
                let model_id = model.id().to_string();
                let generate_content_request = into_google(request, model_id.clone());
                async move {
                    let http_client = &client.http_client();
                    let token = llm_api_token.acquire(&client).await?;

                    let request_body = CountTokensBody {
                        provider: zed_llm_client::LanguageModelProvider::Google,
                        model: model_id,
                        provider_request: serde_json::to_value(&google_ai::CountTokensRequest {
                            generate_content_request,
                        })?,
                    };
                    let request = http_client::Request::builder()
                        .method(Method::POST)
                        .uri(
                            http_client
                                .build_zed_llm_url("/count_tokens", &[])?
                                .as_ref(),
                        )
                        .header("Content-Type", "application/json")
                        .header("Authorization", format!("Bearer {token}"))
                        .body(serde_json::to_string(&request_body)?.into())?;
                    let mut response = http_client.send(request).await?;
                    let status = response.status();
                    let mut response_body = String::new();
                    response
                        .body_mut()
                        .read_to_string(&mut response_body)
                        .await?;

                    if status.is_success() {
                        let response_body: CountTokensResponse =
                            serde_json::from_str(&response_body)?;

                        Ok(response_body.tokens)
                    } else {
                        Err(anyhow!(ApiError {
                            status,
                            body: response_body
                        }))
                    }
                }
                .boxed()
            }
        }
    }

    fn stream_completion(
        &self,
        request: LanguageModelRequest,
        cx: &AsyncApp,
    ) -> BoxFuture<
        'static,
        Result<
            BoxStream<'static, Result<LanguageModelCompletionEvent, LanguageModelCompletionError>>,
        >,
    > {
        let thread_id = request.thread_id.clone();
        let prompt_id = request.prompt_id.clone();
        let mode = request.mode;
        let app_version = cx.update(|cx| AppVersion::global(cx)).ok();
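        // Each provider arm follows the same pipeline: convert the request to
        // the provider's wire format, POST it via `perform_llm_completion`,
        // then parse the streamed lines and feed them through the provider's
        // event mapper, chaining any usage/tool-limit status events at the end.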
        match &self.model {
            CloudModel::Anthropic(model) => {
                let request = into_anthropic(
                    request,
                    model.request_id().into(),
                    model.default_temperature(),
                    model.max_output_tokens(),
                    model.mode(),
                );
                let client = self.client.clone();
                let llm_api_token = self.llm_api_token.clone();
                let future = self.request_limiter.stream(async move {
                    let PerformLlmCompletionResponse {
                        response,
                        usage,
                        includes_status_messages,
                        tool_use_limit_reached,
                    } = Self::perform_llm_completion(
                        client.clone(),
                        llm_api_token,
                        app_version,
                        CompletionBody {
                            thread_id,
                            prompt_id,
                            mode,
                            provider: zed_llm_client::LanguageModelProvider::Anthropic,
                            model: request.model.clone(),
                            provider_request: serde_json::to_value(&request)?,
                        },
                    )
                    .await
                    .map_err(|err| match err.downcast::<ApiError>() {
                        Ok(api_err) => {
                            if api_err.status == StatusCode::BAD_REQUEST {
                                if let Some(tokens) = parse_prompt_too_long(&api_err.body) {
                                    return anyhow!(
                                        LanguageModelKnownError::ContextWindowLimitExceeded {
                                            tokens
                                        }
                                    );
                                }
                            }
                            anyhow!(api_err)
                        }
                        Err(err) => anyhow!(err),
                    })?;

                    let mut mapper = AnthropicEventMapper::new();
                    Ok(map_cloud_completion_events(
                        Box::pin(
                            response_lines(response, includes_status_messages)
                                .chain(usage_updated_event(usage))
                                .chain(tool_use_limit_reached_event(tool_use_limit_reached)),
                        ),
                        move |event| mapper.map_event(event),
                    ))
                });
                async move { Ok(future.await?.boxed()) }.boxed()
            }
            CloudModel::OpenAi(model) => {
                let client = self.client.clone();
                let request = into_open_ai(request, model, model.max_output_tokens());
                let llm_api_token = self.llm_api_token.clone();
                let future = self.request_limiter.stream(async move {
                    let PerformLlmCompletionResponse {
                        response,
                        usage,
                        includes_status_messages,
                        tool_use_limit_reached,
                    } = Self::perform_llm_completion(
                        client.clone(),
                        llm_api_token,
                        app_version,
                        CompletionBody {
                            thread_id,
                            prompt_id,
                            mode,
                            provider: zed_llm_client::LanguageModelProvider::OpenAi,
                            model: request.model.clone(),
                            provider_request: serde_json::to_value(&request)?,
                        },
                    )
                    .await?;

                    let mut mapper = OpenAiEventMapper::new();
                    Ok(map_cloud_completion_events(
                        Box::pin(
                            response_lines(response, includes_status_messages)
                                .chain(usage_updated_event(usage))
                                .chain(tool_use_limit_reached_event(tool_use_limit_reached)),
                        ),
                        move |event| mapper.map_event(event),
                    ))
                });
                async move { Ok(future.await?.boxed()) }.boxed()
            }
            CloudModel::Google(model) => {
                let client = self.client.clone();
                let request = into_google(request, model.id().into());
                let llm_api_token = self.llm_api_token.clone();
                let future = self.request_limiter.stream(async move {
                    let PerformLlmCompletionResponse {
                        response,
                        usage,
                        includes_status_messages,
                        tool_use_limit_reached,
                    } = Self::perform_llm_completion(
                        client.clone(),
                        llm_api_token,
                        app_version,
                        CompletionBody {
                            thread_id,
                            prompt_id,
                            mode,
                            provider: zed_llm_client::LanguageModelProvider::Google,
                            model: request.model.model_id.clone(),
                            provider_request: serde_json::to_value(&request)?,
                        },
                    )
                    .await?;

                    let mut mapper = GoogleEventMapper::new();
                    Ok(map_cloud_completion_events(
                        Box::pin(
                            response_lines(response, includes_status_messages)
                                .chain(usage_updated_event(usage))
                                .chain(tool_use_limit_reached_event(tool_use_limit_reached)),
                        ),
                        move |event| mapper.map_event(event),
                    ))
                });
                async move { Ok(future.await?.boxed()) }.boxed()
            }
        }
    }
}

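/// One frame in the completion stream: either a Zed status message or a raw
/// provider event. With serde's default external tagging plus `snake_case`
/// renaming, the tagged form is `{"status": ...}` or `{"event": ...}` on the
/// wire (assuming the server emits the same serde representation).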
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "snake_case")]
pub enum CloudCompletionEvent<T> {
    Status(CompletionRequestStatus),
    Event(T),
}

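/// Flattens a stream of [`CloudCompletionEvent`]s into completion events:
/// stream errors become `LanguageModelCompletionError::Other`, status messages
/// pass through as `StatusUpdate`s, and provider events are expanded by
/// `map_callback`, which may yield zero or more events apiece.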
fn map_cloud_completion_events<T, F>(
    stream: Pin<Box<dyn Stream<Item = Result<CloudCompletionEvent<T>>> + Send>>,
    mut map_callback: F,
) -> BoxStream<'static, Result<LanguageModelCompletionEvent, LanguageModelCompletionError>>
where
    T: DeserializeOwned + 'static,
    F: FnMut(T) -> Vec<Result<LanguageModelCompletionEvent, LanguageModelCompletionError>>
        + Send
        + 'static,
{
    stream
        .flat_map(move |event| {
            futures::stream::iter(match event {
                Err(error) => {
                    vec![Err(LanguageModelCompletionError::Other(error))]
                }
                Ok(CloudCompletionEvent::Status(event)) => {
                    vec![Ok(LanguageModelCompletionEvent::StatusUpdate(event))]
                }
                Ok(CloudCompletionEvent::Event(event)) => map_callback(event),
            })
        })
        .boxed()
}

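/// Emits at most one synthetic status event carrying the usage that was
/// reported via response headers (only captured when the server does not
/// stream status messages inline); with no header usage, the stream is empty.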
fn usage_updated_event<T>(
    usage: Option<RequestUsage>,
) -> impl Stream<Item = Result<CloudCompletionEvent<T>>> {
    futures::stream::iter(usage.map(|usage| {
        Ok(CloudCompletionEvent::Status(
            CompletionRequestStatus::UsageUpdated {
                amount: usage.amount as usize,
                limit: usage.limit,
            },
        ))
    }))
}

fn tool_use_limit_reached_event<T>(
    tool_use_limit_reached: bool,
) -> impl Stream<Item = Result<CloudCompletionEvent<T>>> {
    futures::stream::iter(tool_use_limit_reached.then(|| {
        Ok(CloudCompletionEvent::Status(
            CompletionRequestStatus::ToolUseLimitReached,
        ))
    }))
}

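/// Parses the response body as newline-delimited JSON. When the server
/// advertised status-message support, each line is deserialized as a tagged
/// [`CloudCompletionEvent`]; otherwise each line is a bare provider event and
/// is wrapped in `CloudCompletionEvent::Event`.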
fn response_lines<T: DeserializeOwned>(
    response: Response<AsyncBody>,
    includes_status_messages: bool,
) -> impl Stream<Item = Result<CloudCompletionEvent<T>>> {
    futures::stream::try_unfold(
        (String::new(), BufReader::new(response.into_body())),
        move |(mut line, mut body)| async move {
            match body.read_line(&mut line).await {
                Ok(0) => Ok(None),
                Ok(_) => {
                    let event = if includes_status_messages {
                        serde_json::from_str::<CloudCompletionEvent<T>>(&line)?
                    } else {
                        CloudCompletionEvent::Event(serde_json::from_str::<T>(&line)?)
                    };

                    line.clear();
                    Ok(Some((event, (line, body))))
                }
                Err(e) => Err(e.into()),
            }
        },
    )
}

struct ConfigurationView {
    state: gpui::Entity<State>,
}

impl ConfigurationView {
    fn authenticate(&mut self, cx: &mut Context<Self>) {
        self.state.update(cx, |state, cx| {
            state.authenticate(cx).detach_and_log_err(cx);
        });
        cx.notify();
    }
}

impl Render for ConfigurationView {
    fn render(&mut self, _: &mut Window, cx: &mut Context<Self>) -> impl IntoElement {
        const ZED_PRICING_URL: &str = "https://zed.dev/pricing";

        let is_connected = !self.state.read(cx).is_signed_out();
        let user_store = self.state.read(cx).user_store.read(cx);
        let plan = user_store.current_plan();
        let subscription_period = user_store.subscription_period();
        let eligible_for_trial = user_store.trial_started_at().is_none();
        let has_accepted_terms = self.state.read(cx).has_accepted_terms_of_service(cx);

        let is_pro = plan == Some(proto::Plan::ZedPro);
        let subscription_text = match (plan, subscription_period) {
            (Some(proto::Plan::ZedPro), Some(_)) => {
                "You have access to Zed's hosted LLMs through your Zed Pro subscription."
            }
            (Some(proto::Plan::ZedProTrial), Some(_)) => {
                "You have access to Zed's hosted LLMs through your Zed Pro trial."
            }
            (Some(proto::Plan::Free), Some(_)) => {
                "You have basic access to Zed's hosted LLMs through your Zed Free subscription."
            }
            _ => {
                if eligible_for_trial {
                    "Subscribe for access to Zed's hosted LLMs. Start with a 14-day free trial."
                } else {
                    "Subscribe for access to Zed's hosted LLMs."
                }
            }
        };
        let manage_subscription_buttons = if is_pro {
            h_flex().child(
                Button::new("manage_settings", "Manage Subscription")
                    .style(ButtonStyle::Tinted(TintColor::Accent))
                    .on_click(cx.listener(|_, _, _, cx| cx.open_url(&zed_urls::account_url(cx)))),
            )
        } else {
            h_flex()
                .gap_2()
                .child(
                    Button::new("learn_more", "Learn more")
                        .style(ButtonStyle::Subtle)
                        .on_click(cx.listener(|_, _, _, cx| cx.open_url(ZED_PRICING_URL))),
                )
                .child(
                    Button::new("upgrade", "Upgrade")
                        .style(ButtonStyle::Subtle)
                        .color(Color::Accent)
                        .on_click(
                            cx.listener(|_, _, _, cx| cx.open_url(&zed_urls::account_url(cx))),
                        ),
                )
        };

        if is_connected {
            v_flex()
                .gap_3()
                .w_full()
                .children(render_accept_terms(
                    self.state.clone(),
                    LanguageModelProviderTosView::Configuration,
                    cx,
                ))
                .when(has_accepted_terms, |this| {
                    this.child(subscription_text)
                        .child(manage_subscription_buttons)
                })
        } else {
            v_flex()
                .gap_2()
                .child(Label::new("Use Zed AI to access hosted language models."))
                .child(
                    Button::new("sign_in", "Sign In")
                        .icon_color(Color::Muted)
                        .icon(IconName::Github)
                        .icon_position(IconPosition::Start)
                        .on_click(cx.listener(move |this, _, _, cx| this.authenticate(cx))),
                )
        }
    }
}