use anthropic::{AnthropicModelMode, parse_prompt_too_long};
use anyhow::{Result, anyhow};
use client::{Client, UserStore, zed_urls};
use collections::BTreeMap;
use feature_flags::{FeatureFlagAppExt, LlmClosedBetaFeatureFlag};
use futures::{
    AsyncBufReadExt, FutureExt, Stream, StreamExt, future::BoxFuture, stream::BoxStream,
};
use gpui::{
    AnyElement, AnyView, App, AsyncApp, Context, Entity, SemanticVersion, Subscription, Task,
};
use http_client::{AsyncBody, HttpClient, Method, Response, StatusCode};
use language_model::{
    AuthenticateError, CloudModel, LanguageModel, LanguageModelCacheConfiguration,
    LanguageModelCompletionError, LanguageModelId, LanguageModelKnownError, LanguageModelName,
    LanguageModelProviderId, LanguageModelProviderName, LanguageModelProviderState,
    LanguageModelProviderTosView, LanguageModelRequest, LanguageModelToolChoice,
    LanguageModelToolSchemaFormat, ModelRequestLimitReachedError, RateLimiter, RequestUsage,
    ZED_CLOUD_PROVIDER_ID,
};
use language_model::{
    LanguageModelAvailability, LanguageModelCompletionEvent, LanguageModelProvider, LlmApiToken,
    MaxMonthlySpendReachedError, PaymentRequiredError, RefreshLlmTokenListener,
};
use proto::Plan;
use release_channel::AppVersion;
use schemars::JsonSchema;
use serde::{Deserialize, Serialize, de::DeserializeOwned};
use settings::{Settings, SettingsStore};
use smol::Timer;
use smol::io::{AsyncReadExt, BufReader};
use std::pin::Pin;
use std::str::FromStr as _;
use std::{
    sync::{Arc, LazyLock},
    time::Duration,
};
use strum::IntoEnumIterator;
use thiserror::Error;
use ui::{TintColor, prelude::*};
use zed_llm_client::{
    CLIENT_SUPPORTS_STATUS_MESSAGES_HEADER_NAME, CURRENT_PLAN_HEADER_NAME, CompletionBody,
    CompletionRequestStatus, CountTokensBody, CountTokensResponse, EXPIRED_LLM_TOKEN_HEADER_NAME,
    MAX_LLM_MONTHLY_SPEND_REACHED_HEADER_NAME, MODEL_REQUESTS_RESOURCE_HEADER_VALUE,
    SERVER_SUPPORTS_STATUS_MESSAGES_HEADER_NAME, SUBSCRIPTION_LIMIT_RESOURCE_HEADER_NAME,
    TOOL_USE_LIMIT_REACHED_HEADER_NAME, ZED_VERSION_HEADER_NAME,
};

use crate::AllLanguageModelSettings;
use crate::provider::anthropic::{AnthropicEventMapper, count_anthropic_tokens, into_anthropic};
use crate::provider::google::{GoogleEventMapper, into_google};
use crate::provider::open_ai::{OpenAiEventMapper, count_open_ai_tokens, into_open_ai};

pub const PROVIDER_NAME: &str = "Zed";

const ZED_CLOUD_PROVIDER_ADDITIONAL_MODELS_JSON: Option<&str> =
    option_env!("ZED_CLOUD_PROVIDER_ADDITIONAL_MODELS_JSON");

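/// Additional models baked in at compile time via the
/// `ZED_CLOUD_PROVIDER_ADDITIONAL_MODELS_JSON` environment variable, which is
/// expected to hold a JSON array of [`AvailableModel`] entries. Invalid JSON
/// panics on first access. An illustrative (not authoritative) example:
///
/// ```json
/// [
///   {
///     "provider": "anthropic",
///     "name": "some-model-id",
///     "max_tokens": 200000
///   }
/// ]
/// ```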
fn zed_cloud_provider_additional_models() -> &'static [AvailableModel] {
    static ADDITIONAL_MODELS: LazyLock<Vec<AvailableModel>> = LazyLock::new(|| {
        ZED_CLOUD_PROVIDER_ADDITIONAL_MODELS_JSON
            .map(|json| serde_json::from_str(json).unwrap())
            .unwrap_or_default()
    });
    ADDITIONAL_MODELS.as_slice()
}

#[derive(Default, Clone, Debug, PartialEq)]
pub struct ZedDotDevSettings {
    pub available_models: Vec<AvailableModel>,
}

#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema)]
#[serde(rename_all = "lowercase")]
pub enum AvailableProvider {
    Anthropic,
    OpenAi,
    Google,
}

#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema)]
pub struct AvailableModel {
    /// The provider of the language model.
    pub provider: AvailableProvider,
    /// The model's name in the provider's API, e.g. `claude-3-5-sonnet-20240620`.
    pub name: String,
    /// The name displayed in the UI, such as in the assistant panel model dropdown menu.
    pub display_name: Option<String>,
    /// The size of the context window, indicating the maximum number of tokens the model can process.
    pub max_tokens: usize,
    /// The maximum number of output tokens allowed by the model.
    pub max_output_tokens: Option<u32>,
    /// The maximum number of completion tokens allowed by the model (o1-* only)
    pub max_completion_tokens: Option<u32>,
    /// Override this model with a different Anthropic model for tool calls.
    pub tool_override: Option<String>,
    /// Indicates whether this custom model supports caching.
    pub cache_configuration: Option<LanguageModelCacheConfiguration>,
    /// The default temperature to use for this model.
    pub default_temperature: Option<f32>,
    /// Any extra beta headers to provide when using the model.
    #[serde(default)]
    pub extra_beta_headers: Vec<String>,
    /// The model's mode (e.g. thinking)
    pub mode: Option<ModelMode>,
}
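// For illustration only: a custom model as it might appear in JSON, with
// hypothetical values; which fields are honored varies by provider (see the
// match in `provided_models` below).
//
// {
//   "provider": "openai",
//   "name": "a-custom-model-id",
//   "display_name": "A Custom Model",
//   "max_tokens": 128000,
//   "max_output_tokens": 4096
// }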

#[derive(Default, Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema)]
#[serde(tag = "type", rename_all = "lowercase")]
pub enum ModelMode {
    #[default]
    Default,
    Thinking {
        /// The maximum number of tokens to use for reasoning. Must be lower than the model's `max_output_tokens`.
        budget_tokens: Option<u32>,
    },
}
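// With `tag = "type"` and lowercase variant names, `ModelMode` round-trips as
// `{"type": "default"}` or, e.g., `{"type": "thinking", "budget_tokens": 4096}`
// (the budget value is illustrative).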

impl From<ModelMode> for AnthropicModelMode {
    fn from(value: ModelMode) -> Self {
        match value {
            ModelMode::Default => AnthropicModelMode::Default,
            ModelMode::Thinking { budget_tokens } => AnthropicModelMode::Thinking { budget_tokens },
        }
    }
}

pub struct CloudLanguageModelProvider {
    client: Arc<Client>,
    state: gpui::Entity<State>,
    _maintain_client_status: Task<()>,
}

pub struct State {
    client: Arc<Client>,
    llm_api_token: LlmApiToken,
    user_store: Entity<UserStore>,
    status: client::Status,
    accept_terms: Option<Task<Result<()>>>,
    _settings_subscription: Subscription,
    _llm_token_subscription: Subscription,
}

impl State {
    fn new(
        client: Arc<Client>,
        user_store: Entity<UserStore>,
        status: client::Status,
        cx: &mut Context<Self>,
    ) -> Self {
        let refresh_llm_token_listener = RefreshLlmTokenListener::global(cx);

        Self {
            client: client.clone(),
            llm_api_token: LlmApiToken::default(),
            user_store,
            status,
            accept_terms: None,
            _settings_subscription: cx.observe_global::<SettingsStore>(|_, cx| {
                cx.notify();
            }),
            _llm_token_subscription: cx.subscribe(
                &refresh_llm_token_listener,
                |this, _listener, _event, cx| {
                    let client = this.client.clone();
                    let llm_api_token = this.llm_api_token.clone();
                    cx.spawn(async move |_this, _cx| {
                        llm_api_token.refresh(&client).await?;
                        anyhow::Ok(())
                    })
                    .detach_and_log_err(cx);
                },
            ),
        }
    }

    fn is_signed_out(&self) -> bool {
        self.status.is_signed_out()
    }

    fn authenticate(&self, cx: &mut Context<Self>) -> Task<Result<()>> {
        let client = self.client.clone();
        cx.spawn(async move |this, cx| {
            client.authenticate_and_connect(true, &cx).await?;
            this.update(cx, |_, cx| cx.notify())
        })
    }

    fn has_accepted_terms_of_service(&self, cx: &App) -> bool {
        self.user_store
            .read(cx)
            .current_user_has_accepted_terms()
            .unwrap_or(false)
    }

    fn accept_terms_of_service(&mut self, cx: &mut Context<Self>) {
        let user_store = self.user_store.clone();
        self.accept_terms = Some(cx.spawn(async move |this, cx| {
            let _ = user_store
                .update(cx, |store, cx| store.accept_terms_of_service(cx))?
                .await;
            this.update(cx, |this, cx| {
                this.accept_terms = None;
                cx.notify()
            })
        }));
    }
}

impl CloudLanguageModelProvider {
    pub fn new(user_store: Entity<UserStore>, client: Arc<Client>, cx: &mut App) -> Self {
        let mut status_rx = client.status();
        let status = *status_rx.borrow();

        let state = cx.new(|cx| State::new(client.clone(), user_store.clone(), status, cx));

        let state_ref = state.downgrade();
        let maintain_client_status = cx.spawn(async move |cx| {
            while let Some(status) = status_rx.next().await {
                if let Some(this) = state_ref.upgrade() {
                    _ = this.update(cx, |this, cx| {
                        if this.status != status {
                            this.status = status;
                            cx.notify();
                        }
                    });
                } else {
                    break;
                }
            }
        });

        Self {
            client,
            state: state.clone(),
            _maintain_client_status: maintain_client_status,
        }
    }

    fn create_language_model(
        &self,
        model: CloudModel,
        llm_api_token: LlmApiToken,
    ) -> Arc<dyn LanguageModel> {
        Arc::new(CloudLanguageModel {
            id: LanguageModelId::from(model.id().to_string()),
            model,
            llm_api_token: llm_api_token.clone(),
            client: self.client.clone(),
            request_limiter: RateLimiter::new(4),
        })
    }
}

impl LanguageModelProviderState for CloudLanguageModelProvider {
    type ObservableEntity = State;

    fn observable_entity(&self) -> Option<gpui::Entity<Self::ObservableEntity>> {
        Some(self.state.clone())
    }
}

impl LanguageModelProvider for CloudLanguageModelProvider {
    fn id(&self) -> LanguageModelProviderId {
        LanguageModelProviderId(ZED_CLOUD_PROVIDER_ID.into())
    }

    fn name(&self) -> LanguageModelProviderName {
        LanguageModelProviderName(PROVIDER_NAME.into())
    }

    fn icon(&self) -> IconName {
        IconName::AiZed
    }

    fn default_model(&self, cx: &App) -> Option<Arc<dyn LanguageModel>> {
        let llm_api_token = self.state.read(cx).llm_api_token.clone();
        let model = CloudModel::Anthropic(anthropic::Model::Claude3_7Sonnet);
        Some(self.create_language_model(model, llm_api_token))
    }

    fn default_fast_model(&self, cx: &App) -> Option<Arc<dyn LanguageModel>> {
        let llm_api_token = self.state.read(cx).llm_api_token.clone();
        let model = CloudModel::Anthropic(anthropic::Model::Claude3_5Sonnet);
        Some(self.create_language_model(model, llm_api_token))
    }

    fn recommended_models(&self, cx: &App) -> Vec<Arc<dyn LanguageModel>> {
        let llm_api_token = self.state.read(cx).llm_api_token.clone();
        [
            CloudModel::Anthropic(anthropic::Model::Claude3_7Sonnet),
            CloudModel::Anthropic(anthropic::Model::Claude3_7SonnetThinking),
        ]
        .into_iter()
        .map(|model| self.create_language_model(model, llm_api_token.clone()))
        .collect()
    }

    fn provided_models(&self, cx: &App) -> Vec<Arc<dyn LanguageModel>> {
        let mut models = BTreeMap::default();

        if cx.is_staff() {
            for model in anthropic::Model::iter() {
                if !matches!(model, anthropic::Model::Custom { .. }) {
                    models.insert(model.id().to_string(), CloudModel::Anthropic(model));
                }
            }
            for model in open_ai::Model::iter() {
                if !matches!(model, open_ai::Model::Custom { .. }) {
                    models.insert(model.id().to_string(), CloudModel::OpenAi(model));
                }
            }
            for model in google_ai::Model::iter() {
                if !matches!(model, google_ai::Model::Custom { .. }) {
                    models.insert(model.id().to_string(), CloudModel::Google(model));
                }
            }
        } else {
            models.insert(
                anthropic::Model::Claude3_5Sonnet.id().to_string(),
                CloudModel::Anthropic(anthropic::Model::Claude3_5Sonnet),
            );
            models.insert(
                anthropic::Model::Claude3_7Sonnet.id().to_string(),
                CloudModel::Anthropic(anthropic::Model::Claude3_7Sonnet),
            );
            models.insert(
                anthropic::Model::Claude3_7SonnetThinking.id().to_string(),
                CloudModel::Anthropic(anthropic::Model::Claude3_7SonnetThinking),
            );
        }

        let llm_closed_beta_models = if cx.has_flag::<LlmClosedBetaFeatureFlag>() {
            zed_cloud_provider_additional_models()
        } else {
            &[]
        };

        // Override with available models from settings, plus any closed-beta models.
        for model in AllLanguageModelSettings::get_global(cx)
            .zed_dot_dev
            .available_models
            .iter()
            .chain(llm_closed_beta_models)
            .cloned()
        {
            let model = match model.provider {
                AvailableProvider::Anthropic => CloudModel::Anthropic(anthropic::Model::Custom {
                    name: model.name.clone(),
                    display_name: model.display_name.clone(),
                    max_tokens: model.max_tokens,
                    tool_override: model.tool_override.clone(),
                    cache_configuration: model.cache_configuration.as_ref().map(|config| {
                        anthropic::AnthropicModelCacheConfiguration {
                            max_cache_anchors: config.max_cache_anchors,
                            should_speculate: config.should_speculate,
                            min_total_token: config.min_total_token,
                        }
                    }),
                    default_temperature: model.default_temperature,
                    max_output_tokens: model.max_output_tokens,
                    extra_beta_headers: model.extra_beta_headers.clone(),
                    mode: model.mode.unwrap_or_default().into(),
                }),
                AvailableProvider::OpenAi => CloudModel::OpenAi(open_ai::Model::Custom {
                    name: model.name.clone(),
                    display_name: model.display_name.clone(),
                    max_tokens: model.max_tokens,
                    max_output_tokens: model.max_output_tokens,
                    max_completion_tokens: model.max_completion_tokens,
                }),
                AvailableProvider::Google => CloudModel::Google(google_ai::Model::Custom {
                    name: model.name.clone(),
                    display_name: model.display_name.clone(),
                    max_tokens: model.max_tokens,
                }),
            };
            models.insert(model.id().to_string(), model.clone());
        }

        let llm_api_token = self.state.read(cx).llm_api_token.clone();
        models
            .into_values()
            .map(|model| self.create_language_model(model, llm_api_token.clone()))
            .collect()
    }

    fn is_authenticated(&self, cx: &App) -> bool {
        !self.state.read(cx).is_signed_out()
    }

    fn authenticate(&self, _cx: &mut App) -> Task<Result<(), AuthenticateError>> {
        Task::ready(Ok(()))
    }

    fn configuration_view(&self, _: &mut Window, cx: &mut App) -> AnyView {
        cx.new(|_| ConfigurationView {
            state: self.state.clone(),
        })
        .into()
    }

    fn must_accept_terms(&self, cx: &App) -> bool {
        !self.state.read(cx).has_accepted_terms_of_service(cx)
    }

    fn render_accept_terms(
        &self,
        view: LanguageModelProviderTosView,
        cx: &mut App,
    ) -> Option<AnyElement> {
        render_accept_terms(self.state.clone(), view, cx)
    }

    fn reset_credentials(&self, _cx: &mut App) -> Task<Result<()>> {
        Task::ready(Ok(()))
    }
}

fn render_accept_terms(
    state: Entity<State>,
    view_kind: LanguageModelProviderTosView,
    cx: &mut App,
) -> Option<AnyElement> {
    if state.read(cx).has_accepted_terms_of_service(cx) {
        return None;
    }

    let accept_terms_disabled = state.read(cx).accept_terms.is_some();

    let thread_fresh_start = matches!(view_kind, LanguageModelProviderTosView::ThreadFreshStart);
    let thread_empty_state = matches!(view_kind, LanguageModelProviderTosView::ThreadtEmptyState);

    let terms_button = Button::new("terms_of_service", "Terms of Service")
        .style(ButtonStyle::Subtle)
        .icon(IconName::ArrowUpRight)
        .icon_color(Color::Muted)
        .icon_size(IconSize::XSmall)
        .when(thread_empty_state, |this| this.label_size(LabelSize::Small))
        .on_click(move |_, _window, cx| cx.open_url("https://zed.dev/terms-of-service"));

    let button_container = h_flex().child(
        Button::new("accept_terms", "I accept the Terms of Service")
            .when(!thread_empty_state, |this| {
                this.full_width()
                    .style(ButtonStyle::Tinted(TintColor::Accent))
                    .icon(IconName::Check)
                    .icon_position(IconPosition::Start)
                    .icon_size(IconSize::Small)
            })
            .when(thread_empty_state, |this| {
                this.style(ButtonStyle::Tinted(TintColor::Warning))
                    .label_size(LabelSize::Small)
            })
            .disabled(accept_terms_disabled)
            .on_click({
                let state = state.downgrade();
                move |_, _window, cx| {
                    state
                        .update(cx, |state, cx| state.accept_terms_of_service(cx))
                        .ok();
                }
            }),
    );

    let form = if thread_empty_state {
        h_flex()
            .w_full()
            .flex_wrap()
            .justify_between()
            .child(
                h_flex()
                    .child(
                        Label::new("To start using Zed AI, please read and accept the")
                            .size(LabelSize::Small),
                    )
                    .child(terms_button),
            )
            .child(button_container)
    } else {
        v_flex()
            .w_full()
            .gap_2()
            .child(
                h_flex()
                    .flex_wrap()
                    .when(thread_fresh_start, |this| this.justify_center())
                    .child(Label::new(
                        "To start using Zed AI, please read and accept the",
                    ))
                    .child(terms_button),
            )
            .child({
                match view_kind {
                    LanguageModelProviderTosView::PromptEditorPopup => {
                        button_container.w_full().justify_end()
                    }
                    LanguageModelProviderTosView::Configuration => {
                        button_container.w_full().justify_start()
                    }
                    LanguageModelProviderTosView::ThreadFreshStart => {
                        button_container.w_full().justify_center()
                    }
                    LanguageModelProviderTosView::ThreadtEmptyState => div().w_0(),
                }
            })
    };

    Some(form.into_any())
}

pub struct CloudLanguageModel {
    id: LanguageModelId,
    model: CloudModel,
    llm_api_token: LlmApiToken,
    client: Arc<Client>,
    request_limiter: RateLimiter,
}

struct PerformLlmCompletionResponse {
    response: Response<AsyncBody>,
    usage: Option<RequestUsage>,
    tool_use_limit_reached: bool,
    includes_status_messages: bool,
}

impl CloudLanguageModel {
    const MAX_RETRIES: usize = 3;

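    /// Sends a completion request to the Zed LLM service.
    ///
    /// Refreshes the LLM token and retries on expired-token responses, retries
    /// 5xx responses with exponential backoff (up to [`Self::MAX_RETRIES`]
    /// attempts), and maps known error headers (payment required, monthly
    /// spend reached, subscription limits) onto typed errors.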
    async fn perform_llm_completion(
        client: Arc<Client>,
        llm_api_token: LlmApiToken,
        app_version: Option<SemanticVersion>,
        body: CompletionBody,
    ) -> Result<PerformLlmCompletionResponse> {
        let http_client = &client.http_client();

        let mut token = llm_api_token.acquire(&client).await?;
        let mut retries_remaining = Self::MAX_RETRIES;
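        // Exponential backoff for 5xx responses: wait 1s, then 2s, then 4s between attempts.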
        let mut retry_delay = Duration::from_secs(1);

        loop {
            let request_builder = http_client::Request::builder()
                .method(Method::POST)
                .uri(http_client.build_zed_llm_url("/completions", &[])?.as_ref());
            let request_builder = if let Some(app_version) = app_version {
                request_builder.header(ZED_VERSION_HEADER_NAME, app_version.to_string())
            } else {
                request_builder
            };

            let request = request_builder
                .header("Content-Type", "application/json")
                .header("Authorization", format!("Bearer {token}"))
                .header(CLIENT_SUPPORTS_STATUS_MESSAGES_HEADER_NAME, "true")
                .body(serde_json::to_string(&body)?.into())?;
            let mut response = http_client.send(request).await?;
            let status = response.status();
            if status.is_success() {
                let includes_status_messages = response
                    .headers()
                    .get(SERVER_SUPPORTS_STATUS_MESSAGES_HEADER_NAME)
                    .is_some();

                let tool_use_limit_reached = response
                    .headers()
                    .get(TOOL_USE_LIMIT_REACHED_HEADER_NAME)
                    .is_some();

                let usage = if includes_status_messages {
                    None
                } else {
                    RequestUsage::from_headers(response.headers()).ok()
                };

                return Ok(PerformLlmCompletionResponse {
                    response,
                    usage,
                    includes_status_messages,
                    tool_use_limit_reached,
                });
            } else if response
                .headers()
                .get(EXPIRED_LLM_TOKEN_HEADER_NAME)
                .is_some()
            {
                retries_remaining -= 1;
                token = llm_api_token.refresh(&client).await?;
            } else if status == StatusCode::FORBIDDEN
                && response
                    .headers()
                    .get(MAX_LLM_MONTHLY_SPEND_REACHED_HEADER_NAME)
                    .is_some()
            {
                return Err(anyhow!(MaxMonthlySpendReachedError));
            } else if status == StatusCode::FORBIDDEN
                && response
                    .headers()
                    .get(SUBSCRIPTION_LIMIT_RESOURCE_HEADER_NAME)
                    .is_some()
            {
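                // A const in an `if let` pattern matches by equality, so this
                // checks that the limited resource named in the header is model requests.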
                if let Some(MODEL_REQUESTS_RESOURCE_HEADER_VALUE) = response
                    .headers()
                    .get(SUBSCRIPTION_LIMIT_RESOURCE_HEADER_NAME)
                    .and_then(|resource| resource.to_str().ok())
                {
                    if let Some(plan) = response
                        .headers()
                        .get(CURRENT_PLAN_HEADER_NAME)
                        .and_then(|plan| plan.to_str().ok())
                        .and_then(|plan| zed_llm_client::Plan::from_str(plan).ok())
                    {
                        let plan = match plan {
                            zed_llm_client::Plan::ZedFree => Plan::Free,
                            zed_llm_client::Plan::ZedPro => Plan::ZedPro,
                            zed_llm_client::Plan::ZedProTrial => Plan::ZedProTrial,
                        };
                        return Err(anyhow!(ModelRequestLimitReachedError { plan }));
                    }
                }

                return Err(anyhow!("Forbidden"));
            } else if status.is_server_error() {
                // If we encounter an error in the 500 range, retry after a delay.
                // We've seen at least these in the wild from API providers:
                // * 500 Internal Server Error
                // * 502 Bad Gateway
                // * 529 Service Overloaded

                if retries_remaining == 0 {
                    let mut body = String::new();
                    response.body_mut().read_to_string(&mut body).await?;
                    return Err(anyhow!(
                        "cloud language model completion failed after {} retries with status {status}: {body}",
                        Self::MAX_RETRIES
                    ));
                }

                Timer::after(retry_delay).await;

                retries_remaining -= 1;
                retry_delay *= 2; // If it fails again, wait longer.
            } else if status == StatusCode::PAYMENT_REQUIRED {
                return Err(anyhow!(PaymentRequiredError));
            } else {
                let mut body = String::new();
                response.body_mut().read_to_string(&mut body).await?;
                return Err(anyhow!(ApiError { status, body }));
            }
        }
    }
}

#[derive(Debug, Error)]
#[error("cloud language model request failed with status {status}: {body}")]
struct ApiError {
    status: StatusCode,
    body: String,
}

impl LanguageModel for CloudLanguageModel {
    fn id(&self) -> LanguageModelId {
        self.id.clone()
    }

    fn name(&self) -> LanguageModelName {
        LanguageModelName::from(self.model.display_name().to_string())
    }

    fn provider_id(&self) -> LanguageModelProviderId {
        LanguageModelProviderId(ZED_CLOUD_PROVIDER_ID.into())
    }

    fn provider_name(&self) -> LanguageModelProviderName {
        LanguageModelProviderName(PROVIDER_NAME.into())
    }

    fn supports_tools(&self) -> bool {
        match self.model {
            CloudModel::Anthropic(_) => true,
            CloudModel::Google(_) => true,
            CloudModel::OpenAi(_) => true,
        }
    }

    fn supports_tool_choice(&self, choice: LanguageModelToolChoice) -> bool {
        match choice {
            LanguageModelToolChoice::Auto
            | LanguageModelToolChoice::Any
            | LanguageModelToolChoice::None => true,
        }
    }

    fn telemetry_id(&self) -> String {
        format!("zed.dev/{}", self.model.id())
    }

    fn availability(&self) -> LanguageModelAvailability {
        self.model.availability()
    }

    fn tool_input_format(&self) -> LanguageModelToolSchemaFormat {
        self.model.tool_input_format()
    }

    fn max_token_count(&self) -> usize {
        self.model.max_token_count()
    }

    fn cache_configuration(&self) -> Option<LanguageModelCacheConfiguration> {
        match &self.model {
            CloudModel::Anthropic(model) => {
                model
                    .cache_configuration()
                    .map(|cache| LanguageModelCacheConfiguration {
                        max_cache_anchors: cache.max_cache_anchors,
                        should_speculate: cache.should_speculate,
                        min_total_token: cache.min_total_token,
                    })
            }
            CloudModel::OpenAi(_) | CloudModel::Google(_) => None,
        }
    }

    fn count_tokens(
        &self,
        request: LanguageModelRequest,
        cx: &App,
    ) -> BoxFuture<'static, Result<usize>> {
        match self.model.clone() {
            CloudModel::Anthropic(_) => count_anthropic_tokens(request, cx),
            CloudModel::OpenAi(model) => count_open_ai_tokens(request, model, cx),
            CloudModel::Google(model) => {
                let client = self.client.clone();
                let llm_api_token = self.llm_api_token.clone();
                let model_id = model.id().to_string();
                let generate_content_request = into_google(request, model_id.clone());
                async move {
                    let http_client = &client.http_client();
                    let token = llm_api_token.acquire(&client).await?;

                    let request_body = CountTokensBody {
                        provider: zed_llm_client::LanguageModelProvider::Google,
                        model: model_id,
                        provider_request: serde_json::to_value(&google_ai::CountTokensRequest {
                            generate_content_request,
                        })?,
                    };
                    let request = http_client::Request::builder()
                        .method(Method::POST)
                        .uri(
                            http_client
                                .build_zed_llm_url("/count_tokens", &[])?
                                .as_ref(),
                        )
                        .header("Content-Type", "application/json")
                        .header("Authorization", format!("Bearer {token}"))
                        .body(serde_json::to_string(&request_body)?.into())?;
                    let mut response = http_client.send(request).await?;
                    let status = response.status();
                    let mut response_body = String::new();
                    response
                        .body_mut()
                        .read_to_string(&mut response_body)
                        .await?;

                    if status.is_success() {
                        let response_body: CountTokensResponse =
                            serde_json::from_str(&response_body)?;

                        Ok(response_body.tokens)
                    } else {
                        Err(anyhow!(ApiError {
                            status,
                            body: response_body
                        }))
                    }
                }
                .boxed()
            }
        }
    }

    fn stream_completion(
        &self,
        request: LanguageModelRequest,
        cx: &AsyncApp,
    ) -> BoxFuture<
        'static,
        Result<
            BoxStream<'static, Result<LanguageModelCompletionEvent, LanguageModelCompletionError>>,
        >,
    > {
        let thread_id = request.thread_id.clone();
        let prompt_id = request.prompt_id.clone();
        let mode = request.mode;
        let app_version = cx.update(|cx| AppVersion::global(cx)).ok();
        match &self.model {
            CloudModel::Anthropic(model) => {
                let request = into_anthropic(
                    request,
                    model.request_id().into(),
                    model.default_temperature(),
                    model.max_output_tokens(),
                    model.mode(),
                );
                let client = self.client.clone();
                let llm_api_token = self.llm_api_token.clone();
                let future = self.request_limiter.stream(async move {
                    let PerformLlmCompletionResponse {
                        response,
                        usage,
                        includes_status_messages,
                        tool_use_limit_reached,
                    } = Self::perform_llm_completion(
                        client.clone(),
                        llm_api_token,
                        app_version,
                        CompletionBody {
                            thread_id,
                            prompt_id,
                            mode,
                            provider: zed_llm_client::LanguageModelProvider::Anthropic,
                            model: request.model.clone(),
                            provider_request: serde_json::to_value(&request)?,
                        },
                    )
                    .await
                    .map_err(|err| match err.downcast::<ApiError>() {
                        Ok(api_err) => {
                            if api_err.status == StatusCode::BAD_REQUEST {
                                if let Some(tokens) = parse_prompt_too_long(&api_err.body) {
                                    return anyhow!(
                                        LanguageModelKnownError::ContextWindowLimitExceeded {
                                            tokens
                                        }
                                    );
                                }
                            }
                            anyhow!(api_err)
                        }
                        Err(err) => anyhow!(err),
                    })?;

                    let mut mapper = AnthropicEventMapper::new();
                    Ok(map_cloud_completion_events(
                        Box::pin(
                            response_lines(response, includes_status_messages)
                                .chain(usage_updated_event(usage))
                                .chain(tool_use_limit_reached_event(tool_use_limit_reached)),
                        ),
                        move |event| mapper.map_event(event),
                    ))
                });
                async move { Ok(future.await?.boxed()) }.boxed()
            }
            CloudModel::OpenAi(model) => {
                let client = self.client.clone();
                let request = into_open_ai(request, model, model.max_output_tokens());
                let llm_api_token = self.llm_api_token.clone();
                let future = self.request_limiter.stream(async move {
                    let PerformLlmCompletionResponse {
                        response,
                        usage,
                        includes_status_messages,
                        tool_use_limit_reached,
                    } = Self::perform_llm_completion(
                        client.clone(),
                        llm_api_token,
                        app_version,
                        CompletionBody {
                            thread_id,
                            prompt_id,
                            mode,
                            provider: zed_llm_client::LanguageModelProvider::OpenAi,
                            model: request.model.clone(),
                            provider_request: serde_json::to_value(&request)?,
                        },
                    )
                    .await?;

                    let mut mapper = OpenAiEventMapper::new();
                    Ok(map_cloud_completion_events(
                        Box::pin(
                            response_lines(response, includes_status_messages)
                                .chain(usage_updated_event(usage))
                                .chain(tool_use_limit_reached_event(tool_use_limit_reached)),
                        ),
                        move |event| mapper.map_event(event),
                    ))
                });
                async move { Ok(future.await?.boxed()) }.boxed()
            }
            CloudModel::Google(model) => {
                let client = self.client.clone();
                let request = into_google(request, model.id().into());
                let llm_api_token = self.llm_api_token.clone();
                let future = self.request_limiter.stream(async move {
                    let PerformLlmCompletionResponse {
                        response,
                        usage,
                        includes_status_messages,
                        tool_use_limit_reached,
                    } = Self::perform_llm_completion(
                        client.clone(),
                        llm_api_token,
                        app_version,
                        CompletionBody {
                            thread_id,
                            prompt_id,
                            mode,
                            provider: zed_llm_client::LanguageModelProvider::Google,
                            model: request.model.model_id.clone(),
                            provider_request: serde_json::to_value(&request)?,
                        },
                    )
                    .await?;

                    let mut mapper = GoogleEventMapper::new();
                    Ok(map_cloud_completion_events(
                        Box::pin(
                            response_lines(response, includes_status_messages)
                                .chain(usage_updated_event(usage))
                                .chain(tool_use_limit_reached_event(tool_use_limit_reached)),
                        ),
                        move |event| mapper.map_event(event),
                    ))
                });
                async move { Ok(future.await?.boxed()) }.boxed()
            }
        }
    }
}

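/// One element of the streamed completion response.
///
/// As an externally tagged enum with snake_case variant names, this
/// deserializes from `{"status": ...}` or `{"event": ...}` lines when the
/// server supports status messages; otherwise each line is parsed directly
/// as a provider event (see `response_lines` below).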
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "snake_case")]
pub enum CloudCompletionEvent<T> {
    Status(CompletionRequestStatus),
    Event(T),
}

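/// Flattens a stream of [`CloudCompletionEvent`]s into completion events:
/// status updates pass through as `StatusUpdate`s, while provider events are
/// expanded by the provider-specific `map_callback`.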
fn map_cloud_completion_events<T, F>(
    stream: Pin<Box<dyn Stream<Item = Result<CloudCompletionEvent<T>>> + Send>>,
    mut map_callback: F,
) -> BoxStream<'static, Result<LanguageModelCompletionEvent, LanguageModelCompletionError>>
where
    T: DeserializeOwned + 'static,
    F: FnMut(T) -> Vec<Result<LanguageModelCompletionEvent, LanguageModelCompletionError>>
        + Send
        + 'static,
{
    stream
        .flat_map(move |event| {
            futures::stream::iter(match event {
                Err(error) => {
                    vec![Err(LanguageModelCompletionError::Other(error))]
                }
                Ok(CloudCompletionEvent::Status(event)) => {
                    vec![Ok(LanguageModelCompletionEvent::StatusUpdate(event))]
                }
                Ok(CloudCompletionEvent::Event(event)) => map_callback(event),
            })
        })
        .boxed()
}

fn usage_updated_event<T>(
    usage: Option<RequestUsage>,
) -> impl Stream<Item = Result<CloudCompletionEvent<T>>> {
    futures::stream::iter(usage.map(|usage| {
        Ok(CloudCompletionEvent::Status(
            CompletionRequestStatus::UsageUpdated {
                amount: usage.amount as usize,
                limit: usage.limit,
            },
        ))
    }))
}

fn tool_use_limit_reached_event<T>(
    tool_use_limit_reached: bool,
) -> impl Stream<Item = Result<CloudCompletionEvent<T>>> {
    futures::stream::iter(tool_use_limit_reached.then(|| {
        Ok(CloudCompletionEvent::Status(
            CompletionRequestStatus::ToolUseLimitReached,
        ))
    }))
}

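/// Parses the response body as newline-delimited JSON, yielding one
/// [`CloudCompletionEvent`] per line.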
fn response_lines<T: DeserializeOwned>(
    response: Response<AsyncBody>,
    includes_status_messages: bool,
) -> impl Stream<Item = Result<CloudCompletionEvent<T>>> {
    futures::stream::try_unfold(
        (String::new(), BufReader::new(response.into_body())),
        move |(mut line, mut body)| async move {
            match body.read_line(&mut line).await {
                Ok(0) => Ok(None),
                Ok(_) => {
                    let event = if includes_status_messages {
                        serde_json::from_str::<CloudCompletionEvent<T>>(&line)?
                    } else {
                        CloudCompletionEvent::Event(serde_json::from_str::<T>(&line)?)
                    };

                    line.clear();
                    Ok(Some((event, (line, body))))
                }
                Err(e) => Err(e.into()),
            }
        },
    )
}

struct ConfigurationView {
    state: gpui::Entity<State>,
}

impl ConfigurationView {
    fn authenticate(&mut self, cx: &mut Context<Self>) {
        self.state.update(cx, |state, cx| {
            state.authenticate(cx).detach_and_log_err(cx);
        });
        cx.notify();
    }
}

impl Render for ConfigurationView {
    fn render(&mut self, _: &mut Window, cx: &mut Context<Self>) -> impl IntoElement {
        const ZED_PRICING_URL: &str = "https://zed.dev/pricing";

        let is_connected = !self.state.read(cx).is_signed_out();
        let user_store = self.state.read(cx).user_store.read(cx);
        let plan = user_store.current_plan();
        let subscription_period = user_store.subscription_period();
        let eligible_for_trial = user_store.trial_started_at().is_none();
        let has_accepted_terms = self.state.read(cx).has_accepted_terms_of_service(cx);

        let is_pro = plan == Some(proto::Plan::ZedPro);
        let subscription_text = match (plan, subscription_period) {
            (Some(proto::Plan::ZedPro), Some(_)) => {
                "You have access to Zed's hosted LLMs through your Zed Pro subscription."
            }
            (Some(proto::Plan::ZedProTrial), Some(_)) => {
                "You have access to Zed's hosted LLMs through your Zed Pro trial."
            }
            (Some(proto::Plan::Free), Some(_)) => {
                "You have basic access to Zed's hosted LLMs through your Zed Free subscription."
            }
            _ => {
                if eligible_for_trial {
1052 "Subscribe for access to Zed's hosted LLMs. Start with a 14 day free trial."
                } else {
                    "Subscribe for access to Zed's hosted LLMs."
                }
            }
        };
        let manage_subscription_buttons = if is_pro {
            h_flex().child(
                Button::new("manage_settings", "Manage Subscription")
                    .style(ButtonStyle::Tinted(TintColor::Accent))
                    .on_click(cx.listener(|_, _, _, cx| cx.open_url(&zed_urls::account_url(cx)))),
            )
        } else {
            h_flex()
                .gap_2()
                .child(
                    Button::new("learn_more", "Learn more")
                        .style(ButtonStyle::Subtle)
                        .on_click(cx.listener(|_, _, _, cx| cx.open_url(ZED_PRICING_URL))),
                )
                .child(
                    Button::new("upgrade", "Upgrade")
                        .style(ButtonStyle::Subtle)
                        .color(Color::Accent)
                        .on_click(
                            cx.listener(|_, _, _, cx| cx.open_url(&zed_urls::account_url(cx))),
                        ),
                )
        };

        if is_connected {
            v_flex()
                .gap_3()
                .w_full()
                .children(render_accept_terms(
                    self.state.clone(),
                    LanguageModelProviderTosView::Configuration,
                    cx,
                ))
                .when(has_accepted_terms, |this| {
                    this.child(subscription_text)
                        .child(manage_subscription_buttons)
                })
        } else {
            v_flex()
                .gap_2()
                .child(Label::new("Use Zed AI to access hosted language models."))
                .child(
                    Button::new("sign_in", "Sign In")
                        .icon_color(Color::Muted)
                        .icon(IconName::Github)
                        .icon_position(IconPosition::Start)
                        .on_click(cx.listener(move |this, _, _, cx| this.authenticate(cx))),
                )
        }
    }
}