use anthropic::{AnthropicError, AnthropicModelMode};
use anyhow::{Result, anyhow};
use client::{
    Client, EXPIRED_LLM_TOKEN_HEADER_NAME, MAX_LLM_MONTHLY_SPEND_REACHED_HEADER_NAME,
    PerformCompletionParams, UserStore, zed_urls,
};
use collections::BTreeMap;
use feature_flags::{FeatureFlagAppExt, LlmClosedBeta, ZedPro};
use futures::{
    AsyncBufReadExt, FutureExt, Stream, StreamExt, TryStreamExt as _, future::BoxFuture,
    stream::BoxStream,
};
use gpui::{AnyElement, AnyView, App, AsyncApp, Context, Entity, Subscription, Task};
use http_client::{AsyncBody, HttpClient, Method, Response, StatusCode};
use language_model::{
    AuthenticateError, CloudModel, LanguageModel, LanguageModelCacheConfiguration, LanguageModelId,
    LanguageModelName, LanguageModelProviderId, LanguageModelProviderName,
    LanguageModelProviderState, LanguageModelProviderTosView, LanguageModelRequest,
    LanguageModelToolSchemaFormat, RateLimiter, ZED_CLOUD_PROVIDER_ID,
};
use language_model::{
    LanguageModelAvailability, LanguageModelCompletionEvent, LanguageModelProvider, LlmApiToken,
    MaxMonthlySpendReachedError, PaymentRequiredError, RefreshLlmTokenListener,
};
use schemars::JsonSchema;
use serde::{Deserialize, Serialize, de::DeserializeOwned};
use serde_json::value::RawValue;
use settings::{Settings, SettingsStore};
use smol::Timer;
use smol::io::{AsyncReadExt, BufReader};
use std::{
    sync::{Arc, LazyLock},
    time::Duration,
};
use strum::IntoEnumIterator;
use ui::{TintColor, prelude::*};

use crate::AllLanguageModelSettings;
use crate::provider::anthropic::{count_anthropic_tokens, into_anthropic};
use crate::provider::google::into_google;
use crate::provider::open_ai::{count_open_ai_tokens, into_open_ai};

pub const PROVIDER_NAME: &str = "Zed";

const ZED_CLOUD_PROVIDER_ADDITIONAL_MODELS_JSON: Option<&str> =
    option_env!("ZED_CLOUD_PROVIDER_ADDITIONAL_MODELS_JSON");

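/// Additional models baked in at compile time via the
/// `ZED_CLOUD_PROVIDER_ADDITIONAL_MODELS_JSON` environment variable. The JSON is
/// parsed once, lazily; this returns an empty slice when the variable is unset.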
fn zed_cloud_provider_additional_models() -> &'static [AvailableModel] {
    static ADDITIONAL_MODELS: LazyLock<Vec<AvailableModel>> = LazyLock::new(|| {
        ZED_CLOUD_PROVIDER_ADDITIONAL_MODELS_JSON
            .map(|json| serde_json::from_str(json).unwrap())
            .unwrap_or_default()
    });
    ADDITIONAL_MODELS.as_slice()
}

#[derive(Default, Clone, Debug, PartialEq)]
pub struct ZedDotDevSettings {
    pub available_models: Vec<AvailableModel>,
}

#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema)]
#[serde(rename_all = "lowercase")]
pub enum AvailableProvider {
    Anthropic,
    OpenAi,
    Google,
}

#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema)]
pub struct AvailableModel {
    /// The provider of the language model.
    pub provider: AvailableProvider,
    /// The model's name in the provider's API, e.g. claude-3-5-sonnet-20240620.
    pub name: String,
    /// The name displayed in the UI, such as in the assistant panel model dropdown menu.
    pub display_name: Option<String>,
    /// The size of the context window, indicating the maximum number of tokens the model can process.
    pub max_tokens: usize,
    /// The maximum number of output tokens allowed by the model.
    pub max_output_tokens: Option<u32>,
    /// The maximum number of completion tokens allowed by the model (o1-* only).
    pub max_completion_tokens: Option<u32>,
    /// Override this model with a different Anthropic model for tool calls.
    pub tool_override: Option<String>,
    /// Indicates whether this custom model supports caching.
    pub cache_configuration: Option<LanguageModelCacheConfiguration>,
    /// The default temperature to use for this model.
    pub default_temperature: Option<f32>,
    /// Any extra beta headers to provide when using the model.
    #[serde(default)]
    pub extra_beta_headers: Vec<String>,
    /// The model's mode (e.g. thinking).
    pub mode: Option<ModelMode>,
}

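/// How the model should run. Serialized in settings as an internally tagged enum,
/// e.g. `{ "type": "thinking", "budget_tokens": 4096 }` (the budget value here is
/// illustrative only).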
#[derive(Default, Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema)]
#[serde(tag = "type", rename_all = "lowercase")]
pub enum ModelMode {
    #[default]
    Default,
    Thinking {
        /// The maximum number of tokens to use for reasoning. Must be lower than the model's `max_output_tokens`.
        budget_tokens: Option<u32>,
    },
}

impl From<ModelMode> for AnthropicModelMode {
    fn from(value: ModelMode) -> Self {
        match value {
            ModelMode::Default => AnthropicModelMode::Default,
            ModelMode::Thinking { budget_tokens } => AnthropicModelMode::Thinking { budget_tokens },
        }
    }
}

pub struct CloudLanguageModelProvider {
    client: Arc<Client>,
    state: gpui::Entity<State>,
    _maintain_client_status: Task<()>,
}

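/// Provider state shared with the configuration UI: the client connection status,
/// the LLM API token, and whether the user has accepted the terms of service.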
pub struct State {
    client: Arc<Client>,
    llm_api_token: LlmApiToken,
    user_store: Entity<UserStore>,
    status: client::Status,
    accept_terms: Option<Task<Result<()>>>,
    _settings_subscription: Subscription,
    _llm_token_subscription: Subscription,
}

impl State {
    fn new(
        client: Arc<Client>,
        user_store: Entity<UserStore>,
        status: client::Status,
        cx: &mut Context<Self>,
    ) -> Self {
        let refresh_llm_token_listener = RefreshLlmTokenListener::global(cx);

        Self {
            client: client.clone(),
            llm_api_token: LlmApiToken::default(),
            user_store,
            status,
            accept_terms: None,
            _settings_subscription: cx.observe_global::<SettingsStore>(|_, cx| {
                cx.notify();
            }),
            _llm_token_subscription: cx.subscribe(
                &refresh_llm_token_listener,
                |this, _listener, _event, cx| {
                    let client = this.client.clone();
                    let llm_api_token = this.llm_api_token.clone();
                    cx.spawn(async move |_this, _cx| {
                        llm_api_token.refresh(&client).await?;
                        anyhow::Ok(())
                    })
                    .detach_and_log_err(cx);
                },
            ),
        }
    }

    fn is_signed_out(&self) -> bool {
        self.status.is_signed_out()
    }

    fn authenticate(&self, cx: &mut Context<Self>) -> Task<Result<()>> {
        let client = self.client.clone();
        cx.spawn(async move |this, cx| {
            client.authenticate_and_connect(true, &cx).await?;
            this.update(cx, |_, cx| cx.notify())
        })
    }

    fn has_accepted_terms_of_service(&self, cx: &App) -> bool {
        self.user_store
            .read(cx)
            .current_user_has_accepted_terms()
            .unwrap_or(false)
    }

    fn accept_terms_of_service(&mut self, cx: &mut Context<Self>) {
        let user_store = self.user_store.clone();
        self.accept_terms = Some(cx.spawn(async move |this, cx| {
            let _ = user_store
                .update(cx, |store, cx| store.accept_terms_of_service(cx))?
                .await;
            this.update(cx, |this, cx| {
                this.accept_terms = None;
                cx.notify()
            })
        }));
    }
}

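// The provider holds a background task (`_maintain_client_status`) that mirrors client
// connection status changes into `State`, so views observing it re-render on sign-in/out.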
impl CloudLanguageModelProvider {
    pub fn new(user_store: Entity<UserStore>, client: Arc<Client>, cx: &mut App) -> Self {
        let mut status_rx = client.status();
        let status = *status_rx.borrow();

        let state = cx.new(|cx| State::new(client.clone(), user_store.clone(), status, cx));

        let state_ref = state.downgrade();
        let maintain_client_status = cx.spawn(async move |cx| {
            while let Some(status) = status_rx.next().await {
                if let Some(this) = state_ref.upgrade() {
                    _ = this.update(cx, |this, cx| {
                        if this.status != status {
                            this.status = status;
                            cx.notify();
                        }
                    });
                } else {
                    break;
                }
            }
        });

        Self {
            client,
            state: state.clone(),
            _maintain_client_status: maintain_client_status,
        }
    }
}

impl LanguageModelProviderState for CloudLanguageModelProvider {
    type ObservableEntity = State;

    fn observable_entity(&self) -> Option<gpui::Entity<Self::ObservableEntity>> {
        Some(self.state.clone())
    }
}

impl LanguageModelProvider for CloudLanguageModelProvider {
    fn id(&self) -> LanguageModelProviderId {
        LanguageModelProviderId(ZED_CLOUD_PROVIDER_ID.into())
    }

    fn name(&self) -> LanguageModelProviderName {
        LanguageModelProviderName(PROVIDER_NAME.into())
    }

    fn icon(&self) -> IconName {
        IconName::AiZed
    }

    fn default_model(&self, cx: &App) -> Option<Arc<dyn LanguageModel>> {
        let llm_api_token = self.state.read(cx).llm_api_token.clone();
        let model = CloudModel::Anthropic(anthropic::Model::default());
        Some(Arc::new(CloudLanguageModel {
            id: LanguageModelId::from(model.id().to_string()),
            model,
            llm_api_token: llm_api_token.clone(),
            client: self.client.clone(),
            request_limiter: RateLimiter::new(4),
        }))
    }

    fn provided_models(&self, cx: &App) -> Vec<Arc<dyn LanguageModel>> {
        let mut models = BTreeMap::default();

        if cx.is_staff() {
            for model in anthropic::Model::iter() {
                if !matches!(model, anthropic::Model::Custom { .. }) {
                    models.insert(model.id().to_string(), CloudModel::Anthropic(model));
                }
            }
            for model in open_ai::Model::iter() {
                if !matches!(model, open_ai::Model::Custom { .. }) {
                    models.insert(model.id().to_string(), CloudModel::OpenAi(model));
                }
            }
            for model in google_ai::Model::iter() {
                if !matches!(model, google_ai::Model::Custom { .. }) {
                    models.insert(model.id().to_string(), CloudModel::Google(model));
                }
            }
        } else {
            models.insert(
                anthropic::Model::Claude3_5Sonnet.id().to_string(),
                CloudModel::Anthropic(anthropic::Model::Claude3_5Sonnet),
            );
            models.insert(
                anthropic::Model::Claude3_7Sonnet.id().to_string(),
                CloudModel::Anthropic(anthropic::Model::Claude3_7Sonnet),
            );
            models.insert(
                anthropic::Model::Claude3_7SonnetThinking.id().to_string(),
                CloudModel::Anthropic(anthropic::Model::Claude3_7SonnetThinking),
            );
        }

        let llm_closed_beta_models = if cx.has_flag::<LlmClosedBeta>() {
            zed_cloud_provider_additional_models()
        } else {
            &[]
        };

        // Override with available models from settings
        for model in AllLanguageModelSettings::get_global(cx)
            .zed_dot_dev
            .available_models
            .iter()
            .chain(llm_closed_beta_models)
            .cloned()
        {
            let model = match model.provider {
                AvailableProvider::Anthropic => CloudModel::Anthropic(anthropic::Model::Custom {
                    name: model.name.clone(),
                    display_name: model.display_name.clone(),
                    max_tokens: model.max_tokens,
                    tool_override: model.tool_override.clone(),
                    cache_configuration: model.cache_configuration.as_ref().map(|config| {
                        anthropic::AnthropicModelCacheConfiguration {
                            max_cache_anchors: config.max_cache_anchors,
                            should_speculate: config.should_speculate,
                            min_total_token: config.min_total_token,
                        }
                    }),
                    default_temperature: model.default_temperature,
                    max_output_tokens: model.max_output_tokens,
                    extra_beta_headers: model.extra_beta_headers.clone(),
                    mode: model.mode.unwrap_or_default().into(),
                }),
                AvailableProvider::OpenAi => CloudModel::OpenAi(open_ai::Model::Custom {
                    name: model.name.clone(),
                    display_name: model.display_name.clone(),
                    max_tokens: model.max_tokens,
                    max_output_tokens: model.max_output_tokens,
                    max_completion_tokens: model.max_completion_tokens,
                }),
                AvailableProvider::Google => CloudModel::Google(google_ai::Model::Custom {
                    name: model.name.clone(),
                    display_name: model.display_name.clone(),
                    max_tokens: model.max_tokens,
                }),
            };
            models.insert(model.id().to_string(), model.clone());
        }

        let llm_api_token = self.state.read(cx).llm_api_token.clone();
        models
            .into_values()
            .map(|model| {
                Arc::new(CloudLanguageModel {
                    id: LanguageModelId::from(model.id().to_string()),
                    model,
                    llm_api_token: llm_api_token.clone(),
                    client: self.client.clone(),
                    request_limiter: RateLimiter::new(4),
                }) as Arc<dyn LanguageModel>
            })
            .collect()
    }

    fn is_authenticated(&self, cx: &App) -> bool {
        !self.state.read(cx).is_signed_out()
    }

    fn authenticate(&self, _cx: &mut App) -> Task<Result<(), AuthenticateError>> {
        Task::ready(Ok(()))
    }

    fn configuration_view(&self, _: &mut Window, cx: &mut App) -> AnyView {
        cx.new(|_| ConfigurationView {
            state: self.state.clone(),
        })
        .into()
    }

    fn must_accept_terms(&self, cx: &App) -> bool {
        !self.state.read(cx).has_accepted_terms_of_service(cx)
    }

    fn render_accept_terms(
        &self,
        view: LanguageModelProviderTosView,
        cx: &mut App,
    ) -> Option<AnyElement> {
        render_accept_terms(self.state.clone(), view, cx)
    }

    fn reset_credentials(&self, _cx: &mut App) -> Task<Result<()>> {
        Task::ready(Ok(()))
    }
}

fn render_accept_terms(
    state: Entity<State>,
    view_kind: LanguageModelProviderTosView,
    cx: &mut App,
) -> Option<AnyElement> {
    if state.read(cx).has_accepted_terms_of_service(cx) {
        return None;
    }

    let accept_terms_disabled = state.read(cx).accept_terms.is_some();

    let thread_fresh_start = matches!(view_kind, LanguageModelProviderTosView::ThreadFreshStart);
    let thread_empty_state = matches!(view_kind, LanguageModelProviderTosView::ThreadtEmptyState);

    let terms_button = Button::new("terms_of_service", "Terms of Service")
        .style(ButtonStyle::Subtle)
        .icon(IconName::ArrowUpRight)
        .icon_color(Color::Muted)
        .icon_size(IconSize::XSmall)
        .when(thread_empty_state, |this| this.label_size(LabelSize::Small))
        .on_click(move |_, _window, cx| cx.open_url("https://zed.dev/terms-of-service"));

    let button_container = h_flex().child(
        Button::new("accept_terms", "I accept the Terms of Service")
            .when(!thread_empty_state, |this| {
                this.full_width()
                    .style(ButtonStyle::Tinted(TintColor::Accent))
                    .icon(IconName::Check)
                    .icon_position(IconPosition::Start)
                    .icon_size(IconSize::Small)
            })
            .when(thread_empty_state, |this| {
                this.style(ButtonStyle::Tinted(TintColor::Warning))
                    .label_size(LabelSize::Small)
            })
            .disabled(accept_terms_disabled)
            .on_click({
                let state = state.downgrade();
                move |_, _window, cx| {
                    state
                        .update(cx, |state, cx| state.accept_terms_of_service(cx))
                        .ok();
                }
            }),
    );

    let form = if thread_empty_state {
        h_flex()
            .w_full()
            .flex_wrap()
            .justify_between()
            .child(
                h_flex()
                    .child(
                        Label::new("To start using Zed AI, please read and accept the")
                            .size(LabelSize::Small),
                    )
                    .child(terms_button),
            )
            .child(button_container)
    } else {
        v_flex()
            .w_full()
            .gap_2()
            .child(
                h_flex()
                    .flex_wrap()
                    .when(thread_fresh_start, |this| this.justify_center())
                    .child(Label::new(
                        "To start using Zed AI, please read and accept the",
                    ))
                    .child(terms_button),
            )
            .child({
                match view_kind {
                    LanguageModelProviderTosView::PromptEditorPopup => {
                        button_container.w_full().justify_end()
                    }
                    LanguageModelProviderTosView::Configuration => {
                        button_container.w_full().justify_start()
                    }
                    LanguageModelProviderTosView::ThreadFreshStart => {
                        button_container.w_full().justify_center()
                    }
                    LanguageModelProviderTosView::ThreadtEmptyState => div().w_0(),
                }
            })
    };

    Some(form.into_any())
}

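/// A single model hosted behind Zed's LLM service. Completions are authenticated with
/// the shared `LlmApiToken` and throttled by `request_limiter`.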
pub struct CloudLanguageModel {
    id: LanguageModelId,
    model: CloudModel,
    llm_api_token: LlmApiToken,
    client: Arc<Client>,
    request_limiter: RateLimiter,
}

impl CloudLanguageModel {
    const MAX_RETRIES: usize = 3;

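    /// POSTs the completion request to the LLM endpoint (or the `ZED_COMPLETIONS_URL`
    /// override), refreshing the LLM token when the server reports it as expired and
    /// retrying 5xx responses with exponential backoff, up to `MAX_RETRIES` attempts.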
    async fn perform_llm_completion(
        client: Arc<Client>,
        llm_api_token: LlmApiToken,
        body: PerformCompletionParams,
    ) -> Result<Response<AsyncBody>> {
        let http_client = &client.http_client();

        let mut token = llm_api_token.acquire(&client).await?;
        let mut retries_remaining = Self::MAX_RETRIES;
        let mut retry_delay = Duration::from_secs(1);

        loop {
            let request_builder = http_client::Request::builder().method(Method::POST);
            let request_builder = if let Ok(completions_url) = std::env::var("ZED_COMPLETIONS_URL")
            {
                request_builder.uri(completions_url)
            } else {
                request_builder.uri(http_client.build_zed_llm_url("/completion", &[])?.as_ref())
            };
            let request = request_builder
                .header("Content-Type", "application/json")
                .header("Authorization", format!("Bearer {token}"))
                .body(serde_json::to_string(&body)?.into())?;
            let mut response = http_client.send(request).await?;
            let status = response.status();
            if status.is_success() {
                return Ok(response);
            } else if response
                .headers()
                .get(EXPIRED_LLM_TOKEN_HEADER_NAME)
                .is_some()
            {
                retries_remaining -= 1;
                token = llm_api_token.refresh(&client).await?;
            } else if status == StatusCode::FORBIDDEN
                && response
                    .headers()
                    .get(MAX_LLM_MONTHLY_SPEND_REACHED_HEADER_NAME)
                    .is_some()
            {
                return Err(anyhow!(MaxMonthlySpendReachedError));
            } else if status.as_u16() >= 500 && status.as_u16() < 600 {
                // If we encounter an error in the 500 range, retry after a delay.
                // We've seen at least these in the wild from API providers:
                // * 500 Internal Server Error
                // * 502 Bad Gateway
                // * 529 Service Overloaded

                if retries_remaining == 0 {
                    let mut body = String::new();
                    response.body_mut().read_to_string(&mut body).await?;
                    return Err(anyhow!(
                        "cloud language model completion failed after {} retries with status {status}: {body}",
                        Self::MAX_RETRIES
                    ));
                }

                Timer::after(retry_delay).await;

                retries_remaining -= 1;
                retry_delay *= 2; // If it fails again, wait longer.
            } else if status == StatusCode::PAYMENT_REQUIRED {
                return Err(anyhow!(PaymentRequiredError));
            } else {
                let mut body = String::new();
                response.body_mut().read_to_string(&mut body).await?;
                return Err(anyhow!(
                    "cloud language model completion failed with status {status}: {body}",
                ));
            }
        }
    }
}

impl LanguageModel for CloudLanguageModel {
    fn id(&self) -> LanguageModelId {
        self.id.clone()
    }

    fn name(&self) -> LanguageModelName {
        LanguageModelName::from(self.model.display_name().to_string())
    }

    fn icon(&self) -> Option<IconName> {
        self.model.icon()
    }

    fn provider_id(&self) -> LanguageModelProviderId {
        LanguageModelProviderId(ZED_CLOUD_PROVIDER_ID.into())
    }

    fn provider_name(&self) -> LanguageModelProviderName {
        LanguageModelProviderName(PROVIDER_NAME.into())
    }

    fn supports_tools(&self) -> bool {
        match self.model {
            CloudModel::Anthropic(_) => true,
            CloudModel::Google(_) => true,
            CloudModel::OpenAi(_) => true,
        }
    }

    fn telemetry_id(&self) -> String {
        format!("zed.dev/{}", self.model.id())
    }

    fn availability(&self) -> LanguageModelAvailability {
        self.model.availability()
    }

    fn tool_input_format(&self) -> LanguageModelToolSchemaFormat {
        self.model.tool_input_format()
    }

    fn max_token_count(&self) -> usize {
        self.model.max_token_count()
    }

    fn cache_configuration(&self) -> Option<LanguageModelCacheConfiguration> {
        match &self.model {
            CloudModel::Anthropic(model) => {
                model
                    .cache_configuration()
                    .map(|cache| LanguageModelCacheConfiguration {
                        max_cache_anchors: cache.max_cache_anchors,
                        should_speculate: cache.should_speculate,
                        min_total_token: cache.min_total_token,
                    })
            }
            CloudModel::OpenAi(_) | CloudModel::Google(_) => None,
        }
    }

    fn count_tokens(
        &self,
        request: LanguageModelRequest,
        cx: &App,
    ) -> BoxFuture<'static, Result<usize>> {
        match self.model.clone() {
            CloudModel::Anthropic(_) => count_anthropic_tokens(request, cx),
            CloudModel::OpenAi(model) => count_open_ai_tokens(request, model, cx),
            CloudModel::Google(model) => {
                let client = self.client.clone();
                let request = into_google(request, model.id().into());
                let request = google_ai::CountTokensRequest {
                    contents: request.contents,
                };
                async move {
                    let request = serde_json::to_string(&request)?;
                    let response = client
                        .request(proto::CountLanguageModelTokens {
                            provider: proto::LanguageModelProvider::Google as i32,
                            request,
                        })
                        .await?;
                    Ok(response.token_count as usize)
                }
                .boxed()
            }
        }
    }

    fn stream_completion(
        &self,
        request: LanguageModelRequest,
        _cx: &AsyncApp,
    ) -> BoxFuture<'static, Result<BoxStream<'static, Result<LanguageModelCompletionEvent>>>> {
        match &self.model {
            CloudModel::Anthropic(model) => {
                let request = into_anthropic(
                    request,
                    model.request_id().into(),
                    model.default_temperature(),
                    model.max_output_tokens(),
                    model.mode(),
                );
                let client = self.client.clone();
                let llm_api_token = self.llm_api_token.clone();
                let future = self.request_limiter.stream(async move {
                    let response = Self::perform_llm_completion(
                        client.clone(),
                        llm_api_token,
                        PerformCompletionParams {
                            provider: client::LanguageModelProvider::Anthropic,
                            model: request.model.clone(),
                            provider_request: RawValue::from_string(serde_json::to_string(
                                &request,
                            )?)?,
                        },
                    )
                    .await?;
                    Ok(
                        crate::provider::anthropic::map_to_language_model_completion_events(
                            Box::pin(response_lines(response).map_err(AnthropicError::Other)),
                        ),
                    )
                });
                async move { Ok(future.await?.boxed()) }.boxed()
            }
            CloudModel::OpenAi(model) => {
                let client = self.client.clone();
                let request = into_open_ai(request, model, model.max_output_tokens());
                let llm_api_token = self.llm_api_token.clone();
                let future = self.request_limiter.stream(async move {
                    let response = Self::perform_llm_completion(
                        client.clone(),
                        llm_api_token,
                        PerformCompletionParams {
                            provider: client::LanguageModelProvider::OpenAi,
                            model: request.model.clone(),
                            provider_request: RawValue::from_string(serde_json::to_string(
                                &request,
                            )?)?,
                        },
                    )
                    .await?;
                    Ok(
                        crate::provider::open_ai::map_to_language_model_completion_events(
                            Box::pin(response_lines(response)),
                        ),
                    )
                });
                async move { Ok(future.await?.boxed()) }.boxed()
            }
            CloudModel::Google(model) => {
                let client = self.client.clone();
                let request = into_google(request, model.id().into());
                let llm_api_token = self.llm_api_token.clone();
                let future = self.request_limiter.stream(async move {
                    let response = Self::perform_llm_completion(
                        client.clone(),
                        llm_api_token,
                        PerformCompletionParams {
                            provider: client::LanguageModelProvider::Google,
                            model: request.model.clone(),
                            provider_request: RawValue::from_string(serde_json::to_string(
                                &request,
                            )?)?,
                        },
                    )
                    .await?;
                    Ok(
                        crate::provider::google::map_to_language_model_completion_events(Box::pin(
                            response_lines(response),
                        )),
                    )
                });
                async move { Ok(future.await?.boxed()) }.boxed()
            }
        }
    }
}

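/// Turns a streaming response body into a stream of values by deserializing one
/// newline-delimited JSON event per line.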
fn response_lines<T: DeserializeOwned>(
    response: Response<AsyncBody>,
) -> impl Stream<Item = Result<T>> {
    futures::stream::try_unfold(
        (String::new(), BufReader::new(response.into_body())),
        move |(mut line, mut body)| async {
            match body.read_line(&mut line).await {
                Ok(0) => Ok(None),
                Ok(_) => {
                    let event: T = serde_json::from_str(&line)?;
                    line.clear();
                    Ok(Some((event, (line, body))))
                }
                Err(e) => Err(e.into()),
            }
        },
    )
}

struct ConfigurationView {
    state: gpui::Entity<State>,
}

impl ConfigurationView {
    fn authenticate(&mut self, cx: &mut Context<Self>) {
        self.state.update(cx, |state, cx| {
            state.authenticate(cx).detach_and_log_err(cx);
        });
        cx.notify();
    }
}

impl Render for ConfigurationView {
    fn render(&mut self, _: &mut Window, cx: &mut Context<Self>) -> impl IntoElement {
        const ZED_AI_URL: &str = "https://zed.dev/ai";

        let is_connected = !self.state.read(cx).is_signed_out();
        let plan = self.state.read(cx).user_store.read(cx).current_plan();
        let has_accepted_terms = self.state.read(cx).has_accepted_terms_of_service(cx);

        let is_pro = plan == Some(proto::Plan::ZedPro);
        let subscription_text = Label::new(if is_pro {
            "You have full access to Zed's hosted LLMs, which include models from Anthropic, OpenAI, and Google. They come with faster speeds and higher limits through Zed Pro."
        } else {
            "You have basic access to models from Anthropic through the Zed AI Free plan."
        });
        let manage_subscription_button = if is_pro {
            Some(
                h_flex().child(
                    Button::new("manage_settings", "Manage Subscription")
                        .style(ButtonStyle::Tinted(TintColor::Accent))
                        .on_click(
                            cx.listener(|_, _, _, cx| cx.open_url(&zed_urls::account_url(cx))),
                        ),
                ),
            )
        } else if cx.has_flag::<ZedPro>() {
            Some(
                h_flex()
                    .gap_2()
                    .child(
                        Button::new("learn_more", "Learn more")
                            .style(ButtonStyle::Subtle)
                            .on_click(cx.listener(|_, _, _, cx| cx.open_url(ZED_AI_URL))),
                    )
                    .child(
                        Button::new("upgrade", "Upgrade")
                            .style(ButtonStyle::Subtle)
                            .color(Color::Accent)
                            .on_click(
                                cx.listener(|_, _, _, cx| cx.open_url(&zed_urls::account_url(cx))),
                            ),
                    ),
            )
        } else {
            None
        };

        if is_connected {
            v_flex()
                .gap_3()
                .w_full()
                .children(render_accept_terms(
                    self.state.clone(),
                    LanguageModelProviderTosView::Configuration,
                    cx,
                ))
                .when(has_accepted_terms, |this| {
                    this.child(subscription_text)
                        .children(manage_subscription_button)
                })
        } else {
            v_flex()
                .gap_2()
                .child(Label::new("Use Zed AI to access hosted language models."))
                .child(
                    Button::new("sign_in", "Sign In")
                        .icon_color(Color::Muted)
                        .icon(IconName::Github)
                        .icon_position(IconPosition::Start)
                        .on_click(cx.listener(move |this, _, _, cx| this.authenticate(cx))),
                )
        }
    }
}