use super::open_ai::count_open_ai_tokens;
use anthropic::AnthropicError;
use anyhow::{anyhow, Result};
use client::{
    zed_urls, Client, PerformCompletionParams, UserStore, EXPIRED_LLM_TOKEN_HEADER_NAME,
    MAX_LLM_MONTHLY_SPEND_REACHED_HEADER_NAME,
};
use collections::BTreeMap;
use feature_flags::{FeatureFlagAppExt, LlmClosedBeta, ZedPro};
use futures::{
    future::BoxFuture, stream::BoxStream, AsyncBufReadExt, FutureExt, Stream, StreamExt,
    TryStreamExt as _,
};
use gpui::{
    AnyElement, AnyView, App, AsyncApp, Context, Entity, EventEmitter, Global, ReadGlobal,
    Subscription, Task,
};
use http_client::{AsyncBody, HttpClient, Method, Response, StatusCode};
use language_model::{
    CloudModel, LanguageModel, LanguageModelCacheConfiguration, LanguageModelId, LanguageModelName,
    LanguageModelProviderId, LanguageModelProviderName, LanguageModelProviderState,
    LanguageModelProviderTosView, LanguageModelRequest, RateLimiter, ZED_CLOUD_PROVIDER_ID,
};
use language_model::{
    LanguageModelAvailability, LanguageModelCompletionEvent, LanguageModelProvider,
};
use proto::TypedEnvelope;
use schemars::JsonSchema;
use serde::{de::DeserializeOwned, Deserialize, Serialize};
use serde_json::value::RawValue;
use settings::{Settings, SettingsStore};
use smol::{
    io::{AsyncReadExt, BufReader},
    lock::{RwLock, RwLockUpgradableReadGuard, RwLockWriteGuard},
};
use std::fmt;
use std::{
    future,
    sync::{Arc, LazyLock},
};
use strum::IntoEnumIterator;
use thiserror::Error;
use ui::{prelude::*, TintColor};

use crate::provider::anthropic::map_to_language_model_completion_events;
use crate::AllLanguageModelSettings;

use super::anthropic::count_anthropic_tokens;

pub const PROVIDER_NAME: &str = "Zed";

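// Additional models can be compiled in via the ZED_CLOUD_PROVIDER_ADDITIONAL_MODELS_JSON
// environment variable. Its value is a JSON array of `AvailableModel` entries; an
// illustrative (not real) value:
//
//     [{"provider": "anthropic", "name": "some-model", "max_tokens": 200000}]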
const ZED_CLOUD_PROVIDER_ADDITIONAL_MODELS_JSON: Option<&str> =
    option_env!("ZED_CLOUD_PROVIDER_ADDITIONAL_MODELS_JSON");

fn zed_cloud_provider_additional_models() -> &'static [AvailableModel] {
    static ADDITIONAL_MODELS: LazyLock<Vec<AvailableModel>> = LazyLock::new(|| {
        ZED_CLOUD_PROVIDER_ADDITIONAL_MODELS_JSON
            .map(|json| serde_json::from_str(json).unwrap())
            .unwrap_or_default()
    });
    ADDITIONAL_MODELS.as_slice()
}

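// Models the user has configured in settings. Each entry deserializes into an
// `AvailableModel`; only `provider`, `name`, and `max_tokens` are required, so a
// minimal (illustrative) settings entry looks like:
//
//     { "provider": "openai", "name": "a-custom-model", "max_tokens": 128000 }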
#[derive(Default, Clone, Debug, PartialEq)]
pub struct ZedDotDevSettings {
    pub available_models: Vec<AvailableModel>,
}

#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema)]
#[serde(rename_all = "lowercase")]
pub enum AvailableProvider {
    Anthropic,
    OpenAi,
    Google,
}

#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema)]
pub struct AvailableModel {
    /// The provider of the language model.
    pub provider: AvailableProvider,
    /// The model's name in the provider's API, e.g. `claude-3-5-sonnet-20240620`.
    pub name: String,
    /// The name displayed in the UI, such as in the assistant panel model dropdown menu.
    pub display_name: Option<String>,
    /// The size of the context window, indicating the maximum number of tokens the model can process.
    pub max_tokens: usize,
    /// The maximum number of output tokens allowed by the model.
    pub max_output_tokens: Option<u32>,
    /// The maximum number of completion tokens allowed by the model (o1-* models only).
    pub max_completion_tokens: Option<u32>,
    /// Override this model with a different Anthropic model for tool calls.
    pub tool_override: Option<String>,
    /// Indicates whether this custom model supports caching.
    pub cache_configuration: Option<LanguageModelCacheConfiguration>,
    /// The default temperature to use for this model.
    pub default_temperature: Option<f32>,
    /// Any extra beta headers to provide when using the model.
    #[serde(default)]
    pub extra_beta_headers: Vec<String>,
}

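// App-wide listener for LLM token refresh messages from the server, stored as a
// GPUI global so every provider instance can subscribe to the same entity.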
struct GlobalRefreshLlmTokenListener(Entity<RefreshLlmTokenListener>);

impl Global for GlobalRefreshLlmTokenListener {}

pub struct RefreshLlmTokenEvent;

pub struct RefreshLlmTokenListener {
    _llm_token_subscription: client::Subscription,
}

impl EventEmitter<RefreshLlmTokenEvent> for RefreshLlmTokenListener {}

impl RefreshLlmTokenListener {
    pub fn register(client: Arc<Client>, cx: &mut App) {
        let listener = cx.new(|cx| RefreshLlmTokenListener::new(client, cx));
        cx.set_global(GlobalRefreshLlmTokenListener(listener));
    }

    pub fn global(cx: &App) -> Entity<Self> {
        GlobalRefreshLlmTokenListener::global(cx).0.clone()
    }

    fn new(client: Arc<Client>, cx: &mut Context<Self>) -> Self {
        Self {
            _llm_token_subscription: client
                .add_message_handler(cx.weak_entity(), Self::handle_refresh_llm_token),
        }
    }

    async fn handle_refresh_llm_token(
        this: Entity<Self>,
        _: TypedEnvelope<proto::RefreshLlmToken>,
        mut cx: AsyncApp,
    ) -> Result<()> {
        this.update(&mut cx, |_this, cx| cx.emit(RefreshLlmTokenEvent))
    }
}

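// The Zed-hosted language model provider. Requests are proxied through Zed's
// LLM service (see `CloudLanguageModel::perform_llm_completion`) rather than
// sent to the upstream providers directly.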
pub struct CloudLanguageModelProvider {
    client: Arc<Client>,
    state: gpui::Entity<State>,
    _maintain_client_status: Task<()>,
}

pub struct State {
    client: Arc<Client>,
    llm_api_token: LlmApiToken,
    user_store: Entity<UserStore>,
    status: client::Status,
    accept_terms: Option<Task<Result<()>>>,
    _settings_subscription: Subscription,
    _llm_token_subscription: Subscription,
}

impl State {
    fn new(
        client: Arc<Client>,
        user_store: Entity<UserStore>,
        status: client::Status,
        cx: &mut Context<Self>,
    ) -> Self {
        let refresh_llm_token_listener = RefreshLlmTokenListener::global(cx);

        Self {
            client: client.clone(),
            llm_api_token: LlmApiToken::default(),
            user_store,
            status,
            accept_terms: None,
            _settings_subscription: cx.observe_global::<SettingsStore>(|_, cx| {
                cx.notify();
            }),
            // Re-fetch the LLM token whenever the server asks us to refresh it.
            _llm_token_subscription: cx.subscribe(
                &refresh_llm_token_listener,
                |this, _listener, _event, cx| {
                    let client = this.client.clone();
                    let llm_api_token = this.llm_api_token.clone();
                    cx.spawn(|_this, _cx| async move {
                        llm_api_token.refresh(&client).await?;
                        anyhow::Ok(())
                    })
                    .detach_and_log_err(cx);
                },
            ),
        }
    }

    fn is_signed_out(&self) -> bool {
        self.status.is_signed_out()
    }

    fn authenticate(&self, cx: &mut Context<Self>) -> Task<Result<()>> {
        let client = self.client.clone();
        cx.spawn(move |this, mut cx| async move {
            client.authenticate_and_connect(true, &cx).await?;
            this.update(&mut cx, |_, cx| cx.notify())
        })
    }

    fn has_accepted_terms_of_service(&self, cx: &App) -> bool {
        self.user_store
            .read(cx)
            .current_user_has_accepted_terms()
            .unwrap_or(false)
    }

    fn accept_terms_of_service(&mut self, cx: &mut Context<Self>) {
        let user_store = self.user_store.clone();
        self.accept_terms = Some(cx.spawn(move |this, mut cx| async move {
            let _ = user_store
                .update(&mut cx, |store, cx| store.accept_terms_of_service(cx))?
                .await;
            this.update(&mut cx, |this, cx| {
                this.accept_terms = None;
                cx.notify()
            })
        }));
    }
}

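// A detached background task mirrors the client's connection status into
// `State.status`; it stops once the state entity has been dropped.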
impl CloudLanguageModelProvider {
    pub fn new(user_store: Entity<UserStore>, client: Arc<Client>, cx: &mut App) -> Self {
        let mut status_rx = client.status();
        let status = *status_rx.borrow();

        let state = cx.new(|cx| State::new(client.clone(), user_store.clone(), status, cx));

        let state_ref = state.downgrade();
        let maintain_client_status = cx.spawn(|mut cx| async move {
            while let Some(status) = status_rx.next().await {
                if let Some(this) = state_ref.upgrade() {
                    _ = this.update(&mut cx, |this, cx| {
                        if this.status != status {
                            this.status = status;
                            cx.notify();
                        }
                    });
                } else {
                    break;
                }
            }
        });

        Self {
            client,
            state,
            _maintain_client_status: maintain_client_status,
        }
    }
}

impl LanguageModelProviderState for CloudLanguageModelProvider {
    type ObservableEntity = State;

    fn observable_entity(&self) -> Option<gpui::Entity<Self::ObservableEntity>> {
        Some(self.state.clone())
    }
}

impl LanguageModelProvider for CloudLanguageModelProvider {
    fn id(&self) -> LanguageModelProviderId {
        LanguageModelProviderId(ZED_CLOUD_PROVIDER_ID.into())
    }

    fn name(&self) -> LanguageModelProviderName {
        LanguageModelProviderName(PROVIDER_NAME.into())
    }

    fn icon(&self) -> IconName {
        IconName::AiZed
    }

    fn provided_models(&self, cx: &App) -> Vec<Arc<dyn LanguageModel>> {
        let mut models = BTreeMap::default();

        if cx.is_staff() {
            for model in anthropic::Model::iter() {
                if !matches!(model, anthropic::Model::Custom { .. }) {
                    models.insert(model.id().to_string(), CloudModel::Anthropic(model));
                }
            }
            for model in open_ai::Model::iter() {
                if !matches!(model, open_ai::Model::Custom { .. }) {
                    models.insert(model.id().to_string(), CloudModel::OpenAi(model));
                }
            }
            for model in google_ai::Model::iter() {
                if !matches!(model, google_ai::Model::Custom { .. }) {
                    models.insert(model.id().to_string(), CloudModel::Google(model));
                }
            }
        } else {
            models.insert(
                anthropic::Model::Claude3_5Sonnet.id().to_string(),
                CloudModel::Anthropic(anthropic::Model::Claude3_5Sonnet),
            );
            models.insert(
                anthropic::Model::Claude3_7Sonnet.id().to_string(),
                CloudModel::Anthropic(anthropic::Model::Claude3_7Sonnet),
            );
        }

        let llm_closed_beta_models = if cx.has_flag::<LlmClosedBeta>() {
            zed_cloud_provider_additional_models()
        } else {
            &[]
        };

        // Add models configured in settings (and closed-beta models), overriding
        // any built-in entries that share the same model ID.
        for model in AllLanguageModelSettings::get_global(cx)
            .zed_dot_dev
            .available_models
            .iter()
            .chain(llm_closed_beta_models)
            .cloned()
        {
            let model = match model.provider {
                AvailableProvider::Anthropic => CloudModel::Anthropic(anthropic::Model::Custom {
                    name: model.name.clone(),
                    display_name: model.display_name.clone(),
                    max_tokens: model.max_tokens,
                    tool_override: model.tool_override.clone(),
                    cache_configuration: model.cache_configuration.as_ref().map(|config| {
                        anthropic::AnthropicModelCacheConfiguration {
                            max_cache_anchors: config.max_cache_anchors,
                            should_speculate: config.should_speculate,
                            min_total_token: config.min_total_token,
                        }
                    }),
                    default_temperature: model.default_temperature,
                    max_output_tokens: model.max_output_tokens,
                    extra_beta_headers: model.extra_beta_headers.clone(),
                }),
                AvailableProvider::OpenAi => CloudModel::OpenAi(open_ai::Model::Custom {
                    name: model.name.clone(),
                    display_name: model.display_name.clone(),
                    max_tokens: model.max_tokens,
                    max_output_tokens: model.max_output_tokens,
                    max_completion_tokens: model.max_completion_tokens,
                }),
                AvailableProvider::Google => CloudModel::Google(google_ai::Model::Custom {
                    name: model.name.clone(),
                    display_name: model.display_name.clone(),
                    max_tokens: model.max_tokens,
                }),
            };
            models.insert(model.id().to_string(), model);
        }

        let llm_api_token = self.state.read(cx).llm_api_token.clone();
        models
            .into_values()
            .map(|model| {
                Arc::new(CloudLanguageModel {
                    id: LanguageModelId::from(model.id().to_string()),
                    model,
                    llm_api_token: llm_api_token.clone(),
                    client: self.client.clone(),
                    request_limiter: RateLimiter::new(4),
                }) as Arc<dyn LanguageModel>
            })
            .collect()
    }

    fn is_authenticated(&self, cx: &App) -> bool {
        !self.state.read(cx).is_signed_out()
    }

    fn authenticate(&self, _cx: &mut App) -> Task<Result<()>> {
        Task::ready(Ok(()))
    }

    fn configuration_view(&self, _: &mut Window, cx: &mut App) -> AnyView {
        cx.new(|_| ConfigurationView {
            state: self.state.clone(),
        })
        .into()
    }

    fn must_accept_terms(&self, cx: &App) -> bool {
        !self.state.read(cx).has_accepted_terms_of_service(cx)
    }

    fn render_accept_terms(
        &self,
        view: LanguageModelProviderTosView,
        cx: &mut App,
    ) -> Option<AnyElement> {
        render_accept_terms(self.state.clone(), view, cx)
    }

    fn reset_credentials(&self, _cx: &mut App) -> Task<Result<()>> {
        Task::ready(Ok(()))
    }
}

fn render_accept_terms(
    state: Entity<State>,
    view_kind: LanguageModelProviderTosView,
    cx: &mut App,
) -> Option<AnyElement> {
    if state.read(cx).has_accepted_terms_of_service(cx) {
        return None;
    }

    let accept_terms_disabled = state.read(cx).accept_terms.is_some();

    let terms_button = Button::new("terms_of_service", "Terms of Service")
        .style(ButtonStyle::Subtle)
        .icon(IconName::ArrowUpRight)
        .icon_color(Color::Muted)
        .icon_size(IconSize::XSmall)
        .on_click(move |_, _window, cx| cx.open_url("https://zed.dev/terms-of-service"));

    let text = "To start using Zed AI, please read and accept the";

    let form = v_flex()
        .w_full()
        .gap_2()
        .when(
            view_kind == LanguageModelProviderTosView::ThreadEmptyState,
            |form| form.items_center(),
        )
        .child(
            h_flex()
                .flex_wrap()
                .when(
                    view_kind == LanguageModelProviderTosView::ThreadEmptyState,
                    |form| form.justify_center(),
                )
                .child(Label::new(text))
                .child(terms_button),
        )
        .child({
            let button_container = h_flex().w_full().child(
                Button::new("accept_terms", "I accept the Terms of Service")
                    .style(ButtonStyle::Tinted(TintColor::Accent))
                    .disabled(accept_terms_disabled)
                    .on_click({
                        let state = state.downgrade();
                        move |_, _window, cx| {
                            state
                                .update(cx, |state, cx| state.accept_terms_of_service(cx))
                                .ok();
                        }
                    }),
            );

            match view_kind {
                LanguageModelProviderTosView::ThreadEmptyState => button_container.justify_center(),
                LanguageModelProviderTosView::PromptEditorPopup => button_container.justify_end(),
                LanguageModelProviderTosView::Configuration => button_container.justify_start(),
            }
        });

    Some(form.into_any())
}

pub struct CloudLanguageModel {
    id: LanguageModelId,
    model: CloudModel,
    llm_api_token: LlmApiToken,
    client: Arc<Client>,
    request_limiter: RateLimiter,
}

#[derive(Clone, Default)]
pub struct LlmApiToken(Arc<RwLock<Option<String>>>);

#[derive(Error, Debug)]
pub struct PaymentRequiredError;

impl fmt::Display for PaymentRequiredError {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(
            f,
            "Payment required to use this language model. Please upgrade your account."
        )
    }
}

#[derive(Error, Debug)]
pub struct MaxMonthlySpendReachedError;

impl fmt::Display for MaxMonthlySpendReachedError {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(
            f,
            "Maximum spending limit reached for this month. For more usage, increase your spending limit."
        )
    }
}

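// Performs a completion request against the LLM service, retrying exactly once
// with a freshly fetched token when the server flags the current LLM token as
// expired. A 403 carrying the max-monthly-spend header and a 402 are mapped to
// typed errors so callers can surface targeted UI.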
impl CloudLanguageModel {
    async fn perform_llm_completion(
        client: Arc<Client>,
        llm_api_token: LlmApiToken,
        body: PerformCompletionParams,
    ) -> Result<Response<AsyncBody>> {
        let http_client = &client.http_client();

        let mut token = llm_api_token.acquire(&client).await?;
        let mut did_retry = false;

        let response = loop {
            let request_builder = http_client::Request::builder();
            let request = request_builder
                .method(Method::POST)
                .uri(http_client.build_zed_llm_url("/completion", &[])?.as_ref())
                .header("Content-Type", "application/json")
                .header("Authorization", format!("Bearer {token}"))
                .body(serde_json::to_string(&body)?.into())?;
            let mut response = http_client.send(request).await?;
            if response.status().is_success() {
                break response;
            } else if !did_retry
                && response
                    .headers()
                    .get(EXPIRED_LLM_TOKEN_HEADER_NAME)
                    .is_some()
            {
                did_retry = true;
                token = llm_api_token.refresh(&client).await?;
            } else if response.status() == StatusCode::FORBIDDEN
                && response
                    .headers()
                    .get(MAX_LLM_MONTHLY_SPEND_REACHED_HEADER_NAME)
                    .is_some()
            {
                return Err(anyhow!(MaxMonthlySpendReachedError));
            } else if response.status() == StatusCode::PAYMENT_REQUIRED {
                return Err(anyhow!(PaymentRequiredError));
            } else {
                let mut body = String::new();
                response.body_mut().read_to_string(&mut body).await?;
                return Err(anyhow!(
                    "cloud language model completion failed with status {}: {body}",
                    response.status()
                ));
            }
        };

        Ok(response)
    }
}

impl LanguageModel for CloudLanguageModel {
    fn id(&self) -> LanguageModelId {
        self.id.clone()
    }

    fn name(&self) -> LanguageModelName {
        LanguageModelName::from(self.model.display_name().to_string())
    }

    fn icon(&self) -> Option<IconName> {
        self.model.icon()
    }

    fn provider_id(&self) -> LanguageModelProviderId {
        LanguageModelProviderId(ZED_CLOUD_PROVIDER_ID.into())
    }

    fn provider_name(&self) -> LanguageModelProviderName {
        LanguageModelProviderName(PROVIDER_NAME.into())
    }

    fn telemetry_id(&self) -> String {
        format!("zed.dev/{}", self.model.id())
    }

    fn availability(&self) -> LanguageModelAvailability {
        self.model.availability()
    }

    fn max_token_count(&self) -> usize {
        self.model.max_token_count()
    }

    fn cache_configuration(&self) -> Option<LanguageModelCacheConfiguration> {
        match &self.model {
            CloudModel::Anthropic(model) => {
                model
                    .cache_configuration()
                    .map(|cache| LanguageModelCacheConfiguration {
                        max_cache_anchors: cache.max_cache_anchors,
                        should_speculate: cache.should_speculate,
                        min_total_token: cache.min_total_token,
                    })
            }
            CloudModel::OpenAi(_) | CloudModel::Google(_) => None,
        }
    }

    fn count_tokens(
        &self,
        request: LanguageModelRequest,
        cx: &App,
    ) -> BoxFuture<'static, Result<usize>> {
        match self.model.clone() {
            CloudModel::Anthropic(_) => count_anthropic_tokens(request, cx),
            CloudModel::OpenAi(model) => count_open_ai_tokens(request, model, cx),
            CloudModel::Google(model) => {
                // Google token counts are computed server-side via the RPC proxy.
                let client = self.client.clone();
                let request = request.into_google(model.id().into());
                let request = google_ai::CountTokensRequest {
                    contents: request.contents,
                };
                async move {
                    let request = serde_json::to_string(&request)?;
                    let response = client
                        .request(proto::CountLanguageModelTokens {
                            provider: proto::LanguageModelProvider::Google as i32,
                            request,
                        })
                        .await?;
                    Ok(response.token_count as usize)
                }
                .boxed()
            }
        }
    }

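    // Streaming follows the same shape for all providers: serialize the
    // provider-specific request into `PerformCompletionParams`, POST it to the
    // LLM service, and map the newline-delimited response events back into
    // `LanguageModelCompletionEvent`s.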
    fn stream_completion(
        &self,
        request: LanguageModelRequest,
        _cx: &AsyncApp,
    ) -> BoxFuture<'static, Result<BoxStream<'static, Result<LanguageModelCompletionEvent>>>> {
        match &self.model {
            CloudModel::Anthropic(model) => {
                let request = request.into_anthropic(
                    model.id().into(),
                    model.default_temperature(),
                    model.max_output_tokens(),
                );
                let client = self.client.clone();
                let llm_api_token = self.llm_api_token.clone();
                let future = self.request_limiter.stream(async move {
                    let response = Self::perform_llm_completion(
                        client.clone(),
                        llm_api_token,
                        PerformCompletionParams {
                            provider: client::LanguageModelProvider::Anthropic,
                            model: request.model.clone(),
                            provider_request: RawValue::from_string(serde_json::to_string(
                                &request,
                            )?)?,
                        },
                    )
                    .await?;
                    Ok(map_to_language_model_completion_events(Box::pin(
                        response_lines(response).map_err(AnthropicError::Other),
                    )))
                });
                async move { Ok(future.await?.boxed()) }.boxed()
            }
            CloudModel::OpenAi(model) => {
                let client = self.client.clone();
                let request = request.into_open_ai(model.id().into(), model.max_output_tokens());
                let llm_api_token = self.llm_api_token.clone();
                let future = self.request_limiter.stream(async move {
                    let response = Self::perform_llm_completion(
                        client.clone(),
                        llm_api_token,
                        PerformCompletionParams {
                            provider: client::LanguageModelProvider::OpenAi,
                            model: request.model.clone(),
                            provider_request: RawValue::from_string(serde_json::to_string(
                                &request,
                            )?)?,
                        },
                    )
                    .await?;
                    Ok(open_ai::extract_text_from_events(response_lines(response)))
                });
                async move {
                    Ok(future
                        .await?
                        .map(|result| result.map(LanguageModelCompletionEvent::Text))
                        .boxed())
                }
                .boxed()
            }
            CloudModel::Google(model) => {
                let client = self.client.clone();
                let request = request.into_google(model.id().into());
                let llm_api_token = self.llm_api_token.clone();
                let future = self.request_limiter.stream(async move {
                    let response = Self::perform_llm_completion(
                        client.clone(),
                        llm_api_token,
                        PerformCompletionParams {
                            provider: client::LanguageModelProvider::Google,
                            model: request.model.clone(),
                            provider_request: RawValue::from_string(serde_json::to_string(
                                &request,
                            )?)?,
                        },
                    )
                    .await?;
                    Ok(google_ai::extract_text_from_events(response_lines(
                        response,
                    )))
                });
                async move {
                    Ok(future
                        .await?
                        .map(|result| result.map(LanguageModelCompletionEvent::Text))
                        .boxed())
                }
                .boxed()
            }
        }
    }

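    // Forced tool use: the request restricts the model to the single named tool
    // and the response stream is reduced to that tool's streamed argument
    // fragments. Not implemented for Google models.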
    fn use_any_tool(
        &self,
        request: LanguageModelRequest,
        tool_name: String,
        tool_description: String,
        input_schema: serde_json::Value,
        _cx: &AsyncApp,
    ) -> BoxFuture<'static, Result<BoxStream<'static, Result<String>>>> {
        let client = self.client.clone();
        let llm_api_token = self.llm_api_token.clone();

        match &self.model {
            CloudModel::Anthropic(model) => {
                let mut request = request.into_anthropic(
                    model.tool_model_id().into(),
                    model.default_temperature(),
                    model.max_output_tokens(),
                );
                request.tool_choice = Some(anthropic::ToolChoice::Tool {
                    name: tool_name.clone(),
                });
                request.tools = vec![anthropic::Tool {
                    name: tool_name.clone(),
                    description: tool_description,
                    input_schema,
                }];

                self.request_limiter
                    .run(async move {
                        let response = Self::perform_llm_completion(
                            client.clone(),
                            llm_api_token,
                            PerformCompletionParams {
                                provider: client::LanguageModelProvider::Anthropic,
                                model: request.model.clone(),
                                provider_request: RawValue::from_string(serde_json::to_string(
                                    &request,
                                )?)?,
                            },
                        )
                        .await?;

                        Ok(anthropic::extract_tool_args_from_events(
                            tool_name,
                            Box::pin(response_lines(response)),
                        )
                        .await?
                        .boxed())
                    })
                    .boxed()
            }
            CloudModel::OpenAi(model) => {
                let mut request =
                    request.into_open_ai(model.id().into(), model.max_output_tokens());
                request.tool_choice = Some(open_ai::ToolChoice::Other(
                    open_ai::ToolDefinition::Function {
                        function: open_ai::FunctionDefinition {
                            name: tool_name.clone(),
                            description: None,
                            parameters: None,
                        },
                    },
                ));
                request.tools = vec![open_ai::ToolDefinition::Function {
                    function: open_ai::FunctionDefinition {
                        name: tool_name.clone(),
                        description: Some(tool_description),
                        parameters: Some(input_schema),
                    },
                }];

                self.request_limiter
                    .run(async move {
                        let response = Self::perform_llm_completion(
                            client.clone(),
                            llm_api_token,
                            PerformCompletionParams {
                                provider: client::LanguageModelProvider::OpenAi,
                                model: request.model.clone(),
                                provider_request: RawValue::from_string(serde_json::to_string(
                                    &request,
                                )?)?,
                            },
                        )
                        .await?;

                        Ok(open_ai::extract_tool_args_from_events(
                            tool_name,
                            Box::pin(response_lines(response)),
                        )
                        .await?
                        .boxed())
                    })
                    .boxed()
            }
            CloudModel::Google(_) => {
                future::ready(Err(anyhow!("tool use not implemented for Google AI"))).boxed()
            }
        }
    }
}

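// The LLM service streams completions as newline-delimited JSON, one event per
// line; this adapter deserializes each line into `T` and ends the stream at EOF.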
fn response_lines<T: DeserializeOwned>(
    response: Response<AsyncBody>,
) -> impl Stream<Item = Result<T>> {
    futures::stream::try_unfold(
        (String::new(), BufReader::new(response.into_body())),
        move |(mut line, mut body)| async {
            match body.read_line(&mut line).await {
                Ok(0) => Ok(None),
                Ok(_) => {
                    let event: T = serde_json::from_str(&line)?;
                    line.clear();
                    Ok(Some((event, (line, body))))
                }
                Err(e) => Err(e.into()),
            }
        },
    )
}

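// The LLM token is cached behind an async `RwLock`. `acquire` returns the
// cached token when present and otherwise upgrades to a write lock and fetches
// one from the server; `refresh` always re-fetches.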
impl LlmApiToken {
    pub async fn acquire(&self, client: &Arc<Client>) -> Result<String> {
        let lock = self.0.upgradable_read().await;
        if let Some(token) = lock.as_ref() {
            Ok(token.to_string())
        } else {
            Self::fetch(RwLockUpgradableReadGuard::upgrade(lock).await, client).await
        }
    }

    pub async fn refresh(&self, client: &Arc<Client>) -> Result<String> {
        Self::fetch(self.0.write().await, client).await
    }

    async fn fetch<'a>(
        mut lock: RwLockWriteGuard<'a, Option<String>>,
        client: &Arc<Client>,
    ) -> Result<String> {
        let response = client.request(proto::GetLlmToken {}).await?;
        *lock = Some(response.token.clone());
        Ok(response.token)
    }
}

struct ConfigurationView {
    state: gpui::Entity<State>,
}

impl ConfigurationView {
    fn authenticate(&mut self, cx: &mut Context<Self>) {
        self.state.update(cx, |state, cx| {
            state.authenticate(cx).detach_and_log_err(cx);
        });
        cx.notify();
    }
}

impl Render for ConfigurationView {
    fn render(&mut self, _: &mut Window, cx: &mut Context<Self>) -> impl IntoElement {
        const ZED_AI_URL: &str = "https://zed.dev/ai";

        let is_connected = !self.state.read(cx).is_signed_out();
        let plan = self.state.read(cx).user_store.read(cx).current_plan();
        let has_accepted_terms = self.state.read(cx).has_accepted_terms_of_service(cx);

        let is_pro = plan == Some(proto::Plan::ZedPro);
        let subscription_text = Label::new(if is_pro {
            "You have full access to Zed's hosted LLMs, which include models from Anthropic, OpenAI, and Google. They come with faster speeds and higher limits through Zed Pro."
        } else {
            "You have basic access to models from Anthropic through the Zed AI Free plan."
        });
        let manage_subscription_button = if is_pro {
            Some(
                h_flex().child(
                    Button::new("manage_settings", "Manage Subscription")
                        .style(ButtonStyle::Tinted(TintColor::Accent))
                        .on_click(
                            cx.listener(|_, _, _, cx| cx.open_url(&zed_urls::account_url(cx))),
                        ),
                ),
            )
        } else if cx.has_flag::<ZedPro>() {
            Some(
                h_flex()
                    .gap_2()
                    .child(
                        Button::new("learn_more", "Learn more")
                            .style(ButtonStyle::Subtle)
                            .on_click(cx.listener(|_, _, _, cx| cx.open_url(ZED_AI_URL))),
                    )
                    .child(
                        Button::new("upgrade", "Upgrade")
                            .style(ButtonStyle::Subtle)
                            .color(Color::Accent)
                            .on_click(
                                cx.listener(|_, _, _, cx| cx.open_url(&zed_urls::account_url(cx))),
                            ),
                    ),
            )
        } else {
            None
        };

        if is_connected {
            v_flex()
                .gap_3()
                .w_full()
                .children(render_accept_terms(
                    self.state.clone(),
                    LanguageModelProviderTosView::Configuration,
                    cx,
                ))
                .when(has_accepted_terms, |this| {
                    this.child(subscription_text)
                        .children(manage_subscription_button)
                })
        } else {
            v_flex()
                .gap_2()
                .child(Label::new("Use Zed AI to access hosted language models."))
                .child(
                    Button::new("sign_in", "Sign In")
                        .icon_color(Color::Muted)
                        .icon(IconName::Github)
                        .icon_position(IconPosition::Start)
                        .on_click(cx.listener(move |this, _, _, cx| this.authenticate(cx))),
                )
        }
    }
}