use super::open_ai::count_open_ai_tokens;
use anthropic::AnthropicError;
use anyhow::{anyhow, Result};
use client::{
    zed_urls, Client, PerformCompletionParams, UserStore, EXPIRED_LLM_TOKEN_HEADER_NAME,
    MAX_LLM_MONTHLY_SPEND_REACHED_HEADER_NAME,
};
use collections::BTreeMap;
use feature_flags::{FeatureFlagAppExt, LlmClosedBeta, ZedPro};
use futures::{
    future::BoxFuture, stream::BoxStream, AsyncBufReadExt, FutureExt, Stream, StreamExt,
    TryStreamExt as _,
};
use gpui::{
    AnyElement, AnyView, App, AsyncApp, Context, Entity, EventEmitter, Global, ReadGlobal,
    Subscription, Task,
};
use http_client::{AsyncBody, HttpClient, Method, Response, StatusCode};
use language_model::{
    AuthenticateError, CloudModel, LanguageModel, LanguageModelCacheConfiguration, LanguageModelId,
    LanguageModelName, LanguageModelProviderId, LanguageModelProviderName,
    LanguageModelProviderState, LanguageModelProviderTosView, LanguageModelRequest, RateLimiter,
    ZED_CLOUD_PROVIDER_ID,
};
use language_model::{
    LanguageModelAvailability, LanguageModelCompletionEvent, LanguageModelProvider,
};
use proto::TypedEnvelope;
use schemars::JsonSchema;
use serde::{de::DeserializeOwned, Deserialize, Serialize};
use serde_json::value::RawValue;
use settings::{Settings, SettingsStore};
use smol::{
    io::{AsyncReadExt, BufReader},
    lock::{RwLock, RwLockUpgradableReadGuard, RwLockWriteGuard},
};
use std::fmt;
use std::{
    future,
    sync::{Arc, LazyLock},
};
use strum::IntoEnumIterator;
use thiserror::Error;
use ui::{prelude::*, TintColor};

use crate::provider::anthropic::map_to_language_model_completion_events;
use crate::AllLanguageModelSettings;

use super::anthropic::count_anthropic_tokens;

pub const PROVIDER_NAME: &str = "Zed";

const ZED_CLOUD_PROVIDER_ADDITIONAL_MODELS_JSON: Option<&str> =
    option_env!("ZED_CLOUD_PROVIDER_ADDITIONAL_MODELS_JSON");

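/// Returns any extra models baked into the binary at compile time via the
/// `ZED_CLOUD_PROVIDER_ADDITIONAL_MODELS_JSON` environment variable. These are
/// only surfaced to users with the LLM closed beta feature flag.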
fn zed_cloud_provider_additional_models() -> &'static [AvailableModel] {
    static ADDITIONAL_MODELS: LazyLock<Vec<AvailableModel>> = LazyLock::new(|| {
        ZED_CLOUD_PROVIDER_ADDITIONAL_MODELS_JSON
            .map(|json| serde_json::from_str(json).unwrap())
            .unwrap_or_default()
    });
    ADDITIONAL_MODELS.as_slice()
}

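/// Settings for the `zed.dev` (Zed-hosted) language model provider.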
#[derive(Default, Clone, Debug, PartialEq)]
pub struct ZedDotDevSettings {
    pub available_models: Vec<AvailableModel>,
}

#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema)]
#[serde(rename_all = "lowercase")]
pub enum AvailableProvider {
    Anthropic,
    OpenAi,
    Google,
}

#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema)]
pub struct AvailableModel {
    /// The provider of the language model.
    pub provider: AvailableProvider,
    /// The model's name in the provider's API. e.g. claude-3-5-sonnet-20240620
    pub name: String,
    /// The name displayed in the UI, such as in the assistant panel model dropdown menu.
    pub display_name: Option<String>,
    /// The size of the context window, indicating the maximum number of tokens the model can process.
    pub max_tokens: usize,
    /// The maximum number of output tokens allowed by the model.
    pub max_output_tokens: Option<u32>,
    /// The maximum number of completion tokens allowed by the model (o1-* only)
    pub max_completion_tokens: Option<u32>,
    /// Override this model with a different Anthropic model for tool calls.
    pub tool_override: Option<String>,
    /// Indicates whether this custom model supports caching.
    pub cache_configuration: Option<LanguageModelCacheConfiguration>,
    /// The default temperature to use for this model.
    pub default_temperature: Option<f32>,
    /// Any extra beta headers to provide when using the model.
    #[serde(default)]
    pub extra_beta_headers: Vec<String>,
}

struct GlobalRefreshLlmTokenListener(Entity<RefreshLlmTokenListener>);

impl Global for GlobalRefreshLlmTokenListener {}

pub struct RefreshLlmTokenEvent;

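/// Listens for `RefreshLlmToken` messages from the server and re-emits them as
/// [`RefreshLlmTokenEvent`]s so that holders of an [`LlmApiToken`] can refresh it.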
pub struct RefreshLlmTokenListener {
    _llm_token_subscription: client::Subscription,
}

impl EventEmitter<RefreshLlmTokenEvent> for RefreshLlmTokenListener {}

impl RefreshLlmTokenListener {
    pub fn register(client: Arc<Client>, cx: &mut App) {
        let listener = cx.new(|cx| RefreshLlmTokenListener::new(client, cx));
        cx.set_global(GlobalRefreshLlmTokenListener(listener));
    }

    pub fn global(cx: &App) -> Entity<Self> {
        GlobalRefreshLlmTokenListener::global(cx).0.clone()
    }

    fn new(client: Arc<Client>, cx: &mut Context<Self>) -> Self {
        Self {
            _llm_token_subscription: client
                .add_message_handler(cx.weak_entity(), Self::handle_refresh_llm_token),
        }
    }

    async fn handle_refresh_llm_token(
        this: Entity<Self>,
        _: TypedEnvelope<proto::RefreshLlmToken>,
        mut cx: AsyncApp,
    ) -> Result<()> {
        this.update(&mut cx, |_this, cx| cx.emit(RefreshLlmTokenEvent))
    }
}

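/// The Zed-hosted language model provider, which serves Anthropic, OpenAI, and
/// Google models through Zed's LLM service.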
pub struct CloudLanguageModelProvider {
    client: Arc<Client>,
    state: gpui::Entity<State>,
    _maintain_client_status: Task<()>,
}

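/// Shared state for the provider: the RPC client, the cached LLM API token, and the
/// user's connection and terms-of-service status.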
pub struct State {
    client: Arc<Client>,
    llm_api_token: LlmApiToken,
    user_store: Entity<UserStore>,
    status: client::Status,
    accept_terms: Option<Task<Result<()>>>,
    _settings_subscription: Subscription,
    _llm_token_subscription: Subscription,
}

impl State {
    fn new(
        client: Arc<Client>,
        user_store: Entity<UserStore>,
        status: client::Status,
        cx: &mut Context<Self>,
    ) -> Self {
        let refresh_llm_token_listener = RefreshLlmTokenListener::global(cx);

        Self {
            client: client.clone(),
            llm_api_token: LlmApiToken::default(),
            user_store,
            status,
            accept_terms: None,
            _settings_subscription: cx.observe_global::<SettingsStore>(|_, cx| {
                cx.notify();
            }),
            _llm_token_subscription: cx.subscribe(
                &refresh_llm_token_listener,
                |this, _listener, _event, cx| {
                    let client = this.client.clone();
                    let llm_api_token = this.llm_api_token.clone();
                    cx.spawn(|_this, _cx| async move {
                        llm_api_token.refresh(&client).await?;
                        anyhow::Ok(())
                    })
                    .detach_and_log_err(cx);
                },
            ),
        }
    }

    fn is_signed_out(&self) -> bool {
        self.status.is_signed_out()
    }

    fn authenticate(&self, cx: &mut Context<Self>) -> Task<Result<()>> {
        let client = self.client.clone();
        cx.spawn(move |this, mut cx| async move {
            client.authenticate_and_connect(true, &cx).await?;
            this.update(&mut cx, |_, cx| cx.notify())
        })
    }

    fn has_accepted_terms_of_service(&self, cx: &App) -> bool {
        self.user_store
            .read(cx)
            .current_user_has_accepted_terms()
            .unwrap_or(false)
    }

    fn accept_terms_of_service(&mut self, cx: &mut Context<Self>) {
        let user_store = self.user_store.clone();
        self.accept_terms = Some(cx.spawn(move |this, mut cx| async move {
            let _ = user_store
                .update(&mut cx, |store, cx| store.accept_terms_of_service(cx))?
                .await;
            this.update(&mut cx, |this, cx| {
                this.accept_terms = None;
                cx.notify()
            })
        }));
    }
}

impl CloudLanguageModelProvider {
    pub fn new(user_store: Entity<UserStore>, client: Arc<Client>, cx: &mut App) -> Self {
        let mut status_rx = client.status();
        let status = *status_rx.borrow();

        let state = cx.new(|cx| State::new(client.clone(), user_store.clone(), status, cx));

        let state_ref = state.downgrade();
        let maintain_client_status = cx.spawn(|mut cx| async move {
            while let Some(status) = status_rx.next().await {
                if let Some(this) = state_ref.upgrade() {
                    _ = this.update(&mut cx, |this, cx| {
                        if this.status != status {
                            this.status = status;
                            cx.notify();
                        }
                    });
                } else {
                    break;
                }
            }
        });

        Self {
            client,
            state: state.clone(),
            _maintain_client_status: maintain_client_status,
        }
    }
}

impl LanguageModelProviderState for CloudLanguageModelProvider {
    type ObservableEntity = State;

    fn observable_entity(&self) -> Option<gpui::Entity<Self::ObservableEntity>> {
        Some(self.state.clone())
    }
}

impl LanguageModelProvider for CloudLanguageModelProvider {
    fn id(&self) -> LanguageModelProviderId {
        LanguageModelProviderId(ZED_CLOUD_PROVIDER_ID.into())
    }

    fn name(&self) -> LanguageModelProviderName {
        LanguageModelProviderName(PROVIDER_NAME.into())
    }

    fn icon(&self) -> IconName {
        IconName::AiZed
    }

    fn provided_models(&self, cx: &App) -> Vec<Arc<dyn LanguageModel>> {
        let mut models = BTreeMap::default();

        // Staff members get every non-custom model from each provider; everyone else
        // gets the default Claude models.
        if cx.is_staff() {
            for model in anthropic::Model::iter() {
                if !matches!(model, anthropic::Model::Custom { .. }) {
                    models.insert(model.id().to_string(), CloudModel::Anthropic(model));
                }
            }
            for model in open_ai::Model::iter() {
                if !matches!(model, open_ai::Model::Custom { .. }) {
                    models.insert(model.id().to_string(), CloudModel::OpenAi(model));
                }
            }
            for model in google_ai::Model::iter() {
                if !matches!(model, google_ai::Model::Custom { .. }) {
                    models.insert(model.id().to_string(), CloudModel::Google(model));
                }
            }
        } else {
            models.insert(
                anthropic::Model::Claude3_5Sonnet.id().to_string(),
                CloudModel::Anthropic(anthropic::Model::Claude3_5Sonnet),
            );
            models.insert(
                anthropic::Model::Claude3_7Sonnet.id().to_string(),
                CloudModel::Anthropic(anthropic::Model::Claude3_7Sonnet),
            );
        }

        let llm_closed_beta_models = if cx.has_flag::<LlmClosedBeta>() {
            zed_cloud_provider_additional_models()
        } else {
            &[]
        };

        // Override with available models from settings
        for model in AllLanguageModelSettings::get_global(cx)
            .zed_dot_dev
            .available_models
            .iter()
            .chain(llm_closed_beta_models)
            .cloned()
        {
            let model = match model.provider {
                AvailableProvider::Anthropic => CloudModel::Anthropic(anthropic::Model::Custom {
                    name: model.name.clone(),
                    display_name: model.display_name.clone(),
                    max_tokens: model.max_tokens,
                    tool_override: model.tool_override.clone(),
                    cache_configuration: model.cache_configuration.as_ref().map(|config| {
                        anthropic::AnthropicModelCacheConfiguration {
                            max_cache_anchors: config.max_cache_anchors,
                            should_speculate: config.should_speculate,
                            min_total_token: config.min_total_token,
                        }
                    }),
                    default_temperature: model.default_temperature,
                    max_output_tokens: model.max_output_tokens,
                    extra_beta_headers: model.extra_beta_headers.clone(),
                }),
                AvailableProvider::OpenAi => CloudModel::OpenAi(open_ai::Model::Custom {
                    name: model.name.clone(),
                    display_name: model.display_name.clone(),
                    max_tokens: model.max_tokens,
                    max_output_tokens: model.max_output_tokens,
                    max_completion_tokens: model.max_completion_tokens,
                }),
                AvailableProvider::Google => CloudModel::Google(google_ai::Model::Custom {
                    name: model.name.clone(),
                    display_name: model.display_name.clone(),
                    max_tokens: model.max_tokens,
                }),
            };
            models.insert(model.id().to_string(), model.clone());
        }

        let llm_api_token = self.state.read(cx).llm_api_token.clone();
        models
            .into_values()
            .map(|model| {
                Arc::new(CloudLanguageModel {
                    id: LanguageModelId::from(model.id().to_string()),
                    model,
                    llm_api_token: llm_api_token.clone(),
                    client: self.client.clone(),
                    request_limiter: RateLimiter::new(4),
                }) as Arc<dyn LanguageModel>
            })
            .collect()
    }

    fn is_authenticated(&self, cx: &App) -> bool {
        !self.state.read(cx).is_signed_out()
    }

    fn authenticate(&self, _cx: &mut App) -> Task<Result<(), AuthenticateError>> {
        Task::ready(Ok(()))
    }

    fn configuration_view(&self, _: &mut Window, cx: &mut App) -> AnyView {
        cx.new(|_| ConfigurationView {
            state: self.state.clone(),
        })
        .into()
    }

    fn must_accept_terms(&self, cx: &App) -> bool {
        !self.state.read(cx).has_accepted_terms_of_service(cx)
    }

    fn render_accept_terms(
        &self,
        view: LanguageModelProviderTosView,
        cx: &mut App,
    ) -> Option<AnyElement> {
        render_accept_terms(self.state.clone(), view, cx)
    }

    fn reset_credentials(&self, _cx: &mut App) -> Task<Result<()>> {
        Task::ready(Ok(()))
    }
}

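/// Renders the terms-of-service acceptance prompt, or returns `None` if the user has
/// already accepted the terms.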
fn render_accept_terms(
    state: Entity<State>,
    view_kind: LanguageModelProviderTosView,
    cx: &mut App,
) -> Option<AnyElement> {
    if state.read(cx).has_accepted_terms_of_service(cx) {
        return None;
    }

    let accept_terms_disabled = state.read(cx).accept_terms.is_some();

    let terms_button = Button::new("terms_of_service", "Terms of Service")
        .style(ButtonStyle::Subtle)
        .icon(IconName::ArrowUpRight)
        .icon_color(Color::Muted)
        .icon_size(IconSize::XSmall)
        .on_click(move |_, _window, cx| cx.open_url("https://zed.dev/terms-of-service"));

    let text = "To start using Zed AI, please read and accept the";

    let form = v_flex()
        .w_full()
        .gap_2()
        .when(
            view_kind == LanguageModelProviderTosView::ThreadEmptyState,
            |form| form.items_center(),
        )
        .child(
            h_flex()
                .flex_wrap()
                .when(
                    view_kind == LanguageModelProviderTosView::ThreadEmptyState,
                    |form| form.justify_center(),
                )
                .child(Label::new(text))
                .child(terms_button),
        )
        .child({
            let button_container = h_flex().w_full().child(
                Button::new("accept_terms", "I accept the Terms of Service")
                    .style(ButtonStyle::Tinted(TintColor::Accent))
                    .disabled(accept_terms_disabled)
                    .on_click({
                        let state = state.downgrade();
                        move |_, _window, cx| {
                            state
                                .update(cx, |state, cx| state.accept_terms_of_service(cx))
                                .ok();
                        }
                    }),
            );

            match view_kind {
                LanguageModelProviderTosView::ThreadEmptyState => button_container.justify_center(),
                LanguageModelProviderTosView::PromptEditorPopup => button_container.justify_end(),
                LanguageModelProviderTosView::Configuration => button_container.justify_start(),
            }
        });

    Some(form.into_any())
}

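/// A single Zed-hosted model, exposed to the rest of the app as a [`LanguageModel`].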
pub struct CloudLanguageModel {
    id: LanguageModelId,
    model: CloudModel,
    llm_api_token: LlmApiToken,
    client: Arc<Client>,
    request_limiter: RateLimiter,
}

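/// A lazily fetched, shared token for Zed's LLM service. The token is requested over
/// RPC on first use and refreshed when the service reports it has expired.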
#[derive(Clone, Default)]
pub struct LlmApiToken(Arc<RwLock<Option<String>>>);

#[derive(Error, Debug)]
pub struct PaymentRequiredError;

impl fmt::Display for PaymentRequiredError {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(
            f,
            "Payment required to use this language model. Please upgrade your account."
        )
    }
}

#[derive(Error, Debug)]
pub struct MaxMonthlySpendReachedError;

impl fmt::Display for MaxMonthlySpendReachedError {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(
            f,
            "Maximum spending limit reached for this month. For more usage, increase your spending limit."
        )
    }
}

impl CloudLanguageModel {
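    /// Sends a completion request to Zed's LLM service, retrying once with a freshly
    /// fetched token if the current one has expired, and mapping payment-related
    /// failures to typed errors.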
    async fn perform_llm_completion(
        client: Arc<Client>,
        llm_api_token: LlmApiToken,
        body: PerformCompletionParams,
    ) -> Result<Response<AsyncBody>> {
        let http_client = &client.http_client();

        let mut token = llm_api_token.acquire(&client).await?;
        let mut did_retry = false;

        let response = loop {
            let request_builder = http_client::Request::builder();
            let request = request_builder
                .method(Method::POST)
                .uri(http_client.build_zed_llm_url("/completion", &[])?.as_ref())
                .header("Content-Type", "application/json")
                .header("Authorization", format!("Bearer {token}"))
                .body(serde_json::to_string(&body)?.into())?;
            let mut response = http_client.send(request).await?;
            if response.status().is_success() {
                break response;
            } else if !did_retry
                && response
                    .headers()
                    .get(EXPIRED_LLM_TOKEN_HEADER_NAME)
                    .is_some()
            {
                did_retry = true;
                token = llm_api_token.refresh(&client).await?;
            } else if response.status() == StatusCode::FORBIDDEN
                && response
                    .headers()
                    .get(MAX_LLM_MONTHLY_SPEND_REACHED_HEADER_NAME)
                    .is_some()
            {
                break Err(anyhow!(MaxMonthlySpendReachedError))?;
            } else if response.status() == StatusCode::PAYMENT_REQUIRED {
                break Err(anyhow!(PaymentRequiredError))?;
            } else {
                let mut body = String::new();
                response.body_mut().read_to_string(&mut body).await?;
                break Err(anyhow!(
                    "cloud language model completion failed with status {}: {body}",
                    response.status()
                ))?;
            }
        };

        Ok(response)
    }
}

impl LanguageModel for CloudLanguageModel {
    fn id(&self) -> LanguageModelId {
        self.id.clone()
    }

    fn name(&self) -> LanguageModelName {
        LanguageModelName::from(self.model.display_name().to_string())
    }

    fn icon(&self) -> Option<IconName> {
        self.model.icon()
    }

    fn provider_id(&self) -> LanguageModelProviderId {
        LanguageModelProviderId(ZED_CLOUD_PROVIDER_ID.into())
    }

    fn provider_name(&self) -> LanguageModelProviderName {
        LanguageModelProviderName(PROVIDER_NAME.into())
    }

    fn telemetry_id(&self) -> String {
        format!("zed.dev/{}", self.model.id())
    }

    fn availability(&self) -> LanguageModelAvailability {
        self.model.availability()
    }

    fn max_token_count(&self) -> usize {
        self.model.max_token_count()
    }

    fn cache_configuration(&self) -> Option<LanguageModelCacheConfiguration> {
        match &self.model {
            CloudModel::Anthropic(model) => {
                model
                    .cache_configuration()
                    .map(|cache| LanguageModelCacheConfiguration {
                        max_cache_anchors: cache.max_cache_anchors,
                        should_speculate: cache.should_speculate,
                        min_total_token: cache.min_total_token,
                    })
            }
            CloudModel::OpenAi(_) | CloudModel::Google(_) => None,
        }
    }

    fn count_tokens(
        &self,
        request: LanguageModelRequest,
        cx: &App,
    ) -> BoxFuture<'static, Result<usize>> {
        match self.model.clone() {
            CloudModel::Anthropic(_) => count_anthropic_tokens(request, cx),
            CloudModel::OpenAi(model) => count_open_ai_tokens(request, model, cx),
            CloudModel::Google(model) => {
                let client = self.client.clone();
                let request = request.into_google(model.id().into());
                let request = google_ai::CountTokensRequest {
                    contents: request.contents,
                };
                async move {
                    let request = serde_json::to_string(&request)?;
                    let response = client
                        .request(proto::CountLanguageModelTokens {
                            provider: proto::LanguageModelProvider::Google as i32,
                            request,
                        })
                        .await?;
                    Ok(response.token_count as usize)
                }
                .boxed()
            }
        }
    }

    fn stream_completion(
        &self,
        request: LanguageModelRequest,
        _cx: &AsyncApp,
    ) -> BoxFuture<'static, Result<BoxStream<'static, Result<LanguageModelCompletionEvent>>>> {
        match &self.model {
            CloudModel::Anthropic(model) => {
                let request = request.into_anthropic(
                    model.id().into(),
                    model.default_temperature(),
                    model.max_output_tokens(),
                );
                let client = self.client.clone();
                let llm_api_token = self.llm_api_token.clone();
                let future = self.request_limiter.stream(async move {
                    let response = Self::perform_llm_completion(
                        client.clone(),
                        llm_api_token,
                        PerformCompletionParams {
                            provider: client::LanguageModelProvider::Anthropic,
                            model: request.model.clone(),
                            provider_request: RawValue::from_string(serde_json::to_string(
                                &request,
                            )?)?,
                        },
                    )
                    .await?;
                    Ok(map_to_language_model_completion_events(Box::pin(
                        response_lines(response).map_err(AnthropicError::Other),
                    )))
                });
                async move { Ok(future.await?.boxed()) }.boxed()
            }
            CloudModel::OpenAi(model) => {
                let client = self.client.clone();
                let request = request.into_open_ai(model.id().into(), model.max_output_tokens());
                let llm_api_token = self.llm_api_token.clone();
                let future = self.request_limiter.stream(async move {
                    let response = Self::perform_llm_completion(
                        client.clone(),
                        llm_api_token,
                        PerformCompletionParams {
                            provider: client::LanguageModelProvider::OpenAi,
                            model: request.model.clone(),
                            provider_request: RawValue::from_string(serde_json::to_string(
                                &request,
                            )?)?,
                        },
                    )
                    .await?;
                    Ok(open_ai::extract_text_from_events(response_lines(response)))
                });
                async move {
                    Ok(future
                        .await?
                        .map(|result| result.map(LanguageModelCompletionEvent::Text))
                        .boxed())
                }
                .boxed()
            }
            CloudModel::Google(model) => {
                let client = self.client.clone();
                let request = request.into_google(model.id().into());
                let llm_api_token = self.llm_api_token.clone();
                let future = self.request_limiter.stream(async move {
                    let response = Self::perform_llm_completion(
                        client.clone(),
                        llm_api_token,
                        PerformCompletionParams {
                            provider: client::LanguageModelProvider::Google,
                            model: request.model.clone(),
                            provider_request: RawValue::from_string(serde_json::to_string(
                                &request,
                            )?)?,
                        },
                    )
                    .await?;
                    Ok(google_ai::extract_text_from_events(response_lines(
                        response,
                    )))
                });
                async move {
                    Ok(future
                        .await?
                        .map(|result| result.map(LanguageModelCompletionEvent::Text))
                        .boxed())
                }
                .boxed()
            }
        }
    }

    fn use_any_tool(
        &self,
        request: LanguageModelRequest,
        tool_name: String,
        tool_description: String,
        input_schema: serde_json::Value,
        _cx: &AsyncApp,
    ) -> BoxFuture<'static, Result<BoxStream<'static, Result<String>>>> {
        let client = self.client.clone();
        let llm_api_token = self.llm_api_token.clone();

        match &self.model {
            CloudModel::Anthropic(model) => {
                let mut request = request.into_anthropic(
                    model.tool_model_id().into(),
                    model.default_temperature(),
                    model.max_output_tokens(),
                );
                request.tool_choice = Some(anthropic::ToolChoice::Tool {
                    name: tool_name.clone(),
                });
                request.tools = vec![anthropic::Tool {
                    name: tool_name.clone(),
                    description: tool_description,
                    input_schema,
                }];

                self.request_limiter
                    .run(async move {
                        let response = Self::perform_llm_completion(
                            client.clone(),
                            llm_api_token,
                            PerformCompletionParams {
                                provider: client::LanguageModelProvider::Anthropic,
                                model: request.model.clone(),
                                provider_request: RawValue::from_string(serde_json::to_string(
                                    &request,
                                )?)?,
                            },
                        )
                        .await?;

                        Ok(anthropic::extract_tool_args_from_events(
                            tool_name,
                            Box::pin(response_lines(response)),
                        )
                        .await?
                        .boxed())
                    })
                    .boxed()
            }
            CloudModel::OpenAi(model) => {
                let mut request =
                    request.into_open_ai(model.id().into(), model.max_output_tokens());
                request.tool_choice = Some(open_ai::ToolChoice::Other(
                    open_ai::ToolDefinition::Function {
                        function: open_ai::FunctionDefinition {
                            name: tool_name.clone(),
                            description: None,
                            parameters: None,
                        },
                    },
                ));
                request.tools = vec![open_ai::ToolDefinition::Function {
                    function: open_ai::FunctionDefinition {
                        name: tool_name.clone(),
                        description: Some(tool_description),
                        parameters: Some(input_schema),
                    },
                }];

                self.request_limiter
                    .run(async move {
                        let response = Self::perform_llm_completion(
                            client.clone(),
                            llm_api_token,
                            PerformCompletionParams {
                                provider: client::LanguageModelProvider::OpenAi,
                                model: request.model.clone(),
                                provider_request: RawValue::from_string(serde_json::to_string(
                                    &request,
                                )?)?,
                            },
                        )
                        .await?;

                        Ok(open_ai::extract_tool_args_from_events(
                            tool_name,
                            Box::pin(response_lines(response)),
                        )
                        .await?
                        .boxed())
                    })
                    .boxed()
            }
            CloudModel::Google(_) => {
                future::ready(Err(anyhow!("tool use not implemented for Google AI"))).boxed()
            }
        }
    }
}

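/// Converts a streaming response body into a stream of values, deserializing each
/// newline-delimited JSON line as a `T`.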
fn response_lines<T: DeserializeOwned>(
    response: Response<AsyncBody>,
) -> impl Stream<Item = Result<T>> {
    futures::stream::try_unfold(
        (String::new(), BufReader::new(response.into_body())),
        move |(mut line, mut body)| async {
            match body.read_line(&mut line).await {
                Ok(0) => Ok(None),
                Ok(_) => {
                    let event: T = serde_json::from_str(&line)?;
                    line.clear();
                    Ok(Some((event, (line, body))))
                }
                Err(e) => Err(e.into()),
            }
        },
    )
}

impl LlmApiToken {
    pub async fn acquire(&self, client: &Arc<Client>) -> Result<String> {
        let lock = self.0.upgradable_read().await;
        if let Some(token) = lock.as_ref() {
            Ok(token.to_string())
        } else {
            Self::fetch(RwLockUpgradableReadGuard::upgrade(lock).await, client).await
        }
    }

    pub async fn refresh(&self, client: &Arc<Client>) -> Result<String> {
        Self::fetch(self.0.write().await, client).await
    }

    async fn fetch<'a>(
        mut lock: RwLockWriteGuard<'a, Option<String>>,
        client: &Arc<Client>,
    ) -> Result<String> {
        let response = client.request(proto::GetLlmToken {}).await?;
        *lock = Some(response.token.clone());
        Ok(response.token.clone())
    }
}

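/// Configuration UI for the provider: sign-in, terms-of-service acceptance, and
/// plan/subscription information.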
struct ConfigurationView {
    state: gpui::Entity<State>,
}

impl ConfigurationView {
    fn authenticate(&mut self, cx: &mut Context<Self>) {
        self.state.update(cx, |state, cx| {
            state.authenticate(cx).detach_and_log_err(cx);
        });
        cx.notify();
    }
}

impl Render for ConfigurationView {
    fn render(&mut self, _: &mut Window, cx: &mut Context<Self>) -> impl IntoElement {
        const ZED_AI_URL: &str = "https://zed.dev/ai";

        let is_connected = !self.state.read(cx).is_signed_out();
        let plan = self.state.read(cx).user_store.read(cx).current_plan();
        let has_accepted_terms = self.state.read(cx).has_accepted_terms_of_service(cx);

        let is_pro = plan == Some(proto::Plan::ZedPro);
        let subscription_text = Label::new(if is_pro {
            "You have full access to Zed's hosted LLMs, which include models from Anthropic, OpenAI, and Google. They come with faster speeds and higher limits through Zed Pro."
        } else {
            "You have basic access to models from Anthropic through the Zed AI Free plan."
        });
        let manage_subscription_button = if is_pro {
            Some(
                h_flex().child(
                    Button::new("manage_settings", "Manage Subscription")
                        .style(ButtonStyle::Tinted(TintColor::Accent))
                        .on_click(
                            cx.listener(|_, _, _, cx| cx.open_url(&zed_urls::account_url(cx))),
                        ),
                ),
            )
        } else if cx.has_flag::<ZedPro>() {
            Some(
                h_flex()
                    .gap_2()
                    .child(
                        Button::new("learn_more", "Learn more")
                            .style(ButtonStyle::Subtle)
                            .on_click(cx.listener(|_, _, _, cx| cx.open_url(ZED_AI_URL))),
                    )
                    .child(
                        Button::new("upgrade", "Upgrade")
                            .style(ButtonStyle::Subtle)
                            .color(Color::Accent)
                            .on_click(
                                cx.listener(|_, _, _, cx| cx.open_url(&zed_urls::account_url(cx))),
                            ),
                    ),
            )
        } else {
            None
        };

        if is_connected {
            v_flex()
                .gap_3()
                .w_full()
                .children(render_accept_terms(
                    self.state.clone(),
                    LanguageModelProviderTosView::Configuration,
                    cx,
                ))
                .when(has_accepted_terms, |this| {
                    this.child(subscription_text)
                        .children(manage_subscription_button)
                })
        } else {
            v_flex()
                .gap_2()
                .child(Label::new("Use Zed AI to access hosted language models."))
                .child(
                    Button::new("sign_in", "Sign In")
                        .icon_color(Color::Muted)
                        .icon(IconName::Github)
                        .icon_position(IconPosition::Start)
                        .on_click(cx.listener(move |this, _, _, cx| this.authenticate(cx))),
                )
        }
    }
}