use super::open_ai::count_open_ai_tokens;
use anthropic::AnthropicError;
use anyhow::{anyhow, Result};
use client::{
    zed_urls, Client, PerformCompletionParams, UserStore, EXPIRED_LLM_TOKEN_HEADER_NAME,
    MAX_LLM_MONTHLY_SPEND_REACHED_HEADER_NAME,
};
use collections::BTreeMap;
use feature_flags::{FeatureFlagAppExt, LlmClosedBeta, ZedPro};
use futures::{
    future::BoxFuture, stream::BoxStream, AsyncBufReadExt, FutureExt, Stream, StreamExt,
    TryStreamExt as _,
};
use gpui::{
    AnyElement, AnyView, App, AsyncApp, Context, Entity, EventEmitter, Global, ReadGlobal,
    Subscription, Task,
};
use http_client::{AsyncBody, HttpClient, Method, Response, StatusCode};
use language_model::{
    AuthenticateError, CloudModel, LanguageModel, LanguageModelCacheConfiguration, LanguageModelId,
    LanguageModelName, LanguageModelProviderId, LanguageModelProviderName,
    LanguageModelProviderState, LanguageModelProviderTosView, LanguageModelRequest, RateLimiter,
    ZED_CLOUD_PROVIDER_ID,
};
use language_model::{
    LanguageModelAvailability, LanguageModelCompletionEvent, LanguageModelProvider,
};
use proto::TypedEnvelope;
use schemars::JsonSchema;
use serde::{de::DeserializeOwned, Deserialize, Serialize};
use serde_json::value::RawValue;
use settings::{Settings, SettingsStore};
use smol::{
    io::{AsyncReadExt, BufReader},
    lock::{RwLock, RwLockUpgradableReadGuard, RwLockWriteGuard},
};
use std::fmt;
use std::{
    future,
    sync::{Arc, LazyLock},
};
use strum::IntoEnumIterator;
use thiserror::Error;
use ui::{prelude::*, TintColor};

use crate::provider::anthropic::map_to_language_model_completion_events;
use crate::AllLanguageModelSettings;

use super::anthropic::count_anthropic_tokens;

pub const PROVIDER_NAME: &str = "Zed";

const ZED_CLOUD_PROVIDER_ADDITIONAL_MODELS_JSON: Option<&str> =
    option_env!("ZED_CLOUD_PROVIDER_ADDITIONAL_MODELS_JSON");

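/// Returns any additional models baked into the binary at compile time via the
/// `ZED_CLOUD_PROVIDER_ADDITIONAL_MODELS_JSON` environment variable. The value
/// is parsed once, on first access, as a JSON array of [`AvailableModel`]s and
/// panics at that point if the JSON is malformed.
///
/// A minimal sketch of the expected shape; the concrete model and values here
/// are illustrative only:
///
/// ```json
/// [
///   {
///     "provider": "anthropic",
///     "name": "claude-3-5-sonnet-20240620",
///     "max_tokens": 200000
///   }
/// ]
/// ```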
fn zed_cloud_provider_additional_models() -> &'static [AvailableModel] {
    static ADDITIONAL_MODELS: LazyLock<Vec<AvailableModel>> = LazyLock::new(|| {
        ZED_CLOUD_PROVIDER_ADDITIONAL_MODELS_JSON
            .map(|json| serde_json::from_str(json).unwrap())
            .unwrap_or_default()
    });
    ADDITIONAL_MODELS.as_slice()
}

#[derive(Default, Clone, Debug, PartialEq)]
pub struct ZedDotDevSettings {
    pub available_models: Vec<AvailableModel>,
}

#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema)]
#[serde(rename_all = "lowercase")]
pub enum AvailableProvider {
    Anthropic,
    OpenAi,
    Google,
}

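/// A model made available through the Zed cloud provider, either from the
/// `zed_dot_dev` section of [`AllLanguageModelSettings`] or from the
/// compile-time model list used for the LLM closed beta.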
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema)]
pub struct AvailableModel {
    /// The provider of the language model.
    pub provider: AvailableProvider,
    /// The model's name in the provider's API, e.g. `claude-3-5-sonnet-20240620`.
    pub name: String,
    /// The name displayed in the UI, such as in the assistant panel model dropdown menu.
    pub display_name: Option<String>,
    /// The size of the context window, indicating the maximum number of tokens the model can process.
    pub max_tokens: usize,
    /// The maximum number of output tokens allowed by the model.
    pub max_output_tokens: Option<u32>,
    /// The maximum number of completion tokens allowed by the model (o1-* only).
    pub max_completion_tokens: Option<u32>,
    /// Override this model with a different Anthropic model for tool calls.
    pub tool_override: Option<String>,
    /// Configuration for the model's prompt caching, if supported.
    pub cache_configuration: Option<LanguageModelCacheConfiguration>,
    /// The default temperature to use for this model.
    pub default_temperature: Option<f32>,
    /// Any extra beta headers to provide when using the model.
    #[serde(default)]
    pub extra_beta_headers: Vec<String>,
}

struct GlobalRefreshLlmTokenListener(Entity<RefreshLlmTokenListener>);

impl Global for GlobalRefreshLlmTokenListener {}

pub struct RefreshLlmTokenEvent;

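/// Forwards server-pushed `RefreshLlmToken` messages to the rest of the app as
/// [`RefreshLlmTokenEvent`]s, letting [`State`] proactively refresh its cached
/// [`LlmApiToken`] instead of waiting for a request to fail.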
pub struct RefreshLlmTokenListener {
    _llm_token_subscription: client::Subscription,
}

impl EventEmitter<RefreshLlmTokenEvent> for RefreshLlmTokenListener {}

impl RefreshLlmTokenListener {
    pub fn register(client: Arc<Client>, cx: &mut App) {
        let listener = cx.new(|cx| RefreshLlmTokenListener::new(client, cx));
        cx.set_global(GlobalRefreshLlmTokenListener(listener));
    }

    pub fn global(cx: &App) -> Entity<Self> {
        GlobalRefreshLlmTokenListener::global(cx).0.clone()
    }

    fn new(client: Arc<Client>, cx: &mut Context<Self>) -> Self {
        Self {
            _llm_token_subscription: client
                .add_message_handler(cx.weak_entity(), Self::handle_refresh_llm_token),
        }
    }

    async fn handle_refresh_llm_token(
        this: Entity<Self>,
        _: TypedEnvelope<proto::RefreshLlmToken>,
        mut cx: AsyncApp,
    ) -> Result<()> {
        this.update(&mut cx, |_this, cx| cx.emit(RefreshLlmTokenEvent))
    }
}

pub struct CloudLanguageModelProvider {
    client: Arc<Client>,
    state: gpui::Entity<State>,
    _maintain_client_status: Task<()>,
}

pub struct State {
    client: Arc<Client>,
    llm_api_token: LlmApiToken,
    user_store: Entity<UserStore>,
    status: client::Status,
    accept_terms: Option<Task<Result<()>>>,
    _settings_subscription: Subscription,
    _llm_token_subscription: Subscription,
}

impl State {
    fn new(
        client: Arc<Client>,
        user_store: Entity<UserStore>,
        status: client::Status,
        cx: &mut Context<Self>,
    ) -> Self {
        let refresh_llm_token_listener = RefreshLlmTokenListener::global(cx);

        Self {
            client: client.clone(),
            llm_api_token: LlmApiToken::default(),
            user_store,
            status,
            accept_terms: None,
            _settings_subscription: cx.observe_global::<SettingsStore>(|_, cx| {
                cx.notify();
            }),
            _llm_token_subscription: cx.subscribe(
                &refresh_llm_token_listener,
                |this, _listener, _event, cx| {
                    let client = this.client.clone();
                    let llm_api_token = this.llm_api_token.clone();
                    cx.spawn(|_this, _cx| async move {
                        llm_api_token.refresh(&client).await?;
                        anyhow::Ok(())
                    })
                    .detach_and_log_err(cx);
                },
            ),
        }
    }

    fn is_signed_out(&self) -> bool {
        self.status.is_signed_out()
    }

    fn authenticate(&self, cx: &mut Context<Self>) -> Task<Result<()>> {
        let client = self.client.clone();
        cx.spawn(move |this, mut cx| async move {
            client.authenticate_and_connect(true, &cx).await?;
            this.update(&mut cx, |_, cx| cx.notify())
        })
    }

    fn has_accepted_terms_of_service(&self, cx: &App) -> bool {
        self.user_store
            .read(cx)
            .current_user_has_accepted_terms()
            .unwrap_or(false)
    }

    fn accept_terms_of_service(&mut self, cx: &mut Context<Self>) {
        let user_store = self.user_store.clone();
        self.accept_terms = Some(cx.spawn(move |this, mut cx| async move {
            let _ = user_store
                .update(&mut cx, |store, cx| store.accept_terms_of_service(cx))?
                .await;
            this.update(&mut cx, |this, cx| {
                this.accept_terms = None;
                cx.notify()
            })
        }));
    }
}

impl CloudLanguageModelProvider {
    pub fn new(user_store: Entity<UserStore>, client: Arc<Client>, cx: &mut App) -> Self {
        let mut status_rx = client.status();
        let status = *status_rx.borrow();

        let state = cx.new(|cx| State::new(client.clone(), user_store.clone(), status, cx));

        let state_ref = state.downgrade();
        let maintain_client_status = cx.spawn(|mut cx| async move {
            while let Some(status) = status_rx.next().await {
                if let Some(this) = state_ref.upgrade() {
                    _ = this.update(&mut cx, |this, cx| {
                        if this.status != status {
                            this.status = status;
                            cx.notify();
                        }
                    });
                } else {
                    break;
                }
            }
        });

        Self {
            client,
            state: state.clone(),
            _maintain_client_status: maintain_client_status,
        }
    }
}

impl LanguageModelProviderState for CloudLanguageModelProvider {
    type ObservableEntity = State;

    fn observable_entity(&self) -> Option<gpui::Entity<Self::ObservableEntity>> {
        Some(self.state.clone())
    }
}

impl LanguageModelProvider for CloudLanguageModelProvider {
    fn id(&self) -> LanguageModelProviderId {
        LanguageModelProviderId(ZED_CLOUD_PROVIDER_ID.into())
    }

    fn name(&self) -> LanguageModelProviderName {
        LanguageModelProviderName(PROVIDER_NAME.into())
    }

    fn icon(&self) -> IconName {
        IconName::AiZed
    }

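    /// Staff accounts see the full Anthropic, OpenAI, and Google catalogs;
    /// everyone else currently gets Claude 3.5 Sonnet. Models configured in
    /// settings (and, behind the `LlmClosedBeta` flag, the compile-time model
    /// list) are merged in afterwards, overriding built-in entries with the
    /// same id.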
    fn provided_models(&self, cx: &App) -> Vec<Arc<dyn LanguageModel>> {
        let mut models = BTreeMap::default();

        if cx.is_staff() {
            for model in anthropic::Model::iter() {
                if !matches!(model, anthropic::Model::Custom { .. }) {
                    models.insert(model.id().to_string(), CloudModel::Anthropic(model));
                }
            }
            for model in open_ai::Model::iter() {
                if !matches!(model, open_ai::Model::Custom { .. }) {
                    models.insert(model.id().to_string(), CloudModel::OpenAi(model));
                }
            }
            for model in google_ai::Model::iter() {
                if !matches!(model, google_ai::Model::Custom { .. }) {
                    models.insert(model.id().to_string(), CloudModel::Google(model));
                }
            }
        } else {
            models.insert(
                anthropic::Model::Claude3_5Sonnet.id().to_string(),
                CloudModel::Anthropic(anthropic::Model::Claude3_5Sonnet),
            );
        }

        let llm_closed_beta_models = if cx.has_flag::<LlmClosedBeta>() {
            zed_cloud_provider_additional_models()
        } else {
            &[]
        };

        // Override with available models from settings
        for model in AllLanguageModelSettings::get_global(cx)
            .zed_dot_dev
            .available_models
            .iter()
            .chain(llm_closed_beta_models)
            .cloned()
        {
            let model = match model.provider {
                AvailableProvider::Anthropic => CloudModel::Anthropic(anthropic::Model::Custom {
                    name: model.name.clone(),
                    display_name: model.display_name.clone(),
                    max_tokens: model.max_tokens,
                    tool_override: model.tool_override.clone(),
                    cache_configuration: model.cache_configuration.as_ref().map(|config| {
                        anthropic::AnthropicModelCacheConfiguration {
                            max_cache_anchors: config.max_cache_anchors,
                            should_speculate: config.should_speculate,
                            min_total_token: config.min_total_token,
                        }
                    }),
                    default_temperature: model.default_temperature,
                    max_output_tokens: model.max_output_tokens,
                    extra_beta_headers: model.extra_beta_headers.clone(),
                }),
                AvailableProvider::OpenAi => CloudModel::OpenAi(open_ai::Model::Custom {
                    name: model.name.clone(),
                    display_name: model.display_name.clone(),
                    max_tokens: model.max_tokens,
                    max_output_tokens: model.max_output_tokens,
                    max_completion_tokens: model.max_completion_tokens,
                }),
                AvailableProvider::Google => CloudModel::Google(google_ai::Model::Custom {
                    name: model.name.clone(),
                    display_name: model.display_name.clone(),
                    max_tokens: model.max_tokens,
                }),
            };
            models.insert(model.id().to_string(), model);
        }

        let llm_api_token = self.state.read(cx).llm_api_token.clone();
        models
            .into_values()
            .map(|model| {
                Arc::new(CloudLanguageModel {
                    id: LanguageModelId::from(model.id().to_string()),
                    model,
                    llm_api_token: llm_api_token.clone(),
                    client: self.client.clone(),
                    request_limiter: RateLimiter::new(4),
                }) as Arc<dyn LanguageModel>
            })
            .collect()
    }

    fn is_authenticated(&self, cx: &App) -> bool {
        !self.state.read(cx).is_signed_out()
    }

    fn authenticate(&self, _cx: &mut App) -> Task<Result<(), AuthenticateError>> {
        Task::ready(Ok(()))
    }

    fn configuration_view(&self, _: &mut Window, cx: &mut App) -> AnyView {
        cx.new(|_| ConfigurationView {
            state: self.state.clone(),
        })
        .into()
    }

    fn must_accept_terms(&self, cx: &App) -> bool {
        !self.state.read(cx).has_accepted_terms_of_service(cx)
    }

    fn render_accept_terms(
        &self,
        view: LanguageModelProviderTosView,
        cx: &mut App,
    ) -> Option<AnyElement> {
        render_accept_terms(self.state.clone(), view, cx)
    }

    fn reset_credentials(&self, _cx: &mut App) -> Task<Result<()>> {
        Task::ready(Ok(()))
    }
}

fn render_accept_terms(
    state: Entity<State>,
    view_kind: LanguageModelProviderTosView,
    cx: &mut App,
) -> Option<AnyElement> {
    if state.read(cx).has_accepted_terms_of_service(cx) {
        return None;
    }

    let accept_terms_disabled = state.read(cx).accept_terms.is_some();

    let terms_button = Button::new("terms_of_service", "Terms of Service")
        .style(ButtonStyle::Subtle)
        .icon(IconName::ArrowUpRight)
        .icon_color(Color::Muted)
        .icon_size(IconSize::XSmall)
        .on_click(move |_, _window, cx| cx.open_url("https://zed.dev/terms-of-service"));

    let text = "To start using Zed AI, please read and accept the";

    let form = v_flex()
        .w_full()
        .gap_2()
        .when(
            view_kind == LanguageModelProviderTosView::ThreadEmptyState,
            |form| form.items_center(),
        )
        .child(
            h_flex()
                .flex_wrap()
                .when(
                    view_kind == LanguageModelProviderTosView::ThreadEmptyState,
                    |form| form.justify_center(),
                )
                .child(Label::new(text))
                .child(terms_button),
        )
        .child({
            let button_container = h_flex().w_full().child(
                Button::new("accept_terms", "I accept the Terms of Service")
                    .style(ButtonStyle::Tinted(TintColor::Accent))
                    .disabled(accept_terms_disabled)
                    .on_click({
                        let state = state.downgrade();
                        move |_, _window, cx| {
                            state
                                .update(cx, |state, cx| state.accept_terms_of_service(cx))
                                .ok();
                        }
                    }),
            );

            match view_kind {
                LanguageModelProviderTosView::ThreadEmptyState => button_container.justify_center(),
                LanguageModelProviderTosView::PromptEditorPopup => button_container.justify_end(),
                LanguageModelProviderTosView::Configuration => button_container.justify_start(),
            }
        });

    Some(form.into_any())
}

pub struct CloudLanguageModel {
    id: LanguageModelId,
    model: CloudModel,
    llm_api_token: LlmApiToken,
    client: Arc<Client>,
    request_limiter: RateLimiter,
}

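/// A cached LLM service token, shared via `Arc` between the provider's
/// [`State`] and every [`CloudLanguageModel`] it hands out. The token is
/// fetched lazily on first use and replaced whenever the server reports it as
/// expired or pushes a `RefreshLlmToken` message.
///
/// A minimal usage sketch (hypothetical caller, not a doctest):
///
/// ```ignore
/// let token = llm_api_token.acquire(&client).await?; // cached, or fetched on first use
/// let token = llm_api_token.refresh(&client).await?; // force a re-fetch
/// ```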
#[derive(Clone, Default)]
pub struct LlmApiToken(Arc<RwLock<Option<String>>>);

#[derive(Error, Debug)]
pub struct PaymentRequiredError;

impl fmt::Display for PaymentRequiredError {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(
            f,
            "Payment required to use this language model. Please upgrade your account."
        )
    }
}

#[derive(Error, Debug)]
pub struct MaxMonthlySpendReachedError;

impl fmt::Display for MaxMonthlySpendReachedError {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(
            f,
            "Maximum spending limit reached for this month. For more usage, increase your spending limit."
        )
    }
}

impl CloudLanguageModel {
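    /// POSTs a completion request to the Zed LLM service. If the server
    /// rejects the cached token as expired, the token is refreshed and the
    /// request retried once; spending-limit and payment failures are mapped to
    /// [`MaxMonthlySpendReachedError`] and [`PaymentRequiredError`] so callers
    /// can surface them distinctly.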
    async fn perform_llm_completion(
        client: Arc<Client>,
        llm_api_token: LlmApiToken,
        body: PerformCompletionParams,
    ) -> Result<Response<AsyncBody>> {
        let http_client = &client.http_client();

        let mut token = llm_api_token.acquire(&client).await?;
        let mut did_retry = false;

        let response = loop {
            let request_builder = http_client::Request::builder();
            let request = request_builder
                .method(Method::POST)
                .uri(http_client.build_zed_llm_url("/completion", &[])?.as_ref())
                .header("Content-Type", "application/json")
                .header("Authorization", format!("Bearer {token}"))
                .body(serde_json::to_string(&body)?.into())?;
            let mut response = http_client.send(request).await?;
            if response.status().is_success() {
                break response;
            } else if !did_retry
                && response
                    .headers()
                    .get(EXPIRED_LLM_TOKEN_HEADER_NAME)
                    .is_some()
            {
                // The cached token expired; refresh it and retry the request once.
                did_retry = true;
                token = llm_api_token.refresh(&client).await?;
            } else if response.status() == StatusCode::FORBIDDEN
                && response
                    .headers()
                    .get(MAX_LLM_MONTHLY_SPEND_REACHED_HEADER_NAME)
                    .is_some()
            {
                break Err(anyhow!(MaxMonthlySpendReachedError))?;
            } else if response.status() == StatusCode::PAYMENT_REQUIRED {
                break Err(anyhow!(PaymentRequiredError))?;
            } else {
                let mut body = String::new();
                response.body_mut().read_to_string(&mut body).await?;
                break Err(anyhow!(
                    "cloud language model completion failed with status {}: {body}",
                    response.status()
                ))?;
            }
        };

        Ok(response)
    }
}

impl LanguageModel for CloudLanguageModel {
    fn id(&self) -> LanguageModelId {
        self.id.clone()
    }

    fn name(&self) -> LanguageModelName {
        LanguageModelName::from(self.model.display_name().to_string())
    }

    fn icon(&self) -> Option<IconName> {
        self.model.icon()
    }

    fn provider_id(&self) -> LanguageModelProviderId {
        LanguageModelProviderId(ZED_CLOUD_PROVIDER_ID.into())
    }

    fn provider_name(&self) -> LanguageModelProviderName {
        LanguageModelProviderName(PROVIDER_NAME.into())
    }

    fn telemetry_id(&self) -> String {
        format!("zed.dev/{}", self.model.id())
    }

    fn availability(&self) -> LanguageModelAvailability {
        self.model.availability()
    }

    fn max_token_count(&self) -> usize {
        self.model.max_token_count()
    }

    fn cache_configuration(&self) -> Option<LanguageModelCacheConfiguration> {
        match &self.model {
            CloudModel::Anthropic(model) => {
                model
                    .cache_configuration()
                    .map(|cache| LanguageModelCacheConfiguration {
                        max_cache_anchors: cache.max_cache_anchors,
                        should_speculate: cache.should_speculate,
                        min_total_token: cache.min_total_token,
                    })
            }
            CloudModel::OpenAi(_) | CloudModel::Google(_) => None,
        }
    }

    fn count_tokens(
        &self,
        request: LanguageModelRequest,
        cx: &App,
    ) -> BoxFuture<'static, Result<usize>> {
        match self.model.clone() {
            CloudModel::Anthropic(_) => count_anthropic_tokens(request, cx),
            CloudModel::OpenAi(model) => count_open_ai_tokens(request, model, cx),
            CloudModel::Google(model) => {
                let client = self.client.clone();
                let request = request.into_google(model.id().into());
                let request = google_ai::CountTokensRequest {
                    contents: request.contents,
                };
                async move {
                    let request = serde_json::to_string(&request)?;
                    let response = client
                        .request(proto::CountLanguageModelTokens {
                            provider: proto::LanguageModelProvider::Google as i32,
                            request,
                        })
                        .await?;
                    Ok(response.token_count as usize)
                }
                .boxed()
            }
        }
    }

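    /// Converts the request into the selected provider's wire format, forwards
    /// it as opaque JSON (`provider_request`) through the Zed LLM service, and
    /// adapts the resulting event stream into [`LanguageModelCompletionEvent`]s.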
    fn stream_completion(
        &self,
        request: LanguageModelRequest,
        _cx: &AsyncApp,
    ) -> BoxFuture<'static, Result<BoxStream<'static, Result<LanguageModelCompletionEvent>>>> {
        match &self.model {
            CloudModel::Anthropic(model) => {
                let request = request.into_anthropic(
                    model.id().into(),
                    model.default_temperature(),
                    model.max_output_tokens(),
                );
                let client = self.client.clone();
                let llm_api_token = self.llm_api_token.clone();
                let future = self.request_limiter.stream(async move {
                    let response = Self::perform_llm_completion(
                        client.clone(),
                        llm_api_token,
                        PerformCompletionParams {
                            provider: client::LanguageModelProvider::Anthropic,
                            model: request.model.clone(),
                            provider_request: RawValue::from_string(serde_json::to_string(
                                &request,
                            )?)?,
                        },
                    )
                    .await?;
                    Ok(map_to_language_model_completion_events(Box::pin(
                        response_lines(response).map_err(AnthropicError::Other),
                    )))
                });
                async move { Ok(future.await?.boxed()) }.boxed()
            }
            CloudModel::OpenAi(model) => {
                let client = self.client.clone();
                let request = request.into_open_ai(model.id().into(), model.max_output_tokens());
                let llm_api_token = self.llm_api_token.clone();
                let future = self.request_limiter.stream(async move {
                    let response = Self::perform_llm_completion(
                        client.clone(),
                        llm_api_token,
                        PerformCompletionParams {
                            provider: client::LanguageModelProvider::OpenAi,
                            model: request.model.clone(),
                            provider_request: RawValue::from_string(serde_json::to_string(
                                &request,
                            )?)?,
                        },
                    )
                    .await?;
                    Ok(open_ai::extract_text_from_events(response_lines(response)))
                });
                async move {
                    Ok(future
                        .await?
                        .map(|result| result.map(LanguageModelCompletionEvent::Text))
                        .boxed())
                }
                .boxed()
            }
            CloudModel::Google(model) => {
                let client = self.client.clone();
                let request = request.into_google(model.id().into());
                let llm_api_token = self.llm_api_token.clone();
                let future = self.request_limiter.stream(async move {
                    let response = Self::perform_llm_completion(
                        client.clone(),
                        llm_api_token,
                        PerformCompletionParams {
                            provider: client::LanguageModelProvider::Google,
                            model: request.model.clone(),
                            provider_request: RawValue::from_string(serde_json::to_string(
                                &request,
                            )?)?,
                        },
                    )
                    .await?;
                    Ok(google_ai::extract_text_from_events(response_lines(
                        response,
                    )))
                });
                async move {
                    Ok(future
                        .await?
                        .map(|result| result.map(LanguageModelCompletionEvent::Text))
                        .boxed())
                }
                .boxed()
            }
        }
    }

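    /// Forces a single tool call: the request constrains the model to invoke
    /// `tool_name`, and the returned stream yields the tool's input arguments
    /// as JSON fragments. Not implemented for Google models.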
    fn use_any_tool(
        &self,
        request: LanguageModelRequest,
        tool_name: String,
        tool_description: String,
        input_schema: serde_json::Value,
        _cx: &AsyncApp,
    ) -> BoxFuture<'static, Result<BoxStream<'static, Result<String>>>> {
        let client = self.client.clone();
        let llm_api_token = self.llm_api_token.clone();

        match &self.model {
            CloudModel::Anthropic(model) => {
                let mut request = request.into_anthropic(
                    model.tool_model_id().into(),
                    model.default_temperature(),
                    model.max_output_tokens(),
                );
                request.tool_choice = Some(anthropic::ToolChoice::Tool {
                    name: tool_name.clone(),
                });
                request.tools = vec![anthropic::Tool {
                    name: tool_name.clone(),
                    description: tool_description,
                    input_schema,
                }];

                self.request_limiter
                    .run(async move {
                        let response = Self::perform_llm_completion(
                            client.clone(),
                            llm_api_token,
                            PerformCompletionParams {
                                provider: client::LanguageModelProvider::Anthropic,
                                model: request.model.clone(),
                                provider_request: RawValue::from_string(serde_json::to_string(
                                    &request,
                                )?)?,
                            },
                        )
                        .await?;

                        Ok(anthropic::extract_tool_args_from_events(
                            tool_name,
                            Box::pin(response_lines(response)),
                        )
                        .await?
                        .boxed())
                    })
                    .boxed()
            }
            CloudModel::OpenAi(model) => {
                let mut request =
                    request.into_open_ai(model.id().into(), model.max_output_tokens());
                request.tool_choice = Some(open_ai::ToolChoice::Other(
                    open_ai::ToolDefinition::Function {
                        function: open_ai::FunctionDefinition {
                            name: tool_name.clone(),
                            description: None,
                            parameters: None,
                        },
                    },
                ));
                request.tools = vec![open_ai::ToolDefinition::Function {
                    function: open_ai::FunctionDefinition {
                        name: tool_name.clone(),
                        description: Some(tool_description),
                        parameters: Some(input_schema),
                    },
                }];

                self.request_limiter
                    .run(async move {
                        let response = Self::perform_llm_completion(
                            client.clone(),
                            llm_api_token,
                            PerformCompletionParams {
                                provider: client::LanguageModelProvider::OpenAi,
                                model: request.model.clone(),
                                provider_request: RawValue::from_string(serde_json::to_string(
                                    &request,
                                )?)?,
                            },
                        )
                        .await?;

                        Ok(open_ai::extract_tool_args_from_events(
                            tool_name,
                            Box::pin(response_lines(response)),
                        )
                        .await?
                        .boxed())
                    })
                    .boxed()
            }
            CloudModel::Google(_) => {
                future::ready(Err(anyhow!("tool use not implemented for Google AI"))).boxed()
            }
        }
    }
}

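/// Interprets a streaming response body as newline-delimited JSON, yielding
/// one deserialized `T` per line.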
fn response_lines<T: DeserializeOwned>(
    response: Response<AsyncBody>,
) -> impl Stream<Item = Result<T>> {
    futures::stream::try_unfold(
        (String::new(), BufReader::new(response.into_body())),
        move |(mut line, mut body)| async {
            match body.read_line(&mut line).await {
                Ok(0) => Ok(None),
                Ok(_) => {
                    let event: T = serde_json::from_str(&line)?;
                    line.clear();
                    Ok(Some((event, (line, body))))
                }
                Err(e) => Err(e.into()),
            }
        },
    )
}

impl LlmApiToken {
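    /// Returns the cached token, fetching one from the server if none is
    /// cached yet. An upgradable read lock lets concurrent callers share a
    /// cached token without contending on a write lock.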
    pub async fn acquire(&self, client: &Arc<Client>) -> Result<String> {
        let lock = self.0.upgradable_read().await;
        if let Some(token) = lock.as_ref() {
            Ok(token.to_string())
        } else {
            Self::fetch(RwLockUpgradableReadGuard::upgrade(lock).await, client).await
        }
    }

    /// Unconditionally fetches a fresh token from the server, replacing any
    /// cached one.
    pub async fn refresh(&self, client: &Arc<Client>) -> Result<String> {
        Self::fetch(self.0.write().await, client).await
    }

    async fn fetch<'a>(
        mut lock: RwLockWriteGuard<'a, Option<String>>,
        client: &Arc<Client>,
    ) -> Result<String> {
        let response = client.request(proto::GetLlmToken {}).await?;
        *lock = Some(response.token.clone());
        Ok(response.token)
    }
}

struct ConfigurationView {
    state: gpui::Entity<State>,
}

impl ConfigurationView {
    fn authenticate(&mut self, cx: &mut Context<Self>) {
        self.state.update(cx, |state, cx| {
            state.authenticate(cx).detach_and_log_err(cx);
        });
        cx.notify();
    }
}

impl Render for ConfigurationView {
    fn render(&mut self, _: &mut Window, cx: &mut Context<Self>) -> impl IntoElement {
        const ZED_AI_URL: &str = "https://zed.dev/ai";

        let is_connected = !self.state.read(cx).is_signed_out();
        let plan = self.state.read(cx).user_store.read(cx).current_plan();
        let has_accepted_terms = self.state.read(cx).has_accepted_terms_of_service(cx);

        let is_pro = plan == Some(proto::Plan::ZedPro);
        let subscription_text = Label::new(if is_pro {
            "You have full access to Zed's hosted LLMs, which include models from Anthropic, OpenAI, and Google. They come with faster speeds and higher limits through Zed Pro."
        } else {
            "You have basic access to models from Anthropic through the Zed AI Free plan."
        });
        let manage_subscription_button = if is_pro {
            Some(
                h_flex().child(
                    Button::new("manage_settings", "Manage Subscription")
                        .style(ButtonStyle::Tinted(TintColor::Accent))
                        .on_click(
                            cx.listener(|_, _, _, cx| cx.open_url(&zed_urls::account_url(cx))),
                        ),
                ),
            )
        } else if cx.has_flag::<ZedPro>() {
            Some(
                h_flex()
                    .gap_2()
                    .child(
                        Button::new("learn_more", "Learn more")
                            .style(ButtonStyle::Subtle)
                            .on_click(cx.listener(|_, _, _, cx| cx.open_url(ZED_AI_URL))),
                    )
                    .child(
                        Button::new("upgrade", "Upgrade")
                            .style(ButtonStyle::Subtle)
                            .color(Color::Accent)
                            .on_click(
                                cx.listener(|_, _, _, cx| cx.open_url(&zed_urls::account_url(cx))),
                            ),
                    ),
            )
        } else {
            None
        };

        if is_connected {
            v_flex()
                .gap_3()
                .w_full()
                .children(render_accept_terms(
                    self.state.clone(),
                    LanguageModelProviderTosView::Configuration,
                    cx,
                ))
                .when(has_accepted_terms, |this| {
                    this.child(subscription_text)
                        .children(manage_subscription_button)
                })
        } else {
            v_flex()
                .gap_2()
                .child(Label::new("Use Zed AI to access hosted language models."))
                .child(
                    Button::new("sign_in", "Sign In")
                        .icon_color(Color::Muted)
                        .icon(IconName::Github)
                        .icon_position(IconPosition::Start)
                        .on_click(cx.listener(move |this, _, _, cx| this.authenticate(cx))),
                )
        }
    }
}