use super::open_ai::count_open_ai_tokens;
use crate::{
    settings::AllLanguageModelSettings, CloudModel, LanguageModel, LanguageModelId,
    LanguageModelName, LanguageModelProviderId, LanguageModelProviderName,
    LanguageModelProviderState, LanguageModelRequest, RateLimiter, ZedModel,
};
use anyhow::{anyhow, Context as _, Result};
use client::{Client, PerformCompletionParams, UserStore, EXPIRED_LLM_TOKEN_HEADER_NAME};
use collections::BTreeMap;
use feature_flags::{FeatureFlag, FeatureFlagAppExt};
use futures::{future::BoxFuture, stream::BoxStream, AsyncBufReadExt, FutureExt, StreamExt};
use gpui::{AnyView, AppContext, AsyncAppContext, Model, ModelContext, Subscription, Task};
use http_client::{AsyncBody, HttpClient, Method, Response};
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};
use serde_json::value::RawValue;
use settings::{Settings, SettingsStore};
use smol::{
    io::BufReader,
    lock::{RwLock, RwLockUpgradableReadGuard, RwLockWriteGuard},
};
use std::{future, sync::Arc};
use strum::IntoEnumIterator;
use ui::prelude::*;

use crate::{LanguageModelAvailability, LanguageModelProvider};

use super::anthropic::count_anthropic_tokens;

pub const PROVIDER_ID: &str = "zed.dev";
pub const PROVIDER_NAME: &str = "Zed";

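/// Settings for the zed.dev provider; currently just the set of custom models
/// to expose in addition to the built-in ones.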
#[derive(Default, Clone, Debug, PartialEq)]
pub struct ZedDotDevSettings {
    pub available_models: Vec<AvailableModel>,
}

#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema)]
#[serde(rename_all = "lowercase")]
pub enum AvailableProvider {
    Anthropic,
    OpenAi,
    Google,
}

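/// A custom model made available through the zed.dev provider via the user's
/// settings.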
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema)]
pub struct AvailableModel {
    provider: AvailableProvider,
    name: String,
    max_tokens: usize,
    tool_override: Option<String>,
}

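/// The zed.dev language model provider, which proxies requests for hosted
/// models through the Zed server.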
pub struct CloudLanguageModelProvider {
    client: Arc<Client>,
    llm_api_token: LlmApiToken,
    state: gpui::Model<State>,
    _maintain_client_status: Task<()>,
}

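/// Shared provider state: the RPC client, the user store, and the current
/// connection status.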
pub struct State {
    client: Arc<Client>,
    user_store: Model<UserStore>,
    status: client::Status,
    _subscription: Subscription,
}

impl State {
    fn is_signed_out(&self) -> bool {
        self.status.is_signed_out()
    }

    fn authenticate(&self, cx: &mut ModelContext<Self>) -> Task<Result<()>> {
        let client = self.client.clone();
        cx.spawn(move |this, mut cx| async move {
            client.authenticate_and_connect(true, &cx).await?;
            this.update(&mut cx, |_, cx| cx.notify())
        })
    }
}

impl CloudLanguageModelProvider {
    pub fn new(user_store: Model<UserStore>, client: Arc<Client>, cx: &mut AppContext) -> Self {
        let mut status_rx = client.status();
        let status = *status_rx.borrow();

        let state = cx.new_model(|cx| State {
            client: client.clone(),
            user_store,
            status,
            _subscription: cx.observe_global::<SettingsStore>(|_, cx| {
                cx.notify();
            }),
        });

        let state_ref = state.downgrade();
        let maintain_client_status = cx.spawn(|mut cx| async move {
            while let Some(status) = status_rx.next().await {
                if let Some(this) = state_ref.upgrade() {
                    _ = this.update(&mut cx, |this, cx| {
                        if this.status != status {
                            this.status = status;
                            cx.notify();
                        }
                    });
                } else {
                    break;
                }
            }
        });

        Self {
            client,
            state,
            llm_api_token: LlmApiToken::default(),
            _maintain_client_status: maintain_client_status,
        }
    }
}

impl LanguageModelProviderState for CloudLanguageModelProvider {
    type ObservableEntity = State;

    fn observable_entity(&self) -> Option<gpui::Model<Self::ObservableEntity>> {
        Some(self.state.clone())
    }
}

impl LanguageModelProvider for CloudLanguageModelProvider {
    fn id(&self) -> LanguageModelProviderId {
        LanguageModelProviderId(PROVIDER_ID.into())
    }

    fn name(&self) -> LanguageModelProviderName {
        LanguageModelProviderName(PROVIDER_NAME.into())
    }

    fn icon(&self) -> IconName {
        IconName::AiZed
    }

    fn provided_models(&self, cx: &AppContext) -> Vec<Arc<dyn LanguageModel>> {
        let mut models = BTreeMap::default();

        for model in anthropic::Model::iter() {
            if !matches!(model, anthropic::Model::Custom { .. }) {
                models.insert(model.id().to_string(), CloudModel::Anthropic(model));
            }
        }
        for model in open_ai::Model::iter() {
            if !matches!(model, open_ai::Model::Custom { .. }) {
                models.insert(model.id().to_string(), CloudModel::OpenAi(model));
            }
        }
        for model in google_ai::Model::iter() {
            if !matches!(model, google_ai::Model::Custom { .. }) {
                models.insert(model.id().to_string(), CloudModel::Google(model));
            }
        }
        for model in ZedModel::iter() {
            models.insert(model.id().to_string(), CloudModel::Zed(model));
        }

        // Override with available models from settings
        for model in &AllLanguageModelSettings::get_global(cx)
            .zed_dot_dev
            .available_models
        {
            let model = match model.provider {
                AvailableProvider::Anthropic => CloudModel::Anthropic(anthropic::Model::Custom {
                    name: model.name.clone(),
                    max_tokens: model.max_tokens,
                    tool_override: model.tool_override.clone(),
                }),
                AvailableProvider::OpenAi => CloudModel::OpenAi(open_ai::Model::Custom {
                    name: model.name.clone(),
                    max_tokens: model.max_tokens,
                }),
                AvailableProvider::Google => CloudModel::Google(google_ai::Model::Custom {
                    name: model.name.clone(),
                    max_tokens: model.max_tokens,
                }),
            };
            models.insert(model.id().to_string(), model.clone());
        }

        models
            .into_values()
            .map(|model| {
                Arc::new(CloudLanguageModel {
                    id: LanguageModelId::from(model.id().to_string()),
                    model,
                    llm_api_token: self.llm_api_token.clone(),
                    client: self.client.clone(),
                    request_limiter: RateLimiter::new(4),
                }) as Arc<dyn LanguageModel>
            })
            .collect()
    }

    fn is_authenticated(&self, cx: &AppContext) -> bool {
        !self.state.read(cx).is_signed_out()
    }

    fn authenticate(&self, _cx: &mut AppContext) -> Task<Result<()>> {
        Task::ready(Ok(()))
    }

    fn configuration_view(&self, cx: &mut WindowContext) -> AnyView {
        cx.new_view(|_cx| ConfigurationView {
            state: self.state.clone(),
        })
        .into()
    }

    fn reset_credentials(&self, _cx: &mut AppContext) -> Task<Result<()>> {
        Task::ready(Ok(()))
    }
}

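/// Feature flag gating the new LLM service. When disabled, completions are
/// proxied over the existing RPC connection instead of going to the service
/// directly.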
struct LlmServiceFeatureFlag;

impl FeatureFlag for LlmServiceFeatureFlag {
    const NAME: &'static str = "llm-service";

    fn enabled_for_staff() -> bool {
        false
    }
}

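/// A hosted model served through zed.dev.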
pub struct CloudLanguageModel {
    id: LanguageModelId,
    model: CloudModel,
    llm_api_token: LlmApiToken,
    client: Arc<Client>,
    request_limiter: RateLimiter,
}

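/// A cached LLM API token, shared across requests and refreshed when the
/// server reports that it has expired.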
#[derive(Clone, Default)]
struct LlmApiToken(Arc<RwLock<Option<String>>>);

impl CloudLanguageModel {
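    /// Sends a completion request to the LLM service, refreshing the API
    /// token and retrying once if the server indicates the token has expired.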
    async fn perform_llm_completion(
        client: Arc<Client>,
        llm_api_token: LlmApiToken,
        body: PerformCompletionParams,
    ) -> Result<Response<AsyncBody>> {
        let http_client = &client.http_client();

        let mut token = llm_api_token.acquire(&client).await?;
        let mut did_retry = false;

        let response = loop {
            let request = http_client::Request::builder()
                .method(Method::POST)
                .uri(http_client.build_zed_llm_url("/completion", &[])?.as_ref())
                .header("Content-Type", "application/json")
                .header("Authorization", format!("Bearer {token}"))
                .body(serde_json::to_string(&body)?.into())?;
            let response = http_client.send(request).await?;
            if response.status().is_success() {
                break response;
            } else if !did_retry
                && response
                    .headers()
                    .get(EXPIRED_LLM_TOKEN_HEADER_NAME)
                    .is_some()
            {
                did_retry = true;
                token = llm_api_token.refresh(&client).await?;
            } else {
                break Err(anyhow!(
                    "cloud language model completion failed with status {}",
                    response.status()
                ))?;
            }
        };

        Ok(response)
    }
}

impl LanguageModel for CloudLanguageModel {
    fn id(&self) -> LanguageModelId {
        self.id.clone()
    }

    fn name(&self) -> LanguageModelName {
        LanguageModelName::from(self.model.display_name().to_string())
    }

    fn provider_id(&self) -> LanguageModelProviderId {
        LanguageModelProviderId(PROVIDER_ID.into())
    }

    fn provider_name(&self) -> LanguageModelProviderName {
        LanguageModelProviderName(PROVIDER_NAME.into())
    }

    fn telemetry_id(&self) -> String {
        format!("zed.dev/{}", self.model.id())
    }

    fn availability(&self) -> LanguageModelAvailability {
        self.model.availability()
    }

    fn max_token_count(&self) -> usize {
        self.model.max_token_count()
    }

    fn count_tokens(
        &self,
        request: LanguageModelRequest,
        cx: &AppContext,
    ) -> BoxFuture<'static, Result<usize>> {
        match self.model.clone() {
            CloudModel::Anthropic(_) => count_anthropic_tokens(request, cx),
            CloudModel::OpenAi(model) => count_open_ai_tokens(request, model, cx),
            CloudModel::Google(model) => {
                let client = self.client.clone();
                let request = request.into_google(model.id().into());
                let request = google_ai::CountTokensRequest {
                    contents: request.contents,
                };
                async move {
                    let request = serde_json::to_string(&request)?;
                    let response = client
                        .request(proto::CountLanguageModelTokens {
                            provider: proto::LanguageModelProvider::Google as i32,
                            request,
                        })
                        .await?;
                    Ok(response.token_count as usize)
                }
                .boxed()
            }
            CloudModel::Zed(_) => {
                count_open_ai_tokens(request, open_ai::Model::ThreePointFiveTurbo, cx)
            }
        }
    }

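    /// Streams completion text for the given request. Behind the
    /// `llm-service` feature flag this talks to the LLM service over HTTP and
    /// parses newline-delimited JSON events; otherwise it streams events over
    /// the existing RPC connection.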
    fn stream_completion(
        &self,
        request: LanguageModelRequest,
        cx: &AsyncAppContext,
    ) -> BoxFuture<'static, Result<BoxStream<'static, Result<String>>>> {
        match &self.model {
            CloudModel::Anthropic(model) => {
                let request = request.into_anthropic(model.id().into());
                let client = self.client.clone();

                if cx
                    .update(|cx| cx.has_flag::<LlmServiceFeatureFlag>())
                    .unwrap_or(false)
                {
                    let llm_api_token = self.llm_api_token.clone();
                    let future = self.request_limiter.stream(async move {
                        let response = Self::perform_llm_completion(
                            client.clone(),
                            llm_api_token,
                            PerformCompletionParams {
                                provider: client::LanguageModelProvider::Anthropic,
                                model: request.model.clone(),
                                provider_request: RawValue::from_string(serde_json::to_string(
                                    &request,
                                )?)?,
                            },
                        )
                        .await?;
                        let body = BufReader::new(response.into_body());
                        let stream =
                            futures::stream::try_unfold(body, move |mut body| async move {
                                let mut buffer = String::new();
                                match body.read_line(&mut buffer).await {
                                    Ok(0) => Ok(None),
                                    Ok(_) => {
                                        let event: anthropic::Event =
                                            serde_json::from_str(&buffer)?;
                                        Ok(Some((event, body)))
                                    }
                                    Err(e) => Err(e.into()),
                                }
                            });

                        Ok(anthropic::extract_text_from_events(stream))
                    });
                    async move { Ok(future.await?.boxed()) }.boxed()
                } else {
                    let future = self.request_limiter.stream(async move {
                        let request = serde_json::to_string(&request)?;
                        let stream = client
                            .request_stream(proto::StreamCompleteWithLanguageModel {
                                provider: proto::LanguageModelProvider::Anthropic as i32,
                                request,
                            })
                            .await?
                            .map(|event| Ok(serde_json::from_str(&event?.event)?));
                        Ok(anthropic::extract_text_from_events(stream))
                    });
                    async move { Ok(future.await?.boxed()) }.boxed()
                }
            }
            CloudModel::OpenAi(model) => {
                let client = self.client.clone();
                let request = request.into_open_ai(model.id().into());

                if cx
                    .update(|cx| cx.has_flag::<LlmServiceFeatureFlag>())
                    .unwrap_or(false)
                {
                    let llm_api_token = self.llm_api_token.clone();
                    let future = self.request_limiter.stream(async move {
                        let response = Self::perform_llm_completion(
                            client.clone(),
                            llm_api_token,
                            PerformCompletionParams {
                                provider: client::LanguageModelProvider::OpenAi,
                                model: request.model.clone(),
                                provider_request: RawValue::from_string(serde_json::to_string(
                                    &request,
                                )?)?,
                            },
                        )
                        .await?;
                        let body = BufReader::new(response.into_body());
                        let stream =
                            futures::stream::try_unfold(body, move |mut body| async move {
                                let mut buffer = String::new();
                                match body.read_line(&mut buffer).await {
                                    Ok(0) => Ok(None),
                                    Ok(_) => {
                                        let event: open_ai::ResponseStreamEvent =
                                            serde_json::from_str(&buffer)?;
                                        Ok(Some((event, body)))
                                    }
                                    Err(e) => Err(e.into()),
                                }
                            });

                        Ok(open_ai::extract_text_from_events(stream))
                    });
                    async move { Ok(future.await?.boxed()) }.boxed()
                } else {
                    let future = self.request_limiter.stream(async move {
                        let request = serde_json::to_string(&request)?;
                        let stream = client
                            .request_stream(proto::StreamCompleteWithLanguageModel {
                                provider: proto::LanguageModelProvider::OpenAi as i32,
                                request,
                            })
                            .await?;
                        Ok(open_ai::extract_text_from_events(
                            stream.map(|item| Ok(serde_json::from_str(&item?.event)?)),
                        ))
                    });
                    async move { Ok(future.await?.boxed()) }.boxed()
                }
            }
            CloudModel::Google(model) => {
                let client = self.client.clone();
                let request = request.into_google(model.id().into());

                if cx
                    .update(|cx| cx.has_flag::<LlmServiceFeatureFlag>())
                    .unwrap_or(false)
                {
                    let llm_api_token = self.llm_api_token.clone();
                    let future = self.request_limiter.stream(async move {
                        let response = Self::perform_llm_completion(
                            client.clone(),
                            llm_api_token,
                            PerformCompletionParams {
                                provider: client::LanguageModelProvider::Google,
                                model: request.model.clone(),
                                provider_request: RawValue::from_string(serde_json::to_string(
                                    &request,
                                )?)?,
                            },
                        )
                        .await?;
                        let body = BufReader::new(response.into_body());
                        let stream =
                            futures::stream::try_unfold(body, move |mut body| async move {
                                let mut buffer = String::new();
                                match body.read_line(&mut buffer).await {
                                    Ok(0) => Ok(None),
                                    Ok(_) => {
                                        let event: google_ai::GenerateContentResponse =
                                            serde_json::from_str(&buffer)?;
                                        Ok(Some((event, body)))
                                    }
                                    Err(e) => Err(e.into()),
                                }
                            });

                        Ok(google_ai::extract_text_from_events(stream))
                    });
                    async move { Ok(future.await?.boxed()) }.boxed()
                } else {
                    let future = self.request_limiter.stream(async move {
                        let request = serde_json::to_string(&request)?;
                        let stream = client
                            .request_stream(proto::StreamCompleteWithLanguageModel {
                                provider: proto::LanguageModelProvider::Google as i32,
                                request,
                            })
                            .await?;
                        Ok(google_ai::extract_text_from_events(
                            stream.map(|item| Ok(serde_json::from_str(&item?.event)?)),
                        ))
                    });
                    async move { Ok(future.await?.boxed()) }.boxed()
                }
            }
            CloudModel::Zed(model) => {
                let client = self.client.clone();
                let mut request = request.into_open_ai(model.id().into());
                request.max_tokens = Some(4000);

                if cx
                    .update(|cx| cx.has_flag::<LlmServiceFeatureFlag>())
                    .unwrap_or(false)
                {
                    let llm_api_token = self.llm_api_token.clone();
                    let future = self.request_limiter.stream(async move {
                        let response = Self::perform_llm_completion(
                            client.clone(),
                            llm_api_token,
                            PerformCompletionParams {
                                provider: client::LanguageModelProvider::Zed,
                                model: request.model.clone(),
                                provider_request: RawValue::from_string(serde_json::to_string(
                                    &request,
                                )?)?,
                            },
                        )
                        .await?;
                        let body = BufReader::new(response.into_body());
                        let stream =
                            futures::stream::try_unfold(body, move |mut body| async move {
                                let mut buffer = String::new();
                                match body.read_line(&mut buffer).await {
                                    Ok(0) => Ok(None),
                                    Ok(_) => {
                                        let event: open_ai::ResponseStreamEvent =
                                            serde_json::from_str(&buffer)?;
                                        Ok(Some((event, body)))
                                    }
                                    Err(e) => Err(e.into()),
                                }
                            });

                        Ok(open_ai::extract_text_from_events(stream))
                    });
                    async move { Ok(future.await?.boxed()) }.boxed()
                } else {
                    let future = self.request_limiter.stream(async move {
                        let request = serde_json::to_string(&request)?;
                        let stream = client
                            .request_stream(proto::StreamCompleteWithLanguageModel {
                                provider: proto::LanguageModelProvider::Zed as i32,
                                request,
                            })
                            .await?;
                        Ok(open_ai::extract_text_from_events(
                            stream.map(|item| Ok(serde_json::from_str(&item?.event)?)),
                        ))
                    });
                    async move { Ok(future.await?.boxed()) }.boxed()
                }
            }
        }
    }

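    /// Requests that the model call the named tool. Only implemented for
    /// Anthropic models; other providers return an error.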
    fn use_any_tool(
        &self,
        request: LanguageModelRequest,
        tool_name: String,
        tool_description: String,
        input_schema: serde_json::Value,
        _cx: &AsyncAppContext,
    ) -> BoxFuture<'static, Result<serde_json::Value>> {
        match &self.model {
            CloudModel::Anthropic(model) => {
                let client = self.client.clone();
                let mut request = request.into_anthropic(model.tool_model_id().into());
                request.tool_choice = Some(anthropic::ToolChoice::Tool {
                    name: tool_name.clone(),
                });
                request.tools = vec![anthropic::Tool {
                    name: tool_name.clone(),
                    description: tool_description,
                    input_schema,
                }];

                self.request_limiter
                    .run(async move {
                        let request = serde_json::to_string(&request)?;
                        let response = client
                            .request(proto::CompleteWithLanguageModel {
                                provider: proto::LanguageModelProvider::Anthropic as i32,
                                request,
                            })
                            .await?;
                        let response: anthropic::Response =
                            serde_json::from_str(&response.completion)?;
                        response
                            .content
                            .into_iter()
                            .find_map(|content| {
                                if let anthropic::Content::ToolUse { name, input, .. } = content {
                                    if name == tool_name {
                                        Some(input)
                                    } else {
                                        None
                                    }
                                } else {
                                    None
                                }
                            })
                            .context("tool not used")
                    })
                    .boxed()
            }
            CloudModel::OpenAi(_) => {
                future::ready(Err(anyhow!("tool use not implemented for OpenAI"))).boxed()
            }
            CloudModel::Google(_) => {
                future::ready(Err(anyhow!("tool use not implemented for Google AI"))).boxed()
            }
            CloudModel::Zed(_) => {
                future::ready(Err(anyhow!("tool use not implemented for Zed models"))).boxed()
            }
        }
    }
}

impl LlmApiToken {
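    /// Returns the cached token, fetching a new one from the server if none
    /// has been acquired yet.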
    async fn acquire(&self, client: &Arc<Client>) -> Result<String> {
        let lock = self.0.upgradable_read().await;
        if let Some(token) = lock.as_ref() {
            Ok(token.to_string())
        } else {
            Self::fetch(RwLockUpgradableReadGuard::upgrade(lock).await, &client).await
        }
    }

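    /// Unconditionally fetches a fresh token, replacing whatever is cached.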
    async fn refresh(&self, client: &Arc<Client>) -> Result<String> {
        Self::fetch(self.0.write().await, &client).await
    }

    async fn fetch<'a>(
        mut lock: RwLockWriteGuard<'a, Option<String>>,
        client: &Arc<Client>,
    ) -> Result<String> {
        let response = client.request(proto::GetLlmToken {}).await?;
        *lock = Some(response.token.clone());
        Ok(response.token.clone())
    }
}

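/// The configuration UI for the provider: prompts the user to sign in when
/// disconnected and shows their current plan once connected.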
struct ConfigurationView {
    state: gpui::Model<State>,
}

impl ConfigurationView {
    fn authenticate(&mut self, cx: &mut ViewContext<Self>) {
        self.state.update(cx, |state, cx| {
            state.authenticate(cx).detach_and_log_err(cx);
        });
        cx.notify();
    }
}

impl Render for ConfigurationView {
    fn render(&mut self, cx: &mut ViewContext<Self>) -> impl IntoElement {
        const ZED_AI_URL: &str = "https://zed.dev/ai";
        const ACCOUNT_SETTINGS_URL: &str = "https://zed.dev/account";

        let is_connected = !self.state.read(cx).is_signed_out();
        let plan = self.state.read(cx).user_store.read(cx).current_plan();

        let is_pro = plan == Some(proto::Plan::ZedPro);

        if is_connected {
            v_flex()
                .gap_3()
                .max_w_4_5()
                .child(Label::new(
                    if is_pro {
                        "You have full access to Zed's hosted models from Anthropic, OpenAI, and Google, with faster speeds and higher limits through Zed Pro."
                    } else {
                        "You have basic access to models from Anthropic, OpenAI, Google, and more through the Zed AI Free plan."
                    }))
                .child(
                    if is_pro {
                        h_flex().child(
                            Button::new("manage_settings", "Manage Subscription")
                                .style(ButtonStyle::Filled)
                                .on_click(cx.listener(|_, _, cx| {
                                    cx.open_url(ACCOUNT_SETTINGS_URL)
                                })))
                    } else {
                        h_flex()
                            .gap_2()
                            .child(
                                Button::new("learn_more", "Learn more")
                                    .style(ButtonStyle::Subtle)
                                    .on_click(cx.listener(|_, _, cx| {
                                        cx.open_url(ZED_AI_URL)
                                    })))
                            .child(
                                Button::new("upgrade", "Upgrade")
                                    .style(ButtonStyle::Subtle)
                                    .color(Color::Accent)
                                    .on_click(cx.listener(|_, _, cx| {
                                        cx.open_url(ACCOUNT_SETTINGS_URL)
                                    })))
                    },
                )
        } else {
            v_flex()
                .gap_6()
                .child(Label::new("Use zed.dev to access language models."))
                .child(
                    v_flex()
                        .gap_2()
                        .child(
                            Button::new("sign_in", "Sign in")
                                .icon_color(Color::Muted)
                                .icon(IconName::Github)
                                .icon_position(IconPosition::Start)
                                .style(ButtonStyle::Filled)
                                .full_width()
                                .on_click(cx.listener(move |this, _, cx| this.authenticate(cx))),
                        )
                        .child(
                            div().flex().w_full().items_center().child(
                                Label::new("Sign in to enable collaboration.")
                                    .color(Color::Muted)
                                    .size(LabelSize::Small),
                            ),
                        ),
                )
        }
    }
}