cloud.rs

use anthropic::{AnthropicError, AnthropicModelMode};
use anyhow::{Result, anyhow};
use client::{
    Client, EXPIRED_LLM_TOKEN_HEADER_NAME, MAX_LLM_MONTHLY_SPEND_REACHED_HEADER_NAME,
    PerformCompletionParams, UserStore, zed_urls,
};
use collections::BTreeMap;
use feature_flags::{FeatureFlagAppExt, LlmClosedBeta, ZedPro};
use futures::{
    AsyncBufReadExt, FutureExt, Stream, StreamExt, TryStreamExt as _, future::BoxFuture,
    stream::BoxStream,
};
use gpui::{AnyElement, AnyView, App, AsyncApp, Context, Entity, Subscription, Task};
use http_client::{AsyncBody, HttpClient, Method, Response, StatusCode};
use language_model::{
    AuthenticateError, CloudModel, LanguageModel, LanguageModelAvailability,
    LanguageModelCacheConfiguration, LanguageModelCompletionEvent, LanguageModelId,
    LanguageModelName, LanguageModelProvider, LanguageModelProviderId, LanguageModelProviderName,
    LanguageModelProviderState, LanguageModelProviderTosView, LanguageModelRequest,
    LanguageModelToolSchemaFormat, LlmApiToken, MaxMonthlySpendReachedError, PaymentRequiredError,
    RateLimiter, RefreshLlmTokenListener, ZED_CLOUD_PROVIDER_ID,
};
use schemars::JsonSchema;
use serde::{Deserialize, Serialize, de::DeserializeOwned};
use serde_json::value::RawValue;
use settings::{Settings, SettingsStore};
use smol::Timer;
use smol::io::{AsyncReadExt, BufReader};
use std::{
    sync::{Arc, LazyLock},
    time::Duration,
};
use strum::IntoEnumIterator;
use ui::{TintColor, prelude::*};

use crate::AllLanguageModelSettings;
use crate::provider::anthropic::{count_anthropic_tokens, into_anthropic};
use crate::provider::google::into_google;
use crate::provider::open_ai::{count_open_ai_tokens, into_open_ai};

pub const PROVIDER_NAME: &str = "Zed";

const ZED_CLOUD_PROVIDER_ADDITIONAL_MODELS_JSON: Option<&str> =
    option_env!("ZED_CLOUD_PROVIDER_ADDITIONAL_MODELS_JSON");

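/// Returns additional models baked in at compile time via the
/// `ZED_CLOUD_PROVIDER_ADDITIONAL_MODELS_JSON` environment variable.
///
/// The JSON is parsed once, on first access. An illustrative value
/// (hypothetical, for shape only), matching the [`AvailableModel`] schema:
///
/// ```json
/// [{"provider": "anthropic", "name": "claude-3-5-sonnet-20240620", "max_tokens": 200000}]
/// ```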
fn zed_cloud_provider_additional_models() -> &'static [AvailableModel] {
    static ADDITIONAL_MODELS: LazyLock<Vec<AvailableModel>> = LazyLock::new(|| {
        ZED_CLOUD_PROVIDER_ADDITIONAL_MODELS_JSON
            .map(|json| {
                serde_json::from_str(json)
                    .expect("ZED_CLOUD_PROVIDER_ADDITIONAL_MODELS_JSON must be valid JSON")
            })
            .unwrap_or_default()
    });
    ADDITIONAL_MODELS.as_slice()
}

#[derive(Default, Clone, Debug, PartialEq)]
pub struct ZedDotDevSettings {
    pub available_models: Vec<AvailableModel>,
}

#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema)]
#[serde(rename_all = "lowercase")]
pub enum AvailableProvider {
    Anthropic,
    OpenAi,
    Google,
}

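/// A model exposed through the Zed cloud provider, coming either from the
/// `zed_dot_dev` section of the language model settings or, behind the
/// `LlmClosedBeta` flag, from the compile-time JSON above.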
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema)]
pub struct AvailableModel {
    /// The provider of the language model.
    pub provider: AvailableProvider,
    /// The model's name in the provider's API, e.g. `claude-3-5-sonnet-20240620`.
    pub name: String,
    /// The name displayed in the UI, such as in the assistant panel model dropdown menu.
    pub display_name: Option<String>,
    /// The size of the context window, indicating the maximum number of tokens the model can process.
    pub max_tokens: usize,
    /// The maximum number of output tokens allowed by the model.
    pub max_output_tokens: Option<u32>,
    /// The maximum number of completion tokens allowed by the model (`o1-*` models only).
    pub max_completion_tokens: Option<u32>,
    /// Override this model with a different Anthropic model for tool calls.
    pub tool_override: Option<String>,
    /// Indicates whether this custom model supports caching.
    pub cache_configuration: Option<LanguageModelCacheConfiguration>,
    /// The default temperature to use for this model.
    pub default_temperature: Option<f32>,
    /// Any extra beta headers to provide when using the model.
    #[serde(default)]
    pub extra_beta_headers: Vec<String>,
    /// The model's mode (e.g. thinking).
    pub mode: Option<ModelMode>,
}

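/// The model's runtime mode. Serialized with an internal `type` tag, so a
/// hypothetical settings entry enabling extended thinking could look like:
///
/// ```json
/// {"type": "thinking", "budget_tokens": 4096}
/// ```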
#[derive(Default, Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema)]
#[serde(tag = "type", rename_all = "lowercase")]
pub enum ModelMode {
    #[default]
    Default,
    Thinking {
        /// The maximum number of tokens to use for reasoning. Must be lower than the model's `max_output_tokens`.
        budget_tokens: Option<u32>,
    },
}

impl From<ModelMode> for AnthropicModelMode {
    fn from(value: ModelMode) -> Self {
        match value {
            ModelMode::Default => AnthropicModelMode::Default,
            ModelMode::Thinking { budget_tokens } => AnthropicModelMode::Thinking { budget_tokens },
        }
    }
}

pub struct CloudLanguageModelProvider {
    client: Arc<Client>,
    state: gpui::Entity<State>,
    _maintain_client_status: Task<()>,
}

pub struct State {
    client: Arc<Client>,
    llm_api_token: LlmApiToken,
    user_store: Entity<UserStore>,
    status: client::Status,
    accept_terms: Option<Task<Result<()>>>,
    _settings_subscription: Subscription,
    _llm_token_subscription: Subscription,
}

impl State {
    fn new(
        client: Arc<Client>,
        user_store: Entity<UserStore>,
        status: client::Status,
        cx: &mut Context<Self>,
    ) -> Self {
        let refresh_llm_token_listener = RefreshLlmTokenListener::global(cx);

        Self {
            client: client.clone(),
            llm_api_token: LlmApiToken::default(),
            user_store,
            status,
            accept_terms: None,
            _settings_subscription: cx.observe_global::<SettingsStore>(|_, cx| {
                cx.notify();
            }),
            _llm_token_subscription: cx.subscribe(
                &refresh_llm_token_listener,
                |this, _listener, _event, cx| {
                    let client = this.client.clone();
                    let llm_api_token = this.llm_api_token.clone();
                    cx.spawn(async move |_this, _cx| {
                        llm_api_token.refresh(&client).await?;
                        anyhow::Ok(())
                    })
                    .detach_and_log_err(cx);
                },
            ),
        }
    }

    fn is_signed_out(&self) -> bool {
        self.status.is_signed_out()
    }

    fn authenticate(&self, cx: &mut Context<Self>) -> Task<Result<()>> {
        let client = self.client.clone();
        cx.spawn(async move |this, cx| {
            client.authenticate_and_connect(true, &cx).await?;
            this.update(cx, |_, cx| cx.notify())
        })
    }

    fn has_accepted_terms_of_service(&self, cx: &App) -> bool {
        self.user_store
            .read(cx)
            .current_user_has_accepted_terms()
            .unwrap_or(false)
    }

    fn accept_terms_of_service(&mut self, cx: &mut Context<Self>) {
        let user_store = self.user_store.clone();
        self.accept_terms = Some(cx.spawn(async move |this, cx| {
            let _ = user_store
                .update(cx, |store, cx| store.accept_terms_of_service(cx))?
                .await;
            this.update(cx, |this, cx| {
                this.accept_terms = None;
                cx.notify()
            })
        }));
    }
}

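// The provider holds a background task that watches the client's connection
// status and notifies `State` observers whenever it changes.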
impl CloudLanguageModelProvider {
    pub fn new(user_store: Entity<UserStore>, client: Arc<Client>, cx: &mut App) -> Self {
        let mut status_rx = client.status();
        let status = *status_rx.borrow();

        let state = cx.new(|cx| State::new(client.clone(), user_store.clone(), status, cx));

        let state_ref = state.downgrade();
        let maintain_client_status = cx.spawn(async move |cx| {
            while let Some(status) = status_rx.next().await {
                if let Some(this) = state_ref.upgrade() {
                    _ = this.update(cx, |this, cx| {
                        if this.status != status {
                            this.status = status;
                            cx.notify();
                        }
                    });
                } else {
                    break;
                }
            }
        });

        Self {
            client,
            state: state.clone(),
            _maintain_client_status: maintain_client_status,
        }
    }
}

impl LanguageModelProviderState for CloudLanguageModelProvider {
    type ObservableEntity = State;

    fn observable_entity(&self) -> Option<gpui::Entity<Self::ObservableEntity>> {
        Some(self.state.clone())
    }
}

impl LanguageModelProvider for CloudLanguageModelProvider {
    fn id(&self) -> LanguageModelProviderId {
        LanguageModelProviderId(ZED_CLOUD_PROVIDER_ID.into())
    }

    fn name(&self) -> LanguageModelProviderName {
        LanguageModelProviderName(PROVIDER_NAME.into())
    }

    fn icon(&self) -> IconName {
        IconName::AiZed
    }

    fn default_model(&self, cx: &App) -> Option<Arc<dyn LanguageModel>> {
        let llm_api_token = self.state.read(cx).llm_api_token.clone();
        let model = CloudModel::Anthropic(anthropic::Model::default());
        Some(Arc::new(CloudLanguageModel {
            id: LanguageModelId::from(model.id().to_string()),
            model,
            llm_api_token,
            client: self.client.clone(),
            request_limiter: RateLimiter::new(4),
        }))
    }

    fn provided_models(&self, cx: &App) -> Vec<Arc<dyn LanguageModel>> {
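        // Keyed by model ID in a BTreeMap, so later entries (settings and
        // closed-beta models) replace the built-ins and the resulting list
        // has a stable order.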
        let mut models = BTreeMap::default();

        if cx.is_staff() {
            for model in anthropic::Model::iter() {
                if !matches!(model, anthropic::Model::Custom { .. }) {
                    models.insert(model.id().to_string(), CloudModel::Anthropic(model));
                }
            }
            for model in open_ai::Model::iter() {
                if !matches!(model, open_ai::Model::Custom { .. }) {
                    models.insert(model.id().to_string(), CloudModel::OpenAi(model));
                }
            }
            for model in google_ai::Model::iter() {
                if !matches!(model, google_ai::Model::Custom { .. }) {
                    models.insert(model.id().to_string(), CloudModel::Google(model));
                }
            }
        } else {
            models.insert(
                anthropic::Model::Claude3_5Sonnet.id().to_string(),
                CloudModel::Anthropic(anthropic::Model::Claude3_5Sonnet),
            );
            models.insert(
                anthropic::Model::Claude3_7Sonnet.id().to_string(),
                CloudModel::Anthropic(anthropic::Model::Claude3_7Sonnet),
            );
            models.insert(
                anthropic::Model::Claude3_7SonnetThinking.id().to_string(),
                CloudModel::Anthropic(anthropic::Model::Claude3_7SonnetThinking),
            );
        }

        let llm_closed_beta_models = if cx.has_flag::<LlmClosedBeta>() {
            zed_cloud_provider_additional_models()
        } else {
            &[]
        };

        // Override with available models from settings
        for model in AllLanguageModelSettings::get_global(cx)
            .zed_dot_dev
            .available_models
            .iter()
            .chain(llm_closed_beta_models)
            .cloned()
        {
            let model = match model.provider {
                AvailableProvider::Anthropic => CloudModel::Anthropic(anthropic::Model::Custom {
                    name: model.name.clone(),
                    display_name: model.display_name.clone(),
                    max_tokens: model.max_tokens,
                    tool_override: model.tool_override.clone(),
                    cache_configuration: model.cache_configuration.as_ref().map(|config| {
                        anthropic::AnthropicModelCacheConfiguration {
                            max_cache_anchors: config.max_cache_anchors,
                            should_speculate: config.should_speculate,
                            min_total_token: config.min_total_token,
                        }
                    }),
                    default_temperature: model.default_temperature,
                    max_output_tokens: model.max_output_tokens,
                    extra_beta_headers: model.extra_beta_headers.clone(),
                    mode: model.mode.unwrap_or_default().into(),
                }),
                AvailableProvider::OpenAi => CloudModel::OpenAi(open_ai::Model::Custom {
                    name: model.name.clone(),
                    display_name: model.display_name.clone(),
                    max_tokens: model.max_tokens,
                    max_output_tokens: model.max_output_tokens,
                    max_completion_tokens: model.max_completion_tokens,
                }),
                AvailableProvider::Google => CloudModel::Google(google_ai::Model::Custom {
                    name: model.name.clone(),
                    display_name: model.display_name.clone(),
                    max_tokens: model.max_tokens,
                }),
            };
            models.insert(model.id().to_string(), model);
        }

        let llm_api_token = self.state.read(cx).llm_api_token.clone();
        models
            .into_values()
            .map(|model| {
                Arc::new(CloudLanguageModel {
                    id: LanguageModelId::from(model.id().to_string()),
                    model,
                    llm_api_token: llm_api_token.clone(),
                    client: self.client.clone(),
                    request_limiter: RateLimiter::new(4),
                }) as Arc<dyn LanguageModel>
            })
            .collect()
    }

    fn is_authenticated(&self, cx: &App) -> bool {
        !self.state.read(cx).is_signed_out()
    }

    fn authenticate(&self, _cx: &mut App) -> Task<Result<(), AuthenticateError>> {
        Task::ready(Ok(()))
    }

    fn configuration_view(&self, _: &mut Window, cx: &mut App) -> AnyView {
        cx.new(|_| ConfigurationView {
            state: self.state.clone(),
        })
        .into()
    }

    fn must_accept_terms(&self, cx: &App) -> bool {
        !self.state.read(cx).has_accepted_terms_of_service(cx)
    }

    fn render_accept_terms(
        &self,
        view: LanguageModelProviderTosView,
        cx: &mut App,
    ) -> Option<AnyElement> {
        render_accept_terms(self.state.clone(), view, cx)
    }

    fn reset_credentials(&self, _cx: &mut App) -> Task<Result<()>> {
        Task::ready(Ok(()))
    }
}

fn render_accept_terms(
    state: Entity<State>,
    view_kind: LanguageModelProviderTosView,
    cx: &mut App,
) -> Option<AnyElement> {
    if state.read(cx).has_accepted_terms_of_service(cx) {
        return None;
    }

    let accept_terms_disabled = state.read(cx).accept_terms.is_some();

    let thread_fresh_start = matches!(view_kind, LanguageModelProviderTosView::ThreadFreshStart);
    let thread_empty_state = matches!(view_kind, LanguageModelProviderTosView::ThreadtEmptyState);

    let terms_button = Button::new("terms_of_service", "Terms of Service")
        .style(ButtonStyle::Subtle)
        .icon(IconName::ArrowUpRight)
        .icon_color(Color::Muted)
        .icon_size(IconSize::XSmall)
        .when(thread_empty_state, |this| this.label_size(LabelSize::Small))
        .on_click(move |_, _window, cx| cx.open_url("https://zed.dev/terms-of-service"));

    let button_container = h_flex().child(
        Button::new("accept_terms", "I accept the Terms of Service")
            .when(!thread_empty_state, |this| {
                this.full_width()
                    .style(ButtonStyle::Tinted(TintColor::Accent))
                    .icon(IconName::Check)
                    .icon_position(IconPosition::Start)
                    .icon_size(IconSize::Small)
            })
            .when(thread_empty_state, |this| {
                this.style(ButtonStyle::Tinted(TintColor::Warning))
                    .label_size(LabelSize::Small)
            })
            .disabled(accept_terms_disabled)
            .on_click({
                let state = state.downgrade();
                move |_, _window, cx| {
                    state
                        .update(cx, |state, cx| state.accept_terms_of_service(cx))
                        .ok();
                }
            }),
    );

    let form = if thread_empty_state {
        h_flex()
            .w_full()
            .flex_wrap()
            .justify_between()
            .child(
                h_flex()
                    .child(
                        Label::new("To start using Zed AI, please read and accept the")
                            .size(LabelSize::Small),
                    )
                    .child(terms_button),
            )
            .child(button_container)
    } else {
        v_flex()
            .w_full()
            .gap_2()
            .child(
                h_flex()
                    .flex_wrap()
                    .when(thread_fresh_start, |this| this.justify_center())
                    .child(Label::new(
                        "To start using Zed AI, please read and accept the",
                    ))
                    .child(terms_button),
            )
            .child({
                match view_kind {
                    LanguageModelProviderTosView::PromptEditorPopup => {
                        button_container.w_full().justify_end()
                    }
                    LanguageModelProviderTosView::Configuration => {
                        button_container.w_full().justify_start()
                    }
                    LanguageModelProviderTosView::ThreadFreshStart => {
                        button_container.w_full().justify_center()
                    }
                    LanguageModelProviderTosView::ThreadtEmptyState => div().w_0(),
                }
            })
    };

    Some(form.into_any())
}

pub struct CloudLanguageModel {
    id: LanguageModelId,
    model: CloudModel,
    llm_api_token: LlmApiToken,
    client: Arc<Client>,
    request_limiter: RateLimiter,
}

impl CloudLanguageModel {
    const MAX_RETRIES: usize = 3;

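    /// Sends a completion request to Zed's LLM endpoint, retrying up to
    /// [`Self::MAX_RETRIES`] times.
    ///
    /// An expired-token response refreshes the token before retrying, and 5xx
    /// statuses retry with exponential backoff; monthly-spend-limit (403) and
    /// payment-required (402) responses surface as typed errors instead.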
    async fn perform_llm_completion(
        client: Arc<Client>,
        llm_api_token: LlmApiToken,
        body: PerformCompletionParams,
    ) -> Result<Response<AsyncBody>> {
        let http_client = &client.http_client();

        let mut token = llm_api_token.acquire(&client).await?;
        let mut retries_remaining = Self::MAX_RETRIES;
        let mut retry_delay = Duration::from_secs(1);

        loop {
            let request_builder = http_client::Request::builder();
            let request = request_builder
                .method(Method::POST)
                .uri(http_client.build_zed_llm_url("/completion", &[])?.as_ref())
                .header("Content-Type", "application/json")
                .header("Authorization", format!("Bearer {token}"))
                .body(serde_json::to_string(&body)?.into())?;
            let mut response = http_client.send(request).await?;
            let status = response.status();
            if status.is_success() {
                return Ok(response);
            } else if response
                .headers()
                .get(EXPIRED_LLM_TOKEN_HEADER_NAME)
                .is_some()
            {
                if retries_remaining == 0 {
                    return Err(anyhow!(
                        "cloud language model completion failed after {} retries: the LLM token kept expiring",
                        Self::MAX_RETRIES
                    ));
                }
                retries_remaining -= 1;
                token = llm_api_token.refresh(&client).await?;
            } else if status == StatusCode::FORBIDDEN
                && response
                    .headers()
                    .get(MAX_LLM_MONTHLY_SPEND_REACHED_HEADER_NAME)
                    .is_some()
            {
                return Err(anyhow!(MaxMonthlySpendReachedError));
            } else if status.as_u16() >= 500 && status.as_u16() < 600 {
                // If we encounter an error in the 500 range, retry after a delay.
                // We've seen at least these in the wild from API providers:
                // * 500 Internal Server Error
                // * 502 Bad Gateway
                // * 529 Service Overloaded

                if retries_remaining == 0 {
                    let mut body = String::new();
                    response.body_mut().read_to_string(&mut body).await?;
                    return Err(anyhow!(
                        "cloud language model completion failed after {} retries with status {status}: {body}",
                        Self::MAX_RETRIES
                    ));
                }

                Timer::after(retry_delay).await;

                retries_remaining -= 1;
                retry_delay *= 2; // If it fails again, wait longer.
            } else if status == StatusCode::PAYMENT_REQUIRED {
                return Err(anyhow!(PaymentRequiredError));
            } else {
                let mut body = String::new();
                response.body_mut().read_to_string(&mut body).await?;
                return Err(anyhow!(
                    "cloud language model completion failed with status {status}: {body}",
                ));
            }
        }
    }
}

impl LanguageModel for CloudLanguageModel {
    fn id(&self) -> LanguageModelId {
        self.id.clone()
    }

    fn name(&self) -> LanguageModelName {
        LanguageModelName::from(self.model.display_name().to_string())
    }

    fn icon(&self) -> Option<IconName> {
        self.model.icon()
    }

    fn provider_id(&self) -> LanguageModelProviderId {
        LanguageModelProviderId(ZED_CLOUD_PROVIDER_ID.into())
    }

    fn provider_name(&self) -> LanguageModelProviderName {
        LanguageModelProviderName(PROVIDER_NAME.into())
    }

    fn supports_tools(&self) -> bool {
        match self.model {
            CloudModel::Anthropic(_) => true,
            CloudModel::Google(_) => true,
            CloudModel::OpenAi(_) => false,
        }
    }

    fn telemetry_id(&self) -> String {
        format!("zed.dev/{}", self.model.id())
    }

    fn availability(&self) -> LanguageModelAvailability {
        self.model.availability()
    }

    fn tool_input_format(&self) -> LanguageModelToolSchemaFormat {
        self.model.tool_input_format()
    }

    fn max_token_count(&self) -> usize {
        self.model.max_token_count()
    }

    fn cache_configuration(&self) -> Option<LanguageModelCacheConfiguration> {
        match &self.model {
            CloudModel::Anthropic(model) => {
                model
                    .cache_configuration()
                    .map(|cache| LanguageModelCacheConfiguration {
                        max_cache_anchors: cache.max_cache_anchors,
                        should_speculate: cache.should_speculate,
                        min_total_token: cache.min_total_token,
                    })
            }
            CloudModel::OpenAi(_) | CloudModel::Google(_) => None,
        }
    }

    fn count_tokens(
        &self,
        request: LanguageModelRequest,
        cx: &App,
    ) -> BoxFuture<'static, Result<usize>> {
        match self.model.clone() {
            CloudModel::Anthropic(_) => count_anthropic_tokens(request, cx),
            CloudModel::OpenAi(model) => count_open_ai_tokens(request, model, cx),
            CloudModel::Google(model) => {
                let client = self.client.clone();
                let request = into_google(request, model.id().into());
                let request = google_ai::CountTokensRequest {
                    contents: request.contents,
                };
                async move {
                    let request = serde_json::to_string(&request)?;
                    let response = client
                        .request(proto::CountLanguageModelTokens {
                            provider: proto::LanguageModelProvider::Google as i32,
                            request,
                        })
                        .await?;
                    Ok(response.token_count as usize)
                }
                .boxed()
            }
        }
    }

    fn stream_completion(
        &self,
        request: LanguageModelRequest,
        _cx: &AsyncApp,
    ) -> BoxFuture<'static, Result<BoxStream<'static, Result<LanguageModelCompletionEvent>>>> {
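        // Each arm converts the unified request into the provider's wire
        // format, proxies it through Zed's `/completion` endpoint, and maps
        // the streamed response back into `LanguageModelCompletionEvent`s.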
        match &self.model {
            CloudModel::Anthropic(model) => {
                let request = into_anthropic(
                    request,
                    model.request_id().into(),
                    model.default_temperature(),
                    model.max_output_tokens(),
                    model.mode(),
                );
                let client = self.client.clone();
                let llm_api_token = self.llm_api_token.clone();
                let future = self.request_limiter.stream(async move {
                    let response = Self::perform_llm_completion(
                        client.clone(),
                        llm_api_token,
                        PerformCompletionParams {
                            provider: client::LanguageModelProvider::Anthropic,
                            model: request.model.clone(),
                            provider_request: RawValue::from_string(serde_json::to_string(
                                &request,
                            )?)?,
                        },
                    )
                    .await?;
                    Ok(
                        crate::provider::anthropic::map_to_language_model_completion_events(
                            Box::pin(response_lines(response).map_err(AnthropicError::Other)),
                        ),
                    )
                });
                async move { Ok(future.await?.boxed()) }.boxed()
            }
            CloudModel::OpenAi(model) => {
                let client = self.client.clone();
                let request = into_open_ai(request, model.id().into(), model.max_output_tokens());
                let llm_api_token = self.llm_api_token.clone();
                let future = self.request_limiter.stream(async move {
                    let response = Self::perform_llm_completion(
                        client.clone(),
                        llm_api_token,
                        PerformCompletionParams {
                            provider: client::LanguageModelProvider::OpenAi,
                            model: request.model.clone(),
                            provider_request: RawValue::from_string(serde_json::to_string(
                                &request,
                            )?)?,
                        },
                    )
                    .await?;
                    Ok(open_ai::extract_text_from_events(response_lines(response)))
                });
                async move {
                    Ok(future
                        .await?
                        .map(|result| result.map(LanguageModelCompletionEvent::Text))
                        .boxed())
                }
                .boxed()
            }
            CloudModel::Google(model) => {
                let client = self.client.clone();
                let request = into_google(request, model.id().into());
                let llm_api_token = self.llm_api_token.clone();
                let future = self.request_limiter.stream(async move {
                    let response = Self::perform_llm_completion(
                        client.clone(),
                        llm_api_token,
                        PerformCompletionParams {
                            provider: client::LanguageModelProvider::Google,
                            model: request.model.clone(),
                            provider_request: RawValue::from_string(serde_json::to_string(
                                &request,
                            )?)?,
                        },
                    )
                    .await?;
                    Ok(
                        crate::provider::google::map_to_language_model_completion_events(Box::pin(
                            response_lines(response),
                        )),
                    )
                });
                async move { Ok(future.await?.boxed()) }.boxed()
            }
        }
    }
}

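/// Deserializes a streaming response body as newline-delimited JSON,
/// yielding one `T` per line.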
fn response_lines<T: DeserializeOwned>(
    response: Response<AsyncBody>,
) -> impl Stream<Item = Result<T>> {
    futures::stream::try_unfold(
        (String::new(), BufReader::new(response.into_body())),
        move |(mut line, mut body)| async {
            match body.read_line(&mut line).await {
                Ok(0) => Ok(None),
                Ok(_) => {
                    let event: T = serde_json::from_str(&line)?;
                    line.clear();
                    Ok(Some((event, (line, body))))
                }
                Err(e) => Err(e.into()),
            }
        },
    )
}

struct ConfigurationView {
    state: gpui::Entity<State>,
}

impl ConfigurationView {
    fn authenticate(&mut self, cx: &mut Context<Self>) {
        self.state.update(cx, |state, cx| {
            state.authenticate(cx).detach_and_log_err(cx);
        });
        cx.notify();
    }
}

impl Render for ConfigurationView {
    fn render(&mut self, _: &mut Window, cx: &mut Context<Self>) -> impl IntoElement {
        const ZED_AI_URL: &str = "https://zed.dev/ai";

        let is_connected = !self.state.read(cx).is_signed_out();
        let plan = self.state.read(cx).user_store.read(cx).current_plan();
        let has_accepted_terms = self.state.read(cx).has_accepted_terms_of_service(cx);

        let is_pro = plan == Some(proto::Plan::ZedPro);
        let subscription_text = Label::new(if is_pro {
            "You have full access to Zed's hosted LLMs, which include models from Anthropic, OpenAI, and Google. They come with faster speeds and higher limits through Zed Pro."
        } else {
            "You have basic access to models from Anthropic through the Zed AI Free plan."
        });
        let manage_subscription_button = if is_pro {
            Some(
                h_flex().child(
                    Button::new("manage_settings", "Manage Subscription")
                        .style(ButtonStyle::Tinted(TintColor::Accent))
                        .on_click(
                            cx.listener(|_, _, _, cx| cx.open_url(&zed_urls::account_url(cx))),
                        ),
                ),
            )
        } else if cx.has_flag::<ZedPro>() {
            Some(
                h_flex()
                    .gap_2()
                    .child(
                        Button::new("learn_more", "Learn more")
                            .style(ButtonStyle::Subtle)
                            .on_click(cx.listener(|_, _, _, cx| cx.open_url(ZED_AI_URL))),
                    )
                    .child(
                        Button::new("upgrade", "Upgrade")
                            .style(ButtonStyle::Subtle)
                            .color(Color::Accent)
                            .on_click(
                                cx.listener(|_, _, _, cx| cx.open_url(&zed_urls::account_url(cx))),
                            ),
                    ),
            )
        } else {
            None
        };

        if is_connected {
            v_flex()
                .gap_3()
                .w_full()
                .children(render_accept_terms(
                    self.state.clone(),
                    LanguageModelProviderTosView::Configuration,
                    cx,
                ))
                .when(has_accepted_terms, |this| {
                    this.child(subscription_text)
                        .children(manage_subscription_button)
                })
        } else {
            v_flex()
                .gap_2()
                .child(Label::new("Use Zed AI to access hosted language models."))
                .child(
                    Button::new("sign_in", "Sign In")
                        .icon_color(Color::Muted)
                        .icon(IconName::Github)
                        .icon_position(IconPosition::Start)
                        .on_click(cx.listener(move |this, _, _, cx| this.authenticate(cx))),
                )
        }
    }
}