lmstudio.rs

use anyhow::{Result, anyhow};
use collections::HashMap;
use futures::Stream;
use futures::{FutureExt, StreamExt, future::BoxFuture, stream::BoxStream};
use gpui::{AnyView, App, AsyncApp, Context, Entity, Subscription, Task};
use http_client::HttpClient;
use language_model::{
    AuthenticateError, LanguageModelCompletionError, LanguageModelCompletionEvent,
    LanguageModelToolChoice, LanguageModelToolResultContent, LanguageModelToolUse, MessageContent,
    StopReason, TokenUsage,
};
use language_model::{
    IconOrSvg, LanguageModel, LanguageModelId, LanguageModelName, LanguageModelProvider,
    LanguageModelProviderId, LanguageModelProviderName, LanguageModelProviderState,
    LanguageModelRequest, RateLimiter, Role,
};
use lmstudio::{ModelType, get_models};
pub use settings::LmStudioAvailableModel as AvailableModel;
use settings::{Settings, SettingsStore};
use std::pin::Pin;
use std::str::FromStr;
use std::{collections::BTreeMap, sync::Arc};
use ui::{ButtonLike, Indicator, List, ListBulletItem, prelude::*};
use util::ResultExt;

use crate::AllLanguageModelSettings;

const LMSTUDIO_DOWNLOAD_URL: &str = "https://lmstudio.ai/download";
const LMSTUDIO_CATALOG_URL: &str = "https://lmstudio.ai/models";
const LMSTUDIO_SITE: &str = "https://lmstudio.ai/";

const PROVIDER_ID: LanguageModelProviderId = LanguageModelProviderId::new("lmstudio");
const PROVIDER_NAME: LanguageModelProviderName = LanguageModelProviderName::new("LM Studio");

#[derive(Default, Debug, Clone, PartialEq)]
pub struct LmStudioSettings {
    pub api_url: String,
    pub available_models: Vec<AvailableModel>,
}

pub struct LmStudioLanguageModelProvider {
    http_client: Arc<dyn HttpClient>,
    state: Entity<State>,
}

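/// Holds the models fetched from the local LM Studio server, plus the settings
/// subscription that re-fetches them whenever the provider settings change.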
pub struct State {
    http_client: Arc<dyn HttpClient>,
    available_models: Vec<lmstudio::Model>,
    fetch_model_task: Option<Task<Result<()>>>,
    _subscription: Subscription,
}

impl State {
    fn is_authenticated(&self) -> bool {
        !self.available_models.is_empty()
    }

    fn fetch_models(&mut self, cx: &mut Context<Self>) -> Task<Result<()>> {
        let settings = &AllLanguageModelSettings::get_global(cx).lmstudio;
        let http_client = self.http_client.clone();
        let api_url = settings.api_url.clone();

        // As a proxy for the server being "authenticated", we check whether it's up by fetching the models.
        cx.spawn(async move |this, cx| {
            let models = get_models(http_client.as_ref(), &api_url, None).await?;

            let mut models: Vec<lmstudio::Model> = models
                .into_iter()
                .filter(|model| model.r#type != ModelType::Embeddings)
                .map(|model| {
                    lmstudio::Model::new(
                        &model.id,
                        None,
                        model.loaded_context_length.or(model.max_context_length),
                        model.capabilities.supports_tool_calls(),
                        model.capabilities.supports_images() || model.r#type == ModelType::Vlm,
                    )
                })
                .collect();

            models.sort_by(|a, b| a.name.cmp(&b.name));

            this.update(cx, |this, cx| {
                this.available_models = models;
                cx.notify();
            })
        })
    }

    fn restart_fetch_models_task(&mut self, cx: &mut Context<Self>) {
        let task = self.fetch_models(cx);
        self.fetch_model_task.replace(task);
    }

    fn authenticate(&mut self, cx: &mut Context<Self>) -> Task<Result<(), AuthenticateError>> {
        if self.is_authenticated() {
            return Task::ready(Ok(()));
        }

        let fetch_models_task = self.fetch_models(cx);
        cx.spawn(async move |_this, _cx| {
            match fetch_models_task.await {
                Ok(()) => Ok(()),
                Err(err) => {
                    // If any cause in the error chain is an std::io::Error with
                    // ErrorKind::ConnectionRefused, treat this as "credentials not found"
                    // (i.e. LM Studio not running).
                    let connection_refused = err.chain().any(|cause| {
                        cause
                            .downcast_ref::<std::io::Error>()
                            .is_some_and(|io_err| {
                                io_err.kind() == std::io::ErrorKind::ConnectionRefused
                            })
                    });
                    if connection_refused {
                        Err(AuthenticateError::ConnectionRefused)
                    } else {
                        Err(AuthenticateError::Other(err))
                    }
                }
            }
        })
    }
}

impl LmStudioLanguageModelProvider {
    pub fn new(http_client: Arc<dyn HttpClient>, cx: &mut App) -> Self {
        let this = Self {
            http_client: http_client.clone(),
            state: cx.new(|cx| {
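                // Re-fetch the model list whenever the LM Studio settings
                // (e.g. api_url) change out from under us.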
                let subscription = cx.observe_global::<SettingsStore>({
                    let mut settings = AllLanguageModelSettings::get_global(cx).lmstudio.clone();
                    move |this: &mut State, cx| {
                        let new_settings = &AllLanguageModelSettings::get_global(cx).lmstudio;
                        if &settings != new_settings {
                            settings = new_settings.clone();
                            this.restart_fetch_models_task(cx);
                            cx.notify();
                        }
                    }
                });

                State {
                    http_client,
                    available_models: Default::default(),
                    fetch_model_task: None,
                    _subscription: subscription,
                }
            }),
        };
        this.state
            .update(cx, |state, cx| state.restart_fetch_models_task(cx));
        this
    }
}

impl LanguageModelProviderState for LmStudioLanguageModelProvider {
    type ObservableEntity = State;

    fn observable_entity(&self) -> Option<Entity<Self::ObservableEntity>> {
        Some(self.state.clone())
    }
}

impl LanguageModelProvider for LmStudioLanguageModelProvider {
    fn id(&self) -> LanguageModelProviderId {
        PROVIDER_ID
    }

    fn name(&self) -> LanguageModelProviderName {
        PROVIDER_NAME
    }

    fn icon(&self) -> IconOrSvg {
        IconOrSvg::Icon(IconName::AiLmStudio)
    }

    fn default_model(&self, _: &App) -> Option<Arc<dyn LanguageModel>> {
        // We shouldn't try to select a default model, because that can trigger a load call for an
        // unloaded model. In a constrained environment where the user might not have enough
        // resources, selecting something to load by default would be bad UX.
        None
    }

    fn default_fast_model(&self, _: &App) -> Option<Arc<dyn LanguageModel>> {
        // See explanation for default_model.
        None
    }

    fn provided_models(&self, cx: &App) -> Vec<Arc<dyn LanguageModel>> {
        let mut models: BTreeMap<String, lmstudio::Model> = BTreeMap::default();

        // Add models from the LM Studio API
        for model in self.state.read(cx).available_models.iter() {
            models.insert(model.name.clone(), model.clone());
        }

        // Override with available models from settings
        for model in AllLanguageModelSettings::get_global(cx)
            .lmstudio
            .available_models
            .iter()
        {
            models.insert(
                model.name.clone(),
                lmstudio::Model {
                    name: model.name.clone(),
                    display_name: model.display_name.clone(),
                    max_tokens: model.max_tokens,
                    supports_tool_calls: model.supports_tool_calls,
                    supports_images: model.supports_images,
                },
            );
        }

        models
            .into_values()
            .map(|model| {
                Arc::new(LmStudioLanguageModel {
                    id: LanguageModelId::from(model.name.clone()),
                    model,
                    http_client: self.http_client.clone(),
                    request_limiter: RateLimiter::new(4),
                }) as Arc<dyn LanguageModel>
            })
            .collect()
    }

    fn is_authenticated(&self, cx: &App) -> bool {
        self.state.read(cx).is_authenticated()
    }

    fn authenticate(&self, cx: &mut App) -> Task<Result<(), AuthenticateError>> {
        self.state.update(cx, |state, cx| state.authenticate(cx))
    }

    fn configuration_view(
        &self,
        _target_agent: language_model::ConfigurationViewTargetAgent,
        _window: &mut Window,
        cx: &mut App,
    ) -> AnyView {
        let state = self.state.clone();
        cx.new(|cx| ConfigurationView::new(state, cx)).into()
    }

    fn reset_credentials(&self, cx: &mut App) -> Task<Result<()>> {
        self.state.update(cx, |state, cx| state.fetch_models(cx))
    }
}

pub struct LmStudioLanguageModel {
    id: LanguageModelId,
    model: lmstudio::Model,
    http_client: Arc<dyn HttpClient>,
    request_limiter: RateLimiter,
}

impl LmStudioLanguageModel {
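    // Flattens Zed's multi-part messages into LM Studio's OpenAI-style chat
    // format: consecutive content parts with the same role are merged into one
    // message, and tool calls are folded into the current assistant message.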
    fn to_lmstudio_request(
        &self,
        request: LanguageModelRequest,
    ) -> lmstudio::ChatCompletionRequest {
        let mut messages = Vec::new();

        for message in request.messages {
            for content in message.content {
                match content {
                    MessageContent::Text(text) => add_message_content_part(
                        lmstudio::MessagePart::Text { text },
                        message.role,
                        &mut messages,
                    ),
                    MessageContent::Thinking { .. } => {}
                    MessageContent::RedactedThinking(_) => {}
                    MessageContent::Image(image) => {
                        add_message_content_part(
                            lmstudio::MessagePart::Image {
                                image_url: lmstudio::ImageUrl {
                                    url: image.to_base64_url(),
                                    detail: None,
                                },
                            },
                            message.role,
                            &mut messages,
                        );
                    }
                    MessageContent::ToolUse(tool_use) => {
                        let tool_call = lmstudio::ToolCall {
                            id: tool_use.id.to_string(),
                            content: lmstudio::ToolCallContent::Function {
                                function: lmstudio::FunctionContent {
                                    name: tool_use.name.to_string(),
                                    arguments: serde_json::to_string(&tool_use.input)
                                        .unwrap_or_default(),
                                },
                            },
                        };

                        if let Some(lmstudio::ChatMessage::Assistant { tool_calls, .. }) =
                            messages.last_mut()
                        {
                            tool_calls.push(tool_call);
                        } else {
                            messages.push(lmstudio::ChatMessage::Assistant {
                                content: None,
                                tool_calls: vec![tool_call],
                            });
                        }
                    }
                    MessageContent::ToolResult(tool_result) => {
                        let content = match &tool_result.content {
                            LanguageModelToolResultContent::Text(text) => {
                                vec![lmstudio::MessagePart::Text {
                                    text: text.to_string(),
                                }]
                            }
                            LanguageModelToolResultContent::Image(image) => {
                                vec![lmstudio::MessagePart::Image {
                                    image_url: lmstudio::ImageUrl {
                                        url: image.to_base64_url(),
                                        detail: None,
                                    },
                                }]
                            }
                        };

                        messages.push(lmstudio::ChatMessage::Tool {
                            content: content.into(),
                            tool_call_id: tool_result.tool_use_id.to_string(),
                        });
                    }
                }
            }
        }

        lmstudio::ChatCompletionRequest {
            model: self.model.name.clone(),
            messages,
            stream: true,
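            // We assume LM Studio's OpenAI-compatible server reads -1 as "no
            // explicit completion cap" rather than as a literal token budget.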
            max_tokens: Some(-1),
            stop: Some(request.stop),
            // In LM Studio you can configure model-specific settings, and Qwen3, for example, is
            // recommended to run at a temperature of 0.7. It would be bad UX to silently override
            // those settings from Zed, so we pass no temperature by default.
            temperature: request.temperature,
            tools: request
                .tools
                .into_iter()
                .map(|tool| lmstudio::ToolDefinition::Function {
                    function: lmstudio::FunctionDefinition {
                        name: tool.name,
                        description: Some(tool.description),
                        parameters: Some(tool.input_schema),
                    },
                })
                .collect(),
            tool_choice: request.tool_choice.map(|choice| match choice {
                LanguageModelToolChoice::Auto => lmstudio::ToolChoice::Auto,
                LanguageModelToolChoice::Any => lmstudio::ToolChoice::Required,
                LanguageModelToolChoice::None => lmstudio::ToolChoice::None,
            }),
        }
    }

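    // Issues the streaming HTTP request through the rate limiter (at most four
    // concurrent requests), unless the caller asked to bypass it.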
    fn stream_completion(
        &self,
        request: lmstudio::ChatCompletionRequest,
        bypass_rate_limit: bool,
        cx: &AsyncApp,
    ) -> BoxFuture<
        'static,
        Result<futures::stream::BoxStream<'static, Result<lmstudio::ResponseStreamEvent>>>,
    > {
        let http_client = self.http_client.clone();
        let Ok(api_url) = cx.update(|cx| {
            let settings = &AllLanguageModelSettings::get_global(cx).lmstudio;
            settings.api_url.clone()
        }) else {
            return futures::future::ready(Err(anyhow!("App state dropped"))).boxed();
        };

        let future = self.request_limiter.stream_with_bypass(
            async move {
                let request =
                    lmstudio::stream_chat_completion(http_client.as_ref(), &api_url, request);
                let response = request.await?;
                Ok(response)
            },
            bypass_rate_limit,
        );

        async move { Ok(future.await?.boxed()) }.boxed()
    }
}

impl LanguageModel for LmStudioLanguageModel {
    fn id(&self) -> LanguageModelId {
        self.id.clone()
    }

    fn name(&self) -> LanguageModelName {
        LanguageModelName::from(self.model.display_name().to_string())
    }

    fn provider_id(&self) -> LanguageModelProviderId {
        PROVIDER_ID
    }

    fn provider_name(&self) -> LanguageModelProviderName {
        PROVIDER_NAME
    }

    fn supports_tools(&self) -> bool {
        self.model.supports_tool_calls()
    }

    fn supports_tool_choice(&self, choice: LanguageModelToolChoice) -> bool {
        self.supports_tools()
            && match choice {
                LanguageModelToolChoice::Auto => true,
                LanguageModelToolChoice::Any => true,
                LanguageModelToolChoice::None => true,
            }
    }

    fn supports_images(&self) -> bool {
        self.model.supports_images
    }

    fn telemetry_id(&self) -> String {
        format!("lmstudio/{}", self.model.id())
    }

    fn max_token_count(&self) -> u64 {
        self.model.max_token_count()
    }

    fn count_tokens(
        &self,
        request: LanguageModelRequest,
        _cx: &App,
    ) -> BoxFuture<'static, Result<u64>> {
        // An endpoint for this is coming soon. In the meantime, use a hacky estimate:
        // count whitespace-separated words and scale by 0.75.
        let token_count = request
            .messages
            .iter()
            .map(|msg| msg.string_contents().split_whitespace().count())
            .sum::<usize>();

        let estimated_tokens = (token_count as f64 * 0.75) as u64;
        async move { Ok(estimated_tokens) }.boxed()
    }

    fn stream_completion(
        &self,
        request: LanguageModelRequest,
        cx: &AsyncApp,
    ) -> BoxFuture<
        'static,
        Result<
            BoxStream<'static, Result<LanguageModelCompletionEvent, LanguageModelCompletionError>>,
            LanguageModelCompletionError,
        >,
    > {
        let bypass_rate_limit = request.bypass_rate_limit;
        let request = self.to_lmstudio_request(request);
        let completions = self.stream_completion(request, bypass_rate_limit, cx);
        async move {
            let mapper = LmStudioEventMapper::new();
            Ok(mapper.map_stream(completions.await?).boxed())
        }
        .boxed()
    }
}

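/// Maps LM Studio's streamed response chunks to completion events, buffering
/// partial tool calls (which arrive fragmented across chunks, keyed by index)
/// until a `tool_calls` finish reason flushes them.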
struct LmStudioEventMapper {
    tool_calls_by_index: HashMap<usize, RawToolCall>,
}

impl LmStudioEventMapper {
    fn new() -> Self {
        Self {
            tool_calls_by_index: HashMap::default(),
        }
    }

    pub fn map_stream(
        mut self,
        events: Pin<Box<dyn Send + Stream<Item = Result<lmstudio::ResponseStreamEvent>>>>,
    ) -> impl Stream<Item = Result<LanguageModelCompletionEvent, LanguageModelCompletionError>>
    {
        events.flat_map(move |event| {
            futures::stream::iter(match event {
                Ok(event) => self.map_event(event),
                Err(error) => vec![Err(LanguageModelCompletionError::from(error))],
            })
        })
    }

    pub fn map_event(
        &mut self,
        event: lmstudio::ResponseStreamEvent,
    ) -> Vec<Result<LanguageModelCompletionEvent, LanguageModelCompletionError>> {
        let Some(choice) = event.choices.into_iter().next() else {
            return vec![Err(LanguageModelCompletionError::from(anyhow!(
                "Response contained no choices"
            )))];
        };

        let mut events = Vec::new();
        if let Some(content) = choice.delta.content {
            events.push(Ok(LanguageModelCompletionEvent::Text(content)));
        }

        if let Some(reasoning_content) = choice.delta.reasoning_content {
            events.push(Ok(LanguageModelCompletionEvent::Thinking {
                text: reasoning_content,
                signature: None,
            }));
        }

        if let Some(tool_calls) = choice.delta.tool_calls {
            for tool_call in tool_calls {
                let entry = self.tool_calls_by_index.entry(tool_call.index).or_default();

                if let Some(tool_id) = tool_call.id {
                    entry.id = tool_id;
                }

                if let Some(function) = tool_call.function {
                    if let Some(name) = function.name {
                        // At the time of writing, LM Studio (0.3.15) is incompatible with the OpenAI API:
                        // 1. It sends the function name in the first chunk.
                        // 2. It sends an empty string in the function name field of all subsequent chunks carrying arguments.
                        // According to https://platform.openai.com/docs/guides/function-calling?api-mode=responses#streaming
                        // the function name field should be sent only inside the first chunk.
                        if !name.is_empty() {
                            entry.name = name;
                        }
                    }

                    if let Some(arguments) = function.arguments {
                        entry.arguments.push_str(&arguments);
                    }
                }
            }
        }

        if let Some(usage) = event.usage {
            events.push(Ok(LanguageModelCompletionEvent::UsageUpdate(TokenUsage {
                input_tokens: usage.prompt_tokens,
                output_tokens: usage.completion_tokens,
                cache_creation_input_tokens: 0,
                cache_read_input_tokens: 0,
            })));
        }

        match choice.finish_reason.as_deref() {
            Some("stop") => {
                events.push(Ok(LanguageModelCompletionEvent::Stop(StopReason::EndTurn)));
            }
            Some("tool_calls") => {
                events.extend(self.tool_calls_by_index.drain().map(|(_, tool_call)| {
                    match serde_json::Value::from_str(&tool_call.arguments) {
                        Ok(input) => Ok(LanguageModelCompletionEvent::ToolUse(
                            LanguageModelToolUse {
                                id: tool_call.id.into(),
                                name: tool_call.name.into(),
                                is_input_complete: true,
                                input,
                                raw_input: tool_call.arguments,
                                thought_signature: None,
                            },
                        )),
                        Err(error) => Ok(LanguageModelCompletionEvent::ToolUseJsonParseError {
                            id: tool_call.id.into(),
                            tool_name: tool_call.name.into(),
                            raw_input: tool_call.arguments.into(),
                            json_parse_error: error.to_string(),
                        }),
                    }
                }));

                events.push(Ok(LanguageModelCompletionEvent::Stop(StopReason::ToolUse)));
            }
            Some(stop_reason) => {
                log::error!("Unexpected LM Studio stop_reason: {stop_reason:?}");
                events.push(Ok(LanguageModelCompletionEvent::Stop(StopReason::EndTurn)));
            }
            None => {}
        }

        events
    }
}

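/// A tool call accumulated across stream chunks: `arguments` grows as JSON
/// fragments arrive and is parsed once the stream finishes with "tool_calls".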
#[derive(Default)]
struct RawToolCall {
    id: String,
    name: String,
    arguments: String,
}

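/// Appends `new_part` to the last message when it has the same role; otherwise
/// starts a new message for `role`.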
fn add_message_content_part(
    new_part: lmstudio::MessagePart,
    role: Role,
    messages: &mut Vec<lmstudio::ChatMessage>,
) {
    match (role, messages.last_mut()) {
        (Role::User, Some(lmstudio::ChatMessage::User { content }))
        | (
            Role::Assistant,
            Some(lmstudio::ChatMessage::Assistant {
                content: Some(content),
                ..
            }),
        )
        | (Role::System, Some(lmstudio::ChatMessage::System { content })) => {
            content.push_part(new_part);
        }
        _ => {
            messages.push(match role {
                Role::User => lmstudio::ChatMessage::User {
                    content: lmstudio::MessageContent::from(vec![new_part]),
                },
                Role::Assistant => lmstudio::ChatMessage::Assistant {
                    content: Some(lmstudio::MessageContent::from(vec![new_part])),
                    tool_calls: Vec::new(),
                },
                Role::System => lmstudio::ChatMessage::System {
                    content: lmstudio::MessageContent::from(vec![new_part]),
                },
            });
        }
    }
}

struct ConfigurationView {
    state: Entity<State>,
    loading_models_task: Option<Task<()>>,
}

impl ConfigurationView {
    pub fn new(state: Entity<State>, cx: &mut Context<Self>) -> Self {
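        // Kick off an authentication attempt immediately so the view shows
        // "Loading models..." until the server responds or the attempt fails.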
        let loading_models_task = Some(cx.spawn({
            let state = state.clone();
            async move |this, cx| {
                if let Some(task) = state
                    .update(cx, |state, cx| state.authenticate(cx))
                    .log_err()
                {
                    task.await.log_err();
                }

                this.update(cx, |this, cx| {
                    this.loading_models_task = None;
                    cx.notify();
                })
                .log_err();
            }
        }));

        Self {
            state,
            loading_models_task,
        }
    }

    fn retry_connection(&self, cx: &mut App) {
        self.state
            .update(cx, |state, cx| state.fetch_models(cx))
            .detach_and_log_err(cx);
    }
}

impl Render for ConfigurationView {
    fn render(&mut self, _window: &mut Window, cx: &mut Context<Self>) -> impl IntoElement {
        let is_authenticated = self.state.read(cx).is_authenticated();

        let lmstudio_intro = "Run local LLMs like Llama, Phi, and Qwen.";

        if self.loading_models_task.is_some() {
            div().child(Label::new("Loading models...")).into_any()
        } else {
            v_flex()
                .gap_2()
                .child(
                    v_flex().gap_1().child(Label::new(lmstudio_intro)).child(
                        List::new()
                            .child(ListBulletItem::new(
                                "LM Studio needs to be running with at least one model downloaded.",
                            ))
                            .child(
                                ListBulletItem::new("")
                                    .child(Label::new("To get your first model, try running"))
                                    .child(Label::new("lms get qwen2.5-coder-7b").inline_code(cx)),
                            ),
                    ),
                )
                .child(
                    h_flex()
                        .w_full()
                        .justify_between()
                        .gap_2()
                        .child(
                            h_flex()
                                .w_full()
                                .gap_2()
                                .map(|this| {
                                    if is_authenticated {
                                        this.child(
                                            Button::new("lmstudio-site", "LM Studio")
                                                .style(ButtonStyle::Subtle)
                                                .icon(IconName::ArrowUpRight)
                                                .icon_size(IconSize::Small)
                                                .icon_color(Color::Muted)
                                                .on_click(move |_, _window, cx| {
                                                    cx.open_url(LMSTUDIO_SITE)
                                                })
                                                .into_any_element(),
                                        )
                                    } else {
                                        this.child(
                                            Button::new(
                                                "download_lmstudio_button",
                                                "Download LM Studio",
                                            )
                                            .style(ButtonStyle::Subtle)
                                            .icon(IconName::ArrowUpRight)
                                            .icon_size(IconSize::Small)
                                            .icon_color(Color::Muted)
                                            .on_click(move |_, _window, cx| {
                                                cx.open_url(LMSTUDIO_DOWNLOAD_URL)
                                            })
                                            .into_any_element(),
                                        )
                                    }
                                })
                                .child(
                                    Button::new("view-models", "Model Catalog")
                                        .style(ButtonStyle::Subtle)
                                        .icon(IconName::ArrowUpRight)
                                        .icon_size(IconSize::Small)
                                        .icon_color(Color::Muted)
                                        .on_click(move |_, _window, cx| {
                                            cx.open_url(LMSTUDIO_CATALOG_URL)
                                        }),
                                ),
                        )
                        .map(|this| {
                            if is_authenticated {
                                this.child(
                                    ButtonLike::new("connected")
                                        .disabled(true)
                                        .cursor_style(gpui::CursorStyle::Arrow)
                                        .child(
                                            h_flex()
                                                .gap_2()
                                                .child(Indicator::dot().color(Color::Success))
                                                .child(Label::new("Connected"))
                                                .into_any_element(),
                                        ),
                                )
                            } else {
                                this.child(
                                    Button::new("retry_lmstudio_models", "Connect")
                                        .icon_position(IconPosition::Start)
                                        .icon_size(IconSize::XSmall)
                                        .icon(IconName::PlayFilled)
                                        .on_click(cx.listener(move |this, _, _window, cx| {
                                            this.retry_connection(cx)
                                        })),
                                )
                            }
                        }),
                )
                .into_any()
        }
    }
}