lmstudio.rs

use anyhow::{Result, anyhow};
use collections::HashMap;
use futures::Stream;
use futures::{FutureExt, StreamExt, future::BoxFuture, stream::BoxStream};
use gpui::{AnyView, App, AsyncApp, Context, Subscription, Task};
use http_client::HttpClient;
use language_model::{
    AuthenticateError, LanguageModelCompletionError, LanguageModelCompletionEvent,
    LanguageModelToolChoice, LanguageModelToolResultContent, LanguageModelToolUse, MessageContent,
    StopReason, TokenUsage,
};
use language_model::{
    LanguageModel, LanguageModelId, LanguageModelName, LanguageModelProvider,
    LanguageModelProviderId, LanguageModelProviderName, LanguageModelProviderState,
    LanguageModelRequest, RateLimiter, Role,
};
use lmstudio::{ModelType, get_models};
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};
use settings::{Settings, SettingsStore};
use std::pin::Pin;
use std::str::FromStr;
use std::{collections::BTreeMap, sync::Arc};
use ui::{ButtonLike, Indicator, List, prelude::*};
use util::ResultExt;

use crate::AllLanguageModelSettings;
use crate::ui::InstructionListItem;

const LMSTUDIO_DOWNLOAD_URL: &str = "https://lmstudio.ai/download";
const LMSTUDIO_CATALOG_URL: &str = "https://lmstudio.ai/models";
const LMSTUDIO_SITE: &str = "https://lmstudio.ai/";

const PROVIDER_ID: LanguageModelProviderId = LanguageModelProviderId::new("lmstudio");
const PROVIDER_NAME: LanguageModelProviderName = LanguageModelProviderName::new("LM Studio");

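/// User-facing settings for the LM Studio provider: the URL of the local
/// server and any models declared manually in the settings file.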
#[derive(Default, Debug, Clone, PartialEq)]
pub struct LmStudioSettings {
    pub api_url: String,
    pub available_models: Vec<AvailableModel>,
}

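/// A model declared in the settings file, used to supplement or override
/// what the LM Studio server reports.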
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema)]
pub struct AvailableModel {
    pub name: String,
    pub display_name: Option<String>,
    pub max_tokens: u64,
    pub supports_tool_calls: bool,
    pub supports_images: bool,
}

pub struct LmStudioLanguageModelProvider {
    http_client: Arc<dyn HttpClient>,
    state: gpui::Entity<State>,
}

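/// Connection state for the provider. LM Studio has no credentials, so a
/// non-empty list of fetched models doubles as "connected".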
pub struct State {
    http_client: Arc<dyn HttpClient>,
    available_models: Vec<lmstudio::Model>,
    fetch_model_task: Option<Task<Result<()>>>,
    _subscription: Subscription,
}

impl State {
    fn is_authenticated(&self) -> bool {
        !self.available_models.is_empty()
    }

    fn fetch_models(&mut self, cx: &mut Context<Self>) -> Task<Result<()>> {
        let settings = &AllLanguageModelSettings::get_global(cx).lmstudio;
        let http_client = self.http_client.clone();
        let api_url = settings.api_url.clone();

        // As a proxy for the server being "authenticated", we'll check whether it's up by fetching the models.
        cx.spawn(async move |this, cx| {
            let models = get_models(http_client.as_ref(), &api_url, None).await?;

            let mut models: Vec<lmstudio::Model> = models
                .into_iter()
                .filter(|model| model.r#type != ModelType::Embeddings)
                .map(|model| {
                    lmstudio::Model::new(
                        &model.id,
                        None,
                        model.loaded_context_length.or(model.max_context_length),
                        model.capabilities.supports_tool_calls(),
                        model.capabilities.supports_images() || model.r#type == ModelType::Vlm,
                    )
                })
                .collect();

            models.sort_by(|a, b| a.name.cmp(&b.name));

            this.update(cx, |this, cx| {
                this.available_models = models;
                cx.notify();
            })
        })
    }

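    /// Starts a new model fetch, replacing (and thereby cancelling) any fetch
    /// still in flight.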
    fn restart_fetch_models_task(&mut self, cx: &mut Context<Self>) {
        let task = self.fetch_models(cx);
        self.fetch_model_task.replace(task);
    }

    fn authenticate(&mut self, cx: &mut Context<Self>) -> Task<Result<(), AuthenticateError>> {
        if self.is_authenticated() {
            return Task::ready(Ok(()));
        }

        let fetch_models_task = self.fetch_models(cx);
        cx.spawn(async move |_this, _cx| {
            match fetch_models_task.await {
                Ok(()) => Ok(()),
                Err(err) => {
                    // If any cause in the error chain is a std::io::Error with
                    // ErrorKind::ConnectionRefused, report it as a refused
                    // connection (i.e. LM Studio is not running) rather than
                    // as a generic failure.
                    let connection_refused = err.chain().any(|cause| {
                        cause.downcast_ref::<std::io::Error>().is_some_and(|io_err| {
                            io_err.kind() == std::io::ErrorKind::ConnectionRefused
                        })
                    });
                    if connection_refused {
                        Err(AuthenticateError::ConnectionRefused)
                    } else {
                        Err(AuthenticateError::Other(err))
                    }
                }
            }
        })
    }
}

impl LmStudioLanguageModelProvider {
    pub fn new(http_client: Arc<dyn HttpClient>, cx: &mut App) -> Self {
        let this = Self {
            http_client: http_client.clone(),
            state: cx.new(|cx| {
                let subscription = cx.observe_global::<SettingsStore>({
                    let mut settings = AllLanguageModelSettings::get_global(cx).lmstudio.clone();
                    move |this: &mut State, cx| {
                        let new_settings = &AllLanguageModelSettings::get_global(cx).lmstudio;
                        if &settings != new_settings {
                            settings = new_settings.clone();
                            this.restart_fetch_models_task(cx);
                            cx.notify();
                        }
                    }
                });

                State {
                    http_client,
                    available_models: Default::default(),
                    fetch_model_task: None,
                    _subscription: subscription,
                }
            }),
        };
        this.state
            .update(cx, |state, cx| state.restart_fetch_models_task(cx));
        this
    }
}

impl LanguageModelProviderState for LmStudioLanguageModelProvider {
    type ObservableEntity = State;

    fn observable_entity(&self) -> Option<gpui::Entity<Self::ObservableEntity>> {
        Some(self.state.clone())
    }
}

impl LanguageModelProvider for LmStudioLanguageModelProvider {
    fn id(&self) -> LanguageModelProviderId {
        PROVIDER_ID
    }

    fn name(&self) -> LanguageModelProviderName {
        PROVIDER_NAME
    }

    fn icon(&self) -> IconName {
        IconName::AiLmStudio
    }

    fn default_model(&self, _: &App) -> Option<Arc<dyn LanguageModel>> {
        // We shouldn't pick a default model, because that could trigger a load call
        // for an unloaded model. In a constrained environment where the user may not
        // have enough resources, loading something by default would be bad UX.
        None
    }

    fn default_fast_model(&self, _: &App) -> Option<Arc<dyn LanguageModel>> {
        // See the explanation in default_model.
        None
    }

    fn provided_models(&self, cx: &App) -> Vec<Arc<dyn LanguageModel>> {
        let mut models: BTreeMap<String, lmstudio::Model> = BTreeMap::default();

        // Add models from the LM Studio API
        for model in self.state.read(cx).available_models.iter() {
            models.insert(model.name.clone(), model.clone());
        }

        // Override with available models from settings
        for model in AllLanguageModelSettings::get_global(cx)
            .lmstudio
            .available_models
            .iter()
        {
            models.insert(
                model.name.clone(),
                lmstudio::Model {
                    name: model.name.clone(),
                    display_name: model.display_name.clone(),
                    max_tokens: model.max_tokens,
                    supports_tool_calls: model.supports_tool_calls,
                    supports_images: model.supports_images,
                },
            );
        }

        models
            .into_values()
            .map(|model| {
                Arc::new(LmStudioLanguageModel {
                    id: LanguageModelId::from(model.name.clone()),
                    model,
                    http_client: self.http_client.clone(),
                    request_limiter: RateLimiter::new(4),
                }) as Arc<dyn LanguageModel>
            })
            .collect()
    }

    fn is_authenticated(&self, cx: &App) -> bool {
        self.state.read(cx).is_authenticated()
    }

    fn authenticate(&self, cx: &mut App) -> Task<Result<(), AuthenticateError>> {
        self.state.update(cx, |state, cx| state.authenticate(cx))
    }

    fn configuration_view(
        &self,
        _target_agent: language_model::ConfigurationViewTargetAgent,
        _window: &mut Window,
        cx: &mut App,
    ) -> AnyView {
        let state = self.state.clone();
        cx.new(|cx| ConfigurationView::new(state, cx)).into()
    }

    fn reset_credentials(&self, cx: &mut App) -> Task<Result<()>> {
        self.state.update(cx, |state, cx| state.fetch_models(cx))
    }
}

pub struct LmStudioLanguageModel {
    id: LanguageModelId,
    model: lmstudio::Model,
    http_client: Arc<dyn HttpClient>,
    request_limiter: RateLimiter,
}

impl LmStudioLanguageModel {
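    /// Converts Zed's request into LM Studio's OpenAI-style chat request,
    /// flattening message content parts and threading tool calls and results
    /// through as assistant and tool messages.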
    fn to_lmstudio_request(
        &self,
        request: LanguageModelRequest,
    ) -> lmstudio::ChatCompletionRequest {
        let mut messages = Vec::new();

        for message in request.messages {
            for content in message.content {
                match content {
                    MessageContent::Text(text) => add_message_content_part(
                        lmstudio::MessagePart::Text { text },
                        message.role,
                        &mut messages,
                    ),
                    MessageContent::Thinking { .. } => {}
                    MessageContent::RedactedThinking(_) => {}
                    MessageContent::Image(image) => {
                        add_message_content_part(
                            lmstudio::MessagePart::Image {
                                image_url: lmstudio::ImageUrl {
                                    url: image.to_base64_url(),
                                    detail: None,
                                },
                            },
                            message.role,
                            &mut messages,
                        );
                    }
                    MessageContent::ToolUse(tool_use) => {
                        let tool_call = lmstudio::ToolCall {
                            id: tool_use.id.to_string(),
                            content: lmstudio::ToolCallContent::Function {
                                function: lmstudio::FunctionContent {
                                    name: tool_use.name.to_string(),
                                    arguments: serde_json::to_string(&tool_use.input)
                                        .unwrap_or_default(),
                                },
                            },
                        };

                        if let Some(lmstudio::ChatMessage::Assistant { tool_calls, .. }) =
                            messages.last_mut()
                        {
                            tool_calls.push(tool_call);
                        } else {
                            messages.push(lmstudio::ChatMessage::Assistant {
                                content: None,
                                tool_calls: vec![tool_call],
                            });
                        }
                    }
                    MessageContent::ToolResult(tool_result) => {
                        let content = match &tool_result.content {
                            LanguageModelToolResultContent::Text(text) => {
                                vec![lmstudio::MessagePart::Text {
                                    text: text.to_string(),
                                }]
                            }
                            LanguageModelToolResultContent::Image(image) => {
                                vec![lmstudio::MessagePart::Image {
                                    image_url: lmstudio::ImageUrl {
                                        url: image.to_base64_url(),
                                        detail: None,
                                    },
                                }]
                            }
                        };

                        messages.push(lmstudio::ChatMessage::Tool {
                            content: content.into(),
                            tool_call_id: tool_result.tool_use_id.to_string(),
                        });
                    }
                }
            }
        }

        lmstudio::ChatCompletionRequest {
            model: self.model.name.clone(),
            messages,
            stream: true,
            // -1 leaves the completion-length cap up to LM Studio (its
            // convention for "no limit").
            max_tokens: Some(-1),
            stop: Some(request.stop),
            // In LM Studio you can configure model-specific sampling settings.
            // For example, Qwen3 is recommended to run at a temperature of 0.7.
            // It would be bad UX to silently override those settings from Zed,
            // so we pass no temperature by default.
            temperature: request.temperature,
            tools: request
                .tools
                .into_iter()
                .map(|tool| lmstudio::ToolDefinition::Function {
                    function: lmstudio::FunctionDefinition {
                        name: tool.name,
                        description: Some(tool.description),
                        parameters: Some(tool.input_schema),
                    },
                })
                .collect(),
            tool_choice: request.tool_choice.map(|choice| match choice {
                LanguageModelToolChoice::Auto => lmstudio::ToolChoice::Auto,
                LanguageModelToolChoice::Any => lmstudio::ToolChoice::Required,
                LanguageModelToolChoice::None => lmstudio::ToolChoice::None,
            }),
        }
    }

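    /// Opens a streaming chat-completion request against the configured
    /// LM Studio URL, gated by the provider's rate limiter.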
    fn stream_completion(
        &self,
        request: lmstudio::ChatCompletionRequest,
        cx: &AsyncApp,
    ) -> BoxFuture<
        'static,
        Result<futures::stream::BoxStream<'static, Result<lmstudio::ResponseStreamEvent>>>,
    > {
        let http_client = self.http_client.clone();
        let Ok(api_url) = cx.update(|cx| {
            let settings = &AllLanguageModelSettings::get_global(cx).lmstudio;
            settings.api_url.clone()
        }) else {
            return futures::future::ready(Err(anyhow!("App state dropped"))).boxed();
        };

        let future = self.request_limiter.stream(async move {
            let request = lmstudio::stream_chat_completion(http_client.as_ref(), &api_url, request);
            let response = request.await?;
            Ok(response)
        });

        async move { Ok(future.await?.boxed()) }.boxed()
    }
}

impl LanguageModel for LmStudioLanguageModel {
    fn id(&self) -> LanguageModelId {
        self.id.clone()
    }

    fn name(&self) -> LanguageModelName {
        LanguageModelName::from(self.model.display_name().to_string())
    }

    fn provider_id(&self) -> LanguageModelProviderId {
        PROVIDER_ID
    }

    fn provider_name(&self) -> LanguageModelProviderName {
        PROVIDER_NAME
    }

    fn supports_tools(&self) -> bool {
        self.model.supports_tool_calls()
    }

    fn supports_tool_choice(&self, choice: LanguageModelToolChoice) -> bool {
        self.supports_tools()
            && match choice {
                LanguageModelToolChoice::Auto => true,
                LanguageModelToolChoice::Any => true,
                LanguageModelToolChoice::None => true,
            }
    }

    fn supports_images(&self) -> bool {
        self.model.supports_images
    }

    fn telemetry_id(&self) -> String {
        format!("lmstudio/{}", self.model.id())
    }

    fn max_token_count(&self) -> u64 {
        self.model.max_token_count()
    }

    fn count_tokens(
        &self,
        request: LanguageModelRequest,
        _cx: &App,
    ) -> BoxFuture<'static, Result<u64>> {
        // An endpoint for this is coming soon. In the meantime, make a hacky
        // estimate: ~0.75 tokens per whitespace-delimited word.
        let token_count = request
            .messages
            .iter()
            .map(|msg| msg.string_contents().split_whitespace().count())
            .sum::<usize>();

        let estimated_tokens = (token_count as f64 * 0.75) as u64;
        async move { Ok(estimated_tokens) }.boxed()
    }

    fn stream_completion(
        &self,
        request: LanguageModelRequest,
        cx: &AsyncApp,
    ) -> BoxFuture<
        'static,
        Result<
            BoxStream<'static, Result<LanguageModelCompletionEvent, LanguageModelCompletionError>>,
            LanguageModelCompletionError,
        >,
    > {
        let request = self.to_lmstudio_request(request);
        let completions = self.stream_completion(request, cx);
        async move {
            let mapper = LmStudioEventMapper::new();
            Ok(mapper.map_stream(completions.await?).boxed())
        }
        .boxed()
    }
}

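/// Maps LM Studio's OpenAI-style stream events to [`LanguageModelCompletionEvent`]s,
/// accumulating partial tool calls by index until a finish reason arrives.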
struct LmStudioEventMapper {
    tool_calls_by_index: HashMap<usize, RawToolCall>,
}

impl LmStudioEventMapper {
    fn new() -> Self {
        Self {
            tool_calls_by_index: HashMap::default(),
        }
    }

    pub fn map_stream(
        mut self,
        events: Pin<Box<dyn Send + Stream<Item = Result<lmstudio::ResponseStreamEvent>>>>,
    ) -> impl Stream<Item = Result<LanguageModelCompletionEvent, LanguageModelCompletionError>>
    {
        events.flat_map(move |event| {
            futures::stream::iter(match event {
                Ok(event) => self.map_event(event),
                Err(error) => vec![Err(LanguageModelCompletionError::from(error))],
            })
        })
    }

    pub fn map_event(
        &mut self,
        event: lmstudio::ResponseStreamEvent,
    ) -> Vec<Result<LanguageModelCompletionEvent, LanguageModelCompletionError>> {
        let Some(choice) = event.choices.into_iter().next() else {
            return vec![Err(LanguageModelCompletionError::from(anyhow!(
                "Response contained no choices"
            )))];
        };

        let mut events = Vec::new();
        if let Some(content) = choice.delta.content {
            events.push(Ok(LanguageModelCompletionEvent::Text(content)));
        }

        if let Some(reasoning_content) = choice.delta.reasoning_content {
            events.push(Ok(LanguageModelCompletionEvent::Thinking {
                text: reasoning_content,
                signature: None,
            }));
        }

        if let Some(tool_calls) = choice.delta.tool_calls {
            for tool_call in tool_calls {
                let entry = self.tool_calls_by_index.entry(tool_call.index).or_default();

                if let Some(tool_id) = tool_call.id {
                    entry.id = tool_id;
                }

                if let Some(function) = tool_call.function {
                    if let Some(name) = function.name {
                        // As of this writing, LM Studio (0.3.15) deviates from the OpenAI API:
                        // 1. It sends the function name in the first chunk.
                        // 2. It sends an empty string in the function name field in all
                        //    subsequent argument chunks.
                        // According to https://platform.openai.com/docs/guides/function-calling?api-mode=responses#streaming
                        // the function name field should be sent only in the first chunk.
                        if !name.is_empty() {
                            entry.name = name;
                        }
                    }

                    if let Some(arguments) = function.arguments {
                        entry.arguments.push_str(&arguments);
                    }
                }
            }
        }

        if let Some(usage) = event.usage {
            events.push(Ok(LanguageModelCompletionEvent::UsageUpdate(TokenUsage {
                input_tokens: usage.prompt_tokens,
                output_tokens: usage.completion_tokens,
                cache_creation_input_tokens: 0,
                cache_read_input_tokens: 0,
            })));
        }

        match choice.finish_reason.as_deref() {
            Some("stop") => {
                events.push(Ok(LanguageModelCompletionEvent::Stop(StopReason::EndTurn)));
            }
            Some("tool_calls") => {
                events.extend(self.tool_calls_by_index.drain().map(|(_, tool_call)| {
                    match serde_json::Value::from_str(&tool_call.arguments) {
                        Ok(input) => Ok(LanguageModelCompletionEvent::ToolUse(
                            LanguageModelToolUse {
                                id: tool_call.id.into(),
                                name: tool_call.name.into(),
                                is_input_complete: true,
                                input,
                                raw_input: tool_call.arguments,
                            },
                        )),
                        Err(error) => Ok(LanguageModelCompletionEvent::ToolUseJsonParseError {
                            id: tool_call.id.into(),
                            tool_name: tool_call.name.into(),
                            raw_input: tool_call.arguments.into(),
                            json_parse_error: error.to_string(),
                        }),
                    }
                }));

                events.push(Ok(LanguageModelCompletionEvent::Stop(StopReason::ToolUse)));
            }
            Some(stop_reason) => {
                log::error!("Unexpected LM Studio stop_reason: {stop_reason:?}");
                events.push(Ok(LanguageModelCompletionEvent::Stop(StopReason::EndTurn)));
            }
            None => {}
        }

        events
    }
}

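/// A tool call assembled across stream chunks; `arguments` accumulates JSON
/// fragments until the final chunk completes the object.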
#[derive(Default)]
struct RawToolCall {
    id: String,
    name: String,
    arguments: String,
}

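/// Appends `new_part` to the last message if it has the same role (and, for
/// assistant messages, existing content); otherwise pushes a new message.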
fn add_message_content_part(
    new_part: lmstudio::MessagePart,
    role: Role,
    messages: &mut Vec<lmstudio::ChatMessage>,
) {
    match (role, messages.last_mut()) {
        (Role::User, Some(lmstudio::ChatMessage::User { content }))
        | (
            Role::Assistant,
            Some(lmstudio::ChatMessage::Assistant {
                content: Some(content),
                ..
            }),
        )
        | (Role::System, Some(lmstudio::ChatMessage::System { content })) => {
            content.push_part(new_part);
        }
        _ => {
            messages.push(match role {
                Role::User => lmstudio::ChatMessage::User {
                    content: lmstudio::MessageContent::from(vec![new_part]),
                },
                Role::Assistant => lmstudio::ChatMessage::Assistant {
                    content: Some(lmstudio::MessageContent::from(vec![new_part])),
                    tool_calls: Vec::new(),
                },
                Role::System => lmstudio::ChatMessage::System {
                    content: lmstudio::MessageContent::from(vec![new_part]),
                },
            });
        }
    }
}

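/// Provider configuration UI: shows a loading state while models are fetched,
/// then either download/connect actions or a "Connected" indicator.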
struct ConfigurationView {
    state: gpui::Entity<State>,
    loading_models_task: Option<Task<()>>,
}

impl ConfigurationView {
    pub fn new(state: gpui::Entity<State>, cx: &mut Context<Self>) -> Self {
        let loading_models_task = Some(cx.spawn({
            let state = state.clone();
            async move |this, cx| {
                if let Some(task) = state
                    .update(cx, |state, cx| state.authenticate(cx))
                    .log_err()
                {
                    task.await.log_err();
                }
                this.update(cx, |this, cx| {
                    this.loading_models_task = None;
                    cx.notify();
                })
                .log_err();
            }
        }));

        Self {
            state,
            loading_models_task,
        }
    }

    fn retry_connection(&self, cx: &mut App) {
        self.state
            .update(cx, |state, cx| state.fetch_models(cx))
            .detach_and_log_err(cx);
    }
}

impl Render for ConfigurationView {
    fn render(&mut self, _window: &mut Window, cx: &mut Context<Self>) -> impl IntoElement {
        let is_authenticated = self.state.read(cx).is_authenticated();

        let lmstudio_intro = "Run local LLMs like Llama, Phi, and Qwen.";

        if self.loading_models_task.is_some() {
            div().child(Label::new("Loading models...")).into_any()
        } else {
            v_flex()
                .gap_2()
                .child(
                    v_flex().gap_1().child(Label::new(lmstudio_intro)).child(
                        List::new()
                            .child(InstructionListItem::text_only(
                                "LM Studio needs to be running with at least one model downloaded.",
                            ))
                            .child(InstructionListItem::text_only(
                                "To get your first model, try running `lms get qwen2.5-coder-7b`",
                            )),
                    ),
                )
                .child(
                    h_flex()
                        .w_full()
                        .justify_between()
                        .gap_2()
                        .child(
                            h_flex()
                                .w_full()
                                .gap_2()
                                .map(|this| {
                                    if is_authenticated {
                                        this.child(
                                            Button::new("lmstudio-site", "LM Studio")
                                                .style(ButtonStyle::Subtle)
                                                .icon(IconName::ArrowUpRight)
                                                .icon_size(IconSize::Small)
                                                .icon_color(Color::Muted)
                                                .on_click(move |_, _window, cx| {
                                                    cx.open_url(LMSTUDIO_SITE)
                                                })
                                                .into_any_element(),
                                        )
                                    } else {
                                        this.child(
                                            Button::new(
                                                "download_lmstudio_button",
                                                "Download LM Studio",
                                            )
                                            .style(ButtonStyle::Subtle)
                                            .icon(IconName::ArrowUpRight)
                                            .icon_size(IconSize::Small)
                                            .icon_color(Color::Muted)
                                            .on_click(move |_, _window, cx| {
                                                cx.open_url(LMSTUDIO_DOWNLOAD_URL)
                                            })
                                            .into_any_element(),
                                        )
                                    }
                                })
                                .child(
                                    Button::new("view-models", "Model Catalog")
                                        .style(ButtonStyle::Subtle)
                                        .icon(IconName::ArrowUpRight)
                                        .icon_size(IconSize::Small)
                                        .icon_color(Color::Muted)
                                        .on_click(move |_, _window, cx| {
                                            cx.open_url(LMSTUDIO_CATALOG_URL)
                                        }),
                                ),
                        )
                        .map(|this| {
                            if is_authenticated {
                                this.child(
                                    ButtonLike::new("connected")
                                        .disabled(true)
                                        .cursor_style(gpui::CursorStyle::Arrow)
                                        .child(
                                            h_flex()
                                                .gap_2()
                                                .child(Indicator::dot().color(Color::Success))
                                                .child(Label::new("Connected"))
                                                .into_any_element(),
                                        ),
                                )
                            } else {
                                this.child(
                                    Button::new("retry_lmstudio_models", "Connect")
                                        .icon_position(IconPosition::Start)
                                        .icon_size(IconSize::XSmall)
                                        .icon(IconName::PlayFilled)
                                        .on_click(cx.listener(move |this, _, _window, cx| {
                                            this.retry_connection(cx)
                                        })),
                                )
                            }
                        }),
                )
                .into_any()
        }
    }
}