use anyhow::{Result, anyhow};
use collections::HashMap;
use futures::Stream;
use futures::{FutureExt, StreamExt, future::BoxFuture, stream::BoxStream};
use gpui::{AnyView, App, AsyncApp, Context, Entity, Subscription, Task};
use http_client::HttpClient;
use language_model::{
    AuthenticateError, LanguageModelCompletionError, LanguageModelCompletionEvent,
    LanguageModelToolChoice, LanguageModelToolResultContent, LanguageModelToolUse, MessageContent,
    StopReason, TokenUsage,
};
use language_model::{
    LanguageModel, LanguageModelId, LanguageModelName, LanguageModelProvider,
    LanguageModelProviderId, LanguageModelProviderName, LanguageModelProviderState,
    LanguageModelRequest, RateLimiter, Role,
};
use lmstudio::{ModelType, get_models};
pub use settings::LmStudioAvailableModel as AvailableModel;
use settings::{Settings, SettingsStore};
use std::pin::Pin;
use std::str::FromStr;
use std::{collections::BTreeMap, sync::Arc};
use ui::{ButtonLike, Indicator, List, prelude::*};
use util::ResultExt;

use crate::AllLanguageModelSettings;
use crate::ui::InstructionListItem;

const LMSTUDIO_DOWNLOAD_URL: &str = "https://lmstudio.ai/download";
const LMSTUDIO_CATALOG_URL: &str = "https://lmstudio.ai/models";
const LMSTUDIO_SITE: &str = "https://lmstudio.ai/";

const PROVIDER_ID: LanguageModelProviderId = LanguageModelProviderId::new("lmstudio");
const PROVIDER_NAME: LanguageModelProviderName = LanguageModelProviderName::new("LM Studio");

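/// User-facing settings for the LM Studio provider: the base URL of the local
/// server and any manually configured model entries.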
#[derive(Default, Debug, Clone, PartialEq)]
pub struct LmStudioSettings {
    pub api_url: String,
    pub available_models: Vec<AvailableModel>,
}

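/// Registers LM Studio as a language model provider, exposing whatever models
/// the locally running server reports.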
pub struct LmStudioLanguageModelProvider {
    http_client: Arc<dyn HttpClient>,
    state: Entity<State>,
}

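/// Connection state for the provider: the models fetched from the server, the
/// in-flight fetch task, and a subscription that re-fetches when settings change.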
pub struct State {
    http_client: Arc<dyn HttpClient>,
    available_models: Vec<lmstudio::Model>,
    fetch_model_task: Option<Task<Result<()>>>,
    _subscription: Subscription,
}

impl State {
    fn is_authenticated(&self) -> bool {
        !self.available_models.is_empty()
    }

    fn fetch_models(&mut self, cx: &mut Context<Self>) -> Task<Result<()>> {
        let settings = &AllLanguageModelSettings::get_global(cx).lmstudio;
        let http_client = self.http_client.clone();
        let api_url = settings.api_url.clone();

        // As a proxy for the server being "authenticated", we check whether it's up
        // by fetching the model list.
        cx.spawn(async move |this, cx| {
            let models = get_models(http_client.as_ref(), &api_url, None).await?;

            let mut models: Vec<lmstudio::Model> = models
                .into_iter()
                .filter(|model| model.r#type != ModelType::Embeddings)
                .map(|model| {
                    lmstudio::Model::new(
                        &model.id,
                        None,
                        model.loaded_context_length.or(model.max_context_length),
                        model.capabilities.supports_tool_calls(),
                        model.capabilities.supports_images() || model.r#type == ModelType::Vlm,
                    )
                })
                .collect();

            models.sort_by(|a, b| a.name.cmp(&b.name));

            this.update(cx, |this, cx| {
                this.available_models = models;
                cx.notify();
            })
        })
    }

    fn restart_fetch_models_task(&mut self, cx: &mut Context<Self>) {
        let task = self.fetch_models(cx);
        self.fetch_model_task.replace(task);
    }

    fn authenticate(&mut self, cx: &mut Context<Self>) -> Task<Result<(), AuthenticateError>> {
        if self.is_authenticated() {
            return Task::ready(Ok(()));
        }

        let fetch_models_task = self.fetch_models(cx);
        cx.spawn(async move |_this, _cx| {
            match fetch_models_task.await {
                Ok(()) => Ok(()),
                Err(err) => {
                    // If any cause in the error chain is an std::io::Error with
                    // ErrorKind::ConnectionRefused, treat this as "credentials not found"
                    // (i.e. LM Studio not running).
                    let connection_refused = err.chain().any(|cause| {
                        cause
                            .downcast_ref::<std::io::Error>()
                            .is_some_and(|io_err| {
                                io_err.kind() == std::io::ErrorKind::ConnectionRefused
                            })
                    });
                    if connection_refused {
                        Err(AuthenticateError::ConnectionRefused)
                    } else {
                        Err(AuthenticateError::Other(err))
                    }
                }
            }
        })
    }
}

impl LmStudioLanguageModelProvider {
    pub fn new(http_client: Arc<dyn HttpClient>, cx: &mut App) -> Self {
        let this = Self {
            http_client: http_client.clone(),
            state: cx.new(|cx| {
                let subscription = cx.observe_global::<SettingsStore>({
                    let mut settings = AllLanguageModelSettings::get_global(cx).lmstudio.clone();
                    move |this: &mut State, cx| {
                        let new_settings = &AllLanguageModelSettings::get_global(cx).lmstudio;
                        if &settings != new_settings {
                            settings = new_settings.clone();
                            this.restart_fetch_models_task(cx);
                            cx.notify();
                        }
                    }
                });

                State {
                    http_client,
                    available_models: Default::default(),
                    fetch_model_task: None,
                    _subscription: subscription,
                }
            }),
        };
        this.state
            .update(cx, |state, cx| state.restart_fetch_models_task(cx));
        this
    }
}

impl LanguageModelProviderState for LmStudioLanguageModelProvider {
    type ObservableEntity = State;

    fn observable_entity(&self) -> Option<Entity<Self::ObservableEntity>> {
        Some(self.state.clone())
    }
}

impl LanguageModelProvider for LmStudioLanguageModelProvider {
    fn id(&self) -> LanguageModelProviderId {
        PROVIDER_ID
    }

    fn name(&self) -> LanguageModelProviderName {
        PROVIDER_NAME
    }

    fn icon(&self) -> IconName {
        IconName::AiLmStudio
    }

    fn default_model(&self, _: &App) -> Option<Arc<dyn LanguageModel>> {
        // We deliberately don't pick a default model: doing so could trigger a load
        // call for a model that isn't loaded yet, and on resource-constrained machines
        // silently loading a model by default would be bad UX.
        None
    }

    fn default_fast_model(&self, _: &App) -> Option<Arc<dyn LanguageModel>> {
        // See the explanation in default_model.
        None
    }

    fn provided_models(&self, cx: &App) -> Vec<Arc<dyn LanguageModel>> {
        let mut models: BTreeMap<String, lmstudio::Model> = BTreeMap::default();

        // Add models from the LM Studio API
        for model in self.state.read(cx).available_models.iter() {
            models.insert(model.name.clone(), model.clone());
        }

        // Override with available models from settings
        for model in AllLanguageModelSettings::get_global(cx)
            .lmstudio
            .available_models
            .iter()
        {
            models.insert(
                model.name.clone(),
                lmstudio::Model {
                    name: model.name.clone(),
                    display_name: model.display_name.clone(),
                    max_tokens: model.max_tokens,
                    supports_tool_calls: model.supports_tool_calls,
                    supports_images: model.supports_images,
                },
            );
        }

        models
            .into_values()
            .map(|model| {
                Arc::new(LmStudioLanguageModel {
                    id: LanguageModelId::from(model.name.clone()),
                    model,
                    http_client: self.http_client.clone(),
                    request_limiter: RateLimiter::new(4),
                }) as Arc<dyn LanguageModel>
            })
            .collect()
    }

    fn is_authenticated(&self, cx: &App) -> bool {
        self.state.read(cx).is_authenticated()
    }

    fn authenticate(&self, cx: &mut App) -> Task<Result<(), AuthenticateError>> {
        self.state.update(cx, |state, cx| state.authenticate(cx))
    }

    fn configuration_view(
        &self,
        _target_agent: language_model::ConfigurationViewTargetAgent,
        _window: &mut Window,
        cx: &mut App,
    ) -> AnyView {
        let state = self.state.clone();
        cx.new(|cx| ConfigurationView::new(state, cx)).into()
    }

    fn reset_credentials(&self, cx: &mut App) -> Task<Result<()>> {
        self.state.update(cx, |state, cx| state.fetch_models(cx))
    }
}

pub struct LmStudioLanguageModel {
    id: LanguageModelId,
    model: lmstudio::Model,
    http_client: Arc<dyn HttpClient>,
    request_limiter: RateLimiter,
}

impl LmStudioLanguageModel {
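    /// Converts Zed's provider-agnostic request into the LM Studio chat
    /// completion wire format, merging consecutive content parts into
    /// role-appropriate messages.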
    fn to_lmstudio_request(
        &self,
        request: LanguageModelRequest,
    ) -> lmstudio::ChatCompletionRequest {
        let mut messages = Vec::new();

        for message in request.messages {
            for content in message.content {
                match content {
                    MessageContent::Text(text) => add_message_content_part(
                        lmstudio::MessagePart::Text { text },
                        message.role,
                        &mut messages,
                    ),
                    MessageContent::Thinking { .. } => {}
                    MessageContent::RedactedThinking(_) => {}
                    MessageContent::Image(image) => {
                        add_message_content_part(
                            lmstudio::MessagePart::Image {
                                image_url: lmstudio::ImageUrl {
                                    url: image.to_base64_url(),
                                    detail: None,
                                },
                            },
                            message.role,
                            &mut messages,
                        );
                    }
                    MessageContent::ToolUse(tool_use) => {
                        let tool_call = lmstudio::ToolCall {
                            id: tool_use.id.to_string(),
                            content: lmstudio::ToolCallContent::Function {
                                function: lmstudio::FunctionContent {
                                    name: tool_use.name.to_string(),
                                    arguments: serde_json::to_string(&tool_use.input)
                                        .unwrap_or_default(),
                                },
                            },
                        };

                        if let Some(lmstudio::ChatMessage::Assistant { tool_calls, .. }) =
                            messages.last_mut()
                        {
                            tool_calls.push(tool_call);
                        } else {
                            messages.push(lmstudio::ChatMessage::Assistant {
                                content: None,
                                tool_calls: vec![tool_call],
                            });
                        }
                    }
                    MessageContent::ToolResult(tool_result) => {
                        let content = match &tool_result.content {
                            LanguageModelToolResultContent::Text(text) => {
                                vec![lmstudio::MessagePart::Text {
                                    text: text.to_string(),
                                }]
                            }
                            LanguageModelToolResultContent::Image(image) => {
                                vec![lmstudio::MessagePart::Image {
                                    image_url: lmstudio::ImageUrl {
                                        url: image.to_base64_url(),
                                        detail: None,
                                    },
                                }]
                            }
                        };

                        messages.push(lmstudio::ChatMessage::Tool {
                            content: content.into(),
                            tool_call_id: tool_result.tool_use_id.to_string(),
                        });
                    }
                }
            }
        }

        lmstudio::ChatCompletionRequest {
            model: self.model.name.clone(),
            messages,
            stream: true,
            max_tokens: Some(-1),
            stop: Some(request.stop),
            // LM Studio lets users configure per-model settings (for example, Qwen3 is
            // recommended to run at a temperature of 0.7). Silently overriding those
            // settings from Zed would be bad UX, so we pass no temperature by default.
            temperature: request.temperature,
            tools: request
                .tools
                .into_iter()
                .map(|tool| lmstudio::ToolDefinition::Function {
                    function: lmstudio::FunctionDefinition {
                        name: tool.name,
                        description: Some(tool.description),
                        parameters: Some(tool.input_schema),
                    },
                })
                .collect(),
            tool_choice: request.tool_choice.map(|choice| match choice {
                LanguageModelToolChoice::Auto => lmstudio::ToolChoice::Auto,
                LanguageModelToolChoice::Any => lmstudio::ToolChoice::Required,
                LanguageModelToolChoice::None => lmstudio::ToolChoice::None,
            }),
        }
    }

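    /// Kicks off a streaming chat completion against the configured API URL,
    /// gated by the request rate limiter.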
    fn stream_completion(
        &self,
        request: lmstudio::ChatCompletionRequest,
        cx: &AsyncApp,
    ) -> BoxFuture<
        'static,
        Result<futures::stream::BoxStream<'static, Result<lmstudio::ResponseStreamEvent>>>,
    > {
        let http_client = self.http_client.clone();
        let Ok(api_url) = cx.update(|cx| {
            let settings = &AllLanguageModelSettings::get_global(cx).lmstudio;
            settings.api_url.clone()
        }) else {
            return futures::future::ready(Err(anyhow!("App state dropped"))).boxed();
        };

        let future = self.request_limiter.stream(async move {
            let request = lmstudio::stream_chat_completion(http_client.as_ref(), &api_url, request);
            let response = request.await?;
            Ok(response)
        });

        async move { Ok(future.await?.boxed()) }.boxed()
    }
}

impl LanguageModel for LmStudioLanguageModel {
    fn id(&self) -> LanguageModelId {
        self.id.clone()
    }

    fn name(&self) -> LanguageModelName {
        LanguageModelName::from(self.model.display_name().to_string())
    }

    fn provider_id(&self) -> LanguageModelProviderId {
        PROVIDER_ID
    }

    fn provider_name(&self) -> LanguageModelProviderName {
        PROVIDER_NAME
    }

    fn supports_tools(&self) -> bool {
        self.model.supports_tool_calls()
    }

    fn supports_tool_choice(&self, choice: LanguageModelToolChoice) -> bool {
        self.supports_tools()
            && match choice {
                LanguageModelToolChoice::Auto => true,
                LanguageModelToolChoice::Any => true,
                LanguageModelToolChoice::None => true,
            }
    }

    fn supports_images(&self) -> bool {
        self.model.supports_images
    }

    fn telemetry_id(&self) -> String {
        format!("lmstudio/{}", self.model.id())
    }

    fn max_token_count(&self) -> u64 {
        self.model.max_token_count()
    }

    fn count_tokens(
        &self,
        request: LanguageModelRequest,
        _cx: &App,
    ) -> BoxFuture<'static, Result<u64>> {
        // An endpoint for this is coming soon. In the meantime, fall back to a
        // rough word-count-based estimate.
        let token_count = request
            .messages
            .iter()
            .map(|msg| msg.string_contents().split_whitespace().count())
            .sum::<usize>();

        let estimated_tokens = (token_count as f64 * 0.75) as u64;
        async move { Ok(estimated_tokens) }.boxed()
    }

    fn stream_completion(
        &self,
        request: LanguageModelRequest,
        cx: &AsyncApp,
    ) -> BoxFuture<
        'static,
        Result<
            BoxStream<'static, Result<LanguageModelCompletionEvent, LanguageModelCompletionError>>,
            LanguageModelCompletionError,
        >,
    > {
        let request = self.to_lmstudio_request(request);
        let completions = self.stream_completion(request, cx);
        async move {
            let mapper = LmStudioEventMapper::new();
            Ok(mapper.map_stream(completions.await?).boxed())
        }
        .boxed()
    }
}

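/// Translates LM Studio's streaming response events into Zed's completion
/// events, buffering partial tool calls by their stream index until they
/// are complete.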
struct LmStudioEventMapper {
    tool_calls_by_index: HashMap<usize, RawToolCall>,
}

impl LmStudioEventMapper {
    fn new() -> Self {
        Self {
            tool_calls_by_index: HashMap::default(),
        }
    }

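    /// Adapts the raw response event stream into a stream of completion
    /// events, flattening the events produced for each chunk.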
    pub fn map_stream(
        mut self,
        events: Pin<Box<dyn Send + Stream<Item = Result<lmstudio::ResponseStreamEvent>>>>,
    ) -> impl Stream<Item = Result<LanguageModelCompletionEvent, LanguageModelCompletionError>>
    {
        events.flat_map(move |event| {
            futures::stream::iter(match event {
                Ok(event) => self.map_event(event),
                Err(error) => vec![Err(LanguageModelCompletionError::from(error))],
            })
        })
    }

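    /// Maps a single response chunk to zero or more completion events,
    /// accumulating tool-call fragments until a finish reason arrives.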
    pub fn map_event(
        &mut self,
        event: lmstudio::ResponseStreamEvent,
    ) -> Vec<Result<LanguageModelCompletionEvent, LanguageModelCompletionError>> {
        let Some(choice) = event.choices.into_iter().next() else {
            return vec![Err(LanguageModelCompletionError::from(anyhow!(
                "Response contained no choices"
            )))];
        };

        let mut events = Vec::new();
        if let Some(content) = choice.delta.content {
            events.push(Ok(LanguageModelCompletionEvent::Text(content)));
        }

        if let Some(reasoning_content) = choice.delta.reasoning_content {
            events.push(Ok(LanguageModelCompletionEvent::Thinking {
                text: reasoning_content,
                signature: None,
            }));
        }

        if let Some(tool_calls) = choice.delta.tool_calls {
            for tool_call in tool_calls {
                let entry = self.tool_calls_by_index.entry(tool_call.index).or_default();

                if let Some(tool_id) = tool_call.id {
                    entry.id = tool_id;
                }

                if let Some(function) = tool_call.function {
                    if let Some(name) = function.name {
                        // At the time of writing, LM Studio (0.3.15) deviates from the OpenAI API:
                        // 1. It sends the function name in the first chunk.
                        // 2. It sends an empty string in the function name field of all
                        //    subsequent argument chunks.
                        // According to https://platform.openai.com/docs/guides/function-calling?api-mode=responses#streaming
                        // the function name field should be sent only in the first chunk.
                        if !name.is_empty() {
                            entry.name = name;
                        }
                    }

                    if let Some(arguments) = function.arguments {
                        entry.arguments.push_str(&arguments);
                    }
                }
            }
        }

        if let Some(usage) = event.usage {
            events.push(Ok(LanguageModelCompletionEvent::UsageUpdate(TokenUsage {
                input_tokens: usage.prompt_tokens,
                output_tokens: usage.completion_tokens,
                cache_creation_input_tokens: 0,
                cache_read_input_tokens: 0,
            })));
        }

        match choice.finish_reason.as_deref() {
            Some("stop") => {
                events.push(Ok(LanguageModelCompletionEvent::Stop(StopReason::EndTurn)));
            }
            Some("tool_calls") => {
                events.extend(self.tool_calls_by_index.drain().map(|(_, tool_call)| {
                    match serde_json::Value::from_str(&tool_call.arguments) {
                        Ok(input) => Ok(LanguageModelCompletionEvent::ToolUse(
                            LanguageModelToolUse {
                                id: tool_call.id.into(),
                                name: tool_call.name.into(),
                                is_input_complete: true,
                                input,
                                raw_input: tool_call.arguments,
                            },
                        )),
                        Err(error) => Ok(LanguageModelCompletionEvent::ToolUseJsonParseError {
                            id: tool_call.id.into(),
                            tool_name: tool_call.name.into(),
                            raw_input: tool_call.arguments.into(),
                            json_parse_error: error.to_string(),
                        }),
                    }
                }));

                events.push(Ok(LanguageModelCompletionEvent::Stop(StopReason::ToolUse)));
            }
            Some(stop_reason) => {
                log::error!("Unexpected LM Studio stop_reason: {stop_reason:?}");
                events.push(Ok(LanguageModelCompletionEvent::Stop(StopReason::EndTurn)));
            }
            None => {}
        }

        events
    }
}

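/// A tool call assembled incrementally from streaming chunks: the id and name
/// arrive first, and the JSON arguments accumulate across subsequent chunks.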
#[derive(Default)]
struct RawToolCall {
    id: String,
    name: String,
    arguments: String,
}

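/// Appends `new_part` to the last message when it has the same role; otherwise
/// starts a new message of the appropriate kind.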
fn add_message_content_part(
    new_part: lmstudio::MessagePart,
    role: Role,
    messages: &mut Vec<lmstudio::ChatMessage>,
) {
    match (role, messages.last_mut()) {
        (Role::User, Some(lmstudio::ChatMessage::User { content }))
        | (
            Role::Assistant,
            Some(lmstudio::ChatMessage::Assistant {
                content: Some(content),
                ..
            }),
        )
        | (Role::System, Some(lmstudio::ChatMessage::System { content })) => {
            content.push_part(new_part);
        }
        _ => {
            messages.push(match role {
                Role::User => lmstudio::ChatMessage::User {
                    content: lmstudio::MessageContent::from(vec![new_part]),
                },
                Role::Assistant => lmstudio::ChatMessage::Assistant {
                    content: Some(lmstudio::MessageContent::from(vec![new_part])),
                    tool_calls: Vec::new(),
                },
                Role::System => lmstudio::ChatMessage::System {
                    content: lmstudio::MessageContent::from(vec![new_part]),
                },
            });
        }
    }
}

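/// Settings UI for the provider: kicks off an initial connection attempt and
/// then shows either download instructions or the connection status.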
struct ConfigurationView {
    state: Entity<State>,
    loading_models_task: Option<Task<()>>,
}

impl ConfigurationView {
    pub fn new(state: Entity<State>, cx: &mut Context<Self>) -> Self {
        let loading_models_task = Some(cx.spawn({
            let state = state.clone();
            async move |this, cx| {
                if let Some(task) = state
                    .update(cx, |state, cx| state.authenticate(cx))
                    .log_err()
                {
                    task.await.log_err();
                }
                this.update(cx, |this, cx| {
                    this.loading_models_task = None;
                    cx.notify();
                })
                .log_err();
            }
        }));

        Self {
            state,
            loading_models_task,
        }
    }

    fn retry_connection(&self, cx: &mut App) {
        self.state
            .update(cx, |state, cx| state.fetch_models(cx))
            .detach_and_log_err(cx);
    }
}

impl Render for ConfigurationView {
    fn render(&mut self, _window: &mut Window, cx: &mut Context<Self>) -> impl IntoElement {
        let is_authenticated = self.state.read(cx).is_authenticated();

        let lmstudio_intro = "Run local LLMs like Llama, Phi, and Qwen.";

        if self.loading_models_task.is_some() {
            div().child(Label::new("Loading models...")).into_any()
        } else {
            v_flex()
                .gap_2()
                .child(
                    v_flex().gap_1().child(Label::new(lmstudio_intro)).child(
                        List::new()
                            .child(InstructionListItem::text_only(
                                "LM Studio needs to be running with at least one model downloaded.",
                            ))
                            .child(InstructionListItem::text_only(
                                "To get your first model, try running `lms get qwen2.5-coder-7b`",
                            )),
                    ),
                )
                .child(
                    h_flex()
                        .w_full()
                        .justify_between()
                        .gap_2()
                        .child(
                            h_flex()
                                .w_full()
                                .gap_2()
                                .map(|this| {
                                    if is_authenticated {
                                        this.child(
                                            Button::new("lmstudio-site", "LM Studio")
                                                .style(ButtonStyle::Subtle)
                                                .icon(IconName::ArrowUpRight)
                                                .icon_size(IconSize::Small)
                                                .icon_color(Color::Muted)
                                                .on_click(move |_, _window, cx| {
                                                    cx.open_url(LMSTUDIO_SITE)
                                                })
                                                .into_any_element(),
                                        )
                                    } else {
                                        this.child(
                                            Button::new(
                                                "download_lmstudio_button",
                                                "Download LM Studio",
                                            )
                                            .style(ButtonStyle::Subtle)
                                            .icon(IconName::ArrowUpRight)
                                            .icon_size(IconSize::Small)
                                            .icon_color(Color::Muted)
                                            .on_click(move |_, _window, cx| {
                                                cx.open_url(LMSTUDIO_DOWNLOAD_URL)
                                            })
                                            .into_any_element(),
                                        )
                                    }
                                })
                                .child(
                                    Button::new("view-models", "Model Catalog")
                                        .style(ButtonStyle::Subtle)
                                        .icon(IconName::ArrowUpRight)
                                        .icon_size(IconSize::Small)
                                        .icon_color(Color::Muted)
                                        .on_click(move |_, _window, cx| {
                                            cx.open_url(LMSTUDIO_CATALOG_URL)
                                        }),
                                ),
                        )
                        .map(|this| {
                            if is_authenticated {
                                this.child(
                                    ButtonLike::new("connected")
                                        .disabled(true)
                                        .cursor_style(gpui::CursorStyle::Arrow)
                                        .child(
                                            h_flex()
                                                .gap_2()
                                                .child(Indicator::dot().color(Color::Success))
                                                .child(Label::new("Connected"))
                                                .into_any_element(),
                                        ),
                                )
                            } else {
                                this.child(
                                    Button::new("retry_lmstudio_models", "Connect")
                                        .icon_position(IconPosition::Start)
                                        .icon_size(IconSize::XSmall)
                                        .icon(IconName::PlayFilled)
                                        .on_click(cx.listener(move |this, _, _window, cx| {
                                            this.retry_connection(cx)
                                        })),
                                )
                            }
                        }),
                )
                .into_any()
        }
    }
}