lmstudio.rs

use anyhow::{Result, anyhow};
use collections::HashMap;
use futures::Stream;
use futures::{FutureExt, StreamExt, future::BoxFuture, stream::BoxStream};
use gpui::{AnyView, App, AsyncApp, Context, Subscription, Task};
use http_client::HttpClient;
use language_model::{
    AuthenticateError, LanguageModelCompletionError, LanguageModelCompletionEvent,
    LanguageModelToolChoice, LanguageModelToolResultContent, LanguageModelToolUse, MessageContent,
    StopReason,
};
use language_model::{
    LanguageModel, LanguageModelId, LanguageModelName, LanguageModelProvider,
    LanguageModelProviderId, LanguageModelProviderName, LanguageModelProviderState,
    LanguageModelRequest, RateLimiter, Role,
};
use lmstudio::{
    ChatCompletionRequest, ChatMessage, ModelType, ResponseStreamEvent, get_models, preload_model,
    stream_chat_completion,
};
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};
use settings::{Settings, SettingsStore};
use std::pin::Pin;
use std::str::FromStr;
use std::{collections::BTreeMap, sync::Arc};
use ui::{ButtonLike, Indicator, List, prelude::*};
use util::ResultExt;

use crate::AllLanguageModelSettings;
use crate::ui::InstructionListItem;

const LMSTUDIO_DOWNLOAD_URL: &str = "https://lmstudio.ai/download";
const LMSTUDIO_CATALOG_URL: &str = "https://lmstudio.ai/models";
const LMSTUDIO_SITE: &str = "https://lmstudio.ai/";

const PROVIDER_ID: &str = "lmstudio";
const PROVIDER_NAME: &str = "LM Studio";

#[derive(Default, Debug, Clone, PartialEq)]
pub struct LmStudioSettings {
    pub api_url: String,
    pub available_models: Vec<AvailableModel>,
}

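/// A model declared manually in the user's settings, supplementing or
/// overriding what the LM Studio API reports. An illustrative settings entry
/// (field names from this struct, values hypothetical) might look like:
/// `{ "name": "qwen2.5-coder-7b", "max_tokens": 32768, "supports_tool_calls": true }`.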
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema)]
pub struct AvailableModel {
    pub name: String,
    pub display_name: Option<String>,
    pub max_tokens: usize,
    pub supports_tool_calls: bool,
}

pub struct LmStudioLanguageModelProvider {
    http_client: Arc<dyn HttpClient>,
    state: gpui::Entity<State>,
}

pub struct State {
    http_client: Arc<dyn HttpClient>,
    available_models: Vec<lmstudio::Model>,
    fetch_model_task: Option<Task<Result<()>>>,
    _subscription: Subscription,
}

impl State {
    fn is_authenticated(&self) -> bool {
        !self.available_models.is_empty()
    }

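    /// Fetches the model list from the LM Studio server, keeping every
    /// non-embedding model, sorted by name.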
    fn fetch_models(&mut self, cx: &mut Context<Self>) -> Task<Result<()>> {
        let settings = &AllLanguageModelSettings::get_global(cx).lmstudio;
        let http_client = self.http_client.clone();
        let api_url = settings.api_url.clone();

        // As a proxy for the server being "authenticated", we check whether it's up by fetching the models.
        cx.spawn(async move |this, cx| {
            let models = get_models(http_client.as_ref(), &api_url, None).await?;

            let mut models: Vec<lmstudio::Model> = models
                .into_iter()
                .filter(|model| model.r#type != ModelType::Embeddings)
                .map(|model| {
                    lmstudio::Model::new(
                        &model.id,
                        None,
                        None,
                        model.capabilities.supports_tool_calls(),
                    )
                })
                .collect();

            models.sort_by(|a, b| a.name.cmp(&b.name));

            this.update(cx, |this, cx| {
                this.available_models = models;
                cx.notify();
            })
        })
    }

    fn restart_fetch_models_task(&mut self, cx: &mut Context<Self>) {
        let task = self.fetch_models(cx);
        self.fetch_model_task.replace(task);
    }

    fn authenticate(&mut self, cx: &mut Context<Self>) -> Task<Result<(), AuthenticateError>> {
        if self.is_authenticated() {
            return Task::ready(Ok(()));
        }

        let fetch_models_task = self.fetch_models(cx);
        cx.spawn(async move |_this, _cx| Ok(fetch_models_task.await?))
    }
}

impl LmStudioLanguageModelProvider {
    pub fn new(http_client: Arc<dyn HttpClient>, cx: &mut App) -> Self {
        let this = Self {
            http_client: http_client.clone(),
            state: cx.new(|cx| {
                let subscription = cx.observe_global::<SettingsStore>({
                    let mut settings = AllLanguageModelSettings::get_global(cx).lmstudio.clone();
                    move |this: &mut State, cx| {
                        let new_settings = &AllLanguageModelSettings::get_global(cx).lmstudio;
                        if &settings != new_settings {
                            settings = new_settings.clone();
                            this.restart_fetch_models_task(cx);
                            cx.notify();
                        }
                    }
                });

                State {
                    http_client,
                    available_models: Default::default(),
                    fetch_model_task: None,
                    _subscription: subscription,
                }
            }),
        };
        this.state
            .update(cx, |state, cx| state.restart_fetch_models_task(cx));
        this
    }
}

impl LanguageModelProviderState for LmStudioLanguageModelProvider {
    type ObservableEntity = State;

    fn observable_entity(&self) -> Option<gpui::Entity<Self::ObservableEntity>> {
        Some(self.state.clone())
    }
}

impl LanguageModelProvider for LmStudioLanguageModelProvider {
    fn id(&self) -> LanguageModelProviderId {
        LanguageModelProviderId(PROVIDER_ID.into())
    }

    fn name(&self) -> LanguageModelProviderName {
        LanguageModelProviderName(PROVIDER_NAME.into())
    }

    fn icon(&self) -> IconName {
        IconName::AiLmStudio
    }

    fn default_model(&self, _: &App) -> Option<Arc<dyn LanguageModel>> {
        // We don't pick a default model, because doing so could trigger a load call for an
        // unloaded model. In a constrained environment where the user may not have enough
        // resources, selecting something to load by default would be bad UX.
        None
    }

    fn default_fast_model(&self, _: &App) -> Option<Arc<dyn LanguageModel>> {
        // See the explanation in default_model.
        None
    }

    fn provided_models(&self, cx: &App) -> Vec<Arc<dyn LanguageModel>> {
        let mut models: BTreeMap<String, lmstudio::Model> = BTreeMap::default();

        // Add models from the LM Studio API.
        for model in self.state.read(cx).available_models.iter() {
            models.insert(model.name.clone(), model.clone());
        }

        // Override them with the available models from settings.
        for model in AllLanguageModelSettings::get_global(cx)
            .lmstudio
            .available_models
            .iter()
        {
            models.insert(
                model.name.clone(),
                lmstudio::Model {
                    name: model.name.clone(),
                    display_name: model.display_name.clone(),
                    max_tokens: model.max_tokens,
                    supports_tool_calls: model.supports_tool_calls,
                },
            );
        }

        models
            .into_values()
            .map(|model| {
                Arc::new(LmStudioLanguageModel {
                    id: LanguageModelId::from(model.name.clone()),
                    model: model.clone(),
                    http_client: self.http_client.clone(),
                    request_limiter: RateLimiter::new(4),
                }) as Arc<dyn LanguageModel>
            })
            .collect()
    }

    fn load_model(&self, model: Arc<dyn LanguageModel>, cx: &App) {
        let settings = &AllLanguageModelSettings::get_global(cx).lmstudio;
        let http_client = self.http_client.clone();
        let api_url = settings.api_url.clone();
        let id = model.id().0.to_string();
        cx.spawn(async move |_| preload_model(http_client, &api_url, &id).await)
            .detach_and_log_err(cx);
    }

    fn is_authenticated(&self, cx: &App) -> bool {
        self.state.read(cx).is_authenticated()
    }

    fn authenticate(&self, cx: &mut App) -> Task<Result<(), AuthenticateError>> {
        self.state.update(cx, |state, cx| state.authenticate(cx))
    }

    fn configuration_view(&self, _window: &mut Window, cx: &mut App) -> AnyView {
        let state = self.state.clone();
        cx.new(|cx| ConfigurationView::new(state, cx)).into()
    }

    fn reset_credentials(&self, cx: &mut App) -> Task<Result<()>> {
        self.state.update(cx, |state, cx| state.fetch_models(cx))
    }
}

pub struct LmStudioLanguageModel {
    id: LanguageModelId,
    model: lmstudio::Model,
    http_client: Arc<dyn HttpClient>,
    request_limiter: RateLimiter,
}

impl LmStudioLanguageModel {
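    /// Flattens a `LanguageModelRequest` into the OpenAI-compatible shape LM
    /// Studio expects: one `ChatMessage` per content item, with tool calls
    /// folded into the preceding assistant message where possible.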
    fn to_lmstudio_request(&self, request: LanguageModelRequest) -> ChatCompletionRequest {
        let mut messages = Vec::new();

        for message in request.messages {
            for content in message.content {
                match content {
                    MessageContent::Text(text) | MessageContent::Thinking { text, .. } => messages
                        .push(match message.role {
                            Role::User => ChatMessage::User { content: text },
                            Role::Assistant => ChatMessage::Assistant {
                                content: Some(text),
                                tool_calls: Vec::new(),
                            },
                            Role::System => ChatMessage::System { content: text },
                        }),
                    MessageContent::RedactedThinking(_) => {}
                    MessageContent::Image(_) => {}
                    MessageContent::ToolUse(tool_use) => {
                        let tool_call = lmstudio::ToolCall {
                            id: tool_use.id.to_string(),
                            content: lmstudio::ToolCallContent::Function {
                                function: lmstudio::FunctionContent {
                                    name: tool_use.name.to_string(),
                                    arguments: serde_json::to_string(&tool_use.input)
                                        .unwrap_or_default(),
                                },
                            },
                        };

                        // Fold the tool call into the preceding assistant message, if any.
                        if let Some(lmstudio::ChatMessage::Assistant { tool_calls, .. }) =
                            messages.last_mut()
                        {
                            tool_calls.push(tool_call);
                        } else {
                            messages.push(lmstudio::ChatMessage::Assistant {
                                content: None,
                                tool_calls: vec![tool_call],
                            });
                        }
                    }
                    MessageContent::ToolResult(tool_result) => {
                        match &tool_result.content {
                            LanguageModelToolResultContent::Text(text) => {
                                messages.push(lmstudio::ChatMessage::Tool {
                                    content: text.to_string(),
                                    tool_call_id: tool_result.tool_use_id.to_string(),
                                });
                            }
                            LanguageModelToolResultContent::Image(_) => {
                                // No support for tool-result images for now.
                            }
                        };
                    }
                }
            }
        }

        ChatCompletionRequest {
            model: self.model.name.clone(),
            messages,
            stream: true,
            // -1 leaves the completion length up to LM Studio (no explicit cap).
            max_tokens: Some(-1),
            stop: Some(request.stop),
            // In LM Studio you can configure model-specific settings; for example, Qwen3 is
            // recommended to run at a temperature of 0.7. Silently overriding those settings
            // from Zed would be bad UX, so we pass no temperature by default.
            temperature: request.temperature,
            tools: request
                .tools
                .into_iter()
                .map(|tool| lmstudio::ToolDefinition::Function {
                    function: lmstudio::FunctionDefinition {
                        name: tool.name,
                        description: Some(tool.description),
                        parameters: Some(tool.input_schema),
                    },
                })
                .collect(),
            tool_choice: request.tool_choice.map(|choice| match choice {
                LanguageModelToolChoice::Auto => lmstudio::ToolChoice::Auto,
                LanguageModelToolChoice::Any => lmstudio::ToolChoice::Required,
                LanguageModelToolChoice::None => lmstudio::ToolChoice::None,
            }),
        }
    }

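    /// Reads the API URL from settings, then issues the streaming chat request
    /// through this model's rate limiter.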
    fn stream_completion(
        &self,
        request: ChatCompletionRequest,
        cx: &AsyncApp,
    ) -> BoxFuture<'static, Result<futures::stream::BoxStream<'static, Result<ResponseStreamEvent>>>>
    {
        let http_client = self.http_client.clone();
        let Ok(api_url) = cx.update(|cx| {
            let settings = &AllLanguageModelSettings::get_global(cx).lmstudio;
            settings.api_url.clone()
        }) else {
            return futures::future::ready(Err(anyhow!("App state dropped"))).boxed();
        };

        let future = self.request_limiter.stream(async move {
            let request = stream_chat_completion(http_client.as_ref(), &api_url, request);
            let response = request.await?;
            Ok(response)
        });

        async move { Ok(future.await?.boxed()) }.boxed()
    }
}

impl LanguageModel for LmStudioLanguageModel {
    fn id(&self) -> LanguageModelId {
        self.id.clone()
    }

    fn name(&self) -> LanguageModelName {
        LanguageModelName::from(self.model.display_name().to_string())
    }

    fn provider_id(&self) -> LanguageModelProviderId {
        LanguageModelProviderId(PROVIDER_ID.into())
    }

    fn provider_name(&self) -> LanguageModelProviderName {
        LanguageModelProviderName(PROVIDER_NAME.into())
    }

    fn supports_tools(&self) -> bool {
        self.model.supports_tool_calls()
    }

    fn supports_tool_choice(&self, choice: LanguageModelToolChoice) -> bool {
        self.supports_tools()
            && match choice {
                LanguageModelToolChoice::Auto
                | LanguageModelToolChoice::Any
                | LanguageModelToolChoice::None => true,
            }
    }

    fn supports_images(&self) -> bool {
        false
    }

    fn telemetry_id(&self) -> String {
        format!("lmstudio/{}", self.model.id())
    }

    fn max_token_count(&self) -> usize {
        self.model.max_token_count()
    }

    fn count_tokens(
        &self,
        request: LanguageModelRequest,
        _cx: &App,
    ) -> BoxFuture<'static, Result<usize>> {
        // A token-counting endpoint is coming soon. In the meantime, make a rough
        // estimate from the word count.
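        // For example, messages totaling 1,000 whitespace-separated words are
        // estimated at 750 tokens.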
        let token_count = request
            .messages
            .iter()
            .map(|msg| msg.string_contents().split_whitespace().count())
            .sum::<usize>();

        let estimated_tokens = (token_count as f64 * 0.75) as usize;
        async move { Ok(estimated_tokens) }.boxed()
    }

    fn stream_completion(
        &self,
        request: LanguageModelRequest,
        cx: &AsyncApp,
    ) -> BoxFuture<
        'static,
        Result<
            BoxStream<'static, Result<LanguageModelCompletionEvent, LanguageModelCompletionError>>,
        >,
    > {
        let request = self.to_lmstudio_request(request);
        let completions = self.stream_completion(request, cx);
        async move {
            let mapper = LmStudioEventMapper::new();
            Ok(mapper.map_stream(completions.await?).boxed())
        }
        .boxed()
    }
}

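/// Translates LM Studio's OpenAI-style stream events into
/// `LanguageModelCompletionEvent`s, buffering tool-call fragments by index
/// until a `tool_calls` finish reason arrives.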
struct LmStudioEventMapper {
    tool_calls_by_index: HashMap<usize, RawToolCall>,
}

impl LmStudioEventMapper {
    fn new() -> Self {
        Self {
            tool_calls_by_index: HashMap::default(),
        }
    }

    pub fn map_stream(
        mut self,
        events: Pin<Box<dyn Send + Stream<Item = Result<ResponseStreamEvent>>>>,
    ) -> impl Stream<Item = Result<LanguageModelCompletionEvent, LanguageModelCompletionError>>
    {
        events.flat_map(move |event| {
            futures::stream::iter(match event {
                Ok(event) => self.map_event(event),
                Err(error) => vec![Err(LanguageModelCompletionError::Other(anyhow!(error)))],
            })
        })
    }

    pub fn map_event(
        &mut self,
        event: ResponseStreamEvent,
    ) -> Vec<Result<LanguageModelCompletionEvent, LanguageModelCompletionError>> {
        let Some(choice) = event.choices.into_iter().next() else {
            return vec![Err(LanguageModelCompletionError::Other(anyhow!(
                "Response contained no choices"
            )))];
        };

        let mut events = Vec::new();
        if let Some(content) = choice.delta.content {
            events.push(Ok(LanguageModelCompletionEvent::Text(content)));
        }

        if let Some(tool_calls) = choice.delta.tool_calls {
            for tool_call in tool_calls {
                let entry = self.tool_calls_by_index.entry(tool_call.index).or_default();

                if let Some(tool_id) = tool_call.id {
                    entry.id = tool_id;
                }

                if let Some(function) = tool_call.function {
                    if let Some(name) = function.name {
                        // At the time of writing, LM Studio (0.3.15) deviates from the OpenAI API:
                        // 1. It sends the function name in the first chunk.
                        // 2. It sends an empty string in the function name field of every
                        //    subsequent argument chunk.
                        // According to https://platform.openai.com/docs/guides/function-calling?api-mode=responses#streaming
                        // the function name field should be sent only in the first chunk.
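                        // An illustrative (hypothetical) chunk sequence:
                        //   chunk 1: name: Some("get_weather"), arguments: Some("")
                        //   chunk 2: name: Some(""), arguments: Some("{\"city\":")
                        //   chunk 3: name: Some(""), arguments: Some("\"Berlin\"}")
                        // Hence we only record a non-empty name.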
                        if !name.is_empty() {
                            entry.name = name;
                        }
                    }

                    if let Some(arguments) = function.arguments {
                        entry.arguments.push_str(&arguments);
                    }
                }
            }
        }

        match choice.finish_reason.as_deref() {
            Some("stop") => {
                events.push(Ok(LanguageModelCompletionEvent::Stop(StopReason::EndTurn)));
            }
            Some("tool_calls") => {
                events.extend(self.tool_calls_by_index.drain().map(|(_, tool_call)| {
                    match serde_json::Value::from_str(&tool_call.arguments) {
                        Ok(input) => Ok(LanguageModelCompletionEvent::ToolUse(
                            LanguageModelToolUse {
                                id: tool_call.id.into(),
                                name: tool_call.name.into(),
                                is_input_complete: true,
                                input,
                                raw_input: tool_call.arguments,
                            },
                        )),
                        Err(error) => Err(LanguageModelCompletionError::BadInputJson {
                            id: tool_call.id.into(),
                            tool_name: tool_call.name.into(),
                            raw_input: tool_call.arguments.into(),
                            json_parse_error: error.to_string(),
                        }),
                    }
                }));

                events.push(Ok(LanguageModelCompletionEvent::Stop(StopReason::ToolUse)));
            }
            Some(stop_reason) => {
                log::error!("Unexpected LM Studio stop_reason: {stop_reason:?}");
                events.push(Ok(LanguageModelCompletionEvent::Stop(StopReason::EndTurn)));
            }
            None => {}
        }

        events
    }
}

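/// A tool call assembled incrementally from streamed fragments; `arguments`
/// accumulates raw JSON text until the stream reports the call complete.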
#[derive(Default)]
struct RawToolCall {
    id: String,
    name: String,
    arguments: String,
}

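/// Settings-panel view that shows LM Studio connection status, with actions
/// to retry the connection or download LM Studio.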
struct ConfigurationView {
    state: gpui::Entity<State>,
    loading_models_task: Option<Task<()>>,
}

impl ConfigurationView {
    pub fn new(state: gpui::Entity<State>, cx: &mut Context<Self>) -> Self {
        let loading_models_task = Some(cx.spawn({
            let state = state.clone();
            async move |this, cx| {
                if let Some(task) = state
                    .update(cx, |state, cx| state.authenticate(cx))
                    .log_err()
                {
                    task.await.log_err();
                }
                this.update(cx, |this, cx| {
                    this.loading_models_task = None;
                    cx.notify();
                })
                .log_err();
            }
        }));

        Self {
            state,
            loading_models_task,
        }
    }

    fn retry_connection(&self, cx: &mut App) {
        self.state
            .update(cx, |state, cx| state.fetch_models(cx))
            .detach_and_log_err(cx);
    }
}

impl Render for ConfigurationView {
    fn render(&mut self, _window: &mut Window, cx: &mut Context<Self>) -> impl IntoElement {
        let is_authenticated = self.state.read(cx).is_authenticated();

        let lmstudio_intro = "Run local LLMs like Llama, Phi, and Qwen.";

        if self.loading_models_task.is_some() {
            div().child(Label::new("Loading models...")).into_any()
        } else {
            v_flex()
                .gap_2()
                .child(
                    v_flex().gap_1().child(Label::new(lmstudio_intro)).child(
                        List::new()
                            .child(InstructionListItem::text_only(
                                "LM Studio needs to be running with at least one model downloaded.",
                            ))
                            .child(InstructionListItem::text_only(
                                "To get your first model, try running `lms get qwen2.5-coder-7b`",
                            )),
                    ),
                )
                .child(
                    h_flex()
                        .w_full()
                        .justify_between()
                        .gap_2()
                        .child(
                            h_flex()
                                .w_full()
                                .gap_2()
                                .map(|this| {
                                    if is_authenticated {
                                        this.child(
                                            Button::new("lmstudio-site", "LM Studio")
                                                .style(ButtonStyle::Subtle)
                                                .icon(IconName::ArrowUpRight)
                                                .icon_size(IconSize::XSmall)
                                                .icon_color(Color::Muted)
                                                .on_click(move |_, _window, cx| {
                                                    cx.open_url(LMSTUDIO_SITE)
                                                })
                                                .into_any_element(),
                                        )
                                    } else {
                                        this.child(
                                            Button::new(
                                                "download_lmstudio_button",
                                                "Download LM Studio",
                                            )
                                            .style(ButtonStyle::Subtle)
                                            .icon(IconName::ArrowUpRight)
                                            .icon_size(IconSize::XSmall)
                                            .icon_color(Color::Muted)
                                            .on_click(move |_, _window, cx| {
                                                cx.open_url(LMSTUDIO_DOWNLOAD_URL)
                                            })
                                            .into_any_element(),
                                        )
                                    }
                                })
                                .child(
                                    Button::new("view-models", "Model Catalog")
                                        .style(ButtonStyle::Subtle)
                                        .icon(IconName::ArrowUpRight)
                                        .icon_size(IconSize::XSmall)
                                        .icon_color(Color::Muted)
                                        .on_click(move |_, _window, cx| {
                                            cx.open_url(LMSTUDIO_CATALOG_URL)
                                        }),
                                ),
                        )
                        .map(|this| {
                            if is_authenticated {
                                this.child(
                                    ButtonLike::new("connected")
                                        .disabled(true)
                                        .cursor_style(gpui::CursorStyle::Arrow)
                                        .child(
                                            h_flex()
                                                .gap_2()
                                                .child(Indicator::dot().color(Color::Success))
                                                .child(Label::new("Connected"))
                                                .into_any_element(),
                                        ),
                                )
                            } else {
                                this.child(
                                    Button::new("retry_lmstudio_models", "Connect")
                                        .icon_position(IconPosition::Start)
                                        .icon_size(IconSize::XSmall)
                                        .icon(IconName::Play)
                                        .on_click(cx.listener(move |this, _, _window, cx| {
                                            this.retry_connection(cx)
                                        })),
                                )
                            }
                        }),
                )
                .into_any()
        }
    }
}