lmstudio.rs

use anyhow::{Result, anyhow};
use collections::HashMap;
use futures::Stream;
use futures::{FutureExt, StreamExt, future::BoxFuture, stream::BoxStream};
use gpui::{AnyView, App, AsyncApp, Context, Subscription, Task};
use http_client::HttpClient;
use language_model::{
    AuthenticateError, LanguageModelCompletionError, LanguageModelCompletionEvent,
    LanguageModelToolChoice, LanguageModelToolResultContent, LanguageModelToolUse, MessageContent,
    StopReason,
};
use language_model::{
    LanguageModel, LanguageModelId, LanguageModelName, LanguageModelProvider,
    LanguageModelProviderId, LanguageModelProviderName, LanguageModelProviderState,
    LanguageModelRequest, RateLimiter, Role,
};
use lmstudio::{
    ChatCompletionRequest, ChatMessage, ModelType, ResponseStreamEvent, get_models,
    stream_chat_completion,
};
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};
use settings::{Settings, SettingsStore};
use std::pin::Pin;
use std::str::FromStr;
use std::{collections::BTreeMap, sync::Arc};
use ui::{ButtonLike, Indicator, List, prelude::*};
use util::ResultExt;

use crate::AllLanguageModelSettings;
use crate::ui::InstructionListItem;

const LMSTUDIO_DOWNLOAD_URL: &str = "https://lmstudio.ai/download";
const LMSTUDIO_CATALOG_URL: &str = "https://lmstudio.ai/models";
const LMSTUDIO_SITE: &str = "https://lmstudio.ai/";

const PROVIDER_ID: &str = "lmstudio";
const PROVIDER_NAME: &str = "LM Studio";

#[derive(Default, Debug, Clone, PartialEq)]
pub struct LmStudioSettings {
    pub api_url: String,
    pub available_models: Vec<AvailableModel>,
}

#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema)]
pub struct AvailableModel {
    pub name: String,
    pub display_name: Option<String>,
    pub max_tokens: usize,
    pub supports_tool_calls: bool,
}
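
// Illustrative only: how the structs above might be spelled in the user's
// settings JSON. The "language_models.lmstudio" path and the concrete values
// are assumptions made for the sake of the example.
//
// "language_models": {
//     "lmstudio": {
//         "api_url": "http://localhost:1234/api/v0",
//         "available_models": [
//             {
//                 "name": "qwen2.5-coder-7b",
//                 "display_name": "Qwen2.5 Coder 7B",
//                 "max_tokens": 32768,
//                 "supports_tool_calls": true
//             }
//         ]
//     }
// }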

pub struct LmStudioLanguageModelProvider {
    http_client: Arc<dyn HttpClient>,
    state: gpui::Entity<State>,
}

pub struct State {
    http_client: Arc<dyn HttpClient>,
    available_models: Vec<lmstudio::Model>,
    fetch_model_task: Option<Task<Result<()>>>,
    _subscription: Subscription,
}

impl State {
    fn is_authenticated(&self) -> bool {
        !self.available_models.is_empty()
    }

    fn fetch_models(&mut self, cx: &mut Context<Self>) -> Task<Result<()>> {
        let settings = &AllLanguageModelSettings::get_global(cx).lmstudio;
        let http_client = self.http_client.clone();
        let api_url = settings.api_url.clone();

        // As a proxy for the server being "authenticated", we check whether it's up by fetching the models.
        cx.spawn(async move |this, cx| {
            let models = get_models(http_client.as_ref(), &api_url, None).await?;

            let mut models: Vec<lmstudio::Model> = models
                .into_iter()
                .filter(|model| model.r#type != ModelType::Embeddings)
                .map(|model| {
                    lmstudio::Model::new(
                        &model.id,
                        None,
                        None,
                        model.capabilities.supports_tool_calls(),
                    )
                })
                .collect();

            models.sort_by(|a, b| a.name.cmp(&b.name));

            this.update(cx, |this, cx| {
                this.available_models = models;
                cx.notify();
            })
        })
    }

    fn restart_fetch_models_task(&mut self, cx: &mut Context<Self>) {
        let task = self.fetch_models(cx);
        self.fetch_model_task.replace(task);
    }

    fn authenticate(&mut self, cx: &mut Context<Self>) -> Task<Result<(), AuthenticateError>> {
        if self.is_authenticated() {
            return Task::ready(Ok(()));
        }

        let fetch_models_task = self.fetch_models(cx);
        cx.spawn(async move |_this, _cx| Ok(fetch_models_task.await?))
    }
}

impl LmStudioLanguageModelProvider {
    pub fn new(http_client: Arc<dyn HttpClient>, cx: &mut App) -> Self {
        let this = Self {
            http_client: http_client.clone(),
            state: cx.new(|cx| {
                let subscription = cx.observe_global::<SettingsStore>({
                    let mut settings = AllLanguageModelSettings::get_global(cx).lmstudio.clone();
                    move |this: &mut State, cx| {
                        let new_settings = &AllLanguageModelSettings::get_global(cx).lmstudio;
                        if &settings != new_settings {
                            settings = new_settings.clone();
                            this.restart_fetch_models_task(cx);
                            cx.notify();
                        }
                    }
                });

                State {
                    http_client,
                    available_models: Default::default(),
                    fetch_model_task: None,
                    _subscription: subscription,
                }
            }),
        };
        this.state
            .update(cx, |state, cx| state.restart_fetch_models_task(cx));
        this
    }
}

impl LanguageModelProviderState for LmStudioLanguageModelProvider {
    type ObservableEntity = State;

    fn observable_entity(&self) -> Option<gpui::Entity<Self::ObservableEntity>> {
        Some(self.state.clone())
    }
}

impl LanguageModelProvider for LmStudioLanguageModelProvider {
    fn id(&self) -> LanguageModelProviderId {
        LanguageModelProviderId(PROVIDER_ID.into())
    }

    fn name(&self) -> LanguageModelProviderName {
        LanguageModelProviderName(PROVIDER_NAME.into())
    }

    fn icon(&self) -> IconName {
        IconName::AiLmStudio
    }

    fn default_model(&self, _: &App) -> Option<Arc<dyn LanguageModel>> {
        // We shouldn't try to select a default model, because doing so could trigger a load call
        // for a model that isn't loaded yet. In a constrained environment where the user may not
        // have enough resources, silently loading something by default would be bad UX.
        None
    }

    fn default_fast_model(&self, _: &App) -> Option<Arc<dyn LanguageModel>> {
        // See explanation for default_model.
        None
    }

    fn provided_models(&self, cx: &App) -> Vec<Arc<dyn LanguageModel>> {
        let mut models: BTreeMap<String, lmstudio::Model> = BTreeMap::default();

        // Add models from the LM Studio API
        for model in self.state.read(cx).available_models.iter() {
            models.insert(model.name.clone(), model.clone());
        }

        // Override with available models from settings
        for model in AllLanguageModelSettings::get_global(cx)
            .lmstudio
            .available_models
            .iter()
        {
            models.insert(
                model.name.clone(),
                lmstudio::Model {
                    name: model.name.clone(),
                    display_name: model.display_name.clone(),
                    max_tokens: model.max_tokens,
                    supports_tool_calls: model.supports_tool_calls,
                },
            );
        }

        models
            .into_values()
            .map(|model| {
                Arc::new(LmStudioLanguageModel {
                    id: LanguageModelId::from(model.name.clone()),
                    model: model.clone(),
                    http_client: self.http_client.clone(),
                    request_limiter: RateLimiter::new(4),
                }) as Arc<dyn LanguageModel>
            })
            .collect()
    }

    fn is_authenticated(&self, cx: &App) -> bool {
        self.state.read(cx).is_authenticated()
    }

    fn authenticate(&self, cx: &mut App) -> Task<Result<(), AuthenticateError>> {
        self.state.update(cx, |state, cx| state.authenticate(cx))
    }

    fn configuration_view(&self, _window: &mut Window, cx: &mut App) -> AnyView {
        let state = self.state.clone();
        cx.new(|cx| ConfigurationView::new(state, cx)).into()
    }

    fn reset_credentials(&self, cx: &mut App) -> Task<Result<()>> {
        self.state.update(cx, |state, cx| state.fetch_models(cx))
    }
}

pub struct LmStudioLanguageModel {
    id: LanguageModelId,
    model: lmstudio::Model,
    http_client: Arc<dyn HttpClient>,
    request_limiter: RateLimiter,
}

impl LmStudioLanguageModel {
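    /// Converts a Zed `LanguageModelRequest` into LM Studio's OpenAI-style
    /// `ChatCompletionRequest`. Text and thinking content map to plain chat
    /// messages by role; each `ToolUse` is folded into the `tool_calls` of the
    /// preceding assistant message (or opens a new one); tool results become
    /// `Tool` messages. For example (illustrative), assistant text followed by
    /// one `ToolUse` produces a single assistant message carrying both the
    /// text and one `tool_calls` entry.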
    fn to_lmstudio_request(&self, request: LanguageModelRequest) -> ChatCompletionRequest {
        let mut messages = Vec::new();

        for message in request.messages {
            for content in message.content {
                match content {
                    MessageContent::Text(text) | MessageContent::Thinking { text, .. } => messages
                        .push(match message.role {
                            Role::User => ChatMessage::User { content: text },
                            Role::Assistant => ChatMessage::Assistant {
                                content: Some(text),
                                tool_calls: Vec::new(),
                            },
                            Role::System => ChatMessage::System { content: text },
                        }),
                    MessageContent::RedactedThinking(_) => {}
                    MessageContent::Image(_) => {}
                    MessageContent::ToolUse(tool_use) => {
                        let tool_call = lmstudio::ToolCall {
                            id: tool_use.id.to_string(),
                            content: lmstudio::ToolCallContent::Function {
                                function: lmstudio::FunctionContent {
                                    name: tool_use.name.to_string(),
                                    arguments: serde_json::to_string(&tool_use.input)
                                        .unwrap_or_default(),
                                },
                            },
                        };

                        if let Some(lmstudio::ChatMessage::Assistant { tool_calls, .. }) =
                            messages.last_mut()
                        {
                            tool_calls.push(tool_call);
                        } else {
                            messages.push(lmstudio::ChatMessage::Assistant {
                                content: None,
                                tool_calls: vec![tool_call],
                            });
                        }
                    }
                    MessageContent::ToolResult(tool_result) => {
                        match &tool_result.content {
                            LanguageModelToolResultContent::Text(text) => {
                                messages.push(lmstudio::ChatMessage::Tool {
                                    content: text.to_string(),
                                    tool_call_id: tool_result.tool_use_id.to_string(),
                                });
                            }
                            LanguageModelToolResultContent::Image(_) => {
                                // no support for images for now
                            }
                        };
                    }
                }
            }
        }

        ChatCompletionRequest {
            model: self.model.name.clone(),
            messages,
            stream: true,
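            // LM Studio appears to treat -1 as "no limit" (its API examples
            // use -1); we rely on that here rather than imposing a cap.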
            max_tokens: Some(-1),
            stop: Some(request.stop),
            // In LM Studio you can configure model-specific settings, e.g. Qwen3 is recommended
            // to run at a temperature of 0.7. It would be bad UX to silently override those
            // settings from Zed, so we pass no temperature by default.
            temperature: request.temperature,
            tools: request
                .tools
                .into_iter()
                .map(|tool| lmstudio::ToolDefinition::Function {
                    function: lmstudio::FunctionDefinition {
                        name: tool.name,
                        description: Some(tool.description),
                        parameters: Some(tool.input_schema),
                    },
                })
                .collect(),
            tool_choice: request.tool_choice.map(|choice| match choice {
                LanguageModelToolChoice::Auto => lmstudio::ToolChoice::Auto,
                LanguageModelToolChoice::Any => lmstudio::ToolChoice::Required,
                LanguageModelToolChoice::None => lmstudio::ToolChoice::None,
            }),
        }
    }

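    /// Reads the configured `api_url` on the app thread, then issues the
    /// streaming request through `request_limiter`, which bounds this model to
    /// 4 concurrent requests (see `RateLimiter::new(4)` in `provided_models`).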
    fn stream_completion(
        &self,
        request: ChatCompletionRequest,
        cx: &AsyncApp,
    ) -> BoxFuture<'static, Result<futures::stream::BoxStream<'static, Result<ResponseStreamEvent>>>>
    {
        let http_client = self.http_client.clone();
        let Ok(api_url) = cx.update(|cx| {
            let settings = &AllLanguageModelSettings::get_global(cx).lmstudio;
            settings.api_url.clone()
        }) else {
            return futures::future::ready(Err(anyhow!("App state dropped"))).boxed();
        };

        let future = self.request_limiter.stream(async move {
            let request = stream_chat_completion(http_client.as_ref(), &api_url, request);
            let response = request.await?;
            Ok(response)
        });

        async move { Ok(future.await?.boxed()) }.boxed()
    }
}

impl LanguageModel for LmStudioLanguageModel {
    fn id(&self) -> LanguageModelId {
        self.id.clone()
    }

    fn name(&self) -> LanguageModelName {
        LanguageModelName::from(self.model.display_name().to_string())
    }

    fn provider_id(&self) -> LanguageModelProviderId {
        LanguageModelProviderId(PROVIDER_ID.into())
    }

    fn provider_name(&self) -> LanguageModelProviderName {
        LanguageModelProviderName(PROVIDER_NAME.into())
    }

    fn supports_tools(&self) -> bool {
        self.model.supports_tool_calls()
    }

    fn supports_tool_choice(&self, choice: LanguageModelToolChoice) -> bool {
        self.supports_tools()
            && match choice {
                LanguageModelToolChoice::Auto => true,
                LanguageModelToolChoice::Any => true,
                LanguageModelToolChoice::None => true,
            }
    }

    fn supports_images(&self) -> bool {
        false
    }

    fn telemetry_id(&self) -> String {
        format!("lmstudio/{}", self.model.id())
    }

    fn max_token_count(&self) -> usize {
        self.model.max_token_count()
    }

    fn count_tokens(
        &self,
        request: LanguageModelRequest,
        _cx: &App,
    ) -> BoxFuture<'static, Result<usize>> {
        // An endpoint for this is coming soon. In the meantime, use a rough estimate.
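        // e.g. messages totalling 100 whitespace-separated words are estimated
        // at 75 tokens by the 0.75 factor below.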
        let token_count = request
            .messages
            .iter()
            .map(|msg| msg.string_contents().split_whitespace().count())
            .sum::<usize>();

        let estimated_tokens = (token_count as f64 * 0.75) as usize;
        async move { Ok(estimated_tokens) }.boxed()
    }

    fn stream_completion(
        &self,
        request: LanguageModelRequest,
        cx: &AsyncApp,
    ) -> BoxFuture<
        'static,
        Result<
            BoxStream<'static, Result<LanguageModelCompletionEvent, LanguageModelCompletionError>>,
        >,
    > {
        let request = self.to_lmstudio_request(request);
        let completions = self.stream_completion(request, cx);
        async move {
            let mapper = LmStudioEventMapper::new();
            Ok(mapper.map_stream(completions.await?).boxed())
        }
        .boxed()
    }
}

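/// Accumulates streamed deltas into completion events. Tool calls arrive as
/// fragments spread across chunks (`arguments` is a partial JSON string), so
/// they are buffered by `index` until a `tool_calls` finish reason indicates
/// the JSON is complete and can be parsed.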
struct LmStudioEventMapper {
    tool_calls_by_index: HashMap<usize, RawToolCall>,
}

impl LmStudioEventMapper {
    fn new() -> Self {
        Self {
            tool_calls_by_index: HashMap::default(),
        }
    }

    pub fn map_stream(
        mut self,
        events: Pin<Box<dyn Send + Stream<Item = Result<ResponseStreamEvent>>>>,
    ) -> impl Stream<Item = Result<LanguageModelCompletionEvent, LanguageModelCompletionError>>
    {
        events.flat_map(move |event| {
            futures::stream::iter(match event {
                Ok(event) => self.map_event(event),
                Err(error) => vec![Err(LanguageModelCompletionError::Other(anyhow!(error)))],
            })
        })
    }

    pub fn map_event(
        &mut self,
        event: ResponseStreamEvent,
    ) -> Vec<Result<LanguageModelCompletionEvent, LanguageModelCompletionError>> {
        let Some(choice) = event.choices.into_iter().next() else {
            return vec![Err(LanguageModelCompletionError::Other(anyhow!(
                "Response contained no choices"
            )))];
        };

        let mut events = Vec::new();
        if let Some(content) = choice.delta.content {
            events.push(Ok(LanguageModelCompletionEvent::Text(content)));
        }

        if let Some(tool_calls) = choice.delta.tool_calls {
            for tool_call in tool_calls {
                let entry = self.tool_calls_by_index.entry(tool_call.index).or_default();

                if let Some(tool_id) = tool_call.id {
                    entry.id = tool_id;
                }

                if let Some(function) = tool_call.function {
                    if let Some(name) = function.name {
                        // At the time of writing, LM Studio (0.3.15) deviates from the OpenAI API:
                        // 1. It sends the function name in the first chunk.
                        // 2. It sends an empty string in the function name field of all subsequent
                        //    chunks that carry arguments.
                        // According to https://platform.openai.com/docs/guides/function-calling?api-mode=responses#streaming
                        // the function name field should be sent only in the first chunk.
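                        // Illustrative chunk sequence for a single call:
                        //   chunk 1: name: "get_weather", arguments: "{\"ci"
                        //   chunk 2: name: "",            arguments: "ty\":\"Berlin\"}"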
                        if !name.is_empty() {
                            entry.name = name;
                        }
                    }

                    if let Some(arguments) = function.arguments {
                        entry.arguments.push_str(&arguments);
                    }
                }
            }
        }

        match choice.finish_reason.as_deref() {
            Some("stop") => {
                events.push(Ok(LanguageModelCompletionEvent::Stop(StopReason::EndTurn)));
            }
            Some("tool_calls") => {
                events.extend(self.tool_calls_by_index.drain().map(|(_, tool_call)| {
                    match serde_json::Value::from_str(&tool_call.arguments) {
                        Ok(input) => Ok(LanguageModelCompletionEvent::ToolUse(
                            LanguageModelToolUse {
                                id: tool_call.id.into(),
                                name: tool_call.name.into(),
                                is_input_complete: true,
                                input,
                                raw_input: tool_call.arguments,
                            },
                        )),
                        Err(error) => Err(LanguageModelCompletionError::BadInputJson {
                            id: tool_call.id.into(),
                            tool_name: tool_call.name.into(),
                            raw_input: tool_call.arguments.into(),
                            json_parse_error: error.to_string(),
                        }),
                    }
                }));

                events.push(Ok(LanguageModelCompletionEvent::Stop(StopReason::ToolUse)));
            }
            Some(stop_reason) => {
                log::error!("Unexpected LM Studio stop_reason: {stop_reason:?}");
                events.push(Ok(LanguageModelCompletionEvent::Stop(StopReason::EndTurn)));
            }
            None => {}
        }

        events
    }
}

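/// A tool call reassembled from stream fragments; `arguments` holds the
/// concatenated, possibly still incomplete, JSON string until it is parsed.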
#[derive(Default)]
struct RawToolCall {
    id: String,
    name: String,
    arguments: String,
}

struct ConfigurationView {
    state: gpui::Entity<State>,
    loading_models_task: Option<Task<()>>,
}

impl ConfigurationView {
    pub fn new(state: gpui::Entity<State>, cx: &mut Context<Self>) -> Self {
        let loading_models_task = Some(cx.spawn({
            let state = state.clone();
            async move |this, cx| {
                if let Some(task) = state
                    .update(cx, |state, cx| state.authenticate(cx))
                    .log_err()
                {
                    task.await.log_err();
                }
                this.update(cx, |this, cx| {
                    this.loading_models_task = None;
                    cx.notify();
                })
                .log_err();
            }
        }));

        Self {
            state,
            loading_models_task,
        }
    }

    fn retry_connection(&self, cx: &mut App) {
        self.state
            .update(cx, |state, cx| state.fetch_models(cx))
            .detach_and_log_err(cx);
    }
}

impl Render for ConfigurationView {
    fn render(&mut self, _window: &mut Window, cx: &mut Context<Self>) -> impl IntoElement {
        let is_authenticated = self.state.read(cx).is_authenticated();

        let lmstudio_intro = "Run local LLMs like Llama, Phi, and Qwen.";

        if self.loading_models_task.is_some() {
            div().child(Label::new("Loading models...")).into_any()
        } else {
            v_flex()
                .gap_2()
                .child(
                    v_flex().gap_1().child(Label::new(lmstudio_intro)).child(
                        List::new()
                            .child(InstructionListItem::text_only(
                                "LM Studio needs to be running with at least one model downloaded.",
                            ))
                            .child(InstructionListItem::text_only(
                                "To get your first model, try running `lms get qwen2.5-coder-7b`",
                            )),
                    ),
                )
                .child(
                    h_flex()
                        .w_full()
                        .justify_between()
                        .gap_2()
                        .child(
                            h_flex()
                                .w_full()
                                .gap_2()
                                .map(|this| {
                                    if is_authenticated {
                                        this.child(
                                            Button::new("lmstudio-site", "LM Studio")
                                                .style(ButtonStyle::Subtle)
                                                .icon(IconName::ArrowUpRight)
                                                .icon_size(IconSize::XSmall)
                                                .icon_color(Color::Muted)
                                                .on_click(move |_, _window, cx| {
                                                    cx.open_url(LMSTUDIO_SITE)
                                                })
                                                .into_any_element(),
                                        )
                                    } else {
                                        this.child(
                                            Button::new(
                                                "download_lmstudio_button",
                                                "Download LM Studio",
                                            )
                                            .style(ButtonStyle::Subtle)
                                            .icon(IconName::ArrowUpRight)
                                            .icon_size(IconSize::XSmall)
                                            .icon_color(Color::Muted)
                                            .on_click(move |_, _window, cx| {
                                                cx.open_url(LMSTUDIO_DOWNLOAD_URL)
                                            })
                                            .into_any_element(),
                                        )
                                    }
                                })
                                .child(
                                    Button::new("view-models", "Model Catalog")
                                        .style(ButtonStyle::Subtle)
                                        .icon(IconName::ArrowUpRight)
                                        .icon_size(IconSize::XSmall)
                                        .icon_color(Color::Muted)
                                        .on_click(move |_, _window, cx| {
                                            cx.open_url(LMSTUDIO_CATALOG_URL)
                                        }),
                                ),
                        )
                        .map(|this| {
                            if is_authenticated {
                                this.child(
                                    ButtonLike::new("connected")
                                        .disabled(true)
                                        .cursor_style(gpui::CursorStyle::Arrow)
                                        .child(
                                            h_flex()
                                                .gap_2()
                                                .child(Indicator::dot().color(Color::Success))
                                                .child(Label::new("Connected"))
                                                .into_any_element(),
                                        ),
                                )
                            } else {
                                this.child(
                                    Button::new("retry_lmstudio_models", "Connect")
                                        .icon_position(IconPosition::Start)
                                        .icon_size(IconSize::XSmall)
                                        .icon(IconName::Play)
                                        .on_click(cx.listener(move |this, _, _window, cx| {
                                            this.retry_connection(cx)
                                        })),
                                )
                            }
                        }),
                )
                .into_any()
        }
    }
}