1use anyhow::{Result, anyhow};
2use collections::HashMap;
3use fs::Fs;
4use futures::Stream;
5use futures::{FutureExt, StreamExt, future::BoxFuture, stream::BoxStream};
6use gpui::{AnyView, App, AsyncApp, Context, CursorStyle, Entity, Subscription, Task};
7use http_client::HttpClient;
8use language_model::{
9 ApiKeyState, AuthenticateError, EnvVar, IconOrSvg, LanguageModel, LanguageModelCompletionError,
10 LanguageModelCompletionEvent, LanguageModelToolChoice, LanguageModelToolResultContent,
11 LanguageModelToolUse, MessageContent, StopReason, TokenUsage, env_var,
12};
13use language_model::{
14 LanguageModelId, LanguageModelName, LanguageModelProvider, LanguageModelProviderId,
15 LanguageModelProviderName, LanguageModelProviderState, LanguageModelRequest, RateLimiter, Role,
16};
17use lmstudio::{LMSTUDIO_API_URL, ModelType, get_models};
18
19pub use settings::LmStudioAvailableModel as AvailableModel;
20use settings::{Settings, SettingsStore, update_settings_file};
21use std::pin::Pin;
22use std::sync::LazyLock;
23use std::{collections::BTreeMap, sync::Arc};
24use ui::{
25 ButtonLike, ConfiguredApiCard, ElevationIndex, List, ListBulletItem, Tooltip, prelude::*,
26};
27use ui_input::InputField;
28
29use crate::AllLanguageModelSettings;
30use crate::provider::util::parse_tool_arguments;
31
// External LM Studio pages linked from the configuration UI.
const LMSTUDIO_DOWNLOAD_URL: &str = "https://lmstudio.ai/download";
const LMSTUDIO_CATALOG_URL: &str = "https://lmstudio.ai/models";
const LMSTUDIO_SITE: &str = "https://lmstudio.ai/";

// Stable identifiers for this provider in the language-model registry.
const PROVIDER_ID: LanguageModelProviderId = LanguageModelProviderId::new("lmstudio");
const PROVIDER_NAME: LanguageModelProviderName = LanguageModelProviderName::new("LM Studio");

// Environment variable that can supply the API key instead of the settings UI.
const API_KEY_ENV_VAR_NAME: &str = "LMSTUDIO_API_KEY";
static API_KEY_ENV_VAR: LazyLock<EnvVar> = env_var!(API_KEY_ENV_VAR_NAME);
41
/// User-configurable settings for the LM Studio provider.
#[derive(Default, Debug, Clone, PartialEq)]
pub struct LmStudioSettings {
    // Base URL of the LM Studio server's HTTP API.
    pub api_url: String,
    // Models declared in settings; these override same-named models reported
    // by the server (see `provided_models`).
    pub available_models: Vec<AvailableModel>,
}
47
/// Language-model provider backed by a (usually local) LM Studio server.
pub struct LmStudioLanguageModelProvider {
    http_client: Arc<dyn HttpClient>,
    // Shared, observable provider state (API key + fetched model list).
    state: Entity<State>,
}
52
/// Shared provider state: API key handling, the model list fetched from the
/// server, and the in-flight fetch task.
pub struct State {
    api_key_state: ApiKeyState,
    http_client: Arc<dyn HttpClient>,
    // Models reported by the server (embeddings models filtered out).
    available_models: Vec<lmstudio::Model>,
    // Keeps the most recent model-fetch task alive; replaced on restart.
    fetch_model_task: Option<Task<Result<()>>>,
    // Watches settings changes so fetching restarts when they change.
    _subscription: Subscription,
}
60
61impl State {
62 fn is_authenticated(&self) -> bool {
63 !self.available_models.is_empty()
64 }
65
66 fn set_api_key(&mut self, api_key: Option<String>, cx: &mut Context<Self>) -> Task<Result<()>> {
67 let api_url = LmStudioLanguageModelProvider::api_url(cx).into();
68 let task = self
69 .api_key_state
70 .store(api_url, api_key, |this| &mut this.api_key_state, cx);
71 self.restart_fetch_models_task(cx);
72 task
73 }
74
75 fn fetch_models(&mut self, cx: &mut Context<Self>) -> Task<Result<()>> {
76 let settings = &AllLanguageModelSettings::get_global(cx).lmstudio;
77 let http_client = self.http_client.clone();
78 let api_url = settings.api_url.clone();
79 let api_key = self.api_key_state.key(&api_url);
80
81 // As a proxy for the server being "authenticated", we'll check if its up by fetching the models
82 cx.spawn(async move |this, cx| {
83 let models =
84 get_models(http_client.as_ref(), &api_url, api_key.as_deref(), None).await?;
85
86 let mut models: Vec<lmstudio::Model> = models
87 .into_iter()
88 .filter(|model| model.r#type != ModelType::Embeddings)
89 .map(|model| {
90 lmstudio::Model::new(
91 &model.id,
92 None,
93 model
94 .loaded_context_length
95 .or_else(|| model.max_context_length),
96 model.capabilities.supports_tool_calls(),
97 model.capabilities.supports_images() || model.r#type == ModelType::Vlm,
98 )
99 })
100 .collect();
101
102 models.sort_by(|a, b| a.name.cmp(&b.name));
103
104 this.update(cx, |this, cx| {
105 this.available_models = models;
106 cx.notify();
107 })
108 })
109 }
110
111 fn restart_fetch_models_task(&mut self, cx: &mut Context<Self>) {
112 let task = self.fetch_models(cx);
113 self.fetch_model_task.replace(task);
114 }
115
116 fn authenticate(&mut self, cx: &mut Context<Self>) -> Task<Result<(), AuthenticateError>> {
117 let api_url = LmStudioLanguageModelProvider::api_url(cx).into();
118 let _task = self
119 .api_key_state
120 .load_if_needed(api_url, |this| &mut this.api_key_state, cx);
121
122 if self.is_authenticated() {
123 return Task::ready(Ok(()));
124 }
125
126 let fetch_models_task = self.fetch_models(cx);
127 cx.spawn(async move |_this, _cx| {
128 match fetch_models_task.await {
129 Ok(()) => Ok(()),
130 Err(err) => {
131 // If any cause in the error chain is an std::io::Error with
132 // ErrorKind::ConnectionRefused, treat this as "credentials not found"
133 // (i.e. LM Studio not running).
134 let mut connection_refused = false;
135 for cause in err.chain() {
136 if let Some(io_err) = cause.downcast_ref::<std::io::Error>() {
137 if io_err.kind() == std::io::ErrorKind::ConnectionRefused {
138 connection_refused = true;
139 break;
140 }
141 }
142 }
143 if connection_refused {
144 Err(AuthenticateError::ConnectionRefused)
145 } else {
146 Err(AuthenticateError::Other(err))
147 }
148 }
149 }
150 })
151 }
152}
153
impl LmStudioLanguageModelProvider {
    /// Creates the provider and kicks off an initial model fetch.
    pub fn new(http_client: Arc<dyn HttpClient>, cx: &mut App) -> Self {
        let this = Self {
            http_client: http_client.clone(),
            state: cx.new(|cx| {
                // Restart model fetching whenever the LM Studio settings
                // actually change (the captured copy is used for comparison).
                let subscription = cx.observe_global::<SettingsStore>({
                    let mut settings = AllLanguageModelSettings::get_global(cx).lmstudio.clone();
                    move |this: &mut State, cx| {
                        let new_settings = &AllLanguageModelSettings::get_global(cx).lmstudio;
                        if &settings != new_settings {
                            settings = new_settings.clone();
                            this.restart_fetch_models_task(cx);
                            cx.notify();
                        }
                    }
                });

                State {
                    api_key_state: ApiKeyState::new(
                        Self::api_url(cx).into(),
                        (*API_KEY_ENV_VAR).clone(),
                    ),
                    http_client,
                    available_models: Default::default(),
                    fetch_model_task: None,
                    _subscription: subscription,
                }
            }),
        };
        // Populate the model list immediately rather than waiting for a
        // settings change or explicit authenticate call.
        this.state
            .update(cx, |state, cx| state.restart_fetch_models_task(cx));
        this
    }

    /// The LM Studio API URL currently configured in settings.
    fn api_url(cx: &App) -> String {
        AllLanguageModelSettings::get_global(cx)
            .lmstudio
            .api_url
            .clone()
    }

    /// Whether the configured URL differs from the LM Studio default.
    fn has_custom_url(cx: &App) -> bool {
        Self::api_url(cx) != LMSTUDIO_API_URL
    }
}
199
impl LanguageModelProviderState for LmStudioLanguageModelProvider {
    type ObservableEntity = State;

    // Expose the state entity so observers re-render on model-list changes.
    fn observable_entity(&self) -> Option<Entity<Self::ObservableEntity>> {
        Some(self.state.clone())
    }
}
207
208impl LanguageModelProvider for LmStudioLanguageModelProvider {
209 fn id(&self) -> LanguageModelProviderId {
210 PROVIDER_ID
211 }
212
213 fn name(&self) -> LanguageModelProviderName {
214 PROVIDER_NAME
215 }
216
217 fn icon(&self) -> IconOrSvg {
218 IconOrSvg::Icon(IconName::AiLmStudio)
219 }
220
221 fn default_model(&self, _: &App) -> Option<Arc<dyn LanguageModel>> {
222 // We shouldn't try to select default model, because it might lead to a load call for an unloaded model.
223 // In a constrained environment where user might not have enough resources it'll be a bad UX to select something
224 // to load by default.
225 None
226 }
227
228 fn default_fast_model(&self, _: &App) -> Option<Arc<dyn LanguageModel>> {
229 // See explanation for default_model.
230 None
231 }
232
233 fn provided_models(&self, cx: &App) -> Vec<Arc<dyn LanguageModel>> {
234 let mut models: BTreeMap<String, lmstudio::Model> = BTreeMap::default();
235
236 // Add models from the LM Studio API
237 for model in self.state.read(cx).available_models.iter() {
238 models.insert(model.name.clone(), model.clone());
239 }
240
241 // Override with available models from settings
242 for model in AllLanguageModelSettings::get_global(cx)
243 .lmstudio
244 .available_models
245 .iter()
246 {
247 models.insert(
248 model.name.clone(),
249 lmstudio::Model {
250 name: model.name.clone(),
251 display_name: model.display_name.clone(),
252 max_tokens: model.max_tokens,
253 supports_tool_calls: model.supports_tool_calls,
254 supports_images: model.supports_images,
255 },
256 );
257 }
258
259 models
260 .into_values()
261 .map(|model| {
262 Arc::new(LmStudioLanguageModel {
263 id: LanguageModelId::from(model.name.clone()),
264 model,
265 http_client: self.http_client.clone(),
266 request_limiter: RateLimiter::new(4),
267 state: self.state.clone(),
268 }) as Arc<dyn LanguageModel>
269 })
270 .collect()
271 }
272
273 fn is_authenticated(&self, cx: &App) -> bool {
274 self.state.read(cx).is_authenticated()
275 }
276
277 fn authenticate(&self, cx: &mut App) -> Task<Result<(), AuthenticateError>> {
278 self.state.update(cx, |state, cx| state.authenticate(cx))
279 }
280
281 fn configuration_view(
282 &self,
283 _target_agent: language_model::ConfigurationViewTargetAgent,
284 _window: &mut Window,
285 cx: &mut App,
286 ) -> AnyView {
287 cx.new(|cx| ConfigurationView::new(self.state.clone(), _window, cx))
288 .into()
289 }
290
291 fn reset_credentials(&self, cx: &mut App) -> Task<Result<()>> {
292 self.state
293 .update(cx, |state, cx| state.set_api_key(None, cx))
294 }
295}
296
/// A single LM Studio-hosted model exposed through the `LanguageModel` trait.
pub struct LmStudioLanguageModel {
    id: LanguageModelId,
    model: lmstudio::Model,
    http_client: Arc<dyn HttpClient>,
    // Caps concurrent streaming requests to the server.
    request_limiter: RateLimiter,
    // Read for the API key/URL when starting a completion.
    state: Entity<State>,
}
304
305impl LmStudioLanguageModel {
306 fn to_lmstudio_request(
307 &self,
308 request: LanguageModelRequest,
309 ) -> lmstudio::ChatCompletionRequest {
310 let mut messages = Vec::new();
311
312 for message in request.messages {
313 for content in message.content {
314 match content {
315 MessageContent::Text(text) => add_message_content_part(
316 lmstudio::MessagePart::Text { text },
317 message.role,
318 &mut messages,
319 ),
320 MessageContent::Thinking { .. } => {}
321 MessageContent::RedactedThinking(_) => {}
322 MessageContent::Image(image) => {
323 add_message_content_part(
324 lmstudio::MessagePart::Image {
325 image_url: lmstudio::ImageUrl {
326 url: image.to_base64_url(),
327 detail: None,
328 },
329 },
330 message.role,
331 &mut messages,
332 );
333 }
334 MessageContent::ToolUse(tool_use) => {
335 let tool_call = lmstudio::ToolCall {
336 id: tool_use.id.to_string(),
337 content: lmstudio::ToolCallContent::Function {
338 function: lmstudio::FunctionContent {
339 name: tool_use.name.to_string(),
340 arguments: serde_json::to_string(&tool_use.input)
341 .unwrap_or_default(),
342 },
343 },
344 };
345
346 if let Some(lmstudio::ChatMessage::Assistant { tool_calls, .. }) =
347 messages.last_mut()
348 {
349 tool_calls.push(tool_call);
350 } else {
351 messages.push(lmstudio::ChatMessage::Assistant {
352 content: None,
353 tool_calls: vec![tool_call],
354 });
355 }
356 }
357 MessageContent::ToolResult(tool_result) => {
358 let content = match &tool_result.content {
359 LanguageModelToolResultContent::Text(text) => {
360 vec![lmstudio::MessagePart::Text {
361 text: text.to_string(),
362 }]
363 }
364 LanguageModelToolResultContent::Image(image) => {
365 vec![lmstudio::MessagePart::Image {
366 image_url: lmstudio::ImageUrl {
367 url: image.to_base64_url(),
368 detail: None,
369 },
370 }]
371 }
372 };
373
374 messages.push(lmstudio::ChatMessage::Tool {
375 content: content.into(),
376 tool_call_id: tool_result.tool_use_id.to_string(),
377 });
378 }
379 }
380 }
381 }
382
383 lmstudio::ChatCompletionRequest {
384 model: self.model.name.clone(),
385 messages,
386 stream: true,
387 max_tokens: Some(-1),
388 stop: Some(request.stop),
389 // In LM Studio you can configure specific settings you'd like to use for your model.
390 // For example Qwen3 is recommended to be used with 0.7 temperature.
391 // It would be a bad UX to silently override these settings from Zed, so we pass no temperature as a default.
392 temperature: request.temperature.or(None),
393 tools: request
394 .tools
395 .into_iter()
396 .map(|tool| lmstudio::ToolDefinition::Function {
397 function: lmstudio::FunctionDefinition {
398 name: tool.name,
399 description: Some(tool.description),
400 parameters: Some(tool.input_schema),
401 },
402 })
403 .collect(),
404 tool_choice: request.tool_choice.map(|choice| match choice {
405 LanguageModelToolChoice::Auto => lmstudio::ToolChoice::Auto,
406 LanguageModelToolChoice::Any => lmstudio::ToolChoice::Required,
407 LanguageModelToolChoice::None => lmstudio::ToolChoice::None,
408 }),
409 }
410 }
411
412 fn stream_completion(
413 &self,
414 request: lmstudio::ChatCompletionRequest,
415 cx: &AsyncApp,
416 ) -> BoxFuture<
417 'static,
418 Result<futures::stream::BoxStream<'static, Result<lmstudio::ResponseStreamEvent>>>,
419 > {
420 let http_client = self.http_client.clone();
421 let (api_key, api_url) = self.state.read_with(cx, |state, cx| {
422 let api_url = LmStudioLanguageModelProvider::api_url(cx);
423 (state.api_key_state.key(&api_url), api_url)
424 });
425
426 let future = self.request_limiter.stream(async move {
427 let stream = lmstudio::stream_chat_completion(
428 http_client.as_ref(),
429 &api_url,
430 api_key.as_deref(),
431 request,
432 )
433 .await?;
434 Ok(stream)
435 });
436
437 async move { Ok(future.await?.boxed()) }.boxed()
438 }
439}
440
441impl LanguageModel for LmStudioLanguageModel {
442 fn id(&self) -> LanguageModelId {
443 self.id.clone()
444 }
445
446 fn name(&self) -> LanguageModelName {
447 LanguageModelName::from(self.model.display_name().to_string())
448 }
449
450 fn provider_id(&self) -> LanguageModelProviderId {
451 PROVIDER_ID
452 }
453
454 fn provider_name(&self) -> LanguageModelProviderName {
455 PROVIDER_NAME
456 }
457
458 fn supports_tools(&self) -> bool {
459 self.model.supports_tool_calls()
460 }
461
462 fn supports_tool_choice(&self, choice: LanguageModelToolChoice) -> bool {
463 self.supports_tools()
464 && match choice {
465 LanguageModelToolChoice::Auto => true,
466 LanguageModelToolChoice::Any => true,
467 LanguageModelToolChoice::None => true,
468 }
469 }
470
471 fn supports_images(&self) -> bool {
472 self.model.supports_images
473 }
474
475 fn telemetry_id(&self) -> String {
476 format!("lmstudio/{}", self.model.id())
477 }
478
479 fn max_token_count(&self) -> u64 {
480 self.model.max_token_count()
481 }
482
483 fn count_tokens(
484 &self,
485 request: LanguageModelRequest,
486 _cx: &App,
487 ) -> BoxFuture<'static, Result<u64>> {
488 // Endpoint for this is coming soon. In the meantime, hacky estimation
489 let token_count = request
490 .messages
491 .iter()
492 .map(|msg| msg.string_contents().split_whitespace().count())
493 .sum::<usize>();
494
495 let estimated_tokens = (token_count as f64 * 0.75) as u64;
496 async move { Ok(estimated_tokens) }.boxed()
497 }
498
499 fn stream_completion(
500 &self,
501 request: LanguageModelRequest,
502 cx: &AsyncApp,
503 ) -> BoxFuture<
504 'static,
505 Result<
506 BoxStream<'static, Result<LanguageModelCompletionEvent, LanguageModelCompletionError>>,
507 LanguageModelCompletionError,
508 >,
509 > {
510 let request = self.to_lmstudio_request(request);
511 let completions = self.stream_completion(request, cx);
512 async move {
513 let mapper = LmStudioEventMapper::new();
514 Ok(mapper.map_stream(completions.await?).boxed())
515 }
516 .boxed()
517 }
518}
519
/// Maps LM Studio response-stream chunks to completion events.
struct LmStudioEventMapper {
    // Partial tool calls being assembled across chunks, keyed by stream index.
    tool_calls_by_index: HashMap<usize, RawToolCall>,
}
523
524impl LmStudioEventMapper {
525 fn new() -> Self {
526 Self {
527 tool_calls_by_index: HashMap::default(),
528 }
529 }
530
531 pub fn map_stream(
532 mut self,
533 events: Pin<Box<dyn Send + Stream<Item = Result<lmstudio::ResponseStreamEvent>>>>,
534 ) -> impl Stream<Item = Result<LanguageModelCompletionEvent, LanguageModelCompletionError>>
535 {
536 events.flat_map(move |event| {
537 futures::stream::iter(match event {
538 Ok(event) => self.map_event(event),
539 Err(error) => vec![Err(LanguageModelCompletionError::from(error))],
540 })
541 })
542 }
543
544 pub fn map_event(
545 &mut self,
546 event: lmstudio::ResponseStreamEvent,
547 ) -> Vec<Result<LanguageModelCompletionEvent, LanguageModelCompletionError>> {
548 let Some(choice) = event.choices.into_iter().next() else {
549 return vec![Err(LanguageModelCompletionError::from(anyhow!(
550 "Response contained no choices"
551 )))];
552 };
553
554 let mut events = Vec::new();
555 if let Some(content) = choice.delta.content {
556 events.push(Ok(LanguageModelCompletionEvent::Text(content)));
557 }
558
559 if let Some(reasoning_content) = choice.delta.reasoning_content {
560 events.push(Ok(LanguageModelCompletionEvent::Thinking {
561 text: reasoning_content,
562 signature: None,
563 }));
564 }
565
566 if let Some(tool_calls) = choice.delta.tool_calls {
567 for tool_call in tool_calls {
568 let entry = self.tool_calls_by_index.entry(tool_call.index).or_default();
569
570 if let Some(tool_id) = tool_call.id {
571 entry.id = tool_id;
572 }
573
574 if let Some(function) = tool_call.function {
575 if let Some(name) = function.name {
576 // At the time of writing this code LM Studio (0.3.15) is incompatible with the OpenAI API:
577 // 1. It sends function name in the first chunk
578 // 2. It sends empty string in the function name field in all subsequent chunks for arguments
579 // According to https://platform.openai.com/docs/guides/function-calling?api-mode=responses#streaming
580 // function name field should be sent only inside the first chunk.
581 if !name.is_empty() {
582 entry.name = name;
583 }
584 }
585
586 if let Some(arguments) = function.arguments {
587 entry.arguments.push_str(&arguments);
588 }
589 }
590 }
591 }
592
593 if let Some(usage) = event.usage {
594 events.push(Ok(LanguageModelCompletionEvent::UsageUpdate(TokenUsage {
595 input_tokens: usage.prompt_tokens,
596 output_tokens: usage.completion_tokens,
597 cache_creation_input_tokens: 0,
598 cache_read_input_tokens: 0,
599 })));
600 }
601
602 match choice.finish_reason.as_deref() {
603 Some("stop") => {
604 events.push(Ok(LanguageModelCompletionEvent::Stop(StopReason::EndTurn)));
605 }
606 Some("tool_calls") => {
607 events.extend(self.tool_calls_by_index.drain().map(|(_, tool_call)| {
608 match parse_tool_arguments(&tool_call.arguments) {
609 Ok(input) => Ok(LanguageModelCompletionEvent::ToolUse(
610 LanguageModelToolUse {
611 id: tool_call.id.into(),
612 name: tool_call.name.into(),
613 is_input_complete: true,
614 input,
615 raw_input: tool_call.arguments,
616 thought_signature: None,
617 },
618 )),
619 Err(error) => Ok(LanguageModelCompletionEvent::ToolUseJsonParseError {
620 id: tool_call.id.into(),
621 tool_name: tool_call.name.into(),
622 raw_input: tool_call.arguments.into(),
623 json_parse_error: error.to_string(),
624 }),
625 }
626 }));
627
628 events.push(Ok(LanguageModelCompletionEvent::Stop(StopReason::ToolUse)));
629 }
630 Some(stop_reason) => {
631 log::error!("Unexpected LMStudio stop_reason: {stop_reason:?}",);
632 events.push(Ok(LanguageModelCompletionEvent::Stop(StopReason::EndTurn)));
633 }
634 None => {}
635 }
636
637 events
638 }
639}
640
/// Accumulates the streamed fragments of a single tool call until the
/// "tool_calls" finish reason arrives.
#[derive(Default)]
struct RawToolCall {
    id: String,
    name: String,
    // JSON argument text, concatenated across chunks.
    arguments: String,
}
647
648fn add_message_content_part(
649 new_part: lmstudio::MessagePart,
650 role: Role,
651 messages: &mut Vec<lmstudio::ChatMessage>,
652) {
653 match (role, messages.last_mut()) {
654 (Role::User, Some(lmstudio::ChatMessage::User { content }))
655 | (
656 Role::Assistant,
657 Some(lmstudio::ChatMessage::Assistant {
658 content: Some(content),
659 ..
660 }),
661 )
662 | (Role::System, Some(lmstudio::ChatMessage::System { content })) => {
663 content.push_part(new_part);
664 }
665 _ => {
666 messages.push(match role {
667 Role::User => lmstudio::ChatMessage::User {
668 content: lmstudio::MessageContent::from(vec![new_part]),
669 },
670 Role::Assistant => lmstudio::ChatMessage::Assistant {
671 content: Some(lmstudio::MessageContent::from(vec![new_part])),
672 tool_calls: Vec::new(),
673 },
674 Role::System => lmstudio::ChatMessage::System {
675 content: lmstudio::MessageContent::from(vec![new_part]),
676 },
677 });
678 }
679 }
680}
681
/// Settings UI for connecting Zed to an LM Studio server.
struct ConfigurationView {
    state: Entity<State>,
    // Editor for the (optional) API key.
    api_key_editor: Entity<InputField>,
    // Editor for the server URL.
    api_url_editor: Entity<InputField>,
}
687
688impl ConfigurationView {
689 pub fn new(state: Entity<State>, _window: &mut Window, cx: &mut Context<Self>) -> Self {
690 let api_key_editor = cx.new(|cx| InputField::new(_window, cx, "sk-...").label("API key"));
691
692 let api_url_editor = cx.new(|cx| {
693 let input = InputField::new(_window, cx, LMSTUDIO_API_URL).label("API URL");
694 input.set_text(&LmStudioLanguageModelProvider::api_url(cx), _window, cx);
695 input
696 });
697
698 cx.observe(&state, |_, _, cx| {
699 cx.notify();
700 })
701 .detach();
702
703 Self {
704 state,
705 api_key_editor,
706 api_url_editor,
707 }
708 }
709
710 fn retry_connection(&mut self, _window: &mut Window, cx: &mut Context<Self>) {
711 let has_api_url = LmStudioLanguageModelProvider::has_custom_url(cx);
712 let has_api_key = self
713 .state
714 .read_with(cx, |state, _| state.api_key_state.has_key());
715 if !has_api_url {
716 self.save_api_url(cx);
717 }
718 if !has_api_key {
719 self.save_api_key(&Default::default(), _window, cx);
720 }
721
722 self.state.update(cx, |state, cx| {
723 state.restart_fetch_models_task(cx);
724 });
725 }
726
727 fn save_api_key(&mut self, _: &menu::Confirm, _window: &mut Window, cx: &mut Context<Self>) {
728 let api_key = self.api_key_editor.read(cx).text(cx).trim().to_string();
729 if api_key.is_empty() {
730 return;
731 }
732
733 self.api_key_editor
734 .update(cx, |input, cx| input.set_text("", _window, cx));
735
736 let state = self.state.clone();
737 cx.spawn_in(_window, async move |_, cx| {
738 state
739 .update(cx, |state, cx| state.set_api_key(Some(api_key), cx))
740 .await
741 })
742 .detach_and_log_err(cx);
743 }
744
745 fn reset_api_key(&mut self, _window: &mut Window, cx: &mut Context<Self>) {
746 self.api_key_editor
747 .update(cx, |input, cx| input.set_text("", _window, cx));
748
749 let state = self.state.clone();
750 cx.spawn_in(_window, async move |_, cx| {
751 state
752 .update(cx, |state, cx| state.set_api_key(None, cx))
753 .await
754 })
755 .detach_and_log_err(cx);
756
757 cx.notify();
758 }
759
760 fn save_api_url(&self, cx: &mut Context<Self>) {
761 let api_url = self.api_url_editor.read(cx).text(cx).trim().to_string();
762 let current_url = LmStudioLanguageModelProvider::api_url(cx);
763 if !api_url.is_empty() && &api_url != ¤t_url {
764 self.state
765 .update(cx, |state, cx| state.set_api_key(None, cx))
766 .detach_and_log_err(cx);
767
768 let fs = <dyn Fs>::global(cx);
769 update_settings_file(fs, cx, move |settings, _| {
770 settings
771 .language_models
772 .get_or_insert_default()
773 .lmstudio
774 .get_or_insert_default()
775 .api_url = Some(api_url);
776 });
777 }
778 }
779
780 fn reset_api_url(&mut self, _window: &mut Window, cx: &mut Context<Self>) {
781 self.api_url_editor
782 .update(cx, |input, cx| input.set_text("", _window, cx));
783
784 // Clear API key when URL changes since keys are URL-specific
785 self.state
786 .update(cx, |state, cx| state.set_api_key(None, cx))
787 .detach_and_log_err(cx);
788
789 let fs = <dyn Fs>::global(cx);
790 update_settings_file(fs, cx, |settings, _cx| {
791 if let Some(settings) = settings
792 .language_models
793 .as_mut()
794 .and_then(|models| models.lmstudio.as_mut())
795 {
796 settings.api_url = Some(LMSTUDIO_API_URL.into());
797 }
798 });
799 cx.notify();
800 }
801
802 fn render_api_url_editor(&self, cx: &Context<Self>) -> impl IntoElement {
803 let api_url = LmStudioLanguageModelProvider::api_url(cx);
804 let custom_api_url_set = api_url != LMSTUDIO_API_URL;
805
806 if custom_api_url_set {
807 h_flex()
808 .p_3()
809 .justify_between()
810 .rounded_md()
811 .border_1()
812 .border_color(cx.theme().colors().border)
813 .bg(cx.theme().colors().elevated_surface_background)
814 .child(
815 h_flex()
816 .gap_2()
817 .child(Icon::new(IconName::Check).color(Color::Success))
818 .child(v_flex().gap_1().child(Label::new(api_url))),
819 )
820 .child(
821 Button::new("reset-api-url", "Reset API URL")
822 .label_size(LabelSize::Small)
823 .icon(IconName::Undo)
824 .icon_size(IconSize::Small)
825 .icon_position(IconPosition::Start)
826 .layer(ElevationIndex::ModalSurface)
827 .on_click(
828 cx.listener(|this, _, _window, cx| this.reset_api_url(_window, cx)),
829 ),
830 )
831 .into_any_element()
832 } else {
833 v_flex()
834 .on_action(cx.listener(|this, _: &menu::Confirm, _window, cx| {
835 this.save_api_url(cx);
836 cx.notify();
837 }))
838 .gap_2()
839 .child(self.api_url_editor.clone())
840 .into_any_element()
841 }
842 }
843
844 fn render_api_key_editor(&self, cx: &Context<Self>) -> impl IntoElement {
845 let state = self.state.read(cx);
846 let env_var_set = state.api_key_state.is_from_env_var();
847 let configured_card_label = if env_var_set {
848 format!("API key set in {API_KEY_ENV_VAR_NAME} environment variable.")
849 } else {
850 "API key configured".to_string()
851 };
852
853 if !state.api_key_state.has_key() {
854 v_flex()
855 .on_action(cx.listener(Self::save_api_key))
856 .child(self.api_key_editor.clone())
857 .child(
858 Label::new(format!(
859 "You can also set the {API_KEY_ENV_VAR_NAME} environment variable and restart Zed."
860 ))
861 .size(LabelSize::Small)
862 .color(Color::Muted),
863 )
864 .into_any_element()
865 } else {
866 ConfiguredApiCard::new(configured_card_label)
867 .disabled(env_var_set)
868 .on_click(cx.listener(|this, _, _window, cx| this.reset_api_key(_window, cx)))
869 .when(env_var_set, |this| {
870 this.tooltip_label(format!(
871 "To reset your API key, unset the {API_KEY_ENV_VAR_NAME} environment variable."
872 ))
873 })
874 .into_any_element()
875 }
876 }
877}
878
impl Render for ConfigurationView {
    /// Renders the LM Studio configuration panel: intro text, URL/key
    /// editors, external links, and a connect/connected control.
    fn render(&mut self, _window: &mut Window, cx: &mut Context<Self>) -> impl IntoElement {
        let is_authenticated = self.state.read(cx).is_authenticated();

        v_flex()
            .gap_2()
            .child(
                v_flex()
                    .gap_1()
                    .child(Label::new("Run local LLMs like Llama, Phi, and Qwen."))
                    .child(
                        List::new()
                            .child(ListBulletItem::new(
                                "LM Studio needs to be running with at least one model downloaded.",
                            ))
                            .child(
                                ListBulletItem::new("")
                                    .child(Label::new("To get your first model, try running"))
                                    .child(Label::new("lms get qwen2.5-coder-7b").inline_code(cx)),
                            ),
                    )
                    .child(Label::new(
                        "Alternatively, you can connect to an LM Studio server by specifying its \
                        URL and API key (may not be required):",
                    )),
            )
            .child(self.render_api_url_editor(cx))
            .child(self.render_api_key_editor(cx))
            // Footer row: site/download + catalog links on the left,
            // connect/connected control on the right.
            .child(
                h_flex()
                    .w_full()
                    .justify_between()
                    .gap_2()
                    .child(
                        h_flex()
                            .w_full()
                            .gap_2()
                            .map(|this| {
                                if is_authenticated {
                                    this.child(
                                        Button::new("lmstudio-site", "LM Studio")
                                            .style(ButtonStyle::Subtle)
                                            .icon(IconName::ArrowUpRight)
                                            .icon_size(IconSize::Small)
                                            .icon_color(Color::Muted)
                                            .on_click(move |_, _window, cx| {
                                                cx.open_url(LMSTUDIO_SITE)
                                            })
                                            .into_any_element(),
                                    )
                                } else {
                                    this.child(
                                        Button::new(
                                            "download_lmstudio_button",
                                            "Download LM Studio",
                                        )
                                        .style(ButtonStyle::Subtle)
                                        .icon(IconName::ArrowUpRight)
                                        .icon_size(IconSize::Small)
                                        .icon_color(Color::Muted)
                                        .on_click(move |_, _window, cx| {
                                            cx.open_url(LMSTUDIO_DOWNLOAD_URL)
                                        })
                                        .into_any_element(),
                                    )
                                }
                            })
                            .child(
                                Button::new("view-models", "Model Catalog")
                                    .style(ButtonStyle::Subtle)
                                    .icon(IconName::ArrowUpRight)
                                    .icon_size(IconSize::Small)
                                    .icon_color(Color::Muted)
                                    .on_click(move |_, _window, cx| {
                                        cx.open_url(LMSTUDIO_CATALOG_URL)
                                    }),
                            ),
                    )
                    .map(|this| {
                        if is_authenticated {
                            // Connected: disabled indicator plus a refresh
                            // button that clears models and re-fetches.
                            this.child(
                                ButtonLike::new("connected")
                                    .disabled(true)
                                    .cursor_style(CursorStyle::Arrow)
                                    .child(
                                        h_flex()
                                            .gap_2()
                                            .child(Icon::new(IconName::Check).color(Color::Success))
                                            .child(Label::new("Connected"))
                                            .into_any_element(),
                                    )
                                    .child(
                                        IconButton::new("refresh-models", IconName::RotateCcw)
                                            .tooltip(Tooltip::text("Refresh Models"))
                                            .on_click(cx.listener(|this, _, _window, cx| {
                                                this.state.update(cx, |state, _| {
                                                    state.available_models.clear();
                                                });
                                                this.retry_connection(_window, cx);
                                            })),
                                    ),
                            )
                        } else {
                            this.child(
                                Button::new("retry_lmstudio_models", "Connect")
                                    .icon_position(IconPosition::Start)
                                    .icon_size(IconSize::XSmall)
                                    .icon(IconName::PlayFilled)
                                    .on_click(cx.listener(move |this, _, _window, cx| {
                                        this.retry_connection(_window, cx)
                                    })),
                            )
                        }
                    }),
            )
    }
}