From 4ac793558991a7dce7941beb96d202858a9b05fe Mon Sep 17 00:00:00 2001
From: Umesh Yadav <23421535+imumesh18@users.noreply.github.com>
Date: Mon, 9 Jun 2025 15:25:34 +0530
Subject: [PATCH] language_models: Add thinking support to LM Studio provider
(#32337)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
It works similarly to how DeepSeek works: the thinking is returned as
`reasoning_content`, and we don't have to send the `reasoning_content` back
in the request.
This is an experimental feature which can be enabled from settings like
this:
Here is how it looks in use (tested with
`deepseek/deepseek-r1-0528-qwen3-8b`):
Release Notes:
- Add thinking support to LM Studio provider
---
.../language_models/src/provider/lmstudio.rs | 25 ++++++++++++-------
crates/lmstudio/src/lmstudio.rs | 2 ++
2 files changed, 18 insertions(+), 9 deletions(-)
diff --git a/crates/language_models/src/provider/lmstudio.rs b/crates/language_models/src/provider/lmstudio.rs
index a9129027d646453e84a3c474bba30127ac2ba6b7..792d39bfed1d21b866eda080a324309502189d72 100644
--- a/crates/language_models/src/provider/lmstudio.rs
+++ b/crates/language_models/src/provider/lmstudio.rs
@@ -250,15 +250,15 @@ impl LmStudioLanguageModel {
for message in request.messages {
for content in message.content {
match content {
- MessageContent::Text(text) | MessageContent::Thinking { text, .. } => messages
- .push(match message.role {
- Role::User => ChatMessage::User { content: text },
- Role::Assistant => ChatMessage::Assistant {
- content: Some(text),
- tool_calls: Vec::new(),
- },
- Role::System => ChatMessage::System { content: text },
- }),
+ MessageContent::Text(text) => messages.push(match message.role {
+ Role::User => ChatMessage::User { content: text },
+ Role::Assistant => ChatMessage::Assistant {
+ content: Some(text),
+ tool_calls: Vec::new(),
+ },
+ Role::System => ChatMessage::System { content: text },
+ }),
+ MessageContent::Thinking { .. } => {}
MessageContent::RedactedThinking(_) => {}
MessageContent::Image(_) => {}
MessageContent::ToolUse(tool_use) => {
@@ -471,6 +471,13 @@ impl LmStudioEventMapper {
events.push(Ok(LanguageModelCompletionEvent::Text(content)));
}
+ if let Some(reasoning_content) = choice.delta.reasoning_content {
+ events.push(Ok(LanguageModelCompletionEvent::Thinking {
+ text: reasoning_content,
+ signature: None,
+ }));
+ }
+
if let Some(tool_calls) = choice.delta.tool_calls {
for tool_call in tool_calls {
let entry = self.tool_calls_by_index.entry(tool_call.index).or_default();
diff --git a/crates/lmstudio/src/lmstudio.rs b/crates/lmstudio/src/lmstudio.rs
index b62909fe315ae7fbf05853cb6c8e59b8b48d0cb1..943f8a2a0df54d95138c629e36e6d73af3fb207c 100644
--- a/crates/lmstudio/src/lmstudio.rs
+++ b/crates/lmstudio/src/lmstudio.rs
@@ -277,6 +277,8 @@ pub struct ResponseMessageDelta {
     pub role: Option<Role>,
     pub content: Option<String>,
     #[serde(default, skip_serializing_if = "Option::is_none")]
+    pub reasoning_content: Option<String>,
+    #[serde(default, skip_serializing_if = "Option::is_none")]
     pub tool_calls: Option<Vec<ToolCallChunk>>,
}