agent: Add support for setting thinking effort for Zed provider (#48545)

Created by Marshall Bowers

This PR adds the ability to set the thinking effort of a model.

Right now this only applies to Opus 4.6 through the Zed provider.

This is gated behind the `cloud-thinking-toggle` feature flag.

UI is still rough; needs a design pass:

<img width="639" height="163" alt="Screenshot 2026-02-05 at 7 45 54 PM"
src="https://github.com/user-attachments/assets/2b5a9ef8-74cd-498e-9c81-b92666572409"
/>

<img width="263" height="148" alt="Screenshot 2026-02-05 at 7 45 58 PM"
src="https://github.com/user-attachments/assets/40232cb0-1743-443b-b04c-5cd33065513d"
/>

Release Notes:

- N/A

Change summary

crates/agent/src/agent.rs                                        |   8 
crates/agent/src/edit_agent.rs                                   |   1 
crates/agent/src/native_agent_server.rs                          |   1 
crates/agent/src/thread.rs                                       |  26 
crates/agent_ui/src/acp/thread_view/active_thread.rs             | 162 +
crates/agent_ui/src/agent_configuration/manage_profiles_modal.rs |   3 
crates/agent_ui/src/agent_panel.rs                               |   1 
crates/agent_ui/src/buffer_codegen.rs                            |   2 
crates/agent_ui/src/favorite_models.rs                           |   1 
crates/agent_ui/src/terminal_inline_assistant.rs                 |   1 
crates/agent_ui/src/text_thread_editor.rs                        |  10 
crates/anthropic/src/anthropic.rs                                |   3 
crates/assistant_text_thread/src/text_thread.rs                  |   1 
crates/cloud_llm_client/src/cloud_llm_client.rs                  |   2 
crates/eval/src/instance.rs                                      |   1 
crates/git_ui/src/git_panel.rs                                   |   3 
crates/language_model/src/language_model.rs                      |   8 
crates/language_model/src/request.rs                             |   1 
crates/language_models/src/provider/anthropic.rs                 |   1 
crates/language_models/src/provider/cloud.rs                     |  14 
crates/language_models/src/provider/copilot_chat.rs              |   1 
crates/language_models/src/provider/mistral.rs                   |   2 
crates/language_models/src/provider/open_ai.rs                   |   3 
crates/rules_library/src/rules_library.rs                        |   1 
crates/settings_content/src/agent.rs                             |   2 
25 files changed, 224 insertions(+), 35 deletions(-)

Detailed changes

crates/agent/src/agent.rs 🔗

@@ -1151,8 +1151,15 @@ impl acp_thread::AgentModelSelector for NativeAgentModelSelector {
             return Task::ready(Err(anyhow!("Invalid model ID {}", model_id)));
         };
 
+        // We want to reset the effort level when switching models, as the currently-selected effort level may
+        // not be compatible.
+        let effort = model
+            .default_effort_level()
+            .map(|effort_level| effort_level.value.to_string());
+
         thread.update(cx, |thread, cx| {
             thread.set_model(model.clone(), cx);
+            thread.set_thinking_effort(effort.clone(), cx);
         });
 
         update_settings_file(
@@ -1178,6 +1185,7 @@ impl acp_thread::AgentModelSelector for NativeAgentModelSelector {
                         provider: provider.into(),
                         model,
                         enable_thinking,
+                        effort,
                     });
             },
         );

crates/agent/src/edit_agent.rs 🔗

@@ -732,6 +732,7 @@ impl EditAgent {
             stop: Vec::new(),
             temperature: None,
             thinking_allowed: true,
+            thinking_effort: None,
         };
 
         Ok(self.model.stream_completion_text(request, cx).await?.stream)

crates/agent/src/native_agent_server.rs 🔗

@@ -108,6 +108,7 @@ fn model_id_to_selection(model_id: &acp::ModelId) -> LanguageModelSelection {
         provider: provider.to_owned().into(),
         model: model.to_owned(),
         enable_thinking: false,
+        effort: None,
     }
 }
 

crates/agent/src/thread.rs 🔗

@@ -797,6 +797,7 @@ pub struct Thread {
     model: Option<Arc<dyn LanguageModel>>,
     summarization_model: Option<Arc<dyn LanguageModel>>,
     thinking_enabled: bool,
+    thinking_effort: Option<String>,
     prompt_capabilities_tx: watch::Sender<acp::PromptCapabilities>,
     pub(crate) prompt_capabilities_rx: watch::Receiver<acp::PromptCapabilities>,
     pub(crate) project: Entity<Project>,
@@ -833,6 +834,10 @@ impl Thread {
             .default_model
             .as_ref()
             .is_some_and(|model| model.enable_thinking);
+        let thinking_effort = settings
+            .default_model
+            .as_ref()
+            .and_then(|model| model.effort.clone());
         let action_log = cx.new(|_cx| ActionLog::new(project.clone()));
         let (prompt_capabilities_tx, prompt_capabilities_rx) =
             watch::channel(Self::prompt_capabilities(model.as_deref()));
@@ -865,6 +870,7 @@ impl Thread {
             model,
             summarization_model: None,
             thinking_enabled: enable_thinking,
+            thinking_effort,
             prompt_capabilities_tx,
             prompt_capabilities_rx,
             project,
@@ -892,6 +898,10 @@ impl Thread {
             .default_model
             .as_ref()
             .is_some_and(|model| model.enable_thinking);
+        let thinking_effort = settings
+            .default_model
+            .as_ref()
+            .and_then(|model| model.effort.clone());
         let action_log = cx.new(|_cx| ActionLog::new(project.clone()));
         let (prompt_capabilities_tx, prompt_capabilities_rx) =
             watch::channel(Self::prompt_capabilities(Some(model.as_ref())));
@@ -932,6 +942,7 @@ impl Thread {
             model: Some(model),
             summarization_model: None,
             thinking_enabled: enable_thinking,
+            thinking_effort,
             prompt_capabilities_tx,
             prompt_capabilities_rx,
             project,
@@ -1079,6 +1090,10 @@ impl Thread {
             .default_model
             .as_ref()
             .is_some_and(|model| model.enable_thinking);
+        let thinking_effort = settings
+            .default_model
+            .as_ref()
+            .and_then(|model| model.effort.clone());
 
         let mut model = LanguageModelRegistry::global(cx).update(cx, |registry, cx| {
             db_thread
@@ -1136,6 +1151,7 @@ impl Thread {
             summarization_model: None,
             // TODO: Should we persist this on the `DbThread`?
             thinking_enabled: enable_thinking,
+            thinking_effort,
             project,
             action_log,
             updated_at: db_thread.updated_at,
@@ -1243,6 +1259,15 @@ impl Thread {
         cx.notify();
     }
 
+    pub fn thinking_effort(&self) -> Option<&String> {
+        self.thinking_effort.as_ref()
+    }
+
+    pub fn set_thinking_effort(&mut self, effort: Option<String>, cx: &mut Context<Self>) {
+        self.thinking_effort = effort;
+        cx.notify();
+    }
+
     pub fn last_message(&self) -> Option<Message> {
         if let Some(message) = self.pending_message.clone() {
             Some(Message::Agent(message))
@@ -2329,6 +2354,7 @@ impl Thread {
             stop: Vec::new(),
             temperature: AgentSettings::temperature_for_model(model, cx),
             thinking_allowed: self.thinking_enabled,
+            thinking_effort: self.thinking_effort.clone(),
         };
 
         log::debug!("Completion request built successfully");

crates/agent_ui/src/acp/thread_view/active_thread.rs 🔗

@@ -1,5 +1,7 @@
-use gpui::List;
+use gpui::{Corner, List};
+use language_model::LanguageModelEffortLevel;
 use settings::update_settings_file;
+use ui::SplitButton;
 
 use super::*;
 
@@ -2349,7 +2351,7 @@ impl AcpThreadView {
                             .gap_0p5()
                             .child(self.render_add_context_button(cx))
                             .child(self.render_follow_toggle(cx))
-                            .children(self.render_thinking_toggle(cx)),
+                            .children(self.render_thinking_control(cx)),
                     )
                     .child(
                         h_flex()
@@ -2693,14 +2695,15 @@ impl AcpThreadView {
         }
     }
 
-    fn render_thinking_toggle(&self, cx: &mut Context<Self>) -> Option<IconButton> {
+    fn render_thinking_control(&self, cx: &mut Context<Self>) -> Option<AnyElement> {
         if !cx.has_flag::<CloudThinkingToggleFeatureFlag>() {
             return None;
         }
 
         let thread = self.as_native_thread(cx)?.read(cx);
+        let model = thread.model()?;
 
-        let supports_thinking = thread.model()?.supports_thinking();
+        let supports_thinking = model.supports_thinking();
         if !supports_thinking {
             return None;
         }
@@ -2715,34 +2718,137 @@ impl AcpThreadView {
 
         let focus_handle = self.message_editor.focus_handle(cx);
 
-        Some(
-            IconButton::new("thinking-mode", icon)
-                .icon_size(IconSize::Small)
-                .icon_color(Color::Muted)
-                .toggle_state(thinking)
-                .tooltip(move |_, cx| {
-                    Tooltip::for_action_in(tooltip_label, &ToggleThinkingMode, &focus_handle, cx)
-                })
-                .on_click(cx.listener(move |this, _, _window, cx| {
-                    if let Some(thread) = this.as_native_thread(cx) {
-                        thread.update(cx, |thread, cx| {
-                            let enable_thinking = !thread.thinking_enabled();
-                            thread.set_thinking_enabled(enable_thinking, cx);
-
-                            let fs = thread.project().read(cx).fs().clone();
-                            update_settings_file(fs, cx, move |settings, _| {
-                                if let Some(agent) = settings.agent.as_mut()
-                                    && let Some(default_model) = agent.default_model.as_mut()
-                                {
-                                    default_model.enable_thinking = enable_thinking;
-                                }
-                            });
+        let thinking_toggle = IconButton::new("thinking-mode", icon)
+            .icon_size(IconSize::Small)
+            .icon_color(Color::Muted)
+            .toggle_state(thinking)
+            .tooltip(move |_, cx| {
+                Tooltip::for_action_in(tooltip_label, &ToggleThinkingMode, &focus_handle, cx)
+            })
+            .on_click(cx.listener(move |this, _, _window, cx| {
+                if let Some(thread) = this.as_native_thread(cx) {
+                    thread.update(cx, |thread, cx| {
+                        let enable_thinking = !thread.thinking_enabled();
+                        thread.set_thinking_enabled(enable_thinking, cx);
+
+                        let fs = thread.project().read(cx).fs().clone();
+                        update_settings_file(fs, cx, move |settings, _| {
+                            if let Some(agent) = settings.agent.as_mut()
+                                && let Some(default_model) = agent.default_model.as_mut()
+                            {
+                                default_model.enable_thinking = enable_thinking;
+                            }
                         });
-                    }
-                })),
+                    });
+                }
+            }));
+
+        if model.supported_effort_levels().is_empty() {
+            return Some(thinking_toggle.into_any_element());
+        }
+
+        Some(
+            SplitButton::new(
+                thinking_toggle,
+                self.render_effort_selector(
+                    model.supported_effort_levels(),
+                    thread.thinking_effort().cloned(),
+                    cx,
+                )
+                .into_any_element(),
+            )
+            .style(ui::SplitButtonStyle::Outlined)
+            .into_any_element(),
         )
     }
 
+    fn render_effort_selector(
+        &self,
+        supported_effort_levels: Vec<LanguageModelEffortLevel>,
+        selected_effort: Option<String>,
+        cx: &Context<Self>,
+    ) -> impl IntoElement {
+        let weak_self = cx.weak_entity();
+
+        let default_effort_level = supported_effort_levels
+            .iter()
+            .find(|effort_level| effort_level.is_default)
+            .cloned();
+
+        let selected = selected_effort.and_then(|effort| {
+            supported_effort_levels
+                .iter()
+                .find(|level| level.value == effort)
+                .cloned()
+        });
+
+        PopoverMenu::new("effort-selector")
+            .trigger(
+                ui::ButtonLike::new_rounded_right("effort-selector-trigger")
+                    .layer(ui::ElevationIndex::ModalSurface)
+                    .size(ui::ButtonSize::None)
+                    .child(
+                        Label::new(
+                            selected
+                                .clone()
+                                .or(default_effort_level)
+                                .map_or("Select Effort".into(), |effort| effort.name),
+                        )
+                        .size(LabelSize::Small),
+                    )
+                    .child(
+                        div()
+                            .px_1()
+                            .child(Icon::new(IconName::ChevronDown).size(IconSize::XSmall)),
+                    ),
+            )
+            .menu(move |window, cx| {
+                Some(ContextMenu::build(window, cx, |mut menu, _window, _cx| {
+                    for effort_level in supported_effort_levels.clone() {
+                        let is_selected = selected
+                            .as_ref()
+                            .is_some_and(|selected| selected.value == effort_level.value);
+                        let entry = ContextMenuEntry::new(effort_level.name)
+                            .toggleable(IconPosition::End, is_selected);
+
+                        menu.push_item(entry.handler({
+                            let effort = effort_level.value.clone();
+                            let weak_self = weak_self.clone();
+                            move |_window, cx| {
+                                let effort = effort.clone();
+                                weak_self
+                                    .update(cx, |this, cx| {
+                                        if let Some(thread) = this.as_native_thread(cx) {
+                                            thread.update(cx, |thread, cx| {
+                                                thread.set_thinking_effort(
+                                                    Some(effort.to_string()),
+                                                    cx,
+                                                );
+
+                                                let fs = thread.project().read(cx).fs().clone();
+                                                update_settings_file(fs, cx, move |settings, _| {
+                                                    if let Some(agent) = settings.agent.as_mut()
+                                                        && let Some(default_model) =
+                                                            agent.default_model.as_mut()
+                                                    {
+                                                        default_model.effort =
+                                                            Some(effort.to_string());
+                                                    }
+                                                });
+                                            });
+                                        }
+                                    })
+                                    .ok();
+                            }
+                        }));
+                    }
+
+                    menu
+                }))
+            })
+            .anchor(Corner::BottomRight)
+    }
+
     fn render_send_button(&self, cx: &mut Context<Self>) -> AnyElement {
         let message_editor = self.message_editor.read(cx);
         let is_editor_empty = message_editor.is_empty(cx);

crates/agent_ui/src/buffer_codegen.rs 🔗

@@ -544,6 +544,7 @@ impl CodegenAlternative {
                 temperature,
                 messages,
                 thinking_allowed: false,
+                thinking_effort: None,
             }
         }))
     }
@@ -622,6 +623,7 @@ impl CodegenAlternative {
                 temperature,
                 messages: vec![request_message],
                 thinking_allowed: false,
+                thinking_effort: None,
             }
         }))
     }

crates/agent_ui/src/favorite_models.rs 🔗

@@ -10,6 +10,7 @@ fn language_model_to_selection(model: &Arc<dyn LanguageModel>) -> LanguageModelS
         provider: model.provider_id().to_string().into(),
         model: model.id().0.to_string(),
         enable_thinking: false,
+        effort: None,
     }
 }
 

crates/agent_ui/src/text_thread_editor.rs 🔗

@@ -319,13 +319,15 @@ impl TextThreadEditor {
                         move |model, cx| {
                             update_settings_file(fs.clone(), cx, move |settings, _| {
                                 let provider = model.provider_id().0.to_string();
-                                let enable_thinking = model.supports_thinking();
-                                let model = model.id().0.to_string();
+                                let model_id = model.id().0.to_string();
                                 settings.agent.get_or_insert_default().set_model(
                                     LanguageModelSelection {
                                         provider: LanguageModelProviderSetting(provider),
-                                        model,
-                                        enable_thinking,
+                                        model: model_id,
+                                        enable_thinking: model.supports_thinking(),
+                                        effort: model
+                                            .default_effort_level()
+                                            .map(|effort| effort.value.to_string()),
                                     },
                                 )
                             });

crates/anthropic/src/anthropic.rs 🔗

@@ -961,8 +961,9 @@ pub enum Thinking {
     Adaptive,
 }
 
-#[derive(Debug, Clone, Copy, Serialize, Deserialize)]
+#[derive(Debug, Clone, Copy, Serialize, Deserialize, EnumString)]
 #[serde(rename_all = "snake_case")]
+#[strum(serialize_all = "snake_case")]
 pub enum Effort {
     Low,
     Medium,

crates/assistant_text_thread/src/text_thread.rs 🔗

@@ -2269,6 +2269,7 @@ impl TextThread {
             stop: Vec::new(),
             temperature: model.and_then(|model| AgentSettings::temperature_for_model(model, cx)),
             thinking_allowed: true,
+            thinking_effort: None,
         };
         for message in self.messages(cx) {
             if message.status != MessageStatus::Done {

crates/cloud_llm_client/src/cloud_llm_client.rs 🔗

@@ -309,6 +309,8 @@ pub struct LanguageModel {
 pub struct SupportedEffortLevel {
     pub name: Arc<str>,
     pub value: Arc<str>,
+    #[serde(default, skip_serializing_if = "Option::is_none")]
+    pub is_default: Option<bool>,
 }
 
 #[derive(Debug, Serialize, Deserialize)]

crates/eval/src/instance.rs 🔗

@@ -563,6 +563,7 @@ impl ExampleInstance {
                 tool_choice: None,
                 stop: Vec::new(),
                 thinking_allowed: true,
+                thinking_effort: None,
             };
 
             let model = model.clone();

crates/git_ui/src/git_panel.rs 🔗

@@ -2695,13 +2695,14 @@ impl GitPanel {
                         role: Role::User,
                         content: vec![content.into()],
                         cache: false,
-            reasoning_details: None,
+                        reasoning_details: None,
                     }],
                     tools: Vec::new(),
                     tool_choice: None,
                     stop: Vec::new(),
                     temperature,
                     thinking_allowed: false,
+                    thinking_effort: None,
                 };
 
                 let stream = model.stream_completion_text(request, cx);

crates/language_model/src/language_model.rs 🔗

@@ -577,6 +577,7 @@ impl Default for LanguageModelTextStream {
 pub struct LanguageModelEffortLevel {
     pub name: SharedString,
     pub value: SharedString,
+    pub is_default: bool,
 }
 
 pub trait LanguageModel: Send + Sync {
@@ -607,6 +608,13 @@ pub trait LanguageModel: Send + Sync {
         Vec::new()
     }
 
+    /// Returns the default effort level to use when thinking.
+    fn default_effort_level(&self) -> Option<LanguageModelEffortLevel> {
+        self.supported_effort_levels()
+            .into_iter()
+            .find(|effort_level| effort_level.is_default)
+    }
+
     /// Whether this model supports images
     fn supports_images(&self) -> bool;
 

crates/language_model/src/request.rs 🔗

@@ -451,6 +451,7 @@ pub struct LanguageModelRequest {
     pub stop: Vec<String>,
     pub temperature: Option<f32>,
     pub thinking_allowed: bool,
+    pub thinking_effort: Option<String>,
 }
 
 #[derive(Serialize, Deserialize, Debug, Eq, PartialEq)]

crates/language_models/src/provider/cloud.rs 🔗

@@ -34,6 +34,7 @@ pub use settings::ZedDotDevAvailableModel as AvailableModel;
 pub use settings::ZedDotDevAvailableProvider as AvailableProvider;
 use smol::io::{AsyncReadExt, BufReader};
 use std::pin::Pin;
+use std::str::FromStr;
 use std::sync::Arc;
 use std::time::Duration;
 use thiserror::Error;
@@ -584,6 +585,7 @@ impl LanguageModel for CloudLanguageModel {
             .map(|effort_level| LanguageModelEffortLevel {
                 name: effort_level.name.clone().into(),
                 value: effort_level.value.clone().into(),
+                is_default: effort_level.is_default.unwrap_or(false),
             })
             .collect()
     }
@@ -745,10 +747,14 @@ impl LanguageModel for CloudLanguageModel {
         } else {
             thinking_allowed && self.model.id.0.ends_with("-thinking")
         };
+        let effort = request
+            .thinking_effort
+            .as_ref()
+            .and_then(|effort| anthropic::Effort::from_str(effort).ok());
         let provider_name = provider_name(&self.model.provider);
         match self.model.provider {
             cloud_llm_client::LanguageModelProvider::Anthropic => {
-                let request = into_anthropic(
+                let mut request = into_anthropic(
                     request,
                     self.model.id.to_string(),
                     1.0,
@@ -761,6 +767,12 @@ impl LanguageModel for CloudLanguageModel {
                         AnthropicModelMode::Default
                     },
                 );
+
+                if enable_thinking && effort.is_some() {
+                    request.thinking = Some(anthropic::Thinking::Adaptive);
+                    request.output_config = Some(anthropic::OutputConfig { effort });
+                }
+
                 let client = self.client.clone();
                 let llm_api_token = self.llm_api_token.clone();
                 let future = self.request_limiter.stream(async move {

crates/language_models/src/provider/mistral.rs 🔗

@@ -861,6 +861,7 @@ mod tests {
             intent: None,
             stop: vec![],
             thinking_allowed: true,
+            thinking_effort: None,
         };
 
         let mistral_request = into_mistral(request, mistral::Model::MistralSmallLatest, None);
@@ -894,6 +895,7 @@ mod tests {
             intent: None,
             stop: vec![],
             thinking_allowed: true,
+            thinking_effort: None,
         };
 
         let mistral_request = into_mistral(request, mistral::Model::Pixtral12BLatest, None);

crates/language_models/src/provider/open_ai.rs 🔗

@@ -552,6 +552,7 @@ pub fn into_open_ai_response(
         stop: _,
         temperature,
         thinking_allowed: _,
+        thinking_effort: _,
     } = request;
 
     let mut input_items = Vec::new();
@@ -1434,6 +1435,7 @@ mod tests {
             stop: vec![],
             temperature: None,
             thinking_allowed: true,
+            thinking_effort: None,
         };
 
         // Validate that all models are supported by tiktoken-rs
@@ -1570,6 +1572,7 @@ mod tests {
             stop: vec!["<STOP>".into()],
             temperature: None,
             thinking_allowed: false,
+            thinking_effort: None,
         };
 
         let response = into_open_ai_response(

crates/settings_content/src/agent.rs 🔗

@@ -152,6 +152,7 @@ impl AgentSettingsContent {
             provider: provider.into(),
             model,
             enable_thinking: false,
+            effort: None,
         });
     }
 
@@ -265,6 +266,7 @@ pub struct LanguageModelSelection {
     pub model: String,
     #[serde(default)]
     pub enable_thinking: bool,
+    pub effort: Option<String>,
 }
 
 #[with_fallible_options]