Created by Marshall Bowers
This PR removes the code for Burn Mode, as we won't need it anymore
after the 17th.
Closes CLO-79.
Release Notes:
- N/A
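For reference, the user-facing setting removed by this change (see the assets/settings/default.json, settings_content, and migrator hunks below) looked roughly like this — a minimal sketch of an agent settings block that had opted into Burn Mode, not a complete agent configuration:

    "agent": {
      // Removed by this PR; previously accepted "normal" or "burn"
      // (the older "max" value was migrated to "burn").
      "preferred_completion_mode": "burn"
    }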
Cargo.lock | 1
assets/icons/zed_burn_mode.svg | 3
assets/icons/zed_burn_mode_on.svg | 1
assets/keymaps/default-linux.json | 2
assets/keymaps/default-macos.json | 2
assets/keymaps/default-windows.json | 2
assets/settings/default.json | 2
crates/agent/src/db.rs | 11
crates/agent/src/edit_agent.rs | 1
crates/agent/src/legacy_thread.rs | 8
crates/agent/src/tests/mod.rs | 4
crates/agent/src/thread.rs | 38 --
crates/agent/src/thread_store.rs | 1
crates/agent_settings/Cargo.toml | 1
crates/agent_settings/src/agent_settings.rs | 29 --
crates/agent_ui/src/acp/thread_view.rs | 147 +---------
crates/agent_ui/src/agent_ui.rs | 7
crates/agent_ui/src/buffer_codegen.rs | 2
crates/agent_ui/src/terminal_inline_assistant.rs | 1
crates/agent_ui/src/text_thread_editor.rs | 51 ---
crates/agent_ui/src/ui.rs | 2
crates/agent_ui/src/ui/burn_mode_tooltip.rs | 69 -----
crates/assistant_text_thread/src/text_thread.rs | 20 -
crates/cloud_llm_client/src/cloud_llm_client.rs | 12
crates/collab/src/tests/agent_sharing_tests.rs | 3
crates/eval/src/instance.rs | 1
crates/git_ui/src/git_panel.rs | 1
crates/icons/src/icons.rs | 2
crates/language_model/src/language_model.rs | 23 -
crates/language_model/src/request.rs | 3
crates/language_models/src/provider/anthropic.rs | 1
crates/language_models/src/provider/cloud.rs | 17 -
crates/language_models/src/provider/copilot_chat.rs | 1
crates/language_models/src/provider/mistral.rs | 2
crates/language_models/src/provider/open_ai.rs | 3
crates/migrator/src/migrations.rs | 6
crates/migrator/src/migrations/m_2025_05_29/settings.rs | 51 ---
crates/migrator/src/migrator.rs | 71 -----
crates/rules_library/src/rules_library.rs | 1
crates/settings_content/src/agent.rs | 13
40 files changed, 40 insertions(+), 576 deletions(-)
@@ -308,7 +308,6 @@ version = "0.1.0"
dependencies = [
"agent-client-protocol",
"anyhow",
- "cloud_llm_client",
"collections",
"convert_case 0.8.0",
"fs",
@@ -1,3 +0,0 @@
-<svg width="16" height="16" viewBox="0 0 16 16" fill="none" xmlns="http://www.w3.org/2000/svg">
-<path d="M5.70519 9.31137C6.13992 9.31137 6.55683 9.13868 6.86423 8.83128C7.17163 8.52389 7.34432 8.10696 7.34432 7.67224C7.34432 6.76744 7.01649 6.36094 6.68868 5.70528C5.98581 4.30022 6.54181 3.04726 7.99998 1.77136C8.32781 3.41049 9.31128 4.98406 10.6226 6.03311C11.9339 7.08215 12.5896 8.3279 12.5896 9.6392C12.5896 10.2419 12.4708 10.8387 12.2402 11.3956C12.0096 11.9524 11.6715 12.4583 11.2453 12.8845C10.8191 13.3107 10.3132 13.6487 9.75633 13.8794C9.1995 14.1101 8.60269 14.2287 7.99998 14.2287C7.39727 14.2287 6.80046 14.1101 6.24362 13.8794C5.68679 13.6487 5.18083 13.3107 4.75465 12.8845C4.32848 12.4583 3.99041 11.9524 3.75976 11.3956C3.52911 10.8387 3.4104 10.2419 3.4104 9.6392C3.4104 8.88324 3.6943 8.13513 4.06606 7.67224C4.06606 8.10696 4.23875 8.52389 4.54615 8.83128C4.85354 9.13868 5.27047 9.31137 5.70519 9.31137Z" stroke="black" stroke-width="1.2" stroke-linecap="round" stroke-linejoin="round"/>
-</svg>
@@ -1 +0,0 @@
-<svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" fill="none"><g fill="#000" clip-path="url(#a)"><path fill-opacity=".5" stroke="#000" stroke-linecap="round" stroke-linejoin="round" stroke-width="1.2" d="M5.705 9.311a1.64 1.64 0 0 0 1.64-1.639c0-.905-.328-1.311-.656-1.967C5.986 4.3 6.542 3.047 8 1.771c.328 1.64 1.312 3.213 2.623 4.262 1.311 1.05 1.967 2.295 1.967 3.606a4.59 4.59 0 1 1-9.18 0c0-.756.285-1.504.656-1.967a1.64 1.64 0 0 0 1.64 1.64Z"/><path d="M2.286 4.571a1.143 1.143 0 1 0 0-2.285 1.143 1.143 0 0 0 0 2.285ZM11.429 2.286a1.143 1.143 0 1 0 0-2.286 1.143 1.143 0 0 0 0 2.286ZM14.857 5.714a1.143 1.143 0 1 0 0-2.286 1.143 1.143 0 0 0 0 2.286Z"/></g><defs><clipPath id="a"><path fill="#fff" d="M0 0h16v16H0z"/></clipPath></defs></svg>
@@ -256,8 +256,6 @@
"ctrl->": "agent::AddSelectionToThread",
"ctrl-shift-e": "project_panel::ToggleFocus",
"ctrl-shift-enter": "agent::ContinueThread",
- "super-ctrl-b": "agent::ToggleBurnMode",
- "alt-enter": "agent::ContinueWithBurnMode",
"ctrl-y": "agent::AllowOnce",
"ctrl-alt-a": "agent::OpenPermissionDropdown",
"ctrl-alt-z": "agent::RejectOnce",
@@ -296,9 +296,7 @@
"shift-alt-escape": "agent::ExpandMessageEditor",
"cmd->": "agent::AddSelectionToThread",
"cmd-shift-e": "project_panel::ToggleFocus",
- "cmd-ctrl-b": "agent::ToggleBurnMode",
"cmd-shift-enter": "agent::ContinueThread",
- "alt-enter": "agent::ContinueWithBurnMode",
"cmd-y": "agent::AllowOnce",
"cmd-alt-a": "agent::OpenPermissionDropdown",
"cmd-alt-z": "agent::RejectOnce",
@@ -257,8 +257,6 @@
"ctrl-shift-.": "agent::AddSelectionToThread",
"ctrl-shift-e": "project_panel::ToggleFocus",
"ctrl-shift-enter": "agent::ContinueThread",
- "super-ctrl-b": "agent::ToggleBurnMode",
- "alt-enter": "agent::ContinueWithBurnMode",
"shift-alt-a": "agent::AllowOnce",
"ctrl-alt-a": "agent::OpenPermissionDropdown",
"shift-alt-z": "agent::RejectOnce",
@@ -913,8 +913,6 @@
"inline_assistant_use_streaming_tools": true,
// Whether the agent is enabled.
"enabled": true,
- // What completion mode to start new threads in, if available. Can be 'normal' or 'burn'.
- "preferred_completion_mode": "normal",
// Whether to show the agent panel button in the status bar.
"button": true,
// Where to dock the agent panel. Can be 'left', 'right' or 'bottom'.
@@ -1,7 +1,7 @@
use crate::{AgentMessage, AgentMessageContent, UserMessage, UserMessageContent};
use acp_thread::UserMessageId;
use agent_client_protocol as acp;
-use agent_settings::{AgentProfileId, CompletionMode};
+use agent_settings::AgentProfileId;
use anyhow::{Result, anyhow};
use chrono::{DateTime, Utc};
use collections::{HashMap, IndexMap};
@@ -47,8 +47,6 @@ pub struct DbThread {
#[serde(default)]
pub model: Option<DbLanguageModel>,
#[serde(default)]
- pub completion_mode: Option<CompletionMode>,
- #[serde(default)]
pub profile: Option<AgentProfileId>,
#[serde(default)]
pub imported: bool,
@@ -61,8 +59,6 @@ pub struct SharedThread {
pub updated_at: DateTime<Utc>,
#[serde(default)]
pub model: Option<DbLanguageModel>,
- #[serde(default)]
- pub completion_mode: Option<CompletionMode>,
pub version: String,
}
@@ -75,7 +71,6 @@ impl SharedThread {
messages: thread.messages.clone(),
updated_at: thread.updated_at,
model: thread.model.clone(),
- completion_mode: thread.completion_mode,
version: Self::VERSION.to_string(),
}
}
@@ -90,7 +85,6 @@ impl SharedThread {
cumulative_token_usage: Default::default(),
request_token_usage: Default::default(),
model: self.model,
- completion_mode: self.completion_mode,
profile: None,
imported: true,
}
@@ -264,7 +258,6 @@ impl DbThread {
cumulative_token_usage: thread.cumulative_token_usage,
request_token_usage,
model: thread.model,
- completion_mode: thread.completion_mode,
profile: thread.profile,
imported: false,
})
@@ -515,7 +508,6 @@ mod tests {
messages: vec![],
updated_at: Utc.with_ymd_and_hms(2024, 1, 1, 0, 0, 0).unwrap(),
model: None,
- completion_mode: None,
version: SharedThread::VERSION.to_string(),
};
@@ -558,7 +550,6 @@ mod tests {
cumulative_token_usage: Default::default(),
request_token_usage: HashMap::default(),
model: None,
- completion_mode: None,
profile: None,
imported: false,
}
@@ -726,7 +726,6 @@ impl EditAgent {
thread_id: conversation.thread_id,
prompt_id: conversation.prompt_id,
intent: Some(intent),
- mode: conversation.mode,
messages: conversation.messages,
tool_choice,
tools,
@@ -1,5 +1,5 @@
use crate::ProjectSnapshot;
-use agent_settings::{AgentProfileId, CompletionMode};
+use agent_settings::AgentProfileId;
use anyhow::Result;
use chrono::{DateTime, Utc};
use gpui::SharedString;
@@ -37,8 +37,6 @@ pub struct SerializedThread {
#[serde(default)]
pub model: Option<SerializedLanguageModel>,
#[serde(default)]
- pub completion_mode: Option<CompletionMode>,
- #[serde(default)]
pub tool_use_limit_reached: bool,
#[serde(default)]
pub profile: Option<AgentProfileId>,
@@ -186,7 +184,6 @@ impl LegacySerializedThread {
request_token_usage: Vec::new(),
detailed_summary_state: DetailedSummaryState::default(),
model: None,
- completion_mode: None,
tool_use_limit_reached: false,
profile: None,
}
@@ -275,7 +272,6 @@ mod tests {
request_token_usage: vec![],
detailed_summary_state: DetailedSummaryState::default(),
model: None,
- completion_mode: None,
tool_use_limit_reached: false,
profile: None
}
@@ -341,7 +337,6 @@ mod tests {
request_token_usage: vec![],
detailed_summary_state: DetailedSummaryState::default(),
model: None,
- completion_mode: None,
tool_use_limit_reached: false,
profile: None,
});
@@ -393,7 +388,6 @@ mod tests {
request_token_usage: vec![],
detailed_summary_state: DetailedSummaryState::default(),
model: None,
- completion_mode: None,
tool_use_limit_reached: false,
profile: None
}
@@ -2858,7 +2858,6 @@ async fn test_send_no_retry_on_success(cx: &mut TestAppContext) {
let mut events = thread
.update(cx, |thread, cx| {
- thread.set_completion_mode(agent_settings::CompletionMode::Burn, cx);
thread.send(UserMessageId::new(), ["Hello!"], cx)
})
.unwrap();
@@ -2902,7 +2901,6 @@ async fn test_send_retry_on_error(cx: &mut TestAppContext) {
let mut events = thread
.update(cx, |thread, cx| {
- thread.set_completion_mode(agent_settings::CompletionMode::Burn, cx);
thread.send(UserMessageId::new(), ["Hello!"], cx)
})
.unwrap();
@@ -2967,7 +2965,6 @@ async fn test_send_retry_finishes_tool_calls_on_error(cx: &mut TestAppContext) {
let events = thread
.update(cx, |thread, cx| {
- thread.set_completion_mode(agent_settings::CompletionMode::Burn, cx);
thread.add_tool(EchoTool);
thread.send(UserMessageId::new(), ["Call the echo tool!"], cx)
})
@@ -3048,7 +3045,6 @@ async fn test_send_max_retries_exceeded(cx: &mut TestAppContext) {
let mut events = thread
.update(cx, |thread, cx| {
- thread.set_completion_mode(agent_settings::CompletionMode::Burn, cx);
thread.send(UserMessageId::new(), ["Hello!"], cx)
})
.unwrap();
@@ -12,8 +12,8 @@ use feature_flags::{FeatureFlagAppExt as _, SubagentsFeatureFlag};
use agent_client_protocol as acp;
use agent_settings::{
- AgentProfileId, AgentProfileSettings, AgentSettings, CompletionMode,
- SUMMARIZE_THREAD_DETAILED_PROMPT, SUMMARIZE_THREAD_PROMPT,
+ AgentProfileId, AgentProfileSettings, AgentSettings, SUMMARIZE_THREAD_DETAILED_PROMPT,
+ SUMMARIZE_THREAD_PROMPT,
};
use anyhow::{Context as _, Result, anyhow};
use chrono::{DateTime, Utc};
@@ -32,12 +32,11 @@ use gpui::{
App, AppContext, AsyncApp, Context, Entity, EventEmitter, SharedString, Task, WeakEntity,
};
use language_model::{
- LanguageModel, LanguageModelCompletionError, LanguageModelCompletionEvent, LanguageModelExt,
- LanguageModelId, LanguageModelImage, LanguageModelProviderId, LanguageModelRegistry,
- LanguageModelRequest, LanguageModelRequestMessage, LanguageModelRequestTool,
- LanguageModelToolResult, LanguageModelToolResultContent, LanguageModelToolSchemaFormat,
- LanguageModelToolUse, LanguageModelToolUseId, Role, SelectedModel, StopReason, TokenUsage,
- ZED_CLOUD_PROVIDER_ID,
+ LanguageModel, LanguageModelCompletionError, LanguageModelCompletionEvent, LanguageModelId,
+ LanguageModelImage, LanguageModelProviderId, LanguageModelRegistry, LanguageModelRequest,
+ LanguageModelRequestMessage, LanguageModelRequestTool, LanguageModelToolResult,
+ LanguageModelToolResultContent, LanguageModelToolSchemaFormat, LanguageModelToolUse,
+ LanguageModelToolUseId, Role, SelectedModel, StopReason, TokenUsage, ZED_CLOUD_PROVIDER_ID,
};
use project::Project;
use prompt_store::ProjectContext;
@@ -691,7 +690,6 @@ pub struct Thread {
summary: Option<SharedString>,
messages: Vec<Message>,
user_store: Entity<UserStore>,
- completion_mode: CompletionMode,
/// Holds the task that handles agent interaction until the end of the turn.
/// Survives across multiple requests as the model performs tool calls and
/// we run tools, report their results.
@@ -753,7 +751,6 @@ impl Thread {
summary: None,
messages: Vec::new(),
user_store: project.read(cx).user_store(),
- completion_mode: AgentSettings::get_global(cx).preferred_completion_mode,
running_turn: None,
pending_message: None,
tools: BTreeMap::default(),
@@ -806,7 +803,6 @@ impl Thread {
summary: None,
messages: Vec::new(),
user_store: project.read(cx).user_store(),
- completion_mode: AgentSettings::get_global(cx).preferred_completion_mode,
running_turn: None,
pending_message: None,
tools: parent_tools,
@@ -1003,7 +999,6 @@ impl Thread {
summary: db_thread.detailed_summary,
messages: db_thread.messages,
user_store: project.read(cx).user_store(),
- completion_mode: db_thread.completion_mode.unwrap_or_default(),
running_turn: None,
pending_message: None,
tools: BTreeMap::default(),
@@ -1042,7 +1037,6 @@ impl Thread {
provider: model.provider_id().to_string(),
model: model.name().0.to_string(),
}),
- completion_mode: Some(self.completion_mode),
profile: Some(self.profile_id.clone()),
imported: self.imported,
};
@@ -1115,20 +1109,6 @@ impl Thread {
cx.notify()
}
- pub fn completion_mode(&self) -> CompletionMode {
- self.completion_mode
- }
-
- pub fn set_completion_mode(&mut self, mode: CompletionMode, cx: &mut Context<Self>) {
- let old_usage = self.latest_token_usage();
- self.completion_mode = mode;
- let new_usage = self.latest_token_usage();
- if old_usage != new_usage {
- cx.emit(TokenUsageUpdated(new_usage));
- }
- cx.notify()
- }
-
pub fn last_message(&self) -> Option<Message> {
if let Some(message) = self.pending_message.clone() {
Some(Message::Agent(message))
@@ -1285,7 +1265,7 @@ impl Thread {
let usage = self.latest_request_token_usage()?;
let model = self.model.clone()?;
Some(acp_thread::TokenUsage {
- max_tokens: model.max_token_count_for_mode(self.completion_mode.into()),
+ max_tokens: model.max_token_count(),
used_tokens: usage.total_tokens(),
input_tokens: usage.input_tokens,
output_tokens: usage.output_tokens,
@@ -2175,7 +2155,6 @@ impl Thread {
log::debug!("Building completion request");
log::debug!("Completion intent: {:?}", completion_intent);
- log::debug!("Completion mode: {:?}", self.completion_mode);
let available_tools: Vec<_> = self
.running_turn
@@ -2191,7 +2170,6 @@ impl Thread {
thread_id: Some(self.id.to_string()),
prompt_id: Some(self.prompt_id.to_string()),
intent: Some(completion_intent),
- mode: Some(self.completion_mode.into()),
messages,
tools,
tool_choice: None,
@@ -141,7 +141,6 @@ mod tests {
cumulative_token_usage: Default::default(),
request_token_usage: HashMap::default(),
model: None,
- completion_mode: None,
profile: None,
imported: false,
}
@@ -14,7 +14,6 @@ path = "src/agent_settings.rs"
[dependencies]
agent-client-protocol.workspace = true
anyhow.workspace = true
-cloud_llm_client.workspace = true
collections.workspace = true
convert_case.workspace = true
fs.workspace = true
@@ -43,7 +43,6 @@ pub struct AgentSettings {
pub play_sound_when_agent_done: bool,
pub single_file_review: bool,
pub model_parameters: Vec<LanguageModelParameters>,
- pub preferred_completion_mode: CompletionMode,
pub enable_feedback: bool,
pub expand_edit_card: bool,
pub expand_terminal_card: bool,
@@ -109,33 +108,6 @@ impl AgentSettings {
}
}
-#[derive(Clone, Copy, Debug, Serialize, Deserialize, JsonSchema, PartialEq, Default)]
-#[serde(rename_all = "snake_case")]
-pub enum CompletionMode {
- #[default]
- Normal,
- #[serde(alias = "max")]
- Burn,
-}
-
-impl From<CompletionMode> for cloud_llm_client::CompletionMode {
- fn from(value: CompletionMode) -> Self {
- match value {
- CompletionMode::Normal => cloud_llm_client::CompletionMode::Normal,
- CompletionMode::Burn => cloud_llm_client::CompletionMode::Max,
- }
- }
-}
-
-impl From<settings::CompletionMode> for CompletionMode {
- fn from(value: settings::CompletionMode) -> Self {
- match value {
- settings::CompletionMode::Normal => CompletionMode::Normal,
- settings::CompletionMode::Burn => CompletionMode::Burn,
- }
- }
-}
-
#[derive(Debug, PartialEq, Eq, Hash, Clone, Serialize, Deserialize, JsonSchema)]
pub struct AgentProfileId(pub Arc<str>);
@@ -281,7 +253,6 @@ impl Settings for AgentSettings {
play_sound_when_agent_done: agent.play_sound_when_agent_done.unwrap(),
single_file_review: agent.single_file_review.unwrap(),
model_parameters: agent.model_parameters,
- preferred_completion_mode: agent.preferred_completion_mode.unwrap().into(),
enable_feedback: agent.enable_feedback.unwrap(),
expand_edit_card: agent.expand_edit_card.unwrap(),
expand_terminal_card: agent.expand_terminal_card.unwrap(),
@@ -8,7 +8,7 @@ use action_log::{ActionLog, ActionLogTelemetry};
use agent::{NativeAgentServer, NativeAgentSessionList, SharedThread, ThreadStore};
use agent_client_protocol::{self as acp, PromptCapabilities};
use agent_servers::{AgentServer, AgentServerDelegate};
-use agent_settings::{AgentProfileId, AgentSettings, CompletionMode};
+use agent_settings::{AgentProfileId, AgentSettings};
use anyhow::{Result, anyhow};
use arrayvec::ArrayVec;
use audio::{Audio, Sound};
@@ -67,12 +67,12 @@ use crate::acp::entry_view_state::{EntryViewEvent, ViewEvent};
use crate::acp::message_editor::{MessageEditor, MessageEditorEvent};
use crate::agent_diff::AgentDiff;
use crate::profile_selector::{ProfileProvider, ProfileSelector};
-use crate::ui::{AgentNotification, AgentNotificationEvent, BurnModeTooltip};
+use crate::ui::{AgentNotification, AgentNotificationEvent};
use crate::{
AgentDiffPane, AgentPanel, AllowAlways, AllowOnce, AuthorizeToolCall, ClearMessageQueue,
CycleFavoriteModels, CycleModeSelector, ExpandMessageEditor, Follow, KeepAll, NewThread,
OpenAgentDiff, OpenHistory, RejectAll, RejectOnce, SelectPermissionGranularity,
- SendImmediately, SendNextQueuedMessage, ToggleBurnMode, ToggleProfileSelector,
+ SendImmediately, SendNextQueuedMessage, ToggleProfileSelector,
};
const MAX_COLLAPSED_LINES: usize = 3;
@@ -5953,8 +5953,7 @@ impl AcpThreadView {
h_flex()
.gap_0p5()
.child(self.render_add_context_button(cx))
- .child(self.render_follow_toggle(cx))
- .children(self.render_burn_mode_toggle(cx)),
+ .child(self.render_follow_toggle(cx)),
)
.child(
h_flex()
@@ -6109,28 +6108,6 @@ impl AcpThreadView {
}
}
- fn toggle_burn_mode(
- &mut self,
- _: &ToggleBurnMode,
- _window: &mut Window,
- cx: &mut Context<Self>,
- ) {
- let Some(thread) = self.as_native_thread(cx) else {
- return;
- };
-
- thread.update(cx, |thread, cx| {
- let current_mode = thread.completion_mode();
- thread.set_completion_mode(
- match current_mode {
- CompletionMode::Burn => CompletionMode::Normal,
- CompletionMode::Normal => CompletionMode::Burn,
- },
- cx,
- );
- });
- }
-
fn keep_all(&mut self, _: &KeepAll, _window: &mut Window, cx: &mut Context<Self>) {
let Some(thread) = self.thread() else {
return;
@@ -6304,41 +6281,6 @@ impl AcpThreadView {
Some(())
}
- fn render_burn_mode_toggle(&self, cx: &mut Context<Self>) -> Option<AnyElement> {
- let thread = self.as_native_thread(cx)?.read(cx);
-
- if thread
- .model()
- .is_none_or(|model| !model.supports_burn_mode())
- {
- return None;
- }
-
- let active_completion_mode = thread.completion_mode();
- let burn_mode_enabled = active_completion_mode == CompletionMode::Burn;
- let icon = if burn_mode_enabled {
- IconName::ZedBurnModeOn
- } else {
- IconName::ZedBurnMode
- };
-
- Some(
- IconButton::new("burn-mode", icon)
- .icon_size(IconSize::Small)
- .icon_color(Color::Muted)
- .toggle_state(burn_mode_enabled)
- .selected_icon_color(Color::Error)
- .on_click(cx.listener(|this, _event, window, cx| {
- this.toggle_burn_mode(&ToggleBurnMode, window, cx);
- }))
- .tooltip(move |_window, cx| {
- cx.new(|_| BurnModeTooltip::new().selected(burn_mode_enabled))
- .into()
- })
- .into_any_element(),
- )
- }
-
fn render_send_button(&self, cx: &mut Context<Self>) -> AnyElement {
let message_editor = self.message_editor.read(cx);
let is_editor_empty = message_editor.is_empty(cx);
@@ -7313,19 +7255,7 @@ impl AcpThreadView {
),
};
- let burn_mode_available = self.as_native_thread(cx).is_some_and(|thread| {
- thread.read(cx).completion_mode() == CompletionMode::Normal
- && thread
- .read(cx)
- .model()
- .is_some_and(|model| model.supports_burn_mode())
- });
-
- let description = if burn_mode_available {
- "To continue, start a new thread from a summary or turn Burn Mode on."
- } else {
- "To continue, start a new thread from a summary."
- };
+ let description = "To continue, start a new thread from a summary.";
Some(
Callout::new()
@@ -7334,34 +7264,23 @@ impl AcpThreadView {
.title(title)
.description(description)
.actions_slot(
- h_flex()
- .gap_0p5()
- .child(
- Button::new("start-new-thread", "Start New Thread")
- .label_size(LabelSize::Small)
- .on_click(cx.listener(|this, _, window, cx| {
- let Some(thread) = this.thread() else {
- return;
- };
- let session_id = thread.read(cx).session_id().clone();
- window.dispatch_action(
- crate::NewNativeAgentThreadFromSummary {
- from_session_id: session_id,
- }
- .boxed_clone(),
- cx,
- );
- })),
- )
- .when(burn_mode_available, |this| {
- this.child(
- IconButton::new("burn-mode-callout", IconName::ZedBurnMode)
- .icon_size(IconSize::XSmall)
- .on_click(cx.listener(|this, _event, window, cx| {
- this.toggle_burn_mode(&ToggleBurnMode, window, cx);
- })),
- )
- }),
+ h_flex().gap_0p5().child(
+ Button::new("start-new-thread", "Start New Thread")
+ .label_size(LabelSize::Small)
+ .on_click(cx.listener(|this, _, window, cx| {
+ let Some(thread) = this.thread() else {
+ return;
+ };
+ let session_id = thread.read(cx).session_id().clone();
+ window.dispatch_action(
+ crate::NewNativeAgentThreadFromSummary {
+ from_session_id: session_id,
+ }
+ .boxed_clone(),
+ cx,
+ );
+ })),
+ ),
)
.dismiss_action(self.dismiss_error_button(cx)),
)
@@ -7585,14 +7504,6 @@ impl AcpThreadView {
.thread()
.map_or(false, |thread| thread.read(cx).can_resume(cx));
- let can_enable_burn_mode = self.as_native_thread(cx).map_or(false, |thread| {
- let thread = thread.read(cx);
- let supports_burn_mode = thread
- .model()
- .map_or(false, |model| model.supports_burn_mode());
- supports_burn_mode && thread.completion_mode() == CompletionMode::Normal
- });
-
let markdown = if let Some(markdown) = &self.thread_error_markdown {
markdown.clone()
} else {
@@ -7614,19 +7525,6 @@ impl AcpThreadView {
.actions_slot(
h_flex()
.gap_0p5()
- .when(can_resume && can_enable_burn_mode, |this| {
- this.child(
- Button::new("enable-burn-mode-and-retry", "Enable Burn Mode and Retry")
- .icon(IconName::ZedBurnMode)
- .icon_position(IconPosition::Start)
- .icon_size(IconSize::Small)
- .label_size(LabelSize::Small)
- .on_click(cx.listener(|this, _, window, cx| {
- this.toggle_burn_mode(&ToggleBurnMode, window, cx);
- this.resume_chat(cx);
- })),
- )
- })
.when(can_resume, |this| {
this.child(
IconButton::new("retry", IconName::RotateCw)
@@ -7893,7 +7791,6 @@ impl Render for AcpThreadView {
.on_action(cx.listener(|this, _: &menu::Cancel, _, cx| {
this.cancel_generation(cx);
}))
- .on_action(cx.listener(Self::toggle_burn_mode))
.on_action(cx.listener(Self::keep_all))
.on_action(cx.listener(Self::reject_all))
.on_action(cx.listener(Self::allow_always))
@@ -119,10 +119,6 @@ actions!(
ResetTrialEndUpsell,
/// Continues the current thread.
ContinueThread,
- /// Continues the thread with burn mode enabled.
- ContinueWithBurnMode,
- /// Toggles burn mode for faster responses.
- ToggleBurnMode,
/// Interrupts the current generation and sends the message immediately.
SendImmediately,
/// Sends the next queued message immediately.
@@ -460,7 +456,7 @@ fn register_slash_commands(cx: &mut App) {
#[cfg(test)]
mod tests {
use super::*;
- use agent_settings::{AgentProfileId, AgentSettings, CompletionMode};
+ use agent_settings::{AgentProfileId, AgentSettings};
use command_palette_hooks::CommandPaletteFilter;
use editor::actions::AcceptEditPrediction;
use gpui::{BorrowAppContext, TestAppContext, px};
@@ -503,7 +499,6 @@ mod tests {
play_sound_when_agent_done: false,
single_file_review: false,
model_parameters: vec![],
- preferred_completion_mode: CompletionMode::Normal,
enable_feedback: false,
expand_edit_card: true,
expand_terminal_card: true,
@@ -538,7 +538,6 @@ impl CodegenAlternative {
thread_id: None,
prompt_id: None,
intent: Some(CompletionIntent::InlineAssist),
- mode: None,
tools,
tool_choice,
stop: Vec::new(),
@@ -617,7 +616,6 @@ impl CodegenAlternative {
thread_id: None,
prompt_id: None,
intent: Some(CompletionIntent::InlineAssist),
- mode: None,
tools: Vec::new(),
tool_choice: None,
stop: Vec::new(),
@@ -268,7 +268,6 @@ impl TerminalInlineAssistant {
LanguageModelRequest {
thread_id: None,
prompt_id: None,
- mode: None,
intent: Some(CompletionIntent::TerminalInlineAssist),
messages: vec![request_message],
tools: Vec::new(),
@@ -1,9 +1,8 @@
use crate::{
agent_panel::AgentType,
language_model_selector::{LanguageModelSelector, language_model_selector},
- ui::{BurnModeTooltip, ModelSelectorTooltip},
+ ui::ModelSelectorTooltip,
};
-use agent_settings::CompletionMode;
use anyhow::Result;
use assistant_slash_command::{SlashCommand, SlashCommandOutputSection, SlashCommandWorkingSet};
use assistant_slash_commands::{DefaultSlashCommand, FileSlashCommand, selections_creases};
@@ -34,8 +33,7 @@ use language::{
language_settings::{SoftWrap, all_language_settings},
};
use language_model::{
- ConfigurationError, IconOrSvg, LanguageModelExt, LanguageModelImage, LanguageModelRegistry,
- Role,
+ ConfigurationError, IconOrSvg, LanguageModelImage, LanguageModelRegistry, Role,
};
use multi_buffer::MultiBufferRow;
use picker::{Picker, popover_menu::PickerPopoverMenu};
@@ -2331,45 +2329,6 @@ impl TextThreadEditor {
)
}
- fn render_burn_mode_toggle(&self, cx: &mut Context<Self>) -> Option<AnyElement> {
- let text_thread = self.text_thread().read(cx);
- let active_model = LanguageModelRegistry::read_global(cx)
- .default_model()
- .map(|default| default.model)?;
- if !active_model.supports_burn_mode() {
- return None;
- }
-
- let active_completion_mode = text_thread.completion_mode();
- let burn_mode_enabled = active_completion_mode == CompletionMode::Burn;
- let icon = if burn_mode_enabled {
- IconName::ZedBurnModeOn
- } else {
- IconName::ZedBurnMode
- };
-
- Some(
- IconButton::new("burn-mode", icon)
- .icon_size(IconSize::Small)
- .icon_color(Color::Muted)
- .toggle_state(burn_mode_enabled)
- .selected_icon_color(Color::Error)
- .on_click(cx.listener(move |this, _event, _window, cx| {
- this.text_thread().update(cx, |text_thread, _cx| {
- text_thread.set_completion_mode(match active_completion_mode {
- CompletionMode::Burn => CompletionMode::Normal,
- CompletionMode::Normal => CompletionMode::Burn,
- });
- });
- }))
- .tooltip(move |_window, cx| {
- cx.new(|_| BurnModeTooltip::new().selected(burn_mode_enabled))
- .into()
- })
- .into_any_element(),
- )
- }
-
fn render_language_model_selector(
&self,
window: &mut Window,
@@ -2826,8 +2785,7 @@ impl Render for TextThreadEditor {
.child(
h_flex()
.gap_0p5()
- .child(self.render_inject_context_menu(cx))
- .children(self.render_burn_mode_toggle(cx)),
+ .child(self.render_inject_context_menu(cx)),
)
.child(
h_flex()
@@ -3194,8 +3152,7 @@ fn token_state(text_thread: &Entity<TextThread>, cx: &App) -> Option<TokenState>
.default_model()?
.model;
let token_count = text_thread.read(cx).token_count()?;
- let max_token_count =
- model.max_token_count_for_mode(text_thread.read(cx).completion_mode().into());
+ let max_token_count = model.max_token_count();
let token_state = if max_token_count.saturating_sub(token_count) == 0 {
TokenState::NoTokensLeft {
max_token_count,
@@ -1,6 +1,5 @@
mod acp_onboarding_modal;
mod agent_notification;
-mod burn_mode_tooltip;
mod claude_code_onboarding_modal;
mod end_trial_upsell;
mod hold_for_default;
@@ -10,7 +9,6 @@ mod onboarding_modal;
pub use acp_onboarding_modal::*;
pub use agent_notification::*;
-pub use burn_mode_tooltip::*;
pub use claude_code_onboarding_modal::*;
pub use end_trial_upsell::*;
pub use hold_for_default::*;
@@ -1,69 +0,0 @@
-use crate::ToggleBurnMode;
-use gpui::{Context, FontWeight, IntoElement, Render, Window};
-use ui::{KeyBinding, prelude::*, tooltip_container};
-
-pub struct BurnModeTooltip {
- selected: bool,
-}
-
-impl BurnModeTooltip {
- pub fn new() -> Self {
- Self { selected: false }
- }
-
- pub fn selected(mut self, selected: bool) -> Self {
- self.selected = selected;
- self
- }
-}
-
-impl Render for BurnModeTooltip {
- fn render(&mut self, _window: &mut Window, cx: &mut Context<Self>) -> impl IntoElement {
- let (icon, color) = if self.selected {
- (IconName::ZedBurnModeOn, Color::Error)
- } else {
- (IconName::ZedBurnMode, Color::Default)
- };
-
- let turned_on = h_flex()
- .h_4()
- .px_1()
- .border_1()
- .border_color(cx.theme().colors().border)
- .bg(cx.theme().colors().text_accent.opacity(0.1))
- .rounded_sm()
- .child(
- Label::new("ON")
- .size(LabelSize::XSmall)
- .weight(FontWeight::SEMIBOLD)
- .color(Color::Accent),
- );
-
- let title = h_flex()
- .gap_1p5()
- .child(Icon::new(icon).size(IconSize::Small).color(color))
- .child(Label::new("Burn Mode"))
- .when(self.selected, |title| title.child(turned_on));
-
- let keybinding = KeyBinding::for_action(&ToggleBurnMode, cx).size(rems_from_px(12.));
-
- tooltip_container(cx, |this, _| {
- this
- .child(
- h_flex()
- .justify_between()
- .child(title)
- .child(keybinding)
- )
- .child(
- div()
- .max_w_64()
- .child(
- Label::new("Enables models to use large context windows, unlimited tool calls, and other capabilities for expanded reasoning.")
- .size(LabelSize::Small)
- .color(Color::Muted)
- )
- )
- })
- }
-}
@@ -29,7 +29,6 @@ use open_ai::Model as OpenAiModel;
use paths::text_threads_dir;
use prompt_store::PromptBuilder;
use serde::{Deserialize, Serialize};
-use settings::Settings;
use smallvec::SmallVec;
use std::{
cmp::{Ordering, max},
@@ -688,7 +687,6 @@ pub struct TextThread {
_subscriptions: Vec<Subscription>,
language_registry: Arc<LanguageRegistry>,
prompt_builder: Arc<PromptBuilder>,
- completion_mode: agent_settings::CompletionMode,
}
trait ContextAnnotation {
@@ -721,14 +719,6 @@ impl TextThread {
)
}
- pub fn completion_mode(&self) -> agent_settings::CompletionMode {
- self.completion_mode
- }
-
- pub fn set_completion_mode(&mut self, completion_mode: agent_settings::CompletionMode) {
- self.completion_mode = completion_mode;
- }
-
pub fn new(
id: TextThreadId,
replica_id: ReplicaId,
@@ -773,7 +763,6 @@ impl TextThread {
pending_cache_warming_task: Task::ready(None),
_subscriptions: vec![cx.subscribe(&buffer, Self::handle_buffer_event)],
pending_save: Task::ready(Ok(())),
- completion_mode: AgentSettings::get_global(cx).preferred_completion_mode,
path: None,
buffer,
language_registry,
@@ -2274,7 +2263,6 @@ impl TextThread {
thread_id: None,
prompt_id: None,
intent: Some(CompletionIntent::UserPrompt),
- mode: None,
messages: Vec::new(),
tools: Vec::new(),
tool_choice: None,
@@ -2333,15 +2321,7 @@ impl TextThread {
completion_request.messages.push(request_message);
}
}
- let supports_burn_mode = if let Some(model) = model {
- model.supports_burn_mode()
- } else {
- false
- };
- if supports_burn_mode {
- completion_request.mode = Some(self.completion_mode.into());
- }
completion_request
}
@@ -199,13 +199,6 @@ pub enum EditPredictionRejectReason {
Discarded,
}
-#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Clone, Copy, Serialize, Deserialize)]
-#[serde(rename_all = "snake_case")]
-pub enum CompletionMode {
- Normal,
- Max,
-}
-
#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Clone, Copy, Serialize, Deserialize)]
#[serde(rename_all = "snake_case")]
pub enum CompletionIntent {
@@ -228,8 +221,6 @@ pub struct CompletionBody {
pub prompt_id: Option<String>,
#[serde(skip_serializing_if = "Option::is_none", default)]
pub intent: Option<CompletionIntent>,
- #[serde(skip_serializing_if = "Option::is_none", default)]
- pub mode: Option<CompletionMode>,
pub provider: LanguageModelProvider,
pub model: String,
pub provider_request: serde_json::Value,
@@ -328,10 +319,9 @@ pub struct LanguageModel {
pub supports_tools: bool,
pub supports_images: bool,
pub supports_thinking: bool,
- pub supports_max_mode: bool,
#[serde(default)]
pub supports_streaming_tools: bool,
- // only used by OpenAI and xAI
+ /// Only used by OpenAI and xAI.
#[serde(default)]
pub supports_parallel_tool_calls: bool,
}
@@ -24,7 +24,6 @@ async fn test_share_and_retrieve_thread(
messages: vec![],
updated_at: chrono::Utc::now(),
model: None,
- completion_mode: None,
version: SharedThread::VERSION.to_string(),
};
@@ -153,7 +152,6 @@ async fn test_sync_imported_thread(
messages: vec![],
updated_at: chrono::Utc::now(),
model: None,
- completion_mode: None,
version: SharedThread::VERSION.to_string(),
};
@@ -186,7 +184,6 @@ async fn test_sync_imported_thread(
messages: vec![],
updated_at: chrono::Utc::now(),
model: None,
- completion_mode: None,
version: SharedThread::VERSION.to_string(),
};
@@ -548,7 +548,6 @@ impl ExampleInstance {
let request = LanguageModelRequest {
thread_id: None,
prompt_id: None,
- mode: None,
intent: None,
messages: vec![LanguageModelRequestMessage {
role: Role::User,
@@ -2680,7 +2680,6 @@ impl GitPanel {
thread_id: None,
prompt_id: None,
intent: Some(CompletionIntent::GenerateGitCommitMessage),
- mode: None,
messages: vec![LanguageModelRequestMessage {
role: Role::User,
content: vec![content.into()],
@@ -262,8 +262,6 @@ pub enum IconName {
ZedAgent,
ZedAgentTwo,
ZedAssistant,
- ZedBurnMode,
- ZedBurnModeOn,
ZedPredict,
ZedPredictDisabled,
ZedPredictDown,
@@ -13,7 +13,7 @@ pub mod fake_provider;
use anthropic::{AnthropicError, parse_prompt_too_long};
use anyhow::{Result, anyhow};
use client::Client;
-use cloud_llm_client::{CompletionMode, CompletionRequestStatus};
+use cloud_llm_client::CompletionRequestStatus;
use futures::FutureExt;
use futures::{StreamExt, future::BoxFuture, stream::BoxStream};
use gpui::{AnyView, App, AsyncApp, SharedString, Task, Window};
@@ -600,11 +600,6 @@ pub trait LanguageModel: Send + Sync {
/// Whether this model supports choosing which tool to use.
fn supports_tool_choice(&self, choice: LanguageModelToolChoice) -> bool;
- /// Returns whether this model supports "burn mode";
- fn supports_burn_mode(&self) -> bool {
- false
- }
-
/// Returns whether this model or provider supports streaming tool calls;
fn supports_streaming_tools(&self) -> bool {
false
@@ -621,10 +616,6 @@ pub trait LanguageModel: Send + Sync {
}
fn max_token_count(&self) -> u64;
- /// Returns the maximum token count for this model in burn mode (If `supports_burn_mode` is `false` this returns `None`)
- fn max_token_count_in_burn_mode(&self) -> Option<u64> {
- None
- }
fn max_output_tokens(&self) -> Option<u64> {
None
}
@@ -756,18 +747,6 @@ pub trait LanguageModel: Send + Sync {
}
}
-pub trait LanguageModelExt: LanguageModel {
- fn max_token_count_for_mode(&self, mode: CompletionMode) -> u64 {
- match mode {
- CompletionMode::Normal => self.max_token_count(),
- CompletionMode::Max => self
- .max_token_count_in_burn_mode()
- .unwrap_or_else(|| self.max_token_count()),
- }
- }
-}
-impl LanguageModelExt for dyn LanguageModel {}
-
impl std::fmt::Debug for dyn LanguageModel {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("<dyn LanguageModel>")
@@ -3,7 +3,7 @@ use std::sync::Arc;
use anyhow::Result;
use base64::write::EncoderWriter;
-use cloud_llm_client::{CompletionIntent, CompletionMode};
+use cloud_llm_client::CompletionIntent;
use gpui::{
App, AppContext as _, DevicePixels, Image, ImageFormat, ObjectFit, SharedString, Size, Task,
point, px, size,
@@ -442,7 +442,6 @@ pub struct LanguageModelRequest {
pub thread_id: Option<String>,
pub prompt_id: Option<String>,
pub intent: Option<CompletionIntent>,
- pub mode: Option<CompletionMode>,
pub messages: Vec<LanguageModelRequestMessage>,
pub tools: Vec<LanguageModelRequestTool>,
pub tool_choice: Option<LanguageModelToolChoice>,
@@ -1159,7 +1159,6 @@ mod tests {
thread_id: None,
prompt_id: None,
intent: None,
- mode: None,
stop: vec![],
temperature: None,
tools: vec![],
@@ -583,10 +583,6 @@ impl LanguageModel for CloudLanguageModel {
}
}
- fn supports_burn_mode(&self) -> bool {
- self.model.supports_max_mode
- }
-
fn supports_split_token_display(&self) -> bool {
use cloud_llm_client::LanguageModelProvider::*;
matches!(self.model.provider, OpenAi)
@@ -613,13 +609,6 @@ impl LanguageModel for CloudLanguageModel {
self.model.max_token_count as u64
}
- fn max_token_count_in_burn_mode(&self) -> Option<u64> {
- self.model
- .max_token_count_in_max_mode
- .filter(|_| self.model.supports_max_mode)
- .map(|max_token_count| max_token_count as u64)
- }
-
fn max_output_tokens(&self) -> Option<u64> {
Some(self.model.max_output_tokens as u64)
}
@@ -730,7 +719,6 @@ impl LanguageModel for CloudLanguageModel {
let thread_id = request.thread_id.clone();
let prompt_id = request.prompt_id.clone();
let intent = request.intent;
- let mode = request.mode;
let app_version = Some(cx.update(|cx| AppVersion::global(cx)));
let use_responses_api = cx.update(|cx| cx.has_flag::<OpenAiResponsesApiFeatureFlag>());
let thinking_allowed = request.thinking_allowed;
@@ -764,7 +752,6 @@ impl LanguageModel for CloudLanguageModel {
thread_id,
prompt_id,
intent,
- mode,
provider: cloud_llm_client::LanguageModelProvider::Anthropic,
model: request.model.clone(),
provider_request: serde_json::to_value(&request)
@@ -811,7 +798,6 @@ impl LanguageModel for CloudLanguageModel {
thread_id,
prompt_id,
intent,
- mode,
provider: cloud_llm_client::LanguageModelProvider::OpenAi,
model: request.model.clone(),
provider_request: serde_json::to_value(&request)
@@ -849,7 +835,6 @@ impl LanguageModel for CloudLanguageModel {
thread_id,
prompt_id,
intent,
- mode,
provider: cloud_llm_client::LanguageModelProvider::OpenAi,
model: request.model.clone(),
provider_request: serde_json::to_value(&request)
@@ -891,7 +876,6 @@ impl LanguageModel for CloudLanguageModel {
thread_id,
prompt_id,
intent,
- mode,
provider: cloud_llm_client::LanguageModelProvider::XAi,
model: request.model.clone(),
provider_request: serde_json::to_value(&request)
@@ -926,7 +910,6 @@ impl LanguageModel for CloudLanguageModel {
thread_id,
prompt_id,
intent,
- mode,
provider: cloud_llm_client::LanguageModelProvider::Google,
model: request.model.model_id.clone(),
provider_request: serde_json::to_value(&request)
@@ -923,7 +923,6 @@ fn into_copilot_responses(
thread_id: _,
prompt_id: _,
intent: _,
- mode: _,
messages,
tools,
tool_choice,
@@ -899,7 +899,6 @@ mod tests {
thread_id: None,
prompt_id: None,
intent: None,
- mode: None,
stop: vec![],
thinking_allowed: true,
};
@@ -933,7 +932,6 @@ mod tests {
thread_id: None,
prompt_id: None,
intent: None,
- mode: None,
stop: vec![],
thinking_allowed: true,
};
@@ -538,7 +538,6 @@ pub fn into_open_ai_response(
thread_id,
prompt_id: _,
intent: _,
- mode: _,
messages,
tools,
tool_choice,
@@ -1406,7 +1405,6 @@ mod tests {
thread_id: None,
prompt_id: None,
intent: None,
- mode: None,
messages: vec![LanguageModelRequestMessage {
role: Role::User,
content: vec![MessageContent::Text("message".into())],
@@ -1513,7 +1511,6 @@ mod tests {
thread_id: Some("thread-123".into()),
prompt_id: None,
intent: None,
- mode: None,
messages: vec![
LanguageModelRequestMessage {
role: Role::System,
@@ -70,12 +70,6 @@ pub(crate) mod m_2025_05_08 {
pub(crate) use settings::SETTINGS_PATTERNS;
}
-pub(crate) mod m_2025_05_29 {
- mod settings;
-
- pub(crate) use settings::SETTINGS_PATTERNS;
-}
-
pub(crate) mod m_2025_06_16 {
mod settings;
@@ -1,51 +0,0 @@
-use std::ops::Range;
-use tree_sitter::{Query, QueryMatch};
-
-use crate::MigrationPatterns;
-use crate::patterns::SETTINGS_NESTED_KEY_VALUE_PATTERN;
-
-pub const SETTINGS_PATTERNS: MigrationPatterns = &[(
- SETTINGS_NESTED_KEY_VALUE_PATTERN,
- replace_preferred_completion_mode_value,
-)];
-
-fn replace_preferred_completion_mode_value(
- contents: &str,
- mat: &QueryMatch,
- query: &Query,
-) -> Option<(Range<usize>, String)> {
- let parent_object_capture_ix = query.capture_index_for_name("parent_key")?;
- let parent_object_range = mat
- .nodes_for_capture_index(parent_object_capture_ix)
- .next()?
- .byte_range();
- let parent_object_name = contents.get(parent_object_range)?;
-
- if parent_object_name != "agent" {
- return None;
- }
-
- let setting_name_capture_ix = query.capture_index_for_name("setting_name")?;
- let setting_name_range = mat
- .nodes_for_capture_index(setting_name_capture_ix)
- .next()?
- .byte_range();
- let setting_name = contents.get(setting_name_range)?;
-
- if setting_name != "preferred_completion_mode" {
- return None;
- }
-
- let value_capture_ix = query.capture_index_for_name("setting_value")?;
- let value_range = mat
- .nodes_for_capture_index(value_capture_ix)
- .next()?
- .byte_range();
- let value = contents.get(value_range.clone())?;
-
- if value.trim() == "\"max\"" {
- Some((value_range, "\"burn\"".to_string()))
- } else {
- None
- }
-}
@@ -190,10 +190,6 @@ pub fn migrate_settings(text: &str) -> Result<Option<String>> {
migrations::m_2025_05_08::SETTINGS_PATTERNS,
&SETTINGS_QUERY_2025_05_08,
),
- MigrationType::TreeSitter(
- migrations::m_2025_05_29::SETTINGS_PATTERNS,
- &SETTINGS_QUERY_2025_05_29,
- ),
MigrationType::TreeSitter(
migrations::m_2025_06_16::SETTINGS_PATTERNS,
&SETTINGS_QUERY_2025_06_16,
@@ -330,10 +326,6 @@ define_query!(
SETTINGS_QUERY_2025_05_08,
migrations::m_2025_05_08::SETTINGS_PATTERNS
);
-define_query!(
- SETTINGS_QUERY_2025_05_29,
- migrations::m_2025_05_29::SETTINGS_PATTERNS
-);
define_query!(
SETTINGS_QUERY_2025_06_16,
migrations::m_2025_06_16::SETTINGS_PATTERNS
@@ -963,67 +955,6 @@ mod tests {
);
}
- #[test]
- fn test_preferred_completion_mode_migration() {
- assert_migrate_settings(
- r#"{
- "agent": {
- "preferred_completion_mode": "max",
- "enabled": true
- }
- }"#,
- Some(
- r#"{
- "agent": {
- "preferred_completion_mode": "burn",
- "enabled": true
- }
- }"#,
- ),
- );
-
- assert_migrate_settings(
- r#"{
- "agent": {
- "preferred_completion_mode": "normal",
- "enabled": true
- }
- }"#,
- None,
- );
-
- assert_migrate_settings(
- r#"{
- "agent": {
- "preferred_completion_mode": "burn",
- "enabled": true
- }
- }"#,
- None,
- );
-
- assert_migrate_settings(
- r#"{
- "other_section": {
- "preferred_completion_mode": "max"
- },
- "agent": {
- "preferred_completion_mode": "max"
- }
- }"#,
- Some(
- r#"{
- "other_section": {
- "preferred_completion_mode": "max"
- },
- "agent": {
- "preferred_completion_mode": "burn"
- }
- }"#,
- ),
- );
- }
-
#[test]
fn test_mcp_settings_migration() {
assert_migrate_settings_with_migrations(
@@ -1299,7 +1230,6 @@ mod tests {
"agent": {
"version": "2",
"enabled": true,
- "preferred_completion_mode": "normal",
"button": true,
"dock": "right",
"default_width": 640,
@@ -1322,7 +1252,6 @@ mod tests {
},
"agent": {
"enabled": true,
- "preferred_completion_mode": "normal",
"button": true,
"dock": "right",
"default_width": 640,
@@ -1089,7 +1089,6 @@ impl RulesLibrary {
thread_id: None,
prompt_id: None,
intent: None,
- mode: None,
messages: vec![LanguageModelRequestMessage {
role: Role::System,
content: vec![body.to_string().into()],
@@ -97,10 +97,6 @@ pub struct AgentSettingsContent {
/// Default: []
#[serde(default)]
pub model_parameters: Vec<LanguageModelParameters>,
- /// What completion mode to enable for new threads
- ///
- /// Default: normal
- pub preferred_completion_mode: Option<CompletionMode>,
/// Whether to show thumb buttons for feedback in the agent panel.
///
/// Default: true
@@ -297,15 +293,6 @@ pub struct LanguageModelSelection {
pub model: String,
}
-#[derive(Clone, Copy, Debug, Serialize, Deserialize, JsonSchema, MergeFrom, PartialEq, Default)]
-#[serde(rename_all = "snake_case")]
-pub enum CompletionMode {
- #[default]
- Normal,
- #[serde(alias = "max")]
- Burn,
-}
-
#[with_fallible_options]
#[derive(Clone, Debug, Serialize, Deserialize, JsonSchema, MergeFrom, PartialEq)]
pub struct LanguageModelParameters {