diff --git a/Cargo.lock b/Cargo.lock index a7c457c1e14ef4f26556ea2982429cf0ab3fc1ca..88bc8ad6fe21f0fef5045a807194c98307b4e156 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -275,6 +275,7 @@ dependencies = [ "nix 0.29.0", "project", "release_channel", + "remote", "reqwest_client", "serde", "serde_json", @@ -331,6 +332,7 @@ dependencies = [ "buffer_diff", "chrono", "client", + "clock", "cloud_api_types", "collections", "command_palette_hooks", @@ -365,6 +367,7 @@ dependencies = [ "markdown", "menu", "multi_buffer", + "node_runtime", "notifications", "ordered-float 2.10.1", "parking_lot", @@ -377,6 +380,9 @@ dependencies = [ "proto", "rand 0.9.2", "release_channel", + "remote", + "remote_connection", + "remote_server", "reqwest_client", "rope", "rules_library", @@ -16077,21 +16083,33 @@ dependencies = [ "agent_ui", "anyhow", "chrono", + "client", + "clock", "editor", + "extension", "fs", "git", "gpui", + "http_client", + "language", "language_model", + "log", "menu", + "node_runtime", "platform_title_bar", "pretty_assertions", "project", "prompt_store", "recent_projects", + "release_channel", "remote", + "remote_connection", + "remote_server", + "semver", "serde", "serde_json", "settings", + "smol", "theme", "theme_settings", "ui", diff --git a/assets/keymaps/vim.json b/assets/keymaps/vim.json index efc375795ee70c57c372aa8c56352bcae5f8e8f7..37e8cd764dfe3c988ac915600f7c2c0a3756085c 100644 --- a/assets/keymaps/vim.json +++ b/assets/keymaps/vim.json @@ -1140,6 +1140,11 @@ "g g": "menu::SelectFirst", "shift-g": "menu::SelectLast", "/": "agents_sidebar::FocusSidebarFilter", + "d d": "agent::RemoveSelectedThread", + "o": "agents_sidebar::NewThreadInGroup", + "shift-o": "agents_sidebar::NewThreadInGroup", + "] p": "multi_workspace::NextProject", + "[ p": "multi_workspace::PreviousProject", "z a": "editor::ToggleFold", "z c": "menu::SelectParent", "z o": "menu::SelectChild", diff --git a/crates/acp_thread/src/acp_thread.rs b/crates/acp_thread/src/acp_thread.rs index 
36c9fb40c4a573e09da05618a29c1898cced60ad..d7645edb505e96ff092dd3839b2a3ffa518c323c 100644 --- a/crates/acp_thread/src/acp_thread.rs +++ b/crates/acp_thread/src/acp_thread.rs @@ -36,6 +36,18 @@ use util::path_list::PathList; use util::{ResultExt, get_default_system_shell_preferring_bash, paths::PathStyle}; use uuid::Uuid; +/// Returned when the model stops because it exhausted its output token budget. +#[derive(Debug)] +pub struct MaxOutputTokensError; + +impl std::fmt::Display for MaxOutputTokensError { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "output token limit reached") + } +} + +impl std::error::Error for MaxOutputTokensError {} + /// Key used in ACP ToolCall meta to store the tool's programmatic name. /// This is a workaround since ACP's ToolCall doesn't have a dedicated name field. pub const TOOL_NAME_META_KEY: &str = "tool_name"; @@ -2262,17 +2274,15 @@ impl AcpThread { .is_some_and(|max| u.output_tokens >= max) }); - let message = if exceeded_max_output_tokens { + if exceeded_max_output_tokens { log::error!( "Max output tokens reached. Usage: {:?}", this.token_usage ); - "Maximum output tokens reached" } else { log::error!("Max tokens reached. 
Usage: {:?}", this.token_usage); - "Maximum tokens reached" - }; - return Err(anyhow!(message)); + } + return Err(anyhow!(MaxOutputTokensError)); } let canceled = matches!(r.stop_reason, acp::StopReason::Cancelled); diff --git a/crates/acp_thread/src/connection.rs b/crates/acp_thread/src/connection.rs index 58a8aa33830f12ffb713490c87c47133cc2ad96f..32bb8abde9aa5f67563780a7fe4993028f0df346 100644 --- a/crates/acp_thread/src/connection.rs +++ b/crates/acp_thread/src/connection.rs @@ -117,7 +117,7 @@ pub trait AgentConnection { &self, _method: &acp::AuthMethodId, _cx: &App, - ) -> Option { + ) -> Option>> { None } diff --git a/crates/agent/src/agent.rs b/crates/agent/src/agent.rs index b7aa9d1e311016f572928993e049798c2b5e3bb2..fe62d851ef1ba71c787aa6ec516b0b6b67449d67 100644 --- a/crates/agent/src/agent.rs +++ b/crates/agent/src/agent.rs @@ -1355,6 +1355,7 @@ impl acp_thread::AgentModelSelector for NativeAgentModelSelector { let provider = model.provider_id().0.to_string(); let model = model.id().0.to_string(); let enable_thinking = thread.read(cx).thinking_enabled(); + let speed = thread.read(cx).speed(); settings .agent .get_or_insert_default() @@ -1363,6 +1364,7 @@ impl acp_thread::AgentModelSelector for NativeAgentModelSelector { model, enable_thinking, effort, + speed, }); }, ); diff --git a/crates/agent/src/native_agent_server.rs b/crates/agent/src/native_agent_server.rs index 7f19f9005e3ff54e361f57075b7af06508476564..305c4f51952b3bf7e2771a8372a5975cac9e9385 100644 --- a/crates/agent/src/native_agent_server.rs +++ b/crates/agent/src/native_agent_server.rs @@ -97,6 +97,7 @@ fn model_id_to_selection(model_id: &acp::ModelId) -> LanguageModelSelection { model: model.to_owned(), enable_thinking: false, effort: None, + speed: None, } } diff --git a/crates/agent/src/thread.rs b/crates/agent/src/thread.rs index 220e9e6b30841da2e726cf5f53d0a6ea7d1624bb..bd9ef285169bf98ce196990156a269e830ccd738 100644 --- a/crates/agent/src/thread.rs +++ b/crates/agent/src/thread.rs @@ 
-64,6 +64,18 @@ const TOOL_CANCELED_MESSAGE: &str = "Tool canceled by user"; pub const MAX_TOOL_NAME_LENGTH: usize = 64; pub const MAX_SUBAGENT_DEPTH: u8 = 1; +/// Returned when a turn is attempted but no language model has been selected. +#[derive(Debug)] +pub struct NoModelConfiguredError; + +impl std::fmt::Display for NoModelConfiguredError { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "no language model configured") + } +} + +impl std::error::Error for NoModelConfiguredError {} + /// Context passed to a subagent thread for lifecycle management #[derive(Clone, Debug, Serialize, Deserialize)] pub struct SubagentContext { @@ -1041,6 +1053,10 @@ impl Thread { .default_model .as_ref() .and_then(|model| model.effort.clone()); + let speed = settings + .default_model + .as_ref() + .and_then(|model| model.speed); let (prompt_capabilities_tx, prompt_capabilities_rx) = watch::channel(Self::prompt_capabilities(model.as_deref())); Self { @@ -1072,7 +1088,7 @@ impl Thread { model, summarization_model: None, thinking_enabled: enable_thinking, - speed: None, + speed, thinking_effort, prompt_capabilities_tx, prompt_capabilities_rx, @@ -1768,7 +1784,9 @@ impl Thread { &mut self, cx: &mut Context, ) -> Result>> { - let model = self.model().context("No language model configured")?; + let model = self + .model() + .ok_or_else(|| anyhow!(NoModelConfiguredError))?; log::info!("Thread::send called with model: {}", model.name().0); self.advance_prompt_id(); @@ -1892,7 +1910,10 @@ impl Thread { // mid-turn changes (e.g. the user switches model, toggles tools, // or changes profile) take effect between tool-call rounds. 
let (model, request) = this.update(cx, |this, cx| { - let model = this.model.clone().context("No language model configured")?; + let model = this + .model + .clone() + .ok_or_else(|| anyhow!(NoModelConfiguredError))?; this.refresh_turn_tools(cx); let request = this.build_completion_request(intent, cx)?; anyhow::Ok((model, request)) @@ -2738,7 +2759,9 @@ impl Thread { completion_intent }; - let model = self.model().context("No language model configured")?; + let model = self + .model() + .ok_or_else(|| anyhow!(NoModelConfiguredError))?; let tools = if let Some(turn) = self.running_turn.as_ref() { turn.tools .iter() diff --git a/crates/agent/src/tools/streaming_edit_file_tool.rs b/crates/agent/src/tools/streaming_edit_file_tool.rs index 47da35bbf25ad188f3f6b98e843b2955910bb7ac..c988fede454ff6e8b4dc327c81132224a8b87a49 100644 --- a/crates/agent/src/tools/streaming_edit_file_tool.rs +++ b/crates/agent/src/tools/streaming_edit_file_tool.rs @@ -189,9 +189,9 @@ pub enum StreamingEditFileToolOutput { }, Error { error: String, - #[serde(default)] + #[serde(default, skip_serializing_if = "Option::is_none")] input_path: Option, - #[serde(default)] + #[serde(default, skip_serializing_if = "String::is_empty")] diff: String, }, } diff --git a/crates/agent_servers/Cargo.toml b/crates/agent_servers/Cargo.toml index 7151f0084b1cb7d9b206f57551ce715ef67483f7..5fbf1e821cb4a41f09c433ec05fdde9fbbde1a9f 100644 --- a/crates/agent_servers/Cargo.toml +++ b/crates/agent_servers/Cargo.toml @@ -39,6 +39,7 @@ language_model.workspace = true log.workspace = true project.workspace = true release_channel.workspace = true +remote.workspace = true reqwest_client = { workspace = true, optional = true } serde.workspace = true serde_json.workspace = true diff --git a/crates/agent_servers/src/acp.rs b/crates/agent_servers/src/acp.rs index e56db9df927ab3cdf838587f1cb4f9514eb5a758..dbcaabed1cf1971a6e281d8d31f8dad25dfb7434 100644 --- a/crates/agent_servers/src/acp.rs +++ b/crates/agent_servers/src/acp.rs 
@@ -10,20 +10,20 @@ use collections::HashMap; use feature_flags::{AcpBetaFeatureFlag, FeatureFlagAppExt as _}; use futures::AsyncBufReadExt as _; use futures::io::BufReader; -use project::agent_server_store::AgentServerCommand; +use project::agent_server_store::{AgentServerCommand, AgentServerStore}; use project::{AgentId, Project}; +use remote::remote_client::Interactive; use serde::Deserialize; use settings::Settings as _; -use task::{ShellBuilder, SpawnInTerminal}; -use util::ResultExt as _; -use util::path_list::PathList; -use util::process::Child; - use std::path::PathBuf; use std::process::Stdio; use std::rc::Rc; use std::{any::Any, cell::RefCell}; +use task::{ShellBuilder, SpawnInTerminal}; use thiserror::Error; +use util::ResultExt as _; +use util::path_list::PathList; +use util::process::Child; use anyhow::{Context as _, Result}; use gpui::{App, AppContext as _, AsyncApp, Entity, SharedString, Task, WeakEntity}; @@ -46,7 +46,7 @@ pub struct AcpConnection { connection: Rc, sessions: Rc>>, auth_methods: Vec, - command: AgentServerCommand, + agent_server_store: WeakEntity, agent_capabilities: acp::AgentCapabilities, default_mode: Option, default_model: Option, @@ -167,6 +167,7 @@ pub async fn connect( agent_id: AgentId, project: Entity, command: AgentServerCommand, + agent_server_store: WeakEntity, default_mode: Option, default_model: Option, default_config_options: HashMap, @@ -176,6 +177,7 @@ pub async fn connect( agent_id, project, command.clone(), + agent_server_store, default_mode, default_model, default_config_options, @@ -192,23 +194,52 @@ impl AcpConnection { agent_id: AgentId, project: Entity, command: AgentServerCommand, + agent_server_store: WeakEntity, default_mode: Option, default_model: Option, default_config_options: HashMap, cx: &mut AsyncApp, ) -> Result { + let root_dir = project.read_with(cx, |project, cx| { + project + .default_path_list(cx) + .ordered_paths() + .next() + .cloned() + }); + let original_command = command.clone(); + let 
(path, args, env) = project + .read_with(cx, |project, cx| { + project.remote_client().and_then(|client| { + let template = client + .read(cx) + .build_command_with_options( + Some(command.path.display().to_string()), + &command.args, + &command.env.clone().into_iter().flatten().collect(), + root_dir.as_ref().map(|path| path.display().to_string()), + None, + Interactive::No, + ) + .log_err()?; + Some((template.program, template.args, template.env)) + }) + }) + .unwrap_or_else(|| { + ( + command.path.display().to_string(), + command.args, + command.env.unwrap_or_default(), + ) + }); + let shell = cx.update(|cx| TerminalSettings::get(None, cx).shell.clone()); let builder = ShellBuilder::new(&shell, cfg!(windows)).non_interactive(); - let mut child = - builder.build_std_command(Some(command.path.display().to_string()), &command.args); - child.envs(command.env.iter().flatten()); - if let Some(cwd) = project.update(cx, |project, cx| { + let mut child = builder.build_std_command(Some(path.clone()), &args); + child.envs(env.clone()); + if let Some(cwd) = project.read_with(cx, |project, _cx| { if project.is_local() { - project - .default_path_list(cx) - .ordered_paths() - .next() - .cloned() + root_dir.as_ref() } else { None } @@ -220,11 +251,7 @@ impl AcpConnection { let stdout = child.stdout.take().context("Failed to take stdout")?; let stdin = child.stdin.take().context("Failed to take stdin")?; let stderr = child.stderr.take().context("Failed to take stderr")?; - log::debug!( - "Spawning external agent server: {:?}, {:?}", - command.path, - command.args - ); + log::debug!("Spawning external agent server: {:?}, {:?}", path, args); log::trace!("Spawned (pid: {})", child.id()); let sessions = Rc::new(RefCell::new(HashMap::default())); @@ -342,13 +369,13 @@ impl AcpConnection { // TODO: Remove this override once Google team releases their official auth methods let auth_methods = if agent_id.0.as_ref() == GEMINI_ID { - let mut args = command.args.clone(); - args.retain(|a| 
a != "--experimental-acp" && a != "--acp"); + let mut gemini_args = original_command.args.clone(); + gemini_args.retain(|a| a != "--experimental-acp" && a != "--acp"); let value = serde_json::json!({ "label": "gemini /auth", - "command": command.path.to_string_lossy().into_owned(), - "args": args, - "env": command.env.clone().unwrap_or_default(), + "command": original_command.path.to_string_lossy(), + "args": gemini_args, + "env": original_command.env.unwrap_or_default(), }); let meta = acp::Meta::from_iter([("terminal-auth".to_string(), value)]); vec![acp::AuthMethod::Agent( @@ -362,7 +389,7 @@ impl AcpConnection { Ok(Self { id: agent_id, auth_methods, - command, + agent_server_store, connection, telemetry_id, sessions, @@ -494,18 +521,12 @@ fn terminal_auth_task( agent_id: &AgentId, method: &acp::AuthMethodTerminal, ) -> SpawnInTerminal { - let mut args = command.args.clone(); - args.extend(method.args.clone()); - - let mut env = command.env.clone().unwrap_or_default(); - env.extend(method.env.clone()); - acp_thread::build_terminal_auth_task( terminal_auth_task_id(agent_id, &method.id), method.name.clone(), command.path.to_string_lossy().into_owned(), - args, - env, + command.args.clone(), + command.env.clone().unwrap_or_default(), ) } @@ -890,7 +911,7 @@ impl AgentConnection for AcpConnection { &self, method_id: &acp::AuthMethodId, cx: &App, - ) -> Option { + ) -> Option>> { let method = self .auth_methods .iter() @@ -898,9 +919,28 @@ impl AgentConnection for AcpConnection { match method { acp::AuthMethod::Terminal(terminal) if cx.has_flag::() => { - Some(terminal_auth_task(&self.command, &self.id, terminal)) + let agent_id = self.id.clone(); + let terminal = terminal.clone(); + let store = self.agent_server_store.clone(); + Some(cx.spawn(async move |cx| { + let command = store + .update(cx, |store, cx| { + let agent = store + .get_external_agent(&agent_id) + .context("Agent server not found")?; + anyhow::Ok(agent.get_command( + terminal.args.clone(), + 
HashMap::from_iter(terminal.env.clone()), + &mut cx.to_async(), + )) + })? + .context("Failed to get agent command")? + .await?; + Ok(terminal_auth_task(&command, &agent_id, &terminal)) + })) } - _ => meta_terminal_auth_task(&self.id, method_id, method), + _ => meta_terminal_auth_task(&self.id, method_id, method) + .map(|task| Task::ready(Ok(task))), } } @@ -1075,39 +1115,32 @@ mod tests { use super::*; #[test] - fn terminal_auth_task_reuses_command_and_merges_args_and_env() { + fn terminal_auth_task_builds_spawn_from_prebuilt_command() { let command = AgentServerCommand { path: "/path/to/agent".into(), - args: vec!["--acp".into(), "--verbose".into()], + args: vec!["--acp".into(), "--verbose".into(), "/auth".into()], env: Some(HashMap::from_iter([ ("BASE".into(), "1".into()), - ("SHARED".into(), "base".into()), + ("SHARED".into(), "override".into()), + ("EXTRA".into(), "2".into()), ])), }; - let method = acp::AuthMethodTerminal::new("login", "Login") - .args(vec!["/auth".into()]) - .env(std::collections::HashMap::from_iter([ - ("EXTRA".into(), "2".into()), - ("SHARED".into(), "override".into()), - ])); + let method = acp::AuthMethodTerminal::new("login", "Login"); - let terminal_auth_task = terminal_auth_task(&command, &AgentId::new("test-agent"), &method); + let task = terminal_auth_task(&command, &AgentId::new("test-agent"), &method); + assert_eq!(task.command.as_deref(), Some("/path/to/agent")); + assert_eq!(task.args, vec!["--acp", "--verbose", "/auth"]); assert_eq!( - terminal_auth_task.command.as_deref(), - Some("/path/to/agent") - ); - assert_eq!(terminal_auth_task.args, vec!["--acp", "--verbose", "/auth"]); - assert_eq!( - terminal_auth_task.env, + task.env, HashMap::from_iter([ ("BASE".into(), "1".into()), ("SHARED".into(), "override".into()), ("EXTRA".into(), "2".into()), ]) ); - assert_eq!(terminal_auth_task.label, "Login"); - assert_eq!(terminal_auth_task.command_label, "Login"); + assert_eq!(task.label, "Login"); + assert_eq!(task.command_label, 
"Login"); } #[test] @@ -1127,21 +1160,17 @@ mod tests { )])), ); - let terminal_auth_task = - meta_terminal_auth_task(&AgentId::new("test-agent"), &method_id, &method) - .expect("expected legacy terminal auth task"); + let task = meta_terminal_auth_task(&AgentId::new("test-agent"), &method_id, &method) + .expect("expected legacy terminal auth task"); + assert_eq!(task.id.0, "external-agent-test-agent-legacy-login-login"); + assert_eq!(task.command.as_deref(), Some("legacy-agent")); + assert_eq!(task.args, vec!["auth", "--interactive"]); assert_eq!( - terminal_auth_task.id.0, - "external-agent-test-agent-legacy-login-login" - ); - assert_eq!(terminal_auth_task.command.as_deref(), Some("legacy-agent")); - assert_eq!(terminal_auth_task.args, vec!["auth", "--interactive"]); - assert_eq!( - terminal_auth_task.env, + task.env, HashMap::from_iter([("AUTH_MODE".into(), "interactive".into())]) ); - assert_eq!(terminal_auth_task.label, "legacy /auth"); + assert_eq!(task.label, "legacy /auth"); } #[test] @@ -1186,30 +1215,30 @@ mod tests { let command = AgentServerCommand { path: "/path/to/agent".into(), - args: vec!["--acp".into()], - env: Some(HashMap::from_iter([("BASE".into(), "1".into())])), + args: vec!["--acp".into(), "/auth".into()], + env: Some(HashMap::from_iter([ + ("BASE".into(), "1".into()), + ("AUTH_MODE".into(), "first-class".into()), + ])), }; - let terminal_auth_task = match &method { + let task = match &method { acp::AuthMethod::Terminal(terminal) => { terminal_auth_task(&command, &AgentId::new("test-agent"), terminal) } _ => unreachable!(), }; + assert_eq!(task.command.as_deref(), Some("/path/to/agent")); + assert_eq!(task.args, vec!["--acp", "/auth"]); assert_eq!( - terminal_auth_task.command.as_deref(), - Some("/path/to/agent") - ); - assert_eq!(terminal_auth_task.args, vec!["--acp", "/auth"]); - assert_eq!( - terminal_auth_task.env, + task.env, HashMap::from_iter([ ("BASE".into(), "1".into()), ("AUTH_MODE".into(), "first-class".into()), ]) ); - 
assert_eq!(terminal_auth_task.label, "Login"); + assert_eq!(task.label, "Login"); } } diff --git a/crates/agent_servers/src/custom.rs b/crates/agent_servers/src/custom.rs index fb8d0a515244576d2cf02e4989cbd71beca448c7..151ddcefcfb0b839199c21d826a4c9f6836f876b 100644 --- a/crates/agent_servers/src/custom.rs +++ b/crates/agent_servers/src/custom.rs @@ -360,17 +360,17 @@ impl AgentServer for CustomAgentServer { let agent = store.get_external_agent(&agent_id).with_context(|| { format!("Custom agent server `{}` is not registered", agent_id) })?; - anyhow::Ok(agent.get_command( - extra_env, - delegate.new_version_available, - &mut cx.to_async(), - )) + if let Some(new_version_available_tx) = delegate.new_version_available { + agent.set_new_version_available_tx(new_version_available_tx); + } + anyhow::Ok(agent.get_command(vec![], extra_env, &mut cx.to_async())) })?? .await?; let connection = crate::acp::connect( agent_id, project, command, + store.clone(), default_mode, default_model, default_config_options, diff --git a/crates/agent_ui/Cargo.toml b/crates/agent_ui/Cargo.toml index e505a124b6898953db9751ddfc8ab98cb7f496f0..78f035106d37faa16a1494a138dfa38ed304dd8d 100644 --- a/crates/agent_ui/Cargo.toml +++ b/crates/agent_ui/Cargo.toml @@ -82,6 +82,8 @@ prompt_store.workspace = true proto.workspace = true rand.workspace = true release_channel.workspace = true +remote.workspace = true +remote_connection.workspace = true rope.workspace = true rules_library.workspace = true schemars.workspace = true @@ -115,17 +117,23 @@ reqwest_client = { workspace = true, optional = true } acp_thread = { workspace = true, features = ["test-support"] } agent = { workspace = true, features = ["test-support"] } buffer_diff = { workspace = true, features = ["test-support"] } - +client = { workspace = true, features = ["test-support"] } +clock = { workspace = true, features = ["test-support"] } db = { workspace = true, features = ["test-support"] } editor = { workspace = true, features = 
["test-support"] } eval_utils.workspace = true gpui = { workspace = true, "features" = ["test-support"] } +http_client = { workspace = true, features = ["test-support"] } indoc.workspace = true language = { workspace = true, "features" = ["test-support"] } languages = { workspace = true, features = ["test-support"] } language_model = { workspace = true, "features" = ["test-support"] } +node_runtime = { workspace = true, features = ["test-support"] } pretty_assertions.workspace = true project = { workspace = true, features = ["test-support"] } +remote = { workspace = true, features = ["test-support"] } +remote_connection = { workspace = true, features = ["test-support"] } +remote_server = { workspace = true, features = ["test-support"] } semver.workspace = true reqwest_client.workspace = true diff --git a/crates/agent_ui/src/agent_configuration/manage_profiles_modal.rs b/crates/agent_ui/src/agent_configuration/manage_profiles_modal.rs index 9a8b56f43f906f9ad57cb3dec9e7d95af4cb6cc5..9e042b8ad66111213231d99098a8122e85e9ef6a 100644 --- a/crates/agent_ui/src/agent_configuration/manage_profiles_modal.rs +++ b/crates/agent_ui/src/agent_configuration/manage_profiles_modal.rs @@ -267,6 +267,7 @@ impl ManageProfilesModal { effort: model .default_effort_level() .map(|effort| effort.value.to_string()), + speed: None, }); } } diff --git a/crates/agent_ui/src/agent_panel.rs b/crates/agent_ui/src/agent_panel.rs index 291a5ff0d9da2c2c48f705358e159bc0cbfe7fcb..2ff4cd18a78fd53c5d540e66670d6e6c9e51aa47 100644 --- a/crates/agent_ui/src/agent_panel.rs +++ b/crates/agent_ui/src/agent_panel.rs @@ -56,8 +56,9 @@ use extension_host::ExtensionStore; use fs::Fs; use gpui::{ Action, Animation, AnimationExt, AnyElement, App, AsyncWindowContext, ClipboardItem, Corner, - DismissEvent, Entity, EventEmitter, ExternalPaths, FocusHandle, Focusable, KeyContext, Pixels, - Subscription, Task, UpdateGlobal, WeakEntity, prelude::*, pulsating_between, + DismissEvent, Entity, EntityId, EventEmitter, 
ExternalPaths, FocusHandle, Focusable, + KeyContext, Pixels, Subscription, Task, UpdateGlobal, WeakEntity, prelude::*, + pulsating_between, }; use language::LanguageRegistry; use language_model::LanguageModelRegistry; @@ -65,6 +66,7 @@ use project::git_store::{GitStoreEvent, RepositoryEvent}; use project::project_settings::ProjectSettings; use project::{Project, ProjectPath, Worktree, linked_worktree_short_name}; use prompt_store::{PromptStore, UserPromptId}; +use remote::RemoteConnectionOptions; use rules_library::{RulesLibrary, open_rules_library}; use settings::TerminalDockPosition; use settings::{Settings, update_settings_file}; @@ -77,8 +79,8 @@ use ui::{ }; use util::{ResultExt as _, debug_panic}; use workspace::{ - CollaboratorId, DraggedSelection, DraggedTab, OpenMode, OpenResult, PathList, - SerializedPathList, ToggleWorkspaceSidebar, ToggleZoom, Workspace, WorkspaceId, + CollaboratorId, DraggedSelection, DraggedTab, PathList, SerializedPathList, + ToggleWorkspaceSidebar, ToggleZoom, Workspace, WorkspaceId, dock::{DockPosition, Panel, PanelEvent}, }; use zed_actions::{ @@ -818,7 +820,7 @@ pub struct AgentPanel { agent_layout_onboarding_dismissed: AtomicBool, selected_agent: Agent, start_thread_in: StartThreadIn, - worktree_creation_status: Option, + worktree_creation_status: Option<(EntityId, WorktreeCreationStatus)>, _thread_view_subscription: Option, _active_thread_focus_subscription: Option, _worktree_creation_task: Option>, @@ -1861,6 +1863,7 @@ impl AgentPanel { model, enable_thinking, effort, + speed: None, }) }); } @@ -1893,6 +1896,14 @@ impl AgentPanel { } } + pub fn conversation_views(&self) -> Vec> { + self.active_conversation_view() + .into_iter() + .cloned() + .chain(self.background_threads.values().cloned()) + .collect() + } + pub fn active_thread_view(&self, cx: &App) -> Option> { let server_view = self.active_conversation_view()?; server_view.read(cx).active_thread().cloned() @@ -2785,6 +2796,7 @@ impl AgentPanel { PathBuf, 
futures::channel::oneshot::Receiver>, )>, + fs: Arc, cx: &mut AsyncWindowContext, ) -> Result> { let mut created_paths: Vec = Vec::new(); @@ -2793,10 +2805,10 @@ impl AgentPanel { let mut first_error: Option = None; for (repo, new_path, receiver) in creation_infos { + repos_and_paths.push((repo.clone(), new_path.clone())); match receiver.await { Ok(Ok(())) => { - created_paths.push(new_path.clone()); - repos_and_paths.push((repo, new_path)); + created_paths.push(new_path); } Ok(Err(err)) => { if first_error.is_none() { @@ -2815,34 +2827,66 @@ impl AgentPanel { return Ok(created_paths); }; - // Rollback all successfully created worktrees - let mut rollback_receivers = Vec::new(); + // Rollback all attempted worktrees (both successful and failed) + let mut rollback_futures = Vec::new(); for (rollback_repo, rollback_path) in &repos_and_paths { - if let Ok(receiver) = cx.update(|_, cx| { - rollback_repo.update(cx, |repo, _cx| { - repo.remove_worktree(rollback_path.clone(), true) + let receiver = cx + .update(|_, cx| { + rollback_repo.update(cx, |repo, _cx| { + repo.remove_worktree(rollback_path.clone(), true) + }) }) - }) { - rollback_receivers.push((rollback_path.clone(), receiver)); - } + .ok(); + + rollback_futures.push((rollback_path.clone(), receiver)); } + let mut rollback_failures: Vec = Vec::new(); - for (path, receiver) in rollback_receivers { - match receiver.await { - Ok(Ok(())) => {} - Ok(Err(rollback_err)) => { - log::error!( - "failed to rollback worktree at {}: {rollback_err}", - path.display() - ); - rollback_failures.push(format!("{}: {rollback_err}", path.display())); + for (path, receiver_opt) in rollback_futures { + let mut git_remove_failed = false; + + if let Some(receiver) = receiver_opt { + match receiver.await { + Ok(Ok(())) => {} + Ok(Err(rollback_err)) => { + log::error!( + "git worktree remove failed for {}: {rollback_err}", + path.display() + ); + git_remove_failed = true; + } + Err(canceled) => { + log::error!( + "git worktree remove 
failed for {}: {canceled}", + path.display() + ); + git_remove_failed = true; + } } - Err(rollback_err) => { - log::error!( - "failed to rollback worktree at {}: {rollback_err}", - path.display() - ); - rollback_failures.push(format!("{}: {rollback_err}", path.display())); + } else { + log::error!( + "failed to dispatch git worktree remove for {}", + path.display() + ); + git_remove_failed = true; + } + + // `git worktree remove` normally removes this directory, but since + // `git worktree remove` failed (or wasn't dispatched), manually rm the directory. + if git_remove_failed { + if let Err(fs_err) = fs + .remove_dir( + &path, + fs::RemoveOptions { + recursive: true, + ignore_if_not_exists: true, + }, + ) + .await + { + let msg = format!("{}: failed to remove directory: {fs_err}", path.display()); + log::error!("{}", msg); + rollback_failures.push(msg); } } } @@ -2860,7 +2904,9 @@ impl AgentPanel { window: &mut Window, cx: &mut Context, ) { - self.worktree_creation_status = Some(WorktreeCreationStatus::Error(message)); + if let Some((_, status)) = &mut self.worktree_creation_status { + *status = WorktreeCreationStatus::Error(message); + } if matches!(self.active_view, ActiveView::Uninitialized) { let selected_agent = self.selected_agent.clone(); self.new_agent_thread(selected_agent, window, cx); @@ -2877,12 +2923,17 @@ impl AgentPanel { ) { if matches!( self.worktree_creation_status, - Some(WorktreeCreationStatus::Creating) + Some((_, WorktreeCreationStatus::Creating)) ) { return; } - self.worktree_creation_status = Some(WorktreeCreationStatus::Creating); + let conversation_view_id = self + .active_conversation_view() + .map(|v| v.entity_id()) + .unwrap_or_else(|| EntityId::from(0u64)); + self.worktree_creation_status = + Some((conversation_view_id, WorktreeCreationStatus::Creating)); cx.notify(); let (git_repos, non_git_paths) = self.classify_worktrees(cx); @@ -2932,6 +2983,24 @@ impl AgentPanel { .absolute_path(&project_path, cx) }); + let 
remote_connection_options = self.project.read(cx).remote_connection_options(cx); + + if remote_connection_options.is_some() { + let is_disconnected = self + .project + .read(cx) + .remote_client() + .is_some_and(|client| client.read(cx).is_disconnected()); + if is_disconnected { + self.set_worktree_creation_error( + "Cannot create worktree: remote connection is not active".into(), + window, + cx, + ); + return; + } + } + let workspace = self.workspace.clone(); let window_handle = window .window_handle() @@ -3030,8 +3099,10 @@ impl AgentPanel { } }; + let fs = cx.update(|_, cx| ::global(cx))?; + let created_paths = - match Self::await_and_rollback_on_failure(creation_infos, cx).await { + match Self::await_and_rollback_on_failure(creation_infos, fs, cx).await { Ok(paths) => paths, Err(err) => { this.update_in(cx, |this, window, cx| { @@ -3058,25 +3129,21 @@ impl AgentPanel { } }; - let app_state = match workspace.upgrade() { - Some(workspace) => cx.update(|_, cx| workspace.read(cx).app_state().clone())?, - None => { - this.update_in(cx, |this, window, cx| { - this.set_worktree_creation_error( - "Workspace no longer available".into(), - window, - cx, - ); - })?; - return anyhow::Ok(()); - } - }; + if workspace.upgrade().is_none() { + this.update_in(cx, |this, window, cx| { + this.set_worktree_creation_error( + "Workspace no longer available".into(), + window, + cx, + ); + })?; + return anyhow::Ok(()); + } let this_for_error = this.clone(); if let Err(err) = Self::open_worktree_workspace_and_start_thread( this, all_paths, - app_state, window_handle, active_file_path, path_remapping, @@ -3084,6 +3151,7 @@ impl AgentPanel { has_non_git, content, selected_agent, + remote_connection_options, cx, ) .await @@ -3109,7 +3177,6 @@ impl AgentPanel { async fn open_worktree_workspace_and_start_thread( this: WeakEntity, all_paths: Vec, - app_state: Arc, window_handle: Option>, active_file_path: Option, path_remapping: Vec<(PathBuf, PathBuf)>, @@ -3117,25 +3184,39 @@ impl AgentPanel 
{ has_non_git: bool, content: Vec, selected_agent: Option, + remote_connection_options: Option, cx: &mut AsyncWindowContext, ) -> Result<()> { - let OpenResult { - window: new_window_handle, - workspace: new_workspace, - .. - } = cx - .update(|_window, cx| { - Workspace::new_local( - all_paths, - app_state, - window_handle, - None, + let window_handle = window_handle + .ok_or_else(|| anyhow!("No window handle available for workspace creation"))?; + + let (workspace_task, modal_workspace) = + window_handle.update(cx, |multi_workspace, window, cx| { + let path_list = PathList::new(&all_paths); + let active_workspace = multi_workspace.workspace().clone(); + let modal_workspace = active_workspace.clone(); + + let task = multi_workspace.find_or_create_workspace( + path_list, + remote_connection_options, None, - OpenMode::Add, + move |connection_options, window, cx| { + remote_connection::connect_with_modal( + &active_workspace, + connection_options, + window, + cx, + ) + }, + window, cx, - ) - })? 
- .await?; + ); + (task, modal_workspace) + })?; + + let result = workspace_task.await; + remote_connection::dismiss_connection_modal(&modal_workspace, cx); + let new_workspace = result?; let panels_task = new_workspace.update(cx, |workspace, _cx| workspace.take_panels_task()); @@ -3171,7 +3252,7 @@ impl AgentPanel { auto_submit: true, }; - new_window_handle.update(cx, |_multi_workspace, window, cx| { + window_handle.update(cx, |_multi_workspace, window, cx| { new_workspace.update(cx, |workspace, cx| { if has_non_git { let toast_id = workspace::notifications::NotificationId::unique::(); @@ -3256,7 +3337,7 @@ impl AgentPanel { }); })?; - new_window_handle.update(cx, |multi_workspace, window, cx| { + window_handle.update(cx, |multi_workspace, window, cx| { multi_workspace.activate(new_workspace.clone(), window, cx); new_workspace.update(cx, |workspace, cx| { @@ -3373,7 +3454,7 @@ impl Panel for AgentPanel { && matches!(self.active_view, ActiveView::Uninitialized) && !matches!( self.worktree_creation_status, - Some(WorktreeCreationStatus::Creating) + Some((_, WorktreeCreationStatus::Creating)) ) { let selected_agent = self.selected_agent.clone(); @@ -3613,13 +3694,19 @@ impl AgentPanel { !self.project.read(cx).repositories(cx).is_empty() } + fn is_active_view_creating_worktree(&self, _cx: &App) -> bool { + match &self.worktree_creation_status { + Some((view_id, WorktreeCreationStatus::Creating)) => { + self.active_conversation_view().map(|v| v.entity_id()) == Some(*view_id) + } + _ => false, + } + } + fn render_start_thread_in_selector(&self, cx: &mut Context) -> impl IntoElement { let focus_handle = self.focus_handle(cx); - let is_creating = matches!( - self.worktree_creation_status, - Some(WorktreeCreationStatus::Creating) - ); + let is_creating = self.is_active_view_creating_worktree(cx); let trigger_parts = self .start_thread_in @@ -3672,10 +3759,7 @@ impl AgentPanel { } fn render_new_worktree_branch_selector(&self, cx: &mut Context) -> impl IntoElement { - let 
is_creating = matches!( - self.worktree_creation_status, - Some(WorktreeCreationStatus::Creating) - ); + let is_creating = self.is_active_view_creating_worktree(cx); let project_ref = self.project.read(cx); let trigger_parts = self @@ -4143,7 +4227,11 @@ impl AgentPanel { } fn render_worktree_creation_status(&self, cx: &mut Context) -> Option { - let status = self.worktree_creation_status.as_ref()?; + let (view_id, status) = self.worktree_creation_status.as_ref()?; + let active_view_id = self.active_conversation_view().map(|v| v.entity_id()); + if active_view_id != Some(*view_id) { + return None; + } match status { WorktreeCreationStatus::Creating => Some( h_flex() @@ -4683,10 +4771,11 @@ impl AgentPanel { /// /// This is a test-only helper for visual tests. pub fn worktree_creation_status_for_tests(&self) -> Option<&WorktreeCreationStatus> { - self.worktree_creation_status.as_ref() + self.worktree_creation_status.as_ref().map(|(_, s)| s) } - /// Sets the worktree creation status directly. + /// Sets the worktree creation status directly, associating it with the + /// currently active conversation view. /// /// This is a test-only helper for visual tests that need to show the /// "Creating worktree…" spinner or error banners. 
@@ -4695,7 +4784,13 @@ impl AgentPanel { status: Option, cx: &mut Context, ) { - self.worktree_creation_status = status; + self.worktree_creation_status = status.map(|s| { + let view_id = self + .active_conversation_view() + .map(|v| v.entity_id()) + .unwrap_or_else(|| EntityId::from(0u64)); + (view_id, s) + }); cx.notify(); } @@ -4736,6 +4831,7 @@ mod tests { }; use acp_thread::{StubAgentConnection, ThreadStatus}; use agent_servers::CODEX_ID; + use feature_flags::FeatureFlagAppExt; use fs::FakeFs; use gpui::{TestAppContext, VisualTestContext}; use project::Project; @@ -4752,7 +4848,7 @@ mod tests { language_model::LanguageModelRegistry::test(cx); }); - // --- Create a MultiWorkspace window with two workspaces --- + // Create a MultiWorkspace window with two workspaces. let fs = FakeFs::new(cx.executor()); let project_a = Project::test(fs.clone(), [], cx).await; let project_b = Project::test(fs, [], cx).await; @@ -4781,7 +4877,7 @@ mod tests { let cx = &mut VisualTestContext::from_window(multi_workspace.into(), cx); - // --- Set up workspace A: with an active thread --- + // Set up workspace A: with an active thread. let panel_a = workspace_a.update_in(cx, |workspace, window, cx| { cx.new(|cx| AgentPanel::new(workspace, None, window, cx)) }); @@ -4807,7 +4903,7 @@ mod tests { let agent_type_a = panel_a.read_with(cx, |panel, _cx| panel.selected_agent.clone()); - // --- Set up workspace B: ClaudeCode, no active thread --- + // Set up workspace B: ClaudeCode, no active thread. let panel_b = workspace_b.update_in(cx, |workspace, window, cx| { cx.new(|cx| AgentPanel::new(workspace, None, window, cx)) }); @@ -4818,12 +4914,12 @@ mod tests { }; }); - // --- Serialize both panels --- + // Serialize both panels. 
panel_a.update(cx, |panel, cx| panel.serialize(cx)); panel_b.update(cx, |panel, cx| panel.serialize(cx)); cx.run_until_parked(); - // --- Load fresh panels for each workspace and verify independent state --- + // Load fresh panels for each workspace and verify independent state. let async_cx = cx.update(|window, cx| window.to_async(cx)); let loaded_a = AgentPanel::load(workspace_a.downgrade(), async_cx) .await @@ -5942,7 +6038,8 @@ mod tests { // Simulate worktree creation in progress and reset to Uninitialized panel.update_in(cx, |panel, window, cx| { - panel.worktree_creation_status = Some(WorktreeCreationStatus::Creating); + panel.worktree_creation_status = + Some((EntityId::from(0u64), WorktreeCreationStatus::Creating)); panel.active_view = ActiveView::Uninitialized; Panel::set_active(panel, true, window, cx); assert!( @@ -6388,7 +6485,7 @@ mod tests { let metadata = store .entry(session_id) .unwrap_or_else(|| panic!("{label} thread metadata should exist")); - metadata.folder_paths.clone() + metadata.folder_paths().clone() }); let mut sorted = metadata_paths.ordered_paths().cloned().collect::>(); sorted.sort(); @@ -6637,4 +6734,499 @@ mod tests { ); }); } + + #[gpui::test] + async fn test_rollback_all_succeed_returns_ok(cx: &mut TestAppContext) { + init_test(cx); + let fs = FakeFs::new(cx.executor()); + cx.update(|cx| { + cx.update_flags(true, vec!["agent-v2".to_string()]); + agent::ThreadStore::init_global(cx); + language_model::LanguageModelRegistry::test(cx); + ::set_global(fs.clone(), cx); + }); + + fs.insert_tree( + "/project", + json!({ + ".git": {}, + "src": { "main.rs": "fn main() {}" } + }), + ) + .await; + + let project = Project::test(fs.clone(), [Path::new("/project")], cx).await; + cx.executor().run_until_parked(); + + let repository = project.read_with(cx, |project, cx| { + project.repositories(cx).values().next().unwrap().clone() + }); + + let multi_workspace = + cx.add_window(|window, cx| MultiWorkspace::test_new(project.clone(), window, cx)); + 
+ let path_a = PathBuf::from("/worktrees/branch/project_a"); + let path_b = PathBuf::from("/worktrees/branch/project_b"); + + let (sender_a, receiver_a) = futures::channel::oneshot::channel::>(); + let (sender_b, receiver_b) = futures::channel::oneshot::channel::>(); + sender_a.send(Ok(())).unwrap(); + sender_b.send(Ok(())).unwrap(); + + let creation_infos = vec![ + (repository.clone(), path_a.clone(), receiver_a), + (repository.clone(), path_b.clone(), receiver_b), + ]; + + let fs_clone = fs.clone(); + let result = multi_workspace + .update(cx, |_, window, cx| { + window.spawn(cx, async move |cx| { + AgentPanel::await_and_rollback_on_failure(creation_infos, fs_clone, cx).await + }) + }) + .unwrap() + .await; + + let paths = result.expect("all succeed should return Ok"); + assert_eq!(paths, vec![path_a, path_b]); + } + + #[gpui::test] + async fn test_rollback_on_failure_attempts_all_worktrees(cx: &mut TestAppContext) { + init_test(cx); + let fs = FakeFs::new(cx.executor()); + cx.update(|cx| { + cx.update_flags(true, vec!["agent-v2".to_string()]); + agent::ThreadStore::init_global(cx); + language_model::LanguageModelRegistry::test(cx); + ::set_global(fs.clone(), cx); + }); + + fs.insert_tree( + "/project", + json!({ + ".git": {}, + "src": { "main.rs": "fn main() {}" } + }), + ) + .await; + + let project = Project::test(fs.clone(), [Path::new("/project")], cx).await; + cx.executor().run_until_parked(); + + let repository = project.read_with(cx, |project, cx| { + project.repositories(cx).values().next().unwrap().clone() + }); + + // Actually create a worktree so it exists in FakeFs for rollback to find. 
+ let success_path = PathBuf::from("/worktrees/branch/project"); + cx.update(|cx| { + repository.update(cx, |repo, _| { + repo.create_worktree( + git::repository::CreateWorktreeTarget::NewBranch { + branch_name: "branch".to_string(), + base_sha: None, + }, + success_path.clone(), + ) + }) + }) + .await + .unwrap() + .unwrap(); + cx.executor().run_until_parked(); + + // Verify the worktree directory exists before rollback. + assert!( + fs.is_dir(&success_path).await, + "worktree directory should exist before rollback" + ); + + let multi_workspace = + cx.add_window(|window, cx| MultiWorkspace::test_new(project.clone(), window, cx)); + + // Build creation_infos: one success, one failure. + let failed_path = PathBuf::from("/worktrees/branch/failed_project"); + + let (sender_ok, receiver_ok) = futures::channel::oneshot::channel::>(); + let (sender_err, receiver_err) = futures::channel::oneshot::channel::>(); + sender_ok.send(Ok(())).unwrap(); + sender_err + .send(Err(anyhow!("branch already exists"))) + .unwrap(); + + let creation_infos = vec![ + (repository.clone(), success_path.clone(), receiver_ok), + (repository.clone(), failed_path.clone(), receiver_err), + ]; + + let fs_clone = fs.clone(); + let result = multi_workspace + .update(cx, |_, window, cx| { + window.spawn(cx, async move |cx| { + AgentPanel::await_and_rollback_on_failure(creation_infos, fs_clone, cx).await + }) + }) + .unwrap() + .await; + + assert!( + result.is_err(), + "should return error when any creation fails" + ); + let err_msg = result.unwrap_err().to_string(); + assert!( + err_msg.contains("branch already exists"), + "error should mention the original failure: {err_msg}" + ); + + // The successful worktree should have been rolled back by git. 
+ cx.executor().run_until_parked(); + assert!( + !fs.is_dir(&success_path).await, + "successful worktree directory should be removed by rollback" + ); + } + + #[gpui::test] + async fn test_rollback_on_canceled_receiver(cx: &mut TestAppContext) { + init_test(cx); + let fs = FakeFs::new(cx.executor()); + cx.update(|cx| { + cx.update_flags(true, vec!["agent-v2".to_string()]); + agent::ThreadStore::init_global(cx); + language_model::LanguageModelRegistry::test(cx); + ::set_global(fs.clone(), cx); + }); + + fs.insert_tree( + "/project", + json!({ + ".git": {}, + "src": { "main.rs": "fn main() {}" } + }), + ) + .await; + + let project = Project::test(fs.clone(), [Path::new("/project")], cx).await; + cx.executor().run_until_parked(); + + let repository = project.read_with(cx, |project, cx| { + project.repositories(cx).values().next().unwrap().clone() + }); + + let multi_workspace = + cx.add_window(|window, cx| MultiWorkspace::test_new(project.clone(), window, cx)); + + let path = PathBuf::from("/worktrees/branch/project"); + + // Drop the sender to simulate a canceled receiver. 
+ let (_sender, receiver) = futures::channel::oneshot::channel::>(); + drop(_sender); + + let creation_infos = vec![(repository.clone(), path.clone(), receiver)]; + + let fs_clone = fs.clone(); + let result = multi_workspace + .update(cx, |_, window, cx| { + window.spawn(cx, async move |cx| { + AgentPanel::await_and_rollback_on_failure(creation_infos, fs_clone, cx).await + }) + }) + .unwrap() + .await; + + assert!( + result.is_err(), + "should return error when receiver is canceled" + ); + let err_msg = result.unwrap_err().to_string(); + assert!( + err_msg.contains("canceled"), + "error should mention cancellation: {err_msg}" + ); + } + + #[gpui::test] + async fn test_rollback_cleans_up_orphan_directories(cx: &mut TestAppContext) { + init_test(cx); + let fs = FakeFs::new(cx.executor()); + cx.update(|cx| { + cx.update_flags(true, vec!["agent-v2".to_string()]); + agent::ThreadStore::init_global(cx); + language_model::LanguageModelRegistry::test(cx); + ::set_global(fs.clone(), cx); + }); + + fs.insert_tree( + "/project", + json!({ + ".git": {}, + "src": { "main.rs": "fn main() {}" } + }), + ) + .await; + + let project = Project::test(fs.clone(), [Path::new("/project")], cx).await; + cx.executor().run_until_parked(); + + let repository = project.read_with(cx, |project, cx| { + project.repositories(cx).values().next().unwrap().clone() + }); + + let multi_workspace = + cx.add_window(|window, cx| MultiWorkspace::test_new(project.clone(), window, cx)); + + // Simulate the orphan state: create_dir_all was called but git + // worktree add failed, leaving a directory with leftover files. 
+ let orphan_path = PathBuf::from("/worktrees/branch/orphan_project"); + fs.insert_tree( + "/worktrees/branch/orphan_project", + json!({ "leftover.txt": "junk" }), + ) + .await; + + assert!( + fs.is_dir(&orphan_path).await, + "orphan dir should exist before rollback" + ); + + let (sender, receiver) = futures::channel::oneshot::channel::>(); + sender.send(Err(anyhow!("hook failed"))).unwrap(); + + let creation_infos = vec![(repository.clone(), orphan_path.clone(), receiver)]; + + let fs_clone = fs.clone(); + let result = multi_workspace + .update(cx, |_, window, cx| { + window.spawn(cx, async move |cx| { + AgentPanel::await_and_rollback_on_failure(creation_infos, fs_clone, cx).await + }) + }) + .unwrap() + .await; + + cx.executor().run_until_parked(); + + assert!(result.is_err()); + assert!( + !fs.is_dir(&orphan_path).await, + "orphan worktree directory should be removed by filesystem cleanup" + ); + } + + #[gpui::test] + async fn test_worktree_creation_for_remote_project( + cx: &mut TestAppContext, + server_cx: &mut TestAppContext, + ) { + init_test(cx); + + let app_state = cx.update(|cx| { + agent::ThreadStore::init_global(cx); + language_model::LanguageModelRegistry::test(cx); + + let app_state = workspace::AppState::test(cx); + workspace::init(app_state.clone(), cx); + app_state + }); + + server_cx.update(|cx| { + release_channel::init(semver::Version::new(0, 0, 0), cx); + }); + + // Set up the remote server side with a git repo. + let server_fs = FakeFs::new(server_cx.executor()); + server_fs + .insert_tree( + "/project", + json!({ + ".git": {}, + "src": { + "main.rs": "fn main() {}" + } + }), + ) + .await; + server_fs.set_branch_name(Path::new("/project/.git"), Some("main")); + + // Create a mock remote connection. 
+ let (opts, server_session, _) = remote::RemoteClient::fake_server(cx, server_cx); + + server_cx.update(remote_server::HeadlessProject::init); + let server_executor = server_cx.executor(); + let _headless = server_cx.new(|cx| { + remote_server::HeadlessProject::new( + remote_server::HeadlessAppState { + session: server_session, + fs: server_fs.clone(), + http_client: Arc::new(http_client::BlockedHttpClient), + node_runtime: node_runtime::NodeRuntime::unavailable(), + languages: Arc::new(language::LanguageRegistry::new(server_executor.clone())), + extension_host_proxy: Arc::new(extension::ExtensionHostProxy::new()), + startup_time: Instant::now(), + }, + false, + cx, + ) + }); + + // Connect the client side and build a remote project. + // Use a separate Client to avoid double-registering proto handlers + // (Workspace::test_new creates its own WorkspaceStore from the + // project's client). + let remote_client = remote::RemoteClient::connect_mock(opts, cx).await; + let project = cx.update(|cx| { + let project_client = client::Client::new( + Arc::new(clock::FakeSystemClock::new()), + http_client::FakeHttpClient::with_404_response(), + cx, + ); + let user_store = cx.new(|cx| client::UserStore::new(project_client.clone(), cx)); + project::Project::remote( + remote_client, + project_client, + node_runtime::NodeRuntime::unavailable(), + user_store, + app_state.languages.clone(), + app_state.fs.clone(), + false, + cx, + ) + }); + + // Open the remote path as a worktree in the project. + let worktree_path = Path::new("/project"); + project + .update(cx, |project, cx| { + project.find_or_create_worktree(worktree_path, true, cx) + }) + .await + .expect("should be able to open remote worktree"); + cx.run_until_parked(); + + // Verify the project is indeed remote. 
+ project.read_with(cx, |project, cx| { + assert!(!project.is_local(), "project should be remote, not local"); + assert!( + project.remote_connection_options(cx).is_some(), + "project should have remote connection options" + ); + }); + + // Create the workspace and agent panel. + let multi_workspace = + cx.add_window(|window, cx| MultiWorkspace::test_new(project.clone(), window, cx)); + multi_workspace + .update(cx, |multi_workspace, _, cx| { + multi_workspace.open_sidebar(cx); + }) + .unwrap(); + + let workspace = multi_workspace + .read_with(cx, |mw, _cx| mw.workspace().clone()) + .unwrap(); + + workspace.update(cx, |workspace, _cx| { + workspace.set_random_database_id(); + }); + + // Register a callback so new workspaces also get an AgentPanel. + cx.update(|cx| { + cx.observe_new( + |workspace: &mut Workspace, + window: Option<&mut Window>, + cx: &mut Context| { + if let Some(window) = window { + let panel = cx.new(|cx| AgentPanel::new(workspace, None, window, cx)); + workspace.add_panel(panel, window, cx); + } + }, + ) + .detach(); + }); + + let cx = &mut VisualTestContext::from_window(multi_workspace.into(), cx); + cx.run_until_parked(); + + let panel = workspace.update_in(cx, |workspace, window, cx| { + let panel = cx.new(|cx| AgentPanel::new(workspace, None, window, cx)); + workspace.add_panel(panel.clone(), window, cx); + panel + }); + + cx.run_until_parked(); + + // Open a thread. + panel.update_in(cx, |panel, window, cx| { + panel.open_external_thread_with_server( + Rc::new(StubAgentServer::default_response()), + window, + cx, + ); + }); + cx.run_until_parked(); + + // Set start_thread_in to LinkedWorktree to bypass git worktree + // creation and directly test workspace opening for a known path. 
+ let linked_path = PathBuf::from("/project"); + panel.update_in(cx, |panel, window, cx| { + panel.set_start_thread_in( + &StartThreadIn::LinkedWorktree { + path: linked_path.clone(), + display_name: "project".to_string(), + }, + window, + cx, + ); + }); + + // Trigger worktree creation. + let content = vec![acp::ContentBlock::Text(acp::TextContent::new( + "Hello from remote test", + ))]; + panel.update_in(cx, |panel, window, cx| { + panel.handle_worktree_requested( + content, + WorktreeCreationArgs::Linked { + worktree_path: linked_path, + }, + window, + cx, + ); + }); + + // The refactored code uses `find_or_create_workspace`, which + // finds the existing remote workspace (matching paths + host) + // and reuses it instead of creating a new connection. + cx.run_until_parked(); + + // The task should have completed: the existing workspace was + // found and reused. + panel.read_with(cx, |panel, _cx| { + assert!( + panel.worktree_creation_status.is_none(), + "worktree creation should have completed, but status is: {:?}", + panel.worktree_creation_status + ); + }); + + // The existing remote workspace was reused — no new workspace + // should have been created. 
+ multi_workspace + .read_with(cx, |multi_workspace, cx| { + let project = workspace.read(cx).project().clone(); + assert!( + !project.read(cx).is_local(), + "workspace project should still be remote, not local" + ); + assert_eq!( + multi_workspace.workspaces().count(), + 1, + "existing remote workspace should be reused, not a new one created" + ); + }) + .unwrap(); + } } diff --git a/crates/agent_ui/src/agent_ui.rs b/crates/agent_ui/src/agent_ui.rs index 037821d56d00851100488d68b0b44cee0aecbd53..2cf4218719a0412534d9832c3cb54587f4c45a73 100644 --- a/crates/agent_ui/src/agent_ui.rs +++ b/crates/agent_ui/src/agent_ui.rs @@ -33,6 +33,7 @@ mod thread_history; mod thread_history_view; mod thread_import; pub mod thread_metadata_store; +pub mod thread_worktree_archive; mod thread_worktree_picker; pub mod threads_archive_view; mod ui; diff --git a/crates/agent_ui/src/conversation_view.rs b/crates/agent_ui/src/conversation_view.rs index 80190858151b2cf79500290a95ee0d0b6a4e8c97..528e38333144524c4a4dffa63a7a8b107c829e41 100644 --- a/crates/agent_ui/src/conversation_view.rs +++ b/crates/agent_ui/src/conversation_view.rs @@ -1,12 +1,15 @@ use acp_thread::{ AcpThread, AcpThreadEvent, AgentSessionInfo, AgentThreadEntry, AssistantMessage, - AssistantMessageChunk, AuthRequired, LoadError, MentionUri, PermissionOptionChoice, - PermissionOptions, PermissionPattern, RetryStatus, SelectedPermissionOutcome, ThreadStatus, - ToolCall, ToolCallContent, ToolCallStatus, UserMessageId, + AssistantMessageChunk, AuthRequired, LoadError, MaxOutputTokensError, MentionUri, + PermissionOptionChoice, PermissionOptions, PermissionPattern, RetryStatus, + SelectedPermissionOutcome, ThreadStatus, ToolCall, ToolCallContent, ToolCallStatus, + UserMessageId, }; use acp_thread::{AgentConnection, Plan}; use action_log::{ActionLog, ActionLogTelemetry, DiffStats}; -use agent::{NativeAgentServer, NativeAgentSessionList, SharedThread, ThreadStore}; +use agent::{ + NativeAgentServer, NativeAgentSessionList, 
NoModelConfiguredError, SharedThread, ThreadStore, +}; use agent_client_protocol as acp; #[cfg(test)] use agent_servers::AgentServerDelegate; @@ -34,7 +37,7 @@ use gpui::{ list, point, pulsating_between, }; use language::Buffer; -use language_model::LanguageModelRegistry; +use language_model::{LanguageModelCompletionError, LanguageModelRegistry}; use markdown::{Markdown, MarkdownElement, MarkdownFont, MarkdownStyle}; use parking_lot::RwLock; use project::{AgentId, AgentServerStore, Project, ProjectEntryId}; @@ -78,7 +81,7 @@ use crate::agent_diff::AgentDiff; use crate::entry_view_state::{EntryViewEvent, ViewEvent}; use crate::message_editor::{MessageEditor, MessageEditorEvent}; use crate::profile_selector::{ProfileProvider, ProfileSelector}; -use crate::thread_metadata_store::ThreadMetadataStore; + use crate::ui::{AgentNotification, AgentNotificationEvent}; use crate::{ Agent, AgentDiffPane, AgentInitialContent, AgentPanel, AllowAlways, AllowOnce, @@ -113,6 +116,31 @@ pub(crate) enum ThreadError { PaymentRequired, Refusal, AuthenticationRequired(SharedString), + RateLimitExceeded { + provider: SharedString, + }, + ServerOverloaded { + provider: SharedString, + }, + PromptTooLarge, + NoApiKey { + provider: SharedString, + }, + StreamError { + provider: SharedString, + }, + InvalidApiKey { + provider: SharedString, + }, + PermissionDenied { + provider: SharedString, + }, + RequestFailed, + MaxOutputTokens, + NoModelSelected, + ApiError { + provider: SharedString, + }, Other { message: SharedString, acp_error_code: Option, @@ -121,12 +149,57 @@ pub(crate) enum ThreadError { impl From for ThreadError { fn from(error: anyhow::Error) -> Self { - if error.is::() { + if error.is::() { + Self::MaxOutputTokens + } else if error.is::() { + Self::NoModelSelected + } else if error.is::() { Self::PaymentRequired } else if let Some(acp_error) = error.downcast_ref::() && acp_error.code == acp::ErrorCode::AuthRequired { Self::AuthenticationRequired(acp_error.message.clone().into()) 
+ } else if let Some(lm_error) = error.downcast_ref::() { + use LanguageModelCompletionError::*; + match lm_error { + RateLimitExceeded { provider, .. } => Self::RateLimitExceeded { + provider: provider.to_string().into(), + }, + ServerOverloaded { provider, .. } | ApiInternalServerError { provider, .. } => { + Self::ServerOverloaded { + provider: provider.to_string().into(), + } + } + PromptTooLarge { .. } => Self::PromptTooLarge, + NoApiKey { provider } => Self::NoApiKey { + provider: provider.to_string().into(), + }, + StreamEndedUnexpectedly { provider } + | ApiReadResponseError { provider, .. } + | DeserializeResponse { provider, .. } + | HttpSend { provider, .. } => Self::StreamError { + provider: provider.to_string().into(), + }, + AuthenticationError { provider, .. } => Self::InvalidApiKey { + provider: provider.to_string().into(), + }, + PermissionError { provider, .. } => Self::PermissionDenied { + provider: provider.to_string().into(), + }, + UpstreamProviderError { .. } => Self::RequestFailed, + BadRequestFormat { provider, .. } + | HttpResponseError { provider, .. 
} + | ApiEndpointNotFound { provider } => Self::ApiError { + provider: provider.to_string().into(), + }, + _ => { + let message: SharedString = format!("{:#}", error).into(); + Self::Other { + message, + acp_error_code: None, + } + } + } } else { let message: SharedString = format!("{:#}", error).into(); @@ -354,6 +427,20 @@ impl ConversationView { .pending_tool_call(id, cx) } + pub fn root_thread_has_pending_tool_call(&self, cx: &App) -> bool { + let Some(root_thread) = self.root_thread(cx) else { + return false; + }; + let root_id = root_thread.read(cx).id.clone(); + self.as_connected().is_some_and(|connected| { + connected + .conversation + .read(cx) + .pending_tool_call(&root_id, cx) + .is_some() + }) + } + pub fn root_thread(&self, cx: &App) -> Option> { match &self.server_state { ServerState::Connected(connected) => { @@ -1510,24 +1597,30 @@ impl ConversationView { let agent_telemetry_id = connection.telemetry_id(); - if let Some(login) = connection.terminal_auth_task(&method, cx) { + if let Some(login_task) = connection.terminal_auth_task(&method, cx) { configuration_view.take(); pending_auth_method.replace(method.clone()); let project = self.project.clone(); - let authenticate = Self::spawn_external_agent_login( - login, - workspace, - project, - method.clone(), - false, - window, - cx, - ); cx.notify(); self.auth_task = Some(cx.spawn_in(window, { async move |this, cx| { - let result = authenticate.await; + let result = async { + let login = login_task.await?; + this.update_in(cx, |_this, window, cx| { + Self::spawn_external_agent_login( + login, + workspace, + project, + method.clone(), + false, + window, + cx, + ) + })? 
+ .await + } + .await; match &result { Ok(_) => telemetry::event!( @@ -2628,22 +2721,6 @@ impl ConversationView { pub fn history(&self) -> Option<&Entity> { self.as_connected().and_then(|c| c.history.as_ref()) } - - pub fn delete_history_entry(&mut self, session_id: &acp::SessionId, cx: &mut Context) { - let Some(connected) = self.as_connected() else { - return; - }; - - let Some(history) = &connected.history else { - return; - }; - let task = history.update(cx, |history, cx| history.delete_session(&session_id, cx)); - task.detach_and_log_err(cx); - - if let Some(store) = ThreadMetadataStore::try_global(cx) { - store.update(cx, |store, cx| store.delete(session_id.clone(), cx)); - } - } } fn loading_contents_spinner(size: IconSize) -> AnyElement { @@ -2813,6 +2890,7 @@ pub(crate) mod tests { use workspace::{Item, MultiWorkspace}; use crate::agent_panel; + use crate::thread_metadata_store::ThreadMetadataStore; use super::*; @@ -6620,19 +6698,11 @@ pub(crate) mod tests { conversation_view.read_with(cx, |conversation_view, cx| { let state = conversation_view.active_thread().unwrap(); let error = &state.read(cx).thread_error; - match error { - Some(ThreadError::Other { message, .. 
}) => { - assert!( - message.contains("Maximum tokens reached"), - "Expected 'Maximum tokens reached' error, got: {}", - message - ); - } - other => panic!( - "Expected ThreadError::Other with 'Maximum tokens reached', got: {:?}", - other.is_some() - ), - } + assert!( + matches!(error, Some(ThreadError::MaxOutputTokens)), + "Expected ThreadError::MaxOutputTokens, got: {:?}", + error.is_some() + ); }); } diff --git a/crates/agent_ui/src/conversation_view/thread_view.rs b/crates/agent_ui/src/conversation_view/thread_view.rs index 882390f6330a57b0d6b857f4ff78be8079dcf3ee..8805777190bf1cb7b2752ff2c1037665e8ae9bff 100644 --- a/crates/agent_ui/src/conversation_view/thread_view.rs +++ b/crates/agent_ui/src/conversation_view/thread_view.rs @@ -330,6 +330,7 @@ pub struct ThreadView { pub hovered_recent_history_item: Option, pub show_external_source_prompt_warning: bool, pub show_codex_windows_warning: bool, + pub multi_root_callout_dismissed: bool, pub generating_indicator_in_list: bool, pub history: Option>, pub _history_subscription: Option, @@ -573,6 +574,7 @@ impl ThreadView { history, _history_subscription: history_subscription, show_codex_windows_warning, + multi_root_callout_dismissed: false, generating_indicator_in_list: false, }; @@ -1259,6 +1261,62 @@ impl ThreadView { ThreadError::AuthenticationRequired(message) => { ("authentication_required", None, message.clone()) } + ThreadError::RateLimitExceeded { provider } => ( + "rate_limit_exceeded", + None, + format!("{provider}'s rate limit was reached.").into(), + ), + ThreadError::ServerOverloaded { provider } => ( + "server_overloaded", + None, + format!("{provider}'s servers are temporarily unavailable.").into(), + ), + ThreadError::PromptTooLarge => ( + "prompt_too_large", + None, + "Context too large for the model's context window.".into(), + ), + ThreadError::NoApiKey { provider } => ( + "no_api_key", + None, + format!("No API key configured for {provider}.").into(), + ), + ThreadError::StreamError { provider } 
=> ( + "stream_error", + None, + format!("Connection to {provider}'s API was interrupted.").into(), + ), + ThreadError::InvalidApiKey { provider } => ( + "invalid_api_key", + None, + format!("Invalid or expired API key for {provider}.").into(), + ), + ThreadError::PermissionDenied { provider } => ( + "permission_denied", + None, + format!( + "{provider}'s API rejected the request due to insufficient permissions." + ) + .into(), + ), + ThreadError::RequestFailed => ( + "request_failed", + None, + "Request could not be completed after multiple attempts.".into(), + ), + ThreadError::MaxOutputTokens => ( + "max_output_tokens", + None, + "Model reached its maximum output length.".into(), + ), + ThreadError::NoModelSelected => { + ("no_model_selected", None, "No model selected.".into()) + } + ThreadError::ApiError { provider } => ( + "api_error", + None, + format!("{provider}'s API returned an unexpected error.").into(), + ), ThreadError::Other { acp_error_code, message, @@ -4331,17 +4389,27 @@ impl Render for TokenUsageTooltip { impl ThreadView { fn render_entries(&mut self, cx: &mut Context) -> List { + let max_content_width = AgentSettings::get_global(cx).max_content_width; + let centered_container = move |content: AnyElement| { + h_flex() + .w_full() + .justify_center() + .child(div().max_w(max_content_width).w_full().child(content)) + }; + list( self.list_state.clone(), cx.processor(move |this, index: usize, window, cx| { let entries = this.thread.read(cx).entries(); if let Some(entry) = entries.get(index) { - this.render_entry(index, entries.len(), entry, window, cx) + let rendered = this.render_entry(index, entries.len(), entry, window, cx); + centered_container(rendered.into_any_element()).into_any_element() } else if this.generating_indicator_in_list { let confirmation = entries .last() .is_some_and(|entry| Self::is_waiting_for_confirmation(entry)); - this.render_generating(confirmation, cx).into_any_element() + let rendered = 
this.render_generating(confirmation, cx); + centered_container(rendered.into_any_element()).into_any_element() } else { Empty.into_any() } @@ -8076,6 +8144,109 @@ impl ThreadView { self.render_authentication_required_error(error.clone(), cx) } ThreadError::PaymentRequired => self.render_payment_required_error(cx), + ThreadError::RateLimitExceeded { provider } => self.render_error_callout( + "Rate Limit Reached", + format!( + "{provider}'s rate limit was reached. Zed will retry automatically. \ + You can also wait a moment and try again." + ) + .into(), + true, + true, + cx, + ), + ThreadError::ServerOverloaded { provider } => self.render_error_callout( + "Provider Unavailable", + format!( + "{provider}'s servers are temporarily unavailable. Zed will retry \ + automatically. If the problem persists, check the provider's status page." + ) + .into(), + true, + true, + cx, + ), + ThreadError::PromptTooLarge => self.render_prompt_too_large_error(cx), + ThreadError::NoApiKey { provider } => self.render_error_callout( + "API Key Missing", + format!( + "No API key is configured for {provider}. \ + Add your key via the Agent Panel settings to continue." + ) + .into(), + false, + true, + cx, + ), + ThreadError::StreamError { provider } => self.render_error_callout( + "Connection Interrupted", + format!( + "The connection to {provider}'s API was interrupted. Zed will retry \ + automatically. If the problem persists, check your network connection." + ) + .into(), + true, + true, + cx, + ), + ThreadError::InvalidApiKey { provider } => self.render_error_callout( + "Invalid API Key", + format!( + "The API key for {provider} is invalid or has expired. \ + Update your key via the Agent Panel settings to continue." + ) + .into(), + false, + false, + cx, + ), + ThreadError::PermissionDenied { provider } => self.render_error_callout( + "Permission Denied", + format!( + "{provider}'s API rejected the request due to insufficient permissions. 
\ + Check that your API key has access to this model." + ) + .into(), + false, + false, + cx, + ), + ThreadError::RequestFailed => self.render_error_callout( + "Request Failed", + "The request could not be completed after multiple attempts. \ + Try again in a moment." + .into(), + true, + false, + cx, + ), + ThreadError::MaxOutputTokens => self.render_error_callout( + "Output Limit Reached", + "The model stopped because it reached its maximum output length. \ + You can ask it to continue where it left off." + .into(), + false, + false, + cx, + ), + ThreadError::NoModelSelected => self.render_error_callout( + "No Model Selected", + "Select a model from the model picker below to get started.".into(), + false, + false, + cx, + ), + ThreadError::ApiError { provider } => self.render_error_callout( + "API Error", + format!( + "{provider}'s API returned an unexpected error. \ + If the problem persists, try switching models or restarting Zed." + ) + .into(), + true, + true, + cx, + ), }; Some(div().child(content)) @@ -8136,6 +8307,72 @@ impl ThreadView { .dismiss_action(self.dismiss_error_button(cx)) } + fn render_error_callout( + &self, + title: &'static str, + message: SharedString, + show_retry: bool, + show_copy: bool, + cx: &mut Context, + ) -> Callout { + let can_resume = show_retry && self.thread.read(cx).can_retry(cx); + let show_actions = can_resume || show_copy; + + Callout::new() + .severity(Severity::Error) + .icon(IconName::XCircle) + .title(title) + .description(message.clone()) + .when(show_actions, |callout| { + callout.actions_slot( + h_flex() + .gap_0p5() + .when(can_resume, |this| this.child(self.retry_button(cx))) + .when(show_copy, |this| { + this.child(self.create_copy_button(message.clone())) + }), + ) + }) + .dismiss_action(self.dismiss_error_button(cx)) + } + + fn render_prompt_too_large_error(&self, cx: &mut Context) -> Callout { + const MESSAGE: &str = "This conversation is too long for the model's context window. 
\ + Start a new thread or remove some attached files to continue."; + + Callout::new() + .severity(Severity::Error) + .icon(IconName::XCircle) + .title("Context Too Large") + .description(MESSAGE) + .actions_slot( + h_flex() + .gap_0p5() + .child(self.new_thread_button(cx)) + .child(self.create_copy_button(MESSAGE)), + ) + .dismiss_action(self.dismiss_error_button(cx)) + } + + fn retry_button(&self, cx: &mut Context) -> impl IntoElement { + Button::new("retry", "Retry") + .label_size(LabelSize::Small) + .style(ButtonStyle::Filled) + .on_click(cx.listener(|this, _, _, cx| { + this.retry_generation(cx); + })) + } + + fn new_thread_button(&self, cx: &mut Context) -> impl IntoElement { + Button::new("new_thread", "New Thread") + .label_size(LabelSize::Small) + .style(ButtonStyle::Filled) + .on_click(cx.listener(|this, _, window, cx| { + this.clear_thread_error(cx); + window.dispatch_action(NewThread.boxed_clone(), cx); + })) + } + fn upgrade_button(&self, cx: &mut Context) -> impl IntoElement { Button::new("upgrade", "Upgrade") .label_size(LabelSize::Small) @@ -8338,6 +8575,53 @@ impl ThreadView { ) } + fn render_multi_root_callout(&self, cx: &mut Context) -> Option { + if self.multi_root_callout_dismissed { + return None; + } + + if self.as_native_connection(cx).is_some() { + return None; + } + + let project = self.project.upgrade()?; + let worktree_count = project.read(cx).visible_worktrees(cx).count(); + if worktree_count <= 1 { + return None; + } + + let work_dirs = self.thread.read(cx).work_dirs()?; + let active_dir = work_dirs + .ordered_paths() + .next() + .and_then(|p| p.file_name()) + .map(|name| name.to_string_lossy().to_string()) + .unwrap_or_else(|| "one folder".to_string()); + + let description = format!( + "This agent only operates on \"{}\". 
Other folders in this workspace are not accessible to it.", + active_dir + ); + + Some( + Callout::new() + .severity(Severity::Warning) + .icon(IconName::Warning) + .title("External Agents currently don't support multi-root workspaces") + .description(description) + .border_position(ui::BorderPosition::Bottom) + .dismiss_action( + IconButton::new("dismiss-multi-root-callout", IconName::Close) + .icon_size(IconSize::Small) + .tooltip(Tooltip::text("Dismiss")) + .on_click(cx.listener(|this, _, _, cx| { + this.multi_root_callout_dismissed = true; + cx.notify(); + })), + ), + ) + } + fn render_new_version_callout(&self, version: &SharedString, cx: &mut Context) -> Div { let server_view = self.server_view.clone(); let has_version = !version.is_empty(); @@ -8467,13 +8751,20 @@ impl ThreadView { return; }; thread.update(cx, |thread, cx| { - thread.set_speed( - thread - .speed() - .map(|speed| speed.toggle()) - .unwrap_or(Speed::Fast), - cx, - ); + let new_speed = thread + .speed() + .map(|speed| speed.toggle()) + .unwrap_or(Speed::Fast); + thread.set_speed(new_speed, cx); + + let fs = thread.project().read(cx).fs().clone(); + update_settings_file(fs, cx, move |settings, _| { + if let Some(agent) = settings.agent.as_mut() + && let Some(default_model) = agent.default_model.as_mut() + { + default_model.speed = Some(new_speed); + } + }); }); } @@ -8539,7 +8830,6 @@ impl ThreadView { impl Render for ThreadView { fn render(&mut self, window: &mut Window, cx: &mut Context) -> impl IntoElement { let has_messages = self.list_state.item_count() > 0; - let max_content_width = AgentSettings::get_global(cx).max_content_width; let list_state = self.list_state.clone(); let conversation = v_flex() @@ -8550,13 +8840,7 @@ impl Render for ThreadView { if has_messages { this.flex_1() .size_full() - .child( - v_flex() - .mx_auto() - .max_w(max_content_width) - .size_full() - .child(self.render_entries(cx)), - ) + .child(self.render_entries(cx)) .vertical_scrollbar_for(&list_state, window, cx) 
.into_any() } else { @@ -8741,6 +9025,7 @@ impl Render for ThreadView { .size_full() .children(self.render_subagent_titlebar(cx)) .child(conversation) + .children(self.render_multi_root_callout(cx)) .children(self.render_activity_bar(window, cx)) .when(self.show_external_source_prompt_warning, |this| { this.child(self.render_external_source_prompt_warning(cx)) diff --git a/crates/agent_ui/src/favorite_models.rs b/crates/agent_ui/src/favorite_models.rs index 5e69808f225f525a3e052fdb01e4095ef38cac03..aa48ca8d12459b2860982a6b204a5350e6c4ce4c 100644 --- a/crates/agent_ui/src/favorite_models.rs +++ b/crates/agent_ui/src/favorite_models.rs @@ -11,6 +11,7 @@ fn language_model_to_selection(model: &Arc) -> LanguageModelS model: model.id().0.to_string(), enable_thinking: false, effort: None, + speed: None, } } diff --git a/crates/agent_ui/src/inline_assistant.rs b/crates/agent_ui/src/inline_assistant.rs index 39d70790e0d4a18554b2a1c11510e529d921cd1b..f2beb719cc7e5638cfc36f339419bda405a8e773 100644 --- a/crates/agent_ui/src/inline_assistant.rs +++ b/crates/agent_ui/src/inline_assistant.rs @@ -1,10 +1,8 @@ use language_models::provider::anthropic::telemetry::{ AnthropicCompletionType, AnthropicEventData, AnthropicEventType, report_anthropic_event, }; -use std::cmp; use std::mem; use std::ops::Range; -use std::rc::Rc; use std::sync::Arc; use uuid::Uuid; @@ -27,8 +25,8 @@ use editor::RowExt; use editor::SelectionEffects; use editor::scroll::ScrollOffset; use editor::{ - Anchor, AnchorRangeExt, CodeActionProvider, Editor, EditorEvent, HighlightKey, MultiBuffer, - MultiBufferSnapshot, ToOffset as _, ToPoint, + Anchor, AnchorRangeExt, Editor, EditorEvent, HighlightKey, MultiBuffer, MultiBufferSnapshot, + ToOffset as _, ToPoint, actions::SelectAll, display_map::{ BlockContext, BlockPlacement, BlockProperties, BlockStyle, CustomBlockId, EditorMargins, @@ -45,15 +43,14 @@ use language::{Buffer, Point, Selection, TransactionId}; use language_model::{ConfigurationError, ConfiguredModel, 
LanguageModelRegistry}; use multi_buffer::MultiBufferRow; use parking_lot::Mutex; -use project::{CodeAction, DisableAiSettings, LspAction, Project, ProjectTransaction}; +use project::{DisableAiSettings, Project}; use prompt_store::{PromptBuilder, PromptStore}; use settings::{Settings, SettingsStore}; use terminal_view::{TerminalView, terminal_panel::TerminalPanel}; -use text::{OffsetRangeExt, ToPoint as _}; use ui::prelude::*; use util::{RangeExt, ResultExt, maybe}; -use workspace::{ItemHandle, Toast, Workspace, dock::Panel, notifications::NotificationId}; +use workspace::{Toast, Workspace, dock::Panel, notifications::NotificationId}; use zed_actions::agent::OpenSettings; pub fn init(fs: Arc, prompt_builder: Arc, cx: &mut App) { @@ -184,7 +181,7 @@ impl InlineAssistant { fn handle_workspace_event( &mut self, - workspace: Entity, + _workspace: Entity, event: &workspace::Event, window: &mut Window, cx: &mut App, @@ -203,51 +200,10 @@ impl InlineAssistant { } } } - workspace::Event::ItemAdded { item } => { - self.register_workspace_item(&workspace, item.as_ref(), window, cx); - } _ => (), } } - fn register_workspace_item( - &mut self, - workspace: &Entity, - item: &dyn ItemHandle, - window: &mut Window, - cx: &mut App, - ) { - let is_ai_enabled = !DisableAiSettings::get_global(cx).disable_ai; - - if let Some(editor) = item.act_as::(cx) { - editor.update(cx, |editor, cx| { - if is_ai_enabled { - editor.add_code_action_provider( - Rc::new(AssistantCodeActionProvider { - editor: cx.entity().downgrade(), - workspace: workspace.downgrade(), - }), - window, - cx, - ); - - if DisableAiSettings::get_global(cx).disable_ai { - // Cancel any active edit predictions - if editor.has_active_edit_prediction() { - editor.cancel(&Default::default(), window, cx); - } - } - } else { - editor.remove_code_action_provider( - ASSISTANT_CODE_ACTION_PROVIDER_ID.into(), - window, - cx, - ); - } - }); - } - } - pub fn inline_assist( workspace: &mut Workspace, action: 
&zed_actions::assistant::InlineAssist, @@ -1875,130 +1831,6 @@ struct InlineAssistDecorations { end_block_id: CustomBlockId, } -struct AssistantCodeActionProvider { - editor: WeakEntity, - workspace: WeakEntity, -} - -const ASSISTANT_CODE_ACTION_PROVIDER_ID: &str = "assistant"; - -impl CodeActionProvider for AssistantCodeActionProvider { - fn id(&self) -> Arc { - ASSISTANT_CODE_ACTION_PROVIDER_ID.into() - } - - fn code_actions( - &self, - buffer: &Entity, - range: Range, - _: &mut Window, - cx: &mut App, - ) -> Task>> { - if !AgentSettings::get_global(cx).enabled(cx) { - return Task::ready(Ok(Vec::new())); - } - - let snapshot = buffer.read(cx).snapshot(); - let mut range = range.to_point(&snapshot); - - // Expand the range to line boundaries. - range.start.column = 0; - range.end.column = snapshot.line_len(range.end.row); - - let mut has_diagnostics = false; - for diagnostic in snapshot.diagnostics_in_range::<_, Point>(range.clone(), false) { - range.start = cmp::min(range.start, diagnostic.range.start); - range.end = cmp::max(range.end, diagnostic.range.end); - has_diagnostics = true; - } - if has_diagnostics { - let symbols_containing_start = snapshot.symbols_containing(range.start, None); - if let Some(symbol) = symbols_containing_start.last() { - range.start = cmp::min(range.start, symbol.range.start.to_point(&snapshot)); - range.end = cmp::max(range.end, symbol.range.end.to_point(&snapshot)); - } - let symbols_containing_end = snapshot.symbols_containing(range.end, None); - if let Some(symbol) = symbols_containing_end.last() { - range.start = cmp::min(range.start, symbol.range.start.to_point(&snapshot)); - range.end = cmp::max(range.end, symbol.range.end.to_point(&snapshot)); - } - - Task::ready(Ok(vec![CodeAction { - server_id: language::LanguageServerId(0), - range: snapshot.anchor_before(range.start)..snapshot.anchor_after(range.end), - lsp_action: LspAction::Action(Box::new(lsp::CodeAction { - title: "Fix with Assistant".into(), - ..Default::default() - 
})), - resolved: true, - }])) - } else { - Task::ready(Ok(Vec::new())) - } - } - - fn apply_code_action( - &self, - _buffer: Entity, - action: CodeAction, - _push_to_history: bool, - window: &mut Window, - cx: &mut App, - ) -> Task> { - let editor = self.editor.clone(); - let workspace = self.workspace.clone(); - let prompt_store = PromptStore::global(cx); - window.spawn(cx, async move |cx| { - let workspace = workspace.upgrade().context("workspace was released")?; - let (thread_store, history) = cx.update(|_window, cx| { - let panel = workspace - .read(cx) - .panel::(cx) - .context("missing agent panel")? - .read(cx); - - let history = panel - .connection_store() - .read(cx) - .entry(&crate::Agent::NativeAgent) - .and_then(|e| e.read(cx).history()) - .map(|h| h.downgrade()); - - anyhow::Ok((panel.thread_store().clone(), history)) - })??; - let editor = editor.upgrade().context("editor was released")?; - let range = editor - .update(cx, |editor, cx| { - editor.buffer().update(cx, |multibuffer, cx| { - let multibuffer_snapshot = multibuffer.read(cx); - multibuffer_snapshot.buffer_anchor_range_to_anchor_range(action.range) - }) - }) - .context("invalid range")?; - - let prompt_store = prompt_store.await.ok(); - cx.update_global(|assistant: &mut InlineAssistant, window, cx| { - let assist_id = assistant.suggest_assist( - &editor, - range, - "Fix Diagnostics".into(), - None, - true, - workspace, - thread_store, - prompt_store, - history, - window, - cx, - ); - assistant.start_assist(assist_id, window, cx); - })?; - - Ok(ProjectTransaction::default()) - }) - } -} - fn merge_ranges(ranges: &mut Vec>, buffer: &MultiBufferSnapshot) { ranges.sort_unstable_by(|a, b| { a.start diff --git a/crates/agent_ui/src/thread_import.rs b/crates/agent_ui/src/thread_import.rs index 5402b1c74353b73a522a068aa32dfd0a9dc85c60..686ca5d6cd4fdfede7eb4a5ed70c90074972fdf4 100644 --- a/crates/agent_ui/src/thread_import.rs +++ b/crates/agent_ui/src/thread_import.rs @@ -12,17 +12,18 @@ use gpui::{ 
}; use notifications::status_toast::{StatusToast, ToastIcon}; use project::{AgentId, AgentRegistryStore, AgentServerStore}; +use remote::RemoteConnectionOptions; use ui::{ Checkbox, KeyBinding, ListItem, ListItemSpacing, Modal, ModalFooter, ModalHeader, Section, prelude::*, }; use util::ResultExt; -use workspace::{ModalView, MultiWorkspace, PathList, Workspace}; +use workspace::{ModalView, MultiWorkspace, Workspace}; use crate::{ Agent, AgentPanel, agent_connection_store::AgentConnectionStore, - thread_metadata_store::{ThreadMetadata, ThreadMetadataStore}, + thread_metadata_store::{ThreadMetadata, ThreadMetadataStore, ThreadWorktreePaths}, }; pub struct AcpThreadImportOnboarding; @@ -436,19 +437,28 @@ fn find_threads_to_import( let mut wait_for_connection_tasks = Vec::new(); for store in stores { + let remote_connection = store + .read(cx) + .project() + .read(cx) + .remote_connection_options(cx); + for agent_id in agent_ids.clone() { let agent = Agent::from(agent_id.clone()); let server = agent.server(::global(cx), ThreadStore::global(cx)); let entry = store.update(cx, |store, cx| store.request_connection(agent, server, cx)); - wait_for_connection_tasks - .push(entry.read(cx).wait_for_connection().map(|s| (agent_id, s))); + + wait_for_connection_tasks.push(entry.read(cx).wait_for_connection().map({ + let remote_connection = remote_connection.clone(); + move |state| (agent_id, remote_connection, state) + })); } } let mut session_list_tasks = Vec::new(); cx.spawn(async move |cx| { let results = futures::future::join_all(wait_for_connection_tasks).await; - for (agent, result) in results { + for (agent_id, remote_connection, result) in results { let Some(state) = result.log_err() else { continue; }; @@ -457,18 +467,25 @@ fn find_threads_to_import( }; let task = cx.update(|cx| { list.list_sessions(AgentSessionListRequest::default(), cx) - .map(|r| (agent, r)) + .map({ + let remote_connection = remote_connection.clone(); + move |response| (agent_id, remote_connection, 
response) + }) }); session_list_tasks.push(task); } let mut sessions_by_agent = Vec::new(); let results = futures::future::join_all(session_list_tasks).await; - for (agent_id, result) in results { + for (agent_id, remote_connection, result) in results { let Some(response) = result.log_err() else { continue; }; - sessions_by_agent.push((agent_id, response.sessions)); + sessions_by_agent.push(SessionByAgent { + agent_id, + remote_connection, + sessions: response.sessions, + }); } Ok(collect_importable_threads( @@ -478,12 +495,23 @@ fn find_threads_to_import( }) } +struct SessionByAgent { + agent_id: AgentId, + remote_connection: Option, + sessions: Vec, +} + fn collect_importable_threads( - sessions_by_agent: Vec<(AgentId, Vec)>, + sessions_by_agent: Vec, mut existing_sessions: HashSet, ) -> Vec { let mut to_insert = Vec::new(); - for (agent_id, sessions) in sessions_by_agent { + for SessionByAgent { + agent_id, + remote_connection, + sessions, + } in sessions_by_agent + { for session in sessions { if !existing_sessions.insert(session.session_id.clone()) { continue; @@ -499,8 +527,8 @@ fn collect_importable_threads( .unwrap_or_else(|| crate::DEFAULT_THREAD_TITLE.into()), updated_at: session.updated_at.unwrap_or_else(|| Utc::now()), created_at: session.created_at, - folder_paths, - main_worktree_paths: PathList::default(), + worktree_paths: ThreadWorktreePaths::from_folder_paths(&folder_paths), + remote_connection: remote_connection.clone(), archived: true, }); } @@ -538,9 +566,10 @@ mod tests { let existing = HashSet::from_iter(vec![acp::SessionId::new("existing-1")]); let paths = PathList::new(&[Path::new("/project")]); - let sessions_by_agent = vec![( - AgentId::new("agent-a"), - vec![ + let sessions_by_agent = vec![SessionByAgent { + agent_id: AgentId::new("agent-a"), + remote_connection: None, + sessions: vec![ make_session( "existing-1", Some("Already There"), @@ -550,7 +579,7 @@ mod tests { ), make_session("new-1", Some("Brand New"), Some(paths), None, None), 
], - )]; + }]; let result = collect_importable_threads(sessions_by_agent, existing); @@ -564,13 +593,14 @@ mod tests { let existing = HashSet::default(); let paths = PathList::new(&[Path::new("/project")]); - let sessions_by_agent = vec![( - AgentId::new("agent-a"), - vec![ + let sessions_by_agent = vec![SessionByAgent { + agent_id: AgentId::new("agent-a"), + remote_connection: None, + sessions: vec![ make_session("has-dirs", Some("With Dirs"), Some(paths), None, None), make_session("no-dirs", Some("No Dirs"), None, None, None), ], - )]; + }]; let result = collect_importable_threads(sessions_by_agent, existing); @@ -583,13 +613,14 @@ mod tests { let existing = HashSet::default(); let paths = PathList::new(&[Path::new("/project")]); - let sessions_by_agent = vec![( - AgentId::new("agent-a"), - vec![ + let sessions_by_agent = vec![SessionByAgent { + agent_id: AgentId::new("agent-a"), + remote_connection: None, + sessions: vec![ make_session("s1", Some("Thread 1"), Some(paths.clone()), None, None), make_session("s2", Some("Thread 2"), Some(paths), None, None), ], - )]; + }]; let result = collect_importable_threads(sessions_by_agent, existing); @@ -603,20 +634,22 @@ mod tests { let paths = PathList::new(&[Path::new("/project")]); let sessions_by_agent = vec![ - ( - AgentId::new("agent-a"), - vec![make_session( + SessionByAgent { + agent_id: AgentId::new("agent-a"), + remote_connection: None, + sessions: vec![make_session( "s1", Some("From A"), Some(paths.clone()), None, None, )], - ), - ( - AgentId::new("agent-b"), - vec![make_session("s2", Some("From B"), Some(paths), None, None)], - ), + }, + SessionByAgent { + agent_id: AgentId::new("agent-b"), + remote_connection: None, + sessions: vec![make_session("s2", Some("From B"), Some(paths), None, None)], + }, ]; let result = collect_importable_threads(sessions_by_agent, existing); @@ -640,26 +673,28 @@ mod tests { let paths = PathList::new(&[Path::new("/project")]); let sessions_by_agent = vec![ - ( - 
AgentId::new("agent-a"), - vec![make_session( + SessionByAgent { + agent_id: AgentId::new("agent-a"), + remote_connection: None, + sessions: vec![make_session( "shared-session", Some("From A"), Some(paths.clone()), None, None, )], - ), - ( - AgentId::new("agent-b"), - vec![make_session( + }, + SessionByAgent { + agent_id: AgentId::new("agent-b"), + remote_connection: None, + sessions: vec![make_session( "shared-session", Some("From B"), Some(paths), None, None, )], - ), + }, ]; let result = collect_importable_threads(sessions_by_agent, existing); @@ -679,13 +714,14 @@ mod tests { let existing = HashSet::from_iter(vec![acp::SessionId::new("s1"), acp::SessionId::new("s2")]); - let sessions_by_agent = vec![( - AgentId::new("agent-a"), - vec![ + let sessions_by_agent = vec![SessionByAgent { + agent_id: AgentId::new("agent-a"), + remote_connection: None, + sessions: vec![ make_session("s1", Some("T1"), Some(paths.clone()), None, None), make_session("s2", Some("T2"), Some(paths), None, None), ], - )]; + }]; let result = collect_importable_threads(sessions_by_agent, existing); assert!(result.is_empty()); diff --git a/crates/agent_ui/src/thread_metadata_store.rs b/crates/agent_ui/src/thread_metadata_store.rs index 7f50482d9d47ae51ffda7ec2dac2907fb6f88095..4ba68b400a60320e95bfd645ee662f6483dc6cf4 100644 --- a/crates/agent_ui/src/thread_metadata_store.rs +++ b/crates/agent_ui/src/thread_metadata_store.rs @@ -10,31 +10,37 @@ use anyhow::Context as _; use chrono::{DateTime, Utc}; use collections::{HashMap, HashSet}; use db::{ + kvp::KeyValueStore, sqlez::{ bindable::Column, domain::Domain, statement::Statement, thread_safe_connection::ThreadSafeConnection, }, sqlez_macros::sql, }; -use futures::{FutureExt as _, future::Shared}; +use fs::Fs; +use futures::{FutureExt, future::Shared}; use gpui::{AppContext as _, Entity, Global, Subscription, Task}; use project::AgentId; +use remote::RemoteConnectionOptions; use ui::{App, Context, SharedString}; use util::ResultExt as _; -use 
workspace::PathList; +use workspace::{PathList, SerializedWorkspaceLocation, WorkspaceDb}; use crate::DEFAULT_THREAD_TITLE; +const THREAD_REMOTE_CONNECTION_MIGRATION_KEY: &str = "thread-metadata-remote-connection-backfill"; + pub fn init(cx: &mut App) { ThreadMetadataStore::init_global(cx); - migrate_thread_metadata(cx); + let migration_task = migrate_thread_metadata(cx); + migrate_thread_remote_connections(cx, migration_task); } /// Migrate existing thread metadata from native agent thread store to the new metadata storage. /// We skip migrating threads that do not have a project. /// /// TODO: Remove this after N weeks of shipping the sidebar -fn migrate_thread_metadata(cx: &mut App) { +fn migrate_thread_metadata(cx: &mut App) -> Task> { let store = ThreadMetadataStore::global(cx); let db = store.read(cx).db.clone(); @@ -58,8 +64,8 @@ fn migrate_thread_metadata(cx: &mut App) { title: entry.title, updated_at: entry.updated_at, created_at: entry.created_at, - folder_paths: entry.folder_paths, - main_worktree_paths: PathList::default(), + worktree_paths: ThreadWorktreePaths::from_folder_paths(&entry.folder_paths), + remote_connection: None, archived: true, }) }) @@ -75,11 +81,11 @@ fn migrate_thread_metadata(cx: &mut App) { if is_first_migration { let mut per_project: HashMap> = HashMap::default(); for entry in &mut to_migrate { - if entry.folder_paths.is_empty() { + if entry.worktree_paths.is_empty() { continue; } per_project - .entry(entry.folder_paths.clone()) + .entry(entry.worktree_paths.folder_path_list().clone()) .or_default() .push(entry); } @@ -104,12 +110,219 @@ fn migrate_thread_metadata(cx: &mut App) { let _ = store.update(cx, |store, cx| store.reload(cx)); anyhow::Ok(()) }) +} + +fn migrate_thread_remote_connections(cx: &mut App, migration_task: Task>) { + let store = ThreadMetadataStore::global(cx); + let db = store.read(cx).db.clone(); + let kvp = KeyValueStore::global(cx); + let workspace_db = WorkspaceDb::global(cx); + let fs = ::global(cx); + + 
cx.spawn(async move |cx| -> anyhow::Result<()> { + migration_task.await?; + + if kvp + .read_kvp(THREAD_REMOTE_CONNECTION_MIGRATION_KEY)? + .is_some() + { + return Ok(()); + } + + let recent_workspaces = workspace_db.recent_workspaces_on_disk(fs.as_ref()).await?; + + let mut local_path_lists = HashSet::::default(); + let mut remote_path_lists = HashMap::::default(); + + recent_workspaces + .iter() + .filter(|(_, location, path_list, _)| { + !path_list.is_empty() && matches!(location, &SerializedWorkspaceLocation::Local) + }) + .for_each(|(_, _, path_list, _)| { + local_path_lists.insert(path_list.clone()); + }); + + for (_, location, path_list, _) in recent_workspaces { + match location { + SerializedWorkspaceLocation::Remote(remote_connection) + if !local_path_lists.contains(&path_list) => + { + remote_path_lists + .entry(path_list) + .or_insert(remote_connection); + } + _ => {} + } + } + + let mut reloaded = false; + for metadata in db.list()? { + if metadata.remote_connection.is_some() { + continue; + } + + if let Some(remote_connection) = remote_path_lists + .get(metadata.folder_paths()) + .or_else(|| remote_path_lists.get(metadata.main_worktree_paths())) + { + db.save(ThreadMetadata { + remote_connection: Some(remote_connection.clone()), + ..metadata + }) + .await?; + reloaded = true; + } + } + + let reloaded_task = reloaded + .then_some(store.update(cx, |store, cx| store.reload(cx))) + .unwrap_or(Task::ready(()).shared()); + + kvp.write_kvp( + THREAD_REMOTE_CONNECTION_MIGRATION_KEY.to_string(), + "1".to_string(), + ) + .await?; + reloaded_task.await; + + Ok(()) + }) .detach_and_log_err(cx); } struct GlobalThreadMetadataStore(Entity); impl Global for GlobalThreadMetadataStore {} +/// Paired worktree paths for a thread. Each folder path has a corresponding +/// main worktree path at the same position. The two lists are always the +/// same length and are modified together via `add_path` / `remove_main_path`. 
+/// +/// For non-linked worktrees, the main path and folder path are identical. +/// For linked worktrees, the main path is the original repo and the folder +/// path is the linked worktree location. +/// +/// Internally stores two `PathList`s with matching insertion order so that +/// `ordered_paths()` on both yields positionally-paired results. +#[derive(Default, Debug, Clone)] +pub struct ThreadWorktreePaths { + folder_paths: PathList, + main_worktree_paths: PathList, +} + +impl PartialEq for ThreadWorktreePaths { + fn eq(&self, other: &Self) -> bool { + self.folder_paths == other.folder_paths + && self.main_worktree_paths == other.main_worktree_paths + } +} + +impl ThreadWorktreePaths { + /// Build from a project's current state. Each visible worktree is paired + /// with its main repo path (resolved via git), falling back to the + /// worktree's own path if no git repo is found. + pub fn from_project(project: &project::Project, cx: &App) -> Self { + let (mains, folders): (Vec, Vec) = project + .visible_worktrees(cx) + .map(|worktree| { + let snapshot = worktree.read(cx).snapshot(); + let folder_path = snapshot.abs_path().to_path_buf(); + let main_path = snapshot + .root_repo_common_dir() + .and_then(|dir| Some(dir.parent()?.to_path_buf())) + .unwrap_or_else(|| folder_path.clone()); + (main_path, folder_path) + }) + .unzip(); + Self { + folder_paths: PathList::new(&folders), + main_worktree_paths: PathList::new(&mains), + } + } + + /// Build from two parallel `PathList`s that already share the same + /// insertion order. Used for deserialization from DB. + /// + /// Returns an error if the two lists have different lengths, which + /// indicates corrupted data from a prior migration bug. 
+ pub fn from_path_lists( + main_worktree_paths: PathList, + folder_paths: PathList, + ) -> anyhow::Result { + anyhow::ensure!( + main_worktree_paths.paths().len() == folder_paths.paths().len(), + "main_worktree_paths has {} entries but folder_paths has {}", + main_worktree_paths.paths().len(), + folder_paths.paths().len(), + ); + Ok(Self { + folder_paths, + main_worktree_paths, + }) + } + + /// Build for non-linked worktrees where main == folder for every path. + pub fn from_folder_paths(folder_paths: &PathList) -> Self { + Self { + folder_paths: folder_paths.clone(), + main_worktree_paths: folder_paths.clone(), + } + } + + pub fn is_empty(&self) -> bool { + self.folder_paths.is_empty() + } + + /// The folder paths (for workspace matching / `threads_by_paths` index). + pub fn folder_path_list(&self) -> &PathList { + &self.folder_paths + } + + /// The main worktree paths (for group key / `threads_by_main_paths` index). + pub fn main_worktree_path_list(&self) -> &PathList { + &self.main_worktree_paths + } + + /// Iterate the (main_worktree_path, folder_path) pairs in insertion order. + pub fn ordered_pairs(&self) -> impl Iterator { + self.main_worktree_paths + .ordered_paths() + .zip(self.folder_paths.ordered_paths()) + } + + /// Add a new path pair. If the exact (main, folder) pair already exists, + /// this is a no-op. Rebuilds both internal `PathList`s to maintain + /// consistent ordering. 
+ pub fn add_path(&mut self, main_path: &Path, folder_path: &Path) { + let already_exists = self + .ordered_pairs() + .any(|(m, f)| m.as_path() == main_path && f.as_path() == folder_path); + if already_exists { + return; + } + let (mut mains, mut folders): (Vec, Vec) = self + .ordered_pairs() + .map(|(m, f)| (m.clone(), f.clone())) + .unzip(); + mains.push(main_path.to_path_buf()); + folders.push(folder_path.to_path_buf()); + self.main_worktree_paths = PathList::new(&mains); + self.folder_paths = PathList::new(&folders); + } + + /// Remove all pairs whose main worktree path matches the given path. + /// This removes the corresponding entries from both lists. + pub fn remove_main_path(&mut self, main_path: &Path) { + let (mains, folders): (Vec, Vec) = self + .ordered_pairs() + .filter(|(m, _)| m.as_path() != main_path) + .map(|(m, f)| (m.clone(), f.clone())) + .unzip(); + self.main_worktree_paths = PathList::new(&mains); + self.folder_paths = PathList::new(&folders); + } +} + /// Lightweight metadata for any thread (native or ACP), enough to populate /// the sidebar list and route to the correct load path when clicked. 
#[derive(Debug, Clone, PartialEq)] @@ -119,16 +332,25 @@ pub struct ThreadMetadata { pub title: SharedString, pub updated_at: DateTime, pub created_at: Option>, - pub folder_paths: PathList, - pub main_worktree_paths: PathList, + pub worktree_paths: ThreadWorktreePaths, + pub remote_connection: Option, pub archived: bool, } +impl ThreadMetadata { + pub fn folder_paths(&self) -> &PathList { + self.worktree_paths.folder_path_list() + } + pub fn main_worktree_paths(&self) -> &PathList { + self.worktree_paths.main_worktree_path_list() + } +} + impl From<&ThreadMetadata> for acp_thread::AgentSessionInfo { fn from(meta: &ThreadMetadata) -> Self { Self { session_id: meta.session_id.clone(), - work_dirs: Some(meta.folder_paths.clone()), + work_dirs: Some(meta.folder_paths().clone()), title: Some(meta.title.clone()), updated_at: Some(meta.updated_at), created_at: meta.created_at, @@ -190,6 +412,7 @@ pub struct ThreadMetadataStore { reload_task: Option>>, session_subscriptions: HashMap, pending_thread_ops_tx: smol::channel::Sender, + in_flight_archives: HashMap, smol::channel::Sender<()>)>, _db_operations_task: Task<()>, } @@ -311,12 +534,12 @@ impl ThreadMetadataStore { for row in rows { this.threads_by_paths - .entry(row.folder_paths.clone()) + .entry(row.folder_paths().clone()) .or_default() .insert(row.session_id.clone()); - if !row.main_worktree_paths.is_empty() { + if !row.main_worktree_paths().is_empty() { this.threads_by_main_paths - .entry(row.main_worktree_paths.clone()) + .entry(row.main_worktree_paths().clone()) .or_default() .insert(row.session_id.clone()); } @@ -351,17 +574,17 @@ impl ThreadMetadataStore { fn save_internal(&mut self, metadata: ThreadMetadata) { if let Some(thread) = self.threads.get(&metadata.session_id) { - if thread.folder_paths != metadata.folder_paths { - if let Some(session_ids) = self.threads_by_paths.get_mut(&thread.folder_paths) { + if thread.folder_paths() != metadata.folder_paths() { + if let Some(session_ids) = 
self.threads_by_paths.get_mut(thread.folder_paths()) { session_ids.remove(&metadata.session_id); } } - if thread.main_worktree_paths != metadata.main_worktree_paths - && !thread.main_worktree_paths.is_empty() + if thread.main_worktree_paths() != metadata.main_worktree_paths() + && !thread.main_worktree_paths().is_empty() { if let Some(session_ids) = self .threads_by_main_paths - .get_mut(&thread.main_worktree_paths) + .get_mut(thread.main_worktree_paths()) { session_ids.remove(&metadata.session_id); } @@ -372,13 +595,13 @@ impl ThreadMetadataStore { .insert(metadata.session_id.clone(), metadata.clone()); self.threads_by_paths - .entry(metadata.folder_paths.clone()) + .entry(metadata.folder_paths().clone()) .or_default() .insert(metadata.session_id.clone()); - if !metadata.main_worktree_paths.is_empty() { + if !metadata.main_worktree_paths().is_empty() { self.threads_by_main_paths - .entry(metadata.main_worktree_paths.clone()) + .entry(metadata.main_worktree_paths().clone()) .or_default() .insert(metadata.session_id.clone()); } @@ -396,19 +619,148 @@ impl ThreadMetadataStore { ) { if let Some(thread) = self.threads.get(session_id) { self.save_internal(ThreadMetadata { - folder_paths: work_dirs, + worktree_paths: ThreadWorktreePaths::from_path_lists( + thread.main_worktree_paths().clone(), + work_dirs.clone(), + ) + .unwrap_or_else(|_| ThreadWorktreePaths::from_folder_paths(&work_dirs)), ..thread.clone() }); cx.notify(); } } - pub fn archive(&mut self, session_id: &acp::SessionId, cx: &mut Context) { + pub fn archive( + &mut self, + session_id: &acp::SessionId, + archive_job: Option<(Task<()>, smol::channel::Sender<()>)>, + cx: &mut Context, + ) { self.update_archived(session_id, true, cx); + + if let Some(job) = archive_job { + self.in_flight_archives.insert(session_id.clone(), job); + } } pub fn unarchive(&mut self, session_id: &acp::SessionId, cx: &mut Context) { self.update_archived(session_id, false, cx); + // Dropping the Sender triggers cancellation in the 
background task. + self.in_flight_archives.remove(session_id); + } + + pub fn cleanup_completed_archive(&mut self, session_id: &acp::SessionId) { + self.in_flight_archives.remove(session_id); + } + + /// Updates a thread's `folder_paths` after an archived worktree has been + /// restored to disk. The restored worktree may land at a different path + /// than it had before archival, so each `(old_path, new_path)` pair in + /// `path_replacements` is applied to the thread's stored folder paths. + pub fn update_restored_worktree_paths( + &mut self, + session_id: &acp::SessionId, + path_replacements: &[(PathBuf, PathBuf)], + cx: &mut Context, + ) { + if let Some(thread) = self.threads.get(session_id).cloned() { + let mut paths: Vec = thread.folder_paths().paths().to_vec(); + for (old_path, new_path) in path_replacements { + if let Some(pos) = paths.iter().position(|p| p == old_path) { + paths[pos] = new_path.clone(); + } + } + let new_folder_paths = PathList::new(&paths); + self.save_internal(ThreadMetadata { + worktree_paths: ThreadWorktreePaths::from_path_lists( + thread.main_worktree_paths().clone(), + new_folder_paths.clone(), + ) + .unwrap_or_else(|_| ThreadWorktreePaths::from_folder_paths(&new_folder_paths)), + ..thread + }); + cx.notify(); + } + } + + pub fn complete_worktree_restore( + &mut self, + session_id: &acp::SessionId, + path_replacements: &[(PathBuf, PathBuf)], + cx: &mut Context, + ) { + if let Some(thread) = self.threads.get(session_id).cloned() { + let mut paths: Vec = thread.folder_paths().paths().to_vec(); + for (old_path, new_path) in path_replacements { + for path in &mut paths { + if path == old_path { + *path = new_path.clone(); + } + } + } + let new_folder_paths = PathList::new(&paths); + self.save_internal(ThreadMetadata { + worktree_paths: ThreadWorktreePaths::from_path_lists( + thread.main_worktree_paths().clone(), + new_folder_paths.clone(), + ) + .unwrap_or_else(|_| ThreadWorktreePaths::from_folder_paths(&new_folder_paths)), + ..thread + 
}); + cx.notify(); + } + } + + /// Apply a mutation to the worktree paths of all threads whose current + /// `main_worktree_paths` matches `current_main_paths`, then re-index. + pub fn change_worktree_paths( + &mut self, + current_main_paths: &PathList, + mutate: impl Fn(&mut ThreadWorktreePaths), + cx: &mut Context, + ) { + let session_ids: Vec<_> = self + .threads_by_main_paths + .get(current_main_paths) + .into_iter() + .flatten() + .cloned() + .collect(); + + if session_ids.is_empty() { + return; + } + + for session_id in &session_ids { + if let Some(thread) = self.threads.get_mut(session_id) { + if let Some(ids) = self + .threads_by_main_paths + .get_mut(thread.main_worktree_paths()) + { + ids.remove(session_id); + } + if let Some(ids) = self.threads_by_paths.get_mut(thread.folder_paths()) { + ids.remove(session_id); + } + + mutate(&mut thread.worktree_paths); + + self.threads_by_main_paths + .entry(thread.main_worktree_paths().clone()) + .or_default() + .insert(session_id.clone()); + self.threads_by_paths + .entry(thread.folder_paths().clone()) + .or_default() + .insert(session_id.clone()); + + self.pending_thread_ops_tx + .try_send(DbOperation::Upsert(thread.clone())) + .log_err(); + } + } + + cx.notify(); } pub fn create_archived_worktree( @@ -462,6 +814,30 @@ impl ThreadMetadataStore { cx.background_spawn(async move { db.delete_archived_worktree(id).await }) } + pub fn unlink_thread_from_all_archived_worktrees( + &self, + session_id: String, + cx: &App, + ) -> Task> { + let db = self.db.clone(); + cx.background_spawn(async move { + db.unlink_thread_from_all_archived_worktrees(session_id) + .await + }) + } + + pub fn is_archived_worktree_referenced( + &self, + archived_worktree_id: i64, + cx: &App, + ) -> Task> { + let db = self.db.clone(); + cx.background_spawn(async move { + db.is_archived_worktree_referenced(archived_worktree_id) + .await + }) + } + fn update_archived( &mut self, session_id: &acp::SessionId, @@ -479,13 +855,13 @@ impl ThreadMetadataStore 
{ pub fn delete(&mut self, session_id: acp::SessionId, cx: &mut Context) { if let Some(thread) = self.threads.get(&session_id) { - if let Some(session_ids) = self.threads_by_paths.get_mut(&thread.folder_paths) { + if let Some(session_ids) = self.threads_by_paths.get_mut(thread.folder_paths()) { session_ids.remove(&session_id); } - if !thread.main_worktree_paths.is_empty() { + if !thread.main_worktree_paths().is_empty() { if let Some(session_ids) = self .threads_by_main_paths - .get_mut(&thread.main_worktree_paths) + .get_mut(thread.main_worktree_paths()) { session_ids.remove(&session_id); } @@ -564,6 +940,7 @@ impl ThreadMetadataStore { reload_task: None, session_subscriptions: HashMap::default(), pending_thread_ops_tx: tx, + in_flight_archives: HashMap::default(), _db_operations_task, }; let _ = this.reload(cx); @@ -624,21 +1001,11 @@ impl ThreadMetadataStore { let agent_id = thread_ref.connection().agent_id(); - let folder_paths = { - let project = thread_ref.project().read(cx); - let paths: Vec> = project - .visible_worktrees(cx) - .map(|worktree| worktree.read(cx).abs_path()) - .collect(); - PathList::new(&paths) - }; + let project = thread_ref.project().read(cx); + let worktree_paths = ThreadWorktreePaths::from_project(project, cx); - let main_worktree_paths = thread_ref - .project() - .read(cx) - .project_group_key(cx) - .path_list() - .clone(); + let project_group_key = project.project_group_key(cx); + let remote_connection = project_group_key.host(); // Threads without a folder path (e.g. started in an empty // window) are archived by default so they don't get lost, @@ -646,7 +1013,7 @@ impl ThreadMetadataStore { // them from the archive. 
let archived = existing_thread .map(|t| t.archived) - .unwrap_or(folder_paths.is_empty()); + .unwrap_or(worktree_paths.is_empty()); let metadata = ThreadMetadata { session_id, @@ -654,8 +1021,8 @@ impl ThreadMetadataStore { title, created_at: Some(created_at), updated_at, - folder_paths, - main_worktree_paths, + worktree_paths, + remote_connection, archived, }; @@ -710,6 +1077,7 @@ impl Domain for ThreadMetadataDb { PRIMARY KEY (session_id, archived_worktree_id) ) STRICT; ), + sql!(ALTER TABLE sidebar_threads ADD COLUMN remote_connection TEXT), ]; } @@ -726,7 +1094,7 @@ impl ThreadMetadataDb { /// List all sidebar thread metadata, ordered by updated_at descending. pub fn list(&self) -> anyhow::Result> { self.select::( - "SELECT session_id, agent_id, title, updated_at, created_at, folder_paths, folder_paths_order, archived, main_worktree_paths, main_worktree_paths_order \ + "SELECT session_id, agent_id, title, updated_at, created_at, folder_paths, folder_paths_order, archived, main_worktree_paths, main_worktree_paths_order, remote_connection \ FROM sidebar_threads \ ORDER BY updated_at DESC" )?() @@ -743,24 +1111,30 @@ impl ThreadMetadataDb { let title = row.title.to_string(); let updated_at = row.updated_at.to_rfc3339(); let created_at = row.created_at.map(|dt| dt.to_rfc3339()); - let serialized = row.folder_paths.serialize(); - let (folder_paths, folder_paths_order) = if row.folder_paths.is_empty() { + let serialized = row.folder_paths().serialize(); + let (folder_paths, folder_paths_order) = if row.folder_paths().is_empty() { (None, None) } else { (Some(serialized.paths), Some(serialized.order)) }; - let main_serialized = row.main_worktree_paths.serialize(); - let (main_worktree_paths, main_worktree_paths_order) = if row.main_worktree_paths.is_empty() - { - (None, None) - } else { - (Some(main_serialized.paths), Some(main_serialized.order)) - }; + let main_serialized = row.main_worktree_paths().serialize(); + let (main_worktree_paths, main_worktree_paths_order) = 
+ if row.main_worktree_paths().is_empty() { + (None, None) + } else { + (Some(main_serialized.paths), Some(main_serialized.order)) + }; + let remote_connection = row + .remote_connection + .as_ref() + .map(serde_json::to_string) + .transpose() + .context("serialize thread metadata remote connection")?; let archived = row.archived; self.write(move |conn| { - let sql = "INSERT INTO sidebar_threads(session_id, agent_id, title, updated_at, created_at, folder_paths, folder_paths_order, archived, main_worktree_paths, main_worktree_paths_order) \ - VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10) \ + let sql = "INSERT INTO sidebar_threads(session_id, agent_id, title, updated_at, created_at, folder_paths, folder_paths_order, archived, main_worktree_paths, main_worktree_paths_order, remote_connection) \ + VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10, ?11) \ ON CONFLICT(session_id) DO UPDATE SET \ agent_id = excluded.agent_id, \ title = excluded.title, \ @@ -770,7 +1144,8 @@ impl ThreadMetadataDb { folder_paths_order = excluded.folder_paths_order, \ archived = excluded.archived, \ main_worktree_paths = excluded.main_worktree_paths, \ - main_worktree_paths_order = excluded.main_worktree_paths_order"; + main_worktree_paths_order = excluded.main_worktree_paths_order, \ + remote_connection = excluded.remote_connection"; let mut stmt = Statement::prepare(conn, sql)?; let mut i = stmt.bind(&id, 1)?; i = stmt.bind(&agent_id, i)?; @@ -781,7 +1156,8 @@ impl ThreadMetadataDb { i = stmt.bind(&folder_paths_order, i)?; i = stmt.bind(&archived, i)?; i = stmt.bind(&main_worktree_paths, i)?; - stmt.bind(&main_worktree_paths_order, i)?; + i = stmt.bind(&main_worktree_paths_order, i)?; + stmt.bind(&remote_connection, i)?; stmt.exec() }) .await @@ -872,6 +1248,31 @@ impl ThreadMetadataDb { }) .await } + + pub async fn unlink_thread_from_all_archived_worktrees( + &self, + session_id: String, + ) -> anyhow::Result<()> { + self.write(move |conn| { + let mut stmt = Statement::prepare( + conn, + 
"DELETE FROM thread_archived_worktrees WHERE session_id = ?", + )?; + stmt.bind(&session_id, 1)?; + stmt.exec() + }) + .await + } + + pub async fn is_archived_worktree_referenced( + &self, + archived_worktree_id: i64, + ) -> anyhow::Result { + self.select_row_bound::( + "SELECT COUNT(*) FROM thread_archived_worktrees WHERE archived_worktree_id = ?1", + )?(archived_worktree_id) + .map(|count| count.unwrap_or(0) > 0) + } } impl Column for ThreadMetadata { @@ -889,6 +1290,8 @@ impl Column for ThreadMetadata { Column::column(statement, next)?; let (main_worktree_paths_order_str, next): (Option, i32) = Column::column(statement, next)?; + let (remote_connection_json, next): (Option, i32) = + Column::column(statement, next)?; let agent_id = agent_id .map(|id| AgentId::new(id)) @@ -919,6 +1322,16 @@ impl Column for ThreadMetadata { }) .unwrap_or_default(); + let remote_connection = remote_connection_json + .as_deref() + .map(serde_json::from_str::) + .transpose() + .context("deserialize thread metadata remote connection")?; + + let worktree_paths = + ThreadWorktreePaths::from_path_lists(main_worktree_paths, folder_paths) + .unwrap_or_else(|_| ThreadWorktreePaths::default()); + Ok(( ThreadMetadata { session_id: acp::SessionId::new(id), @@ -926,8 +1339,8 @@ impl Column for ThreadMetadata { title: title.into(), updated_at, created_at, - folder_paths, - main_worktree_paths, + worktree_paths, + remote_connection, archived, }, next, @@ -971,6 +1384,7 @@ mod tests { use gpui::TestAppContext; use project::FakeFs; use project::Project; + use remote::WslConnectionOptions; use std::path::Path; use std::rc::Rc; @@ -1008,21 +1422,38 @@ mod tests { title: title.to_string().into(), updated_at, created_at: Some(updated_at), - folder_paths, - main_worktree_paths: PathList::default(), + worktree_paths: ThreadWorktreePaths::from_folder_paths(&folder_paths), + remote_connection: None, } } fn init_test(cx: &mut TestAppContext) { + let fs = FakeFs::new(cx.executor()); cx.update(|cx| { let 
settings_store = settings::SettingsStore::test(cx); cx.set_global(settings_store); + ::set_global(fs, cx); ThreadMetadataStore::init_global(cx); ThreadStore::init_global(cx); }); cx.run_until_parked(); } + fn clear_thread_metadata_remote_connection_backfill(cx: &mut TestAppContext) { + let kvp = cx.update(|cx| KeyValueStore::global(cx)); + smol::block_on(kvp.delete_kvp("thread-metadata-remote-connection-backfill".to_string())) + .unwrap(); + } + + fn run_thread_metadata_migrations(cx: &mut TestAppContext) { + clear_thread_metadata_remote_connection_backfill(cx); + cx.update(|cx| { + let migration_task = migrate_thread_metadata(cx); + migrate_thread_remote_connections(cx, migration_task); + }); + cx.run_until_parked(); + } + #[gpui::test] async fn test_store_initializes_cache_from_database(cx: &mut TestAppContext) { let first_paths = PathList::new(&[Path::new("/project-a")]); @@ -1222,8 +1653,8 @@ mod tests { title: "Existing Metadata".into(), updated_at: now - chrono::Duration::seconds(10), created_at: Some(now - chrono::Duration::seconds(10)), - folder_paths: project_a_paths.clone(), - main_worktree_paths: PathList::default(), + worktree_paths: ThreadWorktreePaths::from_folder_paths(&project_a_paths), + remote_connection: None, archived: false, }; @@ -1281,8 +1712,7 @@ mod tests { cx.run_until_parked(); } - cx.update(|cx| migrate_thread_metadata(cx)); - cx.run_until_parked(); + run_thread_metadata_migrations(cx); let list = cx.update(|cx| { let store = ThreadMetadataStore::global(cx); @@ -1332,8 +1762,8 @@ mod tests { title: "Existing Metadata".into(), updated_at: existing_updated_at, created_at: Some(existing_updated_at), - folder_paths: project_paths.clone(), - main_worktree_paths: PathList::default(), + worktree_paths: ThreadWorktreePaths::from_folder_paths(&project_paths), + remote_connection: None, archived: false, }; @@ -1362,8 +1792,7 @@ mod tests { save_task.await.unwrap(); cx.run_until_parked(); - cx.update(|cx| migrate_thread_metadata(cx)); - 
cx.run_until_parked(); + run_thread_metadata_migrations(cx); let list = cx.update(|cx| { let store = ThreadMetadataStore::global(cx); @@ -1374,6 +1803,82 @@ mod tests { assert_eq!(list[0].session_id.0.as_ref(), "existing-session"); } + #[gpui::test] + async fn test_migrate_thread_remote_connections_backfills_from_workspace_db( + cx: &mut TestAppContext, + ) { + init_test(cx); + + let folder_paths = PathList::new(&[Path::new("/remote-project")]); + let updated_at = Utc::now(); + let metadata = make_metadata( + "remote-session", + "Remote Thread", + updated_at, + folder_paths.clone(), + ); + + cx.update(|cx| { + let store = ThreadMetadataStore::global(cx); + store.update(cx, |store, cx| { + store.save(metadata, cx); + }); + }); + cx.run_until_parked(); + + let workspace_db = cx.update(|cx| WorkspaceDb::global(cx)); + let workspace_id = workspace_db.next_id().await.unwrap(); + let serialized_paths = folder_paths.serialize(); + let remote_connection_id = 1_i64; + workspace_db + .write(move |conn| { + let mut stmt = Statement::prepare( + conn, + "INSERT INTO remote_connections(id, kind, user, distro) VALUES (?1, ?2, ?3, ?4)", + )?; + let mut next_index = stmt.bind(&remote_connection_id, 1)?; + next_index = stmt.bind(&"wsl", next_index)?; + next_index = stmt.bind(&Some("anth".to_string()), next_index)?; + stmt.bind(&Some("Ubuntu".to_string()), next_index)?; + stmt.exec()?; + + let mut stmt = Statement::prepare( + conn, + "UPDATE workspaces SET paths = ?2, paths_order = ?3, remote_connection_id = ?4, timestamp = CURRENT_TIMESTAMP WHERE workspace_id = ?1", + )?; + let mut next_index = stmt.bind(&workspace_id, 1)?; + next_index = stmt.bind(&serialized_paths.paths, next_index)?; + next_index = stmt.bind(&serialized_paths.order, next_index)?; + stmt.bind(&Some(remote_connection_id as i32), next_index)?; + stmt.exec() + }) + .await + .unwrap(); + + clear_thread_metadata_remote_connection_backfill(cx); + cx.update(|cx| { + migrate_thread_remote_connections(cx, 
Task::ready(Ok(()))); + }); + cx.run_until_parked(); + + let metadata = cx.update(|cx| { + let store = ThreadMetadataStore::global(cx); + store + .read(cx) + .entry(&acp::SessionId::new("remote-session")) + .cloned() + .expect("expected migrated metadata row") + }); + + assert_eq!( + metadata.remote_connection, + Some(RemoteConnectionOptions::Wsl(WslConnectionOptions { + distro_name: "Ubuntu".to_string(), + user: Some("anth".to_string()), + })) + ); + } + #[gpui::test] async fn test_migrate_thread_metadata_archives_beyond_five_most_recent_per_project( cx: &mut TestAppContext, @@ -1422,8 +1927,7 @@ mod tests { cx.run_until_parked(); } - cx.update(|cx| migrate_thread_metadata(cx)); - cx.run_until_parked(); + run_thread_metadata_migrations(cx); let list = cx.update(|cx| { let store = ThreadMetadataStore::global(cx); @@ -1435,7 +1939,7 @@ mod tests { // Project A: 5 most recent should be unarchived, 2 oldest should be archived let mut project_a_entries: Vec<_> = list .iter() - .filter(|m| m.folder_paths == project_a_paths) + .filter(|m| *m.folder_paths() == project_a_paths) .collect(); assert_eq!(project_a_entries.len(), 7); project_a_entries.sort_by(|a, b| b.updated_at.cmp(&a.updated_at)); @@ -1458,7 +1962,7 @@ mod tests { // Project B: all 3 should be unarchived (under the limit) let project_b_entries: Vec<_> = list .iter() - .filter(|m| m.folder_paths == project_b_paths) + .filter(|m| *m.folder_paths() == project_b_paths) .collect(); assert_eq!(project_b_entries.len(), 3); assert!(project_b_entries.iter().all(|m| !m.archived)); @@ -1622,7 +2126,7 @@ mod tests { let without_worktree = store .entry(&session_without_worktree) .expect("missing metadata for thread without project association"); - assert!(without_worktree.folder_paths.is_empty()); + assert!(without_worktree.folder_paths().is_empty()); assert!( without_worktree.archived, "expected thread without project association to be archived" @@ -1632,7 +2136,7 @@ mod tests { .entry(&session_with_worktree) 
.expect("missing metadata for thread with project association"); assert_eq!( - with_worktree.folder_paths, + *with_worktree.folder_paths(), PathList::new(&[Path::new("/project-a")]) ); assert!( @@ -1812,10 +2316,11 @@ mod tests { cx.update(|cx| { let store = ThreadMetadataStore::global(cx); store.update(cx, |store, cx| { - store.archive(&acp::SessionId::new("session-1"), cx); + store.archive(&acp::SessionId::new("session-1"), None, cx); }); }); + // Thread 1 should now be archived cx.run_until_parked(); cx.update(|cx| { @@ -1889,7 +2394,7 @@ mod tests { cx.update(|cx| { let store = ThreadMetadataStore::global(cx); store.update(cx, |store, cx| { - store.archive(&acp::SessionId::new("session-2"), cx); + store.archive(&acp::SessionId::new("session-2"), None, cx); }); }); @@ -1989,7 +2494,7 @@ mod tests { cx.update(|cx| { let store = ThreadMetadataStore::global(cx); store.update(cx, |store, cx| { - store.archive(&acp::SessionId::new("session-1"), cx); + store.archive(&acp::SessionId::new("session-1"), None, cx); }); }); @@ -2037,7 +2542,7 @@ mod tests { cx.update(|cx| { let store = ThreadMetadataStore::global(cx); store.update(cx, |store, cx| { - store.archive(&acp::SessionId::new("nonexistent"), cx); + store.archive(&acp::SessionId::new("nonexistent"), None, cx); }); }); @@ -2066,7 +2571,7 @@ mod tests { let store = ThreadMetadataStore::global(cx); store.update(cx, |store, cx| { store.save(metadata.clone(), cx); - store.archive(&session_id, cx); + store.archive(&session_id, None, cx); }); }); @@ -2226,6 +2731,188 @@ mod tests { assert_eq!(wt1[0].id, wt2[0].id); } + #[gpui::test] + async fn test_complete_worktree_restore_multiple_paths(cx: &mut TestAppContext) { + init_test(cx); + let store = cx.update(|cx| ThreadMetadataStore::global(cx)); + + let original_paths = PathList::new(&[ + Path::new("/projects/worktree-a"), + Path::new("/projects/worktree-b"), + Path::new("/other/unrelated"), + ]); + let meta = make_metadata("session-multi", "Multi Thread", Utc::now(), 
original_paths); + + store.update(cx, |store, cx| { + store.save_manually(meta, cx); + }); + + let replacements = vec![ + ( + PathBuf::from("/projects/worktree-a"), + PathBuf::from("/restored/worktree-a"), + ), + ( + PathBuf::from("/projects/worktree-b"), + PathBuf::from("/restored/worktree-b"), + ), + ]; + + store.update(cx, |store, cx| { + store.complete_worktree_restore( + &acp::SessionId::new("session-multi"), + &replacements, + cx, + ); + }); + + let entry = store.read_with(cx, |store, _cx| { + store.entry(&acp::SessionId::new("session-multi")).cloned() + }); + let entry = entry.unwrap(); + let paths = entry.folder_paths().paths(); + assert_eq!(paths.len(), 3); + assert!(paths.contains(&PathBuf::from("/restored/worktree-a"))); + assert!(paths.contains(&PathBuf::from("/restored/worktree-b"))); + assert!(paths.contains(&PathBuf::from("/other/unrelated"))); + } + + #[gpui::test] + async fn test_complete_worktree_restore_preserves_unmatched_paths(cx: &mut TestAppContext) { + init_test(cx); + let store = cx.update(|cx| ThreadMetadataStore::global(cx)); + + let original_paths = + PathList::new(&[Path::new("/projects/worktree-a"), Path::new("/other/path")]); + let meta = make_metadata("session-partial", "Partial", Utc::now(), original_paths); + + store.update(cx, |store, cx| { + store.save_manually(meta, cx); + }); + + let replacements = vec![ + ( + PathBuf::from("/projects/worktree-a"), + PathBuf::from("/new/worktree-a"), + ), + ( + PathBuf::from("/nonexistent/path"), + PathBuf::from("/should/not/appear"), + ), + ]; + + store.update(cx, |store, cx| { + store.complete_worktree_restore( + &acp::SessionId::new("session-partial"), + &replacements, + cx, + ); + }); + + let entry = store.read_with(cx, |store, _cx| { + store + .entry(&acp::SessionId::new("session-partial")) + .cloned() + }); + let entry = entry.unwrap(); + let paths = entry.folder_paths().paths(); + assert_eq!(paths.len(), 2); + assert!(paths.contains(&PathBuf::from("/new/worktree-a"))); + 
assert!(paths.contains(&PathBuf::from("/other/path"))); + assert!(!paths.contains(&PathBuf::from("/should/not/appear"))); + } + + #[gpui::test] + async fn test_update_restored_worktree_paths_multiple(cx: &mut TestAppContext) { + init_test(cx); + let store = cx.update(|cx| ThreadMetadataStore::global(cx)); + + let original_paths = PathList::new(&[ + Path::new("/projects/worktree-a"), + Path::new("/projects/worktree-b"), + Path::new("/other/unrelated"), + ]); + let meta = make_metadata("session-multi", "Multi Thread", Utc::now(), original_paths); + + store.update(cx, |store, cx| { + store.save_manually(meta, cx); + }); + + let replacements = vec![ + ( + PathBuf::from("/projects/worktree-a"), + PathBuf::from("/restored/worktree-a"), + ), + ( + PathBuf::from("/projects/worktree-b"), + PathBuf::from("/restored/worktree-b"), + ), + ]; + + store.update(cx, |store, cx| { + store.update_restored_worktree_paths( + &acp::SessionId::new("session-multi"), + &replacements, + cx, + ); + }); + + let entry = store.read_with(cx, |store, _cx| { + store.entry(&acp::SessionId::new("session-multi")).cloned() + }); + let entry = entry.unwrap(); + let paths = entry.folder_paths().paths(); + assert_eq!(paths.len(), 3); + assert!(paths.contains(&PathBuf::from("/restored/worktree-a"))); + assert!(paths.contains(&PathBuf::from("/restored/worktree-b"))); + assert!(paths.contains(&PathBuf::from("/other/unrelated"))); + } + + #[gpui::test] + async fn test_update_restored_worktree_paths_preserves_unmatched(cx: &mut TestAppContext) { + init_test(cx); + let store = cx.update(|cx| ThreadMetadataStore::global(cx)); + + let original_paths = + PathList::new(&[Path::new("/projects/worktree-a"), Path::new("/other/path")]); + let meta = make_metadata("session-partial", "Partial", Utc::now(), original_paths); + + store.update(cx, |store, cx| { + store.save_manually(meta, cx); + }); + + let replacements = vec![ + ( + PathBuf::from("/projects/worktree-a"), + PathBuf::from("/new/worktree-a"), + ), + ( + 
PathBuf::from("/nonexistent/path"), + PathBuf::from("/should/not/appear"), + ), + ]; + + store.update(cx, |store, cx| { + store.update_restored_worktree_paths( + &acp::SessionId::new("session-partial"), + &replacements, + cx, + ); + }); + + let entry = store.read_with(cx, |store, _cx| { + store + .entry(&acp::SessionId::new("session-partial")) + .cloned() + }); + let entry = entry.unwrap(); + let paths = entry.folder_paths().paths(); + assert_eq!(paths.len(), 2); + assert!(paths.contains(&PathBuf::from("/new/worktree-a"))); + assert!(paths.contains(&PathBuf::from("/other/path"))); + assert!(!paths.contains(&PathBuf::from("/should/not/appear"))); + } + #[gpui::test] async fn test_multiple_archived_worktrees_per_thread(cx: &mut TestAppContext) { init_test(cx); @@ -2291,4 +2978,136 @@ mod tests { assert!(paths.contains(&Path::new("/projects/worktree-a"))); assert!(paths.contains(&Path::new("/projects/worktree-b"))); } + + // ── ThreadWorktreePaths tests ────────────────────────────────────── + + /// Helper to build a `ThreadWorktreePaths` from (main, folder) pairs. + fn make_worktree_paths(pairs: &[(&str, &str)]) -> ThreadWorktreePaths { + let (mains, folders): (Vec<&Path>, Vec<&Path>) = pairs + .iter() + .map(|(m, f)| (Path::new(*m), Path::new(*f))) + .unzip(); + ThreadWorktreePaths::from_path_lists(PathList::new(&mains), PathList::new(&folders)) + .unwrap() + } + + #[test] + fn test_thread_worktree_paths_full_add_then_remove_cycle() { + // Full scenario from the issue: + // 1. Start with linked worktree selectric → zed + // 2. Add cloud + // 3. 
Remove zed + + let mut paths = make_worktree_paths(&[("/projects/zed", "/worktrees/selectric/zed")]); + + // Step 2: add cloud + paths.add_path(Path::new("/projects/cloud"), Path::new("/projects/cloud")); + + assert_eq!(paths.ordered_pairs().count(), 2); + assert_eq!( + paths.folder_path_list(), + &PathList::new(&[ + Path::new("/worktrees/selectric/zed"), + Path::new("/projects/cloud"), + ]) + ); + assert_eq!( + paths.main_worktree_path_list(), + &PathList::new(&[Path::new("/projects/zed"), Path::new("/projects/cloud"),]) + ); + + // Step 3: remove zed + paths.remove_main_path(Path::new("/projects/zed")); + + assert_eq!(paths.ordered_pairs().count(), 1); + assert_eq!( + paths.folder_path_list(), + &PathList::new(&[Path::new("/projects/cloud")]) + ); + assert_eq!( + paths.main_worktree_path_list(), + &PathList::new(&[Path::new("/projects/cloud")]) + ); + } + + #[test] + fn test_thread_worktree_paths_add_is_idempotent() { + let mut paths = make_worktree_paths(&[("/projects/zed", "/projects/zed")]); + + paths.add_path(Path::new("/projects/zed"), Path::new("/projects/zed")); + + assert_eq!(paths.ordered_pairs().count(), 1); + } + + #[test] + fn test_thread_worktree_paths_remove_nonexistent_is_noop() { + let mut paths = make_worktree_paths(&[("/projects/zed", "/worktrees/selectric/zed")]); + + paths.remove_main_path(Path::new("/projects/nonexistent")); + + assert_eq!(paths.ordered_pairs().count(), 1); + } + + #[test] + fn test_thread_worktree_paths_from_path_lists_preserves_association() { + let folder = PathList::new(&[ + Path::new("/worktrees/selectric/zed"), + Path::new("/projects/cloud"), + ]); + let main = PathList::new(&[Path::new("/projects/zed"), Path::new("/projects/cloud")]); + + let paths = ThreadWorktreePaths::from_path_lists(main, folder).unwrap(); + + let pairs: Vec<_> = paths + .ordered_pairs() + .map(|(m, f)| (m.clone(), f.clone())) + .collect(); + assert_eq!(pairs.len(), 2); + assert!(pairs.contains(&( + PathBuf::from("/projects/zed"), + 
PathBuf::from("/worktrees/selectric/zed") + ))); + assert!(pairs.contains(&( + PathBuf::from("/projects/cloud"), + PathBuf::from("/projects/cloud") + ))); + } + + #[test] + fn test_thread_worktree_paths_main_deduplicates_linked_worktrees() { + // Two linked worktrees of the same main repo: the main_worktree_path_list + // deduplicates because PathList stores unique sorted paths, but + // ordered_pairs still has both entries. + let paths = make_worktree_paths(&[ + ("/projects/zed", "/worktrees/selectric/zed"), + ("/projects/zed", "/worktrees/feature/zed"), + ]); + + // main_worktree_path_list has the duplicate main path twice + // (PathList keeps all entries from its input) + assert_eq!(paths.ordered_pairs().count(), 2); + assert_eq!( + paths.folder_path_list(), + &PathList::new(&[ + Path::new("/worktrees/selectric/zed"), + Path::new("/worktrees/feature/zed"), + ]) + ); + assert_eq!( + paths.main_worktree_path_list(), + &PathList::new(&[Path::new("/projects/zed"), Path::new("/projects/zed"),]) + ); + } + + #[test] + fn test_thread_worktree_paths_mismatched_lengths_returns_error() { + let folder = PathList::new(&[ + Path::new("/worktrees/selectric/zed"), + Path::new("/projects/cloud"), + ]); + let main = PathList::new(&[Path::new("/projects/zed")]); + + let result = ThreadWorktreePaths::from_path_lists(main, folder); + assert!(result.is_err()); + } } diff --git a/crates/agent_ui/src/thread_worktree_archive.rs b/crates/agent_ui/src/thread_worktree_archive.rs new file mode 100644 index 0000000000000000000000000000000000000000..4398a2154d4abd550535b247ab1a9e518f84b39d --- /dev/null +++ b/crates/agent_ui/src/thread_worktree_archive.rs @@ -0,0 +1,728 @@ +use std::{ + path::{Path, PathBuf}, + sync::Arc, +}; + +use agent_client_protocol as acp; +use anyhow::{Context as _, Result, anyhow}; +use gpui::{App, AsyncApp, Entity, Task}; +use project::{ + LocalProjectFlags, Project, WorktreeId, + git_store::{Repository, resolve_git_worktree_to_main_repo}, +}; +use util::ResultExt; 
+use workspace::{AppState, MultiWorkspace, Workspace}; + +use crate::thread_metadata_store::{ArchivedGitWorktree, ThreadMetadataStore}; + +/// The plan for archiving a single git worktree root. +/// +/// A thread can have multiple folder paths open, so there may be multiple +/// `RootPlan`s per archival operation. Each one captures everything needed to +/// persist the worktree's git state and then remove it from disk. +/// +/// All fields are gathered synchronously by [`build_root_plan`] while the +/// worktree is still loaded in open projects. This is important because +/// workspace removal tears down project and repository entities, making +/// them unavailable for the later async persist/remove steps. +#[derive(Clone)] +pub struct RootPlan { + /// Absolute path of the git worktree on disk. + pub root_path: PathBuf, + /// Absolute path to the main git repository this worktree is linked to. + /// Used both for creating a git ref to prevent GC of WIP commits during + /// [`persist_worktree_state`], and for `git worktree remove` during + /// [`remove_root`]. + pub main_repo_path: PathBuf, + /// Every open `Project` that has this worktree loaded, so they can all + /// call `remove_worktree` and release it during [`remove_root`]. + /// Multiple projects can reference the same path when the user has the + /// worktree open in more than one workspace. + pub affected_projects: Vec, + /// The `Repository` entity for this worktree, used to run git commands + /// (create WIP commits, stage files, reset) during + /// [`persist_worktree_state`]. `None` when the `GitStore` hasn't created + /// a `Repository` for this worktree yet — in that case, + /// `persist_worktree_state` falls back to creating a temporary headless + /// project to obtain one. + pub worktree_repo: Option>, + /// The branch the worktree was on, so it can be restored later. 
+ /// `None` if the worktree was in detached HEAD state or if no + /// `Repository` entity was available at planning time (in which case + /// `persist_worktree_state` reads it from the repo snapshot instead). + pub branch_name: Option, +} + +/// A `Project` that references a worktree being archived, paired with the +/// `WorktreeId` it uses for that worktree. +/// +/// The same worktree path can appear in multiple open workspaces/projects +/// (e.g. when the user has two windows open that both include the same +/// linked worktree). Each one needs to call `remove_worktree` and wait for +/// the release during [`remove_root`], otherwise the project would still +/// hold a reference to the directory and `git worktree remove` would fail. +#[derive(Clone)] +pub struct AffectedProject { + pub project: Entity, + pub worktree_id: WorktreeId, +} + +fn archived_worktree_ref_name(id: i64) -> String { + format!("refs/archived-worktrees/{}", id) +} + +/// Builds a [`RootPlan`] for archiving the git worktree at `path`. +/// +/// This is a synchronous planning step that must run *before* any workspace +/// removal, because it needs live project and repository entities that are +/// torn down when a workspace is removed. It does three things: +/// +/// 1. Finds every `Project` across all open workspaces that has this +/// worktree loaded (`affected_projects`). +/// 2. Looks for a `Repository` entity whose snapshot identifies this path +/// as a linked worktree (`worktree_repo`), which is needed for the git +/// operations in [`persist_worktree_state`]. +/// 3. Determines the `main_repo_path` — the parent repo that owns this +/// linked worktree — needed for both git ref creation and +/// `git worktree remove`. +/// +/// When no `Repository` entity is available (e.g. the `GitStore` hasn't +/// finished scanning), the function falls back to deriving `main_repo_path` +/// from the worktree snapshot's `root_repo_common_dir`. 
In that case +/// `worktree_repo` is `None` and [`persist_worktree_state`] will create a +/// temporary headless project to obtain one. +/// +/// Returns `None` if no open project has this path as a visible worktree. +pub fn build_root_plan( + path: &Path, + workspaces: &[Entity], + cx: &App, +) -> Option { + let path = path.to_path_buf(); + + let affected_projects = workspaces + .iter() + .filter_map(|workspace| { + let project = workspace.read(cx).project().clone(); + let worktree = project + .read(cx) + .visible_worktrees(cx) + .find(|worktree| worktree.read(cx).abs_path().as_ref() == path.as_path())?; + let worktree_id = worktree.read(cx).id(); + Some(AffectedProject { + project, + worktree_id, + }) + }) + .collect::>(); + + if affected_projects.is_empty() { + return None; + } + + let linked_repo = workspaces + .iter() + .flat_map(|workspace| { + workspace + .read(cx) + .project() + .read(cx) + .repositories(cx) + .values() + .cloned() + .collect::>() + }) + .find_map(|repo| { + let snapshot = repo.read(cx).snapshot(); + (snapshot.is_linked_worktree() + && snapshot.work_directory_abs_path.as_ref() == path.as_path()) + .then_some((snapshot, repo)) + }); + + let matching_worktree_snapshot = workspaces.iter().find_map(|workspace| { + workspace + .read(cx) + .project() + .read(cx) + .visible_worktrees(cx) + .find(|worktree| worktree.read(cx).abs_path().as_ref() == path.as_path()) + .map(|worktree| worktree.read(cx).snapshot()) + }); + + let (main_repo_path, worktree_repo, branch_name) = + if let Some((linked_snapshot, repo)) = linked_repo { + ( + linked_snapshot.original_repo_abs_path.to_path_buf(), + Some(repo), + linked_snapshot + .branch + .as_ref() + .map(|branch| branch.name().to_string()), + ) + } else { + let main_repo_path = matching_worktree_snapshot + .as_ref()? + .root_repo_common_dir() + .and_then(|dir| dir.parent())? 
+ .to_path_buf(); + (main_repo_path, None, None) + }; + + Some(RootPlan { + root_path: path, + main_repo_path, + affected_projects, + worktree_repo, + branch_name, + }) +} + +/// Returns `true` if any unarchived thread other than `current_session_id` +/// references `path` in its folder paths. Used to determine whether a +/// worktree can safely be removed from disk. +pub fn path_is_referenced_by_other_unarchived_threads( + current_session_id: &acp::SessionId, + path: &Path, + cx: &App, +) -> bool { + ThreadMetadataStore::global(cx) + .read(cx) + .entries() + .filter(|thread| thread.session_id != *current_session_id) + .filter(|thread| !thread.archived) + .any(|thread| { + thread + .folder_paths() + .paths() + .iter() + .any(|other_path| other_path.as_path() == path) + }) +} + +/// Removes a worktree from all affected projects and deletes it from disk +/// via `git worktree remove`. +/// +/// This is the destructive counterpart to [`persist_worktree_state`]. It +/// first detaches the worktree from every [`AffectedProject`], waits for +/// each project to fully release it, then asks the main repository to +/// delete the worktree directory. If the git removal fails, the worktree +/// is re-added to each project via [`rollback_root`]. 
+pub async fn remove_root(root: RootPlan, cx: &mut AsyncApp) -> Result<()> { + let release_tasks: Vec<_> = root + .affected_projects + .iter() + .map(|affected| { + let project = affected.project.clone(); + let worktree_id = affected.worktree_id; + project.update(cx, |project, cx| { + let wait = project.wait_for_worktree_release(worktree_id, cx); + project.remove_worktree(worktree_id, cx); + wait + }) + }) + .collect(); + + if let Err(error) = remove_root_after_worktree_removal(&root, release_tasks, cx).await { + rollback_root(&root, cx).await; + return Err(error); + } + + Ok(()) +} + +async fn remove_root_after_worktree_removal( + root: &RootPlan, + release_tasks: Vec>>, + cx: &mut AsyncApp, +) -> Result<()> { + for task in release_tasks { + if let Err(error) = task.await { + log::error!("Failed waiting for worktree release: {error:#}"); + } + } + + let (repo, _temp_project) = find_or_create_repository(&root.main_repo_path, cx).await?; + // force=true is required because the working directory is still dirty + // — persist_worktree_state captures state into detached commits without + // modifying the real index or working tree, so git refuses to delete + // the worktree without --force. + let receiver = repo.update(cx, |repo: &mut Repository, _cx| { + repo.remove_worktree(root.root_path.clone(), true) + }); + let result = receiver + .await + .map_err(|_| anyhow!("git worktree removal was canceled"))?; + // Keep _temp_project alive until after the await so the headless project isn't dropped mid-operation + drop(_temp_project); + result +} + +/// Finds a live `Repository` entity for the given path, or creates a temporary +/// `Project::local` to obtain one. +/// +/// `Repository` entities can only be obtained through a `Project` because +/// `GitStore` (which creates and manages `Repository` entities) is owned by +/// `Project`. When no open workspace contains the repo we need, we spin up a +/// headless `Project::local` just to get a `Repository` handle. 
The caller +/// keeps the returned `Option>` alive for the duration of the +/// git operations, then drops it. +/// +/// Future improvement: decoupling `GitStore` from `Project` so that +/// `Repository` entities can be created standalone would eliminate this +/// temporary-project workaround. +async fn find_or_create_repository( + repo_path: &Path, + cx: &mut AsyncApp, +) -> Result<(Entity, Option>)> { + let repo_path_owned = repo_path.to_path_buf(); + let live_repo = cx.update(|cx| { + all_open_workspaces(cx) + .into_iter() + .flat_map(|workspace| { + workspace + .read(cx) + .project() + .read(cx) + .repositories(cx) + .values() + .cloned() + .collect::>() + }) + .find(|repo| { + repo.read(cx).snapshot().work_directory_abs_path.as_ref() + == repo_path_owned.as_path() + }) + }); + + if let Some(repo) = live_repo { + return Ok((repo, None)); + } + + let app_state = + current_app_state(cx).context("no app state available for temporary project")?; + let temp_project = cx.update(|cx| { + Project::local( + app_state.client.clone(), + app_state.node_runtime.clone(), + app_state.user_store.clone(), + app_state.languages.clone(), + app_state.fs.clone(), + None, + LocalProjectFlags::default(), + cx, + ) + }); + + let repo_path_for_worktree = repo_path.to_path_buf(); + let create_worktree = temp_project.update(cx, |project, cx| { + project.create_worktree(repo_path_for_worktree, true, cx) + }); + let _worktree = create_worktree.await?; + let initial_scan = temp_project.read_with(cx, |project, cx| project.wait_for_initial_scan(cx)); + initial_scan.await; + + let repo_path_for_find = repo_path.to_path_buf(); + let repo = temp_project + .update(cx, |project, cx| { + project + .repositories(cx) + .values() + .find(|repo| { + repo.read(cx).snapshot().work_directory_abs_path.as_ref() + == repo_path_for_find.as_path() + }) + .cloned() + }) + .context("failed to resolve temporary repository handle")?; + + let barrier = repo.update(cx, |repo: &mut Repository, _cx| repo.barrier()); + 
barrier + .await + .map_err(|_| anyhow!("temporary repository barrier canceled"))?; + Ok((repo, Some(temp_project))) +} + +/// Re-adds the worktree to every affected project after a failed +/// [`remove_root`]. +async fn rollback_root(root: &RootPlan, cx: &mut AsyncApp) { + for affected in &root.affected_projects { + let task = affected.project.update(cx, |project, cx| { + project.create_worktree(root.root_path.clone(), true, cx) + }); + task.await.log_err(); + } +} + +/// Saves the worktree's full git state so it can be restored later. +/// +/// This creates two detached commits (via [`create_archive_checkpoint`] on +/// the `GitRepository` trait) that capture the staged and unstaged state +/// without moving any branch ref. The commits are: +/// - "WIP staged": a tree matching the current index, parented on HEAD +/// - "WIP unstaged": a tree with all files (including untracked), +/// parented on the staged commit +/// +/// After creating the commits, this function: +/// 1. Records the commit SHAs, branch name, and paths in a DB record. +/// 2. Links every thread referencing this worktree to that record. +/// 3. Creates a git ref on the main repo to prevent GC of the commits. +/// +/// On success, returns the archived worktree DB row ID for rollback. +pub async fn persist_worktree_state(root: &RootPlan, cx: &mut AsyncApp) -> Result { + let (worktree_repo, _temp_worktree_project) = match &root.worktree_repo { + Some(worktree_repo) => (worktree_repo.clone(), None), + None => find_or_create_repository(&root.root_path, cx).await?, + }; + + let original_commit_hash = worktree_repo + .update(cx, |repo, _cx| repo.head_sha()) + .await + .map_err(|_| anyhow!("head_sha canceled"))? + .context("failed to read original HEAD SHA")? + .context("HEAD SHA is None")?; + + // Create two detached WIP commits without moving the branch. 
+ let checkpoint_rx = worktree_repo.update(cx, |repo, _cx| repo.create_archive_checkpoint()); + let (staged_commit_hash, unstaged_commit_hash) = checkpoint_rx + .await + .map_err(|_| anyhow!("create_archive_checkpoint canceled"))? + .context("failed to create archive checkpoint")?; + + // Create DB record + let store = cx.update(|cx| ThreadMetadataStore::global(cx)); + let worktree_path_str = root.root_path.to_string_lossy().to_string(); + let main_repo_path_str = root.main_repo_path.to_string_lossy().to_string(); + let branch_name = root.branch_name.clone().or_else(|| { + worktree_repo.read_with(cx, |repo, _cx| { + repo.snapshot() + .branch + .as_ref() + .map(|branch| branch.name().to_string()) + }) + }); + + let db_result = store + .read_with(cx, |store, cx| { + store.create_archived_worktree( + worktree_path_str.clone(), + main_repo_path_str.clone(), + branch_name.clone(), + staged_commit_hash.clone(), + unstaged_commit_hash.clone(), + original_commit_hash.clone(), + cx, + ) + }) + .await + .context("failed to create archived worktree DB record"); + let archived_worktree_id = match db_result { + Ok(id) => id, + Err(error) => { + return Err(error); + } + }; + + // Link all threads on this worktree to the archived record + let session_ids: Vec = store.read_with(cx, |store, _cx| { + store + .entries() + .filter(|thread| { + thread + .folder_paths() + .paths() + .iter() + .any(|p| p.as_path() == root.root_path) + }) + .map(|thread| thread.session_id.clone()) + .collect() + }); + + for session_id in &session_ids { + let link_result = store + .read_with(cx, |store, cx| { + store.link_thread_to_archived_worktree( + session_id.0.to_string(), + archived_worktree_id, + cx, + ) + }) + .await; + if let Err(error) = link_result { + if let Err(delete_error) = store + .read_with(cx, |store, cx| { + store.delete_archived_worktree(archived_worktree_id, cx) + }) + .await + { + log::error!( + "Failed to delete archived worktree DB record during link rollback: \ + {delete_error:#}" 
+ ); + } + return Err(error.context("failed to link thread to archived worktree")); + } + } + + // Create git ref on main repo to prevent GC of the detached commits. + // This is fatal: without the ref, git gc will eventually collect the + // WIP commits and a later restore will silently fail. + let ref_name = archived_worktree_ref_name(archived_worktree_id); + let (main_repo, _temp_project) = find_or_create_repository(&root.main_repo_path, cx) + .await + .context("could not open main repo to create archive ref")?; + let rx = main_repo.update(cx, |repo, _cx| { + repo.update_ref(ref_name.clone(), unstaged_commit_hash.clone()) + }); + rx.await + .map_err(|_| anyhow!("update_ref canceled")) + .and_then(|r| r) + .with_context(|| format!("failed to create ref {ref_name} on main repo"))?; + drop(_temp_project); + + Ok(archived_worktree_id) +} + +/// Undoes a successful [`persist_worktree_state`] by deleting the git ref +/// on the main repo and removing the DB record. Since the WIP commits are +/// detached (they don't move any branch), no git reset is needed — the +/// commits will be garbage-collected once the ref is removed. +pub async fn rollback_persist(archived_worktree_id: i64, root: &RootPlan, cx: &mut AsyncApp) { + // Delete the git ref on main repo + if let Ok((main_repo, _temp_project)) = + find_or_create_repository(&root.main_repo_path, cx).await + { + let ref_name = archived_worktree_ref_name(archived_worktree_id); + let rx = main_repo.update(cx, |repo, _cx| repo.delete_ref(ref_name)); + rx.await.ok().and_then(|r| r.log_err()); + drop(_temp_project); + } + + // Delete the DB record + let store = cx.update(|cx| ThreadMetadataStore::global(cx)); + if let Err(error) = store + .read_with(cx, |store, cx| { + store.delete_archived_worktree(archived_worktree_id, cx) + }) + .await + { + log::error!("Failed to delete archived worktree DB record during rollback: {error:#}"); + } +} + +/// Restores a previously archived worktree back to disk from its DB record. 
+/// +/// Creates the git worktree at the original commit (the branch never moved +/// during archival since WIP commits are detached), switches to the branch, +/// then uses [`restore_archive_checkpoint`] to reconstruct the staged/ +/// unstaged state from the WIP commit trees. +pub async fn restore_worktree_via_git( + row: &ArchivedGitWorktree, + cx: &mut AsyncApp, +) -> Result { + let (main_repo, _temp_project) = find_or_create_repository(&row.main_repo_path, cx).await?; + + let worktree_path = &row.worktree_path; + let app_state = current_app_state(cx).context("no app state available")?; + let already_exists = app_state.fs.metadata(worktree_path).await?.is_some(); + + let created_new_worktree = if already_exists { + let is_git_worktree = + resolve_git_worktree_to_main_repo(app_state.fs.as_ref(), worktree_path) + .await + .is_some(); + + if !is_git_worktree { + let rx = main_repo.update(cx, |repo, _cx| repo.repair_worktrees()); + rx.await + .map_err(|_| anyhow!("worktree repair was canceled"))? + .context("failed to repair worktrees")?; + } + false + } else { + // Create worktree at the original commit — the branch still points + // here because archival used detached commits. + let rx = main_repo.update(cx, |repo, _cx| { + repo.create_worktree_detached(worktree_path.clone(), row.original_commit_hash.clone()) + }); + rx.await + .map_err(|_| anyhow!("worktree creation was canceled"))? + .context("failed to create worktree")?; + true + }; + + let (wt_repo, _temp_wt_project) = match find_or_create_repository(worktree_path, cx).await { + Ok(result) => result, + Err(error) => { + remove_new_worktree_on_error(created_new_worktree, &main_repo, worktree_path, cx).await; + return Err(error); + } + }; + + // Switch to the branch. Since the branch was never moved during + // archival (WIP commits are detached), it still points at + // original_commit_hash, so this is essentially a no-op for HEAD. 
+ if let Some(branch_name) = &row.branch_name { + let rx = wt_repo.update(cx, |repo, _cx| repo.change_branch(branch_name.clone())); + if let Err(checkout_error) = rx.await.map_err(|e| anyhow!("{e}")).and_then(|r| r) { + log::debug!( + "change_branch('{}') failed: {checkout_error:#}, trying create_branch", + branch_name + ); + let rx = wt_repo.update(cx, |repo, _cx| { + repo.create_branch(branch_name.clone(), None) + }); + if let Ok(Err(error)) | Err(error) = rx.await.map_err(|e| anyhow!("{e}")) { + log::warn!( + "Could not create branch '{}': {error} — \ + restored worktree will be in detached HEAD state.", + branch_name + ); + } + } + } + + // Restore the staged/unstaged state from the WIP commit trees. + // read-tree --reset -u applies the unstaged tree (including deletions) + // to the working directory, then a bare read-tree sets the index to + // the staged tree without touching the working directory. + let restore_rx = wt_repo.update(cx, |repo, _cx| { + repo.restore_archive_checkpoint( + row.staged_commit_hash.clone(), + row.unstaged_commit_hash.clone(), + ) + }); + if let Err(error) = restore_rx + .await + .map_err(|_| anyhow!("restore_archive_checkpoint canceled")) + .and_then(|r| r) + { + remove_new_worktree_on_error(created_new_worktree, &main_repo, worktree_path, cx).await; + return Err(error.context("failed to restore archive checkpoint")); + } + + Ok(worktree_path.clone()) +} + +async fn remove_new_worktree_on_error( + created_new_worktree: bool, + main_repo: &Entity, + worktree_path: &PathBuf, + cx: &mut AsyncApp, +) { + if created_new_worktree { + let rx = main_repo.update(cx, |repo, _cx| { + repo.remove_worktree(worktree_path.clone(), true) + }); + rx.await.ok().and_then(|r| r.log_err()); + } +} + +/// Deletes the git ref and DB records for a single archived worktree. +/// Used when an archived worktree is no longer referenced by any thread. 
+pub async fn cleanup_archived_worktree_record(row: &ArchivedGitWorktree, cx: &mut AsyncApp) { + // Delete the git ref from the main repo + if let Ok((main_repo, _temp_project)) = find_or_create_repository(&row.main_repo_path, cx).await + { + let ref_name = archived_worktree_ref_name(row.id); + let rx = main_repo.update(cx, |repo, _cx| repo.delete_ref(ref_name)); + match rx.await { + Ok(Ok(())) => {} + Ok(Err(error)) => log::warn!("Failed to delete archive ref: {error}"), + Err(_) => log::warn!("Archive ref deletion was canceled"), + } + // Keep _temp_project alive until after the await so the headless project isn't dropped mid-operation + drop(_temp_project); + } + + // Delete the DB records + let store = cx.update(|cx| ThreadMetadataStore::global(cx)); + store + .read_with(cx, |store, cx| store.delete_archived_worktree(row.id, cx)) + .await + .log_err(); +} + +/// Cleans up all archived worktree data associated with a thread being deleted. +/// +/// This unlinks the thread from all its archived worktrees and, for any +/// archived worktree that is no longer referenced by any other thread, +/// deletes the git ref and DB records. 
+pub async fn cleanup_thread_archived_worktrees(session_id: &acp::SessionId, cx: &mut AsyncApp) { + let store = cx.update(|cx| ThreadMetadataStore::global(cx)); + + let archived_worktrees = store + .read_with(cx, |store, cx| { + store.get_archived_worktrees_for_thread(session_id.0.to_string(), cx) + }) + .await; + let archived_worktrees = match archived_worktrees { + Ok(rows) => rows, + Err(error) => { + log::error!( + "Failed to fetch archived worktrees for thread {}: {error:#}", + session_id.0 + ); + return; + } + }; + + if archived_worktrees.is_empty() { + return; + } + + if let Err(error) = store + .read_with(cx, |store, cx| { + store.unlink_thread_from_all_archived_worktrees(session_id.0.to_string(), cx) + }) + .await + { + log::error!( + "Failed to unlink thread {} from archived worktrees: {error:#}", + session_id.0 + ); + return; + } + + for row in &archived_worktrees { + let still_referenced = store + .read_with(cx, |store, cx| { + store.is_archived_worktree_referenced(row.id, cx) + }) + .await; + match still_referenced { + Ok(true) => {} + Ok(false) => { + cleanup_archived_worktree_record(row, cx).await; + } + Err(error) => { + log::error!( + "Failed to check if archived worktree {} is still referenced: {error:#}", + row.id + ); + } + } + } +} + +/// Collects every `Workspace` entity across all open `MultiWorkspace` windows. 
+pub fn all_open_workspaces(cx: &App) -> Vec> { + cx.windows() + .into_iter() + .filter_map(|window| window.downcast::()) + .flat_map(|multi_workspace| { + multi_workspace + .read(cx) + .map(|multi_workspace| multi_workspace.workspaces().cloned().collect::>()) + .unwrap_or_default() + }) + .collect() +} + +fn current_app_state(cx: &mut AsyncApp) -> Option> { + cx.update(|cx| { + all_open_workspaces(cx) + .into_iter() + .next() + .map(|workspace| workspace.read(cx).app_state().clone()) + }) +} diff --git a/crates/agent_ui/src/threads_archive_view.rs b/crates/agent_ui/src/threads_archive_view.rs index 7cb8410e5017438b0e8adde673887c13397d9abf..6e73584ef87f11810e4c860cc6ff4c8d8ff015a9 100644 --- a/crates/agent_ui/src/threads_archive_view.rs +++ b/crates/agent_ui/src/threads_archive_view.rs @@ -26,7 +26,7 @@ use picker::{ use project::{AgentId, AgentServerStore}; use settings::Settings as _; use theme::ActiveTheme; -use ui::ThreadItem; +use ui::{AgentThreadStatus, ThreadItem}; use ui::{ Divider, KeyBinding, ListItem, ListItemSpacing, ListSubHeader, Tooltip, WithScrollbar, prelude::*, utils::platform_title_bar_height, @@ -113,6 +113,7 @@ fn fuzzy_match_positions(query: &str, text: &str) -> Option> { pub enum ThreadsArchiveViewEvent { Close, Unarchive { thread: ThreadMetadata }, + CancelRestore { session_id: acp::SessionId }, } impl EventEmitter for ThreadsArchiveView {} @@ -131,6 +132,7 @@ pub struct ThreadsArchiveView { workspace: WeakEntity, agent_connection_store: WeakEntity, agent_server_store: WeakEntity, + restoring: HashSet, } impl ThreadsArchiveView { @@ -199,6 +201,7 @@ impl ThreadsArchiveView { workspace, agent_connection_store, agent_server_store, + restoring: HashSet::default(), }; this.update_items(cx); @@ -213,6 +216,16 @@ impl ThreadsArchiveView { self.selection = None; } + pub fn mark_restoring(&mut self, session_id: &acp::SessionId, cx: &mut Context) { + self.restoring.insert(session_id.clone()); + cx.notify(); + } + + pub fn clear_restoring(&mut self, 
session_id: &acp::SessionId, cx: &mut Context) { + self.restoring.remove(session_id); + cx.notify(); + } + pub fn focus_filter_editor(&self, window: &mut Window, cx: &mut App) { let handle = self.filter_editor.read(cx).focus_handle(cx); handle.focus(window, cx); @@ -323,11 +336,16 @@ impl ThreadsArchiveView { window: &mut Window, cx: &mut Context, ) { - if thread.folder_paths.is_empty() { + if self.restoring.contains(&thread.session_id) { + return; + } + + if thread.folder_paths().is_empty() { self.show_project_picker_for_thread(thread, window, cx); return; } + self.mark_restoring(&thread.session_id, cx); self.selection = None; self.reset_filter_editor_text(window, cx); cx.emit(ThreadsArchiveViewEvent::Unarchive { thread }); @@ -510,14 +528,16 @@ impl ThreadsArchiveView { IconName::Sparkle }; - ThreadItem::new(id, thread.title.clone()) + let is_restoring = self.restoring.contains(&thread.session_id); + + let base = ThreadItem::new(id, thread.title.clone()) .icon(icon) .when_some(icon_from_external_svg, |this, svg| { this.custom_icon_from_external_svg(svg) }) .timestamp(timestamp) .highlight_positions(highlight_positions.clone()) - .project_paths(thread.folder_paths.paths_owned()) + .project_paths(thread.folder_paths().paths_owned()) .focused(is_focused) .hovered(is_hovered) .on_hover(cx.listener(move |this, is_hovered, _window, cx| { @@ -527,8 +547,31 @@ impl ThreadsArchiveView { this.hovered_index = None; } cx.notify(); - })) - .action_slot( + })); + + if is_restoring { + base.status(AgentThreadStatus::Running) + .action_slot( + IconButton::new("cancel-restore", IconName::Close) + .style(ButtonStyle::Filled) + .icon_size(IconSize::Small) + .icon_color(Color::Muted) + .tooltip(Tooltip::text("Cancel Restore")) + .on_click({ + let session_id = thread.session_id.clone(); + cx.listener(move |this, _, _, cx| { + this.clear_restoring(&session_id, cx); + cx.emit(ThreadsArchiveViewEvent::CancelRestore { + session_id: session_id.clone(), + }); + cx.stop_propagation(); + }) 
+ }), + ) + .tooltip(Tooltip::text("Restoring\u{2026}")) + .into_any_element() + } else { + base.action_slot( IconButton::new("delete-thread", IconName::Trash) .style(ButtonStyle::Filled) .icon_size(IconSize::Small) @@ -561,6 +604,7 @@ impl ThreadsArchiveView { }) }) .into_any_element() + } } } } @@ -603,6 +647,9 @@ impl ThreadsArchiveView { .wait_for_connection() }); cx.spawn(async move |_this, cx| { + crate::thread_worktree_archive::cleanup_thread_archived_worktrees(&session_id, cx) + .await; + let state = task.await?; let task = cx.update(|cx| { if let Some(list) = state.connection.session_list(cx) { @@ -883,7 +930,8 @@ impl ProjectPickerDelegate { window: &mut Window, cx: &mut Context>, ) { - self.thread.folder_paths = paths.clone(); + self.thread.worktree_paths = + super::thread_metadata_store::ThreadWorktreePaths::from_folder_paths(&paths); ThreadMetadataStore::global(cx).update(cx, |store, cx| { store.update_working_directories(&self.thread.session_id, paths, cx); }); diff --git a/crates/collab/src/db.rs b/crates/collab/src/db.rs index 44abc37af66e3f169d3af1a7d5e29063e382c620..b3a943bef44904218ccdef35579a6f7eaf6a475b 100644 --- a/crates/collab/src/db.rs +++ b/crates/collab/src/db.rs @@ -532,6 +532,7 @@ impl RejoinedProject { root_name: worktree.root_name.clone(), visible: worktree.visible, abs_path: worktree.abs_path.clone(), + root_repo_common_dir: None, }) .collect(), collaborators: self diff --git a/crates/collab/src/rpc.rs b/crates/collab/src/rpc.rs index 20316fc3403de0e6212d13d455c5b619000d71b1..fa84a95837d390e4c81c09c1e11d7fc4ad704f20 100644 --- a/crates/collab/src/rpc.rs +++ b/crates/collab/src/rpc.rs @@ -1894,6 +1894,7 @@ async fn join_project( root_name: worktree.root_name.clone(), visible: worktree.visible, abs_path: worktree.abs_path.clone(), + root_repo_common_dir: None, }) .collect::>(); diff --git a/crates/fs/src/fake_git_repo.rs b/crates/fs/src/fake_git_repo.rs index 
7b89a0751f17ef8c2bba837882f2a31c7d5451e5..1b4e89102f942c3b4e5526b914c67c271a47ee2e 100644 --- a/crates/fs/src/fake_git_repo.rs +++ b/crates/fs/src/fake_git_repo.rs @@ -1179,6 +1179,39 @@ impl GitRepository for FakeGitRepository { .boxed() } + fn create_archive_checkpoint(&self) -> BoxFuture<'_, Result<(String, String)>> { + let executor = self.executor.clone(); + let fs = self.fs.clone(); + let checkpoints = self.checkpoints.clone(); + let repository_dir_path = self.repository_dir_path.parent().unwrap().to_path_buf(); + async move { + executor.simulate_random_delay().await; + let staged_oid = git::Oid::random(&mut *executor.rng().lock()); + let unstaged_oid = git::Oid::random(&mut *executor.rng().lock()); + let entry = fs.entry(&repository_dir_path)?; + checkpoints.lock().insert(staged_oid, entry.clone()); + checkpoints.lock().insert(unstaged_oid, entry); + Ok((staged_oid.to_string(), unstaged_oid.to_string())) + } + .boxed() + } + + fn restore_archive_checkpoint( + &self, + // The fake filesystem doesn't model a separate index, so only the + // unstaged (full working directory) snapshot is restored. + _staged_sha: String, + unstaged_sha: String, + ) -> BoxFuture<'_, Result<()>> { + match unstaged_sha.parse() { + Ok(commit_sha) => self.restore_checkpoint(GitRepositoryCheckpoint { commit_sha }), + Err(error) => async move { + Err(anyhow::anyhow!(error).context("failed to parse unstaged SHA as Oid")) + } + .boxed(), + } + } + fn compare_checkpoints( &self, left: GitRepositoryCheckpoint, diff --git a/crates/git/src/repository.rs b/crates/git/src/repository.rs index d7049c0a50cb94c049556e395e818dbbddfb89bf..6d17641c6ef9afafe7967f3d4bd5b37ef8c363d3 100644 --- a/crates/git/src/repository.rs +++ b/crates/git/src/repository.rs @@ -916,6 +916,20 @@ pub trait GitRepository: Send + Sync { /// Resets to a previously-created checkpoint. 
fn restore_checkpoint(&self, checkpoint: GitRepositoryCheckpoint) -> BoxFuture<'_, Result<()>>; + /// Creates two detached commits capturing the current staged and unstaged + /// state without moving any branch. Returns (staged_sha, unstaged_sha). + fn create_archive_checkpoint(&self) -> BoxFuture<'_, Result<(String, String)>>; + + /// Restores the working directory and index from archive checkpoint SHAs. + /// Assumes HEAD is already at the correct commit (original_commit_hash). + /// Restores the index to match staged_sha's tree, and the working + /// directory to match unstaged_sha's tree. + fn restore_archive_checkpoint( + &self, + staged_sha: String, + unstaged_sha: String, + ) -> BoxFuture<'_, Result<()>>; + /// Compares two checkpoints, returning true if they are equal fn compare_checkpoints( &self, @@ -2607,6 +2621,90 @@ impl GitRepository for RealGitRepository { .boxed() } + fn create_archive_checkpoint(&self) -> BoxFuture<'_, Result<(String, String)>> { + let git_binary = self.git_binary(); + self.executor + .spawn(async move { + let mut git = git_binary?.envs(checkpoint_author_envs()); + + let head_sha = git + .run(&["rev-parse", "HEAD"]) + .await + .context("failed to read HEAD")?; + + // Capture the staged state: write-tree reads the current index + let staged_tree = git + .run(&["write-tree"]) + .await + .context("failed to write staged tree")?; + let staged_sha = git + .run(&[ + "commit-tree", + &staged_tree, + "-p", + &head_sha, + "-m", + "WIP staged", + ]) + .await + .context("failed to create staged commit")?; + + // Capture the full state (staged + unstaged + untracked) using + // a temporary index so we don't disturb the real one. 
+ let unstaged_sha = git + .with_temp_index(async |git| { + git.run(&["add", "--all"]).await?; + let full_tree = git.run(&["write-tree"]).await?; + let sha = git + .run(&[ + "commit-tree", + &full_tree, + "-p", + &staged_sha, + "-m", + "WIP unstaged", + ]) + .await?; + Ok(sha) + }) + .await + .context("failed to create unstaged commit")?; + + Ok((staged_sha, unstaged_sha)) + }) + .boxed() + } + + fn restore_archive_checkpoint( + &self, + staged_sha: String, + unstaged_sha: String, + ) -> BoxFuture<'_, Result<()>> { + let git_binary = self.git_binary(); + self.executor + .spawn(async move { + let git = git_binary?; + + // First, set the index AND working tree to match the unstaged + // tree. --reset -u computes a tree-level diff between the + // current index and unstaged_sha's tree and applies additions, + // modifications, and deletions to the working directory. + git.run(&["read-tree", "--reset", "-u", &unstaged_sha]) + .await + .context("failed to restore working directory from unstaged commit")?; + + // Then replace just the index with the staged tree. Without -u + // this doesn't touch the working directory, so the result is: + // working tree = unstaged state, index = staged state. 
+ git.run(&["read-tree", &staged_sha]) + .await + .context("failed to restore index from staged commit")?; + + Ok(()) + }) + .boxed() + } + fn compare_checkpoints( &self, left: GitRepositoryCheckpoint, diff --git a/crates/git_ui/src/worktree_picker.rs b/crates/git_ui/src/worktree_picker.rs index 6c95f42e99169370a08119bc22e2b71e33cb270d..2503c2ec6c4f5a669b0302ea45891434b901ef20 100644 --- a/crates/git_ui/src/worktree_picker.rs +++ b/crates/git_ui/src/worktree_picker.rs @@ -648,6 +648,7 @@ async fn open_remote_worktree( paths, app_state, window_to_use, + None, cx, ) .await?; diff --git a/crates/language_model_core/src/request.rs b/crates/language_model_core/src/request.rs index 48f7f00522bc3dd5c06747d662761efb003886c0..a35f4883389f0ab18ef67fb86851cb59a9294d0e 100644 --- a/crates/language_model_core/src/request.rs +++ b/crates/language_model_core/src/request.rs @@ -333,7 +333,9 @@ pub struct LanguageModelRequest { pub speed: Option, } -#[derive(Clone, Copy, Default, Debug, Serialize, Deserialize, PartialEq, Eq)] +#[derive( + Clone, Copy, Default, Debug, Serialize, Deserialize, PartialEq, Eq, schemars::JsonSchema, +)] #[serde(rename_all = "snake_case")] pub enum Speed { #[default] diff --git a/crates/project/src/agent_server_store.rs b/crates/project/src/agent_server_store.rs index 0b6bb2b739f677ca1f4f3d5558538372ec6e86ff..5a9721d827cf3d189c7954f0698b662e5aaf4852 100644 --- a/crates/project/src/agent_server_store.rs +++ b/crates/project/src/agent_server_store.rs @@ -1,4 +1,3 @@ -use remote::Interactive; use std::{ any::Any, path::{Path, PathBuf}, @@ -116,9 +115,9 @@ pub enum ExternalAgentSource { pub trait ExternalAgentServer { fn get_command( - &mut self, + &self, + extra_args: Vec, extra_env: HashMap, - new_version_available_tx: Option>>, cx: &mut AsyncApp, ) -> Task>; @@ -800,11 +799,10 @@ impl AgentServerStore { if no_browser { extra_env.insert("NO_BROWSER".to_owned(), "1".to_owned()); } - anyhow::Ok(agent.get_command( - extra_env, - new_version_available_tx, - 
&mut cx.to_async(), - )) + if let Some(new_version_available_tx) = new_version_available_tx { + agent.set_new_version_available_tx(new_version_available_tx); + } + anyhow::Ok(agent.get_command(vec![], extra_env, &mut cx.to_async())) })? .await?; Ok(proto::AgentServerCommand { @@ -986,16 +984,15 @@ impl ExternalAgentServer for RemoteExternalAgentServer { } fn get_command( - &mut self, + &self, + extra_args: Vec, extra_env: HashMap, - new_version_available_tx: Option>>, cx: &mut AsyncApp, ) -> Task> { let project_id = self.project_id; let name = self.name.to_string(); let upstream_client = self.upstream_client.downgrade(); let worktree_store = self.worktree_store.clone(); - self.new_version_available_tx = new_version_available_tx; cx.spawn(async move |cx| { let root_dir = worktree_store.read_with(cx, |worktree_store, cx| { crate::Project::default_visible_worktree_paths(worktree_store, cx) @@ -1015,22 +1012,13 @@ impl ExternalAgentServer for RemoteExternalAgentServer { }) })? .await?; - let root_dir = response.root_dir; + response.args.extend(extra_args); response.env.extend(extra_env); - let command = upstream_client.update(cx, |client, _| { - client.build_command_with_options( - Some(response.path), - &response.args, - &response.env.into_iter().collect(), - Some(root_dir.clone()), - None, - Interactive::No, - ) - })??; + Ok(AgentServerCommand { - path: command.program.into(), - args: command.args, - env: Some(command.env), + path: response.path.into(), + args: response.args, + env: Some(response.env.into_iter().collect()), }) }) } @@ -1162,12 +1150,11 @@ impl ExternalAgentServer for LocalExtensionArchiveAgent { } fn get_command( - &mut self, + &self, + extra_args: Vec, extra_env: HashMap, - new_version_available_tx: Option>>, cx: &mut AsyncApp, ) -> Task> { - self.new_version_available_tx = new_version_available_tx; let fs = self.fs.clone(); let http_client = self.http_client.clone(); let node_runtime = self.node_runtime.clone(); @@ -1309,9 +1296,12 @@ impl 
ExternalAgentServer for LocalExtensionArchiveAgent { } }; + let mut args = target_config.args.clone(); + args.extend(extra_args); + let command = AgentServerCommand { path: cmd_path, - args: target_config.args.clone(), + args, env: Some(env), }; @@ -1354,12 +1344,11 @@ impl ExternalAgentServer for LocalRegistryArchiveAgent { } fn get_command( - &mut self, + &self, + extra_args: Vec, extra_env: HashMap, - new_version_available_tx: Option>>, cx: &mut AsyncApp, ) -> Task> { - self.new_version_available_tx = new_version_available_tx; let fs = self.fs.clone(); let http_client = self.http_client.clone(); let node_runtime = self.node_runtime.clone(); @@ -1486,9 +1475,12 @@ impl ExternalAgentServer for LocalRegistryArchiveAgent { } }; + let mut args = target_config.args.clone(); + args.extend(extra_args); + let command = AgentServerCommand { path: cmd_path, - args: target_config.args.clone(), + args, env: Some(env), }; @@ -1530,12 +1522,11 @@ impl ExternalAgentServer for LocalRegistryNpxAgent { } fn get_command( - &mut self, + &self, + extra_args: Vec, extra_env: HashMap, - new_version_available_tx: Option>>, cx: &mut AsyncApp, ) -> Task> { - self.new_version_available_tx = new_version_available_tx; let node_runtime = self.node_runtime.clone(); let project_environment = self.project_environment.downgrade(); let package = self.package.clone(); @@ -1566,9 +1557,12 @@ impl ExternalAgentServer for LocalRegistryNpxAgent { env.extend(extra_env); env.extend(settings_env); + let mut args = npm_command.args; + args.extend(extra_args); + let command = AgentServerCommand { path: npm_command.path, - args: npm_command.args, + args, env: Some(env), }; @@ -1592,9 +1586,9 @@ struct LocalCustomAgent { impl ExternalAgentServer for LocalCustomAgent { fn get_command( - &mut self, + &self, + extra_args: Vec, extra_env: HashMap, - _new_version_available_tx: Option>>, cx: &mut AsyncApp, ) -> Task> { let mut command = self.command.clone(); @@ -1609,6 +1603,7 @@ impl ExternalAgentServer for 
LocalCustomAgent { env.extend(command.env.unwrap_or_default()); env.extend(extra_env); command.env = Some(env); + command.args.extend(extra_args); Ok(command) }) } diff --git a/crates/project/src/git_store.rs b/crates/project/src/git_store.rs index 3a5522a60188bd2fd89003a6f484fb80e70c5405..a00e2dcfc860d468795e15ad9d78df87d75f15e8 100644 --- a/crates/project/src/git_store.rs +++ b/crates/project/src/git_store.rs @@ -6054,22 +6054,20 @@ impl Repository { RepositoryState::Remote(RemoteRepositoryState { project_id, client }) => { let (name, commit, use_existing_branch) = match target { CreateWorktreeTarget::ExistingBranch { branch_name } => { - (branch_name, None, true) + (Some(branch_name), None, true) } CreateWorktreeTarget::NewBranch { branch_name, - base_sha: start_point, - } => (branch_name, start_point, false), - CreateWorktreeTarget::Detached { - base_sha: start_point, - } => (String::new(), start_point, false), + base_sha, + } => (Some(branch_name), base_sha, false), + CreateWorktreeTarget::Detached { base_sha } => (None, base_sha, false), }; client .request(proto::GitCreateWorktree { project_id: project_id.0, repository_id: id.to_proto(), - name, + name: name.unwrap_or_default(), directory: path.to_string_lossy().to_string(), commit, use_existing_branch, @@ -6159,15 +6157,37 @@ impl Repository { }) } - pub fn commit_exists(&mut self, sha: String) -> oneshot::Receiver> { + pub fn create_archive_checkpoint(&mut self) -> oneshot::Receiver> { self.send_job(None, move |repo, _cx| async move { match repo { RepositoryState::Local(LocalRepositoryState { backend, .. 
}) => { - let results = backend.revparse_batch(vec![sha]).await?; - Ok(results.into_iter().next().flatten().is_some()) + backend.create_archive_checkpoint().await } RepositoryState::Remote(_) => { - anyhow::bail!("commit_exists is not supported for remote repositories") + anyhow::bail!( + "create_archive_checkpoint is not supported for remote repositories" + ) + } + } + }) + } + + pub fn restore_archive_checkpoint( + &mut self, + staged_sha: String, + unstaged_sha: String, + ) -> oneshot::Receiver> { + self.send_job(None, move |repo, _cx| async move { + match repo { + RepositoryState::Local(LocalRepositoryState { backend, .. }) => { + backend + .restore_archive_checkpoint(staged_sha, unstaged_sha) + .await + } + RepositoryState::Remote(_) => { + anyhow::bail!( + "restore_archive_checkpoint is not supported for remote repositories" + ) } } }) diff --git a/crates/project/src/lsp_store.rs b/crates/project/src/lsp_store.rs index 9ea50fdc8f12b68147c1073219625c4fd257afd3..1479f159138040681122bac46ace6e73ad62337b 100644 --- a/crates/project/src/lsp_store.rs +++ b/crates/project/src/lsp_store.rs @@ -4430,7 +4430,8 @@ impl LspStore { WorktreeStoreEvent::WorktreeReleased(..) | WorktreeStoreEvent::WorktreeOrderChanged | WorktreeStoreEvent::WorktreeUpdatedGitRepositories(..) - | WorktreeStoreEvent::WorktreeDeletedEntry(..) => {} + | WorktreeStoreEvent::WorktreeDeletedEntry(..) + | WorktreeStoreEvent::WorktreeUpdatedRootRepoCommonDir(..) 
=> {} } } diff --git a/crates/project/src/project.rs b/crates/project/src/project.rs index 4d1d4a5da809559a36829b1c171556e9ad4eccd8..b93d774a08541aed9a7bd8776f76f66a0ce4677b 100644 --- a/crates/project/src/project.rs +++ b/crates/project/src/project.rs @@ -360,6 +360,7 @@ pub enum Event { WorktreeOrderChanged, WorktreeRemoved(WorktreeId), WorktreeUpdatedEntries(WorktreeId, UpdatedEntriesSet), + WorktreeUpdatedRootRepoCommonDir(WorktreeId), DiskBasedDiagnosticsStarted { language_server_id: LanguageServerId, }, @@ -3681,6 +3682,9 @@ impl Project { } // Listen to the GitStore instead. WorktreeStoreEvent::WorktreeUpdatedGitRepositories(_, _) => {} + WorktreeStoreEvent::WorktreeUpdatedRootRepoCommonDir(worktree_id) => { + cx.emit(Event::WorktreeUpdatedRootRepoCommonDir(*worktree_id)); + } } } @@ -4758,6 +4762,44 @@ impl Project { }) } + /// Returns a task that resolves when the given worktree's `Entity` is + /// fully dropped (all strong references released), not merely when + /// `remove_worktree` is called. `remove_worktree` drops the store's + /// reference and emits `WorktreeRemoved`, but other code may still + /// hold a strong handle — the worktree isn't safe to delete from + /// disk until every handle is gone. + /// + /// We use `observe_release` on the specific entity rather than + /// listening for `WorktreeReleased` events because it's simpler at + /// the call site (one awaitable task, no subscription / channel / + /// ID filtering). 
+ pub fn wait_for_worktree_release( + &mut self, + worktree_id: WorktreeId, + cx: &mut Context, + ) -> Task> { + let Some(worktree) = self.worktree_for_id(worktree_id, cx) else { + return Task::ready(Ok(())); + }; + + let (released_tx, released_rx) = futures::channel::oneshot::channel(); + let released_tx = std::sync::Arc::new(Mutex::new(Some(released_tx))); + let release_subscription = + cx.observe_release(&worktree, move |_project, _released_worktree, _cx| { + if let Some(released_tx) = released_tx.lock().take() { + let _ = released_tx.send(()); + } + }); + + cx.spawn(async move |_project, _cx| { + let _release_subscription = release_subscription; + released_rx + .await + .map_err(|_| anyhow!("worktree release observer dropped before release"))?; + Ok(()) + }) + } + pub fn remove_worktree(&mut self, id_to_remove: WorktreeId, cx: &mut Context) { self.worktree_store.update(cx, |worktree_store, cx| { worktree_store.remove_worktree(id_to_remove, cx); @@ -6055,6 +6097,7 @@ impl Project { /// workspaces by main repos. #[derive(PartialEq, Eq, Hash, Clone, Debug)] pub struct ProjectGroupKey { + /// The paths of the main worktrees for this project group. paths: PathList, host: Option, } @@ -6067,30 +6110,48 @@ impl ProjectGroupKey { Self { paths, host } } - pub fn display_name(&self) -> SharedString { + pub fn path_list(&self) -> &PathList { + &self.paths + } + + pub fn display_name( + &self, + path_detail_map: &std::collections::HashMap, + ) -> SharedString { let mut names = Vec::with_capacity(self.paths.paths().len()); for abs_path in self.paths.paths() { - if let Some(name) = abs_path.file_name() { - names.push(name.to_string_lossy().to_string()); + let detail = path_detail_map.get(abs_path).copied().unwrap_or(0); + let suffix = path_suffix(abs_path, detail); + if !suffix.is_empty() { + names.push(suffix); } } if names.is_empty() { - // TODO: Can we do something better in this case? 
"Empty Workspace".into() } else { names.join(", ").into() } } - pub fn path_list(&self) -> &PathList { - &self.paths - } - pub fn host(&self) -> Option { self.host.clone() } } +pub fn path_suffix(path: &Path, detail: usize) -> String { + let mut components: Vec<_> = path + .components() + .rev() + .filter_map(|component| match component { + std::path::Component::Normal(s) => Some(s.to_string_lossy()), + _ => None, + }) + .take(detail + 1) + .collect(); + components.reverse(); + components.join("/") +} + pub struct PathMatchCandidateSet { pub snapshot: Snapshot, pub include_ignored: bool, diff --git a/crates/project/src/worktree_store.rs b/crates/project/src/worktree_store.rs index 7ca721ddb50c3f216ed630665e547b60ce4d52bf..be95a6b0ded02ed3527195433adf6eb1ab1f781b 100644 --- a/crates/project/src/worktree_store.rs +++ b/crates/project/src/worktree_store.rs @@ -91,6 +91,7 @@ pub enum WorktreeStoreEvent { WorktreeUpdatedEntries(WorktreeId, UpdatedEntriesSet), WorktreeUpdatedGitRepositories(WorktreeId, UpdatedGitRepositoriesSet), WorktreeDeletedEntry(WorktreeId, ProjectEntryId), + WorktreeUpdatedRootRepoCommonDir(WorktreeId), } impl EventEmitter for WorktreeStore {} @@ -712,6 +713,7 @@ impl WorktreeStore { root_name, visible, abs_path: response.canonicalized_path, + root_repo_common_dir: response.root_repo_common_dir, }, client, path_style, @@ -812,7 +814,11 @@ impl WorktreeStore { // The worktree root itself has been deleted (for single-file worktrees) // The worktree will be removed via the observe_release callback } - worktree::Event::UpdatedRootRepoCommonDir => {} + worktree::Event::UpdatedRootRepoCommonDir => { + cx.emit(WorktreeStoreEvent::WorktreeUpdatedRootRepoCommonDir( + worktree_id, + )); + } } }) .detach(); @@ -1049,6 +1055,9 @@ impl WorktreeStore { root_name: worktree.root_name_str().to_owned(), visible: worktree.is_visible(), abs_path: worktree.abs_path().to_string_lossy().into_owned(), + root_repo_common_dir: worktree + .root_repo_common_dir() + .map(|p| 
p.to_string_lossy().into_owned()), } }) .collect() diff --git a/crates/project/tests/integration/ext_agent_tests.rs b/crates/project/tests/integration/ext_agent_tests.rs index bd4acf2b3e9419b62ff676331383b48f98874345..82135485d3f262e5984ddbd003b69b828839d4bc 100644 --- a/crates/project/tests/integration/ext_agent_tests.rs +++ b/crates/project/tests/integration/ext_agent_tests.rs @@ -8,9 +8,9 @@ struct NoopExternalAgent; impl ExternalAgentServer for NoopExternalAgent { fn get_command( - &mut self, + &self, + _extra_args: Vec, _extra_env: HashMap, - _new_version_available_tx: Option>>, _cx: &mut AsyncApp, ) -> Task> { Task::ready(Ok(AgentServerCommand { diff --git a/crates/project/tests/integration/extension_agent_tests.rs b/crates/project/tests/integration/extension_agent_tests.rs index 577bc3b2901c52f4f47d9d0c82ef89fc66e2c21a..5af2cd229c476a261a5d37666e00d2dff3b293b4 100644 --- a/crates/project/tests/integration/extension_agent_tests.rs +++ b/crates/project/tests/integration/extension_agent_tests.rs @@ -24,9 +24,9 @@ struct NoopExternalAgent; impl ExternalAgentServer for NoopExternalAgent { fn get_command( - &mut self, + &self, + _extra_args: Vec, _extra_env: HashMap, - _new_version_available_tx: Option>>, _cx: &mut AsyncApp, ) -> Task> { Task::ready(Ok(AgentServerCommand { diff --git a/crates/proto/proto/worktree.proto b/crates/proto/proto/worktree.proto index 08a5892b444c3bcbfbc0fb3f1d010c5233ea7c91..08a1317f6ac7e2f2a173e3080ec4c691b6fa1c98 100644 --- a/crates/proto/proto/worktree.proto +++ b/crates/proto/proto/worktree.proto @@ -40,6 +40,7 @@ message AddWorktree { message AddWorktreeResponse { uint64 worktree_id = 1; string canonicalized_path = 2; + optional string root_repo_common_dir = 3; } message RemoveWorktree { @@ -62,6 +63,7 @@ message WorktreeMetadata { string root_name = 2; bool visible = 3; string abs_path = 4; + optional string root_repo_common_dir = 5; } message ProjectPath { diff --git a/crates/recent_projects/src/recent_projects.rs 
b/crates/recent_projects/src/recent_projects.rs index 4600500c5fda093d4258ba5d9b804faf9a499f19..c90f2f69154f171dd5023697fbbf757c013f9b84 100644 --- a/crates/recent_projects/src/recent_projects.rs +++ b/crates/recent_projects/src/recent_projects.rs @@ -99,27 +99,40 @@ pub async fn get_recent_projects( .await .unwrap_or_default(); - let entries: Vec = workspaces + let filtered: Vec<_> = workspaces .into_iter() .filter(|(id, _, _, _)| Some(*id) != current_workspace_id) .filter(|(_, location, _, _)| matches!(location, SerializedWorkspaceLocation::Local)) + .collect(); + + let mut all_paths: Vec = filtered + .iter() + .flat_map(|(_, _, path_list, _)| path_list.paths().iter().cloned()) + .collect(); + all_paths.sort(); + all_paths.dedup(); + let path_details = + util::disambiguate::compute_disambiguation_details(&all_paths, |path, detail| { + project::path_suffix(path, detail) + }); + let path_detail_map: std::collections::HashMap = + all_paths.into_iter().zip(path_details).collect(); + + let entries: Vec = filtered + .into_iter() .map(|(workspace_id, _, path_list, timestamp)| { let paths: Vec = path_list.paths().to_vec(); let ordered_paths: Vec<&PathBuf> = path_list.ordered_paths().collect(); - let name = if ordered_paths.len() == 1 { - ordered_paths[0] - .file_name() - .map(|n| n.to_string_lossy().to_string()) - .unwrap_or_else(|| ordered_paths[0].to_string_lossy().to_string()) - } else { - ordered_paths - .iter() - .filter_map(|p| p.file_name()) - .map(|n| n.to_string_lossy().to_string()) - .collect::>() - .join(", ") - }; + let name = ordered_paths + .iter() + .map(|p| { + let detail = path_detail_map.get(*p).copied().unwrap_or(0); + project::path_suffix(p, detail) + }) + .filter(|s| !s.is_empty()) + .collect::>() + .join(", "); let full_path = ordered_paths .iter() @@ -170,6 +183,19 @@ fn get_open_folders(workspace: &Workspace, cx: &App) -> Vec { .map(|wt| wt.read(cx).id()) }; + let mut all_paths: Vec = visible_worktrees + .iter() + .map(|wt| 
wt.read(cx).abs_path().to_path_buf()) + .collect(); + all_paths.sort(); + all_paths.dedup(); + let path_details = + util::disambiguate::compute_disambiguation_details(&all_paths, |path, detail| { + project::path_suffix(path, detail) + }); + let path_detail_map: std::collections::HashMap = + all_paths.into_iter().zip(path_details).collect(); + let git_store = project.git_store().read(cx); let repositories: Vec<_> = git_store.repositories().values().cloned().collect(); @@ -178,8 +204,9 @@ fn get_open_folders(workspace: &Workspace, cx: &App) -> Vec { .map(|worktree| { let worktree_ref = worktree.read(cx); let worktree_id = worktree_ref.id(); - let name = SharedString::from(worktree_ref.root_name().as_unix_str().to_string()); let path = worktree_ref.abs_path().to_path_buf(); + let detail = path_detail_map.get(&path).copied().unwrap_or(0); + let name = SharedString::from(project::path_suffix(&path, detail)); let branch = get_branch_for_worktree(worktree_ref, &repositories, cx); let is_active = active_worktree_id == Some(worktree_id); OpenFolderEntry { diff --git a/crates/recent_projects/src/remote_connections.rs b/crates/recent_projects/src/remote_connections.rs index 869568edfcdbe9260a13aaa5c0ed7eed6b87e675..448115c6988a3e5a5088f708353d7c7d4ca620aa 100644 --- a/crates/recent_projects/src/remote_connections.rs +++ b/crates/recent_projects/src/remote_connections.rs @@ -132,7 +132,7 @@ pub async fn open_remote_project( app_state: Arc, open_options: workspace::OpenOptions, cx: &mut AsyncApp, -) -> Result<()> { +) -> Result> { let created_new_window = open_options.requesting_window.is_none(); let (existing, open_visible) = find_existing_workspace( @@ -193,7 +193,7 @@ pub async fn open_remote_project( .collect::>(); navigate_to_positions(&existing_window, items, &paths_with_positions, cx); - return Ok(()); + return Ok(existing_window); } // If the remote connection is dead (e.g. 
server not running after failed reconnect), // fall through to establish a fresh connection instead of showing an error. @@ -341,7 +341,7 @@ pub async fn open_remote_project( .update(cx, |_, window, _| window.remove_window()) .ok(); } - return Ok(()); + return Ok(window); } }; @@ -436,7 +436,7 @@ pub async fn open_remote_project( }); }) .ok(); - Ok(()) + Ok(window) } pub fn navigate_to_positions( diff --git a/crates/recent_projects/src/remote_servers.rs b/crates/recent_projects/src/remote_servers.rs index 97ba0ccbf10718418f8747313521a253220a4b9d..0e15abf296e491185f24718cddf72e2532e9e6aa 100644 --- a/crates/recent_projects/src/remote_servers.rs +++ b/crates/recent_projects/src/remote_servers.rs @@ -505,7 +505,7 @@ impl ProjectPicker { }?; let items = open_remote_project_with_existing_connection( - connection, project, paths, app_state, window, cx, + connection, project, paths, app_state, window, None, cx, ) .await .log_err(); diff --git a/crates/remote/src/remote.rs b/crates/remote/src/remote.rs index 9767481dbb2fc60b841a04af5bb4d616700a8b20..1e118dbb20e9a472b0c22a09431f8b99e6efee9b 100644 --- a/crates/remote/src/remote.rs +++ b/crates/remote/src/remote.rs @@ -9,7 +9,7 @@ pub use remote_client::OpenWslPath; pub use remote_client::{ CommandTemplate, ConnectionIdentifier, ConnectionState, Interactive, RemoteArch, RemoteClient, RemoteClientDelegate, RemoteClientEvent, RemoteConnection, RemoteConnectionOptions, RemoteOs, - RemotePlatform, connect, + RemotePlatform, connect, has_active_connection, }; pub use transport::docker::DockerConnectionOptions; pub use transport::ssh::{SshConnectionOptions, SshPortForwardOption}; diff --git a/crates/remote/src/remote_client.rs b/crates/remote/src/remote_client.rs index c04d3630f92bcc27afb01a619176d3ae79d3fac7..a32d5dc75c7fcb605d39c845d9fa54370a2b978c 100644 --- a/crates/remote/src/remote_client.rs +++ b/crates/remote/src/remote_client.rs @@ -377,6 +377,20 @@ pub async fn connect( .map_err(|e| e.cloned()) } +/// Returns `true` if 
the global [`ConnectionPool`] already has a live +/// connection for the given options. Callers can use this to decide +/// whether to show interactive UI (e.g., a password modal) before +/// connecting. +pub fn has_active_connection(opts: &RemoteConnectionOptions, cx: &App) -> bool { + cx.try_global::().is_some_and(|pool| { + matches!( + pool.connections.get(opts), + Some(ConnectionPoolEntry::Connected(remote)) + if remote.upgrade().is_some_and(|r| !r.has_been_killed()) + ) + }) +} + impl RemoteClient { pub fn new( unique_identifier: ConnectionIdentifier, diff --git a/crates/remote_connection/src/remote_connection.rs b/crates/remote_connection/src/remote_connection.rs index df6260d1c5b3cd1704bfe0ce6a8476bbc0f39670..8aa4622929d6086b99e840e6b52bd5f46c49c898 100644 --- a/crates/remote_connection/src/remote_connection.rs +++ b/crates/remote_connection/src/remote_connection.rs @@ -19,7 +19,7 @@ use ui::{ prelude::*, }; use ui_input::{ERASED_EDITOR_FACTORY, ErasedEditor}; -use workspace::{DismissDecision, ModalView}; +use workspace::{DismissDecision, ModalView, Workspace}; pub struct RemoteConnectionPrompt { connection_string: SharedString, @@ -536,6 +536,159 @@ impl RemoteClientDelegate { } } +/// Shows a [`RemoteConnectionModal`] on the given workspace and establishes +/// a remote connection. This is a convenience wrapper around +/// [`RemoteConnectionModal`] and [`connect`] suitable for use as the +/// `connect_remote` callback in [`MultiWorkspace::find_or_create_workspace`]. +/// +/// When the global connection pool already has a live connection for the +/// given options, the modal is skipped entirely and the connection is +/// reused silently. 
+pub fn connect_with_modal( + workspace: &Entity, + connection_options: RemoteConnectionOptions, + window: &mut Window, + cx: &mut App, +) -> Task>>> { + if remote::has_active_connection(&connection_options, cx) { + return connect_reusing_pool(connection_options, cx); + } + + workspace.update(cx, |workspace, cx| { + workspace.toggle_modal(window, cx, |window, cx| { + RemoteConnectionModal::new(&connection_options, Vec::new(), window, cx) + }); + let Some(modal) = workspace.active_modal::(cx) else { + return Task::ready(Err(anyhow::anyhow!( + "Failed to open remote connection dialog" + ))); + }; + let prompt = modal.read(cx).prompt.clone(); + connect( + ConnectionIdentifier::setup(), + connection_options, + prompt, + window, + cx, + ) + }) +} + +/// Dismisses any active [`RemoteConnectionModal`] on the given workspace. +/// +/// This should be called after a remote connection attempt completes +/// (success or failure) when the modal was shown on a workspace that may +/// outlive the connection flow — for example, when the modal is shown +/// on a local workspace before switching to a newly-created remote +/// workspace. +pub fn dismiss_connection_modal(workspace: &Entity, cx: &mut gpui::AsyncWindowContext) { + workspace + .update_in(cx, |workspace, _window, cx| { + if let Some(modal) = workspace.active_modal::(cx) { + modal.update(cx, |modal, cx| modal.finished(cx)); + } + }) + .ok(); +} + +/// Creates a [`RemoteClient`] by reusing an existing connection from the +/// global pool. No interactive UI is shown. This should only be called +/// when [`remote::has_active_connection`] returns `true`. 
+fn connect_reusing_pool( + connection_options: RemoteConnectionOptions, + cx: &mut App, +) -> Task>>> { + let delegate: Arc = Arc::new(BackgroundRemoteClientDelegate); + + cx.spawn(async move |cx| { + let connection = remote::connect(connection_options, delegate.clone(), cx).await?; + + let (_cancel_guard, cancel_rx) = oneshot::channel::<()>(); + cx.update(|cx| { + RemoteClient::new( + ConnectionIdentifier::setup(), + connection, + cancel_rx, + delegate, + cx, + ) + }) + .await + }) +} + +/// Delegate for remote connections that reuse an existing pooled +/// connection. Password prompts are not expected (the SSH transport +/// is already established), but server binary downloads are supported +/// via [`AutoUpdater`]. +struct BackgroundRemoteClientDelegate; + +impl remote::RemoteClientDelegate for BackgroundRemoteClientDelegate { + fn ask_password( + &self, + prompt: String, + _tx: oneshot::Sender, + _cx: &mut AsyncApp, + ) { + log::warn!( + "Pooled remote connection unexpectedly requires a password \ + (prompt: {prompt})" + ); + } + + fn set_status(&self, _status: Option<&str>, _cx: &mut AsyncApp) {} + + fn download_server_binary_locally( + &self, + platform: RemotePlatform, + release_channel: ReleaseChannel, + version: Option, + cx: &mut AsyncApp, + ) -> Task> { + cx.spawn(async move |cx| { + AutoUpdater::download_remote_server_release( + release_channel, + version.clone(), + platform.os.as_str(), + platform.arch.as_str(), + |_status, _cx| {}, + cx, + ) + .await + .with_context(|| { + format!( + "Downloading remote server binary (version: {}, os: {}, arch: {})", + version + .as_ref() + .map(|v| format!("{v}")) + .unwrap_or("unknown".to_string()), + platform.os, + platform.arch, + ) + }) + }) + } + + fn get_download_url( + &self, + platform: RemotePlatform, + release_channel: ReleaseChannel, + version: Option, + cx: &mut AsyncApp, + ) -> Task>> { + cx.spawn(async move |cx| { + AutoUpdater::get_remote_server_release_url( + release_channel, + version, + 
platform.os.as_str(), + platform.arch.as_str(), + cx, + ) + .await + }) + } +} + pub fn connect( unique_identifier: ConnectionIdentifier, connection_options: RemoteConnectionOptions, diff --git a/crates/remote_server/src/headless_project.rs b/crates/remote_server/src/headless_project.rs index 7bdbbad796bd2ced34ed7ccab690555457a0842b..63e9b4b787230ea877cdc92e1fdcdd6daa86dc0c 100644 --- a/crates/remote_server/src/headless_project.rs +++ b/crates/remote_server/src/headless_project.rs @@ -523,6 +523,9 @@ impl HeadlessProject { proto::AddWorktreeResponse { worktree_id: worktree.id().to_proto(), canonicalized_path: canonicalized.to_string_lossy().into_owned(), + root_repo_common_dir: worktree + .root_repo_common_dir() + .map(|p| p.to_string_lossy().into_owned()), } }); diff --git a/crates/remote_server/src/remote_editing_tests.rs b/crates/remote_server/src/remote_editing_tests.rs index f0f23577d31075ab815d6dba1cdbdccd275c184a..571c5e7ea1aa5d623cf70d8fd06252bd0860de1b 100644 --- a/crates/remote_server/src/remote_editing_tests.rs +++ b/crates/remote_server/src/remote_editing_tests.rs @@ -2256,8 +2256,8 @@ async fn test_remote_external_agent_server( .get_external_agent(&"foo".into()) .unwrap() .get_command( + vec![], HashMap::from_iter([("OTHER_VAR".into(), "other-val".into())]), - None, &mut cx.to_async(), ) }) @@ -2267,8 +2267,8 @@ async fn test_remote_external_agent_server( assert_eq!( command, AgentServerCommand { - path: "mock".into(), - args: vec!["foo-cli".into(), "--flag".into()], + path: "foo-cli".into(), + args: vec!["--flag".into()], env: Some(HashMap::from_iter([ ("NO_BROWSER".into(), "1".into()), ("VAR".into(), "val".into()), diff --git a/crates/settings_content/src/agent.rs b/crates/settings_content/src/agent.rs index 7e43e8466d3b3d6d0754870180b0b29f87554e1c..76891185c42ee36324c1cc160edfb27d63ecc0d6 100644 --- a/crates/settings_content/src/agent.rs +++ b/crates/settings_content/src/agent.rs @@ -256,6 +256,7 @@ impl AgentSettingsContent { model, 
enable_thinking: false, effort: None, + speed: None, }); } @@ -397,6 +398,7 @@ pub struct LanguageModelSelection { #[serde(default)] pub enable_thinking: bool, pub effort: Option, + pub speed: Option, } #[with_fallible_options] diff --git a/crates/settings_content/src/merge_from.rs b/crates/settings_content/src/merge_from.rs index c74d5887f11adf13770fe9dc375d960cd3fe68c7..82e3ee9a24e34445cf90b13c446c7b6356586500 100644 --- a/crates/settings_content/src/merge_from.rs +++ b/crates/settings_content/src/merge_from.rs @@ -56,6 +56,7 @@ merge_from_overwrites!( std::sync::Arc, std::path::PathBuf, std::sync::Arc, + language_model_core::Speed, ); impl MergeFrom for Option { diff --git a/crates/sidebar/Cargo.toml b/crates/sidebar/Cargo.toml index 41bf8cdad2068f0c67c38ea06fd176af11f3d560..e9ef4dea630e97732f050e6548392fde9ceedfc8 100644 --- a/crates/sidebar/Cargo.toml +++ b/crates/sidebar/Cargo.toml @@ -27,14 +27,17 @@ editor.workspace = true fs.workspace = true git.workspace = true gpui.workspace = true +log.workspace = true menu.workspace = true platform_title_bar.workspace = true project.workspace = true recent_projects.workspace = true remote.workspace = true +remote_connection.workspace = true serde.workspace = true serde_json.workspace = true settings.workspace = true +smol.workspace = true theme.workspace = true theme_settings.workspace = true ui.workspace = true @@ -48,7 +51,11 @@ acp_thread = { workspace = true, features = ["test-support"] } agent = { workspace = true, features = ["test-support"] } agent_ui = { workspace = true, features = ["test-support"] } editor.workspace = true +extension.workspace = true +language = { workspace = true, features = ["test-support"] } language_model = { workspace = true, features = ["test-support"] } +release_channel.workspace = true +semver.workspace = true pretty_assertions.workspace = true prompt_store.workspace = true recent_projects = { workspace = true, features = ["test-support"] } @@ -56,6 +63,13 @@ serde_json.workspace = 
true fs = { workspace = true, features = ["test-support"] } git.workspace = true gpui = { workspace = true, features = ["test-support"] } +client = { workspace = true, features = ["test-support"] } +clock = { workspace = true, features = ["test-support"] } +http_client = { workspace = true, features = ["test-support"] } +node_runtime = { workspace = true, features = ["test-support"] } project = { workspace = true, features = ["test-support"] } +remote = { workspace = true, features = ["test-support"] } +remote_connection = { workspace = true, features = ["test-support"] } +remote_server = { workspace = true, features = ["test-support"] } settings = { workspace = true, features = ["test-support"] } workspace = { workspace = true, features = ["test-support"] } diff --git a/crates/sidebar/src/sidebar.rs b/crates/sidebar/src/sidebar.rs index 77d2db85f4ea1269111ad49e6b484647faeed0d2..4d88ddeffdd6625768dd0207176c0984e9833a29 100644 --- a/crates/sidebar/src/sidebar.rs +++ b/crates/sidebar/src/sidebar.rs @@ -4,7 +4,8 @@ use acp_thread::ThreadStatus; use action_log::DiffStats; use agent_client_protocol::{self as acp}; use agent_settings::AgentSettings; -use agent_ui::thread_metadata_store::{ThreadMetadata, ThreadMetadataStore}; +use agent_ui::thread_metadata_store::{ThreadMetadata, ThreadMetadataStore, ThreadWorktreePaths}; +use agent_ui::thread_worktree_archive; use agent_ui::threads_archive_view::{ ThreadsArchiveView, ThreadsArchiveViewEvent, format_history_entry_timestamp, }; @@ -15,9 +16,9 @@ use agent_ui::{ use chrono::{DateTime, Utc}; use editor::Editor; use gpui::{ - Action as _, AnyElement, App, Context, Entity, FocusHandle, Focusable, KeyContext, ListState, - Pixels, Render, SharedString, WeakEntity, Window, WindowHandle, linear_color_stop, - linear_gradient, list, prelude::*, px, + Action as _, AnyElement, App, Context, DismissEvent, Entity, FocusHandle, Focusable, + KeyContext, ListState, Pixels, Render, SharedString, Task, WeakEntity, Window, WindowHandle, + 
linear_color_stop, linear_gradient, list, prelude::*, px, }; use menu::{ Cancel, Confirm, SelectChild, SelectFirst, SelectLast, SelectNext, SelectParent, SelectPrevious, @@ -33,6 +34,7 @@ use serde::{Deserialize, Serialize}; use settings::Settings as _; use std::collections::{HashMap, HashSet}; use std::mem; +use std::path::PathBuf; use std::rc::Rc; use theme::ActiveTheme; use ui::{ @@ -41,12 +43,12 @@ use ui::{ WithScrollbar, prelude::*, }; use util::ResultExt as _; -use util::path_list::{PathList, SerializedPathList}; +use util::path_list::PathList; use workspace::{ AddFolderToProject, CloseWindow, FocusWorkspaceSidebar, MultiWorkspace, MultiWorkspaceEvent, - NextProject, NextThread, Open, PreviousProject, PreviousThread, ShowFewerThreads, - ShowMoreThreads, Sidebar as WorkspaceSidebar, SidebarSide, ToggleWorkspaceSidebar, Workspace, - sidebar_side_context_menu, + NextProject, NextThread, Open, PreviousProject, PreviousThread, SerializedProjectGroupKey, + ShowFewerThreads, ShowMoreThreads, Sidebar as WorkspaceSidebar, SidebarSide, Toast, + ToggleWorkspaceSidebar, Workspace, notifications::NotificationId, sidebar_side_context_menu, }; use zed_actions::OpenRecent; @@ -94,9 +96,9 @@ struct SerializedSidebar { #[serde(default)] width: Option, #[serde(default)] - collapsed_groups: Vec, + collapsed_groups: Vec, #[serde(default)] - expanded_groups: Vec<(SerializedPathList, usize)>, + expanded_groups: Vec<(SerializedProjectGroupKey, usize)>, #[serde(default)] active_view: SerializedSidebarView, } @@ -108,6 +110,11 @@ enum SidebarView { Archive(Entity), } +enum ArchiveWorktreeOutcome { + Success, + Cancelled, +} + #[derive(Clone, Debug)] enum ActiveEntry { Thread { @@ -134,7 +141,12 @@ impl ActiveEntry { (ActiveEntry::Thread { session_id, .. }, ListEntry::Thread(thread)) => { thread.metadata.session_id == *session_id } - (ActiveEntry::Draft(_workspace), ListEntry::DraftThread { .. }) => true, + ( + ActiveEntry::Draft(_), + ListEntry::DraftThread { + workspace: None, .. 
+ }, + ) => true, _ => false, } } @@ -155,7 +167,25 @@ struct ActiveThreadInfo { #[derive(Clone)] enum ThreadEntryWorkspace { Open(Entity), - Closed(PathList), + Closed { + /// The paths this thread uses (may point to linked worktrees). + folder_paths: PathList, + /// The project group this thread belongs to. + project_group_key: ProjectGroupKey, + }, +} + +impl ThreadEntryWorkspace { + fn is_remote(&self, cx: &App) -> bool { + match self { + ThreadEntryWorkspace::Open(workspace) => { + !workspace.read(cx).project().read(cx).is_local() + } + ThreadEntryWorkspace::Closed { + project_group_key, .. + } => project_group_key.host().is_some(), + } + } } #[derive(Clone)] @@ -208,6 +238,7 @@ enum ListEntry { has_running_threads: bool, waiting_thread_count: usize, is_active: bool, + has_threads: bool, }, Thread(ThreadEntry), ViewMore { @@ -217,16 +248,9 @@ enum ListEntry { /// The user's active draft thread. Shows a prefix of the currently-typed /// prompt, or "Untitled Thread" if the prompt is empty. DraftThread { - worktrees: Vec, - }, - /// A convenience row for starting a new thread. Shown when a project group - /// has no threads, or when an open linked worktree workspace has no threads. - /// When `workspace` is `Some`, this entry is for a specific linked worktree - /// workspace and can be dismissed (removing that workspace). - NewThread { key: project::ProjectGroupKey, - worktrees: Vec, workspace: Option>, + worktrees: Vec, }, } @@ -247,37 +271,22 @@ impl ListEntry { match self { ListEntry::Thread(thread) => match &thread.workspace { ThreadEntryWorkspace::Open(ws) => vec![ws.clone()], - ThreadEntryWorkspace::Closed(_) => Vec::new(), + ThreadEntryWorkspace::Closed { .. } => Vec::new(), }, - ListEntry::DraftThread { .. } => { - vec![multi_workspace.workspace().clone()] - } - ListEntry::ProjectHeader { key, .. } => { - // The header only activates the main worktree workspace - // (the one whose root paths match the group key's path list). 
- multi_workspace - .workspaces() - .find(|ws| PathList::new(&ws.read(cx).root_paths(cx)) == *key.path_list()) - .cloned() - .into_iter() - .collect() - } - ListEntry::NewThread { key, workspace, .. } => { - // When the NewThread entry is for a specific linked worktree - // workspace, that workspace is reachable. Otherwise fall back - // to the main worktree workspace. + ListEntry::DraftThread { workspace, .. } => { if let Some(ws) = workspace { vec![ws.clone()] } else { - multi_workspace - .workspaces() - .find(|ws| PathList::new(&ws.read(cx).root_paths(cx)) == *key.path_list()) - .cloned() - .into_iter() - .collect() + // workspace: None means this is the active draft, + // which always lives on the current workspace. + vec![multi_workspace.workspace().clone()] } } - _ => Vec::new(), + ListEntry::ProjectHeader { key, .. } => multi_workspace + .workspaces_for_project_group(key, cx) + .cloned() + .collect(), + ListEntry::ViewMore { .. } => Vec::new(), } } } @@ -354,35 +363,76 @@ fn workspace_path_list(workspace: &Entity, cx: &App) -> PathList { /// /// For each path in the thread's `folder_paths`, produces a /// [`WorktreeInfo`] with a short display name, full path, and whether -/// the worktree is the main checkout or a linked git worktree. -fn worktree_info_from_thread_paths( - folder_paths: &PathList, - group_key: &project::ProjectGroupKey, -) -> impl Iterator { - let main_paths = group_key.path_list().paths(); - folder_paths.paths().iter().filter_map(|path| { - let is_main = main_paths.iter().any(|mp| mp.as_path() == path.as_path()); - if is_main { - let name = path.file_name()?.to_string_lossy().to_string(); - Some(WorktreeInfo { - name: SharedString::from(name), - full_path: SharedString::from(path.display().to_string()), +/// the worktree is the main checkout or a linked git worktree. 
When +/// multiple main paths exist and a linked worktree's short name alone +/// wouldn't identify which main project it belongs to, the main project +/// name is prefixed for disambiguation (e.g. `project:feature`). +/// +fn worktree_info_from_thread_paths(worktree_paths: &ThreadWorktreePaths) -> Vec { + let mut infos: Vec = Vec::new(); + let mut linked_short_names: Vec<(SharedString, SharedString)> = Vec::new(); + let mut unique_main_count = HashSet::new(); + + for (main_path, folder_path) in worktree_paths.ordered_pairs() { + unique_main_count.insert(main_path.clone()); + let is_linked = main_path != folder_path; + + if is_linked { + let short_name = linked_worktree_short_name(main_path, folder_path).unwrap_or_default(); + let project_name = main_path + .file_name() + .map(|n| SharedString::from(n.to_string_lossy().to_string())) + .unwrap_or_default(); + linked_short_names.push((short_name.clone(), project_name)); + infos.push(WorktreeInfo { + name: short_name, + full_path: SharedString::from(folder_path.display().to_string()), highlight_positions: Vec::new(), - kind: ui::WorktreeKind::Main, - }) + kind: ui::WorktreeKind::Linked, + }); } else { - let main_path = main_paths - .iter() - .find(|mp| mp.file_name() == path.file_name()) - .or(main_paths.first())?; - Some(WorktreeInfo { - name: linked_worktree_short_name(main_path, path).unwrap_or_default(), - full_path: SharedString::from(path.display().to_string()), + let Some(name) = folder_path.file_name() else { + continue; + }; + infos.push(WorktreeInfo { + name: SharedString::from(name.to_string_lossy().to_string()), + full_path: SharedString::from(folder_path.display().to_string()), highlight_positions: Vec::new(), - kind: ui::WorktreeKind::Linked, - }) + kind: ui::WorktreeKind::Main, + }); } - }) + } + + // When the group has multiple main worktree paths and the thread's + // folder paths don't all share the same short name, prefix each + // linked worktree chip with its main project name so the user knows + 
// which project it belongs to. + let all_same_name = infos.len() > 1 && infos.iter().all(|i| i.name == infos[0].name); + + if unique_main_count.len() > 1 && !all_same_name { + for (info, (_short_name, project_name)) in infos + .iter_mut() + .filter(|i| i.kind == ui::WorktreeKind::Linked) + .zip(linked_short_names.iter()) + { + info.name = SharedString::from(format!("{}:{}", project_name, info.name)); + } + } + + infos +} + +/// Shows a [`RemoteConnectionModal`] on the given workspace and establishes +/// an SSH connection. Suitable for passing to +/// [`MultiWorkspace::find_or_create_workspace`] as the `connect_remote` +/// argument. +fn connect_remote( + modal_workspace: Entity, + connection_options: RemoteConnectionOptions, + window: &mut Window, + cx: &mut Context, +) -> gpui::Task>>> { + remote_connection::connect_with_modal(&modal_workspace, connection_options, window, cx) } /// The sidebar re-derives its entire entry list from scratch on every @@ -403,8 +453,8 @@ pub struct Sidebar { /// Tracks which sidebar entry is currently active (highlighted). active_entry: Option, hovered_thread_index: Option, - collapsed_groups: HashSet, - expanded_groups: HashMap, + collapsed_groups: HashSet, + expanded_groups: HashMap, /// Updated only in response to explicit user actions (clicking a /// thread, confirming in the thread switcher, etc.) — never from /// background data changes. Used to sort the thread switcher popup. 
@@ -415,7 +465,9 @@ pub struct Sidebar { thread_last_message_sent_or_queued: HashMap>, thread_switcher: Option>, _thread_switcher_subscriptions: Vec, + pending_remote_thread_activation: Option, view: SidebarView, + restoring_tasks: HashMap>, recent_projects_popover_handle: PopoverMenuHandle, project_header_menu_ix: Option, _subscriptions: Vec, @@ -454,6 +506,34 @@ impl Sidebar { MultiWorkspaceEvent::WorkspaceRemoved(_) => { this.update_entries(cx); } + MultiWorkspaceEvent::WorktreePathAdded { + old_main_paths, + added_path, + } => { + let added_path = added_path.clone(); + ThreadMetadataStore::global(cx).update(cx, |store, cx| { + store.change_worktree_paths( + old_main_paths, + |paths| paths.add_path(&added_path, &added_path), + cx, + ); + }); + this.update_entries(cx); + } + MultiWorkspaceEvent::WorktreePathRemoved { + old_main_paths, + removed_path, + } => { + let removed_path = removed_path.clone(); + ThreadMetadataStore::global(cx).update(cx, |store, cx| { + store.change_worktree_paths( + old_main_paths, + |paths| paths.remove_main_path(&removed_path), + cx, + ); + }); + this.update_entries(cx); + } }, ) .detach(); @@ -501,7 +581,9 @@ impl Sidebar { thread_last_message_sent_or_queued: HashMap::new(), thread_switcher: None, _thread_switcher_subscriptions: Vec::new(), + pending_remote_thread_activation: None, view: SidebarView::default(), + restoring_tasks: HashMap::new(), recent_projects_popover_handle: PopoverMenuHandle::default(), project_header_menu_ix: None, _subscriptions: Vec::new(), @@ -711,28 +793,40 @@ impl Sidebar { result } - /// Finds the main worktree workspace for a project group. - fn workspace_for_group(&self, path_list: &PathList, cx: &App) -> Option> { - let mw = self.multi_workspace.upgrade()?; - mw.read(cx).workspace_for_paths(path_list, cx) - } - /// Opens a new workspace for a group that has no open workspaces. 
fn open_workspace_for_group( &mut self, - path_list: &PathList, + project_group_key: &ProjectGroupKey, window: &mut Window, cx: &mut Context, ) { let Some(multi_workspace) = self.multi_workspace.upgrade() else { return; }; + let path_list = project_group_key.path_list().clone(); + let host = project_group_key.host(); + let provisional_key = Some(project_group_key.clone()); + let active_workspace = multi_workspace.read(cx).workspace().clone(); + let modal_workspace = active_workspace.clone(); + + let task = multi_workspace.update(cx, |this, cx| { + this.find_or_create_workspace( + path_list, + host, + provisional_key, + |options, window, cx| connect_remote(active_workspace, options, window, cx), + window, + cx, + ) + }); - multi_workspace - .update(cx, |this, cx| { - this.find_or_create_local_workspace(path_list.clone(), window, cx) - }) - .detach_and_log_err(cx); + cx.spawn_in(window, async move |_this, cx| { + let result = task.await; + remote_connection::dismiss_connection_modal(&modal_workspace, cx); + result?; + anyhow::Ok(()) + }) + .detach_and_log_err(cx); } /// Rebuilds the sidebar contents from current workspace and thread state. @@ -770,15 +864,25 @@ impl Sidebar { // also appears as a "draft" (no messages yet). if let Some(active_ws) = &active_workspace { if let Some(panel) = active_ws.read(cx).panel::(cx) { - if panel.read(cx).active_thread_is_draft(cx) - || panel.read(cx).active_conversation_view().is_none() - { - let conversation_parent_id = panel - .read(cx) - .active_conversation_view() - .and_then(|cv| cv.read(cx).parent_id(cx)); - let preserving_thread = - if let Some(ActiveEntry::Thread { session_id, .. 
}) = &self.active_entry { + let active_thread_is_draft = panel.read(cx).active_thread_is_draft(cx); + let active_conversation_view = panel.read(cx).active_conversation_view(); + + if active_thread_is_draft || active_conversation_view.is_none() { + if active_conversation_view.is_none() + && let Some(session_id) = self.pending_remote_thread_activation.clone() + { + self.active_entry = Some(ActiveEntry::Thread { + session_id, + workspace: active_ws.clone(), + }); + } else { + let conversation_parent_id = + active_conversation_view.and_then(|cv| cv.read(cx).parent_id(cx)); + let preserving_thread = if let Some(ActiveEntry::Thread { + session_id, + .. + }) = &self.active_entry + { self.active_entry_workspace() == Some(active_ws) && conversation_parent_id .as_ref() @@ -786,14 +890,16 @@ impl Sidebar { } else { false }; - if !preserving_thread { - self.active_entry = Some(ActiveEntry::Draft(active_ws.clone())); + if !preserving_thread { + self.active_entry = Some(ActiveEntry::Draft(active_ws.clone())); + } } - } else if let Some(session_id) = panel - .read(cx) - .active_conversation_view() - .and_then(|cv| cv.read(cx).parent_id(cx)) + } else if let Some(session_id) = + active_conversation_view.and_then(|cv| cv.read(cx).parent_id(cx)) { + if self.pending_remote_thread_activation.as_ref() == Some(&session_id) { + self.pending_remote_thread_activation = None; + } self.active_entry = Some(ActiveEntry::Thread { session_id, workspace: active_ws.clone(), @@ -838,15 +944,29 @@ impl Sidebar { (icon, icon_from_external_svg) }; - for (group_key, group_workspaces) in mw.project_groups(cx) { - let path_list = group_key.path_list().clone(); - if path_list.paths().is_empty() { + let groups: Vec<_> = mw.project_groups(cx).collect(); + + let mut all_paths: Vec = groups + .iter() + .flat_map(|(key, _)| key.path_list().paths().iter().cloned()) + .collect(); + all_paths.sort(); + all_paths.dedup(); + let path_details = + util::disambiguate::compute_disambiguation_details(&all_paths, |path, 
detail| { + project::path_suffix(path, detail) + }); + let path_detail_map: HashMap = + all_paths.into_iter().zip(path_details).collect(); + + for (group_key, group_workspaces) in &groups { + if group_key.path_list().paths().is_empty() { continue; } - let label = group_key.display_name(); + let label = group_key.display_name(&path_detail_map); - let is_collapsed = self.collapsed_groups.contains(&path_list); + let is_collapsed = self.collapsed_groups.contains(&group_key); let should_load_threads = !is_collapsed || !query.is_empty(); let is_active = active_workspace @@ -881,40 +1001,41 @@ impl Sidebar { // Open; otherwise use Closed. let resolve_workspace = |row: &ThreadMetadata| -> ThreadEntryWorkspace { workspace_by_path_list - .get(&row.folder_paths) + .get(row.folder_paths()) .map(|ws| ThreadEntryWorkspace::Open((*ws).clone())) - .unwrap_or_else(|| ThreadEntryWorkspace::Closed(row.folder_paths.clone())) + .unwrap_or_else(|| ThreadEntryWorkspace::Closed { + folder_paths: row.folder_paths().clone(), + project_group_key: group_key.clone(), + }) }; // Build a ThreadEntry from a metadata row. 
- let make_thread_entry = |row: ThreadMetadata, - workspace: ThreadEntryWorkspace| - -> ThreadEntry { - let (icon, icon_from_external_svg) = resolve_agent_icon(&row.agent_id); - let worktrees: Vec = - worktree_info_from_thread_paths(&row.folder_paths, &group_key).collect(); - ThreadEntry { - metadata: row, - icon, - icon_from_external_svg, - status: AgentThreadStatus::default(), - workspace, - is_live: false, - is_background: false, - is_title_generating: false, - highlight_positions: Vec::new(), - worktrees, - diff_stats: DiffStats::default(), - } - }; + let make_thread_entry = + |row: ThreadMetadata, workspace: ThreadEntryWorkspace| -> ThreadEntry { + let (icon, icon_from_external_svg) = resolve_agent_icon(&row.agent_id); + let worktrees = worktree_info_from_thread_paths(&row.worktree_paths); + ThreadEntry { + metadata: row, + icon, + icon_from_external_svg, + status: AgentThreadStatus::default(), + workspace, + is_live: false, + is_background: false, + is_title_generating: false, + highlight_positions: Vec::new(), + worktrees, + diff_stats: DiffStats::default(), + } + }; - // === Main code path: one query per group via main_worktree_paths === + // Main code path: one query per group via main_worktree_paths. // The main_worktree_paths column is set on all new threads and // points to the group's canonical paths regardless of which // linked worktree the thread was opened in. for row in thread_store .read(cx) - .entries_for_main_worktree_path(&path_list) + .entries_for_main_worktree_path(group_key.path_list()) .cloned() { if !seen_session_ids.insert(row.session_id.clone()) { @@ -928,7 +1049,11 @@ impl Sidebar { // must be queried by their `folder_paths`. // Load any legacy threads for the main worktrees of this project group. 
- for row in thread_store.read(cx).entries_for_path(&path_list).cloned() { + for row in thread_store + .read(cx) + .entries_for_path(group_key.path_list()) + .cloned() + { if !seen_session_ids.insert(row.session_id.clone()) { continue; } @@ -938,7 +1063,7 @@ impl Sidebar { // Load any legacy threads for any single linked wortree of this project group. let mut linked_worktree_paths = HashSet::new(); - for workspace in &group_workspaces { + for workspace in group_workspaces { if workspace.read(cx).visible_worktrees(cx).count() != 1 { continue; } @@ -960,7 +1085,10 @@ impl Sidebar { } threads.push(make_thread_entry( row, - ThreadEntryWorkspace::Closed(worktree_path_list.clone()), + ThreadEntryWorkspace::Closed { + folder_paths: worktree_path_list.clone(), + project_group_key: group_key.clone(), + }, )); } } @@ -1033,6 +1161,20 @@ impl Sidebar { } } + let has_threads = if !threads.is_empty() { + true + } else { + let store = ThreadMetadataStore::global(cx).read(cx); + store + .entries_for_main_worktree_path(group_key.path_list()) + .next() + .is_some() + || store + .entries_for_path(group_key.path_list()) + .next() + .is_some() + }; + if !query.is_empty() { let workspace_highlight_positions = fuzzy_match_positions(&query, &label).unwrap_or_default(); @@ -1071,6 +1213,7 @@ impl Sidebar { has_running_threads, waiting_thread_count, is_active, + has_threads, }); for thread in matched_threads { @@ -1089,6 +1232,7 @@ impl Sidebar { has_running_threads, waiting_thread_count, is_active, + has_threads, }); if is_collapsed { @@ -1098,42 +1242,47 @@ impl Sidebar { // Emit a DraftThread entry when the active draft belongs to this group. 
if is_draft_for_group { if let Some(ActiveEntry::Draft(draft_ws)) = &self.active_entry { - let ws_path_list = workspace_path_list(draft_ws, cx); - let worktrees = worktree_info_from_thread_paths(&ws_path_list, &group_key); + let ws_worktree_paths = ThreadWorktreePaths::from_project( + draft_ws.read(cx).project().read(cx), + cx, + ); + let worktrees = worktree_info_from_thread_paths(&ws_worktree_paths); entries.push(ListEntry::DraftThread { - worktrees: worktrees.collect(), + key: group_key.clone(), + workspace: None, + worktrees, }); } } - // Emit NewThread entries: - // 1. When the group has zero threads (convenient affordance). - // 2. For each open linked worktree workspace in this group - // that has no threads (makes the workspace reachable and - // dismissable). - let group_has_no_threads = threads.is_empty() && !group_workspaces.is_empty(); - - if !is_draft_for_group && group_has_no_threads { - entries.push(ListEntry::NewThread { - key: group_key.clone(), - worktrees: Vec::new(), - workspace: None, - }); - } - - // Emit a NewThread for each open linked worktree workspace - // that has no threads. Skip the workspace if it's showing - // the active draft (it already has a DraftThread entry). - if !is_draft_for_group { + // Emit a DraftThread for each open linked worktree workspace + // that has no threads. Skip the specific workspace that is + // showing the active draft (it already has a DraftThread entry + // from the block above). 
+ { + let draft_ws_id = if is_draft_for_group { + self.active_entry.as_ref().and_then(|e| match e { + ActiveEntry::Draft(ws) => Some(ws.entity_id()), + _ => None, + }) + } else { + None + }; let thread_store = ThreadMetadataStore::global(cx); - for ws in &group_workspaces { - let ws_path_list = workspace_path_list(ws, cx); + for ws in group_workspaces { + if Some(ws.entity_id()) == draft_ws_id { + continue; + } + let ws_worktree_paths = + ThreadWorktreePaths::from_project(ws.read(cx).project().read(cx), cx); let has_linked_worktrees = - worktree_info_from_thread_paths(&ws_path_list, &group_key) + worktree_info_from_thread_paths(&ws_worktree_paths) + .iter() .any(|wt| wt.kind == ui::WorktreeKind::Linked); if !has_linked_worktrees { continue; } + let ws_path_list = workspace_path_list(ws, cx); let store = thread_store.read(cx); let has_threads = store.entries_for_path(&ws_path_list).next().is_some() || store @@ -1143,19 +1292,19 @@ impl Sidebar { if has_threads { continue; } - let worktrees: Vec = - worktree_info_from_thread_paths(&ws_path_list, &group_key).collect(); - entries.push(ListEntry::NewThread { + let worktrees = worktree_info_from_thread_paths(&ws_worktree_paths); + + entries.push(ListEntry::DraftThread { key: group_key.clone(), - worktrees, workspace: Some(ws.clone()), + worktrees, }); } } let total = threads.len(); - let extra_batches = self.expanded_groups.get(&path_list).copied().unwrap_or(0); + let extra_batches = self.expanded_groups.get(&group_key).copied().unwrap_or(0); let threads_to_show = DEFAULT_THREADS_SHOWN + (extra_batches * DEFAULT_THREADS_SHOWN); let count = threads_to_show.min(total); @@ -1288,6 +1437,7 @@ impl Sidebar { has_running_threads, waiting_thread_count, is_active: is_active_group, + has_threads, } => self.render_project_header( ix, false, @@ -1298,21 +1448,25 @@ impl Sidebar { *waiting_thread_count, *is_active_group, is_selected, + *has_threads, cx, ), ListEntry::Thread(thread) => self.render_thread(ix, thread, is_active, 
is_selected, cx), ListEntry::ViewMore { key, is_fully_expanded, - } => self.render_view_more(ix, key.path_list(), *is_fully_expanded, is_selected, cx), - ListEntry::DraftThread { worktrees, .. } => { - self.render_draft_thread(ix, is_active, worktrees, is_selected, cx) - } - ListEntry::NewThread { + } => self.render_view_more(ix, key, *is_fully_expanded, is_selected, cx), + ListEntry::DraftThread { key, - worktrees, workspace, - } => self.render_new_thread(ix, key, worktrees, workspace.as_ref(), is_selected, cx), + worktrees, + } => { + if workspace.is_some() { + self.render_new_thread(ix, key, worktrees, workspace.as_ref(), is_selected, cx) + } else { + self.render_draft_thread(ix, is_active, worktrees, is_selected, cx) + } + } }; if is_group_header_after_first { @@ -1362,9 +1516,9 @@ impl Sidebar { waiting_thread_count: usize, is_active: bool, is_focused: bool, + has_threads: bool, cx: &mut Context, ) -> AnyElement { - let path_list = key.path_list(); let host = key.host(); let id_prefix = if is_sticky { "sticky-" } else { "" }; @@ -1372,26 +1526,27 @@ impl Sidebar { let disclosure_id = SharedString::from(format!("disclosure-{ix}")); let group_name = SharedString::from(format!("{id_prefix}header-group-{ix}")); - let is_collapsed = self.collapsed_groups.contains(path_list); + let is_collapsed = self.collapsed_groups.contains(key); let (disclosure_icon, disclosure_tooltip) = if is_collapsed { (IconName::ChevronRight, "Expand Project") } else { (IconName::ChevronDown, "Collapse Project") }; - let has_new_thread_entry = self.contents.entries.get(ix + 1).is_some_and(|entry| { - matches!( - entry, - ListEntry::NewThread { .. } | ListEntry::DraftThread { .. } - ) - }); + let has_new_thread_entry = self + .contents + .entries + .get(ix + 1) + .is_some_and(|entry| matches!(entry, ListEntry::DraftThread { .. 
})); let show_new_thread_button = !has_new_thread_entry && !self.has_filter_query(cx); + let workspace = self.multi_workspace.upgrade().and_then(|mw| { + mw.read(cx) + .workspace_for_paths(key.path_list(), key.host().as_ref(), cx) + }); - let workspace = self.workspace_for_group(path_list, cx); - - let path_list_for_toggle = path_list.clone(); - let path_list_for_collapse = path_list.clone(); - let view_more_expanded = self.expanded_groups.contains_key(path_list); + let key_for_toggle = key.clone(); + let key_for_collapse = key.clone(); + let view_more_expanded = self.expanded_groups.contains_key(key); let label = if highlight_positions.is_empty() { Label::new(label.clone()) @@ -1408,6 +1563,8 @@ impl Sidebar { .element_active .blend(color.element_background.opacity(0.2)); + let is_ellipsis_menu_open = self.project_header_menu_ix == Some(ix); + h_flex() .id(id) .group(&group_name) @@ -1426,7 +1583,6 @@ impl Sidebar { .justify_between() .child( h_flex() - .cursor_pointer() .relative() .min_w_0() .w_full() @@ -1439,7 +1595,7 @@ impl Sidebar { .tooltip(Tooltip::text(disclosure_tooltip)) .on_click(cx.listener(move |this, _, window, cx| { this.selection = None; - this.toggle_collapse(&path_list_for_toggle, window, cx); + this.toggle_collapse(&key_for_toggle, window, cx); })), ) .child(label) @@ -1479,13 +1635,13 @@ impl Sidebar { ) .child( h_flex() - .when(self.project_header_menu_ix != Some(ix), |this| { - this.visible_on_hover(group_name) + .when(!is_ellipsis_menu_open, |this| { + this.visible_on_hover(&group_name) }) .on_mouse_down(gpui::MouseButton::Left, |_, _, cx| { cx.stop_propagation(); }) - .child(self.render_project_header_menu(ix, id_prefix, key, cx)) + .child(self.render_project_header_ellipsis_menu(ix, id_prefix, key, cx)) .when(view_more_expanded && !is_collapsed, |this| { this.child( IconButton::new( @@ -1497,10 +1653,10 @@ impl Sidebar { .icon_size(IconSize::Small) .tooltip(Tooltip::text("Collapse Displayed Threads")) .on_click(cx.listener({ - let 
path_list_for_collapse = path_list_for_collapse.clone(); + let key_for_collapse = key_for_collapse.clone(); move |this, _, _window, cx| { this.selection = None; - this.expanded_groups.remove(&path_list_for_collapse); + this.expanded_groups.remove(&key_for_collapse); this.serialize(cx); this.update_entries(cx); } @@ -1510,7 +1666,8 @@ impl Sidebar { .when_some( workspace.filter(|_| show_new_thread_button), |this, workspace| { - let path_list = path_list.clone(); + let key = key.clone(); + let focus_handle = self.focus_handle.clone(); this.child( IconButton::new( SharedString::from(format!( @@ -1519,10 +1676,17 @@ impl Sidebar { IconName::Plus, ) .icon_size(IconSize::Small) - .tooltip(Tooltip::text("New Thread")) + .tooltip(move |_, cx| { + Tooltip::for_action_in( + "New Thread", + &NewThread, + &focus_handle, + cx, + ) + }) .on_click(cx.listener( move |this, _, window, cx| { - this.collapsed_groups.remove(&path_list); + this.collapsed_groups.remove(&key); this.selection = None; this.create_new_thread(&workspace, window, cx); }, @@ -1532,32 +1696,42 @@ impl Sidebar { ), ) .map(|this| { - let path_list = path_list.clone(); - this.cursor_pointer() - .when(!is_active, |this| this.hover(|s| s.bg(hover_color))) - .tooltip(Tooltip::text("Open Workspace")) - .on_click(cx.listener(move |this, _, window, cx| { - if let Some(workspace) = this.workspace_for_group(&path_list, cx) { - this.active_entry = Some(ActiveEntry::Draft(workspace.clone())); - if let Some(multi_workspace) = this.multi_workspace.upgrade() { - multi_workspace.update(cx, |multi_workspace, cx| { - multi_workspace.activate(workspace.clone(), window, cx); - }); - } - if AgentPanel::is_visible(&workspace, cx) { - workspace.update(cx, |workspace, cx| { - workspace.focus_panel::(window, cx); - }); + if !has_threads && is_active { + this + } else { + let key = key.clone(); + this.cursor_pointer() + .when(!is_active, |this| this.hover(|s| s.bg(hover_color))) + .tooltip(Tooltip::text("Open Workspace")) + 
.on_click(cx.listener(move |this, _, window, cx| { + if let Some(workspace) = this.multi_workspace.upgrade().and_then(|mw| { + mw.read(cx).workspace_for_paths( + key.path_list(), + key.host().as_ref(), + cx, + ) + }) { + this.active_entry = Some(ActiveEntry::Draft(workspace.clone())); + if let Some(multi_workspace) = this.multi_workspace.upgrade() { + multi_workspace.update(cx, |multi_workspace, cx| { + multi_workspace.activate(workspace.clone(), window, cx); + }); + } + if AgentPanel::is_visible(&workspace, cx) { + workspace.update(cx, |workspace, cx| { + workspace.focus_panel::(window, cx); + }); + } + } else { + this.open_workspace_for_group(&key, window, cx); } - } else { - this.open_workspace_for_group(&path_list, window, cx); - } - })) + })) + } }) .into_any_element() } - fn render_project_header_menu( + fn render_project_header_ellipsis_menu( &self, ix: usize, id_prefix: &str, @@ -1583,72 +1757,79 @@ impl Sidebar { let multi_workspace = multi_workspace.clone(); let project_group_key = project_group_key.clone(); - let menu = ContextMenu::build_persistent(window, cx, move |menu, _window, _cx| { - let mut menu = menu - .header("Project Folders") - .end_slot_action(Box::new(menu::EndSlot)); + let menu = + ContextMenu::build_persistent(window, cx, move |menu, _window, menu_cx| { + let weak_menu = menu_cx.weak_entity(); + let mut menu = menu + .header("Project Folders") + .end_slot_action(Box::new(menu::EndSlot)); - for path in project_group_key.path_list().paths() { - let Some(name) = path.file_name() else { - continue; - }; - let name: SharedString = name.to_string_lossy().into_owned().into(); - let path = path.clone(); - let project_group_key = project_group_key.clone(); - let multi_workspace = multi_workspace.clone(); - menu = menu.entry_with_end_slot_on_hover( - name.clone(), - None, - |_, _| {}, - IconName::Close, - "Remove Folder".into(), - move |_window, cx| { - multi_workspace - .update(cx, |multi_workspace, cx| { - 
multi_workspace.remove_folder_from_project_group( - &project_group_key, - &path, - cx, - ); - }) - .ok(); + for path in project_group_key.path_list().paths() { + let Some(name) = path.file_name() else { + continue; + }; + let name: SharedString = name.to_string_lossy().into_owned().into(); + let path = path.clone(); + let project_group_key = project_group_key.clone(); + let multi_workspace = multi_workspace.clone(); + let weak_menu = weak_menu.clone(); + menu = menu.entry_with_end_slot_on_hover( + name.clone(), + None, + |_, _| {}, + IconName::Close, + "Remove Folder".into(), + move |_window, cx| { + multi_workspace + .update(cx, |multi_workspace, cx| { + multi_workspace.remove_folder_from_project_group( + &project_group_key, + &path, + cx, + ); + }) + .ok(); + weak_menu.update(cx, |_, cx| cx.emit(DismissEvent)).ok(); + }, + ); + } + + let menu = menu.separator().entry( + "Add Folder to Project", + Some(Box::new(AddFolderToProject)), + { + let project_group_key = project_group_key.clone(); + let multi_workspace = multi_workspace.clone(); + let weak_menu = weak_menu.clone(); + move |window, cx| { + multi_workspace + .update(cx, |multi_workspace, cx| { + multi_workspace.prompt_to_add_folders_to_project_group( + &project_group_key, + window, + cx, + ); + }) + .ok(); + weak_menu.update(cx, |_, cx| cx.emit(DismissEvent)).ok(); + } }, ); - } - let menu = menu.separator().entry( - "Add Folder to Project", - Some(Box::new(AddFolderToProject)), - { - let project_group_key = project_group_key.clone(); - let multi_workspace = multi_workspace.clone(); - move |window, cx| { + let project_group_key = project_group_key.clone(); + let multi_workspace = multi_workspace.clone(); + menu.separator() + .entry("Remove Project", None, move |window, cx| { multi_workspace .update(cx, |multi_workspace, cx| { - multi_workspace.prompt_to_add_folders_to_project_group( - &project_group_key, - window, - cx, - ); + multi_workspace + .remove_project_group(&project_group_key, window, cx) + 
.detach_and_log_err(cx); }) .ok(); - } - }, - ); - - let project_group_key = project_group_key.clone(); - let multi_workspace = multi_workspace.clone(); - menu.separator() - .entry("Remove Project", None, move |window, cx| { - multi_workspace - .update(cx, |multi_workspace, cx| { - multi_workspace - .remove_project_group(&project_group_key, window, cx) - .detach_and_log_err(cx); - }) - .ok(); - }) - }); + weak_menu.update(cx, |_, cx| cx.emit(DismissEvent)).ok(); + }) + }); let this = this.clone(); window @@ -1706,6 +1887,7 @@ impl Sidebar { has_running_threads, waiting_thread_count, is_active, + has_threads, } = self.contents.entries.get(header_idx)? else { return None; @@ -1723,6 +1905,7 @@ impl Sidebar { *has_running_threads, *waiting_thread_count, *is_active, + *has_threads, is_selected, cx, ); @@ -1763,14 +1946,14 @@ impl Sidebar { fn toggle_collapse( &mut self, - path_list: &PathList, + project_group_key: &ProjectGroupKey, _window: &mut Window, cx: &mut Context, ) { - if self.collapsed_groups.contains(path_list) { - self.collapsed_groups.remove(path_list); + if self.collapsed_groups.contains(project_group_key) { + self.collapsed_groups.remove(project_group_key); } else { - self.collapsed_groups.insert(path_list.clone()); + self.collapsed_groups.insert(project_group_key.clone()); } self.serialize(cx); self.update_entries(cx); @@ -1944,8 +2127,8 @@ impl Sidebar { match entry { ListEntry::ProjectHeader { key, .. 
} => { - let path_list = key.path_list().clone(); - self.toggle_collapse(&path_list, window, cx); + let key = key.clone(); + self.toggle_collapse(&key, window, cx); } ListEntry::Thread(thread) => { let metadata = thread.metadata.clone(); @@ -1954,10 +2137,16 @@ impl Sidebar { let workspace = workspace.clone(); self.activate_thread(metadata, &workspace, false, window, cx); } - ThreadEntryWorkspace::Closed(path_list) => { + ThreadEntryWorkspace::Closed { + folder_paths, + project_group_key, + } => { + let folder_paths = folder_paths.clone(); + let project_group_key = project_group_key.clone(); self.open_workspace_and_activate_thread( metadata, - path_list.clone(), + folder_paths, + &project_group_key, window, cx, ); @@ -1969,25 +2158,25 @@ impl Sidebar { is_fully_expanded, .. } => { - let path_list = key.path_list().clone(); + let key = key.clone(); if *is_fully_expanded { - self.reset_thread_group_expansion(&path_list, cx); + self.reset_thread_group_expansion(&key, cx); } else { - self.expand_thread_group(&path_list, cx); + self.expand_thread_group(&key, cx); } } - ListEntry::DraftThread { .. } => { - // Already active — nothing to do. - } - ListEntry::NewThread { key, workspace, .. } => { - let path_list = key.path_list().clone(); - if let Some(workspace) = workspace - .clone() - .or_else(|| self.workspace_for_group(&path_list, cx)) - { + ListEntry::DraftThread { key, workspace, .. 
} => { + let key = key.clone(); + let workspace = workspace.clone(); + if let Some(workspace) = workspace.or_else(|| { + self.multi_workspace.upgrade().and_then(|mw| { + mw.read(cx) + .workspace_for_paths(key.path_list(), key.host().as_ref(), cx) + }) + }) { self.create_new_thread(&workspace, window, cx); } else { - self.open_workspace_for_group(&path_list, window, cx); + self.open_workspace_for_group(&key, window, cx); } } } @@ -2042,7 +2231,7 @@ impl Sidebar { panel.load_agent_thread( Agent::from(metadata.agent_id.clone()), metadata.session_id.clone(), - Some(metadata.folder_paths.clone()), + Some(metadata.folder_paths().clone()), Some(metadata.title.clone()), focus, window, @@ -2153,7 +2342,8 @@ impl Sidebar { fn open_workspace_and_activate_thread( &mut self, metadata: ThreadMetadata, - path_list: PathList, + folder_paths: PathList, + project_group_key: &ProjectGroupKey, window: &mut Window, cx: &mut Context, ) { @@ -2161,12 +2351,44 @@ impl Sidebar { return; }; + let pending_session_id = metadata.session_id.clone(); + let is_remote = project_group_key.host().is_some(); + if is_remote { + self.pending_remote_thread_activation = Some(pending_session_id.clone()); + } + + let host = project_group_key.host(); + let provisional_key = Some(project_group_key.clone()); + let active_workspace = multi_workspace.read(cx).workspace().clone(); + let modal_workspace = active_workspace.clone(); + let open_task = multi_workspace.update(cx, |this, cx| { - this.find_or_create_local_workspace(path_list, window, cx) + this.find_or_create_workspace( + folder_paths, + host, + provisional_key, + |options, window, cx| connect_remote(active_workspace, options, window, cx), + window, + cx, + ) }); cx.spawn_in(window, async move |this, cx| { - let workspace = open_task.await?; + let result = open_task.await; + // Dismiss the modal as soon as the open attempt completes so + // failures or cancellations do not leave a stale connection modal behind. 
+ remote_connection::dismiss_connection_modal(&modal_workspace, cx); + + if result.is_err() || is_remote { + this.update(cx, |this, _cx| { + if this.pending_remote_thread_activation.as_ref() == Some(&pending_session_id) { + this.pending_remote_thread_activation = None; + } + }) + .ok(); + } + + let workspace = result?; this.update_in(cx, |this, window, cx| { this.activate_thread(metadata, &workspace, false, window, cx); })?; @@ -2201,31 +2423,169 @@ impl Sidebar { window: &mut Window, cx: &mut Context, ) { - ThreadMetadataStore::global(cx) - .update(cx, |store, cx| store.unarchive(&metadata.session_id, cx)); + let session_id = metadata.session_id.clone(); + let weak_archive_view = match &self.view { + SidebarView::Archive(view) => Some(view.downgrade()), + _ => None, + }; + + if metadata.folder_paths().paths().is_empty() { + ThreadMetadataStore::global(cx) + .update(cx, |store, cx| store.unarchive(&session_id, cx)); + + let active_workspace = self + .multi_workspace + .upgrade() + .map(|w| w.read(cx).workspace().clone()); - if !metadata.folder_paths.paths().is_empty() { - let path_list = metadata.folder_paths.clone(); - if let Some(workspace) = self.find_current_workspace_for_path_list(&path_list, cx) { + if let Some(workspace) = active_workspace { self.activate_thread_locally(&metadata, &workspace, false, window, cx); - } else if let Some((target_window, workspace)) = - self.find_open_workspace_for_path_list(&path_list, cx) - { - self.activate_thread_in_other_window(metadata, workspace, target_window, cx); } else { - self.open_workspace_and_activate_thread(metadata, path_list, window, cx); + let path_list = metadata.folder_paths().clone(); + if let Some((target_window, workspace)) = + self.find_open_workspace_for_path_list(&path_list, cx) + { + self.activate_thread_in_other_window(metadata, workspace, target_window, cx); + } else { + let key = ProjectGroupKey::new(None, path_list.clone()); + self.open_workspace_and_activate_thread(metadata, path_list, &key, 
window, cx); + } } + self.show_thread_list(window, cx); return; } - let active_workspace = self - .multi_workspace - .upgrade() - .map(|w| w.read(cx).workspace().clone()); + let store = ThreadMetadataStore::global(cx); + let task = store + .read(cx) + .get_archived_worktrees_for_thread(session_id.0.to_string(), cx); + let path_list = metadata.folder_paths().clone(); - if let Some(workspace) = active_workspace { - self.activate_thread_locally(&metadata, &workspace, false, window, cx); - } + let task_session_id = session_id.clone(); + let restore_task = cx.spawn_in(window, async move |this, cx| { + let result: anyhow::Result<()> = async { + let archived_worktrees = task.await?; + + if archived_worktrees.is_empty() { + this.update_in(cx, |this, window, cx| { + this.restoring_tasks.remove(&session_id); + ThreadMetadataStore::global(cx) + .update(cx, |store, cx| store.unarchive(&session_id, cx)); + + if let Some(workspace) = + this.find_current_workspace_for_path_list(&path_list, cx) + { + this.activate_thread_locally(&metadata, &workspace, false, window, cx); + } else if let Some((target_window, workspace)) = + this.find_open_workspace_for_path_list(&path_list, cx) + { + this.activate_thread_in_other_window( + metadata, + workspace, + target_window, + cx, + ); + } else { + let key = ProjectGroupKey::new(None, path_list.clone()); + this.open_workspace_and_activate_thread( + metadata, path_list, &key, window, cx, + ); + } + this.show_thread_list(window, cx); + })?; + return anyhow::Ok(()); + } + + let mut path_replacements: Vec<(PathBuf, PathBuf)> = Vec::new(); + for row in &archived_worktrees { + match thread_worktree_archive::restore_worktree_via_git(row, &mut *cx).await { + Ok(restored_path) => { + thread_worktree_archive::cleanup_archived_worktree_record( + row, &mut *cx, + ) + .await; + path_replacements.push((row.worktree_path.clone(), restored_path)); + } + Err(error) => { + log::error!("Failed to restore worktree: {error:#}"); + this.update_in(cx, |this, _window, 
cx| { + this.restoring_tasks.remove(&session_id); + if let Some(weak_archive_view) = &weak_archive_view { + weak_archive_view + .update(cx, |view, cx| { + view.clear_restoring(&session_id, cx); + }) + .ok(); + } + + if let Some(multi_workspace) = this.multi_workspace.upgrade() { + let workspace = multi_workspace.read(cx).workspace().clone(); + workspace.update(cx, |workspace, cx| { + struct RestoreWorktreeErrorToast; + workspace.show_toast( + Toast::new( + NotificationId::unique::( + ), + format!("Failed to restore worktree: {error:#}"), + ) + .autohide(), + cx, + ); + }); + } + }) + .ok(); + return anyhow::Ok(()); + } + } + } + + if !path_replacements.is_empty() { + cx.update(|_window, cx| { + store.update(cx, |store, cx| { + store.update_restored_worktree_paths( + &session_id, + &path_replacements, + cx, + ); + }); + })?; + + let updated_metadata = + cx.update(|_window, cx| store.read(cx).entry(&session_id).cloned())?; + + if let Some(updated_metadata) = updated_metadata { + let new_paths = updated_metadata.folder_paths().clone(); + + cx.update(|_window, cx| { + store.update(cx, |store, cx| { + store.unarchive(&updated_metadata.session_id, cx); + }); + })?; + + this.update_in(cx, |this, window, cx| { + this.restoring_tasks.remove(&session_id); + let key = ProjectGroupKey::new(None, new_paths.clone()); + this.open_workspace_and_activate_thread( + updated_metadata, + new_paths, + &key, + window, + cx, + ); + this.show_thread_list(window, cx); + })?; + } + } + + anyhow::Ok(()) + } + .await; + if let Err(error) = result { + log::error!("{error:#}"); + } + }); + self.restoring_tasks.insert(task_session_id, restore_task); } fn expand_selected_entry( @@ -2238,9 +2598,8 @@ impl Sidebar { match self.contents.entries.get(ix) { Some(ListEntry::ProjectHeader { key, .. 
}) => { - if self.collapsed_groups.contains(key.path_list()) { - let path_list = key.path_list().clone(); - self.collapsed_groups.remove(&path_list); + if self.collapsed_groups.contains(key) { + self.collapsed_groups.remove(key); self.update_entries(cx); } else if ix + 1 < self.contents.entries.len() { self.selection = Some(ix + 1); @@ -2262,22 +2621,19 @@ impl Sidebar { match self.contents.entries.get(ix) { Some(ListEntry::ProjectHeader { key, .. }) => { - if !self.collapsed_groups.contains(key.path_list()) { - self.collapsed_groups.insert(key.path_list().clone()); + if !self.collapsed_groups.contains(key) { + self.collapsed_groups.insert(key.clone()); self.update_entries(cx); } } Some( - ListEntry::Thread(_) - | ListEntry::ViewMore { .. } - | ListEntry::NewThread { .. } - | ListEntry::DraftThread { .. }, + ListEntry::Thread(_) | ListEntry::ViewMore { .. } | ListEntry::DraftThread { .. }, ) => { for i in (0..ix).rev() { if let Some(ListEntry::ProjectHeader { key, .. }) = self.contents.entries.get(i) { self.selection = Some(i); - self.collapsed_groups.insert(key.path_list().clone()); + self.collapsed_groups.insert(key.clone()); self.update_entries(cx); break; } @@ -2299,10 +2655,7 @@ impl Sidebar { let header_ix = match self.contents.entries.get(ix) { Some(ListEntry::ProjectHeader { .. }) => Some(ix), Some( - ListEntry::Thread(_) - | ListEntry::ViewMore { .. } - | ListEntry::NewThread { .. } - | ListEntry::DraftThread { .. }, + ListEntry::Thread(_) | ListEntry::ViewMore { .. } | ListEntry::DraftThread { .. }, ) => (0..ix).rev().find(|&i| { matches!( self.contents.entries.get(i), @@ -2315,12 +2668,11 @@ impl Sidebar { if let Some(header_ix) = header_ix { if let Some(ListEntry::ProjectHeader { key, .. 
}) = self.contents.entries.get(header_ix) { - let path_list = key.path_list(); - if self.collapsed_groups.contains(path_list) { - self.collapsed_groups.remove(path_list); + if self.collapsed_groups.contains(key) { + self.collapsed_groups.remove(key); } else { self.selection = Some(header_ix); - self.collapsed_groups.insert(path_list.clone()); + self.collapsed_groups.insert(key.clone()); } self.update_entries(cx); } @@ -2335,7 +2687,7 @@ impl Sidebar { ) { for entry in &self.contents.entries { if let ListEntry::ProjectHeader { key, .. } = entry { - self.collapsed_groups.insert(key.path_list().clone()); + self.collapsed_groups.insert(key.clone()); } } self.update_entries(cx); @@ -2374,10 +2726,52 @@ impl Sidebar { window: &mut Window, cx: &mut Context, ) { - let thread_folder_paths = ThreadMetadataStore::global(cx) + let metadata = ThreadMetadataStore::global(cx) .read(cx) .entry(session_id) - .map(|m| m.folder_paths.clone()); + .cloned(); + let thread_folder_paths = metadata.as_ref().map(|m| m.folder_paths().clone()); + + // Compute which linked worktree roots should be archived from disk if + // this thread is archived. This must happen before we remove any + // workspace from the MultiWorkspace, because `build_root_plan` needs + // the currently open workspaces in order to find the affected projects + // and repository handles for each linked worktree. 
+ let roots_to_archive = metadata + .as_ref() + .map(|metadata| { + let mut workspaces = self + .multi_workspace + .upgrade() + .map(|multi_workspace| { + multi_workspace + .read(cx) + .workspaces() + .cloned() + .collect::>() + }) + .unwrap_or_default(); + for workspace in thread_worktree_archive::all_open_workspaces(cx) { + if !workspaces.contains(&workspace) { + workspaces.push(workspace); + } + } + metadata + .folder_paths() + .ordered_paths() + .filter_map(|path| { + thread_worktree_archive::build_root_plan(path, &workspaces, cx) + }) + .filter(|plan| { + !thread_worktree_archive::path_is_referenced_by_other_unarchived_threads( + session_id, + &plan.root_path, + cx, + ) + }) + .collect::>() + }) + .unwrap_or_default(); // Find the neighbor thread in the sidebar (by display position). // Look below first, then above, for the nearest thread that isn't @@ -2397,7 +2791,9 @@ impl Sidebar { ThreadEntryWorkspace::Open(ws) => { PathList::new(&ws.read(cx).root_paths(cx)) } - ThreadEntryWorkspace::Closed(paths) => paths.clone(), + ThreadEntryWorkspace::Closed { folder_paths, .. } => { + folder_paths.clone() + } }; Some((t.metadata.clone(), workspace_paths)) } @@ -2422,9 +2818,12 @@ impl Sidebar { } let multi_workspace = self.multi_workspace.upgrade()?; + // Thread metadata doesn't carry host info yet, so we pass + // `None` here. This may match a local workspace with the same + // paths instead of the intended remote one. let workspace = multi_workspace .read(cx) - .workspace_for_paths(folder_paths, cx)?; + .workspace_for_paths(folder_paths, None, cx)?; // Don't remove the main worktree workspace — the project // header always provides access to it. 
@@ -2466,10 +2865,13 @@ impl Sidebar { let removed = remove_task.await?; if removed { this.update_in(cx, |this, window, cx| { + let in_flight = + this.start_archive_worktree_task(&session_id, roots_to_archive, cx); this.archive_and_activate( &session_id, neighbor_metadata.as_ref(), thread_folder_paths.as_ref(), + in_flight, window, cx, ); @@ -2481,10 +2883,12 @@ impl Sidebar { } else { // Simple case: no workspace removal needed. let neighbor_metadata = neighbor.map(|(metadata, _)| metadata); + let in_flight = self.start_archive_worktree_task(session_id, roots_to_archive, cx); self.archive_and_activate( session_id, neighbor_metadata.as_ref(), thread_folder_paths.as_ref(), + in_flight, window, cx, ); @@ -2492,16 +2896,32 @@ impl Sidebar { } /// Archive a thread and activate the nearest neighbor or a draft. + /// + /// IMPORTANT: when activating a neighbor or creating a fallback draft, + /// this method also activates the target workspace in the MultiWorkspace. + /// This is critical because `rebuild_contents` derives the active + /// workspace from `mw.workspace()`. If the linked worktree workspace is + /// still active after archiving its last thread, `rebuild_contents` sees + /// the threadless linked worktree as active and emits a spurious + /// "+ New Thread" entry with the worktree chip — keeping the worktree + /// alive and preventing disk cleanup. + /// + /// When `in_flight_archive` is present, it is the background task that + /// persists the linked worktree's git state and deletes it from disk. + /// We attach it to the metadata store at the same time we mark the thread + /// archived so failures can automatically unarchive the thread and user- + /// initiated unarchive can cancel the task. 
fn archive_and_activate( &mut self, session_id: &acp::SessionId, neighbor: Option<&ThreadMetadata>, thread_folder_paths: Option<&PathList>, + in_flight_archive: Option<(Task<()>, smol::channel::Sender<()>)>, window: &mut Window, cx: &mut Context, ) { ThreadMetadataStore::global(cx).update(cx, |store, cx| { - store.archive(session_id, cx); + store.archive(session_id, in_flight_archive, cx); }); let is_active = self @@ -2517,7 +2937,7 @@ impl Sidebar { if let Some(workspace) = self .multi_workspace .upgrade() - .and_then(|mw| mw.read(cx).workspace_for_paths(folder_paths, cx)) + .and_then(|mw| mw.read(cx).workspace_for_paths(folder_paths, None, cx)) { if let Some(panel) = workspace.read(cx).panel::(cx) { let panel_shows_archived = panel @@ -2537,22 +2957,38 @@ impl Sidebar { } // Try to activate the neighbor thread. If its workspace is open, - // tell the panel to load it. `rebuild_contents` will reconcile - // `active_entry` once the thread finishes loading. + // tell the panel to load it and activate that workspace. + // `rebuild_contents` will reconcile `active_entry` once the thread + // finishes loading. if let Some(metadata) = neighbor { - if let Some(workspace) = self - .multi_workspace - .upgrade() - .and_then(|mw| mw.read(cx).workspace_for_paths(&metadata.folder_paths, cx)) - { + if let Some(workspace) = self.multi_workspace.upgrade().and_then(|mw| { + mw.read(cx) + .workspace_for_paths(metadata.folder_paths(), None, cx) + }) { + self.activate_workspace(&workspace, window, cx); Self::load_agent_thread_in_workspace(&workspace, metadata, true, window, cx); return; } } // No neighbor or its workspace isn't open — fall back to a new - // draft on the active workspace so the user has something to work with. - if let Some(workspace) = self.active_entry_workspace().cloned() { + // draft. Use the group workspace (main project) rather than the + // active entry workspace, which may be a linked worktree that is + // about to be cleaned up. 
+ let fallback_workspace = thread_folder_paths + .and_then(|folder_paths| { + let mw = self.multi_workspace.upgrade()?; + let mw = mw.read(cx); + // Find the group's main workspace (whose root paths match + // the project group key, not the thread's folder paths). + let thread_workspace = mw.workspace_for_paths(folder_paths, None, cx)?; + let group_key = thread_workspace.read(cx).project_group_key(cx); + mw.workspace_for_paths(group_key.path_list(), None, cx) + }) + .or_else(|| self.active_entry_workspace().cloned()); + + if let Some(workspace) = fallback_workspace { + self.activate_workspace(&workspace, window, cx); if let Some(panel) = workspace.read(cx).panel::(cx) { panel.update(cx, |panel, cx| { panel.new_thread(&NewThread, window, cx); @@ -2561,6 +2997,108 @@ impl Sidebar { } } + fn start_archive_worktree_task( + &self, + session_id: &acp::SessionId, + roots: Vec, + cx: &mut Context, + ) -> Option<(Task<()>, smol::channel::Sender<()>)> { + if roots.is_empty() { + return None; + } + + let (cancel_tx, cancel_rx) = smol::channel::bounded::<()>(1); + let session_id = session_id.clone(); + let task = cx.spawn(async move |_this, cx| { + match Self::archive_worktree_roots(roots, cancel_rx, cx).await { + Ok(ArchiveWorktreeOutcome::Success) => { + cx.update(|cx| { + ThreadMetadataStore::global(cx).update(cx, |store, _cx| { + store.cleanup_completed_archive(&session_id); + }); + }); + } + Ok(ArchiveWorktreeOutcome::Cancelled) => {} + Err(error) => { + log::error!("Failed to archive worktree: {error:#}"); + cx.update(|cx| { + ThreadMetadataStore::global(cx).update(cx, |store, cx| { + store.unarchive(&session_id, cx); + }); + }); + } + } + }); + + Some((task, cancel_tx)) + } + + async fn archive_worktree_roots( + roots: Vec, + cancel_rx: smol::channel::Receiver<()>, + cx: &mut gpui::AsyncApp, + ) -> anyhow::Result { + let mut completed_persists: Vec<(i64, thread_worktree_archive::RootPlan)> = Vec::new(); + + for root in &roots { + if cancel_rx.is_closed() { + for &(id, 
ref completed_root) in completed_persists.iter().rev() { + thread_worktree_archive::rollback_persist(id, completed_root, cx).await; + } + return Ok(ArchiveWorktreeOutcome::Cancelled); + } + + if root.worktree_repo.is_some() { + match thread_worktree_archive::persist_worktree_state(root, cx).await { + Ok(id) => { + completed_persists.push((id, root.clone())); + } + Err(error) => { + for &(id, ref completed_root) in completed_persists.iter().rev() { + thread_worktree_archive::rollback_persist(id, completed_root, cx).await; + } + return Err(error); + } + } + } + + if cancel_rx.is_closed() { + for &(id, ref completed_root) in completed_persists.iter().rev() { + thread_worktree_archive::rollback_persist(id, completed_root, cx).await; + } + return Ok(ArchiveWorktreeOutcome::Cancelled); + } + + if let Err(error) = thread_worktree_archive::remove_root(root.clone(), cx).await { + if let Some(&(id, ref completed_root)) = completed_persists.last() { + if completed_root.root_path == root.root_path { + thread_worktree_archive::rollback_persist(id, completed_root, cx).await; + completed_persists.pop(); + } + } + for &(id, ref completed_root) in completed_persists.iter().rev() { + thread_worktree_archive::rollback_persist(id, completed_root, cx).await; + } + return Err(error); + } + } + + Ok(ArchiveWorktreeOutcome::Success) + } + + fn activate_workspace( + &self, + workspace: &Entity, + window: &mut Window, + cx: &mut Context, + ) { + if let Some(multi_workspace) = self.multi_workspace.upgrade() { + multi_workspace.update(cx, |mw, cx| { + mw.activate(workspace.clone(), window, cx); + }); + } + } + fn remove_selected_thread( &mut self, _: &RemoveSelectedThread, @@ -2581,7 +3119,7 @@ impl Sidebar { let session_id = thread.metadata.session_id.clone(); self.archive_thread(&session_id, window, cx); } - Some(ListEntry::NewThread { + Some(ListEntry::DraftThread { workspace: Some(workspace), .. 
}) => { @@ -2623,7 +3161,7 @@ impl Sidebar { fn mru_threads_for_switcher(&self, cx: &App) -> Vec { let mut current_header_label: Option = None; - let mut current_header_path_list: Option = None; + let mut current_header_key: Option = None; let mut entries: Vec = self .contents .entries @@ -2631,15 +3169,23 @@ impl Sidebar { .filter_map(|entry| match entry { ListEntry::ProjectHeader { label, key, .. } => { current_header_label = Some(label.clone()); - current_header_path_list = Some(key.path_list().clone()); + current_header_key = Some(key.clone()); None } ListEntry::Thread(thread) => { let workspace = match &thread.workspace { ThreadEntryWorkspace::Open(workspace) => Some(workspace.clone()), - ThreadEntryWorkspace::Closed(_) => current_header_path_list - .as_ref() - .and_then(|pl| self.workspace_for_group(pl, cx)), + ThreadEntryWorkspace::Closed { .. } => { + current_header_key.as_ref().and_then(|key| { + self.multi_workspace.upgrade().and_then(|mw| { + mw.read(cx).workspace_for_paths( + key.path_list(), + key.host().as_ref(), + cx, + ) + }) + }) + } }?; let notified = self .contents @@ -2927,10 +3473,13 @@ impl Sidebar { .unwrap_or(thread.metadata.updated_at), ); + let is_remote = thread.workspace.is_remote(cx); + ThreadItem::new(id, title) .base_bg(sidebar_bg) .icon(thread.icon) .status(thread.status) + .is_remote(is_remote) .when_some(thread.icon_from_external_svg.clone(), |this, svg| { this.custom_icon_from_external_svg(svg) }) @@ -3013,10 +3562,14 @@ impl Sidebar { ThreadEntryWorkspace::Open(workspace) => { this.activate_thread(metadata.clone(), workspace, false, window, cx); } - ThreadEntryWorkspace::Closed(path_list) => { + ThreadEntryWorkspace::Closed { + folder_paths, + project_group_key, + } => { this.open_workspace_and_activate_thread( metadata.clone(), - path_list.clone(), + folder_paths.clone(), + project_group_key, window, cx, ); @@ -3096,12 +3649,12 @@ impl Sidebar { fn render_view_more( &self, ix: usize, - path_list: &PathList, + key: 
&ProjectGroupKey, is_fully_expanded: bool, is_selected: bool, cx: &mut Context, ) -> AnyElement { - let path_list = path_list.clone(); + let key = key.clone(); let id = SharedString::from(format!("view-more-{}", ix)); let label: SharedString = if is_fully_expanded { @@ -3117,9 +3670,9 @@ impl Sidebar { .on_click(cx.listener(move |this, _, _window, cx| { this.selection = None; if is_fully_expanded { - this.reset_thread_group_expansion(&path_list, cx); + this.reset_thread_group_expansion(&key, cx); } else { - this.expand_thread_group(&path_list, cx); + this.expand_thread_group(&key, cx); } })) .into_any_element() @@ -3142,7 +3695,13 @@ impl Sidebar { .find(|&&header_ix| header_ix <= selected_ix) .and_then(|&header_ix| match &self.contents.entries[header_ix] { ListEntry::ProjectHeader { key, .. } => { - self.workspace_for_group(key.path_list(), cx) + self.multi_workspace.upgrade().and_then(|mw| { + mw.read(cx).workspace_for_paths( + key.path_list(), + key.host().as_ref(), + cx, + ) + }) } _ => None, }) @@ -3188,8 +3747,8 @@ impl Sidebar { fn active_project_group_key(&self, cx: &App) -> Option { let multi_workspace = self.multi_workspace.upgrade()?; - let mw = multi_workspace.read(cx); - Some(mw.workspace().read(cx).project_group_key(cx)) + let multi_workspace = multi_workspace.read(cx); + Some(multi_workspace.project_group_key_for_workspace(multi_workspace.workspace(), cx)) } fn active_project_header_position(&self, cx: &App) -> Option { @@ -3233,18 +3792,21 @@ impl Sidebar { else { return; }; - let path_list = key.path_list().clone(); + let key = key.clone(); // Uncollapse the target group so that threads become visible. 
- self.collapsed_groups.remove(&path_list); + self.collapsed_groups.remove(&key); - if let Some(workspace) = self.workspace_for_group(&path_list, cx) { + if let Some(workspace) = self.multi_workspace.upgrade().and_then(|mw| { + mw.read(cx) + .workspace_for_paths(key.path_list(), key.host().as_ref(), cx) + }) { multi_workspace.update(cx, |multi_workspace, cx| { multi_workspace.activate(workspace, window, cx); multi_workspace.retain_active_workspace(cx); }); } else { - self.open_workspace_for_group(&path_list, window, cx); + self.open_workspace_for_group(&key, window, cx); } } @@ -3306,8 +3868,19 @@ impl Sidebar { let workspace = workspace.clone(); self.activate_thread(metadata, &workspace, true, window, cx); } - ThreadEntryWorkspace::Closed(path_list) => { - self.open_workspace_and_activate_thread(metadata, path_list.clone(), window, cx); + ThreadEntryWorkspace::Closed { + folder_paths, + project_group_key, + } => { + let folder_paths = folder_paths.clone(); + let project_group_key = project_group_key.clone(); + self.open_workspace_and_activate_thread( + metadata, + folder_paths, + &project_group_key, + window, + cx, + ); } } } @@ -3325,26 +3898,40 @@ impl Sidebar { self.cycle_thread_impl(false, window, cx); } - fn expand_thread_group(&mut self, path_list: &PathList, cx: &mut Context) { - let current = self.expanded_groups.get(path_list).copied().unwrap_or(0); - self.expanded_groups.insert(path_list.clone(), current + 1); + fn expand_thread_group(&mut self, project_group_key: &ProjectGroupKey, cx: &mut Context) { + let current = self + .expanded_groups + .get(project_group_key) + .copied() + .unwrap_or(0); + self.expanded_groups + .insert(project_group_key.clone(), current + 1); self.serialize(cx); self.update_entries(cx); } - fn reset_thread_group_expansion(&mut self, path_list: &PathList, cx: &mut Context) { - self.expanded_groups.remove(path_list); + fn reset_thread_group_expansion( + &mut self, + project_group_key: &ProjectGroupKey, + cx: &mut Context, + ) { + 
self.expanded_groups.remove(project_group_key); self.serialize(cx); self.update_entries(cx); } - fn collapse_thread_group(&mut self, path_list: &PathList, cx: &mut Context) { - match self.expanded_groups.get(path_list).copied() { + fn collapse_thread_group( + &mut self, + project_group_key: &ProjectGroupKey, + cx: &mut Context, + ) { + match self.expanded_groups.get(project_group_key).copied() { Some(batches) if batches > 1 => { - self.expanded_groups.insert(path_list.clone(), batches - 1); + self.expanded_groups + .insert(project_group_key.clone(), batches - 1); } Some(_) => { - self.expanded_groups.remove(path_list); + self.expanded_groups.remove(project_group_key); } None => return, } @@ -3361,7 +3948,7 @@ impl Sidebar { let Some(active_key) = self.active_project_group_key(cx) else { return; }; - self.expand_thread_group(active_key.path_list(), cx); + self.expand_thread_group(&active_key, cx); } fn on_show_fewer_threads( @@ -3373,7 +3960,7 @@ impl Sidebar { let Some(active_key) = self.active_project_group_key(cx) else { return; }; - self.collapse_thread_group(active_key.path_list(), cx); + self.collapse_thread_group(&active_key, cx); } fn on_new_thread( @@ -3398,9 +3985,9 @@ impl Sidebar { ) -> AnyElement { let label: SharedString = if is_active { self.active_draft_text(cx) - .unwrap_or_else(|| "Untitled Thread".into()) + .unwrap_or_else(|| "New Thread".into()) } else { - "Untitled Thread".into() + "New Thread".into() }; let id = SharedString::from(format!("draft-thread-btn-{}", ix)); @@ -3420,7 +4007,16 @@ impl Sidebar { .collect(), ) .selected(true) - .focused(is_selected); + .focused(is_selected) + .on_click(cx.listener(|this, _, window, cx| { + if let Some(workspace) = this.active_workspace(cx) { + if !AgentPanel::is_visible(&workspace, cx) { + workspace.update(cx, |workspace, cx| { + workspace.focus_panel::(window, cx); + }); + } + } + })); div() .on_mouse_down(gpui::MouseButton::Left, |_, _, cx| { @@ -3440,7 +4036,7 @@ impl Sidebar { cx: &mut Context, ) -> 
AnyElement { let label: SharedString = DEFAULT_THREAD_TITLE.into(); - let path_list = key.path_list().clone(); + let key = key.clone(); let id = SharedString::from(format!("new-thread-btn-{}", ix)); @@ -3462,14 +4058,17 @@ impl Sidebar { .focused(is_selected) .on_click(cx.listener(move |this, _, window, cx| { this.selection = None; - if let Some(workspace) = this.workspace_for_group(&path_list, cx) { + if let Some(workspace) = this.multi_workspace.upgrade().and_then(|mw| { + mw.read(cx) + .workspace_for_paths(key.path_list(), key.host().as_ref(), cx) + }) { this.create_new_thread(&workspace, window, cx); } else { - this.open_workspace_for_group(&path_list, window, cx); + this.open_workspace_for_group(&key, window, cx); } })); - // Linked worktree NewThread entries can be dismissed, which removes + // Linked worktree DraftThread entries can be dismissed, which removes // the workspace from the multi-workspace. if let Some(workspace) = workspace.cloned() { thread_item = thread_item.action_slot( @@ -3883,9 +4482,11 @@ impl Sidebar { this.show_thread_list(window, cx); } ThreadsArchiveViewEvent::Unarchive { thread } => { - this.show_thread_list(window, cx); this.activate_archived_thread(thread.clone(), window, cx); } + ThreadsArchiveViewEvent::CancelRestore { session_id } => { + this.restoring_tasks.remove(session_id); + } }, ); @@ -3956,12 +4557,13 @@ impl WorkspaceSidebar for Sidebar { collapsed_groups: self .collapsed_groups .iter() - .map(|pl| pl.serialize()) + .cloned() + .map(SerializedProjectGroupKey::from) .collect(), expanded_groups: self .expanded_groups .iter() - .map(|(pl, count)| (pl.serialize(), *count)) + .map(|(key, count)| (SerializedProjectGroupKey::from(key.clone()), *count)) .collect(), active_view: match self.view { SidebarView::ThreadList => SerializedSidebarView::ThreadList, @@ -3984,12 +4586,12 @@ impl WorkspaceSidebar for Sidebar { self.collapsed_groups = serialized .collapsed_groups .into_iter() - .map(|s| PathList::deserialize(&s)) + 
.map(ProjectGroupKey::from) .collect(); self.expanded_groups = serialized .expanded_groups .into_iter() - .map(|(s, count)| (PathList::deserialize(&s), count)) + .map(|(s, count)| (ProjectGroupKey::from(s), count)) .collect(); if serialized.active_view == SerializedSidebarView::Archive { cx.defer_in(window, |this, window, cx| { @@ -4107,11 +4709,14 @@ fn all_thread_infos_for_workspace( return None.into_iter().flatten(); }; let agent_panel = agent_panel.read(cx); - let threads = agent_panel - .parent_threads(cx) + .conversation_views() .into_iter() - .map(|thread_view| { + .filter_map(|conversation_view| { + let has_pending_tool_call = conversation_view + .read(cx) + .root_thread_has_pending_tool_call(cx); + let thread_view = conversation_view.read(cx).root_thread(cx)?; let thread_view_ref = thread_view.read(cx); let thread = thread_view_ref.thread.read(cx); @@ -4125,7 +4730,7 @@ fn all_thread_infos_for_workspace( let session_id = thread.session_id().clone(); let is_background = agent_panel.is_background_thread(&session_id); - let status = if thread.is_waiting_for_confirmation() { + let status = if has_pending_tool_call { AgentThreadStatus::WaitingForConfirmation } else if thread.had_error() { AgentThreadStatus::Error @@ -4138,7 +4743,7 @@ fn all_thread_infos_for_workspace( let diff_stats = thread.action_log().read(cx).diff_stats(cx); - ActiveThreadInfo { + Some(ActiveThreadInfo { session_id, title, status, @@ -4147,7 +4752,7 @@ fn all_thread_infos_for_workspace( is_background, is_title_generating, diff_stats, - } + }) }); Some(threads).into_iter().flatten() @@ -4194,6 +4799,36 @@ pub fn dump_workspace_info( ) .ok(); + // project_group_key_for_workspace internally reads the workspace, + // so we can only call it for workspaces other than this_entity + // (which is already being updated). 
+ if let Some(mw) = &multi_workspace { + if *ws == this_entity { + let workspace_key = workspace.project_group_key(cx); + writeln!(output, "ProjectGroupKey: {workspace_key:?}").ok(); + } else { + let effective_key = mw.read(cx).project_group_key_for_workspace(ws, cx); + let workspace_key = ws.read(cx).project_group_key(cx); + if effective_key != workspace_key { + writeln!( + output, + "ProjectGroupKey (multi_workspace): {effective_key:?}" + ) + .ok(); + writeln!( + output, + "ProjectGroupKey (workspace, DISAGREES): {workspace_key:?}" + ) + .ok(); + } else { + writeln!(output, "ProjectGroupKey: {effective_key:?}").ok(); + } + } + } else { + let workspace_key = workspace.project_group_key(cx); + writeln!(output, "ProjectGroupKey: {workspace_key:?}").ok(); + } + // The action handler is already inside an update on `this_entity`, // so we must avoid a nested read/update on that same entity. if *ws == this_entity { @@ -4310,7 +4945,14 @@ fn dump_single_workspace(workspace: &Workspace, output: &mut String, cx: &gpui:: let entry_count = thread.entries().len(); write!(output, "Active thread: {title} (session: {session_id})").ok(); write!(output, " [{status}, {entry_count} entries").ok(); - if thread.is_waiting_for_confirmation() { + if panel + .active_conversation_view() + .is_some_and(|conversation_view| { + conversation_view + .read(cx) + .root_thread_has_pending_tool_call(cx) + }) + { write!(output, ", awaiting confirmation").ok(); } writeln!(output, "]").ok(); @@ -4337,7 +4979,10 @@ fn dump_single_workspace(workspace: &Workspace, output: &mut String, cx: &gpui:: let entry_count = thread.entries().len(); write!(output, " - {title} (session: {session_id})").ok(); write!(output, " [{status}, {entry_count} entries").ok(); - if thread.is_waiting_for_confirmation() { + if conversation_view + .read(cx) + .root_thread_has_pending_tool_call(cx) + { write!(output, ", awaiting confirmation").ok(); } writeln!(output, "]").ok(); diff --git a/crates/sidebar/src/sidebar_tests.rs 
b/crates/sidebar/src/sidebar_tests.rs index 46ad941609e448f561054afc45390f77b2c58ff7..58ca19b7778db14f4b8b43bcfd40336c40c66bea 100644 --- a/crates/sidebar/src/sidebar_tests.rs +++ b/crates/sidebar/src/sidebar_tests.rs @@ -1,12 +1,12 @@ use super::*; -use acp_thread::StubAgentConnection; +use acp_thread::{AcpThread, PermissionOptions, StubAgentConnection}; use agent::ThreadStore; use agent_ui::{ test_support::{active_session_id, open_thread_with_connection, send_message}, - thread_metadata_store::ThreadMetadata, + thread_metadata_store::{ThreadMetadata, ThreadWorktreePaths}, }; use chrono::DateTime; -use fs::FakeFs; +use fs::{FakeFs, Fs}; use gpui::TestAppContext; use pretty_assertions::assert_eq; use project::AgentId; @@ -60,6 +60,75 @@ fn has_thread_entry(sidebar: &Sidebar, session_id: &acp::SessionId) -> bool { .any(|entry| matches!(entry, ListEntry::Thread(t) if &t.metadata.session_id == session_id)) } +#[track_caller] +fn assert_remote_project_integration_sidebar_state( + sidebar: &mut Sidebar, + main_thread_id: &acp::SessionId, + remote_thread_id: &acp::SessionId, +) { + let mut project_headers = sidebar.contents.entries.iter().filter_map(|entry| { + if let ListEntry::ProjectHeader { label, .. } = entry { + Some(label.as_ref()) + } else { + None + } + }); + + let Some(project_header) = project_headers.next() else { + panic!("expected exactly one sidebar project header named `project`, found none"); + }; + assert_eq!( + project_header, "project", + "expected the only sidebar project header to be `project`" + ); + if let Some(unexpected_header) = project_headers.next() { + panic!( + "expected exactly one sidebar project header named `project`, found extra header `{unexpected_header}`" + ); + } + + let mut saw_main_thread = false; + let mut saw_remote_thread = false; + for entry in &sidebar.contents.entries { + match entry { + ListEntry::ProjectHeader { label, .. 
} => { + assert_eq!( + label.as_ref(), + "project", + "expected the only sidebar project header to be `project`" + ); + } + ListEntry::Thread(thread) if &thread.metadata.session_id == main_thread_id => { + saw_main_thread = true; + } + ListEntry::Thread(thread) if &thread.metadata.session_id == remote_thread_id => { + saw_remote_thread = true; + } + ListEntry::Thread(thread) => { + let title = thread.metadata.title.as_ref(); + panic!( + "unexpected sidebar thread while simulating remote project integration flicker: title=`{title}`" + ); + } + ListEntry::ViewMore { .. } => { + panic!( + "unexpected `View More` entry while simulating remote project integration flicker" + ); + } + ListEntry::DraftThread { .. } => {} + } + } + + assert!( + saw_main_thread, + "expected the sidebar to keep showing `Main Thread` under `project`" + ); + assert!( + saw_remote_thread, + "expected the sidebar to keep showing `Worktree Thread` under `project`" + ); +} + async fn init_test_project( worktree_path: &str, cx: &mut TestAppContext, @@ -157,31 +226,49 @@ fn save_thread_metadata( cx: &mut TestAppContext, ) { cx.update(|cx| { - let (folder_paths, main_worktree_paths) = { - let project_ref = project.read(cx); - let paths: Vec> = project_ref - .visible_worktrees(cx) - .map(|worktree| worktree.read(cx).abs_path()) - .collect(); - let folder_paths = PathList::new(&paths); - let main_worktree_paths = project_ref.project_group_key(cx).path_list().clone(); - (folder_paths, main_worktree_paths) - }; + let worktree_paths = ThreadWorktreePaths::from_project(project.read(cx), cx); let metadata = ThreadMetadata { session_id, agent_id: agent::ZED_AGENT_ID.clone(), title, updated_at, created_at, - folder_paths, - main_worktree_paths, + worktree_paths, archived: false, + remote_connection: None, }; ThreadMetadataStore::global(cx).update(cx, |store, cx| store.save_manually(metadata, cx)); }); cx.run_until_parked(); } +fn save_thread_metadata_with_main_paths( + session_id: &str, + title: &str, + 
folder_paths: PathList, + main_worktree_paths: PathList, + cx: &mut TestAppContext, +) { + let session_id = acp::SessionId::new(Arc::from(session_id)); + let title = SharedString::from(title.to_string()); + let updated_at = chrono::TimeZone::with_ymd_and_hms(&Utc, 2024, 1, 1, 0, 0, 0).unwrap(); + let metadata = ThreadMetadata { + session_id, + agent_id: agent::ZED_AGENT_ID.clone(), + title, + updated_at, + created_at: None, + worktree_paths: ThreadWorktreePaths::from_path_lists(main_worktree_paths, folder_paths) + .unwrap(), + archived: false, + remote_connection: None, + }; + cx.update(|cx| { + ThreadMetadataStore::global(cx).update(cx, |store, cx| store.save_manually(metadata, cx)); + }); + cx.run_until_parked(); +} + fn focus_sidebar(sidebar: &Entity, cx: &mut gpui::VisualTestContext) { sidebar.update_in(cx, |_, window, cx| { cx.focus_self(window); @@ -189,6 +276,35 @@ fn focus_sidebar(sidebar: &Entity, cx: &mut gpui::VisualTestContext) { cx.run_until_parked(); } +fn request_test_tool_authorization( + thread: &Entity, + tool_call_id: &str, + option_id: &str, + cx: &mut gpui::VisualTestContext, +) { + let tool_call_id = acp::ToolCallId::new(tool_call_id); + let label = format!("Tool {tool_call_id}"); + let option_id = acp::PermissionOptionId::new(option_id); + let _authorization_task = cx.update(|_, cx| { + thread.update(cx, |thread, cx| { + thread + .request_tool_call_authorization( + acp::ToolCall::new(tool_call_id, label) + .kind(acp::ToolKind::Edit) + .into(), + PermissionOptions::Flat(vec![acp::PermissionOption::new( + option_id, + "Allow", + acp::PermissionOptionKind::AllowOnce, + )]), + cx, + ) + .unwrap() + }) + }); + cx.run_until_parked(); +} + fn format_linked_worktree_chips(worktrees: &[WorktreeInfo]) -> String { let mut seen = Vec::new(); let mut chips = Vec::new(); @@ -224,6 +340,11 @@ fn visible_entries_as_strings( } else { "" }; + let is_active = sidebar + .active_entry + .as_ref() + .is_some_and(|active| active.matches_entry(entry)); + let 
active_indicator = if is_active { " (active)" } else { "" }; match entry { ListEntry::ProjectHeader { label, @@ -231,7 +352,7 @@ fn visible_entries_as_strings( highlight_positions: _, .. } => { - let icon = if sidebar.collapsed_groups.contains(key.path_list()) { + let icon = if sidebar.collapsed_groups.contains(key) { ">" } else { "v" @@ -240,7 +361,7 @@ fn visible_entries_as_strings( } ListEntry::Thread(thread) => { let title = thread.metadata.title.as_ref(); - let active = if thread.is_live { " *" } else { "" }; + let live = if thread.is_live { " *" } else { "" }; let status_str = match thread.status { AgentThreadStatus::Running => " (running)", AgentThreadStatus::Error => " (error)", @@ -256,7 +377,7 @@ fn visible_entries_as_strings( "" }; let worktree = format_linked_worktree_chips(&thread.worktrees); - format!(" {title}{worktree}{active}{status_str}{notified}{selected}") + format!(" {title}{worktree}{live}{status_str}{notified}{active_indicator}{selected}") } ListEntry::ViewMore { is_fully_expanded, .. @@ -267,13 +388,17 @@ fn visible_entries_as_strings( format!(" + View More{}", selected) } } - ListEntry::DraftThread { worktrees, .. } => { - let worktree = format_linked_worktree_chips(worktrees); - format!(" [~ Draft{}]{}", worktree, selected) - } - ListEntry::NewThread { worktrees, .. } => { + ListEntry::DraftThread { + workspace, + worktrees, + .. 
+ } => { let worktree = format_linked_worktree_chips(worktrees); - format!(" [+ New Thread{}]{}", worktree, selected) + if workspace.is_some() { + format!(" [+ New Thread{}]{}", worktree, selected) + } else { + format!(" [~ Draft{}]{}{}", worktree, active_indicator, selected) + } } } }) @@ -290,15 +415,13 @@ async fn test_serialization_round_trip(cx: &mut TestAppContext) { save_n_test_threads(3, &project, cx).await; - let path_list = project.read_with(cx, |project, cx| { - project.project_group_key(cx).path_list().clone() - }); + let project_group_key = project.read_with(cx, |project, cx| project.project_group_key(cx)); // Set a custom width, collapse the group, and expand "View More". sidebar.update_in(cx, |sidebar, window, cx| { sidebar.set_width(Some(px(420.0)), cx); - sidebar.toggle_collapse(&path_list, window, cx); - sidebar.expanded_groups.insert(path_list.clone(), 2); + sidebar.toggle_collapse(&project_group_key, window, cx); + sidebar.expanded_groups.insert(project_group_key.clone(), 2); }); cx.run_until_parked(); @@ -336,8 +459,8 @@ async fn test_serialization_round_trip(cx: &mut TestAppContext) { assert_eq!(collapsed1, collapsed2); assert_eq!(expanded1, expanded2); assert_eq!(width1, px(420.0)); - assert!(collapsed1.contains(&path_list)); - assert_eq!(expanded1.get(&path_list), Some(&2)); + assert!(collapsed1.contains(&project_group_key)); + assert_eq!(expanded1.get(&project_group_key), Some(&2)); } #[gpui::test] @@ -443,7 +566,10 @@ async fn test_single_workspace_no_threads(cx: &mut TestAppContext) { assert_eq!( visible_entries_as_strings(&sidebar, cx), - vec!["v [my-project]", " [+ New Thread]"] + vec![ + // + "v [my-project]", + ] ); } @@ -479,6 +605,7 @@ async fn test_single_workspace_with_saved_threads(cx: &mut TestAppContext) { assert_eq!( visible_entries_as_strings(&sidebar, cx), vec![ + // "v [my-project]", " Fix crash in project panel", " Add inline diff view", @@ -509,7 +636,11 @@ async fn test_workspace_lifecycle(cx: &mut TestAppContext) { 
assert_eq!( visible_entries_as_strings(&sidebar, cx), - vec!["v [project-a]", " Thread A1"] + vec![ + // + "v [project-a]", + " Thread A1", + ] ); // Add a second workspace @@ -520,7 +651,11 @@ async fn test_workspace_lifecycle(cx: &mut TestAppContext) { assert_eq!( visible_entries_as_strings(&sidebar, cx), - vec!["v [project-a]", " Thread A1",] + vec![ + // + "v [project-a]", + " Thread A1", + ] ); } @@ -539,6 +674,7 @@ async fn test_view_more_pagination(cx: &mut TestAppContext) { assert_eq!( visible_entries_as_strings(&sidebar, cx), vec![ + // "v [my-project]", " Thread 12", " Thread 11", @@ -560,9 +696,7 @@ async fn test_view_more_batched_expansion(cx: &mut TestAppContext) { // Create 17 threads: initially shows 5, then 10, then 15, then all 17 with Collapse save_n_test_threads(17, &project, cx).await; - let path_list = project.read_with(cx, |project, cx| { - project.project_group_key(cx).path_list().clone() - }); + let project_group_key = project.read_with(cx, |project, cx| project.project_group_key(cx)); multi_workspace.update_in(cx, |_, _window, cx| cx.notify()); cx.run_until_parked(); @@ -587,8 +721,13 @@ async fn test_view_more_batched_expansion(cx: &mut TestAppContext) { // Expand again by one batch sidebar.update_in(cx, |s, _window, cx| { - let current = s.expanded_groups.get(&path_list).copied().unwrap_or(0); - s.expanded_groups.insert(path_list.clone(), current + 1); + let current = s + .expanded_groups + .get(&project_group_key) + .copied() + .unwrap_or(0); + s.expanded_groups + .insert(project_group_key.clone(), current + 1); s.update_entries(cx); }); cx.run_until_parked(); @@ -600,8 +739,13 @@ async fn test_view_more_batched_expansion(cx: &mut TestAppContext) { // Expand one more time - should show all 17 threads with Collapse button sidebar.update_in(cx, |s, _window, cx| { - let current = s.expanded_groups.get(&path_list).copied().unwrap_or(0); - s.expanded_groups.insert(path_list.clone(), current + 1); + let current = s + .expanded_groups + 
.get(&project_group_key) + .copied() + .unwrap_or(0); + s.expanded_groups + .insert(project_group_key.clone(), current + 1); s.update_entries(cx); }); cx.run_until_parked(); @@ -614,7 +758,7 @@ async fn test_view_more_batched_expansion(cx: &mut TestAppContext) { // Click collapse - should go back to showing 5 threads sidebar.update_in(cx, |s, _window, cx| { - s.expanded_groups.remove(&path_list); + s.expanded_groups.remove(&project_group_key); s.update_entries(cx); }); cx.run_until_parked(); @@ -634,38 +778,47 @@ async fn test_collapse_and_expand_group(cx: &mut TestAppContext) { save_n_test_threads(1, &project, cx).await; - let path_list = project.read_with(cx, |project, cx| { - project.project_group_key(cx).path_list().clone() - }); + let project_group_key = project.read_with(cx, |project, cx| project.project_group_key(cx)); multi_workspace.update_in(cx, |_, _window, cx| cx.notify()); cx.run_until_parked(); assert_eq!( visible_entries_as_strings(&sidebar, cx), - vec!["v [my-project]", " Thread 1"] + vec![ + // + "v [my-project]", + " Thread 1", + ] ); // Collapse sidebar.update_in(cx, |s, window, cx| { - s.toggle_collapse(&path_list, window, cx); + s.toggle_collapse(&project_group_key, window, cx); }); cx.run_until_parked(); assert_eq!( visible_entries_as_strings(&sidebar, cx), - vec!["> [my-project]"] + vec![ + // + "> [my-project]", + ] ); // Expand sidebar.update_in(cx, |s, window, cx| { - s.toggle_collapse(&path_list, window, cx); + s.toggle_collapse(&project_group_key, window, cx); }); cx.run_until_parked(); assert_eq!( visible_entries_as_strings(&sidebar, cx), - vec!["v [my-project]", " Thread 1"] + vec![ + // + "v [my-project]", + " Thread 1", + ] ); } @@ -681,7 +834,8 @@ async fn test_visible_entries_as_strings(cx: &mut TestAppContext) { let collapsed_path = PathList::new(&[std::path::PathBuf::from("/collapsed")]); sidebar.update_in(cx, |s, _window, _cx| { - s.collapsed_groups.insert(collapsed_path.clone()); + s.collapsed_groups + 
.insert(project::ProjectGroupKey::new(None, collapsed_path.clone())); s.contents .notified_threads .insert(acp::SessionId::new(Arc::from("t-5"))); @@ -694,17 +848,18 @@ async fn test_visible_entries_as_strings(cx: &mut TestAppContext) { has_running_threads: false, waiting_thread_count: 0, is_active: true, + has_threads: true, }, ListEntry::Thread(ThreadEntry { metadata: ThreadMetadata { session_id: acp::SessionId::new(Arc::from("t-1")), agent_id: AgentId::new("zed-agent"), - folder_paths: PathList::default(), - main_worktree_paths: PathList::default(), + worktree_paths: ThreadWorktreePaths::default(), title: "Completed thread".into(), updated_at: Utc::now(), created_at: Some(Utc::now()), archived: false, + remote_connection: None, }, icon: IconName::ZedAgent, icon_from_external_svg: None, @@ -722,12 +877,12 @@ async fn test_visible_entries_as_strings(cx: &mut TestAppContext) { metadata: ThreadMetadata { session_id: acp::SessionId::new(Arc::from("t-2")), agent_id: AgentId::new("zed-agent"), - folder_paths: PathList::default(), - main_worktree_paths: PathList::default(), + worktree_paths: ThreadWorktreePaths::default(), title: "Running thread".into(), updated_at: Utc::now(), created_at: Some(Utc::now()), archived: false, + remote_connection: None, }, icon: IconName::ZedAgent, icon_from_external_svg: None, @@ -745,12 +900,12 @@ async fn test_visible_entries_as_strings(cx: &mut TestAppContext) { metadata: ThreadMetadata { session_id: acp::SessionId::new(Arc::from("t-3")), agent_id: AgentId::new("zed-agent"), - folder_paths: PathList::default(), - main_worktree_paths: PathList::default(), + worktree_paths: ThreadWorktreePaths::default(), title: "Error thread".into(), updated_at: Utc::now(), created_at: Some(Utc::now()), archived: false, + remote_connection: None, }, icon: IconName::ZedAgent, icon_from_external_svg: None, @@ -764,16 +919,17 @@ async fn test_visible_entries_as_strings(cx: &mut TestAppContext) { diff_stats: DiffStats::default(), }), // Thread with 
WaitingForConfirmation status, not active + // remote_connection: None, ListEntry::Thread(ThreadEntry { metadata: ThreadMetadata { session_id: acp::SessionId::new(Arc::from("t-4")), agent_id: AgentId::new("zed-agent"), - folder_paths: PathList::default(), - main_worktree_paths: PathList::default(), + worktree_paths: ThreadWorktreePaths::default(), title: "Waiting thread".into(), updated_at: Utc::now(), created_at: Some(Utc::now()), archived: false, + remote_connection: None, }, icon: IconName::ZedAgent, icon_from_external_svg: None, @@ -787,16 +943,17 @@ async fn test_visible_entries_as_strings(cx: &mut TestAppContext) { diff_stats: DiffStats::default(), }), // Background thread that completed (should show notification) + // remote_connection: None, ListEntry::Thread(ThreadEntry { metadata: ThreadMetadata { session_id: acp::SessionId::new(Arc::from("t-5")), agent_id: AgentId::new("zed-agent"), - folder_paths: PathList::default(), - main_worktree_paths: PathList::default(), + worktree_paths: ThreadWorktreePaths::default(), title: "Notified thread".into(), updated_at: Utc::now(), created_at: Some(Utc::now()), archived: false, + remote_connection: None, }, icon: IconName::ZedAgent, icon_from_external_svg: None, @@ -822,6 +979,7 @@ async fn test_visible_entries_as_strings(cx: &mut TestAppContext) { has_running_threads: false, waiting_thread_count: 0, is_active: false, + has_threads: false, }, ]; @@ -832,6 +990,7 @@ async fn test_visible_entries_as_strings(cx: &mut TestAppContext) { assert_eq!( visible_entries_as_strings(&sidebar, cx), vec![ + // "v [expanded-project]", " Completed thread", " Running thread * (running) <== selected", @@ -995,10 +1154,14 @@ async fn test_keyboard_confirm_on_project_header_toggles_collapse(cx: &mut TestA assert_eq!( visible_entries_as_strings(&sidebar, cx), - vec!["v [my-project]", " Thread 1"] + vec![ + // + "v [my-project]", + " Thread 1", + ] ); - // Focus the sidebar and select the header (index 0) + // Focus the sidebar and select 
the header focus_sidebar(&sidebar, cx); sidebar.update_in(cx, |sidebar, _window, _cx| { sidebar.selection = Some(0); @@ -1010,7 +1173,10 @@ async fn test_keyboard_confirm_on_project_header_toggles_collapse(cx: &mut TestA assert_eq!( visible_entries_as_strings(&sidebar, cx), - vec!["> [my-project] <== selected"] + vec![ + // + "> [my-project] <== selected", + ] ); // Confirm again expands the group @@ -1019,7 +1185,11 @@ async fn test_keyboard_confirm_on_project_header_toggles_collapse(cx: &mut TestA assert_eq!( visible_entries_as_strings(&sidebar, cx), - vec!["v [my-project] <== selected", " Thread 1",] + vec![ + // + "v [my-project] <== selected", + " Thread 1", + ] ); } @@ -1070,7 +1240,11 @@ async fn test_keyboard_expand_and_collapse_selected_entry(cx: &mut TestAppContex assert_eq!( visible_entries_as_strings(&sidebar, cx), - vec!["v [my-project]", " Thread 1"] + vec![ + // + "v [my-project]", + " Thread 1", + ] ); // Focus sidebar and manually select the header (index 0). Press left to collapse. 
@@ -1084,7 +1258,10 @@ async fn test_keyboard_expand_and_collapse_selected_entry(cx: &mut TestAppContex assert_eq!( visible_entries_as_strings(&sidebar, cx), - vec!["> [my-project] <== selected"] + vec![ + // + "> [my-project] <== selected", + ] ); // Press right to expand @@ -1093,7 +1270,11 @@ async fn test_keyboard_expand_and_collapse_selected_entry(cx: &mut TestAppContex assert_eq!( visible_entries_as_strings(&sidebar, cx), - vec!["v [my-project] <== selected", " Thread 1",] + vec![ + // + "v [my-project] <== selected", + " Thread 1", + ] ); // Press right again on already-expanded header moves selection down @@ -1120,7 +1301,11 @@ async fn test_keyboard_collapse_from_child_selects_parent(cx: &mut TestAppContex assert_eq!( visible_entries_as_strings(&sidebar, cx), - vec!["v [my-project]", " Thread 1 <== selected",] + vec![ + // + "v [my-project]", + " Thread 1 <== selected", + ] ); // Pressing left on a child collapses the parent group and selects it @@ -1130,7 +1315,10 @@ async fn test_keyboard_collapse_from_child_selects_parent(cx: &mut TestAppContex assert_eq!(sidebar.read_with(cx, |s, _| s.selection), Some(0)); assert_eq!( visible_entries_as_strings(&sidebar, cx), - vec!["> [my-project] <== selected"] + vec![ + // + "> [my-project] <== selected", + ] ); } @@ -1141,10 +1329,13 @@ async fn test_keyboard_navigation_on_empty_list(cx: &mut TestAppContext) { cx.add_window_view(|window, cx| MultiWorkspace::test_new(project, window, cx)); let sidebar = setup_sidebar(&multi_workspace, cx); - // An empty project has the header and a new thread button. + // An empty project has only the header. 
assert_eq!( visible_entries_as_strings(&sidebar, cx), - vec!["v [empty-project]", " [+ New Thread]"] + vec![ + // + "v [empty-project]", + ] ); // Focus sidebar — focus_in does not set a selection @@ -1155,11 +1346,7 @@ async fn test_keyboard_navigation_on_empty_list(cx: &mut TestAppContext) { cx.dispatch_action(SelectNext); assert_eq!(sidebar.read_with(cx, |s, _| s.selection), Some(0)); - // SelectNext moves to the new thread button - cx.dispatch_action(SelectNext); - assert_eq!(sidebar.read_with(cx, |s, _| s.selection), Some(1)); - - // At the end, wraps back to first entry + // At the end (only one entry), wraps back to first entry cx.dispatch_action(SelectNext); assert_eq!(sidebar.read_with(cx, |s, _| s.selection), Some(0)); @@ -1280,10 +1467,69 @@ async fn test_parallel_threads_shown_with_live_status(cx: &mut TestAppContext) { entries[1..].sort(); assert_eq!( entries, - vec!["v [my-project]", " Hello *", " Hello * (running)",] + vec![ + // + "v [my-project]", + " Hello * (active)", + " Hello * (running)", + ] ); } +#[gpui::test] +async fn test_subagent_permission_request_marks_parent_sidebar_thread_waiting( + cx: &mut TestAppContext, +) { + let project = init_test_project_with_agent_panel("/my-project", cx).await; + let (multi_workspace, cx) = + cx.add_window_view(|window, cx| MultiWorkspace::test_new(project.clone(), window, cx)); + let (sidebar, panel) = setup_sidebar_with_agent_panel(&multi_workspace, cx); + + let connection = StubAgentConnection::new().with_supports_load_session(true); + connection.set_next_prompt_updates(vec![acp::SessionUpdate::AgentMessageChunk( + acp::ContentChunk::new("Done".into()), + )]); + open_thread_with_connection(&panel, connection, cx); + send_message(&panel, cx); + + let parent_session_id = active_session_id(&panel, cx); + save_test_thread_metadata(&parent_session_id, &project, cx).await; + + let subagent_session_id = acp::SessionId::new("subagent-session"); + cx.update(|_, cx| { + let parent_thread = 
panel.read(cx).active_agent_thread(cx).unwrap(); + parent_thread.update(cx, |thread: &mut AcpThread, cx| { + thread.subagent_spawned(subagent_session_id.clone(), cx); + }); + }); + cx.run_until_parked(); + + let subagent_thread = panel.read_with(cx, |panel, cx| { + panel + .active_conversation_view() + .and_then(|conversation| conversation.read(cx).thread_view(&subagent_session_id)) + .map(|thread_view| thread_view.read(cx).thread.clone()) + .expect("Expected subagent thread to be loaded into the conversation") + }); + request_test_tool_authorization(&subagent_thread, "subagent-tool-call", "allow-subagent", cx); + + let parent_status = sidebar.read_with(cx, |sidebar, _cx| { + sidebar + .contents + .entries + .iter() + .find_map(|entry| match entry { + ListEntry::Thread(thread) if thread.metadata.session_id == parent_session_id => { + Some(thread.status) + } + _ => None, + }) + .expect("Expected parent thread entry in sidebar") + }); + + assert_eq!(parent_status, AgentThreadStatus::WaitingForConfirmation); +} + #[gpui::test] async fn test_background_thread_completion_triggers_notification(cx: &mut TestAppContext) { let project_a = init_test_project_with_agent_panel("/project-a", cx).await; @@ -1319,7 +1565,11 @@ async fn test_background_thread_completion_triggers_notification(cx: &mut TestAp // Thread A is still running; no notification yet. assert_eq!( visible_entries_as_strings(&sidebar, cx), - vec!["v [project-a]", " Hello * (running)",] + vec![ + // + "v [project-a]", + " Hello * (running) (active)", + ] ); // Complete thread A's turn (transition Running → Completed). @@ -1329,7 +1579,11 @@ async fn test_background_thread_completion_triggers_notification(cx: &mut TestAp // The completed background thread shows a notification indicator. assert_eq!( visible_entries_as_strings(&sidebar, cx), - vec!["v [project-a]", " Hello * (!)",] + vec![ + // + "v [project-a]", + " Hello * (!) 
(active)", + ] ); } @@ -1369,6 +1623,7 @@ async fn test_search_narrows_visible_threads_to_matches(cx: &mut TestAppContext) assert_eq!( visible_entries_as_strings(&sidebar, cx), vec![ + // "v [my-project]", " Fix crash in project panel", " Add inline diff view", @@ -1381,7 +1636,11 @@ async fn test_search_narrows_visible_threads_to_matches(cx: &mut TestAppContext) type_in_search(&sidebar, "diff", cx); assert_eq!( visible_entries_as_strings(&sidebar, cx), - vec!["v [my-project]", " Add inline diff view <== selected",] + vec![ + // + "v [my-project]", + " Add inline diff view <== selected", + ] ); // User changes query to something with no matches — list is empty. @@ -1416,6 +1675,7 @@ async fn test_search_matches_regardless_of_case(cx: &mut TestAppContext) { assert_eq!( visible_entries_as_strings(&sidebar, cx), vec![ + // "v [my-project]", " Fix Crash In Project Panel <== selected", ] @@ -1426,6 +1686,7 @@ async fn test_search_matches_regardless_of_case(cx: &mut TestAppContext) { assert_eq!( visible_entries_as_strings(&sidebar, cx), vec![ + // "v [my-project]", " Fix Crash In Project Panel <== selected", ] @@ -1456,7 +1717,12 @@ async fn test_escape_clears_search_and_restores_full_list(cx: &mut TestAppContex // Confirm the full list is showing. assert_eq!( visible_entries_as_strings(&sidebar, cx), - vec!["v [my-project]", " Alpha thread", " Beta thread",] + vec![ + // + "v [my-project]", + " Alpha thread", + " Beta thread", + ] ); // User types a search query to filter down. @@ -1464,7 +1730,11 @@ async fn test_escape_clears_search_and_restores_full_list(cx: &mut TestAppContex type_in_search(&sidebar, "alpha", cx); assert_eq!( visible_entries_as_strings(&sidebar, cx), - vec!["v [my-project]", " Alpha thread <== selected",] + vec![ + // + "v [my-project]", + " Alpha thread <== selected", + ] ); // User presses Escape — filter clears, full list is restored. 
@@ -1474,6 +1744,7 @@ async fn test_escape_clears_search_and_restores_full_list(cx: &mut TestAppContex assert_eq!( visible_entries_as_strings(&sidebar, cx), vec![ + // "v [my-project]", " Alpha thread <== selected", " Beta thread", @@ -1530,6 +1801,7 @@ async fn test_search_only_shows_workspace_headers_with_matches(cx: &mut TestAppC assert_eq!( visible_entries_as_strings(&sidebar, cx), vec![ + // "v [project-a]", " Fix bug in sidebar", " Add tests for editor", @@ -1540,7 +1812,11 @@ async fn test_search_only_shows_workspace_headers_with_matches(cx: &mut TestAppC type_in_search(&sidebar, "sidebar", cx); assert_eq!( visible_entries_as_strings(&sidebar, cx), - vec!["v [project-a]", " Fix bug in sidebar <== selected",] + vec![ + // + "v [project-a]", + " Fix bug in sidebar <== selected", + ] ); // "typo" only matches in the second workspace — the first header disappears. @@ -1556,6 +1832,7 @@ async fn test_search_only_shows_workspace_headers_with_matches(cx: &mut TestAppC assert_eq!( visible_entries_as_strings(&sidebar, cx), vec![ + // "v [project-a]", " Fix bug in sidebar <== selected", " Add tests for editor", @@ -1615,6 +1892,7 @@ async fn test_search_matches_workspace_name(cx: &mut TestAppContext) { assert_eq!( visible_entries_as_strings(&sidebar, cx), vec![ + // "v [alpha-project]", " Fix bug in sidebar <== selected", " Add tests for editor", @@ -1626,7 +1904,11 @@ async fn test_search_matches_workspace_name(cx: &mut TestAppContext) { type_in_search(&sidebar, "sidebar", cx); assert_eq!( visible_entries_as_strings(&sidebar, cx), - vec!["v [alpha-project]", " Fix bug in sidebar <== selected",] + vec![ + // + "v [alpha-project]", + " Fix bug in sidebar <== selected", + ] ); // "alpha sidebar" matches the workspace name "alpha-project" (fuzzy: a-l-p-h-a-s-i-d-e-b-a-r @@ -1636,7 +1918,11 @@ async fn test_search_matches_workspace_name(cx: &mut TestAppContext) { type_in_search(&sidebar, "fix", cx); assert_eq!( visible_entries_as_strings(&sidebar, cx), - vec!["v 
[alpha-project]", " Fix bug in sidebar <== selected",] + vec![ + // + "v [alpha-project]", + " Fix bug in sidebar <== selected", + ] ); // A query that matches a workspace name AND a thread in that same workspace. @@ -1645,6 +1931,7 @@ async fn test_search_matches_workspace_name(cx: &mut TestAppContext) { assert_eq!( visible_entries_as_strings(&sidebar, cx), vec![ + // "v [alpha-project]", " Fix bug in sidebar <== selected", " Add tests for editor", @@ -1658,6 +1945,7 @@ async fn test_search_matches_workspace_name(cx: &mut TestAppContext) { assert_eq!( visible_entries_as_strings(&sidebar, cx), vec![ + // "v [alpha-project]", " Fix bug in sidebar <== selected", " Add tests for editor", @@ -1707,7 +1995,11 @@ async fn test_search_finds_threads_hidden_behind_view_more(cx: &mut TestAppConte let filtered = visible_entries_as_strings(&sidebar, cx); assert_eq!( filtered, - vec!["v [my-project]", " Hidden gem thread <== selected",] + vec![ + // + "v [my-project]", + " Hidden gem thread <== selected", + ] ); assert!( !filtered.iter().any(|e| e.contains("View More")), @@ -1743,14 +2035,21 @@ async fn test_search_finds_threads_inside_collapsed_groups(cx: &mut TestAppConte assert_eq!( visible_entries_as_strings(&sidebar, cx), - vec!["> [my-project] <== selected"] + vec![ + // + "> [my-project] <== selected", + ] ); // User types a search — the thread appears even though its group is collapsed. 
type_in_search(&sidebar, "important", cx); assert_eq!( visible_entries_as_strings(&sidebar, cx), - vec!["> [my-project]", " Important thread <== selected",] + vec![ + // + "> [my-project]", + " Important thread <== selected", + ] ); } @@ -1784,6 +2083,7 @@ async fn test_search_then_keyboard_navigate_and_confirm(cx: &mut TestAppContext) assert_eq!( visible_entries_as_strings(&sidebar, cx), vec![ + // "v [my-project]", " Fix crash in panel <== selected", " Fix lint warnings", @@ -1796,6 +2096,7 @@ async fn test_search_then_keyboard_navigate_and_confirm(cx: &mut TestAppContext) assert_eq!( visible_entries_as_strings(&sidebar, cx), vec![ + // "v [my-project]", " Fix crash in panel", " Fix lint warnings <== selected", @@ -1807,6 +2108,7 @@ async fn test_search_then_keyboard_navigate_and_confirm(cx: &mut TestAppContext) assert_eq!( visible_entries_as_strings(&sidebar, cx), vec![ + // "v [my-project]", " Fix crash in panel <== selected", " Fix lint warnings", @@ -1847,7 +2149,11 @@ async fn test_confirm_on_historical_thread_activates_workspace(cx: &mut TestAppC assert_eq!( visible_entries_as_strings(&sidebar, cx), - vec!["v [my-project]", " Historical Thread",] + vec![ + // + "v [my-project]", + " Historical Thread", + ] ); // Switch to workspace 1 so we can verify the confirm switches back. @@ -1908,7 +2214,12 @@ async fn test_click_clears_selection_and_focus_in_restores_it(cx: &mut TestAppCo assert_eq!( visible_entries_as_strings(&sidebar, cx), - vec!["v [my-project]", " Thread A", " Thread B",] + vec![ + // + "v [my-project]", + " Thread A", + " Thread B", + ] ); // Keyboard confirm preserves selection. 
@@ -1927,7 +2238,8 @@ async fn test_click_clears_selection_and_focus_in_restores_it(cx: &mut TestAppCo sidebar.update_in(cx, |sidebar, window, cx| { sidebar.selection = None; let path_list = PathList::new(&[std::path::PathBuf::from("/my-project")]); - sidebar.toggle_collapse(&path_list, window, cx); + let project_group_key = project::ProjectGroupKey::new(None, path_list); + sidebar.toggle_collapse(&project_group_key, window, cx); }); assert_eq!(sidebar.read_with(cx, |sidebar, _| sidebar.selection), None); @@ -1959,7 +2271,11 @@ async fn test_thread_title_update_propagates_to_sidebar(cx: &mut TestAppContext) assert_eq!( visible_entries_as_strings(&sidebar, cx), - vec!["v [my-project]", " Hello *"] + vec![ + // + "v [my-project]", + " Hello * (active)", + ] ); // Simulate the agent generating a title. The notification chain is: @@ -1981,7 +2297,11 @@ async fn test_thread_title_update_propagates_to_sidebar(cx: &mut TestAppContext) assert_eq!( visible_entries_as_strings(&sidebar, cx), - vec!["v [my-project]", " Friendly Greeting with AI *"] + vec![ + // + "v [my-project]", + " Friendly Greeting with AI * (active)", + ] ); } @@ -2034,9 +2354,9 @@ async fn test_focused_thread_tracks_user_intent(cx: &mut TestAppContext) { title: "Test".into(), updated_at: Utc::now(), created_at: None, - folder_paths: PathList::default(), - main_worktree_paths: PathList::default(), + worktree_paths: ThreadWorktreePaths::default(), archived: false, + remote_connection: None, }, &workspace_a, false, @@ -2090,9 +2410,9 @@ async fn test_focused_thread_tracks_user_intent(cx: &mut TestAppContext) { title: "Thread B".into(), updated_at: Utc::now(), created_at: None, - folder_paths: PathList::default(), - main_worktree_paths: PathList::default(), + worktree_paths: ThreadWorktreePaths::default(), archived: false, + remote_connection: None, }, &workspace_b, false, @@ -2235,7 +2555,11 @@ async fn test_new_thread_button_works_after_adding_folder(cx: &mut TestAppContex // Verify the thread appears in 
the sidebar. assert_eq!( visible_entries_as_strings(&sidebar, cx), - vec!["v [project-a]", " Hello *",] + vec![ + // + "v [project-a]", + " Hello * (active)", + ] ); // The "New Thread" button should NOT be in "active/draft" state @@ -2262,15 +2586,14 @@ async fn test_new_thread_button_works_after_adding_folder(cx: &mut TestAppContex // The workspace path_list is now [project-a, project-b]. The active // thread's metadata was re-saved with the new paths by the agent panel's - // project subscription, so it stays visible under the updated group. - // The old [project-a] group persists in the sidebar (empty) because - // project_group_keys is append-only. + // project subscription. The old [project-a] key is replaced by the new + // key since no other workspace claims it. assert_eq!( visible_entries_as_strings(&sidebar, cx), vec![ - "v [project-a, project-b]", // - " Hello *", - "v [project-a]", + // + "v [project-a, project-b]", + " Hello * (active)", ] ); @@ -2306,119 +2629,161 @@ async fn test_new_thread_button_works_after_adding_folder(cx: &mut TestAppContex } #[gpui::test] -async fn test_cmd_n_shows_new_thread_entry(cx: &mut TestAppContext) { - // When the user presses Cmd-N (NewThread action) while viewing a - // non-empty thread, the sidebar should show the "New Thread" entry. - // This exercises the same code path as the workspace action handler - // (which bypasses the sidebar's create_new_thread method). - let project = init_test_project_with_agent_panel("/my-project", cx).await; +async fn test_worktree_add_and_remove_migrates_threads(cx: &mut TestAppContext) { + // When a worktree is added to a project, the project group key changes + // and all historical threads should be migrated to the new key. Removing + // the worktree should migrate them back. 
+ let (_fs, project) = init_multi_project_test(&["/project-a", "/project-b"], cx).await; let (multi_workspace, cx) = cx.add_window_view(|window, cx| MultiWorkspace::test_new(project.clone(), window, cx)); - let (sidebar, panel) = setup_sidebar_with_agent_panel(&multi_workspace, cx); - - // Create a non-empty thread (has messages). - let connection = StubAgentConnection::new(); - connection.set_next_prompt_updates(vec![acp::SessionUpdate::AgentMessageChunk( - acp::ContentChunk::new("Done".into()), - )]); - open_thread_with_connection(&panel, connection, cx); - send_message(&panel, cx); + let sidebar = setup_sidebar(&multi_workspace, cx); - let session_id = active_session_id(&panel, cx); - save_test_thread_metadata(&session_id, &project, cx).await; + // Save two threads against the initial project group [/project-a]. + save_n_test_threads(2, &project, cx).await; + sidebar.update_in(cx, |sidebar, _window, cx| sidebar.update_entries(cx)); cx.run_until_parked(); assert_eq!( visible_entries_as_strings(&sidebar, cx), - vec!["v [my-project]", " Hello *"] + vec![ + // + "v [project-a]", + " Thread 2", + " Thread 1", + ] ); - // Simulate cmd-n - let workspace = multi_workspace.read_with(cx, |mw, _cx| mw.workspace().clone()); - panel.update_in(cx, |panel, window, cx| { - panel.new_thread(&NewThread, window, cx); + // Verify the metadata store has threads under the old key. + let old_key_paths = PathList::new(&[PathBuf::from("/project-a")]); + cx.update(|_window, cx| { + let store = ThreadMetadataStore::global(cx).read(cx); + assert_eq!( + store.entries_for_main_worktree_path(&old_key_paths).count(), + 2, + "should have 2 threads under old key before add" + ); }); - workspace.update_in(cx, |workspace, window, cx| { - workspace.focus_panel::(window, cx); + + // Add a second worktree to the same project. 
+ project + .update(cx, |project, cx| { + project.find_or_create_worktree("/project-b", true, cx) + }) + .await + .expect("should add worktree"); + cx.run_until_parked(); + + // The project group key should now be [/project-a, /project-b]. + let new_key_paths = PathList::new(&[PathBuf::from("/project-a"), PathBuf::from("/project-b")]); + + // Verify multi-workspace state: exactly one project group key, the new one. + multi_workspace.read_with(cx, |mw, _cx| { + let keys: Vec<_> = mw.project_group_keys().cloned().collect(); + assert_eq!( + keys.len(), + 1, + "should have exactly 1 project group key after add" + ); + assert_eq!( + keys[0].path_list(), + &new_key_paths, + "the key should be the new combined path list" + ); + }); + + // Verify threads were migrated to the new key. + cx.update(|_window, cx| { + let store = ThreadMetadataStore::global(cx).read(cx); + assert_eq!( + store.entries_for_main_worktree_path(&old_key_paths).count(), + 0, + "should have 0 threads under old key after migration" + ); + assert_eq!( + store.entries_for_main_worktree_path(&new_key_paths).count(), + 2, + "should have 2 threads under new key after migration" + ); }); + + // Sidebar should show threads under the new header. + sidebar.update_in(cx, |sidebar, _window, cx| sidebar.update_entries(cx)); cx.run_until_parked(); assert_eq!( visible_entries_as_strings(&sidebar, cx), - vec!["v [my-project]", " [~ Draft]", " Hello *"], - "After Cmd-N the sidebar should show a highlighted Draft entry" + vec![ + // + "v [project-a, project-b]", + " Thread 2", + " Thread 1", + ] ); - sidebar.read_with(cx, |sidebar, _cx| { - assert_active_draft( - sidebar, - &workspace, - "active_entry should be Draft after Cmd-N", - ); + // Now remove the second worktree. 
+ let worktree_id = project.read_with(cx, |project, cx| { + project + .visible_worktrees(cx) + .find(|wt| wt.read(cx).abs_path().as_ref() == Path::new("/project-b")) + .map(|wt| wt.read(cx).id()) + .expect("should find project-b worktree") + }); + project.update(cx, |project, cx| { + project.remove_worktree(worktree_id, cx); }); -} - -#[gpui::test] -async fn test_draft_with_server_session_shows_as_draft(cx: &mut TestAppContext) { - let project = init_test_project_with_agent_panel("/my-project", cx).await; - let (multi_workspace, cx) = - cx.add_window_view(|window, cx| MultiWorkspace::test_new(project.clone(), window, cx)); - let (sidebar, panel) = setup_sidebar_with_agent_panel(&multi_workspace, cx); - - // Create a saved thread so the workspace has history. - let connection = StubAgentConnection::new(); - connection.set_next_prompt_updates(vec![acp::SessionUpdate::AgentMessageChunk( - acp::ContentChunk::new("Done".into()), - )]); - open_thread_with_connection(&panel, connection, cx); - send_message(&panel, cx); - let saved_session_id = active_session_id(&panel, cx); - save_test_thread_metadata(&saved_session_id, &project, cx).await; cx.run_until_parked(); - assert_eq!( - visible_entries_as_strings(&sidebar, cx), - vec!["v [my-project]", " Hello *"] - ); + // The key should revert to [/project-a]. + multi_workspace.read_with(cx, |mw, _cx| { + let keys: Vec<_> = mw.project_group_keys().cloned().collect(); + assert_eq!( + keys.len(), + 1, + "should have exactly 1 project group key after remove" + ); + assert_eq!( + keys[0].path_list(), + &old_key_paths, + "the key should revert to the original path list" + ); + }); - // Open a new draft thread via a server connection. This gives the - // conversation a parent_id (session assigned by the server) but - // no messages have been sent, so active_thread_is_draft() is true. 
- let draft_connection = StubAgentConnection::new(); - open_thread_with_connection(&panel, draft_connection, cx); + // Threads should be migrated back to the old key. + cx.update(|_window, cx| { + let store = ThreadMetadataStore::global(cx).read(cx); + assert_eq!( + store.entries_for_main_worktree_path(&new_key_paths).count(), + 0, + "should have 0 threads under new key after revert" + ); + assert_eq!( + store.entries_for_main_worktree_path(&old_key_paths).count(), + 2, + "should have 2 threads under old key after revert" + ); + }); + + sidebar.update_in(cx, |sidebar, _window, cx| sidebar.update_entries(cx)); cx.run_until_parked(); assert_eq!( visible_entries_as_strings(&sidebar, cx), - vec!["v [my-project]", " [~ Draft]", " Hello *"], + vec![ + // + "v [project-a]", + " Thread 2", + " Thread 1", + ] ); - - let workspace = multi_workspace.read_with(cx, |mw, _cx| mw.workspace().clone()); - sidebar.read_with(cx, |sidebar, _cx| { - assert_active_draft( - sidebar, - &workspace, - "Draft with server session should be Draft, not Thread", - ); - }); } #[gpui::test] -async fn test_cmd_n_shows_new_thread_entry_in_absorbed_worktree(cx: &mut TestAppContext) { - // When the active workspace is an absorbed git worktree, cmd-n - // should still show the "New Thread" entry under the main repo's - // header and highlight it as active. - agent_ui::test_support::init_test(cx); - cx.update(|cx| { - ThreadStore::init_global(cx); - ThreadMetadataStore::init_global(cx); - language_model::LanguageModelRegistry::test(cx); - prompt_store::init(cx); - }); - +async fn test_worktree_add_and_remove_preserves_thread_path_associations(cx: &mut TestAppContext) { + // Verifies that adding/removing folders to a project correctly updates + // each thread's worktree_paths (both folder_paths and main_worktree_paths) + // while preserving per-path associations for linked worktrees. + init_test(cx); let fs = FakeFs::new(cx.executor()); - - // Main repo with a linked worktree. 
fs.insert_tree( "/project", serde_json::json!({ @@ -2427,535 +2792,728 @@ async fn test_cmd_n_shows_new_thread_entry_in_absorbed_worktree(cx: &mut TestApp }), ) .await; - - // Worktree checkout pointing back to the main repo. fs.add_linked_worktree_for_repo( Path::new("/project/.git"), false, git::repository::Worktree { - path: std::path::PathBuf::from("/wt-feature-a"), - ref_name: Some("refs/heads/feature-a".into()), + path: PathBuf::from("/wt-feature"), + ref_name: Some("refs/heads/feature".into()), sha: "aaa".into(), is_main: false, }, ) .await; + fs.insert_tree("/other-project", serde_json::json!({ ".git": {} })) + .await; + cx.update(|cx| ::set_global(fs.clone(), cx)); - cx.update(|cx| ::set_global(fs.clone(), cx)); + // Start with a linked worktree workspace: visible root is /wt-feature, + // main repo is /project. + let project = + project::Project::test(fs.clone() as Arc, ["/wt-feature".as_ref()], cx).await; + let (multi_workspace, cx) = + cx.add_window_view(|window, cx| MultiWorkspace::test_new(project.clone(), window, cx)); + let _sidebar = setup_sidebar(&multi_workspace, cx); - let main_project = project::Project::test(fs.clone(), ["/project".as_ref()], cx).await; - let worktree_project = project::Project::test(fs.clone(), ["/wt-feature-a".as_ref()], cx).await; + // Save a thread. It should have folder_paths=[/wt-feature], main=[/project]. 
+ save_named_thread_metadata("thread-1", "Thread 1", &project, cx).await; - main_project - .update(cx, |p, cx| p.git_scans_complete(cx)) - .await; - worktree_project - .update(cx, |p, cx| p.git_scans_complete(cx)) - .await; + let session_id = acp::SessionId::new(Arc::from("thread-1")); + cx.update(|_window, cx| { + let store = ThreadMetadataStore::global(cx).read(cx); + let thread = store.entry(&session_id).expect("thread should exist"); + assert_eq!( + thread.folder_paths().paths(), + &[PathBuf::from("/wt-feature")], + "initial folder_paths should be the linked worktree" + ); + assert_eq!( + thread.main_worktree_paths().paths(), + &[PathBuf::from("/project")], + "initial main_worktree_paths should be the main repo" + ); + }); - let (multi_workspace, cx) = - cx.add_window_view(|window, cx| MultiWorkspace::test_new(main_project.clone(), window, cx)); + // Add /other-project to the workspace. + project + .update(cx, |project, cx| { + project.find_or_create_worktree("/other-project", true, cx) + }) + .await + .expect("should add worktree"); + cx.run_until_parked(); - let sidebar = setup_sidebar(&multi_workspace, cx); + // Thread should now have both paths, with correct associations. + cx.update(|_window, cx| { + let store = ThreadMetadataStore::global(cx).read(cx); + let thread = store.entry(&session_id).expect("thread should exist"); + let pairs: Vec<_> = thread + .worktree_paths + .ordered_pairs() + .map(|(m, f)| (m.clone(), f.clone())) + .collect(); + assert!( + pairs.contains(&(PathBuf::from("/project"), PathBuf::from("/wt-feature"))), + "linked worktree association should be preserved, got: {:?}", + pairs + ); + assert!( + pairs.contains(&( + PathBuf::from("/other-project"), + PathBuf::from("/other-project") + )), + "new folder should have main == folder, got: {:?}", + pairs + ); + }); - let worktree_workspace = multi_workspace.update_in(cx, |mw, window, cx| { - mw.test_add_workspace(worktree_project.clone(), window, cx) + // Remove /other-project. 
+ let worktree_id = project.read_with(cx, |project, cx| { + project + .visible_worktrees(cx) + .find(|wt| wt.read(cx).abs_path().as_ref() == Path::new("/other-project")) + .map(|wt| wt.read(cx).id()) + .expect("should find other-project worktree") + }); + project.update(cx, |project, cx| { + project.remove_worktree(worktree_id, cx); }); + cx.run_until_parked(); - let worktree_panel = add_agent_panel(&worktree_workspace, cx); + // Thread should be back to original state. + cx.update(|_window, cx| { + let store = ThreadMetadataStore::global(cx).read(cx); + let thread = store.entry(&session_id).expect("thread should exist"); + assert_eq!( + thread.folder_paths().paths(), + &[PathBuf::from("/wt-feature")], + "folder_paths should revert to just the linked worktree" + ); + assert_eq!( + thread.main_worktree_paths().paths(), + &[PathBuf::from("/project")], + "main_worktree_paths should revert to just the main repo" + ); + let pairs: Vec<_> = thread + .worktree_paths + .ordered_pairs() + .map(|(m, f)| (m.clone(), f.clone())) + .collect(); + assert_eq!( + pairs, + vec![(PathBuf::from("/project"), PathBuf::from("/wt-feature"))], + "linked worktree association should be preserved through add+remove cycle" + ); + }); +} - // Switch to the worktree workspace. +#[gpui::test] +async fn test_worktree_add_key_collision_removes_duplicate_workspace(cx: &mut TestAppContext) { + // When a worktree is added to workspace A and the resulting key matches + // an existing workspace B's key (and B has the same root paths), B + // should be removed as a true duplicate. + let (fs, project_a) = init_multi_project_test(&["/project-a", "/project-b"], cx).await; + let (multi_workspace, cx) = + cx.add_window_view(|window, cx| MultiWorkspace::test_new(project_a.clone(), window, cx)); + let sidebar = setup_sidebar(&multi_workspace, cx); + + // Save a thread against workspace A [/project-a]. 
+ save_named_thread_metadata("thread-a", "Thread A", &project_a, cx).await; + + // Create workspace B with both worktrees [/project-a, /project-b]. + let project_b = project::Project::test( + fs.clone() as Arc, + ["/project-a".as_ref(), "/project-b".as_ref()], + cx, + ) + .await; + let workspace_b = multi_workspace.update_in(cx, |mw, window, cx| { + mw.test_add_workspace(project_b.clone(), window, cx) + }); + cx.run_until_parked(); + + // Switch back to workspace A so it's the active workspace when the collision happens. + let workspace_a = + multi_workspace.read_with(cx, |mw, _| mw.workspaces().next().unwrap().clone()); multi_workspace.update_in(cx, |mw, window, cx| { - let workspace = mw.workspaces().nth(1).unwrap().clone(); - mw.activate(workspace, window, cx); + mw.activate(workspace_a, window, cx); }); + cx.run_until_parked(); - // Create a non-empty thread in the worktree workspace. - let connection = StubAgentConnection::new(); - connection.set_next_prompt_updates(vec![acp::SessionUpdate::AgentMessageChunk( - acp::ContentChunk::new("Done".into()), - )]); - open_thread_with_connection(&worktree_panel, connection, cx); - send_message(&worktree_panel, cx); + // Save a thread against workspace B [/project-a, /project-b]. + save_named_thread_metadata("thread-b", "Thread B", &project_b, cx).await; - let session_id = active_session_id(&worktree_panel, cx); - save_test_thread_metadata(&session_id, &worktree_project, cx).await; + sidebar.update_in(cx, |sidebar, _window, cx| sidebar.update_entries(cx)); cx.run_until_parked(); + // Both project groups should be visible. assert_eq!( visible_entries_as_strings(&sidebar, cx), - vec!["v [project]", " Hello {wt-feature-a} *"] + vec![ + // + "v [project-a, project-b]", + " Thread B", + "v [project-a]", + " Thread A", + ] ); - // Simulate Cmd-N in the worktree workspace. 
- worktree_panel.update_in(cx, |panel, window, cx| { - panel.new_thread(&NewThread, window, cx); + let workspace_b_id = workspace_b.entity_id(); + + // Now add /project-b to workspace A's project, causing a key collision. + project_a + .update(cx, |project, cx| { + project.find_or_create_worktree("/project-b", true, cx) + }) + .await + .expect("should add worktree"); + cx.run_until_parked(); + + // Workspace B should have been removed (true duplicate — same root paths). + multi_workspace.read_with(cx, |mw, _cx| { + let workspace_ids: Vec<_> = mw.workspaces().map(|ws| ws.entity_id()).collect(); + assert!( + !workspace_ids.contains(&workspace_b_id), + "workspace B should have been removed after key collision" + ); }); - worktree_workspace.update_in(cx, |workspace, window, cx| { - workspace.focus_panel::(window, cx); + + // There should be exactly one project group key now. + let combined_paths = PathList::new(&[PathBuf::from("/project-a"), PathBuf::from("/project-b")]); + multi_workspace.read_with(cx, |mw, _cx| { + let keys: Vec<_> = mw.project_group_keys().cloned().collect(); + assert_eq!( + keys.len(), + 1, + "should have exactly 1 project group key after collision" + ); + assert_eq!( + keys[0].path_list(), + &combined_paths, + "the remaining key should be the combined paths" + ); }); + + // Both threads should be visible under the merged group. 
+ sidebar.update_in(cx, |sidebar, _window, cx| sidebar.update_entries(cx)); cx.run_until_parked(); assert_eq!( visible_entries_as_strings(&sidebar, cx), vec![ - "v [project]", - " [~ Draft {wt-feature-a}]", - " Hello {wt-feature-a} *" - ], - "After Cmd-N in an absorbed worktree, the sidebar should show \ - a highlighted Draft entry under the main repo header" + // + "v [project-a, project-b]", + " Thread A", + " Thread B", + ] ); - - sidebar.read_with(cx, |sidebar, _cx| { - assert_active_draft( - sidebar, - &worktree_workspace, - "active_entry should be Draft after Cmd-N", - ); - }); } -async fn init_test_project_with_git( - worktree_path: &str, - cx: &mut TestAppContext, -) -> (Entity, Arc) { +#[gpui::test] +async fn test_worktree_collision_keeps_active_workspace(cx: &mut TestAppContext) { + // When workspace A adds a folder that makes it collide with workspace B, + // and B is the *active* workspace, A (the incoming one) should be + // dropped so the user stays on B. A linked worktree sibling of A + // should migrate into B's group. init_test(cx); let fs = FakeFs::new(cx.executor()); + + // Set up /project-a with a linked worktree. 
fs.insert_tree( - worktree_path, + "/project-a", serde_json::json!({ - ".git": {}, + ".git": { + "worktrees": { + "feature": { + "commondir": "../../", + "HEAD": "ref: refs/heads/feature", + }, + }, + }, "src": {}, }), ) .await; - cx.update(|cx| ::set_global(fs.clone(), cx)); - let project = project::Project::test(fs.clone(), [worktree_path.as_ref()], cx).await; - (project, fs) -} - -#[gpui::test] -async fn test_search_matches_worktree_name(cx: &mut TestAppContext) { - let (project, fs) = init_test_project_with_git("/project", cx).await; - - fs.as_fake() - .add_linked_worktree_for_repo( - Path::new("/project/.git"), - false, - git::repository::Worktree { - path: std::path::PathBuf::from("/wt/rosewood"), - ref_name: Some("refs/heads/rosewood".into()), - sha: "abc".into(), - is_main: false, - }, - ) + fs.insert_tree( + "/wt-feature", + serde_json::json!({ + ".git": "gitdir: /project-a/.git/worktrees/feature", + "src": {}, + }), + ) + .await; + fs.add_linked_worktree_for_repo( + Path::new("/project-a/.git"), + false, + git::repository::Worktree { + path: PathBuf::from("/wt-feature"), + ref_name: Some("refs/heads/feature".into()), + sha: "aaa".into(), + is_main: false, + }, + ) + .await; + fs.insert_tree("/project-b", serde_json::json!({ ".git": {}, "src": {} })) .await; + cx.update(|cx| ::set_global(fs.clone(), cx)); - project - .update(cx, |project, cx| project.git_scans_complete(cx)) - .await; + let project_a = project::Project::test(fs.clone(), ["/project-a".as_ref()], cx).await; + project_a.update(cx, |p, cx| p.git_scans_complete(cx)).await; - let worktree_project = project::Project::test(fs.clone(), ["/wt/rosewood".as_ref()], cx).await; - worktree_project + // Linked worktree sibling of A. + let project_wt = project::Project::test(fs.clone(), ["/wt-feature".as_ref()], cx).await; + project_wt .update(cx, |p, cx| p.git_scans_complete(cx)) .await; + // Workspace B has both folders already. 
+ let project_b = project::Project::test( + fs.clone() as Arc, + ["/project-a".as_ref(), "/project-b".as_ref()], + cx, + ) + .await; + let (multi_workspace, cx) = - cx.add_window_view(|window, cx| MultiWorkspace::test_new(project.clone(), window, cx)); + cx.add_window_view(|window, cx| MultiWorkspace::test_new(project_a.clone(), window, cx)); let sidebar = setup_sidebar(&multi_workspace, cx); - save_named_thread_metadata("main-t", "Unrelated Thread", &project, cx).await; - save_named_thread_metadata("wt-t", "Fix Bug", &worktree_project, cx).await; + // Add agent panels to all workspaces. + let workspace_a_entity = multi_workspace.read_with(cx, |mw, _| mw.workspace().clone()); + add_agent_panel(&workspace_a_entity, cx); - multi_workspace.update_in(cx, |_, _window, cx| cx.notify()); + // Add the linked worktree workspace (sibling of A). + let workspace_wt = multi_workspace.update_in(cx, |mw, window, cx| { + mw.test_add_workspace(project_wt.clone(), window, cx) + }); + add_agent_panel(&workspace_wt, cx); cx.run_until_parked(); - // Search for "rosewood" — should match the worktree name, not the title. - type_in_search(&sidebar, "rosewood", cx); + // Add workspace B (will become active). + let workspace_b = multi_workspace.update_in(cx, |mw, window, cx| { + mw.test_add_workspace(project_b.clone(), window, cx) + }); + add_agent_panel(&workspace_b, cx); + cx.run_until_parked(); - assert_eq!( - visible_entries_as_strings(&sidebar, cx), - vec!["v [project]", " Fix Bug {rosewood} <== selected"], + // Save threads in each group. 
+ save_named_thread_metadata("thread-a", "Thread A", &project_a, cx).await; + save_thread_metadata_with_main_paths( + "thread-wt", + "Worktree Thread", + PathList::new(&[PathBuf::from("/wt-feature")]), + PathList::new(&[PathBuf::from("/project-a")]), + cx, ); -} - -#[gpui::test] -async fn test_git_worktree_added_live_updates_sidebar(cx: &mut TestAppContext) { - let (project, fs) = init_test_project_with_git("/project", cx).await; + save_named_thread_metadata("thread-b", "Thread B", &project_b, cx).await; - project - .update(cx, |project, cx| project.git_scans_complete(cx)) - .await; + sidebar.update_in(cx, |sidebar, _window, cx| sidebar.update_entries(cx)); + cx.run_until_parked(); - let worktree_project = project::Project::test(fs.clone(), ["/wt/rosewood".as_ref()], cx).await; - worktree_project - .update(cx, |p, cx| p.git_scans_complete(cx)) - .await; + // B is active, A and wt-feature are in one group, B in another. + assert_eq!( + multi_workspace.read_with(cx, |mw, _| mw.workspace().entity_id()), + workspace_b.entity_id(), + "workspace B should be active" + ); + multi_workspace.read_with(cx, |mw, _cx| { + assert_eq!(mw.project_group_keys().count(), 2, "should have 2 groups"); + assert_eq!(mw.workspaces().count(), 3, "should have 3 workspaces"); + }); - let (multi_workspace, cx) = - cx.add_window_view(|window, cx| MultiWorkspace::test_new(project.clone(), window, cx)); - let sidebar = setup_sidebar(&multi_workspace, cx); + assert_eq!( + visible_entries_as_strings(&sidebar, cx), + vec![ + // + "v [project-a, project-b]", + " [~ Draft] (active)", + " Thread B", + "v [project-a]", + " Thread A", + " Worktree Thread {wt-feature}", + ] + ); - // Save a thread against a worktree path that doesn't exist yet. 
- save_named_thread_metadata("wt-thread", "Worktree Thread", &worktree_project, cx).await; + let workspace_a = multi_workspace.read_with(cx, |mw, _| { + mw.workspaces() + .find(|ws| { + ws.entity_id() != workspace_b.entity_id() + && ws.entity_id() != workspace_wt.entity_id() + }) + .unwrap() + .clone() + }); - multi_workspace.update_in(cx, |_, _window, cx| cx.notify()); + // Add /project-b to workspace A's project, causing a collision with B. + project_a + .update(cx, |project, cx| { + project.find_or_create_worktree("/project-b", true, cx) + }) + .await + .expect("should add worktree"); cx.run_until_parked(); - // Thread is not visible yet — no worktree knows about this path. + // Workspace A (the incoming duplicate) should have been dropped. + multi_workspace.read_with(cx, |mw, _cx| { + let workspace_ids: Vec<_> = mw.workspaces().map(|ws| ws.entity_id()).collect(); + assert!( + !workspace_ids.contains(&workspace_a.entity_id()), + "workspace A should have been dropped" + ); + }); + + // The active workspace should still be B. assert_eq!( - visible_entries_as_strings(&sidebar, cx), - vec!["v [project]", " [+ New Thread]"] + multi_workspace.read_with(cx, |mw, _| mw.workspace().entity_id()), + workspace_b.entity_id(), + "workspace B should still be active" ); - // Now add the worktree to the git state and trigger a rescan. - fs.as_fake() - .add_linked_worktree_for_repo( - Path::new("/project/.git"), - true, - git::repository::Worktree { - path: std::path::PathBuf::from("/wt/rosewood"), - ref_name: Some("refs/heads/rosewood".into()), - sha: "abc".into(), - is_main: false, - }, - ) - .await; + // The linked worktree sibling should have migrated into B's group + // (it got the folder add and now shares the same key). 
+ multi_workspace.read_with(cx, |mw, _cx| { + let workspace_ids: Vec<_> = mw.workspaces().map(|ws| ws.entity_id()).collect(); + assert!( + workspace_ids.contains(&workspace_wt.entity_id()), + "linked worktree workspace should still exist" + ); + assert_eq!( + mw.project_group_keys().count(), + 1, + "should have 1 group after merge" + ); + assert_eq!( + mw.workspaces().count(), + 2, + "should have 2 workspaces (B + linked worktree)" + ); + }); + + // The linked worktree workspace should have gotten the new folder. + let wt_worktree_count = + project_wt.read_with(cx, |project, cx| project.visible_worktrees(cx).count()); + assert_eq!( + wt_worktree_count, 2, + "linked worktree project should have gotten /project-b" + ); + // After: everything merged under one group. Thread A migrated, + // worktree thread shows its chip, B's thread and draft remain. + sidebar.update_in(cx, |sidebar, _window, cx| sidebar.update_entries(cx)); cx.run_until_parked(); assert_eq!( visible_entries_as_strings(&sidebar, cx), - vec!["v [project]", " Worktree Thread {rosewood}",] + vec![ + // + "v [project-a, project-b]", + " [~ Draft] (active)", + " Thread A", + " Worktree Thread {project-a:wt-feature}", + " Thread B", + ] ); } #[gpui::test] -async fn test_two_worktree_workspaces_absorbed_when_main_added(cx: &mut TestAppContext) { +async fn test_worktree_add_syncs_linked_worktree_sibling(cx: &mut TestAppContext) { + // When a worktree is added to the main workspace, a linked worktree + // sibling (different root paths, same project group key) should also + // get the new folder added to its project. init_test(cx); let fs = FakeFs::new(cx.executor()); - // Create the main repo directory (not opened as a workspace yet). fs.insert_tree( "/project", serde_json::json!({ ".git": { + "worktrees": { + "feature": { + "commondir": "../../", + "HEAD": "ref: refs/heads/feature", + }, + }, }, "src": {}, }), ) .await; - // Two worktree checkouts whose .git files point back to the main repo. 
+ fs.insert_tree( + "/wt-feature", + serde_json::json!({ + ".git": "gitdir: /project/.git/worktrees/feature", + "src": {}, + }), + ) + .await; + fs.add_linked_worktree_for_repo( Path::new("/project/.git"), false, git::repository::Worktree { - path: std::path::PathBuf::from("/wt-feature-a"), - ref_name: Some("refs/heads/feature-a".into()), + path: PathBuf::from("/wt-feature"), + ref_name: Some("refs/heads/feature".into()), sha: "aaa".into(), is_main: false, }, ) .await; - fs.add_linked_worktree_for_repo( - Path::new("/project/.git"), - false, - git::repository::Worktree { - path: std::path::PathBuf::from("/wt-feature-b"), - ref_name: Some("refs/heads/feature-b".into()), - sha: "bbb".into(), - is_main: false, - }, + + // Create a second independent project to add as a folder later. + fs.insert_tree( + "/other-project", + serde_json::json!({ ".git": {}, "src": {} }), ) .await; cx.update(|cx| ::set_global(fs.clone(), cx)); - let project_a = project::Project::test(fs.clone(), ["/wt-feature-a".as_ref()], cx).await; - let project_b = project::Project::test(fs.clone(), ["/wt-feature-b".as_ref()], cx).await; + let main_project = project::Project::test(fs.clone(), ["/project".as_ref()], cx).await; + let worktree_project = project::Project::test(fs.clone(), ["/wt-feature".as_ref()], cx).await; - project_a.update(cx, |p, cx| p.git_scans_complete(cx)).await; - project_b.update(cx, |p, cx| p.git_scans_complete(cx)).await; + main_project + .update(cx, |p, cx| p.git_scans_complete(cx)) + .await; + worktree_project + .update(cx, |p, cx| p.git_scans_complete(cx)) + .await; - // Open both worktrees as workspaces — no main repo yet. 
let (multi_workspace, cx) = - cx.add_window_view(|window, cx| MultiWorkspace::test_new(project_a.clone(), window, cx)); - multi_workspace.update_in(cx, |mw, window, cx| { - mw.test_add_workspace(project_b.clone(), window, cx); - }); + cx.add_window_view(|window, cx| MultiWorkspace::test_new(main_project.clone(), window, cx)); let sidebar = setup_sidebar(&multi_workspace, cx); - save_named_thread_metadata("thread-a", "Thread A", &project_a, cx).await; - save_named_thread_metadata("thread-b", "Thread B", &project_b, cx).await; + // Add agent panel to the main workspace. + let main_workspace = multi_workspace.read_with(cx, |mw, _| mw.workspace().clone()); + add_agent_panel(&main_workspace, cx); - multi_workspace.update_in(cx, |_, _window, cx| cx.notify()); + // Open the linked worktree as a separate workspace. + let wt_workspace = multi_workspace.update_in(cx, |mw, window, cx| { + mw.test_add_workspace(worktree_project.clone(), window, cx) + }); + add_agent_panel(&wt_workspace, cx); cx.run_until_parked(); - // Without the main repo, each worktree has its own header. - assert_eq!( - visible_entries_as_strings(&sidebar, cx), - vec![ - "v [project]", - " Thread A {wt-feature-a}", - " Thread B {wt-feature-b}", - ] - ); + // Both workspaces should share the same project group key [/project]. + multi_workspace.read_with(cx, |mw, _cx| { + assert_eq!( + mw.project_group_keys().count(), + 1, + "should have 1 project group key before add" + ); + assert_eq!(mw.workspaces().count(), 2, "should have 2 workspaces"); + }); - let main_project = project::Project::test(fs.clone(), ["/project".as_ref()], cx).await; - main_project - .update(cx, |p, cx| p.git_scans_complete(cx)) - .await; + // Save threads against each workspace. 
+ save_named_thread_metadata("main-thread", "Main Thread", &main_project, cx).await; + save_named_thread_metadata("wt-thread", "Worktree Thread", &worktree_project, cx).await; - multi_workspace.update_in(cx, |mw, window, cx| { - mw.test_add_workspace(main_project.clone(), window, cx); + // Verify both threads are under the old key [/project]. + let old_key_paths = PathList::new(&[PathBuf::from("/project")]); + cx.update(|_window, cx| { + let store = ThreadMetadataStore::global(cx).read(cx); + assert_eq!( + store.entries_for_main_worktree_path(&old_key_paths).count(), + 2, + "should have 2 threads under old key before add" + ); }); + + sidebar.update_in(cx, |sidebar, _window, cx| sidebar.update_entries(cx)); cx.run_until_parked(); - // Both worktree workspaces should now be absorbed under the main - // repo header, with worktree chips. assert_eq!( visible_entries_as_strings(&sidebar, cx), vec![ + // "v [project]", - " Thread A {wt-feature-a}", - " Thread B {wt-feature-b}", + " [~ Draft {wt-feature}] (active)", + " Worktree Thread {wt-feature}", + " Main Thread", ] ); -} - -#[gpui::test] -async fn test_threadless_workspace_shows_new_thread_with_worktree_chip(cx: &mut TestAppContext) { - // When a group has two workspaces — one with threads and one - // without — the threadless workspace should appear as a - // "New Thread" button with its worktree chip. - init_test(cx); - let fs = FakeFs::new(cx.executor()); - - // Main repo with two linked worktrees. 
- fs.insert_tree( - "/project", - serde_json::json!({ - ".git": {}, - "src": {}, - }), - ) - .await; - fs.add_linked_worktree_for_repo( - Path::new("/project/.git"), - false, - git::repository::Worktree { - path: std::path::PathBuf::from("/wt-feature-a"), - ref_name: Some("refs/heads/feature-a".into()), - sha: "aaa".into(), - is_main: false, - }, - ) - .await; - fs.add_linked_worktree_for_repo( - Path::new("/project/.git"), - false, - git::repository::Worktree { - path: std::path::PathBuf::from("/wt-feature-b"), - ref_name: Some("refs/heads/feature-b".into()), - sha: "bbb".into(), - is_main: false, - }, - ) - .await; - cx.update(|cx| ::set_global(fs.clone(), cx)); - - // Workspace A: worktree feature-a (has threads). - let project_a = project::Project::test(fs.clone(), ["/wt-feature-a".as_ref()], cx).await; - project_a.update(cx, |p, cx| p.git_scans_complete(cx)).await; + // Add /other-project as a folder to the main workspace. + main_project + .update(cx, |project, cx| { + project.find_or_create_worktree("/other-project", true, cx) + }) + .await + .expect("should add worktree"); + cx.run_until_parked(); - // Workspace B: worktree feature-b (no threads). - let project_b = project::Project::test(fs.clone(), ["/wt-feature-b".as_ref()], cx).await; - project_b.update(cx, |p, cx| p.git_scans_complete(cx)).await; + // The linked worktree workspace should have gotten the new folder too. + let wt_worktree_count = + worktree_project.read_with(cx, |project, cx| project.visible_worktrees(cx).count()); + assert_eq!( + wt_worktree_count, 2, + "linked worktree project should have gotten the new folder" + ); - let (multi_workspace, cx) = - cx.add_window_view(|window, cx| MultiWorkspace::test_new(project_a.clone(), window, cx)); - multi_workspace.update_in(cx, |mw, window, cx| { - mw.test_add_workspace(project_b.clone(), window, cx); + // Both workspaces should still exist under one key. 
+ multi_workspace.read_with(cx, |mw, _cx| { + assert_eq!(mw.workspaces().count(), 2, "both workspaces should survive"); + assert_eq!( + mw.project_group_keys().count(), + 1, + "should still have 1 project group key" + ); }); - let sidebar = setup_sidebar(&multi_workspace, cx); - // Only save a thread for workspace A. - save_named_thread_metadata("thread-a", "Thread A", &project_a, cx).await; + // Threads should have been migrated to the new key. + let new_key_paths = + PathList::new(&[PathBuf::from("/other-project"), PathBuf::from("/project")]); + cx.update(|_window, cx| { + let store = ThreadMetadataStore::global(cx).read(cx); + assert_eq!( + store.entries_for_main_worktree_path(&old_key_paths).count(), + 0, + "should have 0 threads under old key after migration" + ); + assert_eq!( + store.entries_for_main_worktree_path(&new_key_paths).count(), + 2, + "should have 2 threads under new key after migration" + ); + }); - multi_workspace.update_in(cx, |_, _window, cx| cx.notify()); + // Both threads should still be visible in the sidebar. + sidebar.update_in(cx, |sidebar, _window, cx| sidebar.update_entries(cx)); cx.run_until_parked(); - // Workspace A's thread appears normally. Workspace B (threadless) - // appears as a "New Thread" button with its worktree chip. assert_eq!( visible_entries_as_strings(&sidebar, cx), vec![ - "v [project]", - " [+ New Thread {wt-feature-b}]", - " Thread A {wt-feature-a}", + // + "v [other-project, project]", + " [~ Draft {project:wt-feature}] (active)", + " Worktree Thread {project:wt-feature}", + " Main Thread", ] ); } #[gpui::test] -async fn test_multi_worktree_thread_shows_multiple_chips(cx: &mut TestAppContext) { - // A thread created in a workspace with roots from different git - // worktrees should show a chip for each distinct worktree name. 
- init_test(cx); - let fs = FakeFs::new(cx.executor()); +async fn test_cmd_n_shows_new_thread_entry(cx: &mut TestAppContext) { + // When the user presses Cmd-N (NewThread action) while viewing a + // non-empty thread, the sidebar should show the "New Thread" entry. + // This exercises the same code path as the workspace action handler + // (which bypasses the sidebar's create_new_thread method). + let project = init_test_project_with_agent_panel("/my-project", cx).await; + let (multi_workspace, cx) = + cx.add_window_view(|window, cx| MultiWorkspace::test_new(project.clone(), window, cx)); + let (sidebar, panel) = setup_sidebar_with_agent_panel(&multi_workspace, cx); - // Two main repos. - fs.insert_tree( - "/project_a", - serde_json::json!({ - ".git": {}, - "src": {}, - }), - ) - .await; - fs.insert_tree( - "/project_b", - serde_json::json!({ - ".git": {}, - "src": {}, - }), - ) - .await; - - // Worktree checkouts. - for repo in &["project_a", "project_b"] { - let git_path = format!("/{repo}/.git"); - for branch in &["olivetti", "selectric"] { - fs.add_linked_worktree_for_repo( - Path::new(&git_path), - false, - git::repository::Worktree { - path: std::path::PathBuf::from(format!("/worktrees/{repo}/{branch}/{repo}")), - ref_name: Some(format!("refs/heads/{branch}").into()), - sha: "aaa".into(), - is_main: false, - }, - ) - .await; - } - } - - cx.update(|cx| ::set_global(fs.clone(), cx)); - - // Open a workspace with the worktree checkout paths as roots - // (this is the workspace the thread was created in). 
- let project = project::Project::test( - fs.clone(), - [ - "/worktrees/project_a/olivetti/project_a".as_ref(), - "/worktrees/project_b/selectric/project_b".as_ref(), - ], - cx, - ) - .await; - project.update(cx, |p, cx| p.git_scans_complete(cx)).await; - - let (multi_workspace, cx) = - cx.add_window_view(|window, cx| MultiWorkspace::test_new(project.clone(), window, cx)); - let sidebar = setup_sidebar(&multi_workspace, cx); - - // Save a thread under the same paths as the workspace roots. - save_named_thread_metadata("wt-thread", "Cross Worktree Thread", &project, cx).await; + // Create a non-empty thread (has messages). + let connection = StubAgentConnection::new(); + connection.set_next_prompt_updates(vec![acp::SessionUpdate::AgentMessageChunk( + acp::ContentChunk::new("Done".into()), + )]); + open_thread_with_connection(&panel, connection, cx); + send_message(&panel, cx); - multi_workspace.update_in(cx, |_, _window, cx| cx.notify()); + let session_id = active_session_id(&panel, cx); + save_test_thread_metadata(&session_id, &project, cx).await; cx.run_until_parked(); - // Should show two distinct worktree chips. assert_eq!( visible_entries_as_strings(&sidebar, cx), vec![ - "v [project_a, project_b]", - " Cross Worktree Thread {olivetti}, {selectric}", + // + "v [my-project]", + " Hello * (active)", ] ); -} - -#[gpui::test] -async fn test_same_named_worktree_chips_are_deduplicated(cx: &mut TestAppContext) { - // When a thread's roots span multiple repos but share the same - // worktree name (e.g. both in "olivetti"), only one chip should - // appear. 
- init_test(cx); - let fs = FakeFs::new(cx.executor()); - - fs.insert_tree( - "/project_a", - serde_json::json!({ - ".git": {}, - "src": {}, - }), - ) - .await; - fs.insert_tree( - "/project_b", - serde_json::json!({ - ".git": {}, - "src": {}, - }), - ) - .await; - - for repo in &["project_a", "project_b"] { - let git_path = format!("/{repo}/.git"); - fs.add_linked_worktree_for_repo( - Path::new(&git_path), - false, - git::repository::Worktree { - path: std::path::PathBuf::from(format!("/worktrees/{repo}/olivetti/{repo}")), - ref_name: Some("refs/heads/olivetti".into()), - sha: "aaa".into(), - is_main: false, - }, - ) - .await; - } - cx.update(|cx| ::set_global(fs.clone(), cx)); + // Simulate cmd-n + let workspace = multi_workspace.read_with(cx, |mw, _cx| mw.workspace().clone()); + panel.update_in(cx, |panel, window, cx| { + panel.new_thread(&NewThread, window, cx); + }); + workspace.update_in(cx, |workspace, window, cx| { + workspace.focus_panel::(window, cx); + }); + cx.run_until_parked(); - let project = project::Project::test( - fs.clone(), - [ - "/worktrees/project_a/olivetti/project_a".as_ref(), - "/worktrees/project_b/olivetti/project_b".as_ref(), + assert_eq!( + visible_entries_as_strings(&sidebar, cx), + vec![ + // + "v [my-project]", + " [~ Draft] (active)", + " Hello *", ], - cx, - ) - .await; - project.update(cx, |p, cx| p.git_scans_complete(cx)).await; + "After Cmd-N the sidebar should show a highlighted Draft entry" + ); + + sidebar.read_with(cx, |sidebar, _cx| { + assert_active_draft( + sidebar, + &workspace, + "active_entry should be Draft after Cmd-N", + ); + }); +} +#[gpui::test] +async fn test_draft_with_server_session_shows_as_draft(cx: &mut TestAppContext) { + let project = init_test_project_with_agent_panel("/my-project", cx).await; let (multi_workspace, cx) = cx.add_window_view(|window, cx| MultiWorkspace::test_new(project.clone(), window, cx)); - let sidebar = setup_sidebar(&multi_workspace, cx); - - // Thread with roots in both repos' 
"olivetti" worktrees. - save_named_thread_metadata("wt-thread", "Same Branch Thread", &project, cx).await; + let (sidebar, panel) = setup_sidebar_with_agent_panel(&multi_workspace, cx); - multi_workspace.update_in(cx, |_, _window, cx| cx.notify()); + // Create a saved thread so the workspace has history. + let connection = StubAgentConnection::new(); + connection.set_next_prompt_updates(vec![acp::SessionUpdate::AgentMessageChunk( + acp::ContentChunk::new("Done".into()), + )]); + open_thread_with_connection(&panel, connection, cx); + send_message(&panel, cx); + let saved_session_id = active_session_id(&panel, cx); + save_test_thread_metadata(&saved_session_id, &project, cx).await; cx.run_until_parked(); - // Both worktree paths have the name "olivetti", so only one chip. assert_eq!( visible_entries_as_strings(&sidebar, cx), vec![ - "v [project_a, project_b]", - " Same Branch Thread {olivetti}", + // + "v [my-project]", + " Hello * (active)", ] ); + + // Open a new draft thread via a server connection. This gives the + // conversation a parent_id (session assigned by the server) but + // no messages have been sent, so active_thread_is_draft() is true. + let draft_connection = StubAgentConnection::new(); + open_thread_with_connection(&panel, draft_connection, cx); + cx.run_until_parked(); + + assert_eq!( + visible_entries_as_strings(&sidebar, cx), + vec![ + // + "v [my-project]", + " [~ Draft] (active)", + " Hello *", + ], + ); + + let workspace = multi_workspace.read_with(cx, |mw, _cx| mw.workspace().clone()); + sidebar.read_with(cx, |sidebar, _cx| { + assert_active_draft( + sidebar, + &workspace, + "Draft with server session should be Draft, not Thread", + ); + }); } #[gpui::test] -async fn test_absorbed_worktree_running_thread_shows_live_status(cx: &mut TestAppContext) { - // When a worktree workspace is absorbed under the main repo, a - // running thread in the worktree's agent panel should still show - // live status (spinner + "(running)") in the sidebar. 
+async fn test_cmd_n_shows_new_thread_entry_in_absorbed_worktree(cx: &mut TestAppContext) { + // When the active workspace is an absorbed git worktree, cmd-n + // should still show the "New Thread" entry under the main repo's + // header and highlight it as active. agent_ui::test_support::init_test(cx); cx.update(|cx| { ThreadStore::init_global(cx); @@ -3001,7 +3559,6 @@ async fn test_absorbed_worktree_running_thread_shows_live_status(cx: &mut TestAp .update(cx, |p, cx| p.git_scans_complete(cx)) .await; - // Create the MultiWorkspace with both projects. let (multi_workspace, cx) = cx.add_window_view(|window, cx| MultiWorkspace::test_new(main_project.clone(), window, cx)); @@ -3011,160 +3568,217 @@ async fn test_absorbed_worktree_running_thread_shows_live_status(cx: &mut TestAp mw.test_add_workspace(worktree_project.clone(), window, cx) }); - // Add an agent panel to the worktree workspace so we can run a - // thread inside it. let worktree_panel = add_agent_panel(&worktree_workspace, cx); - // Switch back to the main workspace before setting up the sidebar. + // Switch to the worktree workspace. multi_workspace.update_in(cx, |mw, window, cx| { - let workspace = mw.workspaces().next().unwrap().clone(); + let workspace = mw.workspaces().nth(1).unwrap().clone(); mw.activate(workspace, window, cx); }); - // Start a thread in the worktree workspace's panel and keep it - // generating (don't resolve it). + // Create a non-empty thread in the worktree workspace. let connection = StubAgentConnection::new(); - open_thread_with_connection(&worktree_panel, connection.clone(), cx); + connection.set_next_prompt_updates(vec![acp::SessionUpdate::AgentMessageChunk( + acp::ContentChunk::new("Done".into()), + )]); + open_thread_with_connection(&worktree_panel, connection, cx); send_message(&worktree_panel, cx); let session_id = active_session_id(&worktree_panel, cx); - - // Save metadata so the sidebar knows about this thread. 
save_test_thread_metadata(&session_id, &worktree_project, cx).await; - - // Keep the thread generating by sending a chunk without ending - // the turn. - cx.update(|_, cx| { - connection.send_update( - session_id.clone(), - acp::SessionUpdate::AgentMessageChunk(acp::ContentChunk::new("working...".into())), - cx, - ); - }); cx.run_until_parked(); - // The worktree thread should be absorbed under the main project - // and show live running status. - let entries = visible_entries_as_strings(&sidebar, cx); assert_eq!( - entries, + visible_entries_as_strings(&sidebar, cx), vec![ + // "v [project]", - " [~ Draft]", - " Hello {wt-feature-a} * (running)", + " Hello {wt-feature-a} * (active)", ] ); -} -#[gpui::test] -async fn test_absorbed_worktree_completion_triggers_notification(cx: &mut TestAppContext) { - agent_ui::test_support::init_test(cx); - cx.update(|cx| { - ThreadStore::init_global(cx); - ThreadMetadataStore::init_global(cx); - language_model::LanguageModelRegistry::test(cx); - prompt_store::init(cx); + // Simulate Cmd-N in the worktree workspace. 
+ worktree_panel.update_in(cx, |panel, window, cx| { + panel.new_thread(&NewThread, window, cx); + }); + worktree_workspace.update_in(cx, |workspace, window, cx| { + workspace.focus_panel::(window, cx); }); + cx.run_until_parked(); - let fs = FakeFs::new(cx.executor()); + assert_eq!( + visible_entries_as_strings(&sidebar, cx), + vec![ + // + "v [project]", + " [~ Draft {wt-feature-a}] (active)", + " Hello {wt-feature-a} *", + ], + "After Cmd-N in an absorbed worktree, the sidebar should show \ + a highlighted Draft entry under the main repo header" + ); - fs.insert_tree( - "/project", + sidebar.read_with(cx, |sidebar, _cx| { + assert_active_draft( + sidebar, + &worktree_workspace, + "active_entry should be Draft after Cmd-N", + ); + }); +} + +async fn init_test_project_with_git( + worktree_path: &str, + cx: &mut TestAppContext, +) -> (Entity, Arc) { + init_test(cx); + let fs = FakeFs::new(cx.executor()); + fs.insert_tree( + worktree_path, serde_json::json!({ ".git": {}, "src": {}, }), ) .await; - - fs.add_linked_worktree_for_repo( - Path::new("/project/.git"), - false, - git::repository::Worktree { - path: std::path::PathBuf::from("/wt-feature-a"), - ref_name: Some("refs/heads/feature-a".into()), - sha: "aaa".into(), - is_main: false, - }, - ) - .await; - cx.update(|cx| ::set_global(fs.clone(), cx)); + let project = project::Project::test(fs.clone(), [worktree_path.as_ref()], cx).await; + (project, fs) +} - let main_project = project::Project::test(fs.clone(), ["/project".as_ref()], cx).await; - let worktree_project = project::Project::test(fs.clone(), ["/wt-feature-a".as_ref()], cx).await; +#[gpui::test] +async fn test_search_matches_worktree_name(cx: &mut TestAppContext) { + let (project, fs) = init_test_project_with_git("/project", cx).await; - main_project - .update(cx, |p, cx| p.git_scans_complete(cx)) + fs.as_fake() + .add_linked_worktree_for_repo( + Path::new("/project/.git"), + false, + git::repository::Worktree { + path: 
std::path::PathBuf::from("/wt/rosewood"), + ref_name: Some("refs/heads/rosewood".into()), + sha: "abc".into(), + is_main: false, + }, + ) + .await; + + project + .update(cx, |project, cx| project.git_scans_complete(cx)) .await; + + let worktree_project = project::Project::test(fs.clone(), ["/wt/rosewood".as_ref()], cx).await; worktree_project .update(cx, |p, cx| p.git_scans_complete(cx)) .await; let (multi_workspace, cx) = - cx.add_window_view(|window, cx| MultiWorkspace::test_new(main_project.clone(), window, cx)); - + cx.add_window_view(|window, cx| MultiWorkspace::test_new(project.clone(), window, cx)); let sidebar = setup_sidebar(&multi_workspace, cx); - let worktree_workspace = multi_workspace.update_in(cx, |mw, window, cx| { - mw.test_add_workspace(worktree_project.clone(), window, cx) - }); + save_named_thread_metadata("main-t", "Unrelated Thread", &project, cx).await; + save_named_thread_metadata("wt-t", "Fix Bug", &worktree_project, cx).await; - let worktree_panel = add_agent_panel(&worktree_workspace, cx); + multi_workspace.update_in(cx, |_, _window, cx| cx.notify()); + cx.run_until_parked(); - multi_workspace.update_in(cx, |mw, window, cx| { - let workspace = mw.workspaces().next().unwrap().clone(); - mw.activate(workspace, window, cx); - }); + // Search for "rosewood" — should match the worktree name, not the title. 
+ type_in_search(&sidebar, "rosewood", cx); - let connection = StubAgentConnection::new(); - open_thread_with_connection(&worktree_panel, connection.clone(), cx); - send_message(&worktree_panel, cx); + assert_eq!( + visible_entries_as_strings(&sidebar, cx), + vec![ + // + "v [project]", + " Fix Bug {rosewood} <== selected", + ], + ); +} - let session_id = active_session_id(&worktree_panel, cx); - save_test_thread_metadata(&session_id, &worktree_project, cx).await; +#[gpui::test] +async fn test_git_worktree_added_live_updates_sidebar(cx: &mut TestAppContext) { + let (project, fs) = init_test_project_with_git("/project", cx).await; - cx.update(|_, cx| { - connection.send_update( - session_id.clone(), - acp::SessionUpdate::AgentMessageChunk(acp::ContentChunk::new("working...".into())), - cx, - ); - }); + project + .update(cx, |project, cx| project.git_scans_complete(cx)) + .await; + + let worktree_project = project::Project::test(fs.clone(), ["/wt/rosewood".as_ref()], cx).await; + worktree_project + .update(cx, |p, cx| p.git_scans_complete(cx)) + .await; + + let (multi_workspace, cx) = + cx.add_window_view(|window, cx| MultiWorkspace::test_new(project.clone(), window, cx)); + let sidebar = setup_sidebar(&multi_workspace, cx); + + // Save a thread against a worktree path with the correct main + // worktree association (as if the git state had been resolved). + save_thread_metadata_with_main_paths( + "wt-thread", + "Worktree Thread", + PathList::new(&[PathBuf::from("/wt/rosewood")]), + PathList::new(&[PathBuf::from("/project")]), + cx, + ); + + multi_workspace.update_in(cx, |_, _window, cx| cx.notify()); cx.run_until_parked(); + // Thread is visible because its main_worktree_paths match the group. + // The chip name is derived from the path even before git discovery. 
assert_eq!( visible_entries_as_strings(&sidebar, cx), vec![ + // "v [project]", - " [~ Draft]", - " Hello {wt-feature-a} * (running)", + " Worktree Thread {rosewood}", ] ); - connection.end_turn(session_id, acp::StopReason::EndTurn); + // Now add the worktree to the git state and trigger a rescan. + fs.as_fake() + .add_linked_worktree_for_repo( + Path::new("/project/.git"), + true, + git::repository::Worktree { + path: std::path::PathBuf::from("/wt/rosewood"), + ref_name: Some("refs/heads/rosewood".into()), + sha: "abc".into(), + is_main: false, + }, + ) + .await; + cx.run_until_parked(); assert_eq!( visible_entries_as_strings(&sidebar, cx), - vec!["v [project]", " [~ Draft]", " Hello {wt-feature-a} * (!)",] + vec![ + // + "v [project]", + " Worktree Thread {rosewood}", + ] ); } #[gpui::test] -async fn test_clicking_worktree_thread_opens_workspace_when_none_exists(cx: &mut TestAppContext) { +async fn test_two_worktree_workspaces_absorbed_when_main_added(cx: &mut TestAppContext) { init_test(cx); let fs = FakeFs::new(cx.executor()); + // Create the main repo directory (not opened as a workspace yet). fs.insert_tree( "/project", serde_json::json!({ - ".git": {}, + ".git": { + }, "src": {}, }), ) .await; + // Two worktree checkouts whose .git files point back to the main repo. fs.add_linked_worktree_for_repo( Path::new("/project/.git"), false, @@ -3176,78 +3790,83 @@ async fn test_clicking_worktree_thread_opens_workspace_when_none_exists(cx: &mut }, ) .await; + fs.add_linked_worktree_for_repo( + Path::new("/project/.git"), + false, + git::repository::Worktree { + path: std::path::PathBuf::from("/wt-feature-b"), + ref_name: Some("refs/heads/feature-b".into()), + sha: "bbb".into(), + is_main: false, + }, + ) + .await; cx.update(|cx| ::set_global(fs.clone(), cx)); - // Only open the main repo — no workspace for the worktree. 
- let main_project = project::Project::test(fs.clone(), ["/project".as_ref()], cx).await; - main_project - .update(cx, |p, cx| p.git_scans_complete(cx)) - .await; + let project_a = project::Project::test(fs.clone(), ["/wt-feature-a".as_ref()], cx).await; + let project_b = project::Project::test(fs.clone(), ["/wt-feature-b".as_ref()], cx).await; - let worktree_project = project::Project::test(fs.clone(), ["/wt-feature-a".as_ref()], cx).await; - worktree_project - .update(cx, |p, cx| p.git_scans_complete(cx)) - .await; + project_a.update(cx, |p, cx| p.git_scans_complete(cx)).await; + project_b.update(cx, |p, cx| p.git_scans_complete(cx)).await; + // Open both worktrees as workspaces — no main repo yet. let (multi_workspace, cx) = - cx.add_window_view(|window, cx| MultiWorkspace::test_new(main_project.clone(), window, cx)); + cx.add_window_view(|window, cx| MultiWorkspace::test_new(project_a.clone(), window, cx)); + multi_workspace.update_in(cx, |mw, window, cx| { + mw.test_add_workspace(project_b.clone(), window, cx); + }); let sidebar = setup_sidebar(&multi_workspace, cx); - // Save a thread for the worktree path (no workspace for it). - save_named_thread_metadata("thread-wt", "WT Thread", &worktree_project, cx).await; + save_named_thread_metadata("thread-a", "Thread A", &project_a, cx).await; + save_named_thread_metadata("thread-b", "Thread B", &project_b, cx).await; multi_workspace.update_in(cx, |_, _window, cx| cx.notify()); cx.run_until_parked(); - // Thread should appear under the main repo with a worktree chip. + // Without the main repo, each worktree has its own header. assert_eq!( visible_entries_as_strings(&sidebar, cx), - vec!["v [project]", " WT Thread {wt-feature-a}"], + vec![ + // + "v [project]", + " Thread A {wt-feature-a}", + " Thread B {wt-feature-b}", + ] ); - // Only 1 workspace should exist. 
- assert_eq!( - multi_workspace.read_with(cx, |mw, _| mw.workspaces().count()), - 1, - ); + let main_project = project::Project::test(fs.clone(), ["/project".as_ref()], cx).await; + main_project + .update(cx, |p, cx| p.git_scans_complete(cx)) + .await; - // Focus the sidebar and select the worktree thread. - focus_sidebar(&sidebar, cx); - sidebar.update_in(cx, |sidebar, _window, _cx| { - sidebar.selection = Some(1); // index 0 is header, 1 is the thread + multi_workspace.update_in(cx, |mw, window, cx| { + mw.test_add_workspace(main_project.clone(), window, cx); }); - - // Confirm to open the worktree thread. - cx.dispatch_action(Confirm); cx.run_until_parked(); - // A new workspace should have been created for the worktree path. - let new_workspace = multi_workspace.read_with(cx, |mw, _| { - assert_eq!( - mw.workspaces().count(), - 2, - "confirming a worktree thread without a workspace should open one", - ); - mw.workspaces().nth(1).unwrap().clone() - }); - - let new_path_list = - new_workspace.read_with(cx, |_, cx| workspace_path_list(&new_workspace, cx)); + // Both worktree workspaces should now be absorbed under the main + // repo header, with worktree chips. assert_eq!( - new_path_list, - PathList::new(&[std::path::PathBuf::from("/wt-feature-a")]), - "the new workspace should have been opened for the worktree path", + visible_entries_as_strings(&sidebar, cx), + vec![ + // + "v [project]", + " Thread A {wt-feature-a}", + " Thread B {wt-feature-b}", + ] ); } #[gpui::test] -async fn test_clicking_worktree_thread_does_not_briefly_render_as_separate_project( - cx: &mut TestAppContext, -) { +async fn test_threadless_workspace_shows_new_thread_with_worktree_chip(cx: &mut TestAppContext) { + // When a group has two workspaces — one with threads and one + // without — the threadless workspace should appear as a + // "New Thread" button with its worktree chip. init_test(cx); let fs = FakeFs::new(cx.executor()); + // Main repo with two linked worktrees. 
fs.insert_tree( "/project", serde_json::json!({ @@ -3256,7 +3875,6 @@ async fn test_clicking_worktree_thread_does_not_briefly_render_as_separate_proje }), ) .await; - fs.add_linked_worktree_for_repo( Path::new("/project/.git"), false, @@ -3268,123 +3886,223 @@ async fn test_clicking_worktree_thread_does_not_briefly_render_as_separate_proje }, ) .await; + fs.add_linked_worktree_for_repo( + Path::new("/project/.git"), + false, + git::repository::Worktree { + path: std::path::PathBuf::from("/wt-feature-b"), + ref_name: Some("refs/heads/feature-b".into()), + sha: "bbb".into(), + is_main: false, + }, + ) + .await; cx.update(|cx| ::set_global(fs.clone(), cx)); - let main_project = project::Project::test(fs.clone(), ["/project".as_ref()], cx).await; - main_project - .update(cx, |p, cx| p.git_scans_complete(cx)) - .await; + // Workspace A: worktree feature-a (has threads). + let project_a = project::Project::test(fs.clone(), ["/wt-feature-a".as_ref()], cx).await; + project_a.update(cx, |p, cx| p.git_scans_complete(cx)).await; - let worktree_project = project::Project::test(fs.clone(), ["/wt-feature-a".as_ref()], cx).await; - worktree_project - .update(cx, |p, cx| p.git_scans_complete(cx)) - .await; + // Workspace B: worktree feature-b (no threads). + let project_b = project::Project::test(fs.clone(), ["/wt-feature-b".as_ref()], cx).await; + project_b.update(cx, |p, cx| p.git_scans_complete(cx)).await; let (multi_workspace, cx) = - cx.add_window_view(|window, cx| MultiWorkspace::test_new(main_project.clone(), window, cx)); + cx.add_window_view(|window, cx| MultiWorkspace::test_new(project_a.clone(), window, cx)); + multi_workspace.update_in(cx, |mw, window, cx| { + mw.test_add_workspace(project_b.clone(), window, cx); + }); let sidebar = setup_sidebar(&multi_workspace, cx); - save_named_thread_metadata("thread-wt", "WT Thread", &worktree_project, cx).await; + // Only save a thread for workspace A. 
+ save_named_thread_metadata("thread-a", "Thread A", &project_a, cx).await; multi_workspace.update_in(cx, |_, _window, cx| cx.notify()); cx.run_until_parked(); + // Workspace A's thread appears normally. Workspace B (threadless) + // appears as a "New Thread" button with its worktree chip. assert_eq!( visible_entries_as_strings(&sidebar, cx), - vec!["v [project]", " WT Thread {wt-feature-a}"], + vec![ + // + "v [project]", + " [+ New Thread {wt-feature-b}]", + " Thread A {wt-feature-a}", + ] ); +} - focus_sidebar(&sidebar, cx); - sidebar.update_in(cx, |sidebar, _window, _cx| { - sidebar.selection = Some(1); // index 0 is header, 1 is the thread - }); +#[gpui::test] +async fn test_multi_worktree_thread_shows_multiple_chips(cx: &mut TestAppContext) { + // A thread created in a workspace with roots from different git + // worktrees should show a chip for each distinct worktree name. + init_test(cx); + let fs = FakeFs::new(cx.executor()); - let assert_sidebar_state = |sidebar: &mut Sidebar, _cx: &mut Context| { - let mut project_headers = sidebar.contents.entries.iter().filter_map(|entry| { - if let ListEntry::ProjectHeader { label, .. } = entry { - Some(label.as_ref()) - } else { - None - } - }); + // Two main repos. + fs.insert_tree( + "/project_a", + serde_json::json!({ + ".git": {}, + "src": {}, + }), + ) + .await; + fs.insert_tree( + "/project_b", + serde_json::json!({ + ".git": {}, + "src": {}, + }), + ) + .await; - let Some(project_header) = project_headers.next() else { - panic!("expected exactly one sidebar project header named `project`, found none"); - }; - assert_eq!( - project_header, "project", - "expected the only sidebar project header to be `project`" - ); - if let Some(unexpected_header) = project_headers.next() { - panic!( - "expected exactly one sidebar project header named `project`, found extra header `{unexpected_header}`" - ); + // Worktree checkouts. 
+ for repo in &["project_a", "project_b"] { + let git_path = format!("/{repo}/.git"); + for branch in &["olivetti", "selectric"] { + fs.add_linked_worktree_for_repo( + Path::new(&git_path), + false, + git::repository::Worktree { + path: std::path::PathBuf::from(format!("/worktrees/{repo}/{branch}/{repo}")), + ref_name: Some(format!("refs/heads/{branch}").into()), + sha: "aaa".into(), + is_main: false, + }, + ) + .await; } + } - let mut saw_expected_thread = false; - for entry in &sidebar.contents.entries { - match entry { - ListEntry::ProjectHeader { label, .. } => { - assert_eq!( - label.as_ref(), - "project", - "expected the only sidebar project header to be `project`" - ); - } - ListEntry::Thread(thread) - if thread.metadata.title.as_ref() == "WT Thread" - && thread.worktrees.first().map(|wt| wt.name.as_ref()) - == Some("wt-feature-a") => - { - saw_expected_thread = true; - } - ListEntry::Thread(thread) => { - let title = thread.metadata.title.as_ref(); - let worktree_name = thread - .worktrees - .first() - .map(|wt| wt.name.as_ref()) - .unwrap_or(""); - panic!( - "unexpected sidebar thread while opening linked worktree thread: title=`{title}`, worktree=`{worktree_name}`" - ); - } - ListEntry::ViewMore { .. } => { - panic!("unexpected `View More` entry while opening linked worktree thread"); - } - ListEntry::DraftThread { .. } | ListEntry::NewThread { .. } => {} - } - } + cx.update(|cx| ::set_global(fs.clone(), cx)); - assert!( - saw_expected_thread, - "expected the sidebar to keep showing `WT Thread {{wt-feature-a}}` under `project`" - ); - }; + // Open a workspace with the worktree checkout paths as roots + // (this is the workspace the thread was created in). 
+ let project = project::Project::test( + fs.clone(), + [ + "/worktrees/project_a/olivetti/project_a".as_ref(), + "/worktrees/project_b/selectric/project_b".as_ref(), + ], + cx, + ) + .await; + project.update(cx, |p, cx| p.git_scans_complete(cx)).await; - sidebar - .update(cx, |_, cx| cx.observe_self(assert_sidebar_state)) - .detach(); + let (multi_workspace, cx) = + cx.add_window_view(|window, cx| MultiWorkspace::test_new(project.clone(), window, cx)); + let sidebar = setup_sidebar(&multi_workspace, cx); - let window = cx.windows()[0]; - cx.update_window(window, |_, window, cx| { - window.dispatch_action(Confirm.boxed_clone(), cx); - }) - .unwrap(); + // Save a thread under the same paths as the workspace roots. + save_named_thread_metadata("wt-thread", "Cross Worktree Thread", &project, cx).await; + multi_workspace.update_in(cx, |_, _window, cx| cx.notify()); cx.run_until_parked(); - sidebar.update(cx, assert_sidebar_state); + // Should show two distinct worktree chips. + assert_eq!( + visible_entries_as_strings(&sidebar, cx), + vec![ + // + "v [project_a, project_b]", + " Cross Worktree Thread {project_a:olivetti}, {project_b:selectric}", + ] + ); } #[gpui::test] -async fn test_clicking_absorbed_worktree_thread_activates_worktree_workspace( - cx: &mut TestAppContext, -) { +async fn test_same_named_worktree_chips_are_deduplicated(cx: &mut TestAppContext) { + // When a thread's roots span multiple repos but share the same + // worktree name (e.g. both in "olivetti"), only one chip should + // appear. 
init_test(cx);
let fs = FakeFs::new(cx.executor());
+ fs.insert_tree(
+ "/project_a",
+ serde_json::json!({
+ ".git": {},
+ "src": {},
+ }),
+ )
+ .await;
+ fs.insert_tree(
+ "/project_b",
+ serde_json::json!({
+ ".git": {},
+ "src": {},
+ }),
+ )
+ .await;
+
+ for repo in &["project_a", "project_b"] {
+ let git_path = format!("/{repo}/.git");
+ fs.add_linked_worktree_for_repo(
+ Path::new(&git_path),
+ false,
+ git::repository::Worktree {
+ path: std::path::PathBuf::from(format!("/worktrees/{repo}/olivetti/{repo}")),
+ ref_name: Some("refs/heads/olivetti".into()),
+ sha: "aaa".into(),
+ is_main: false,
+ },
+ )
+ .await;
+ }
+
+ cx.update(|cx| <dyn Fs>::set_global(fs.clone(), cx));
+
+ let project = project::Project::test(
+ fs.clone(),
+ [
+ "/worktrees/project_a/olivetti/project_a".as_ref(),
+ "/worktrees/project_b/olivetti/project_b".as_ref(),
+ ],
+ cx,
+ )
+ .await;
+ project.update(cx, |p, cx| p.git_scans_complete(cx)).await;
+
+ let (multi_workspace, cx) =
+ cx.add_window_view(|window, cx| MultiWorkspace::test_new(project.clone(), window, cx));
+ let sidebar = setup_sidebar(&multi_workspace, cx);
+
+ // Thread with roots in both repos' "olivetti" worktrees.
+ save_named_thread_metadata("wt-thread", "Same Branch Thread", &project, cx).await;
+
+ multi_workspace.update_in(cx, |_, _window, cx| cx.notify());
+ cx.run_until_parked();
+
+ // Both worktree paths have the name "olivetti", so only one chip.
+ assert_eq!(
+ visible_entries_as_strings(&sidebar, cx),
+ vec![
+ //
+ "v [project_a, project_b]",
+ " Same Branch Thread {olivetti}",
+ ]
+ );
+}
+
+#[gpui::test]
+async fn test_absorbed_worktree_running_thread_shows_live_status(cx: &mut TestAppContext) {
+ // When a worktree workspace is absorbed under the main repo, a
+ // running thread in the worktree's agent panel should still show
+ // live status (spinner + "(running)") in the sidebar.
+ agent_ui::test_support::init_test(cx); + cx.update(|cx| { + ThreadStore::init_global(cx); + ThreadMetadataStore::init_global(cx); + language_model::LanguageModelRegistry::test(cx); + prompt_store::init(cx); + }); + + let fs = FakeFs::new(cx.executor()); + + // Main repo with a linked worktree. fs.insert_tree( "/project", serde_json::json!({ @@ -3394,6 +4112,7 @@ async fn test_clicking_absorbed_worktree_thread_activates_worktree_workspace( ) .await; + // Worktree checkout pointing back to the main repo. fs.add_linked_worktree_for_repo( Path::new("/project/.git"), false, @@ -3418,6 +4137,7 @@ async fn test_clicking_absorbed_worktree_thread_activates_worktree_workspace( .update(cx, |p, cx| p.git_scans_complete(cx)) .await; + // Create the MultiWorkspace with both projects. let (multi_workspace, cx) = cx.add_window_view(|window, cx| MultiWorkspace::test_new(main_project.clone(), window, cx)); @@ -3427,191 +4147,626 @@ async fn test_clicking_absorbed_worktree_thread_activates_worktree_workspace( mw.test_add_workspace(worktree_project.clone(), window, cx) }); - // Activate the main workspace before setting up the sidebar. - let main_workspace = multi_workspace.update_in(cx, |mw, window, cx| { + // Add an agent panel to the worktree workspace so we can run a + // thread inside it. + let worktree_panel = add_agent_panel(&worktree_workspace, cx); + + // Switch back to the main workspace before setting up the sidebar. + multi_workspace.update_in(cx, |mw, window, cx| { let workspace = mw.workspaces().next().unwrap().clone(); - mw.activate(workspace.clone(), window, cx); - workspace + mw.activate(workspace, window, cx); }); - save_named_thread_metadata("thread-main", "Main Thread", &main_project, cx).await; - save_named_thread_metadata("thread-wt", "WT Thread", &worktree_project, cx).await; - - multi_workspace.update_in(cx, |_, _window, cx| cx.notify()); - cx.run_until_parked(); - - // The worktree workspace should be absorbed under the main repo. 
- let entries = visible_entries_as_strings(&sidebar, cx); - assert_eq!(entries.len(), 3); - assert_eq!(entries[0], "v [project]"); - assert!(entries.contains(&" Main Thread".to_string())); - assert!(entries.contains(&" WT Thread {wt-feature-a}".to_string())); + // Start a thread in the worktree workspace's panel and keep it + // generating (don't resolve it). + let connection = StubAgentConnection::new(); + open_thread_with_connection(&worktree_panel, connection.clone(), cx); + send_message(&worktree_panel, cx); - let wt_thread_index = entries - .iter() - .position(|e| e.contains("WT Thread")) - .expect("should find the worktree thread entry"); + let session_id = active_session_id(&worktree_panel, cx); - assert_eq!( - multi_workspace.read_with(cx, |mw, _| mw.workspace().clone()), - main_workspace, - "main workspace should be active initially" - ); + // Save metadata so the sidebar knows about this thread. + save_test_thread_metadata(&session_id, &worktree_project, cx).await; - // Focus the sidebar and select the absorbed worktree thread. - focus_sidebar(&sidebar, cx); - sidebar.update_in(cx, |sidebar, _window, _cx| { - sidebar.selection = Some(wt_thread_index); + // Keep the thread generating by sending a chunk without ending + // the turn. + cx.update(|_, cx| { + connection.send_update( + session_id.clone(), + acp::SessionUpdate::AgentMessageChunk(acp::ContentChunk::new("working...".into())), + cx, + ); }); - - // Confirm to activate the worktree thread. - cx.dispatch_action(Confirm); cx.run_until_parked(); - // The worktree workspace should now be active, not the main one. - let active_workspace = multi_workspace.read_with(cx, |mw, _| mw.workspace().clone()); + // The worktree thread should be absorbed under the main project + // and show live running status. 
+ let entries = visible_entries_as_strings(&sidebar, cx); assert_eq!( - active_workspace, worktree_workspace, - "clicking an absorbed worktree thread should activate the worktree workspace" + entries, + vec![ + // + "v [project]", + " [~ Draft] (active)", + " Hello {wt-feature-a} * (running)", + ] ); } #[gpui::test] -async fn test_activate_archived_thread_with_saved_paths_activates_matching_workspace( - cx: &mut TestAppContext, -) { - // Thread has saved metadata in ThreadStore. A matching workspace is - // already open. Expected: activates the matching workspace. - init_test(cx); +async fn test_absorbed_worktree_completion_triggers_notification(cx: &mut TestAppContext) { + agent_ui::test_support::init_test(cx); + cx.update(|cx| { + ThreadStore::init_global(cx); + ThreadMetadataStore::init_global(cx); + language_model::LanguageModelRegistry::test(cx); + prompt_store::init(cx); + }); + let fs = FakeFs::new(cx.executor()); - fs.insert_tree("/project-a", serde_json::json!({ "src": {} })) - .await; - fs.insert_tree("/project-b", serde_json::json!({ "src": {} })) - .await; + + fs.insert_tree( + "/project", + serde_json::json!({ + ".git": {}, + "src": {}, + }), + ) + .await; + + fs.add_linked_worktree_for_repo( + Path::new("/project/.git"), + false, + git::repository::Worktree { + path: std::path::PathBuf::from("/wt-feature-a"), + ref_name: Some("refs/heads/feature-a".into()), + sha: "aaa".into(), + is_main: false, + }, + ) + .await; + cx.update(|cx| ::set_global(fs.clone(), cx)); - let project_a = project::Project::test(fs.clone(), ["/project-a".as_ref()], cx).await; - let project_b = project::Project::test(fs.clone(), ["/project-b".as_ref()], cx).await; + let main_project = project::Project::test(fs.clone(), ["/project".as_ref()], cx).await; + let worktree_project = project::Project::test(fs.clone(), ["/wt-feature-a".as_ref()], cx).await; + + main_project + .update(cx, |p, cx| p.git_scans_complete(cx)) + .await; + worktree_project + .update(cx, |p, cx| 
p.git_scans_complete(cx)) + .await; let (multi_workspace, cx) = - cx.add_window_view(|window, cx| MultiWorkspace::test_new(project_a.clone(), window, cx)); + cx.add_window_view(|window, cx| MultiWorkspace::test_new(main_project.clone(), window, cx)); let sidebar = setup_sidebar(&multi_workspace, cx); - let workspace_b = multi_workspace.update_in(cx, |mw, window, cx| { - mw.test_add_workspace(project_b.clone(), window, cx) + let worktree_workspace = multi_workspace.update_in(cx, |mw, window, cx| { + mw.test_add_workspace(worktree_project.clone(), window, cx) }); - let workspace_a = - multi_workspace.read_with(cx, |mw, _| mw.workspaces().next().unwrap().clone()); - // Save a thread with path_list pointing to project-b. - let session_id = acp::SessionId::new(Arc::from("archived-1")); - save_test_thread_metadata(&session_id, &project_b, cx).await; + let worktree_panel = add_agent_panel(&worktree_workspace, cx); - // Ensure workspace A is active. multi_workspace.update_in(cx, |mw, window, cx| { let workspace = mw.workspaces().next().unwrap().clone(); mw.activate(workspace, window, cx); }); - cx.run_until_parked(); - assert_eq!( - multi_workspace.read_with(cx, |mw, _| mw.workspace().clone()), - workspace_a - ); - // Call activate_archived_thread – should resolve saved paths and - // switch to the workspace for project-b. 
- sidebar.update_in(cx, |sidebar, window, cx| { - sidebar.activate_archived_thread( - ThreadMetadata { - session_id: session_id.clone(), - agent_id: agent::ZED_AGENT_ID.clone(), - title: "Archived Thread".into(), - updated_at: Utc::now(), - created_at: None, - folder_paths: PathList::new(&[PathBuf::from("/project-b")]), - main_worktree_paths: PathList::default(), - archived: false, - }, - window, + let connection = StubAgentConnection::new(); + open_thread_with_connection(&worktree_panel, connection.clone(), cx); + send_message(&worktree_panel, cx); + + let session_id = active_session_id(&worktree_panel, cx); + save_test_thread_metadata(&session_id, &worktree_project, cx).await; + + cx.update(|_, cx| { + connection.send_update( + session_id.clone(), + acp::SessionUpdate::AgentMessageChunk(acp::ContentChunk::new("working...".into())), cx, ); }); cx.run_until_parked(); assert_eq!( - multi_workspace.read_with(cx, |mw, _| mw.workspace().clone()), - workspace_b, - "should have activated the workspace matching the saved path_list" + visible_entries_as_strings(&sidebar, cx), + vec![ + // + "v [project]", + " [~ Draft] (active)", + " Hello {wt-feature-a} * (running)", + ] + ); + + connection.end_turn(session_id, acp::StopReason::EndTurn); + cx.run_until_parked(); + + assert_eq!( + visible_entries_as_strings(&sidebar, cx), + vec![ + // + "v [project]", + " [~ Draft] (active)", + " Hello {wt-feature-a} * (!)", + ] ); } #[gpui::test] -async fn test_activate_archived_thread_cwd_fallback_with_matching_workspace( - cx: &mut TestAppContext, -) { - // Thread has no saved metadata but session_info has cwd. A matching - // workspace is open. Expected: uses cwd to find and activate it. 
+async fn test_clicking_worktree_thread_opens_workspace_when_none_exists(cx: &mut TestAppContext) { init_test(cx); let fs = FakeFs::new(cx.executor()); - fs.insert_tree("/project-a", serde_json::json!({ "src": {} })) - .await; - fs.insert_tree("/project-b", serde_json::json!({ "src": {} })) - .await; + + fs.insert_tree( + "/project", + serde_json::json!({ + ".git": {}, + "src": {}, + }), + ) + .await; + + fs.add_linked_worktree_for_repo( + Path::new("/project/.git"), + false, + git::repository::Worktree { + path: std::path::PathBuf::from("/wt-feature-a"), + ref_name: Some("refs/heads/feature-a".into()), + sha: "aaa".into(), + is_main: false, + }, + ) + .await; + cx.update(|cx| ::set_global(fs.clone(), cx)); - let project_a = project::Project::test(fs.clone(), ["/project-a".as_ref()], cx).await; - let project_b = project::Project::test(fs.clone(), ["/project-b".as_ref()], cx).await; + // Only open the main repo — no workspace for the worktree. + let main_project = project::Project::test(fs.clone(), ["/project".as_ref()], cx).await; + main_project + .update(cx, |p, cx| p.git_scans_complete(cx)) + .await; - let (multi_workspace, cx) = - cx.add_window_view(|window, cx| MultiWorkspace::test_new(project_a, window, cx)); + let worktree_project = project::Project::test(fs.clone(), ["/wt-feature-a".as_ref()], cx).await; + worktree_project + .update(cx, |p, cx| p.git_scans_complete(cx)) + .await; + let (multi_workspace, cx) = + cx.add_window_view(|window, cx| MultiWorkspace::test_new(main_project.clone(), window, cx)); let sidebar = setup_sidebar(&multi_workspace, cx); - let workspace_b = multi_workspace.update_in(cx, |mw, window, cx| { - mw.test_add_workspace(project_b, window, cx) - }); - let workspace_a = - multi_workspace.read_with(cx, |mw, _| mw.workspaces().next().unwrap().clone()); + // Save a thread for the worktree path (no workspace for it). + save_named_thread_metadata("thread-wt", "WT Thread", &worktree_project, cx).await; - // Start with workspace A active. 
- multi_workspace.update_in(cx, |mw, window, cx| { - let workspace = mw.workspaces().next().unwrap().clone(); - mw.activate(workspace, window, cx); - }); + multi_workspace.update_in(cx, |_, _window, cx| cx.notify()); cx.run_until_parked(); + + // Thread should appear under the main repo with a worktree chip. assert_eq!( - multi_workspace.read_with(cx, |mw, _| mw.workspace().clone()), - workspace_a + visible_entries_as_strings(&sidebar, cx), + vec![ + // + "v [project]", + " WT Thread {wt-feature-a}", + ], ); - // No thread saved to the store – cwd is the only path hint. - sidebar.update_in(cx, |sidebar, window, cx| { - sidebar.activate_archived_thread( - ThreadMetadata { - session_id: acp::SessionId::new(Arc::from("unknown-session")), - agent_id: agent::ZED_AGENT_ID.clone(), - title: "CWD Thread".into(), - updated_at: Utc::now(), - created_at: None, - folder_paths: PathList::new(&[std::path::PathBuf::from("/project-b")]), - main_worktree_paths: PathList::default(), - archived: false, - }, - window, - cx, - ); - }); - cx.run_until_parked(); - + // Only 1 workspace should exist. assert_eq!( - multi_workspace.read_with(cx, |mw, _| mw.workspace().clone()), - workspace_b, - "should have activated the workspace matching the cwd" + multi_workspace.read_with(cx, |mw, _| mw.workspaces().count()), + 1, ); -} -#[gpui::test] + // Focus the sidebar and select the worktree thread. + focus_sidebar(&sidebar, cx); + sidebar.update_in(cx, |sidebar, _window, _cx| { + sidebar.selection = Some(1); // index 0 is header, 1 is the thread + }); + + // Confirm to open the worktree thread. + cx.dispatch_action(Confirm); + cx.run_until_parked(); + + // A new workspace should have been created for the worktree path. 
+ let new_workspace = multi_workspace.read_with(cx, |mw, _| { + assert_eq!( + mw.workspaces().count(), + 2, + "confirming a worktree thread without a workspace should open one", + ); + mw.workspaces().nth(1).unwrap().clone() + }); + + let new_path_list = + new_workspace.read_with(cx, |_, cx| workspace_path_list(&new_workspace, cx)); + assert_eq!( + new_path_list, + PathList::new(&[std::path::PathBuf::from("/wt-feature-a")]), + "the new workspace should have been opened for the worktree path", + ); +} + +#[gpui::test] +async fn test_clicking_worktree_thread_does_not_briefly_render_as_separate_project( + cx: &mut TestAppContext, +) { + init_test(cx); + let fs = FakeFs::new(cx.executor()); + + fs.insert_tree( + "/project", + serde_json::json!({ + ".git": {}, + "src": {}, + }), + ) + .await; + + fs.add_linked_worktree_for_repo( + Path::new("/project/.git"), + false, + git::repository::Worktree { + path: std::path::PathBuf::from("/wt-feature-a"), + ref_name: Some("refs/heads/feature-a".into()), + sha: "aaa".into(), + is_main: false, + }, + ) + .await; + + cx.update(|cx| ::set_global(fs.clone(), cx)); + + let main_project = project::Project::test(fs.clone(), ["/project".as_ref()], cx).await; + main_project + .update(cx, |p, cx| p.git_scans_complete(cx)) + .await; + + let worktree_project = project::Project::test(fs.clone(), ["/wt-feature-a".as_ref()], cx).await; + worktree_project + .update(cx, |p, cx| p.git_scans_complete(cx)) + .await; + + let (multi_workspace, cx) = + cx.add_window_view(|window, cx| MultiWorkspace::test_new(main_project.clone(), window, cx)); + let sidebar = setup_sidebar(&multi_workspace, cx); + + save_named_thread_metadata("thread-wt", "WT Thread", &worktree_project, cx).await; + + multi_workspace.update_in(cx, |_, _window, cx| cx.notify()); + cx.run_until_parked(); + + assert_eq!( + visible_entries_as_strings(&sidebar, cx), + vec![ + // + "v [project]", + " WT Thread {wt-feature-a}", + ], + ); + + focus_sidebar(&sidebar, cx); + 
sidebar.update_in(cx, |sidebar, _window, _cx| {
+ sidebar.selection = Some(1); // index 0 is header, 1 is the thread
+ });
+
+ let assert_sidebar_state = |sidebar: &mut Sidebar, _cx: &mut Context<Sidebar>| {
+ let mut project_headers = sidebar.contents.entries.iter().filter_map(|entry| {
+ if let ListEntry::ProjectHeader { label, .. } = entry {
+ Some(label.as_ref())
+ } else {
+ None
+ }
+ });
+
+ let Some(project_header) = project_headers.next() else {
+ panic!("expected exactly one sidebar project header named `project`, found none");
+ };
+ assert_eq!(
+ project_header, "project",
+ "expected the only sidebar project header to be `project`"
+ );
+ if let Some(unexpected_header) = project_headers.next() {
+ panic!(
+ "expected exactly one sidebar project header named `project`, found extra header `{unexpected_header}`"
+ );
+ }
+
+ let mut saw_expected_thread = false;
+ for entry in &sidebar.contents.entries {
+ match entry {
+ ListEntry::ProjectHeader { label, .. } => {
+ assert_eq!(
+ label.as_ref(),
+ "project",
+ "expected the only sidebar project header to be `project`"
+ );
+ }
+ ListEntry::Thread(thread)
+ if thread.metadata.title.as_ref() == "WT Thread"
+ && thread.worktrees.first().map(|wt| wt.name.as_ref())
+ == Some("wt-feature-a") =>
+ {
+ saw_expected_thread = true;
+ }
+ ListEntry::Thread(thread) => {
+ let title = thread.metadata.title.as_ref();
+ let worktree_name = thread
+ .worktrees
+ .first()
+ .map(|wt| wt.name.as_ref())
+ .unwrap_or("");
+ panic!(
+ "unexpected sidebar thread while opening linked worktree thread: title=`{title}`, worktree=`{worktree_name}`"
+ );
+ }
+ ListEntry::ViewMore { .. } => {
+ panic!("unexpected `View More` entry while opening linked worktree thread");
+ }
+ ListEntry::DraftThread { ..
} => {} + } + } + + assert!( + saw_expected_thread, + "expected the sidebar to keep showing `WT Thread {{wt-feature-a}}` under `project`" + ); + }; + + sidebar + .update(cx, |_, cx| cx.observe_self(assert_sidebar_state)) + .detach(); + + let window = cx.windows()[0]; + cx.update_window(window, |_, window, cx| { + window.dispatch_action(Confirm.boxed_clone(), cx); + }) + .unwrap(); + + cx.run_until_parked(); + + sidebar.update(cx, assert_sidebar_state); +} + +#[gpui::test] +async fn test_clicking_absorbed_worktree_thread_activates_worktree_workspace( + cx: &mut TestAppContext, +) { + init_test(cx); + let fs = FakeFs::new(cx.executor()); + + fs.insert_tree( + "/project", + serde_json::json!({ + ".git": {}, + "src": {}, + }), + ) + .await; + + fs.add_linked_worktree_for_repo( + Path::new("/project/.git"), + false, + git::repository::Worktree { + path: std::path::PathBuf::from("/wt-feature-a"), + ref_name: Some("refs/heads/feature-a".into()), + sha: "aaa".into(), + is_main: false, + }, + ) + .await; + + cx.update(|cx| ::set_global(fs.clone(), cx)); + + let main_project = project::Project::test(fs.clone(), ["/project".as_ref()], cx).await; + let worktree_project = project::Project::test(fs.clone(), ["/wt-feature-a".as_ref()], cx).await; + + main_project + .update(cx, |p, cx| p.git_scans_complete(cx)) + .await; + worktree_project + .update(cx, |p, cx| p.git_scans_complete(cx)) + .await; + + let (multi_workspace, cx) = + cx.add_window_view(|window, cx| MultiWorkspace::test_new(main_project.clone(), window, cx)); + + let sidebar = setup_sidebar(&multi_workspace, cx); + + let worktree_workspace = multi_workspace.update_in(cx, |mw, window, cx| { + mw.test_add_workspace(worktree_project.clone(), window, cx) + }); + + // Activate the main workspace before setting up the sidebar. 
+ let main_workspace = multi_workspace.update_in(cx, |mw, window, cx| { + let workspace = mw.workspaces().next().unwrap().clone(); + mw.activate(workspace.clone(), window, cx); + workspace + }); + + save_named_thread_metadata("thread-main", "Main Thread", &main_project, cx).await; + save_named_thread_metadata("thread-wt", "WT Thread", &worktree_project, cx).await; + + multi_workspace.update_in(cx, |_, _window, cx| cx.notify()); + cx.run_until_parked(); + + // The worktree workspace should be absorbed under the main repo. + let entries = visible_entries_as_strings(&sidebar, cx); + assert_eq!(entries.len(), 3); + assert_eq!(entries[0], "v [project]"); + assert!(entries.contains(&" Main Thread".to_string())); + assert!(entries.contains(&" WT Thread {wt-feature-a}".to_string())); + + let wt_thread_index = entries + .iter() + .position(|e| e.contains("WT Thread")) + .expect("should find the worktree thread entry"); + + assert_eq!( + multi_workspace.read_with(cx, |mw, _| mw.workspace().clone()), + main_workspace, + "main workspace should be active initially" + ); + + // Focus the sidebar and select the absorbed worktree thread. + focus_sidebar(&sidebar, cx); + sidebar.update_in(cx, |sidebar, _window, _cx| { + sidebar.selection = Some(wt_thread_index); + }); + + // Confirm to activate the worktree thread. + cx.dispatch_action(Confirm); + cx.run_until_parked(); + + // The worktree workspace should now be active, not the main one. + let active_workspace = multi_workspace.read_with(cx, |mw, _| mw.workspace().clone()); + assert_eq!( + active_workspace, worktree_workspace, + "clicking an absorbed worktree thread should activate the worktree workspace" + ); +} + +#[gpui::test] +async fn test_activate_archived_thread_with_saved_paths_activates_matching_workspace( + cx: &mut TestAppContext, +) { + // Thread has saved metadata in ThreadStore. A matching workspace is + // already open. Expected: activates the matching workspace. 
+ init_test(cx);
+ let fs = FakeFs::new(cx.executor());
+ fs.insert_tree("/project-a", serde_json::json!({ "src": {} }))
+ .await;
+ fs.insert_tree("/project-b", serde_json::json!({ "src": {} }))
+ .await;
+ cx.update(|cx| <dyn Fs>::set_global(fs.clone(), cx));
+
+ let project_a = project::Project::test(fs.clone(), ["/project-a".as_ref()], cx).await;
+ let project_b = project::Project::test(fs.clone(), ["/project-b".as_ref()], cx).await;
+
+ let (multi_workspace, cx) =
+ cx.add_window_view(|window, cx| MultiWorkspace::test_new(project_a.clone(), window, cx));
+
+ let sidebar = setup_sidebar(&multi_workspace, cx);
+
+ let workspace_b = multi_workspace.update_in(cx, |mw, window, cx| {
+ mw.test_add_workspace(project_b.clone(), window, cx)
+ });
+ let workspace_a =
+ multi_workspace.read_with(cx, |mw, _| mw.workspaces().next().unwrap().clone());
+
+ // Save a thread with path_list pointing to project-b.
+ let session_id = acp::SessionId::new(Arc::from("archived-1"));
+ save_test_thread_metadata(&session_id, &project_b, cx).await;
+
+ // Ensure workspace A is active.
+ multi_workspace.update_in(cx, |mw, window, cx| {
+ let workspace = mw.workspaces().next().unwrap().clone();
+ mw.activate(workspace, window, cx);
+ });
+ cx.run_until_parked();
+ assert_eq!(
+ multi_workspace.read_with(cx, |mw, _| mw.workspace().clone()),
+ workspace_a
+ );
+
+ // Call activate_archived_thread – should resolve saved paths and
+ // switch to the workspace for project-b.
+ sidebar.update_in(cx, |sidebar, window, cx| { + sidebar.activate_archived_thread( + ThreadMetadata { + session_id: session_id.clone(), + agent_id: agent::ZED_AGENT_ID.clone(), + title: "Archived Thread".into(), + updated_at: Utc::now(), + created_at: None, + worktree_paths: ThreadWorktreePaths::from_folder_paths(&PathList::new(&[ + PathBuf::from("/project-b"), + ])), + archived: false, + remote_connection: None, + }, + window, + cx, + ); + }); + cx.run_until_parked(); + + assert_eq!( + multi_workspace.read_with(cx, |mw, _| mw.workspace().clone()), + workspace_b, + "should have switched to the workspace matching the saved paths" + ); +} + +#[gpui::test] +async fn test_activate_archived_thread_cwd_fallback_with_matching_workspace( + cx: &mut TestAppContext, +) { + // Thread has no saved metadata but session_info has cwd. A matching + // workspace is open. Expected: uses cwd to find and activate it. + init_test(cx); + let fs = FakeFs::new(cx.executor()); + fs.insert_tree("/project-a", serde_json::json!({ "src": {} })) + .await; + fs.insert_tree("/project-b", serde_json::json!({ "src": {} })) + .await; + cx.update(|cx| ::set_global(fs.clone(), cx)); + + let project_a = project::Project::test(fs.clone(), ["/project-a".as_ref()], cx).await; + let project_b = project::Project::test(fs.clone(), ["/project-b".as_ref()], cx).await; + + let (multi_workspace, cx) = + cx.add_window_view(|window, cx| MultiWorkspace::test_new(project_a, window, cx)); + + let sidebar = setup_sidebar(&multi_workspace, cx); + + let workspace_b = multi_workspace.update_in(cx, |mw, window, cx| { + mw.test_add_workspace(project_b, window, cx) + }); + let workspace_a = + multi_workspace.read_with(cx, |mw, _| mw.workspaces().next().unwrap().clone()); + + // Start with workspace A active. 
+ multi_workspace.update_in(cx, |mw, window, cx| { + let workspace = mw.workspaces().next().unwrap().clone(); + mw.activate(workspace, window, cx); + }); + cx.run_until_parked(); + assert_eq!( + multi_workspace.read_with(cx, |mw, _| mw.workspace().clone()), + workspace_a + ); + + // No thread saved to the store – cwd is the only path hint. + sidebar.update_in(cx, |sidebar, window, cx| { + sidebar.activate_archived_thread( + ThreadMetadata { + session_id: acp::SessionId::new(Arc::from("unknown-session")), + agent_id: agent::ZED_AGENT_ID.clone(), + title: "CWD Thread".into(), + updated_at: Utc::now(), + created_at: None, + worktree_paths: ThreadWorktreePaths::from_folder_paths(&PathList::new(&[ + std::path::PathBuf::from("/project-b"), + ])), + archived: false, + remote_connection: None, + }, + window, + cx, + ); + }); + cx.run_until_parked(); + + assert_eq!( + multi_workspace.read_with(cx, |mw, _| mw.workspace().clone()), + workspace_b, + "should have activated the workspace matching the cwd" + ); +} + +#[gpui::test] async fn test_activate_archived_thread_no_paths_no_cwd_uses_active_workspace( cx: &mut TestAppContext, ) { @@ -3657,9 +4812,9 @@ async fn test_activate_archived_thread_no_paths_no_cwd_uses_active_workspace( title: "Contextless Thread".into(), updated_at: Utc::now(), created_at: None, - folder_paths: PathList::default(), - main_worktree_paths: PathList::default(), + worktree_paths: ThreadWorktreePaths::default(), archived: false, + remote_connection: None, }, window, cx, @@ -3712,9 +4867,9 @@ async fn test_activate_archived_thread_saved_paths_opens_new_workspace(cx: &mut title: "New WS Thread".into(), updated_at: Utc::now(), created_at: None, - folder_paths: path_list_b, - main_worktree_paths: PathList::default(), + worktree_paths: ThreadWorktreePaths::from_folder_paths(&path_list_b), archived: false, + remote_connection: None, }, window, cx, @@ -3766,9 +4921,11 @@ async fn test_activate_archived_thread_reuses_workspace_in_another_window(cx: &m title: 
"Cross Window Thread".into(), updated_at: Utc::now(), created_at: None, - folder_paths: PathList::new(&[PathBuf::from("/project-b")]), - main_worktree_paths: PathList::default(), + worktree_paths: ThreadWorktreePaths::from_folder_paths(&PathList::new(&[ + PathBuf::from("/project-b"), + ])), archived: false, + remote_connection: None, }, window, cx, @@ -3843,9 +5000,11 @@ async fn test_activate_archived_thread_reuses_workspace_in_another_window_with_t title: "Cross Window Thread".into(), updated_at: Utc::now(), created_at: None, - folder_paths: PathList::new(&[PathBuf::from("/project-b")]), - main_worktree_paths: PathList::default(), + worktree_paths: ThreadWorktreePaths::from_folder_paths(&PathList::new(&[ + PathBuf::from("/project-b"), + ])), archived: false, + remote_connection: None, }, window, cx, @@ -3923,9 +5082,11 @@ async fn test_activate_archived_thread_prefers_current_window_for_matching_paths title: "Current Window Thread".into(), updated_at: Utc::now(), created_at: None, - folder_paths: PathList::new(&[PathBuf::from("/project-a")]), - main_worktree_paths: PathList::default(), + worktree_paths: ThreadWorktreePaths::from_folder_paths(&PathList::new(&[ + PathBuf::from("/project-a"), + ])), archived: false, + remote_connection: None, }, window, cx, @@ -4223,6 +5384,13 @@ async fn test_archive_last_worktree_thread_removes_workspace(cx: &mut TestAppCon sidebar.update_in(cx, |sidebar: &mut Sidebar, window, cx| { sidebar.archive_thread(&wt_thread_id, window, cx); }); + + // archive_thread spawns a multi-layered chain of tasks (workspace + // removal → git persist → disk removal), each of which may spawn + // further background work. Each run_until_parked() call drives one + // layer of pending work. + cx.run_until_parked(); + cx.run_until_parked(); cx.run_until_parked(); // The linked worktree workspace should have been removed. 
@@ -4232,6 +5400,12 @@ async fn test_archive_last_worktree_thread_removes_workspace(cx: &mut TestAppCon "linked worktree workspace should be removed after archiving its last thread" ); + // The linked worktree checkout directory should also be removed from disk. + assert!( + !fs.is_dir(Path::new("/wt-feature-a")).await, + "linked worktree directory should be removed from disk after archiving its last thread" + ); + // The main thread should still be visible. let entries = visible_entries_as_strings(&sidebar, cx); assert!( @@ -4322,8 +5496,8 @@ async fn test_linked_worktree_threads_not_duplicated_across_groups(cx: &mut Test assert_eq!( visible_entries_as_strings(&sidebar, cx), vec![ + // "v [other, project]", - " [+ New Thread]", "v [project]", " Worktree Thread {wt-feature-a}", ] @@ -4379,356 +5553,759 @@ async fn test_thread_switcher_ordering(cx: &mut TestAppContext) { send_message(&panel, cx); let session_id_c = active_session_id(&panel, cx); save_thread_metadata( - session_id_c.clone(), - "Thread C".into(), - chrono::TimeZone::with_ymd_and_hms(&Utc, 2024, 1, 1, 0, 0, 0).unwrap(), - Some(chrono::TimeZone::with_ymd_and_hms(&Utc, 2024, 1, 1, 0, 0, 0).unwrap()), + session_id_c.clone(), + "Thread C".into(), + chrono::TimeZone::with_ymd_and_hms(&Utc, 2024, 1, 1, 0, 0, 0).unwrap(), + Some(chrono::TimeZone::with_ymd_and_hms(&Utc, 2024, 1, 1, 0, 0, 0).unwrap()), + &project, + cx, + ); + + let connection_b = StubAgentConnection::new(); + connection_b.set_next_prompt_updates(vec![acp::SessionUpdate::AgentMessageChunk( + acp::ContentChunk::new("Done B".into()), + )]); + open_thread_with_connection(&panel, connection_b, cx); + send_message(&panel, cx); + let session_id_b = active_session_id(&panel, cx); + save_thread_metadata( + session_id_b.clone(), + "Thread B".into(), + chrono::TimeZone::with_ymd_and_hms(&Utc, 2024, 1, 2, 0, 0, 0).unwrap(), + Some(chrono::TimeZone::with_ymd_and_hms(&Utc, 2024, 1, 2, 0, 0, 0).unwrap()), + &project, + cx, + ); + + let connection_a = 
StubAgentConnection::new(); + connection_a.set_next_prompt_updates(vec![acp::SessionUpdate::AgentMessageChunk( + acp::ContentChunk::new("Done A".into()), + )]); + open_thread_with_connection(&panel, connection_a, cx); + send_message(&panel, cx); + let session_id_a = active_session_id(&panel, cx); + save_thread_metadata( + session_id_a.clone(), + "Thread A".into(), + chrono::TimeZone::with_ymd_and_hms(&Utc, 2024, 1, 3, 0, 0, 0).unwrap(), + Some(chrono::TimeZone::with_ymd_and_hms(&Utc, 2024, 1, 3, 0, 0, 0).unwrap()), + &project, + cx, + ); + + // All three threads are now live. Thread A was opened last, so it's + // the one being viewed. Opening each thread called record_thread_access, + // so all three have last_accessed_at set. + // Access order is: A (most recent), B, C (oldest). + + // ── 1. Open switcher: threads sorted by last_accessed_at ───────────────── + focus_sidebar(&sidebar, cx); + sidebar.update_in(cx, |sidebar, window, cx| { + sidebar.on_toggle_thread_switcher(&ToggleThreadSwitcher::default(), window, cx); + }); + cx.run_until_parked(); + + // All three have last_accessed_at, so they sort by access time. + // A was accessed most recently (it's the currently viewed thread), + // then B, then C. + assert_eq!( + switcher_ids(&sidebar, cx), + vec![ + session_id_a.clone(), + session_id_b.clone(), + session_id_c.clone() + ], + ); + // First ctrl-tab selects the second entry (B). + assert_eq!(switcher_selected_id(&sidebar, cx), session_id_b); + + // Dismiss the switcher without confirming. + sidebar.update_in(cx, |sidebar, _window, cx| { + sidebar.dismiss_thread_switcher(cx); + }); + cx.run_until_parked(); + + // ── 2. Confirm on Thread C: it becomes most-recently-accessed ────── + sidebar.update_in(cx, |sidebar, window, cx| { + sidebar.on_toggle_thread_switcher(&ToggleThreadSwitcher::default(), window, cx); + }); + cx.run_until_parked(); + + // Cycle twice to land on Thread C (index 2). 
+ sidebar.read_with(cx, |sidebar, cx| { + let switcher = sidebar.thread_switcher.as_ref().unwrap(); + assert_eq!(switcher.read(cx).selected_index(), 1); + }); + sidebar.update_in(cx, |sidebar, _window, cx| { + sidebar + .thread_switcher + .as_ref() + .unwrap() + .update(cx, |s, cx| s.cycle_selection(cx)); + }); + cx.run_until_parked(); + assert_eq!(switcher_selected_id(&sidebar, cx), session_id_c); + + assert!(sidebar.update(cx, |sidebar, _cx| sidebar.thread_last_accessed.is_empty())); + + // Confirm on Thread C. + sidebar.update_in(cx, |sidebar, window, cx| { + let switcher = sidebar.thread_switcher.as_ref().unwrap(); + let focus = switcher.focus_handle(cx); + focus.dispatch_action(&menu::Confirm, window, cx); + }); + cx.run_until_parked(); + + // Switcher should be dismissed after confirm. + sidebar.read_with(cx, |sidebar, _cx| { + assert!( + sidebar.thread_switcher.is_none(), + "switcher should be dismissed" + ); + }); + + sidebar.update(cx, |sidebar, _cx| { + let last_accessed = sidebar + .thread_last_accessed + .keys() + .cloned() + .collect::>(); + assert_eq!(last_accessed.len(), 1); + assert!(last_accessed.contains(&session_id_c)); + assert!( + sidebar + .active_entry + .as_ref() + .expect("active_entry should be set") + .is_active_thread(&session_id_c) + ); + }); + + sidebar.update_in(cx, |sidebar, window, cx| { + sidebar.on_toggle_thread_switcher(&ToggleThreadSwitcher::default(), window, cx); + }); + cx.run_until_parked(); + + assert_eq!( + switcher_ids(&sidebar, cx), + vec![ + session_id_c.clone(), + session_id_a.clone(), + session_id_b.clone() + ], + ); + + // Confirm on Thread A. 
+ sidebar.update_in(cx, |sidebar, window, cx| { + let switcher = sidebar.thread_switcher.as_ref().unwrap(); + let focus = switcher.focus_handle(cx); + focus.dispatch_action(&menu::Confirm, window, cx); + }); + cx.run_until_parked(); + + sidebar.update(cx, |sidebar, _cx| { + let last_accessed = sidebar + .thread_last_accessed + .keys() + .cloned() + .collect::>(); + assert_eq!(last_accessed.len(), 2); + assert!(last_accessed.contains(&session_id_c)); + assert!(last_accessed.contains(&session_id_a)); + assert!( + sidebar + .active_entry + .as_ref() + .expect("active_entry should be set") + .is_active_thread(&session_id_a) + ); + }); + + sidebar.update_in(cx, |sidebar, window, cx| { + sidebar.on_toggle_thread_switcher(&ToggleThreadSwitcher::default(), window, cx); + }); + cx.run_until_parked(); + + assert_eq!( + switcher_ids(&sidebar, cx), + vec![ + session_id_a.clone(), + session_id_c.clone(), + session_id_b.clone(), + ], + ); + + sidebar.update_in(cx, |sidebar, _window, cx| { + let switcher = sidebar.thread_switcher.as_ref().unwrap(); + switcher.update(cx, |switcher, cx| switcher.cycle_selection(cx)); + }); + cx.run_until_parked(); + + // Confirm on Thread B. + sidebar.update_in(cx, |sidebar, window, cx| { + let switcher = sidebar.thread_switcher.as_ref().unwrap(); + let focus = switcher.focus_handle(cx); + focus.dispatch_action(&menu::Confirm, window, cx); + }); + cx.run_until_parked(); + + sidebar.update(cx, |sidebar, _cx| { + let last_accessed = sidebar + .thread_last_accessed + .keys() + .cloned() + .collect::>(); + assert_eq!(last_accessed.len(), 3); + assert!(last_accessed.contains(&session_id_c)); + assert!(last_accessed.contains(&session_id_a)); + assert!(last_accessed.contains(&session_id_b)); + assert!( + sidebar + .active_entry + .as_ref() + .expect("active_entry should be set") + .is_active_thread(&session_id_b) + ); + }); + + // ── 3. 
Add a historical thread (no last_accessed_at, no message sent) ── + // This thread was never opened in a panel — it only exists in metadata. + save_thread_metadata( + acp::SessionId::new(Arc::from("thread-historical")), + "Historical Thread".into(), + chrono::TimeZone::with_ymd_and_hms(&Utc, 2024, 6, 1, 0, 0, 0).unwrap(), + Some(chrono::TimeZone::with_ymd_and_hms(&Utc, 2024, 6, 1, 0, 0, 0).unwrap()), &project, cx, ); - let connection_b = StubAgentConnection::new(); - connection_b.set_next_prompt_updates(vec![acp::SessionUpdate::AgentMessageChunk( - acp::ContentChunk::new("Done B".into()), - )]); - open_thread_with_connection(&panel, connection_b, cx); - send_message(&panel, cx); - let session_id_b = active_session_id(&panel, cx); - save_thread_metadata( - session_id_b.clone(), - "Thread B".into(), - chrono::TimeZone::with_ymd_and_hms(&Utc, 2024, 1, 2, 0, 0, 0).unwrap(), - Some(chrono::TimeZone::with_ymd_and_hms(&Utc, 2024, 1, 2, 0, 0, 0).unwrap()), - &project, - cx, + sidebar.update_in(cx, |sidebar, window, cx| { + sidebar.on_toggle_thread_switcher(&ToggleThreadSwitcher::default(), window, cx); + }); + cx.run_until_parked(); + + // Historical Thread has no last_accessed_at and no last_message_sent_or_queued, + // so it falls to tier 3 (sorted by created_at). It should appear after all + // accessed threads, even though its created_at (June 2024) is much later + // than the others. + // + // But the live threads (A, B, C) each had send_message called which sets + // last_message_sent_or_queued. So for the accessed threads (tier 1) the + // sort key is last_accessed_at; for Historical Thread (tier 3) it's created_at. 
+ let session_id_hist = acp::SessionId::new(Arc::from("thread-historical")); + + let ids = switcher_ids(&sidebar, cx); + assert_eq!( + ids, + vec![ + session_id_b.clone(), + session_id_a.clone(), + session_id_c.clone(), + session_id_hist.clone() + ], ); - let connection_a = StubAgentConnection::new(); - connection_a.set_next_prompt_updates(vec![acp::SessionUpdate::AgentMessageChunk( - acp::ContentChunk::new("Done A".into()), - )]); - open_thread_with_connection(&panel, connection_a, cx); - send_message(&panel, cx); - let session_id_a = active_session_id(&panel, cx); + sidebar.update_in(cx, |sidebar, _window, cx| { + sidebar.dismiss_thread_switcher(cx); + }); + cx.run_until_parked(); + + // ── 4. Add another historical thread with older created_at ───────── save_thread_metadata( - session_id_a.clone(), - "Thread A".into(), - chrono::TimeZone::with_ymd_and_hms(&Utc, 2024, 1, 3, 0, 0, 0).unwrap(), - Some(chrono::TimeZone::with_ymd_and_hms(&Utc, 2024, 1, 3, 0, 0, 0).unwrap()), + acp::SessionId::new(Arc::from("thread-old-historical")), + "Old Historical Thread".into(), + chrono::TimeZone::with_ymd_and_hms(&Utc, 2023, 6, 1, 0, 0, 0).unwrap(), + Some(chrono::TimeZone::with_ymd_and_hms(&Utc, 2023, 6, 1, 0, 0, 0).unwrap()), &project, cx, ); - // All three threads are now live. Thread A was opened last, so it's - // the one being viewed. Opening each thread called record_thread_access, - // so all three have last_accessed_at set. - // Access order is: A (most recent), B, C (oldest). - - // ── 1. Open switcher: threads sorted by last_accessed_at ───────────────── - focus_sidebar(&sidebar, cx); sidebar.update_in(cx, |sidebar, window, cx| { sidebar.on_toggle_thread_switcher(&ToggleThreadSwitcher::default(), window, cx); }); cx.run_until_parked(); - // All three have last_accessed_at, so they sort by access time. - // A was accessed most recently (it's the currently viewed thread), - // then B, then C. + // Both historical threads have no access or message times. 
They should + // appear after accessed threads, sorted by created_at (newest first). + let session_id_old_hist = acp::SessionId::new(Arc::from("thread-old-historical")); + let ids = switcher_ids(&sidebar, cx); assert_eq!( - switcher_ids(&sidebar, cx), + ids, vec![ - session_id_a.clone(), - session_id_b.clone(), - session_id_c.clone() + session_id_b, + session_id_a, + session_id_c, + session_id_hist, + session_id_old_hist, ], ); - // First ctrl-tab selects the second entry (B). - assert_eq!(switcher_selected_id(&sidebar, cx), session_id_b); - // Dismiss the switcher without confirming. sidebar.update_in(cx, |sidebar, _window, cx| { sidebar.dismiss_thread_switcher(cx); }); cx.run_until_parked(); +} + +#[gpui::test] +async fn test_archive_thread_keeps_metadata_but_hides_from_sidebar(cx: &mut TestAppContext) { + let project = init_test_project("/my-project", cx).await; + let (multi_workspace, cx) = + cx.add_window_view(|window, cx| MultiWorkspace::test_new(project.clone(), window, cx)); + let sidebar = setup_sidebar(&multi_workspace, cx); + + save_thread_metadata( + acp::SessionId::new(Arc::from("thread-to-archive")), + "Thread To Archive".into(), + chrono::TimeZone::with_ymd_and_hms(&Utc, 2024, 1, 1, 0, 0, 0).unwrap(), + None, + &project, + cx, + ); + cx.run_until_parked(); + + multi_workspace.update_in(cx, |_, _window, cx| cx.notify()); + cx.run_until_parked(); + + let entries = visible_entries_as_strings(&sidebar, cx); + assert!( + entries.iter().any(|e| e.contains("Thread To Archive")), + "expected thread to be visible before archiving, got: {entries:?}" + ); - // ── 2. Confirm on Thread C: it becomes most-recently-accessed ────── sidebar.update_in(cx, |sidebar, window, cx| { - sidebar.on_toggle_thread_switcher(&ToggleThreadSwitcher::default(), window, cx); + sidebar.archive_thread( + &acp::SessionId::new(Arc::from("thread-to-archive")), + window, + cx, + ); }); cx.run_until_parked(); - // Cycle twice to land on Thread C (index 2). 
- sidebar.read_with(cx, |sidebar, cx| { - let switcher = sidebar.thread_switcher.as_ref().unwrap(); - assert_eq!(switcher.read(cx).selected_index(), 1); + let entries = visible_entries_as_strings(&sidebar, cx); + assert!( + !entries.iter().any(|e| e.contains("Thread To Archive")), + "expected thread to be hidden after archiving, got: {entries:?}" + ); + + cx.update(|_, cx| { + let store = ThreadMetadataStore::global(cx); + let archived: Vec<_> = store.read(cx).archived_entries().collect(); + assert_eq!(archived.len(), 1); + assert_eq!(archived[0].session_id.0.as_ref(), "thread-to-archive"); + assert!(archived[0].archived); }); - sidebar.update_in(cx, |sidebar, _window, cx| { - sidebar - .thread_switcher - .as_ref() - .unwrap() - .update(cx, |s, cx| s.cycle_selection(cx)); +} + +#[gpui::test] +async fn test_archive_thread_active_entry_management(cx: &mut TestAppContext) { + // Tests two archive scenarios: + // 1. Archiving a thread in a non-active workspace leaves active_entry + // as the current draft. + // 2. Archiving the thread the user is looking at falls back to a draft + // on the same workspace. 
+ agent_ui::test_support::init_test(cx); + cx.update(|cx| { + ThreadStore::init_global(cx); + ThreadMetadataStore::init_global(cx); + language_model::LanguageModelRegistry::test(cx); + prompt_store::init(cx); + }); + + let fs = FakeFs::new(cx.executor()); + fs.insert_tree("/project-a", serde_json::json!({ "src": {} })) + .await; + fs.insert_tree("/project-b", serde_json::json!({ "src": {} })) + .await; + cx.update(|cx| ::set_global(fs.clone(), cx)); + + let project_a = project::Project::test(fs.clone(), ["/project-a".as_ref()], cx).await; + let project_b = project::Project::test(fs.clone(), ["/project-b".as_ref()], cx).await; + + let (multi_workspace, cx) = + cx.add_window_view(|window, cx| MultiWorkspace::test_new(project_a.clone(), window, cx)); + let (sidebar, panel_a) = setup_sidebar_with_agent_panel(&multi_workspace, cx); + + let workspace_b = multi_workspace.update_in(cx, |mw, window, cx| { + mw.test_add_workspace(project_b.clone(), window, cx) }); + let panel_b = add_agent_panel(&workspace_b, cx); cx.run_until_parked(); - assert_eq!(switcher_selected_id(&sidebar, cx), session_id_c); - assert!(sidebar.update(cx, |sidebar, _cx| sidebar.thread_last_accessed.is_empty())); + // --- Scenario 1: archive a thread in the non-active workspace --- + + // Create a thread in project-a (non-active — project-b is active). + let connection = acp_thread::StubAgentConnection::new(); + connection.set_next_prompt_updates(vec![acp::SessionUpdate::AgentMessageChunk( + acp::ContentChunk::new("Done".into()), + )]); + agent_ui::test_support::open_thread_with_connection(&panel_a, connection, cx); + agent_ui::test_support::send_message(&panel_a, cx); + let thread_a = agent_ui::test_support::active_session_id(&panel_a, cx); + cx.run_until_parked(); - // Confirm on Thread C. 
sidebar.update_in(cx, |sidebar, window, cx| { - let switcher = sidebar.thread_switcher.as_ref().unwrap(); - let focus = switcher.focus_handle(cx); - focus.dispatch_action(&menu::Confirm, window, cx); + sidebar.archive_thread(&thread_a, window, cx); }); cx.run_until_parked(); - // Switcher should be dismissed after confirm. - sidebar.read_with(cx, |sidebar, _cx| { + // active_entry should still be a draft on workspace_b (the active one). + sidebar.read_with(cx, |sidebar, _| { assert!( - sidebar.thread_switcher.is_none(), - "switcher should be dismissed" + matches!(&sidebar.active_entry, Some(ActiveEntry::Draft(ws)) if ws == &workspace_b), + "expected Draft(workspace_b) after archiving non-active thread, got: {:?}", + sidebar.active_entry, ); }); - sidebar.update(cx, |sidebar, _cx| { - let last_accessed = sidebar - .thread_last_accessed - .keys() - .cloned() - .collect::>(); - assert_eq!(last_accessed.len(), 1); - assert!(last_accessed.contains(&session_id_c)); + // --- Scenario 2: archive the thread the user is looking at --- + + // Create a thread in project-b (the active workspace) and verify it + // becomes the active entry. + let connection = acp_thread::StubAgentConnection::new(); + connection.set_next_prompt_updates(vec![acp::SessionUpdate::AgentMessageChunk( + acp::ContentChunk::new("Done".into()), + )]); + agent_ui::test_support::open_thread_with_connection(&panel_b, connection, cx); + agent_ui::test_support::send_message(&panel_b, cx); + let thread_b = agent_ui::test_support::active_session_id(&panel_b, cx); + cx.run_until_parked(); + + sidebar.read_with(cx, |sidebar, _| { + assert!( + matches!(&sidebar.active_entry, Some(ActiveEntry::Thread { session_id, .. 
}) if *session_id == thread_b), + "expected active_entry to be Thread({thread_b}), got: {:?}", + sidebar.active_entry, + ); + }); + + sidebar.update_in(cx, |sidebar, window, cx| { + sidebar.archive_thread(&thread_b, window, cx); + }); + cx.run_until_parked(); + + // Should fall back to a draft on the same workspace. + sidebar.read_with(cx, |sidebar, _| { assert!( - sidebar - .active_entry - .as_ref() - .expect("active_entry should be set") - .is_active_thread(&session_id_c) + matches!(&sidebar.active_entry, Some(ActiveEntry::Draft(ws)) if ws == &workspace_b), + "expected Draft(workspace_b) after archiving active thread, got: {:?}", + sidebar.active_entry, ); }); +} - sidebar.update_in(cx, |sidebar, window, cx| { - sidebar.on_toggle_thread_switcher(&ToggleThreadSwitcher::default(), window, cx); +#[gpui::test] +async fn test_switch_to_workspace_with_archived_thread_shows_draft(cx: &mut TestAppContext) { + // When a thread is archived while the user is in a different workspace, + // the archiving code clears the thread from its panel (via + // `clear_active_thread`). Switching back to that workspace should show + // a draft, not the archived thread. + agent_ui::test_support::init_test(cx); + cx.update(|cx| { + ThreadStore::init_global(cx); + ThreadMetadataStore::init_global(cx); + language_model::LanguageModelRegistry::test(cx); + prompt_store::init(cx); }); - cx.run_until_parked(); - assert_eq!( - switcher_ids(&sidebar, cx), - vec![ - session_id_c.clone(), - session_id_a.clone(), - session_id_b.clone() - ], - ); + let fs = FakeFs::new(cx.executor()); + fs.insert_tree("/project-a", serde_json::json!({ "src": {} })) + .await; + fs.insert_tree("/project-b", serde_json::json!({ "src": {} })) + .await; + cx.update(|cx| ::set_global(fs.clone(), cx)); - // Confirm on Thread A. 
- sidebar.update_in(cx, |sidebar, window, cx| { - let switcher = sidebar.thread_switcher.as_ref().unwrap(); - let focus = switcher.focus_handle(cx); - focus.dispatch_action(&menu::Confirm, window, cx); + let project_a = project::Project::test(fs.clone(), ["/project-a".as_ref()], cx).await; + let project_b = project::Project::test(fs.clone(), ["/project-b".as_ref()], cx).await; + + let (multi_workspace, cx) = + cx.add_window_view(|window, cx| MultiWorkspace::test_new(project_a.clone(), window, cx)); + let (sidebar, panel_a) = setup_sidebar_with_agent_panel(&multi_workspace, cx); + + let workspace_b = multi_workspace.update_in(cx, |mw, window, cx| { + mw.test_add_workspace(project_b.clone(), window, cx) }); + let _panel_b = add_agent_panel(&workspace_b, cx); cx.run_until_parked(); - sidebar.update(cx, |sidebar, _cx| { - let last_accessed = sidebar - .thread_last_accessed - .keys() - .cloned() - .collect::>(); - assert_eq!(last_accessed.len(), 2); - assert!(last_accessed.contains(&session_id_c)); - assert!(last_accessed.contains(&session_id_a)); - assert!( - sidebar - .active_entry - .as_ref() - .expect("active_entry should be set") - .is_active_thread(&session_id_a) - ); - }); + // Create a thread in project-a's panel (currently non-active). + let connection = acp_thread::StubAgentConnection::new(); + connection.set_next_prompt_updates(vec![acp::SessionUpdate::AgentMessageChunk( + acp::ContentChunk::new("Done".into()), + )]); + agent_ui::test_support::open_thread_with_connection(&panel_a, connection, cx); + agent_ui::test_support::send_message(&panel_a, cx); + let thread_a = agent_ui::test_support::active_session_id(&panel_a, cx); + cx.run_until_parked(); + // Archive it while project-b is active. 
sidebar.update_in(cx, |sidebar, window, cx| { - sidebar.on_toggle_thread_switcher(&ToggleThreadSwitcher::default(), window, cx); + sidebar.archive_thread(&thread_a, window, cx); }); cx.run_until_parked(); - assert_eq!( - switcher_ids(&sidebar, cx), - vec![ - session_id_a.clone(), - session_id_c.clone(), - session_id_b.clone(), - ], - ); - - sidebar.update_in(cx, |sidebar, _window, cx| { - let switcher = sidebar.thread_switcher.as_ref().unwrap(); - switcher.update(cx, |switcher, cx| switcher.cycle_selection(cx)); + // Switch back to project-a. Its panel was cleared during archiving, + // so active_entry should be Draft. + let workspace_a = + multi_workspace.read_with(cx, |mw, _| mw.workspaces().next().unwrap().clone()); + multi_workspace.update_in(cx, |mw, window, cx| { + mw.activate(workspace_a.clone(), window, cx); }); cx.run_until_parked(); - // Confirm on Thread B. - sidebar.update_in(cx, |sidebar, window, cx| { - let switcher = sidebar.thread_switcher.as_ref().unwrap(); - let focus = switcher.focus_handle(cx); - focus.dispatch_action(&menu::Confirm, window, cx); + sidebar.update_in(cx, |sidebar, _window, cx| { + sidebar.update_entries(cx); }); cx.run_until_parked(); - sidebar.update(cx, |sidebar, _cx| { - let last_accessed = sidebar - .thread_last_accessed - .keys() - .cloned() - .collect::>(); - assert_eq!(last_accessed.len(), 3); - assert!(last_accessed.contains(&session_id_c)); - assert!(last_accessed.contains(&session_id_a)); - assert!(last_accessed.contains(&session_id_b)); + sidebar.read_with(cx, |sidebar, _| { assert!( - sidebar - .active_entry - .as_ref() - .expect("active_entry should be set") - .is_active_thread(&session_id_b) + matches!(&sidebar.active_entry, Some(ActiveEntry::Draft(ws)) if ws == &workspace_a), + "expected Draft(workspace_a) after switching to workspace with archived thread, got: {:?}", + sidebar.active_entry, ); }); +} + +#[gpui::test] +async fn test_archived_threads_excluded_from_sidebar_entries(cx: &mut TestAppContext) { + let 
project = init_test_project("/my-project", cx).await; + let (multi_workspace, cx) = + cx.add_window_view(|window, cx| MultiWorkspace::test_new(project.clone(), window, cx)); + let sidebar = setup_sidebar(&multi_workspace, cx); - // ── 3. Add a historical thread (no last_accessed_at, no message sent) ── - // This thread was never opened in a panel — it only exists in metadata. save_thread_metadata( - acp::SessionId::new(Arc::from("thread-historical")), - "Historical Thread".into(), - chrono::TimeZone::with_ymd_and_hms(&Utc, 2024, 6, 1, 0, 0, 0).unwrap(), - Some(chrono::TimeZone::with_ymd_and_hms(&Utc, 2024, 6, 1, 0, 0, 0).unwrap()), + acp::SessionId::new(Arc::from("visible-thread")), + "Visible Thread".into(), + chrono::TimeZone::with_ymd_and_hms(&Utc, 2024, 1, 2, 0, 0, 0).unwrap(), + None, &project, cx, ); - sidebar.update_in(cx, |sidebar, window, cx| { - sidebar.on_toggle_thread_switcher(&ToggleThreadSwitcher::default(), window, cx); + let archived_thread_session_id = acp::SessionId::new(Arc::from("archived-thread")); + save_thread_metadata( + archived_thread_session_id.clone(), + "Archived Thread".into(), + chrono::TimeZone::with_ymd_and_hms(&Utc, 2024, 1, 1, 0, 0, 0).unwrap(), + None, + &project, + cx, + ); + + cx.update(|_, cx| { + ThreadMetadataStore::global(cx).update(cx, |store, cx| { + store.archive(&archived_thread_session_id, None, cx) + }) }); cx.run_until_parked(); - // Historical Thread has no last_accessed_at and no last_message_sent_or_queued, - // so it falls to tier 3 (sorted by created_at). It should appear after all - // accessed threads, even though its created_at (June 2024) is much later - // than the others. - // - // But the live threads (A, B, C) each had send_message called which sets - // last_message_sent_or_queued. So for the accessed threads (tier 1) the - // sort key is last_accessed_at; for Historical Thread (tier 3) it's created_at. 
- let session_id_hist = acp::SessionId::new(Arc::from("thread-historical")); + multi_workspace.update_in(cx, |_, _window, cx| cx.notify()); + cx.run_until_parked(); - let ids = switcher_ids(&sidebar, cx); - assert_eq!( - ids, - vec![ - session_id_b.clone(), - session_id_a.clone(), - session_id_c.clone(), - session_id_hist.clone() - ], + let entries = visible_entries_as_strings(&sidebar, cx); + assert!( + entries.iter().any(|e| e.contains("Visible Thread")), + "expected visible thread in sidebar, got: {entries:?}" + ); + assert!( + !entries.iter().any(|e| e.contains("Archived Thread")), + "expected archived thread to be hidden from sidebar, got: {entries:?}" ); - sidebar.update_in(cx, |sidebar, _window, cx| { - sidebar.dismiss_thread_switcher(cx); + cx.update(|_, cx| { + let store = ThreadMetadataStore::global(cx); + let all: Vec<_> = store.read(cx).entries().collect(); + assert_eq!( + all.len(), + 2, + "expected 2 total entries in the store, got: {}", + all.len() + ); + + let archived: Vec<_> = store.read(cx).archived_entries().collect(); + assert_eq!(archived.len(), 1); + assert_eq!(archived[0].session_id.0.as_ref(), "archived-thread"); + }); +} + +#[gpui::test] +async fn test_archive_last_thread_on_linked_worktree_does_not_create_new_thread_on_worktree( + cx: &mut TestAppContext, +) { + // When a linked worktree has a single thread and that thread is archived, + // the sidebar must NOT create a new thread on the same worktree (which + // would prevent the worktree from being cleaned up on disk). Instead, + // archive_thread switches to a sibling thread on the main workspace (or + // creates a draft there) before archiving the metadata. 
+ agent_ui::test_support::init_test(cx); + cx.update(|cx| { + ThreadStore::init_global(cx); + ThreadMetadataStore::init_global(cx); + language_model::LanguageModelRegistry::test(cx); + prompt_store::init(cx); + }); + + let fs = FakeFs::new(cx.executor()); + + fs.insert_tree( + "/project", + serde_json::json!({ + ".git": {}, + "src": {}, + }), + ) + .await; + + fs.add_linked_worktree_for_repo( + Path::new("/project/.git"), + false, + git::repository::Worktree { + path: std::path::PathBuf::from("/wt-ochre-drift"), + ref_name: Some("refs/heads/ochre-drift".into()), + sha: "aaa".into(), + is_main: false, + }, + ) + .await; + + cx.update(|cx| ::set_global(fs.clone(), cx)); + + let main_project = project::Project::test(fs.clone(), ["/project".as_ref()], cx).await; + let worktree_project = + project::Project::test(fs.clone(), ["/wt-ochre-drift".as_ref()], cx).await; + + main_project + .update(cx, |p, cx| p.git_scans_complete(cx)) + .await; + worktree_project + .update(cx, |p, cx| p.git_scans_complete(cx)) + .await; + + let (multi_workspace, cx) = + cx.add_window_view(|window, cx| MultiWorkspace::test_new(main_project.clone(), window, cx)); + + let sidebar = setup_sidebar(&multi_workspace, cx); + + let worktree_workspace = multi_workspace.update_in(cx, |mw, window, cx| { + mw.test_add_workspace(worktree_project.clone(), window, cx) }); - cx.run_until_parked(); - // ── 4. Add another historical thread with older created_at ───────── - save_thread_metadata( - acp::SessionId::new(Arc::from("thread-old-historical")), - "Old Historical Thread".into(), - chrono::TimeZone::with_ymd_and_hms(&Utc, 2023, 6, 1, 0, 0, 0).unwrap(), - Some(chrono::TimeZone::with_ymd_and_hms(&Utc, 2023, 6, 1, 0, 0, 0).unwrap()), - &project, - cx, - ); + // Set up both workspaces with agent panels. 
+ let main_workspace = + multi_workspace.read_with(cx, |mw, _| mw.workspaces().next().unwrap().clone()); + let _main_panel = add_agent_panel(&main_workspace, cx); + let worktree_panel = add_agent_panel(&worktree_workspace, cx); - sidebar.update_in(cx, |sidebar, window, cx| { - sidebar.on_toggle_thread_switcher(&ToggleThreadSwitcher::default(), window, cx); + // Activate the linked worktree workspace so the sidebar tracks it. + multi_workspace.update_in(cx, |mw, window, cx| { + mw.activate(worktree_workspace.clone(), window, cx); }); - cx.run_until_parked(); - // Both historical threads have no access or message times. They should - // appear after accessed threads, sorted by created_at (newest first). - let session_id_old_hist = acp::SessionId::new(Arc::from("thread-old-historical")); - let ids = switcher_ids(&sidebar, cx); - assert_eq!( - ids, - vec![ - session_id_b, - session_id_a, - session_id_c, - session_id_hist, - session_id_old_hist, - ], - ); + // Open a thread in the linked worktree panel and send a message + // so it becomes the active thread. + let connection = StubAgentConnection::new(); + open_thread_with_connection(&worktree_panel, connection.clone(), cx); + send_message(&worktree_panel, cx); - sidebar.update_in(cx, |sidebar, _window, cx| { - sidebar.dismiss_thread_switcher(cx); + let worktree_thread_id = active_session_id(&worktree_panel, cx); + + // Give the thread a response chunk so it has content. 
+ cx.update(|_, cx| { + connection.send_update( + worktree_thread_id.clone(), + acp::SessionUpdate::AgentMessageChunk(acp::ContentChunk::new("done".into())), + cx, + ); }); - cx.run_until_parked(); -} -#[gpui::test] -async fn test_archive_thread_keeps_metadata_but_hides_from_sidebar(cx: &mut TestAppContext) { - let project = init_test_project("/my-project", cx).await; - let (multi_workspace, cx) = - cx.add_window_view(|window, cx| MultiWorkspace::test_new(project.clone(), window, cx)); - let sidebar = setup_sidebar(&multi_workspace, cx); + // Save the worktree thread's metadata. + save_thread_metadata( + worktree_thread_id.clone(), + "Ochre Drift Thread".into(), + chrono::TimeZone::with_ymd_and_hms(&Utc, 2024, 1, 2, 0, 0, 0).unwrap(), + None, + &worktree_project, + cx, + ); + // Also save a thread on the main project so there's a sibling in the + // group that can be selected after archiving. save_thread_metadata( - acp::SessionId::new(Arc::from("thread-to-archive")), - "Thread To Archive".into(), + acp::SessionId::new(Arc::from("main-project-thread")), + "Main Project Thread".into(), chrono::TimeZone::with_ymd_and_hms(&Utc, 2024, 1, 1, 0, 0, 0).unwrap(), None, - &project, + &main_project, cx, ); - cx.run_until_parked(); - multi_workspace.update_in(cx, |_, _window, cx| cx.notify()); cx.run_until_parked(); - let entries = visible_entries_as_strings(&sidebar, cx); + // Verify the linked worktree thread appears with its chip. + // The live thread title comes from the message text ("Hello"), not + // the metadata title we saved. 
+ let entries_before = visible_entries_as_strings(&sidebar, cx); assert!( - entries.iter().any(|e| e.contains("Thread To Archive")), - "expected thread to be visible before archiving, got: {entries:?}" + entries_before + .iter() + .any(|s| s.contains("{wt-ochre-drift}")), + "expected worktree thread with chip before archiving, got: {entries_before:?}" + ); + assert!( + entries_before + .iter() + .any(|s| s.contains("Main Project Thread")), + "expected main project thread before archiving, got: {entries_before:?}" ); - sidebar.update_in(cx, |sidebar, window, cx| { - sidebar.archive_thread( - &acp::SessionId::new(Arc::from("thread-to-archive")), - window, - cx, + // Confirm the worktree thread is the active entry. + sidebar.read_with(cx, |s, _| { + assert_active_thread( + s, + &worktree_thread_id, + "worktree thread should be active before archiving", ); }); + + // Archive the worktree thread — it's the only thread using ochre-drift. + sidebar.update_in(cx, |sidebar, window, cx| { + sidebar.archive_thread(&worktree_thread_id, window, cx); + }); + cx.run_until_parked(); - let entries = visible_entries_as_strings(&sidebar, cx); + // The archived thread should no longer appear in the sidebar. + let entries_after = visible_entries_as_strings(&sidebar, cx); assert!( - !entries.iter().any(|e| e.contains("Thread To Archive")), - "expected thread to be hidden after archiving, got: {entries:?}" + !entries_after + .iter() + .any(|s| s.contains("Ochre Drift Thread")), + "archived thread should be hidden, got: {entries_after:?}" ); - cx.update(|_, cx| { - let store = ThreadMetadataStore::global(cx); - let archived: Vec<_> = store.read(cx).archived_entries().collect(); - assert_eq!(archived.len(), 1); - assert_eq!(archived[0].session_id.0.as_ref(), "thread-to-archive"); - assert!(archived[0].archived); - }); + // No "+ New Thread" entry should appear with the ochre-drift worktree + // chip — that would keep the worktree alive and prevent cleanup. 
+ assert!( + !entries_after.iter().any(|s| s.contains("{wt-ochre-drift}")), + "no entry should reference the archived worktree, got: {entries_after:?}" + ); + + // The main project thread should still be visible. + assert!( + entries_after + .iter() + .any(|s| s.contains("Main Project Thread")), + "main project thread should still be visible, got: {entries_after:?}" + ); } #[gpui::test] -async fn test_archive_thread_active_entry_management(cx: &mut TestAppContext) { - // Tests two archive scenarios: - // 1. Archiving a thread in a non-active workspace leaves active_entry - // as the current draft. - // 2. Archiving the thread the user is looking at falls back to a draft - // on the same workspace. +async fn test_archive_last_thread_on_linked_worktree_with_no_siblings_creates_draft_on_main( + cx: &mut TestAppContext, +) { + // When a linked worktree thread is the ONLY thread in the project group + // (no threads on the main repo either), archiving it should create a + // draft on the main workspace, not the linked worktree workspace. 
agent_ui::test_support::init_test(cx); cx.update(|cx| { ThreadStore::init_global(cx); @@ -4738,226 +6315,253 @@ async fn test_archive_thread_active_entry_management(cx: &mut TestAppContext) { }); let fs = FakeFs::new(cx.executor()); - fs.insert_tree("/project-a", serde_json::json!({ "src": {} })) - .await; - fs.insert_tree("/project-b", serde_json::json!({ "src": {} })) - .await; + + fs.insert_tree( + "/project", + serde_json::json!({ + ".git": {}, + "src": {}, + }), + ) + .await; + + fs.add_linked_worktree_for_repo( + Path::new("/project/.git"), + false, + git::repository::Worktree { + path: std::path::PathBuf::from("/wt-ochre-drift"), + ref_name: Some("refs/heads/ochre-drift".into()), + sha: "aaa".into(), + is_main: false, + }, + ) + .await; + cx.update(|cx| ::set_global(fs.clone(), cx)); - let project_a = project::Project::test(fs.clone(), ["/project-a".as_ref()], cx).await; - let project_b = project::Project::test(fs.clone(), ["/project-b".as_ref()], cx).await; + let main_project = project::Project::test(fs.clone(), ["/project".as_ref()], cx).await; + let worktree_project = + project::Project::test(fs.clone(), ["/wt-ochre-drift".as_ref()], cx).await; + + main_project + .update(cx, |p, cx| p.git_scans_complete(cx)) + .await; + worktree_project + .update(cx, |p, cx| p.git_scans_complete(cx)) + .await; let (multi_workspace, cx) = - cx.add_window_view(|window, cx| MultiWorkspace::test_new(project_a.clone(), window, cx)); - let (sidebar, panel_a) = setup_sidebar_with_agent_panel(&multi_workspace, cx); + cx.add_window_view(|window, cx| MultiWorkspace::test_new(main_project.clone(), window, cx)); - let workspace_b = multi_workspace.update_in(cx, |mw, window, cx| { - mw.test_add_workspace(project_b.clone(), window, cx) - }); - let panel_b = add_agent_panel(&workspace_b, cx); - cx.run_until_parked(); + let sidebar = setup_sidebar(&multi_workspace, cx); - // --- Scenario 1: archive a thread in the non-active workspace --- + let worktree_workspace = 
multi_workspace.update_in(cx, |mw, window, cx| { + mw.test_add_workspace(worktree_project.clone(), window, cx) + }); - // Create a thread in project-a (non-active — project-b is active). - let connection = acp_thread::StubAgentConnection::new(); - connection.set_next_prompt_updates(vec![acp::SessionUpdate::AgentMessageChunk( - acp::ContentChunk::new("Done".into()), - )]); - agent_ui::test_support::open_thread_with_connection(&panel_a, connection, cx); - agent_ui::test_support::send_message(&panel_a, cx); - let thread_a = agent_ui::test_support::active_session_id(&panel_a, cx); - cx.run_until_parked(); + let main_workspace = + multi_workspace.read_with(cx, |mw, _| mw.workspaces().next().unwrap().clone()); + let _main_panel = add_agent_panel(&main_workspace, cx); + let worktree_panel = add_agent_panel(&worktree_workspace, cx); - sidebar.update_in(cx, |sidebar, window, cx| { - sidebar.archive_thread(&thread_a, window, cx); + // Activate the linked worktree workspace. + multi_workspace.update_in(cx, |mw, window, cx| { + mw.activate(worktree_workspace.clone(), window, cx); }); - cx.run_until_parked(); - // active_entry should still be a draft on workspace_b (the active one). - sidebar.read_with(cx, |sidebar, _| { - assert!( - matches!(&sidebar.active_entry, Some(ActiveEntry::Draft(ws)) if ws == &workspace_b), - "expected Draft(workspace_b) after archiving non-active thread, got: {:?}", - sidebar.active_entry, + // Open a thread on the linked worktree — this is the ONLY thread. 
+ let connection = StubAgentConnection::new(); + open_thread_with_connection(&worktree_panel, connection.clone(), cx); + send_message(&worktree_panel, cx); + + let worktree_thread_id = active_session_id(&worktree_panel, cx); + + cx.update(|_, cx| { + connection.send_update( + worktree_thread_id.clone(), + acp::SessionUpdate::AgentMessageChunk(acp::ContentChunk::new("done".into())), + cx, ); }); - // --- Scenario 2: archive the thread the user is looking at --- + save_thread_metadata( + worktree_thread_id.clone(), + "Ochre Drift Thread".into(), + chrono::TimeZone::with_ymd_and_hms(&Utc, 2024, 1, 2, 0, 0, 0).unwrap(), + None, + &worktree_project, + cx, + ); - // Create a thread in project-b (the active workspace) and verify it - // becomes the active entry. - let connection = acp_thread::StubAgentConnection::new(); - connection.set_next_prompt_updates(vec![acp::SessionUpdate::AgentMessageChunk( - acp::ContentChunk::new("Done".into()), - )]); - agent_ui::test_support::open_thread_with_connection(&panel_b, connection, cx); - agent_ui::test_support::send_message(&panel_b, cx); - let thread_b = agent_ui::test_support::active_session_id(&panel_b, cx); cx.run_until_parked(); - sidebar.read_with(cx, |sidebar, _| { - assert!( - matches!(&sidebar.active_entry, Some(ActiveEntry::Thread { session_id, .. }) if *session_id == thread_b), - "expected active_entry to be Thread({thread_b}), got: {:?}", - sidebar.active_entry, - ); - }); - + // Archive it — there are no other threads in the group. sidebar.update_in(cx, |sidebar, window, cx| { - sidebar.archive_thread(&thread_b, window, cx); + sidebar.archive_thread(&worktree_thread_id, window, cx); }); + cx.run_until_parked(); - // Should fall back to a draft on the same workspace. 
- sidebar.read_with(cx, |sidebar, _| { - assert!( - matches!(&sidebar.active_entry, Some(ActiveEntry::Draft(ws)) if ws == &workspace_b), - "expected Draft(workspace_b) after archiving active thread, got: {:?}", - sidebar.active_entry, + let entries_after = visible_entries_as_strings(&sidebar, cx); + + // No entry should reference the linked worktree. + assert!( + !entries_after.iter().any(|s| s.contains("{wt-ochre-drift}")), + "no entry should reference the archived worktree, got: {entries_after:?}" + ); + + // The active entry should be a draft on the main workspace. + sidebar.read_with(cx, |s, _| { + assert_active_draft( + s, + &main_workspace, + "active entry should be a draft on the main workspace", ); }); } -#[gpui::test] -async fn test_switch_to_workspace_with_archived_thread_shows_draft(cx: &mut TestAppContext) { - // When a thread is archived while the user is in a different workspace, - // the archiving code clears the thread from its panel (via - // `clear_active_thread`). Switching back to that workspace should show - // a draft, not the archived thread. - agent_ui::test_support::init_test(cx); - cx.update(|cx| { - ThreadStore::init_global(cx); - ThreadMetadataStore::init_global(cx); - language_model::LanguageModelRegistry::test(cx); - prompt_store::init(cx); - }); +#[gpui::test] +async fn test_archive_thread_on_linked_worktree_selects_sibling_thread(cx: &mut TestAppContext) { + // When a linked worktree thread is archived but the group has other + // threads (e.g. on the main project), archive_thread should select + // the nearest sibling. 
+ agent_ui::test_support::init_test(cx); + cx.update(|cx| { + ThreadStore::init_global(cx); + ThreadMetadataStore::init_global(cx); + language_model::LanguageModelRegistry::test(cx); + prompt_store::init(cx); + }); + + let fs = FakeFs::new(cx.executor()); + + fs.insert_tree( + "/project", + serde_json::json!({ + ".git": {}, + "src": {}, + }), + ) + .await; + + fs.add_linked_worktree_for_repo( + Path::new("/project/.git"), + false, + git::repository::Worktree { + path: std::path::PathBuf::from("/wt-ochre-drift"), + ref_name: Some("refs/heads/ochre-drift".into()), + sha: "aaa".into(), + is_main: false, + }, + ) + .await; + + cx.update(|cx| ::set_global(fs.clone(), cx)); - let fs = FakeFs::new(cx.executor()); - fs.insert_tree("/project-a", serde_json::json!({ "src": {} })) + let main_project = project::Project::test(fs.clone(), ["/project".as_ref()], cx).await; + let worktree_project = + project::Project::test(fs.clone(), ["/wt-ochre-drift".as_ref()], cx).await; + + main_project + .update(cx, |p, cx| p.git_scans_complete(cx)) .await; - fs.insert_tree("/project-b", serde_json::json!({ "src": {} })) + worktree_project + .update(cx, |p, cx| p.git_scans_complete(cx)) .await; - cx.update(|cx| ::set_global(fs.clone(), cx)); - - let project_a = project::Project::test(fs.clone(), ["/project-a".as_ref()], cx).await; - let project_b = project::Project::test(fs.clone(), ["/project-b".as_ref()], cx).await; let (multi_workspace, cx) = - cx.add_window_view(|window, cx| MultiWorkspace::test_new(project_a.clone(), window, cx)); - let (sidebar, panel_a) = setup_sidebar_with_agent_panel(&multi_workspace, cx); - - let workspace_b = multi_workspace.update_in(cx, |mw, window, cx| { - mw.test_add_workspace(project_b.clone(), window, cx) - }); - let _panel_b = add_agent_panel(&workspace_b, cx); - cx.run_until_parked(); + cx.add_window_view(|window, cx| MultiWorkspace::test_new(main_project.clone(), window, cx)); - // Create a thread in project-a's panel (currently non-active). 
- let connection = acp_thread::StubAgentConnection::new(); - connection.set_next_prompt_updates(vec![acp::SessionUpdate::AgentMessageChunk( - acp::ContentChunk::new("Done".into()), - )]); - agent_ui::test_support::open_thread_with_connection(&panel_a, connection, cx); - agent_ui::test_support::send_message(&panel_a, cx); - let thread_a = agent_ui::test_support::active_session_id(&panel_a, cx); - cx.run_until_parked(); + let sidebar = setup_sidebar(&multi_workspace, cx); - // Archive it while project-b is active. - sidebar.update_in(cx, |sidebar, window, cx| { - sidebar.archive_thread(&thread_a, window, cx); + let worktree_workspace = multi_workspace.update_in(cx, |mw, window, cx| { + mw.test_add_workspace(worktree_project.clone(), window, cx) }); - cx.run_until_parked(); - // Switch back to project-a. Its panel was cleared during archiving, - // so active_entry should be Draft. - let workspace_a = + let main_workspace = multi_workspace.read_with(cx, |mw, _| mw.workspaces().next().unwrap().clone()); + let _main_panel = add_agent_panel(&main_workspace, cx); + let worktree_panel = add_agent_panel(&worktree_workspace, cx); + + // Activate the linked worktree workspace. multi_workspace.update_in(cx, |mw, window, cx| { - mw.activate(workspace_a.clone(), window, cx); + mw.activate(worktree_workspace.clone(), window, cx); }); - cx.run_until_parked(); - sidebar.update_in(cx, |sidebar, _window, cx| { - sidebar.update_entries(cx); - }); - cx.run_until_parked(); + // Open a thread on the linked worktree. 
+ let connection = StubAgentConnection::new(); + open_thread_with_connection(&worktree_panel, connection.clone(), cx); + send_message(&worktree_panel, cx); - sidebar.read_with(cx, |sidebar, _| { - assert!( - matches!(&sidebar.active_entry, Some(ActiveEntry::Draft(ws)) if ws == &workspace_a), - "expected Draft(workspace_a) after switching to workspace with archived thread, got: {:?}", - sidebar.active_entry, + let worktree_thread_id = active_session_id(&worktree_panel, cx); + + cx.update(|_, cx| { + connection.send_update( + worktree_thread_id.clone(), + acp::SessionUpdate::AgentMessageChunk(acp::ContentChunk::new("done".into())), + cx, ); }); -} - -#[gpui::test] -async fn test_archived_threads_excluded_from_sidebar_entries(cx: &mut TestAppContext) { - let project = init_test_project("/my-project", cx).await; - let (multi_workspace, cx) = - cx.add_window_view(|window, cx| MultiWorkspace::test_new(project.clone(), window, cx)); - let sidebar = setup_sidebar(&multi_workspace, cx); save_thread_metadata( - acp::SessionId::new(Arc::from("visible-thread")), - "Visible Thread".into(), + worktree_thread_id.clone(), + "Ochre Drift Thread".into(), chrono::TimeZone::with_ymd_and_hms(&Utc, 2024, 1, 2, 0, 0, 0).unwrap(), None, - &project, + &worktree_project, cx, ); - let archived_thread_session_id = acp::SessionId::new(Arc::from("archived-thread")); + // Save a sibling thread on the main project. 
+ let main_thread_id = acp::SessionId::new(Arc::from("main-project-thread")); save_thread_metadata( - archived_thread_session_id.clone(), - "Archived Thread".into(), + main_thread_id, + "Main Project Thread".into(), chrono::TimeZone::with_ymd_and_hms(&Utc, 2024, 1, 1, 0, 0, 0).unwrap(), None, - &project, + &main_project, cx, ); - cx.update(|_, cx| { - ThreadMetadataStore::global(cx).update(cx, |store, cx| { - store.archive(&archived_thread_session_id, cx) - }) - }); cx.run_until_parked(); - multi_workspace.update_in(cx, |_, _window, cx| cx.notify()); + // Confirm the worktree thread is active. + sidebar.read_with(cx, |s, _| { + assert_active_thread( + s, + &worktree_thread_id, + "worktree thread should be active before archiving", + ); + }); + + // Archive the worktree thread. + sidebar.update_in(cx, |sidebar, window, cx| { + sidebar.archive_thread(&worktree_thread_id, window, cx); + }); + cx.run_until_parked(); - let entries = visible_entries_as_strings(&sidebar, cx); + // The worktree workspace was removed and a draft was created on the + // main workspace. No entry should reference the linked worktree. + let entries_after = visible_entries_as_strings(&sidebar, cx); assert!( - entries.iter().any(|e| e.contains("Visible Thread")), - "expected visible thread in sidebar, got: {entries:?}" + !entries_after.iter().any(|s| s.contains("{wt-ochre-drift}")), + "no entry should reference the archived worktree, got: {entries_after:?}" ); + + // The main project thread should still be visible. 
assert!( - !entries.iter().any(|e| e.contains("Archived Thread")), - "expected archived thread to be hidden from sidebar, got: {entries:?}" + entries_after + .iter() + .any(|s| s.contains("Main Project Thread")), + "main project thread should still be visible, got: {entries_after:?}" ); - - cx.update(|_, cx| { - let store = ThreadMetadataStore::global(cx); - let all: Vec<_> = store.read(cx).entries().collect(); - assert_eq!( - all.len(), - 2, - "expected 2 total entries in the store, got: {}", - all.len() - ); - - let archived: Vec<_> = store.read(cx).archived_entries().collect(); - assert_eq!(archived.len(), 1); - assert_eq!(archived[0].session_id.0.as_ref(), "archived-thread"); - }); } #[gpui::test] async fn test_linked_worktree_workspace_reachable_and_dismissable(cx: &mut TestAppContext) { // When a linked worktree is opened as its own workspace and the user - // switches away, the workspace must still be reachable from a NewThread + // switches away, the workspace must still be reachable from a DraftThread // sidebar entry. Pressing RemoveSelectedThread (shift-backspace) on that // entry should remove the workspace. init_test(cx); @@ -5052,7 +6656,7 @@ async fn test_linked_worktree_workspace_reachable_and_dismissable(cx: &mut TestA "linked worktree workspace should be reachable, but reachable are: {reachable:?}" ); - // Find the NewThread entry for the linked worktree and dismiss it. + // Find the DraftThread entry for the linked worktree and dismiss it. let new_thread_ix = sidebar.read_with(cx, |sidebar, _| { sidebar .contents @@ -5061,13 +6665,13 @@ async fn test_linked_worktree_workspace_reachable_and_dismissable(cx: &mut TestA .position(|entry| { matches!( entry, - ListEntry::NewThread { + ListEntry::DraftThread { workspace: Some(_), .. 
} ) }) - .expect("expected a NewThread entry for the linked worktree") + .expect("expected a DraftThread entry for the linked worktree") }); assert_eq!( @@ -5084,7 +6688,7 @@ async fn test_linked_worktree_workspace_reachable_and_dismissable(cx: &mut TestA assert_eq!( multi_workspace.read_with(cx, |mw, _| mw.workspaces().count()), 1, - "linked worktree workspace should be removed after dismissing NewThread entry" + "linked worktree workspace should be removed after dismissing DraftThread entry" ); } @@ -5401,53 +7005,224 @@ async fn test_legacy_thread_with_canonical_path_opens_main_repo_workspace(cx: &m title: "Legacy Main Thread".into(), updated_at: chrono::TimeZone::with_ymd_and_hms(&Utc, 2024, 1, 1, 0, 0, 0).unwrap(), created_at: None, - folder_paths: PathList::new(&[PathBuf::from("/project")]), - main_worktree_paths: PathList::default(), + worktree_paths: ThreadWorktreePaths::from_folder_paths(&PathList::new(&[ + PathBuf::from("/project"), + ])), archived: false, + remote_connection: None, }; ThreadMetadataStore::global(cx).update(cx, |store, cx| store.save_manually(metadata, cx)); }); cx.run_until_parked(); - multi_workspace.update_in(cx, |_, _window, cx| cx.notify()); + multi_workspace.update_in(cx, |_, _window, cx| cx.notify()); + cx.run_until_parked(); + + // The legacy thread should appear in the sidebar under the project group. + let entries = visible_entries_as_strings(&sidebar, cx); + assert!( + entries.iter().any(|e| e.contains("Legacy Main Thread")), + "legacy thread should be visible: {entries:?}", + ); + + // Verify only 1 workspace before clicking. + assert_eq!( + multi_workspace.read_with(cx, |mw, _| mw.workspaces().count()), + 1, + ); + + // Focus and select the legacy thread, then confirm. 
+ focus_sidebar(&sidebar, cx); + let thread_index = sidebar.read_with(cx, |sidebar, _| { + sidebar + .contents + .entries + .iter() + .position(|e| e.session_id().is_some_and(|id| id == &legacy_session)) + .expect("legacy thread should be in entries") + }); + sidebar.update_in(cx, |sidebar, _window, _cx| { + sidebar.selection = Some(thread_index); + }); + cx.dispatch_action(Confirm); + cx.run_until_parked(); + + let new_workspace = multi_workspace.read_with(cx, |mw, _| mw.workspace().clone()); + let new_path_list = + new_workspace.read_with(cx, |_, cx| workspace_path_list(&new_workspace, cx)); + assert_eq!( + new_path_list, + PathList::new(&[PathBuf::from("/project")]), + "the new workspace should be for the main repo, not the linked worktree", + ); +} + +#[gpui::test] +async fn test_linked_worktree_workspace_reachable_after_adding_unrelated_project( + cx: &mut TestAppContext, +) { + // Regression test for a property-test finding: + // AddLinkedWorktree { project_group_index: 0 } + // AddProject { use_worktree: true } + // AddProject { use_worktree: false } + // After these three steps, the linked-worktree workspace was not + // reachable from any sidebar entry. 
+ agent_ui::test_support::init_test(cx); + cx.update(|cx| { + ThreadStore::init_global(cx); + ThreadMetadataStore::init_global(cx); + language_model::LanguageModelRegistry::test(cx); + prompt_store::init(cx); + + cx.observe_new( + |workspace: &mut Workspace, + window: Option<&mut Window>, + cx: &mut gpui::Context| { + if let Some(window) = window { + let panel = cx.new(|cx| AgentPanel::test_new(workspace, window, cx)); + workspace.add_panel(panel, window, cx); + } + }, + ) + .detach(); + }); + + let fs = FakeFs::new(cx.executor()); + fs.insert_tree( + "/my-project", + serde_json::json!({ + ".git": {}, + "src": {}, + }), + ) + .await; + cx.update(|cx| ::set_global(fs.clone(), cx)); + let project = + project::Project::test(fs.clone() as Arc, ["/my-project".as_ref()], cx).await; + project.update(cx, |p, cx| p.git_scans_complete(cx)).await; + + let (multi_workspace, cx) = + cx.add_window_view(|window, cx| MultiWorkspace::test_new(project.clone(), window, cx)); + let sidebar = setup_sidebar(&multi_workspace, cx); + + // Step 1: Create a linked worktree for the main project. 
+ let worktree_name = "wt-0"; + let worktree_path = "/worktrees/wt-0"; + + fs.insert_tree( + worktree_path, + serde_json::json!({ + ".git": "gitdir: /my-project/.git/worktrees/wt-0", + "src": {}, + }), + ) + .await; + fs.insert_tree( + "/my-project/.git/worktrees/wt-0", + serde_json::json!({ + "commondir": "../../", + "HEAD": "ref: refs/heads/wt-0", + }), + ) + .await; + fs.add_linked_worktree_for_repo( + Path::new("/my-project/.git"), + false, + git::repository::Worktree { + path: PathBuf::from(worktree_path), + ref_name: Some(format!("refs/heads/{}", worktree_name).into()), + sha: "aaa".into(), + is_main: false, + }, + ) + .await; + + let main_workspace = multi_workspace.read_with(cx, |mw, _| mw.workspace().clone()); + let main_project = main_workspace.read_with(cx, |ws, _| ws.project().clone()); + main_project + .update(cx, |p, cx| p.git_scans_complete(cx)) + .await; + cx.run_until_parked(); + + // Step 2: Open the linked worktree as its own workspace. + let worktree_project = + project::Project::test(fs.clone() as Arc, [worktree_path.as_ref()], cx).await; + worktree_project + .update(cx, |p, cx| p.git_scans_complete(cx)) + .await; + let worktree_workspace = multi_workspace.update_in(cx, |mw, window, cx| { + mw.test_add_workspace(worktree_project.clone(), window, cx) + }); + cx.run_until_parked(); + + // Step 3: Add an unrelated project. + fs.insert_tree( + "/other-project", + serde_json::json!({ + ".git": {}, + "src": {}, + }), + ) + .await; + let other_project = project::Project::test( + fs.clone() as Arc, + ["/other-project".as_ref()], + cx, + ) + .await; + other_project + .update(cx, |p, cx| p.git_scans_complete(cx)) + .await; + multi_workspace.update_in(cx, |mw, window, cx| { + mw.test_add_workspace(other_project.clone(), window, cx); + }); + cx.run_until_parked(); + + // Force a full sidebar rebuild with all groups expanded. 
+ sidebar.update_in(cx, |sidebar, _window, cx| { + sidebar.collapsed_groups.clear(); + let group_keys: Vec = sidebar + .contents + .entries + .iter() + .filter_map(|entry| match entry { + ListEntry::ProjectHeader { key, .. } => Some(key.clone()), + _ => None, + }) + .collect(); + for group_key in group_keys { + sidebar.expanded_groups.insert(group_key, 10_000); + } + sidebar.update_entries(cx); + }); cx.run_until_parked(); - // The legacy thread should appear in the sidebar under the project group. - let entries = visible_entries_as_strings(&sidebar, cx); - assert!( - entries.iter().any(|e| e.contains("Legacy Main Thread")), - "legacy thread should be visible: {entries:?}", - ); - - // Verify only 1 workspace before clicking. - assert_eq!( - multi_workspace.read_with(cx, |mw, _| mw.workspaces().count()), - 1, - ); + // The linked-worktree workspace must be reachable from at least one + // sidebar entry — otherwise the user has no way to navigate to it. + let worktree_ws_id = worktree_workspace.entity_id(); + let (all_ids, reachable_ids) = sidebar.read_with(cx, |sidebar, cx| { + let mw = multi_workspace.read(cx); - // Focus and select the legacy thread, then confirm. 
- focus_sidebar(&sidebar, cx); - let thread_index = sidebar.read_with(cx, |sidebar, _| { - sidebar + let all: HashSet = mw.workspaces().map(|ws| ws.entity_id()).collect(); + let reachable: HashSet = sidebar .contents .entries .iter() - .position(|e| e.session_id().is_some_and(|id| id == &legacy_session)) - .expect("legacy thread should be in entries") - }); - sidebar.update_in(cx, |sidebar, _window, _cx| { - sidebar.selection = Some(thread_index); + .flat_map(|entry| entry.reachable_workspaces(mw, cx)) + .map(|ws| ws.entity_id()) + .collect(); + (all, reachable) }); - cx.dispatch_action(Confirm); - cx.run_until_parked(); - let new_workspace = multi_workspace.read_with(cx, |mw, _| mw.workspace().clone()); - let new_path_list = - new_workspace.read_with(cx, |_, cx| workspace_path_list(&new_workspace, cx)); - assert_eq!( - new_path_list, - PathList::new(&[PathBuf::from("/project")]), - "the new workspace should be for the main repo, not the linked worktree", + let unreachable = &all_ids - &reachable_ids; + eprintln!("{}", visible_entries_as_strings(&sidebar, cx).join("\n")); + + assert!( + unreachable.is_empty(), + "workspaces not reachable from any sidebar entry: {:?}\n\ + (linked-worktree workspace id: {:?})", + unreachable, + worktree_ws_id, ); } @@ -5511,19 +7286,23 @@ mod property_test { SwitchToThread { index: usize }, SwitchToProjectGroup { index: usize }, AddLinkedWorktree { project_group_index: usize }, + AddWorktreeToProject { project_group_index: usize }, + RemoveWorktreeFromProject { project_group_index: usize }, } - // Distribution (out of 20 slots): - // SaveThread: 5 slots (~25%) - // SaveWorktreeThread: 2 slots (~10%) - // ToggleAgentPanel: 1 slot (~5%) - // CreateDraftThread: 1 slot (~5%) - // AddProject: 1 slot (~5%) - // ArchiveThread: 2 slots (~10%) - // SwitchToThread: 2 slots (~10%) - // SwitchToProjectGroup: 2 slots (~10%) - // AddLinkedWorktree: 4 slots (~20%) - const DISTRIBUTION_SLOTS: u32 = 20; + // Distribution (out of 24 slots): + // 
SaveThread: 5 slots (~21%) + // SaveWorktreeThread: 2 slots (~8%) + // ToggleAgentPanel: 1 slot (~4%) + // CreateDraftThread: 1 slot (~4%) + // AddProject: 1 slot (~4%) + // ArchiveThread: 2 slots (~8%) + // SwitchToThread: 2 slots (~8%) + // SwitchToProjectGroup: 2 slots (~8%) + // AddLinkedWorktree: 4 slots (~17%) + // AddWorktreeToProject: 2 slots (~8%) + // RemoveWorktreeFromProject: 2 slots (~8%) + const DISTRIBUTION_SLOTS: u32 = 24; impl TestState { fn generate_operation(&self, raw: u32, project_group_count: usize) -> Operation { @@ -5565,6 +7344,18 @@ mod property_test { 16..=19 => Operation::SaveThread { project_group_index: extra % project_group_count, }, + 20..=21 if project_group_count > 0 => Operation::AddWorktreeToProject { + project_group_index: extra % project_group_count, + }, + 20..=21 => Operation::SaveThread { + project_group_index: extra % project_group_count, + }, + 22..=23 if project_group_count > 0 => Operation::RemoveWorktreeFromProject { + project_group_index: extra % project_group_count, + }, + 22..=23 => Operation::SaveThread { + project_group_index: extra % project_group_count, + }, _ => unreachable!(), } } @@ -5587,9 +7378,10 @@ mod property_test { title, updated_at, created_at: None, - folder_paths: path_list, - main_worktree_paths, + worktree_paths: ThreadWorktreePaths::from_path_lists(main_worktree_paths, path_list) + .unwrap(), archived: false, + remote_connection: None, }; cx.update(|_, cx| { ThreadMetadataStore::global(cx) @@ -5709,6 +7501,7 @@ mod property_test { mw.test_add_workspace(project.clone(), window, cx) }); } + Operation::ArchiveThread { index } => { let session_id = state.saved_thread_ids[index].clone(); sidebar.update_in(cx, |sidebar: &mut Sidebar, window, cx| { @@ -5821,23 +7614,74 @@ mod property_test { main_workspace_path: main_path.clone(), }); } + Operation::AddWorktreeToProject { + project_group_index, + } => { + let workspace = multi_workspace.read_with(cx, |mw, cx| { + let key = 
mw.project_group_keys().nth(project_group_index).unwrap(); + mw.workspaces_for_project_group(key, cx).next().cloned() + }); + let Some(workspace) = workspace else { return }; + let project = workspace.read_with(cx, |ws, _| ws.project().clone()); + + let new_path = state.next_workspace_path(); + state + .fs + .insert_tree(&new_path, serde_json::json!({ ".git": {}, "src": {} })) + .await; + + let result = project + .update(cx, |project, cx| { + project.find_or_create_worktree(&new_path, true, cx) + }) + .await; + if result.is_err() { + return; + } + cx.run_until_parked(); + } + Operation::RemoveWorktreeFromProject { + project_group_index, + } => { + let workspace = multi_workspace.read_with(cx, |mw, cx| { + let key = mw.project_group_keys().nth(project_group_index).unwrap(); + mw.workspaces_for_project_group(key, cx).next().cloned() + }); + let Some(workspace) = workspace else { return }; + let project = workspace.read_with(cx, |ws, _| ws.project().clone()); + + let worktree_count = project.read_with(cx, |p, cx| p.visible_worktrees(cx).count()); + if worktree_count <= 1 { + return; + } + + let worktree_id = project.read_with(cx, |p, cx| { + p.visible_worktrees(cx).last().map(|wt| wt.read(cx).id()) + }); + if let Some(worktree_id) = worktree_id { + project.update(cx, |project, cx| { + project.remove_worktree(worktree_id, cx); + }); + cx.run_until_parked(); + } + } } } fn update_sidebar(sidebar: &Entity, cx: &mut gpui::VisualTestContext) { sidebar.update_in(cx, |sidebar, _window, cx| { sidebar.collapsed_groups.clear(); - let path_lists: Vec = sidebar + let group_keys: Vec = sidebar .contents .entries .iter() .filter_map(|entry| match entry { - ListEntry::ProjectHeader { key, .. } => Some(key.path_list().clone()), + ListEntry::ProjectHeader { key, .. 
} => Some(key.clone()), _ => None, }) .collect(); - for path_list in path_lists { - sidebar.expanded_groups.insert(path_list, 10_000); + for group_key in group_keys { + sidebar.expanded_groups.insert(group_key, 10_000); } sidebar.update_entries(cx); }); @@ -5845,9 +7689,35 @@ mod property_test { fn validate_sidebar_properties(sidebar: &Sidebar, cx: &App) -> anyhow::Result<()> { verify_every_group_in_multiworkspace_is_shown(sidebar, cx)?; + verify_no_duplicate_threads(sidebar)?; verify_all_threads_are_shown(sidebar, cx)?; verify_active_state_matches_current_workspace(sidebar, cx)?; verify_all_workspaces_are_reachable(sidebar, cx)?; + verify_workspace_group_key_integrity(sidebar, cx)?; + Ok(()) + } + + fn verify_no_duplicate_threads(sidebar: &Sidebar) -> anyhow::Result<()> { + let mut seen: HashSet = HashSet::default(); + let mut duplicates: Vec<(acp::SessionId, String)> = Vec::new(); + + for entry in &sidebar.contents.entries { + if let Some(session_id) = entry.session_id() { + if !seen.insert(session_id.clone()) { + let title = match entry { + ListEntry::Thread(thread) => thread.metadata.title.to_string(), + _ => "".to_string(), + }; + duplicates.push((session_id.clone(), title)); + } + } + } + + anyhow::ensure!( + duplicates.is_empty(), + "threads appear more than once in sidebar: {:?}", + duplicates, + ); Ok(()) } @@ -6073,18 +7943,20 @@ mod property_test { anyhow::bail!("sidebar should still have an associated multi-workspace"); }; - let mw = multi_workspace.read(cx); + let multi_workspace = multi_workspace.read(cx); let reachable_workspaces: HashSet = sidebar .contents .entries .iter() - .flat_map(|entry| entry.reachable_workspaces(mw, cx)) + .flat_map(|entry| entry.reachable_workspaces(multi_workspace, cx)) .map(|ws| ws.entity_id()) .collect(); - let all_workspace_ids: HashSet = - mw.workspaces().map(|ws| ws.entity_id()).collect(); + let all_workspace_ids: HashSet = multi_workspace + .workspaces() + .map(|ws| ws.entity_id()) + .collect(); let unreachable = 
&all_workspace_ids - &reachable_workspaces; @@ -6097,11 +7969,20 @@ mod property_test { Ok(()) } + fn verify_workspace_group_key_integrity(sidebar: &Sidebar, cx: &App) -> anyhow::Result<()> { + let Some(multi_workspace) = sidebar.multi_workspace.upgrade() else { + anyhow::bail!("sidebar should still have an associated multi-workspace"); + }; + multi_workspace + .read(cx) + .assert_project_group_key_integrity(cx) + } + #[gpui::property_test(config = ProptestConfig { - cases: 50, - ..Default::default() - })] - async fn _test_sidebar_invariants( + cases: 50, + ..Default::default() + })] + async fn test_sidebar_invariants( #[strategy = gpui::proptest::collection::vec(0u32..DISTRIBUTION_SLOTS * 10, 1..5)] raw_operations: Vec, cx: &mut TestAppContext, @@ -6176,3 +8057,298 @@ mod property_test { } } } + +#[gpui::test] +async fn test_remote_project_integration_does_not_briefly_render_as_separate_project( + cx: &mut TestAppContext, + server_cx: &mut TestAppContext, +) { + init_test(cx); + + cx.update(|cx| { + release_channel::init(semver::Version::new(0, 0, 0), cx); + }); + + let app_state = cx.update(|cx| { + let app_state = workspace::AppState::test(cx); + workspace::init(app_state.clone(), cx); + app_state + }); + + // Set up the remote server side. + let server_fs = FakeFs::new(server_cx.executor()); + server_fs + .insert_tree( + "/project", + serde_json::json!({ + ".git": {}, + "src": { "main.rs": "fn main() {}" } + }), + ) + .await; + server_fs.set_branch_name(Path::new("/project/.git"), Some("main")); + + // Create the linked worktree checkout path on the remote server, + // but do not yet register it as a git-linked worktree. The real + // regrouping update in this test should happen only after the + // sidebar opens the closed remote thread. 
+ server_fs + .insert_tree( + "/project-wt-1", + serde_json::json!({ + "src": { "main.rs": "fn main() {}" } + }), + ) + .await; + + server_cx.update(|cx| { + release_channel::init(semver::Version::new(0, 0, 0), cx); + }); + + let (original_opts, server_session, _) = remote::RemoteClient::fake_server(cx, server_cx); + + server_cx.update(remote_server::HeadlessProject::init); + let server_executor = server_cx.executor(); + let _headless = server_cx.new(|cx| { + remote_server::HeadlessProject::new( + remote_server::HeadlessAppState { + session: server_session, + fs: server_fs.clone(), + http_client: Arc::new(http_client::BlockedHttpClient), + node_runtime: node_runtime::NodeRuntime::unavailable(), + languages: Arc::new(language::LanguageRegistry::new(server_executor.clone())), + extension_host_proxy: Arc::new(extension::ExtensionHostProxy::new()), + startup_time: std::time::Instant::now(), + }, + false, + cx, + ) + }); + + // Connect the client side and build a remote project. + let remote_client = remote::RemoteClient::connect_mock(original_opts.clone(), cx).await; + let project = cx.update(|cx| { + let project_client = client::Client::new( + Arc::new(clock::FakeSystemClock::new()), + http_client::FakeHttpClient::with_404_response(), + cx, + ); + let user_store = cx.new(|cx| client::UserStore::new(project_client.clone(), cx)); + project::Project::remote( + remote_client, + project_client, + node_runtime::NodeRuntime::unavailable(), + user_store, + app_state.languages.clone(), + app_state.fs.clone(), + false, + cx, + ) + }); + + // Open the remote worktree. + project + .update(cx, |project, cx| { + project.find_or_create_worktree(Path::new("/project"), true, cx) + }) + .await + .expect("should open remote worktree"); + cx.run_until_parked(); + + // Verify the project is remote. 
+ project.read_with(cx, |project, cx| { + assert!(!project.is_local(), "project should be remote"); + assert!( + project.remote_connection_options(cx).is_some(), + "project should have remote connection options" + ); + }); + + cx.update(|cx| ::set_global(app_state.fs.clone(), cx)); + + // Create MultiWorkspace with the remote project. + let (multi_workspace, cx) = + cx.add_window_view(|window, cx| MultiWorkspace::test_new(project.clone(), window, cx)); + let sidebar = setup_sidebar(&multi_workspace, cx); + + cx.run_until_parked(); + + // Save a thread for the main remote workspace (folder_paths match + // the open workspace, so it will be classified as Open). + let main_thread_id = acp::SessionId::new(Arc::from("main-thread")); + save_thread_metadata( + main_thread_id.clone(), + "Main Thread".into(), + chrono::TimeZone::with_ymd_and_hms(&Utc, 2024, 1, 1, 0, 0, 0).unwrap(), + None, + &project, + cx, + ); + cx.run_until_parked(); + + // Save a thread whose folder_paths point to a linked worktree path + // that doesn't have an open workspace ("/project-wt-1"), but whose + // main_worktree_paths match the project group key so it appears + // in the sidebar under the same remote group. This simulates a + // linked worktree workspace that was closed. 
+ let remote_thread_id = acp::SessionId::new(Arc::from("remote-thread")); + let main_worktree_paths = + project.read_with(cx, |p, cx| p.project_group_key(cx).path_list().clone()); + cx.update(|_window, cx| { + let metadata = ThreadMetadata { + session_id: remote_thread_id.clone(), + agent_id: agent::ZED_AGENT_ID.clone(), + title: "Worktree Thread".into(), + updated_at: chrono::TimeZone::with_ymd_and_hms(&Utc, 2024, 1, 1, 0, 0, 1).unwrap(), + created_at: None, + worktree_paths: ThreadWorktreePaths::from_path_lists( + main_worktree_paths, + PathList::new(&[PathBuf::from("/project-wt-1")]), + ) + .unwrap(), + archived: false, + remote_connection: None, + }; + ThreadMetadataStore::global(cx).update(cx, |store, cx| store.save_manually(metadata, cx)); + }); + cx.run_until_parked(); + + focus_sidebar(&sidebar, cx); + sidebar.update_in(cx, |sidebar, _window, _cx| { + sidebar.selection = sidebar.contents.entries.iter().position(|entry| { + matches!( + entry, + ListEntry::Thread(thread) if thread.metadata.session_id == remote_thread_id + ) + }); + }); + + let saw_separate_project_header = Arc::new(std::sync::atomic::AtomicBool::new(false)); + let saw_separate_project_header_for_observer = saw_separate_project_header.clone(); + + sidebar + .update(cx, |_, cx| { + cx.observe_self(move |sidebar, _cx| { + let mut project_headers = sidebar.contents.entries.iter().filter_map(|entry| { + if let ListEntry::ProjectHeader { label, .. 
} = entry { + Some(label.as_ref()) + } else { + None + } + }); + + let Some(project_header) = project_headers.next() else { + saw_separate_project_header_for_observer + .store(true, std::sync::atomic::Ordering::SeqCst); + return; + }; + + if project_header != "project" || project_headers.next().is_some() { + saw_separate_project_header_for_observer + .store(true, std::sync::atomic::Ordering::SeqCst); + } + }) + }) + .detach(); + + multi_workspace.update(cx, |multi_workspace, cx| { + let workspace = multi_workspace.workspace().clone(); + workspace.update(cx, |workspace: &mut Workspace, cx| { + let remote_client = workspace + .project() + .read(cx) + .remote_client() + .expect("main remote project should have a remote client"); + remote_client.update(cx, |remote_client: &mut remote::RemoteClient, cx| { + remote_client.force_server_not_running(cx); + }); + }); + }); + cx.run_until_parked(); + + let (server_session_2, connect_guard_2) = + remote::RemoteClient::fake_server_with_opts(&original_opts, cx, server_cx); + let _headless_2 = server_cx.new(|cx| { + remote_server::HeadlessProject::new( + remote_server::HeadlessAppState { + session: server_session_2, + fs: server_fs.clone(), + http_client: Arc::new(http_client::BlockedHttpClient), + node_runtime: node_runtime::NodeRuntime::unavailable(), + languages: Arc::new(language::LanguageRegistry::new(server_executor.clone())), + extension_host_proxy: Arc::new(extension::ExtensionHostProxy::new()), + startup_time: std::time::Instant::now(), + }, + false, + cx, + ) + }); + drop(connect_guard_2); + + let window = cx.windows()[0]; + cx.update_window(window, |_, window, cx| { + window.dispatch_action(Confirm.boxed_clone(), cx); + }) + .unwrap(); + + cx.run_until_parked(); + + let new_workspace = multi_workspace.read_with(cx, |mw, _| { + assert_eq!( + mw.workspaces().count(), + 2, + "confirming a closed remote thread should open a second workspace" + ); + mw.workspaces() + .find(|workspace| workspace.entity_id() != 
mw.workspace().entity_id()) + .unwrap() + .clone() + }); + + server_fs + .add_linked_worktree_for_repo( + Path::new("/project/.git"), + true, + git::repository::Worktree { + path: PathBuf::from("/project-wt-1"), + ref_name: Some("refs/heads/feature-wt".into()), + sha: "abc123".into(), + is_main: false, + }, + ) + .await; + + server_cx.run_until_parked(); + cx.run_until_parked(); + server_cx.run_until_parked(); + cx.run_until_parked(); + + let entries_after_update = visible_entries_as_strings(&sidebar, cx); + let group_after_update = new_workspace.read_with(cx, |workspace, cx| { + workspace.project().read(cx).project_group_key(cx) + }); + + assert_eq!( + group_after_update, + project.read_with(cx, |project, cx| project.project_group_key(cx)), + "expected the remote worktree workspace to be grouped under the main remote project after the real update; \ + final sidebar entries: {:?}", + entries_after_update, + ); + + sidebar.update(cx, |sidebar, _cx| { + assert_remote_project_integration_sidebar_state( + sidebar, + &main_thread_id, + &remote_thread_id, + ); + }); + + assert!( + !saw_separate_project_header.load(std::sync::atomic::Ordering::SeqCst), + "sidebar briefly rendered the remote worktree as a separate project during the real remote open/update sequence; \ + final group: {:?}; final sidebar entries: {:?}", + group_after_update, + entries_after_update, + ); +} diff --git a/crates/ui/src/components/ai/thread_item.rs b/crates/ui/src/components/ai/thread_item.rs index 34aa6b4869d44aa4835f4f1d2ed2557f4dd138b4..c920f854081236d58b00d1ba197bd3805915cad4 100644 --- a/crates/ui/src/components/ai/thread_item.rs +++ b/crates/ui/src/components/ai/thread_item.rs @@ -54,6 +54,7 @@ pub struct ThreadItem { project_paths: Option>, project_name: Option, worktrees: Vec, + is_remote: bool, on_click: Option>, on_hover: Box, action_slot: Option, @@ -86,6 +87,7 @@ impl ThreadItem { project_paths: None, project_name: None, worktrees: Vec::new(), + is_remote: false, on_click: None, 
on_hover: Box::new(|_, _, _| {}), action_slot: None, @@ -179,6 +181,11 @@ impl ThreadItem { self } + pub fn is_remote(mut self, is_remote: bool) -> Self { + self.is_remote = is_remote; + self + } + pub fn hovered(mut self, hovered: bool) -> Self { self.hovered = hovered; self @@ -443,10 +450,11 @@ impl RenderOnce for ThreadItem { .join("\n") .into(); - let worktree_tooltip_title = if self.worktrees.len() > 1 { - "Thread Running in Local Git Worktrees" - } else { - "Thread Running in a Local Git Worktree" + let worktree_tooltip_title = match (self.is_remote, self.worktrees.len() > 1) { + (true, true) => "Thread Running in Remote Git Worktrees", + (true, false) => "Thread Running in a Remote Git Worktree", + (false, true) => "Thread Running in Local Git Worktrees", + (false, false) => "Thread Running in a Local Git Worktree", }; // Deduplicate chips by name — e.g. two paths both named diff --git a/crates/util/src/disambiguate.rs b/crates/util/src/disambiguate.rs new file mode 100644 index 0000000000000000000000000000000000000000..490182598b52ab3419633d0c56700e85f91d81a9 --- /dev/null +++ b/crates/util/src/disambiguate.rs @@ -0,0 +1,202 @@ +use std::collections::HashMap; +use std::hash::Hash; + +/// Computes the minimum detail level needed for each item so that no two items +/// share the same description. Items whose descriptions are unique at level 0 +/// stay at 0; items that collide get their detail level incremented until either +/// the collision is resolved or increasing the level no longer changes the +/// description (preventing infinite loops for truly identical items). +/// +/// The `get_description` closure must return a sequence that eventually reaches +/// a "fixed point" where increasing `detail` no longer changes the output. If +/// an item reaches its fixed point, it is assumed it will no longer change and +/// will no longer be checked for collisions. 
+pub fn compute_disambiguation_details( + items: &[T], + get_description: impl Fn(&T, usize) -> D, +) -> Vec +where + D: Eq + Hash + Clone, +{ + let mut details = vec![0usize; items.len()]; + let mut descriptions: HashMap> = HashMap::default(); + let mut current_descriptions: Vec = + items.iter().map(|item| get_description(item, 0)).collect(); + + loop { + let mut any_collisions = false; + + for (index, (item, &detail)) in items.iter().zip(&details).enumerate() { + if detail > 0 { + let new_description = get_description(item, detail); + if new_description == current_descriptions[index] { + continue; + } + current_descriptions[index] = new_description; + } + descriptions + .entry(current_descriptions[index].clone()) + .or_insert_with(Vec::new) + .push(index); + } + + for (_, indices) in descriptions.drain() { + if indices.len() > 1 { + any_collisions = true; + for index in indices { + details[index] += 1; + } + } + } + + if !any_collisions { + break; + } + } + + details +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_no_conflicts() { + let items = vec!["alpha", "beta", "gamma"]; + let details = compute_disambiguation_details(&items, |item, _detail| item.to_string()); + assert_eq!(details, vec![0, 0, 0]); + } + + #[test] + fn test_simple_two_way_conflict() { + // Two items with the same base name but different parents. 
+ let items = vec![("src/foo.rs", "foo.rs"), ("lib/foo.rs", "foo.rs")]; + let details = compute_disambiguation_details(&items, |item, detail| match detail { + 0 => item.1.to_string(), + _ => item.0.to_string(), + }); + assert_eq!(details, vec![1, 1]); + } + + #[test] + fn test_three_way_conflict() { + let items = vec![ + ("foo.rs", "a/foo.rs"), + ("foo.rs", "b/foo.rs"), + ("foo.rs", "c/foo.rs"), + ]; + let details = compute_disambiguation_details(&items, |item, detail| match detail { + 0 => item.0.to_string(), + _ => item.1.to_string(), + }); + assert_eq!(details, vec![1, 1, 1]); + } + + #[test] + fn test_deeper_conflict() { + // At detail 0, all three show "file.rs". + // At detail 1, items 0 and 1 both show "src/file.rs", item 2 shows "lib/file.rs". + // At detail 2, item 0 shows "a/src/file.rs", item 1 shows "b/src/file.rs". + let items = vec![ + vec!["file.rs", "src/file.rs", "a/src/file.rs"], + vec!["file.rs", "src/file.rs", "b/src/file.rs"], + vec!["file.rs", "lib/file.rs", "x/lib/file.rs"], + ]; + let details = compute_disambiguation_details(&items, |item, detail| { + let clamped = detail.min(item.len() - 1); + item[clamped].to_string() + }); + assert_eq!(details, vec![2, 2, 1]); + } + + #[test] + fn test_mixed_conflicting_and_unique() { + let items = vec![ + ("src/foo.rs", "foo.rs"), + ("lib/foo.rs", "foo.rs"), + ("src/bar.rs", "bar.rs"), + ]; + let details = compute_disambiguation_details(&items, |item, detail| match detail { + 0 => item.1.to_string(), + _ => item.0.to_string(), + }); + assert_eq!(details, vec![1, 1, 0]); + } + + #[test] + fn test_identical_items_terminates() { + // All items return the same description at every detail level. + // The algorithm must terminate rather than looping forever. 
+ let items = vec!["same", "same", "same"]; + let details = compute_disambiguation_details(&items, |item, _detail| item.to_string()); + // After bumping to 1, the description doesn't change from level 0, + // so the items are skipped and the loop terminates. + assert_eq!(details, vec![1, 1, 1]); + } + + #[test] + fn test_single_item() { + let items = vec!["only"]; + let details = compute_disambiguation_details(&items, |item, _detail| item.to_string()); + assert_eq!(details, vec![0]); + } + + #[test] + fn test_empty_input() { + let items: Vec<&str> = vec![]; + let details = compute_disambiguation_details(&items, |item, _detail| item.to_string()); + let expected: Vec = vec![]; + assert_eq!(details, expected); + } + + #[test] + fn test_duplicate_paths_from_multiple_groups() { + use std::path::Path; + + // Simulates the sidebar scenario: a path like /Users/rtfeldman/code/zed + // appears in two project groups (e.g. "zed" alone and "zed, roc"). + // After deduplication, only unique paths should be disambiguated. + // + // Paths: + // /Users/rtfeldman/code/worktrees/zed/focal-arrow/zed (group 1) + // /Users/rtfeldman/code/zed (group 2) + // /Users/rtfeldman/code/zed (group 3, same path as group 2) + // /Users/rtfeldman/code/roc (group 3) + // + // A naive flat_map collects duplicates. The duplicate /code/zed entries + // collide with each other and drive the detail to the full path. + // The fix is to deduplicate before disambiguating. 
+ + fn path_suffix(path: &Path, detail: usize) -> String { + let mut components: Vec<_> = path + .components() + .rev() + .filter_map(|c| match c { + std::path::Component::Normal(s) => Some(s.to_string_lossy()), + _ => None, + }) + .take(detail + 1) + .collect(); + components.reverse(); + components.join("/") + } + + let all_paths: Vec<&Path> = vec![ + Path::new("/Users/rtfeldman/code/worktrees/zed/focal-arrow/zed"), + Path::new("/Users/rtfeldman/code/zed"), + Path::new("/Users/rtfeldman/code/roc"), + ]; + + let details = + compute_disambiguation_details(&all_paths, |path, detail| path_suffix(path, detail)); + + // focal-arrow/zed and code/zed both end in "zed", so they need detail 1. + // "roc" is unique at detail 0. + assert_eq!(details, vec![1, 1, 0]); + + assert_eq!(path_suffix(all_paths[0], details[0]), "focal-arrow/zed"); + assert_eq!(path_suffix(all_paths[1], details[1]), "code/zed"); + assert_eq!(path_suffix(all_paths[2], details[2]), "roc"); + } +} diff --git a/crates/util/src/util.rs b/crates/util/src/util.rs index bd8ab4e2d4d99864c5e0dc228410904f3338d7c6..3b704e50a531c5302024e215754cb9a866f0036b 100644 --- a/crates/util/src/util.rs +++ b/crates/util/src/util.rs @@ -1,5 +1,6 @@ pub mod archive; pub mod command; +pub mod disambiguate; pub mod fs; pub mod markdown; pub mod path_list; diff --git a/crates/workspace/src/multi_workspace.rs b/crates/workspace/src/multi_workspace.rs index e9baccb4d93c9406d7b766db3d8e4ce4cb7198b7..9ef81194639e625b4944c48be41b7518fee0bbe3 100644 --- a/crates/workspace/src/multi_workspace.rs +++ b/crates/workspace/src/multi_workspace.rs @@ -6,8 +6,10 @@ use gpui::{ actions, deferred, px, }; use project::{DirectoryLister, DisableAiSettings, Project, ProjectGroupKey}; +use remote::RemoteConnectionOptions; use settings::Settings; pub use settings::SidebarSide; +use std::collections::{HashMap, HashSet}; use std::future::Future; use std::path::Path; use std::path::PathBuf; @@ -22,6 +24,7 @@ use ui::{ContextMenu, right_click_menu}; const 
SIDEBAR_RESIZE_HANDLE_SIZE: Pixels = px(6.0); +use crate::open_remote_project_with_existing_connection; use crate::{ CloseIntent, CloseWindow, DockPosition, Event as WorkspaceEvent, Item, ModalView, OpenMode, Panel, Workspace, WorkspaceId, client_side_decorations, @@ -98,6 +101,14 @@ pub enum MultiWorkspaceEvent { ActiveWorkspaceChanged, WorkspaceAdded(Entity), WorkspaceRemoved(EntityId), + WorktreePathAdded { + old_main_paths: PathList, + added_path: PathBuf, + }, + WorktreePathRemoved { + old_main_paths: PathList, + removed_path: PathBuf, + }, } pub enum SidebarEvent { @@ -299,6 +310,7 @@ pub struct MultiWorkspace { workspaces: Vec>, active_workspace: ActiveWorkspace, project_group_keys: Vec, + workspace_group_keys: HashMap, sidebar: Option>, sidebar_open: bool, sidebar_overlay: Option, @@ -351,6 +363,7 @@ impl MultiWorkspace { Self { window_id: window.window_handle().window_id(), project_group_keys: Vec::new(), + workspace_group_keys: HashMap::default(), workspaces: Vec::new(), active_workspace: ActiveWorkspace::Transient(workspace), sidebar: None, @@ -491,7 +504,15 @@ impl MultiWorkspace { workspace.set_sidebar_focus_handle(None); }); } - self.restore_previous_focus(true, window, cx); + let sidebar_has_focus = self + .sidebar + .as_ref() + .is_some_and(|s| s.focus_handle(cx).contains_focused(window, cx)); + if sidebar_has_focus { + self.restore_previous_focus(true, window, cx); + } else { + self.previous_focus_handle.take(); + } self.serialize(cx); cx.notify(); } @@ -546,9 +567,11 @@ impl MultiWorkspace { cx.subscribe_in(&project, window, { let workspace = workspace.downgrade(); move |this, _project, event, _window, cx| match event { - project::Event::WorktreeAdded(_) | project::Event::WorktreeRemoved(_) => { + project::Event::WorktreeAdded(_) + | project::Event::WorktreeRemoved(_) + | project::Event::WorktreeUpdatedRootRepoCommonDir(_) => { if let Some(workspace) = workspace.upgrade() { - this.add_project_group_key(workspace.read(cx).project_group_key(cx)); + 
this.handle_workspace_key_change(&workspace, cx); } } _ => {} @@ -564,7 +587,124 @@ impl MultiWorkspace { .detach(); } - pub fn add_project_group_key(&mut self, project_group_key: ProjectGroupKey) { + fn handle_workspace_key_change( + &mut self, + workspace: &Entity, + cx: &mut Context, + ) { + let workspace_id = workspace.entity_id(); + let old_key = self.project_group_key_for_workspace(workspace, cx); + let new_key = workspace.read(cx).project_group_key(cx); + + if new_key.path_list().paths().is_empty() || old_key == new_key { + return; + } + + let active_workspace = self.workspace().clone(); + + self.set_workspace_group_key(workspace, new_key.clone()); + + let changed_root_paths = workspace.read(cx).root_paths(cx); + let old_paths = old_key.path_list().paths(); + let new_paths = new_key.path_list().paths(); + + // Remove workspaces that already had the new key and have the same + // root paths (true duplicates that this workspace is replacing). + // + // NOTE: These are dropped without prompting for unsaved changes because + // the user explicitly added a folder that makes this workspace + // identical to the duplicate — they are intentionally overwriting it. + let duplicate_workspaces: Vec> = self + .workspaces + .iter() + .filter(|ws| { + ws.entity_id() != workspace_id + && self.project_group_key_for_workspace(ws, cx) == new_key + && ws.read(cx).root_paths(cx) == changed_root_paths + }) + .cloned() + .collect(); + + if duplicate_workspaces.contains(&active_workspace) { + // The active workspace is among the duplicates — drop the + // incoming workspace instead so the user stays where they are. + self.detach_workspace(workspace, cx); + self.workspaces.retain(|w| w != workspace); + } else { + for ws in &duplicate_workspaces { + self.detach_workspace(ws, cx); + self.workspaces.retain(|w| w != ws); + } + } + + // Propagate folder adds/removes to linked worktree siblings + // (different root paths, same old key) so they stay in the group. 
+ let group_workspaces: Vec> = self + .workspaces + .iter() + .filter(|ws| { + ws.entity_id() != workspace_id + && self.project_group_key_for_workspace(ws, cx) == old_key + }) + .cloned() + .collect(); + + for workspace in &group_workspaces { + // Pre-set this to stop later WorktreeAdded events from triggering + self.set_workspace_group_key(&workspace, new_key.clone()); + + let project = workspace.read(cx).project().clone(); + + for added_path in new_paths.iter().filter(|p| !old_paths.contains(p)) { + project + .update(cx, |project, cx| { + project.find_or_create_worktree(added_path, true, cx) + }) + .detach_and_log_err(cx); + } + + for removed_path in old_paths.iter().filter(|p| !new_paths.contains(p)) { + project.update(cx, |project, cx| { + project.remove_worktree_for_main_worktree_path(removed_path, cx); + }); + } + } + + // Restore the active workspace after removals may have shifted + // the index. If the previously active workspace was removed, + // fall back to the workspace whose key just changed. 
+ if let ActiveWorkspace::Persistent(_) = &self.active_workspace { + let target = if self.workspaces.contains(&active_workspace) { + &active_workspace + } else { + workspace + }; + if let Some(new_index) = self.workspaces.iter().position(|ws| ws == target) { + self.active_workspace = ActiveWorkspace::Persistent(new_index); + } + } + + self.remove_stale_project_group_keys(cx); + + let old_main_paths = old_key.path_list().clone(); + for added_path in new_paths.iter().filter(|p| !old_paths.contains(p)) { + cx.emit(MultiWorkspaceEvent::WorktreePathAdded { + old_main_paths: old_main_paths.clone(), + added_path: added_path.clone(), + }); + } + for removed_path in old_paths.iter().filter(|p| !new_paths.contains(p)) { + cx.emit(MultiWorkspaceEvent::WorktreePathRemoved { + old_main_paths: old_main_paths.clone(), + removed_path: removed_path.clone(), + }); + } + + self.serialize(cx); + cx.notify(); + } + + fn add_project_group_key(&mut self, project_group_key: ProjectGroupKey) { if project_group_key.path_list().paths().is_empty() { return; } @@ -575,9 +715,43 @@ impl MultiWorkspace { self.project_group_keys.insert(0, project_group_key); } + pub(crate) fn set_workspace_group_key( + &mut self, + workspace: &Entity, + project_group_key: ProjectGroupKey, + ) { + self.workspace_group_keys + .insert(workspace.entity_id(), project_group_key.clone()); + self.add_project_group_key(project_group_key); + } + + pub fn project_group_key_for_workspace( + &self, + workspace: &Entity, + cx: &App, + ) -> ProjectGroupKey { + self.workspace_group_keys + .get(&workspace.entity_id()) + .cloned() + .unwrap_or_else(|| workspace.read(cx).project_group_key(cx)) + } + + fn remove_stale_project_group_keys(&mut self, cx: &App) { + let workspace_keys: HashSet = self + .workspaces + .iter() + .map(|workspace| self.project_group_key_for_workspace(workspace, cx)) + .collect(); + self.project_group_keys + .retain(|key| workspace_keys.contains(key)); + } + pub fn restore_project_group_keys(&mut self, keys: 
Vec) { let mut restored: Vec = Vec::with_capacity(keys.len()); for key in keys { + if key.path_list().paths().is_empty() { + continue; + } if !restored.contains(&key) { restored.push(key); } @@ -605,7 +779,7 @@ impl MultiWorkspace { .map(|key| (key.clone(), Vec::new())) .collect::>(); for workspace in &self.workspaces { - let key = workspace.read(cx).project_group_key(cx); + let key = self.project_group_key_for_workspace(workspace, cx); if let Some((_, workspaces)) = groups.iter_mut().find(|(k, _)| k == &key) { workspaces.push(workspace.clone()); } @@ -618,9 +792,9 @@ impl MultiWorkspace { project_group_key: &ProjectGroupKey, cx: &App, ) -> impl Iterator> { - self.workspaces - .iter() - .filter(move |ws| ws.read(cx).project_group_key(cx) == *project_group_key) + self.workspaces.iter().filter(move |workspace| { + self.project_group_key_for_workspace(workspace, cx) == *project_group_key + }) } pub fn remove_folder_from_project_group( @@ -781,14 +955,104 @@ impl MultiWorkspace { ) } - /// Finds an existing workspace whose root paths exactly match the given path list. - pub fn workspace_for_paths(&self, path_list: &PathList, cx: &App) -> Option> { + /// Finds an existing workspace whose root paths and host exactly match. + pub fn workspace_for_paths( + &self, + path_list: &PathList, + host: Option<&RemoteConnectionOptions>, + cx: &App, + ) -> Option> { self.workspaces .iter() - .find(|ws| PathList::new(&ws.read(cx).root_paths(cx)) == *path_list) + .find(|ws| { + let key = ws.read(cx).project_group_key(cx); + key.host().as_ref() == host + && PathList::new(&ws.read(cx).root_paths(cx)) == *path_list + }) .cloned() } + /// Finds an existing workspace whose paths match, or creates a new one. + /// + /// For local projects (`host` is `None`), this delegates to + /// [`Self::find_or_create_local_workspace`]. 
For remote projects, it + /// tries an exact path match and, if no existing workspace is found, + /// calls `connect_remote` to establish a connection and creates a new + /// remote workspace. + /// + /// The `connect_remote` closure is responsible for any user-facing + /// connection UI (e.g. password prompts). It receives the connection + /// options and should return a [`Task`] that resolves to the + /// [`RemoteClient`] session, or `None` if the connection was + /// cancelled. + pub fn find_or_create_workspace( + &mut self, + paths: PathList, + host: Option, + provisional_project_group_key: Option, + connect_remote: impl FnOnce( + RemoteConnectionOptions, + &mut Window, + &mut Context, + ) -> Task>>> + + 'static, + window: &mut Window, + cx: &mut Context, + ) -> Task>> { + if let Some(workspace) = self.workspace_for_paths(&paths, host.as_ref(), cx) { + self.activate(workspace.clone(), window, cx); + return Task::ready(Ok(workspace)); + } + + let Some(connection_options) = host else { + return self.find_or_create_local_workspace(paths, window, cx); + }; + + let app_state = self.workspace().read(cx).app_state().clone(); + let window_handle = window.window_handle().downcast::(); + let connect_task = connect_remote(connection_options.clone(), window, cx); + let paths_vec = paths.paths().to_vec(); + + cx.spawn(async move |_this, cx| { + let session = connect_task + .await? 
+ .ok_or_else(|| anyhow::anyhow!("Remote connection was cancelled"))?; + + let new_project = cx.update(|cx| { + Project::remote( + session, + app_state.client.clone(), + app_state.node_runtime.clone(), + app_state.user_store.clone(), + app_state.languages.clone(), + app_state.fs.clone(), + true, + cx, + ) + }); + + let window_handle = + window_handle.ok_or_else(|| anyhow::anyhow!("Window is not a MultiWorkspace"))?; + + open_remote_project_with_existing_connection( + connection_options, + new_project, + paths_vec, + app_state, + window_handle, + provisional_project_group_key, + cx, + ) + .await?; + + window_handle.update(cx, |multi_workspace, window, cx| { + let workspace = multi_workspace.workspace().clone(); + multi_workspace.add(workspace.clone(), window, cx); + workspace + }) + }) + } + /// Finds an existing workspace in this multi-workspace whose paths match, /// or creates a new one (deserializing its saved state from the database). /// Never searches other windows or matches workspaces with a superset of @@ -799,7 +1063,7 @@ impl MultiWorkspace { window: &mut Window, cx: &mut Context, ) -> Task>> { - if let Some(workspace) = self.workspace_for_paths(&path_list, cx) { + if let Some(workspace) = self.workspace_for_paths(&path_list, None, cx) { self.activate(workspace.clone(), window, cx); return Task::ready(Ok(workspace)); } @@ -882,7 +1146,6 @@ impl MultiWorkspace { self.promote_transient(old, cx); } else { self.detach_workspace(&old, cx); - cx.emit(MultiWorkspaceEvent::WorkspaceRemoved(old.entity_id())); } } } else { @@ -893,7 +1156,6 @@ impl MultiWorkspace { }); if let Some(old) = self.active_workspace.set_transient(workspace) { self.detach_workspace(&old, cx); - cx.emit(MultiWorkspaceEvent::WorkspaceRemoved(old.entity_id())); } } @@ -919,8 +1181,8 @@ impl MultiWorkspace { /// Promotes a former transient workspace into the persistent list. /// Returns the index of the newly inserted workspace. 
fn promote_transient(&mut self, workspace: Entity, cx: &mut Context) -> usize { - let project_group_key = workspace.read(cx).project().read(cx).project_group_key(cx); - self.add_project_group_key(project_group_key); + let project_group_key = self.project_group_key_for_workspace(&workspace, cx); + self.set_workspace_group_key(&workspace, project_group_key); self.workspaces.push(workspace.clone()); cx.emit(MultiWorkspaceEvent::WorkspaceAdded(workspace)); self.workspaces.len() - 1 @@ -936,10 +1198,10 @@ impl MultiWorkspace { for workspace in std::mem::take(&mut self.workspaces) { if workspace != active { self.detach_workspace(&workspace, cx); - cx.emit(MultiWorkspaceEvent::WorkspaceRemoved(workspace.entity_id())); } } self.project_group_keys.clear(); + self.workspace_group_keys.clear(); self.active_workspace = ActiveWorkspace::Transient(active); cx.notify(); } @@ -956,7 +1218,7 @@ impl MultiWorkspace { if let Some(index) = self.workspaces.iter().position(|w| *w == workspace) { index } else { - let project_group_key = workspace.read(cx).project().read(cx).project_group_key(cx); + let project_group_key = self.project_group_key_for_workspace(&workspace, cx); Self::subscribe_to_workspace(&workspace, window, cx); self.sync_sidebar_to_workspace(&workspace, cx); @@ -965,7 +1227,7 @@ impl MultiWorkspace { workspace.set_multi_workspace(weak_self, cx); }); - self.add_project_group_key(project_group_key); + self.set_workspace_group_key(&workspace, project_group_key); self.workspaces.push(workspace.clone()); cx.emit(MultiWorkspaceEvent::WorkspaceAdded(workspace)); cx.notify(); @@ -973,10 +1235,12 @@ impl MultiWorkspace { } } - /// Clears session state and DB binding for a workspace that is being - /// removed or replaced. The DB row is preserved so the workspace still - /// appears in the recent-projects list. + /// Detaches a workspace: clears session state, DB binding, cached + /// group key, and emits `WorkspaceRemoved`. 
The DB row is preserved + /// so the workspace still appears in the recent-projects list. fn detach_workspace(&mut self, workspace: &Entity, cx: &mut Context) { + self.workspace_group_keys.remove(&workspace.entity_id()); + cx.emit(MultiWorkspaceEvent::WorkspaceRemoved(workspace.entity_id())); workspace.update(cx, |workspace, _cx| { workspace.session_id.take(); workspace._schedule_serialize_workspace.take(); @@ -1150,6 +1414,46 @@ impl MultiWorkspace { tasks } + #[cfg(any(test, feature = "test-support"))] + pub fn assert_project_group_key_integrity(&self, cx: &App) -> anyhow::Result<()> { + let stored_keys: HashSet<&ProjectGroupKey> = self.project_group_keys().collect(); + + let workspace_group_keys: HashSet<&ProjectGroupKey> = + self.workspace_group_keys.values().collect(); + let extra_keys = &workspace_group_keys - &stored_keys; + anyhow::ensure!( + extra_keys.is_empty(), + "workspace_group_keys values not in project_group_keys: {:?}", + extra_keys, + ); + + let cached_ids: HashSet = self.workspace_group_keys.keys().copied().collect(); + let workspace_ids: HashSet = + self.workspaces.iter().map(|ws| ws.entity_id()).collect(); + anyhow::ensure!( + cached_ids == workspace_ids, + "workspace_group_keys entity IDs don't match workspaces.\n\ + only in cache: {:?}\n\ + only in workspaces: {:?}", + &cached_ids - &workspace_ids, + &workspace_ids - &cached_ids, + ); + + for workspace in self.workspaces() { + let live_key = workspace.read(cx).project_group_key(cx); + let cached_key = &self.workspace_group_keys[&workspace.entity_id()]; + anyhow::ensure!( + *cached_key == live_key, + "workspace {:?} has live key {:?} but cached key {:?}", + workspace.entity_id(), + live_key, + cached_key, + ); + } + + Ok(()) + } + #[cfg(any(test, feature = "test-support"))] pub fn set_random_database_id(&mut self, cx: &mut Context) { self.workspace().update(cx, |workspace, _cx| { @@ -1308,7 +1612,6 @@ impl MultiWorkspace { for workspace in &removed_workspaces { this.detach_workspace(workspace, 
cx); - cx.emit(MultiWorkspaceEvent::WorkspaceRemoved(workspace.entity_id())); } let removed_any = !removed_workspaces.is_empty(); diff --git a/crates/workspace/src/multi_workspace_tests.rs b/crates/workspace/src/multi_workspace_tests.rs index 259346fe097826b3dcc19fb8fad0b8f07ddd0488..9cab28c0ca4ab34b2189985e898285dd82dd4f32 100644 --- a/crates/workspace/src/multi_workspace_tests.rs +++ b/crates/workspace/src/multi_workspace_tests.rs @@ -185,157 +185,3 @@ async fn test_project_group_keys_duplicate_not_added(cx: &mut TestAppContext) { ); }); } - -#[gpui::test] -async fn test_project_group_keys_on_worktree_added(cx: &mut TestAppContext) { - init_test(cx); - let fs = FakeFs::new(cx.executor()); - fs.insert_tree("/root_a", json!({ "file.txt": "" })).await; - fs.insert_tree("/root_b", json!({ "file.txt": "" })).await; - let project = Project::test(fs, ["/root_a".as_ref()], cx).await; - - let initial_key = project.read_with(cx, |p, cx| p.project_group_key(cx)); - - let (multi_workspace, cx) = - cx.add_window_view(|window, cx| MultiWorkspace::test_new(project.clone(), window, cx)); - - multi_workspace.update(cx, |mw, cx| { - mw.open_sidebar(cx); - }); - - // Add a second worktree to the same project. 
- let (worktree, _) = project - .update(cx, |project, cx| { - project.find_or_create_worktree("/root_b", true, cx) - }) - .await - .unwrap(); - worktree - .read_with(cx, |tree, _| tree.as_local().unwrap().scan_complete()) - .await; - cx.run_until_parked(); - - let updated_key = project.read_with(cx, |p, cx| p.project_group_key(cx)); - assert_ne!( - initial_key, updated_key, - "key should change after adding a worktree" - ); - - multi_workspace.read_with(cx, |mw, _cx| { - let keys: Vec<&ProjectGroupKey> = mw.project_group_keys().collect(); - assert_eq!( - keys.len(), - 2, - "should have both the original and updated key" - ); - assert_eq!(*keys[0], updated_key); - assert_eq!(*keys[1], initial_key); - }); -} - -#[gpui::test] -async fn test_project_group_keys_on_worktree_removed(cx: &mut TestAppContext) { - init_test(cx); - let fs = FakeFs::new(cx.executor()); - fs.insert_tree("/root_a", json!({ "file.txt": "" })).await; - fs.insert_tree("/root_b", json!({ "file.txt": "" })).await; - let project = Project::test(fs, ["/root_a".as_ref(), "/root_b".as_ref()], cx).await; - - let initial_key = project.read_with(cx, |p, cx| p.project_group_key(cx)); - - let (multi_workspace, cx) = - cx.add_window_view(|window, cx| MultiWorkspace::test_new(project.clone(), window, cx)); - - multi_workspace.update(cx, |mw, cx| { - mw.open_sidebar(cx); - }); - - // Remove one worktree. 
- let worktree_b_id = project.read_with(cx, |project, cx| { - project - .worktrees(cx) - .find(|wt| wt.read(cx).root_name().as_unix_str() == "root_b") - .unwrap() - .read(cx) - .id() - }); - project.update(cx, |project, cx| { - project.remove_worktree(worktree_b_id, cx); - }); - cx.run_until_parked(); - - let updated_key = project.read_with(cx, |p, cx| p.project_group_key(cx)); - assert_ne!( - initial_key, updated_key, - "key should change after removing a worktree" - ); - - multi_workspace.read_with(cx, |mw, _cx| { - let keys: Vec<&ProjectGroupKey> = mw.project_group_keys().collect(); - assert_eq!( - keys.len(), - 2, - "should accumulate both the original and post-removal key" - ); - assert_eq!(*keys[0], updated_key); - assert_eq!(*keys[1], initial_key); - }); -} - -#[gpui::test] -async fn test_project_group_keys_across_multiple_workspaces_and_worktree_changes( - cx: &mut TestAppContext, -) { - init_test(cx); - let fs = FakeFs::new(cx.executor()); - fs.insert_tree("/root_a", json!({ "file.txt": "" })).await; - fs.insert_tree("/root_b", json!({ "file.txt": "" })).await; - fs.insert_tree("/root_c", json!({ "file.txt": "" })).await; - let project_a = Project::test(fs.clone(), ["/root_a".as_ref()], cx).await; - let project_b = Project::test(fs.clone(), ["/root_b".as_ref()], cx).await; - - let key_a = project_a.read_with(cx, |p, cx| p.project_group_key(cx)); - let key_b = project_b.read_with(cx, |p, cx| p.project_group_key(cx)); - - let (multi_workspace, cx) = - cx.add_window_view(|window, cx| MultiWorkspace::test_new(project_a.clone(), window, cx)); - - multi_workspace.update(cx, |mw, cx| { - mw.open_sidebar(cx); - }); - - multi_workspace.update_in(cx, |mw, window, cx| { - mw.test_add_workspace(project_b, window, cx); - }); - - multi_workspace.read_with(cx, |mw, _cx| { - assert_eq!(mw.project_group_keys().count(), 2); - }); - - // Now add a worktree to project_a. This should produce a third key. 
- let (worktree, _) = project_a - .update(cx, |project, cx| { - project.find_or_create_worktree("/root_c", true, cx) - }) - .await - .unwrap(); - worktree - .read_with(cx, |tree, _| tree.as_local().unwrap().scan_complete()) - .await; - cx.run_until_parked(); - - let key_a_updated = project_a.read_with(cx, |p, cx| p.project_group_key(cx)); - assert_ne!(key_a, key_a_updated); - - multi_workspace.read_with(cx, |mw, _cx| { - let keys: Vec<&ProjectGroupKey> = mw.project_group_keys().collect(); - assert_eq!( - keys.len(), - 3, - "should have key_a, key_b, and the updated key_a with root_c" - ); - assert_eq!(*keys[0], key_a_updated); - assert_eq!(*keys[1], key_b); - assert_eq!(*keys[2], key_a); - }); -} diff --git a/crates/workspace/src/pane.rs b/crates/workspace/src/pane.rs index cbcd60b734644cb61473bef85e27f2403e3c7d3c..785d4111a38ad859f415983209bcd1eeae484d5e 100644 --- a/crates/workspace/src/pane.rs +++ b/crates/workspace/src/pane.rs @@ -4897,36 +4897,9 @@ fn dirty_message_for(buffer_path: Option, path_style: PathStyle) -> } pub fn tab_details(items: &[Box], _window: &Window, cx: &App) -> Vec { - let mut tab_details = items.iter().map(|_| 0).collect::>(); - let mut tab_descriptions = HashMap::default(); - let mut done = false; - while !done { - done = true; - - // Store item indices by their tab description. - for (ix, (item, detail)) in items.iter().zip(&tab_details).enumerate() { - let description = item.tab_content_text(*detail, cx); - if *detail == 0 || description != item.tab_content_text(detail - 1, cx) { - tab_descriptions - .entry(description) - .or_insert(Vec::new()) - .push(ix); - } - } - - // If two or more items have the same tab description, increase their level - // of detail and try again. 
- for (_, item_ixs) in tab_descriptions.drain() { - if item_ixs.len() > 1 { - done = false; - for ix in item_ixs { - tab_details[ix] += 1; - } - } - } - } - - tab_details + util::disambiguate::compute_disambiguation_details(items, |item, detail| { + item.tab_content_text(detail, cx) + }) } pub fn render_item_indicator(item: Box, cx: &App) -> Option { diff --git a/crates/workspace/src/persistence.rs b/crates/workspace/src/persistence.rs index 67383740a8b3287bb237748776b0c7ab2654d7ba..9ae44ef3db2e6c18979694440744043a6abc055e 100644 --- a/crates/workspace/src/persistence.rs +++ b/crates/workspace/src/persistence.rs @@ -1804,16 +1804,12 @@ impl WorkspaceDb { } } - async fn all_paths_exist_with_a_directory( - paths: &[PathBuf], - fs: &dyn Fs, - timestamp: Option>, - ) -> bool { + async fn all_paths_exist_with_a_directory(paths: &[PathBuf], fs: &dyn Fs) -> bool { let mut any_dir = false; for path in paths { match fs.metadata(path).await.ok().flatten() { None => { - return timestamp.is_some_and(|t| Utc::now() - t < chrono::Duration::days(7)); + return false; } Some(meta) => { if meta.is_dir { @@ -1839,9 +1835,9 @@ impl WorkspaceDb { )>, > { let mut result = Vec::new(); - let mut delete_tasks = Vec::new(); + let mut workspaces_to_delete = Vec::new(); let remote_connections = self.remote_connections()?; - + let now = Utc::now(); for (id, paths, remote_connection_id, timestamp) in self.recent_workspaces()? { if let Some(remote_connection_id) = remote_connection_id { if let Some(connection_options) = remote_connections.get(&remote_connection_id) { @@ -1852,34 +1848,40 @@ impl WorkspaceDb { timestamp, )); } else { - delete_tasks.push(self.delete_workspace_by_id(id)); + workspaces_to_delete.push(id); } continue; } - let has_wsl_path = if cfg!(windows) { - paths + // Delete the workspace if any of the paths are WSL paths. If a + // local workspace points to WSL, attempting to read its metadata + // will wait for the WSL VM and file server to boot up. 
This can + // block for many seconds. Supported scenarios use remote + // workspaces. + if cfg!(windows) { + let has_wsl_path = paths .paths() .iter() - .any(|path| util::paths::WslPath::from_path(path).is_some()) - } else { - false - }; + .any(|path| util::paths::WslPath::from_path(path).is_some()); + if has_wsl_path { + workspaces_to_delete.push(id); + continue; + } + } - // Delete the workspace if any of the paths are WSL paths. - // If a local workspace points to WSL, this check will cause us to wait for the - // WSL VM and file server to boot up. This can block for many seconds. - // Supported scenarios use remote workspaces. - if !has_wsl_path - && Self::all_paths_exist_with_a_directory(paths.paths(), fs, Some(timestamp)).await - { + if Self::all_paths_exist_with_a_directory(paths.paths(), fs).await { result.push((id, SerializedWorkspaceLocation::Local, paths, timestamp)); - } else { - delete_tasks.push(self.delete_workspace_by_id(id)); + } else if now - timestamp >= chrono::Duration::days(7) { + workspaces_to_delete.push(id); } } - futures::future::join_all(delete_tasks).await; + futures::future::join_all( + workspaces_to_delete + .into_iter() + .map(|id| self.delete_workspace_by_id(id)), + ) + .await; Ok(result) } @@ -1932,7 +1934,7 @@ impl WorkspaceDb { window_id, }); } else { - if Self::all_paths_exist_with_a_directory(paths.paths(), fs, None).await { + if Self::all_paths_exist_with_a_directory(paths.paths(), fs).await { workspaces.push(SessionWorkspace { workspace_id, location: SerializedWorkspaceLocation::Local, diff --git a/crates/workspace/src/workspace.rs b/crates/workspace/src/workspace.rs index 15b9cced9322426761d66f43a96fa10a695ae130..b25e9c4128b7ecfa428f328c59d3344ed634b293 100644 --- a/crates/workspace/src/workspace.rs +++ b/crates/workspace/src/workspace.rs @@ -86,7 +86,7 @@ pub use persistence::{ WorkspaceDb, delete_unloaded_items, model::{ DockStructure, ItemId, MultiWorkspaceState, SerializedMultiWorkspace, - SerializedWorkspaceLocation, 
SessionWorkspace, + SerializedProjectGroupKey, SerializedWorkspaceLocation, SessionWorkspace, }, read_serialized_multi_workspaces, resolve_worktree_workspaces, }; @@ -3299,6 +3299,18 @@ impl Workspace { state.task.clone().unwrap() } + /// Prompts the user to save or discard each dirty item, returning + /// `true` if they confirmed (saved/discarded everything) or `false` + /// if they cancelled. Used before removing worktree roots during + /// thread archival. + pub fn prompt_to_save_or_discard_dirty_items( + &mut self, + window: &mut Window, + cx: &mut Context, + ) -> Task> { + self.save_all_internal(SaveIntent::Close, window, cx) + } + fn save_all_internal( &mut self, mut save_intent: SaveIntent, @@ -8682,12 +8694,6 @@ pub async fn restore_multiworkspace( active_workspace, state, } = multi_workspace; - let MultiWorkspaceState { - sidebar_open, - project_group_keys, - sidebar_state, - .. - } = state; let workspace_result = if active_workspace.paths.is_empty() { cx.update(|cx| { @@ -8715,9 +8721,8 @@ pub async fn restore_multiworkspace( Err(err) => { log::error!("Failed to restore active workspace: {err:#}"); - // Try each project group's paths as a fallback. let mut fallback_handle = None; - for key in &project_group_keys { + for key in &state.project_group_keys { let key: ProjectGroupKey = key.clone().into(); let paths = key.path_list().paths().to_vec(); match cx @@ -8748,17 +8753,47 @@ pub async fn restore_multiworkspace( } }; - if !project_group_keys.is_empty() { - let fs = app_state.fs.clone(); + apply_restored_multiworkspace_state(window_handle, &state, app_state.fs.clone(), cx).await; + + window_handle + .update(cx, |_, window, _cx| { + window.activate_window(); + }) + .ok(); + + Ok(window_handle) +} +pub async fn apply_restored_multiworkspace_state( + window_handle: WindowHandle, + state: &MultiWorkspaceState, + fs: Arc, + cx: &mut AsyncApp, +) { + let MultiWorkspaceState { + sidebar_open, + project_group_keys, + sidebar_state, + .. 
+ } = state; + + if !project_group_keys.is_empty() { // Resolve linked worktree paths to their main repo paths so // stale keys from previous sessions get normalized and deduped. let mut resolved_keys: Vec = Vec::new(); - for key in project_group_keys.into_iter().map(ProjectGroupKey::from) { + for key in project_group_keys + .iter() + .cloned() + .map(ProjectGroupKey::from) + { + if key.path_list().paths().is_empty() { + continue; + } let mut resolved_paths = Vec::new(); for path in key.path_list().paths() { - if let Some(common_dir) = - project::discover_root_repo_common_dir(path, fs.as_ref()).await + if key.host().is_none() + && let Some(common_dir) = + project::discover_root_repo_common_dir(path, fs.as_ref()).await { let main_path = common_dir.parent().unwrap_or(&common_dir); resolved_paths.push(main_path.to_path_buf()); @@ -8779,7 +8814,7 @@ pub async fn restore_multiworkspace( .ok(); } - if sidebar_open { + if *sidebar_open { window_handle .update(cx, |multi_workspace, _, cx| { multi_workspace.open_sidebar(cx); @@ -8791,20 +8826,12 @@ pub async fn restore_multiworkspace( window_handle .update(cx, |multi_workspace, window, cx| { if let Some(sidebar) = multi_workspace.sidebar() { - sidebar.restore_serialized_state(&sidebar_state, window, cx); + sidebar.restore_serialized_state(sidebar_state, window, cx); } multi_workspace.serialize(cx); }) .ok(); } - - window_handle - .update(cx, |_, window, _cx| { - window.activate_window(); - }) - .ok(); - - Ok(window_handle) } actions!( @@ -9733,6 +9760,7 @@ pub fn open_remote_project_with_new_connection( serialized_workspace, app_state, window, + None, cx, ) .await @@ -9745,6 +9773,7 @@ pub fn open_remote_project_with_existing_connection( paths: Vec, app_state: Arc, window: WindowHandle, + provisional_project_group_key: Option, cx: &mut AsyncApp, ) -> Task>>>> { cx.spawn(async move |cx| { @@ -9758,6 +9787,7 @@ pub fn open_remote_project_with_existing_connection( serialized_workspace, app_state, window, + 
provisional_project_group_key, cx, ) .await @@ -9771,6 +9801,7 @@ async fn open_remote_project_inner( serialized_workspace: Option, app_state: Arc, window: WindowHandle, + provisional_project_group_key: Option, cx: &mut AsyncApp, ) -> Result>>> { let db = cx.update(|cx| WorkspaceDb::global(cx)); @@ -9831,6 +9862,9 @@ async fn open_remote_project_inner( workspace }); + if let Some(project_group_key) = provisional_project_group_key.clone() { + multi_workspace.set_workspace_group_key(&new_workspace, project_group_key); + } multi_workspace.activate(new_workspace.clone(), window, cx); new_workspace })?; diff --git a/crates/worktree/src/worktree.rs b/crates/worktree/src/worktree.rs index e1f15683e2d120f79fb7aaae0d8a3b5bff51f5f5..fea550e0c2ca1987fd3d9fa88c48f38596c4dd1b 100644 --- a/crates/worktree/src/worktree.rs +++ b/crates/worktree/src/worktree.rs @@ -510,7 +510,7 @@ impl Worktree { cx: &mut App, ) -> Entity { cx.new(|cx: &mut Context| { - let snapshot = Snapshot::new( + let mut snapshot = Snapshot::new( WorktreeId::from_proto(worktree.id), RelPath::from_proto(&worktree.root_name) .unwrap_or_else(|_| RelPath::empty().into()), @@ -518,6 +518,10 @@ impl Worktree { path_style, ); + snapshot.root_repo_common_dir = worktree + .root_repo_common_dir + .map(|p| SanitizedPath::new_arc(Path::new(&p))); + let background_snapshot = Arc::new(Mutex::new(( snapshot.clone(), Vec::::new(), @@ -676,6 +680,9 @@ impl Worktree { root_name: self.root_name().to_proto(), visible: self.is_visible(), abs_path: self.abs_path().to_string_lossy().into_owned(), + root_repo_common_dir: self + .root_repo_common_dir() + .map(|p| p.to_string_lossy().into_owned()), } } @@ -2430,9 +2437,12 @@ impl Snapshot { self.entries_by_path.edit(entries_by_path_edits, ()); self.entries_by_id.edit(entries_by_id_edits, ()); - self.root_repo_common_dir = update + if let Some(dir) = update .root_repo_common_dir - .map(|p| SanitizedPath::new_arc(Path::new(&p))); + .map(|p| SanitizedPath::new_arc(Path::new(&p))) + { + 
self.root_repo_common_dir = Some(dir); + } self.scan_id = update.scan_id as usize; if update.is_last_update { diff --git a/crates/zed/src/main.rs b/crates/zed/src/main.rs index 5937b91665b892084aa7b4d1f8b94ec1e2d864da..97caf14639ce23a8c85392aa630267f146902602 100644 --- a/crates/zed/src/main.rs +++ b/crates/zed/src/main.rs @@ -7,7 +7,7 @@ mod zed; use agent::{SharedThread, ThreadStore}; use agent_client_protocol; use agent_ui::AgentPanel; -use anyhow::{Context as _, Error, Result}; +use anyhow::{Context as _, Result}; use clap::Parser; use cli::FORCE_CLI_MODE_ENV_VAR_NAME; use client::{Client, ProxySettings, RefreshLlmTokenListener, UserStore, parse_zed_link}; @@ -1357,54 +1357,56 @@ pub(crate) async fn restore_or_create_workspace( cx: &mut AsyncApp, ) -> Result<()> { let kvp = cx.update(|cx| KeyValueStore::global(cx)); - if let Some((multi_workspaces, remote_workspaces)) = restorable_workspaces(cx, &app_state).await - { - let mut results: Vec> = Vec::new(); - let mut tasks = Vec::new(); - + if let Some(multi_workspaces) = restorable_workspaces(cx, &app_state).await { + let mut error_count = 0; for multi_workspace in multi_workspaces { - if let Err(error) = restore_multiworkspace(multi_workspace, app_state.clone(), cx).await - { - log::error!("Failed to restore workspace: {error:#}"); - results.push(Err(error)); - } - } + let result = match &multi_workspace.active_workspace.location { + SerializedWorkspaceLocation::Local => { + restore_multiworkspace(multi_workspace, app_state.clone(), cx) + .await + .map(|_| ()) + } + SerializedWorkspaceLocation::Remote(connection_options) => { + let mut connection_options = connection_options.clone(); + if let RemoteConnectionOptions::Ssh(options) = &mut connection_options { + cx.update(|cx| { + RemoteSettings::get_global(cx) + .fill_connection_options_from_settings(options) + }); + } - for session_workspace in remote_workspaces { - let app_state = app_state.clone(); - let SerializedWorkspaceLocation::Remote(mut 
connection_options) = - session_workspace.location - else { - continue; + let paths = multi_workspace + .active_workspace + .paths + .paths() + .iter() + .map(PathBuf::from) + .collect::>(); + let state = multi_workspace.state.clone(); + async { + let window = open_remote_project( + connection_options, + paths, + app_state.clone(), + workspace::OpenOptions::default(), + cx, + ) + .await?; + workspace::apply_restored_multiworkspace_state( + window, + &state, + app_state.fs.clone(), + cx, + ) + .await; + Ok::<(), anyhow::Error>(()) + } + .await + } }; - let paths = session_workspace.paths; - if let RemoteConnectionOptions::Ssh(options) = &mut connection_options { - cx.update(|cx| { - RemoteSettings::get_global(cx).fill_connection_options_from_settings(options) - }); - } - let task = cx.spawn(async move |cx| { - recent_projects::open_remote_project( - connection_options, - paths.paths().iter().map(PathBuf::from).collect(), - app_state, - workspace::OpenOptions::default(), - cx, - ) - .await - .map_err(|e| anyhow::anyhow!(e)) - }); - tasks.push(task); - } - // Wait for all window groups and remote workspaces to open concurrently - results.extend(future::join_all(tasks).await); - - // Show notifications for any errors that occurred - let mut error_count = 0; - for result in results { - if let Err(e) = result { - log::error!("Failed to restore workspace: {}", e); + if let Err(error) = result { + log::error!("Failed to restore workspace: {error:#}"); error_count += 1; } } @@ -1487,17 +1489,9 @@ pub(crate) async fn restore_or_create_workspace( async fn restorable_workspaces( cx: &mut AsyncApp, app_state: &Arc, -) -> Option<( - Vec, - Vec, -)> { +) -> Option> { let locations = restorable_workspace_locations(cx, app_state).await?; - let (remote_workspaces, local_workspaces) = locations - .into_iter() - .partition(|sw| matches!(sw.location, SerializedWorkspaceLocation::Remote(_))); - let multi_workspaces = - cx.update(|cx| 
workspace::read_serialized_multi_workspaces(local_workspaces, cx)); - Some((multi_workspaces, remote_workspaces)) + Some(cx.update(|cx| workspace::read_serialized_multi_workspaces(locations, cx))) } pub(crate) async fn restorable_workspace_locations( diff --git a/crates/zed/src/visual_test_runner.rs b/crates/zed/src/visual_test_runner.rs index af1a60589483443e56506e7eeb7a8424d16a4143..2f043bfb0c9e66d4ee56bfc78d0b9d69244d3777 100644 --- a/crates/zed/src/visual_test_runner.rs +++ b/crates/zed/src/visual_test_runner.rs @@ -573,6 +573,27 @@ fn run_visual_tests(project_path: PathBuf, update_baseline: bool) -> Result<()> } } + // Run Test: Sidebar with duplicate project names + println!("\n--- Test: sidebar_duplicate_names ---"); + match run_sidebar_duplicate_project_names_visual_tests( + app_state.clone(), + &mut cx, + update_baseline, + ) { + Ok(TestResult::Passed) => { + println!("✓ sidebar_duplicate_names: PASSED"); + passed += 1; + } + Ok(TestResult::BaselineUpdated(_)) => { + println!("✓ sidebar_duplicate_names: Baselines updated"); + updated += 1; + } + Err(e) => { + eprintln!("✗ sidebar_duplicate_names: FAILED - {}", e); + failed += 1; + } + } + // Run Test 9: Tool Permissions Settings UI visual test println!("\n--- Test 9: tool_permissions_settings ---"); match run_tool_permissions_visual_tests(app_state.clone(), &mut cx, update_baseline) { @@ -3069,6 +3090,279 @@ fn run_git_command(args: &[&str], dir: &std::path::Path) -> Result<()> { Ok(()) } +#[cfg(target_os = "macos")] +/// Helper to create a project, add a worktree at the given path, and return the project. 
+fn create_project_with_worktree( + worktree_dir: &Path, + app_state: &Arc, + cx: &mut VisualTestAppContext, +) -> Result> { + let project = cx.update(|cx| { + project::Project::local( + app_state.client.clone(), + app_state.node_runtime.clone(), + app_state.user_store.clone(), + app_state.languages.clone(), + app_state.fs.clone(), + None, + project::LocalProjectFlags { + init_worktree_trust: false, + ..Default::default() + }, + cx, + ) + }); + + let add_task = cx.update(|cx| { + project.update(cx, |project, cx| { + project.find_or_create_worktree(worktree_dir, true, cx) + }) + }); + + cx.background_executor.allow_parking(); + cx.foreground_executor + .block_test(add_task) + .context("Failed to add worktree")?; + cx.background_executor.forbid_parking(); + + cx.run_until_parked(); + Ok(project) +} + +#[cfg(target_os = "macos")] +fn open_sidebar_test_window( + projects: Vec>, + app_state: &Arc, + cx: &mut VisualTestAppContext, +) -> Result> { + anyhow::ensure!(!projects.is_empty(), "need at least one project"); + + let window_size = size(px(400.0), px(600.0)); + let bounds = Bounds { + origin: point(px(0.0), px(0.0)), + size: window_size, + }; + + let mut projects_iter = projects.into_iter(); + let first_project = projects_iter + .next() + .ok_or_else(|| anyhow::anyhow!("need at least one project"))?; + let remaining: Vec<_> = projects_iter.collect(); + + let multi_workspace_window: WindowHandle = cx + .update(|cx| { + cx.open_window( + WindowOptions { + window_bounds: Some(WindowBounds::Windowed(bounds)), + focus: false, + show: false, + ..Default::default() + }, + |window, cx| { + let first_ws = cx.new(|cx| { + Workspace::new(None, first_project.clone(), app_state.clone(), window, cx) + }); + cx.new(|cx| { + let mut mw = MultiWorkspace::new(first_ws, window, cx); + for project in remaining { + let ws = cx.new(|cx| { + Workspace::new(None, project, app_state.clone(), window, cx) + }); + mw.activate(ws, window, cx); + } + mw + }) + }, + ) + }) + .context("Failed to 
open MultiWorkspace window")?; + + cx.run_until_parked(); + + // Create the sidebar outside the MultiWorkspace update to avoid a + // re-entrant read panic (Sidebar::new reads the MultiWorkspace). + let sidebar = cx + .update_window(multi_workspace_window.into(), |root_view, window, cx| { + let mw_handle: Entity = root_view + .downcast() + .map_err(|_| anyhow::anyhow!("Failed to downcast root view to MultiWorkspace"))?; + Ok::<_, anyhow::Error>(cx.new(|cx| sidebar::Sidebar::new(mw_handle, window, cx))) + }) + .context("Failed to create sidebar")??; + + multi_workspace_window + .update(cx, |mw, _window, cx| { + mw.register_sidebar(sidebar.clone(), cx); + }) + .context("Failed to register sidebar")?; + + cx.run_until_parked(); + + // Open the sidebar + multi_workspace_window + .update(cx, |mw, window, cx| { + mw.toggle_sidebar(window, cx); + }) + .context("Failed to toggle sidebar")?; + + // Let rendering settle + for _ in 0..10 { + cx.advance_clock(Duration::from_millis(100)); + cx.run_until_parked(); + } + + // Refresh the window + cx.update_window(multi_workspace_window.into(), |_, window, _cx| { + window.refresh(); + })?; + + cx.run_until_parked(); + + Ok(multi_workspace_window) +} + +#[cfg(target_os = "macos")] +fn cleanup_sidebar_test_window( + window: WindowHandle, + cx: &mut VisualTestAppContext, +) -> Result<()> { + window.update(cx, |mw, _window, cx| { + for workspace in mw.workspaces() { + let project = workspace.read(cx).project().clone(); + project.update(cx, |project, cx| { + let ids: Vec<_> = project.worktrees(cx).map(|wt| wt.read(cx).id()).collect(); + for id in ids { + project.remove_worktree(id, cx); + } + }); + } + })?; + + cx.run_until_parked(); + + cx.update_window(window.into(), |_, window, _cx| { + window.remove_window(); + })?; + + cx.run_until_parked(); + + for _ in 0..15 { + cx.advance_clock(Duration::from_millis(100)); + cx.run_until_parked(); + } + + Ok(()) +} + +#[cfg(target_os = "macos")] +fn 
run_sidebar_duplicate_project_names_visual_tests( + app_state: Arc, + cx: &mut VisualTestAppContext, + update_baseline: bool, +) -> Result { + let temp_dir = tempfile::tempdir()?; + let temp_path = temp_dir.keep(); + let canonical_temp = temp_path.canonicalize()?; + + // Create directory structure where every leaf directory is named "zed" but + // lives at a distinct path. This lets us test that the sidebar correctly + // disambiguates projects whose names would otherwise collide. + // + // code/zed/ — project1 (single worktree) + // code/foo/zed/ — project2 (single worktree) + // code/bar/zed/ — project3, first worktree + // code/baz/zed/ — project3, second worktree + // + // No two projects share a worktree path, so ProjectGroupBuilder will + // place each in its own group. + let code_zed = canonical_temp.join("code").join("zed"); + let foo_zed = canonical_temp.join("code").join("foo").join("zed"); + let bar_zed = canonical_temp.join("code").join("bar").join("zed"); + let baz_zed = canonical_temp.join("code").join("baz").join("zed"); + std::fs::create_dir_all(&code_zed)?; + std::fs::create_dir_all(&foo_zed)?; + std::fs::create_dir_all(&bar_zed)?; + std::fs::create_dir_all(&baz_zed)?; + + cx.update(|cx| { + cx.update_flags(true, vec!["agent-v2".to_string()]); + }); + + let mut has_baseline_update = None; + + // Two single-worktree projects whose leaf name is "zed" + { + let project1 = create_project_with_worktree(&code_zed, &app_state, cx)?; + let project2 = create_project_with_worktree(&foo_zed, &app_state, cx)?; + + let window = open_sidebar_test_window(vec![project1, project2], &app_state, cx)?; + + let result = run_visual_test( + "sidebar_two_projects_same_leaf_name", + window.into(), + cx, + update_baseline, + ); + + cleanup_sidebar_test_window(window, cx)?; + match result? 
{ + TestResult::Passed => {} + TestResult::BaselineUpdated(path) => { + has_baseline_update = Some(path); + } + } + } + + // Three projects, third has two worktrees (all leaf names "zed") + // + // project1: code/zed + // project2: code/foo/zed + // project3: code/bar/zed + code/baz/zed + // + // Each project has a unique set of worktree paths, so they form + // separate groups. The sidebar must disambiguate all three. + { + let project1 = create_project_with_worktree(&code_zed, &app_state, cx)?; + let project2 = create_project_with_worktree(&foo_zed, &app_state, cx)?; + + let project3 = create_project_with_worktree(&bar_zed, &app_state, cx)?; + let add_second_worktree = cx.update(|cx| { + project3.update(cx, |project, cx| { + project.find_or_create_worktree(&baz_zed, true, cx) + }) + }); + cx.background_executor.allow_parking(); + cx.foreground_executor + .block_test(add_second_worktree) + .context("Failed to add second worktree to project 3")?; + cx.background_executor.forbid_parking(); + cx.run_until_parked(); + + let window = open_sidebar_test_window(vec![project1, project2, project3], &app_state, cx)?; + + let result = run_visual_test( + "sidebar_three_projects_with_multi_worktree", + window.into(), + cx, + update_baseline, + ); + + cleanup_sidebar_test_window(window, cx)?; + match result? { + TestResult::Passed => {} + TestResult::BaselineUpdated(path) => { + has_baseline_update = Some(path); + } + } + } + + if let Some(path) = has_baseline_update { + Ok(TestResult::BaselineUpdated(path)) + } else { + Ok(TestResult::Passed) + } +} + #[cfg(all(target_os = "macos", feature = "visual-tests"))] fn run_start_thread_in_selector_visual_tests( app_state: Arc, diff --git a/crates/zed/src/zed.rs b/crates/zed/src/zed.rs index 9a8c11530d4c18ba43285db7ba8fe5c0fa707801..5fc7e330417f9cb7b1140eca407c7755b173c0a6 100644 --- a/crates/zed/src/zed.rs +++ b/crates/zed/src/zed.rs @@ -2052,6 +2052,7 @@ pub fn open_new_ssh_project_from_project( cx, ) .await + .map(|_| ()) }) }