agent: Fix find_or_create repository to support remote (#54074)

Anthony Eid and Smit Barmase created

### Summary

Follow up to: https://github.com/zed-industries/zed/pull/54001

This PR makes
`agent_ui::thread_worktree_archive::find_or_create_repository` work
whether or not the repository it's searching for is on a remote
machine. In the
case where it's on a remote machine that has an active connection in
Zed's connection pool, we return the repository; otherwise, we error
out.

I implemented it this way so archiving a thread on a remote machine that
isn't connected deletes the metadata without showing a remote connection
modal to the user. In the future, we can work around this by having
better git worktree garbage collection, where remote machines remove, on
connection, git worktrees that were created by Zed and have no thread
metadata.

Finally, this PR gets archived threads working on remotes!


#### Follow Ups 

1. Replace instances of `std::fs` with Zed's Fs trait in the archive
thread codepaths
2. Make archive thread work properly for workspaces that are not open
(local and remote)

Self-Review Checklist:

- [x] I've reviewed my own diff for quality, security, and reliability
- [x] Unsafe blocks (if any) have justifying comments
- [x] The content is consistent with the [UI/UX
checklist](https://github.com/zed-industries/zed/blob/main/CONTRIBUTING.md#uiux-checklist)
- [x] Tests cover the new/changed behavior
- [x] Performance impact has been considered and is acceptable

Release Notes:

- N/A

---------

Co-authored-by: Smit Barmase <heysmitbarmase@gmail.com>

Change summary

crates/agent_ui/src/thread_worktree_archive.rs    | 276 ++++++++----
crates/project/src/git_store.rs                   |  31 +
crates/remote_connection/src/remote_connection.rs |   2 
crates/sidebar/src/sidebar.rs                     |  31 
crates/sidebar/src/sidebar_tests.rs               | 384 +++++++++++++++++
5 files changed, 616 insertions(+), 108 deletions(-)

Detailed changes

crates/agent_ui/src/thread_worktree_archive.rs 🔗

@@ -4,13 +4,13 @@ use std::{
 };
 
 use anyhow::{Context as _, Result, anyhow};
-use fs::{Fs, RemoveOptions};
 use gpui::{App, AsyncApp, Entity, Task};
 use project::{
     LocalProjectFlags, Project, WorktreeId,
     git_store::{Repository, resolve_git_worktree_to_main_repo, worktrees_directory_for_repo},
     project_settings::ProjectSettings,
 };
+use remote::{RemoteConnectionOptions, same_remote_connection_identity};
 use settings::Settings;
 use util::ResultExt;
 use workspace::{AppState, MultiWorkspace, Workspace};
@@ -48,6 +48,10 @@ pub struct RootPlan {
     /// The branch the worktree was on, so it can be restored later.
     /// `None` if the worktree was in detached HEAD state.
     pub branch_name: Option<String>,
+    /// Remote connection options for the project that owns this worktree,
+    /// used to create temporary remote projects when the main repo isn't
+    /// loaded in any open workspace.
+    pub remote_connection: Option<RemoteConnectionOptions>,
 }
 
 /// A `Project` that references a worktree being archived, paired with the
@@ -97,15 +101,26 @@ fn worktrees_base_for_repo(main_repo_path: &Path, cx: &App) -> Option<PathBuf> {
 /// cannot be archived to disk) or if no open project has it loaded.
 pub fn build_root_plan(
     path: &Path,
+    remote_connection: Option<&RemoteConnectionOptions>,
     workspaces: &[Entity<Workspace>],
     cx: &App,
 ) -> Option<RootPlan> {
     let path = path.to_path_buf();
 
+    let matches_target_connection = |project: &Entity<Project>, cx: &App| {
+        same_remote_connection_identity(
+            project.read(cx).remote_connection_options(cx).as_ref(),
+            remote_connection,
+        )
+    };
+
     let affected_projects = workspaces
         .iter()
         .filter_map(|workspace| {
             let project = workspace.read(cx).project().clone();
+            if !matches_target_connection(&project, cx) {
+                return None;
+            }
             let worktree = project
                 .read(cx)
                 .visible_worktrees(cx)
@@ -124,6 +139,7 @@ pub fn build_root_plan(
 
     let linked_repo = workspaces
         .iter()
+        .filter(|workspace| matches_target_connection(workspace.read(cx).project(), cx))
         .flat_map(|workspace| {
             workspace
                 .read(cx)
@@ -158,12 +174,14 @@ pub fn build_root_plan(
         .branch
         .as_ref()
         .map(|branch| branch.name().to_string());
+
     Some(RootPlan {
         root_path: path,
         main_repo_path,
         affected_projects,
         worktree_repo: repo,
         branch_name,
+        remote_connection: remote_connection.cloned(),
     })
 }
 
@@ -175,7 +193,7 @@ pub fn build_root_plan(
 /// each project to fully release it, then asks the main repository to
 /// delete the worktree directory. If the git removal fails, the worktree
 /// is re-added to each project via [`rollback_root`].
-pub async fn remove_root(root: RootPlan, fs: Arc<dyn Fs>, cx: &mut AsyncApp) -> Result<()> {
+pub async fn remove_root(root: RootPlan, cx: &mut AsyncApp) -> Result<()> {
     let release_tasks: Vec<_> = root
         .affected_projects
         .iter()
@@ -190,7 +208,7 @@ pub async fn remove_root(root: RootPlan, fs: Arc<dyn Fs>, cx: &mut AsyncApp) ->
         })
         .collect();
 
-    if let Err(error) = remove_root_after_worktree_removal(&root, fs, release_tasks, cx).await {
+    if let Err(error) = remove_root_after_worktree_removal(&root, release_tasks, cx).await {
         rollback_root(&root, cx).await;
         return Err(error);
     }
@@ -200,7 +218,6 @@ pub async fn remove_root(root: RootPlan, fs: Arc<dyn Fs>, cx: &mut AsyncApp) ->
 
 async fn remove_root_after_worktree_removal(
     root: &RootPlan,
-    fs: Arc<dyn Fs>,
     release_tasks: Vec<Task<Result<()>>>,
     cx: &mut AsyncApp,
 ) -> Result<()> {
@@ -210,45 +227,36 @@ async fn remove_root_after_worktree_removal(
         }
     }
 
-    // Delete the directory ourselves first, then tell git to clean up the
-    // metadata. This avoids a problem where `git worktree remove` can
-    // remove the metadata in `.git/worktrees/<name>` but fail to delete
-    // the directory (git continues past directory-removal errors), leaving
-    // an orphaned folder on disk. By deleting the directory first, we
-    // guarantee it's gone, and `git worktree remove --force` with a
-    // missing working tree just cleans up the admin entry.
-    fs.remove_dir(
-        &root.root_path,
-        RemoveOptions {
-            recursive: true,
-            ignore_if_not_exists: true,
-        },
-    )
-    .await
-    .with_context(|| {
-        format!(
-            "failed to delete worktree directory '{}'",
-            root.root_path.display()
-        )
-    })?;
+    let (repo, project) =
+        find_or_create_repository(&root.main_repo_path, root.remote_connection.as_ref(), cx)
+            .await?;
 
-    let (repo, _temp_project) = find_or_create_repository(&root.main_repo_path, cx).await?;
+    // `Repository::remove_worktree` with `force = true` deletes the working
+    // directory before running `git worktree remove --force`, so there's no
+    // need to touch the filesystem here. For remote projects that cleanup
+    // runs on the headless server via the `GitRemoveWorktree` RPC, which is
+    // the only code path with access to the remote machine's filesystem.
     let receiver = repo.update(cx, |repo: &mut Repository, _cx| {
         repo.remove_worktree(root.root_path.clone(), true)
     });
     let result = receiver
         .await
         .map_err(|_| anyhow!("git worktree metadata cleanup was canceled"))?;
-    // Keep _temp_project alive until after the await so the headless project isn't dropped mid-operation
-    drop(_temp_project);
+    // `project` may be a live workspace project or a temporary one created
+    // by `find_or_create_repository`. In the temporary case we must keep it
+    // alive until the repo removes the worktree
+    drop(project);
     result.context("git worktree metadata cleanup failed")?;
 
-    remove_empty_parent_dirs_up_to_worktrees_base(
-        root.root_path.clone(),
-        root.main_repo_path.clone(),
-        cx,
-    )
-    .await;
+    // Empty-parent cleanup uses local std::fs — skip for remote projects.
+    if root.remote_connection.is_none() {
+        remove_empty_parent_dirs_up_to_worktrees_base(
+            root.root_path.clone(),
+            root.main_repo_path.clone(),
+            cx,
+        )
+        .await;
+    }
 
     Ok(())
 }
@@ -312,13 +320,17 @@ fn remove_empty_ancestors(child_path: &Path, base_path: &Path) {
 }
 
 /// Finds a live `Repository` entity for the given path, or creates a temporary
-/// `Project::local` to obtain one.
+/// project to obtain one.
 ///
 /// `Repository` entities can only be obtained through a `Project` because
 /// `GitStore` (which creates and manages `Repository` entities) is owned by
 /// `Project`. When no open workspace contains the repo we need, we spin up a
-/// headless `Project::local` just to get a `Repository` handle. The caller
-/// keeps the returned `Option<Entity<Project>>` alive for the duration of the
+/// headless project just to get a `Repository` handle. For local paths this is
+/// a `Project::local`; for remote paths we build a `Project::remote` through
+/// the connection pool (reusing the existing SSH transport), which requires
+/// the caller to pass the matching `RemoteConnectionOptions` so we only match
+/// and fall back onto projects that share the same remote identity. The
+/// caller keeps the returned `Entity<Project>` alive for the duration of the
 /// git operations, then drops it.
 ///
 /// Future improvement: decoupling `GitStore` from `Project` so that
@@ -326,46 +338,91 @@ fn remove_empty_ancestors(child_path: &Path, base_path: &Path) {
 /// temporary-project workaround.
 async fn find_or_create_repository(
     repo_path: &Path,
+    remote_connection: Option<&RemoteConnectionOptions>,
     cx: &mut AsyncApp,
-) -> Result<(Entity<Repository>, Option<Entity<Project>>)> {
+) -> Result<(Entity<Repository>, Entity<Project>)> {
     let repo_path_owned = repo_path.to_path_buf();
+    let remote_connection_owned = remote_connection.cloned();
+
+    // First, try to find a live repository in any open workspace whose
+    // remote connection matches (so a local `/project` and a remote
+    // `/project` are not confused).
     let live_repo = cx.update(|cx| {
         all_open_workspaces(cx)
             .into_iter()
-            .flat_map(|workspace| {
-                workspace
-                    .read(cx)
-                    .project()
-                    .read(cx)
-                    .repositories(cx)
-                    .values()
-                    .cloned()
-                    .collect::<Vec<_>>()
-            })
-            .find(|repo| {
-                repo.read(cx).snapshot().work_directory_abs_path.as_ref()
-                    == repo_path_owned.as_path()
+            .filter_map(|workspace| {
+                let project = workspace.read(cx).project().clone();
+                let project_connection = project.read(cx).remote_connection_options(cx);
+                if !same_remote_connection_identity(
+                    project_connection.as_ref(),
+                    remote_connection_owned.as_ref(),
+                ) {
+                    return None;
+                }
+                Some((
+                    project
+                        .read(cx)
+                        .repositories(cx)
+                        .values()
+                        .find(|repo| {
+                            repo.read(cx).snapshot().work_directory_abs_path.as_ref()
+                                == repo_path_owned.as_path()
+                        })
+                        .cloned()?,
+                    project.clone(),
+                ))
             })
+            .next()
     });
 
-    if let Some(repo) = live_repo {
-        return Ok((repo, None));
+    if let Some((repo, project)) = live_repo {
+        return Ok((repo, project));
     }
 
     let app_state =
         current_app_state(cx).context("no app state available for temporary project")?;
-    let temp_project = cx.update(|cx| {
-        Project::local(
-            app_state.client.clone(),
-            app_state.node_runtime.clone(),
-            app_state.user_store.clone(),
-            app_state.languages.clone(),
-            app_state.fs.clone(),
-            None,
-            LocalProjectFlags::default(),
-            cx,
-        )
-    });
+
+    // For remote paths, create a fresh RemoteClient through the connection
+    // pool (reusing the existing SSH transport) and build a temporary
+    // remote project. Each RemoteClient gets its own server-side headless
+    // project, so there are no RPC routing conflicts with other projects.
+    let temp_project = if let Some(connection) = remote_connection_owned {
+        let remote_client = cx
+            .update(|cx| {
+                if !remote::has_active_connection(&connection, cx) {
+                    anyhow::bail!("cannot open repository on disconnected remote machine");
+                }
+                Ok(remote_connection::connect_reusing_pool(connection, cx))
+            })?
+            .await?
+            .context("remote connection was canceled")?;
+
+        cx.update(|cx| {
+            Project::remote(
+                remote_client,
+                app_state.client.clone(),
+                app_state.node_runtime.clone(),
+                app_state.user_store.clone(),
+                app_state.languages.clone(),
+                app_state.fs.clone(),
+                false,
+                cx,
+            )
+        })
+    } else {
+        cx.update(|cx| {
+            Project::local(
+                app_state.client.clone(),
+                app_state.node_runtime.clone(),
+                app_state.user_store.clone(),
+                app_state.languages.clone(),
+                app_state.fs.clone(),
+                None,
+                LocalProjectFlags::default(),
+                cx,
+            )
+        })
+    };
 
     let repo_path_for_worktree = repo_path.to_path_buf();
     let create_worktree = temp_project.update(cx, |project, cx| {
@@ -393,7 +450,7 @@ async fn find_or_create_repository(
     barrier
         .await
         .map_err(|_| anyhow!("temporary repository barrier canceled"))?;
-    Ok((repo, Some(temp_project)))
+    Ok((repo, temp_project))
 }
 
 /// Re-adds the worktree to every affected project after a failed
@@ -514,9 +571,10 @@ pub async fn persist_worktree_state(root: &RootPlan, cx: &mut AsyncApp) -> Resul
     // This is fatal: without the ref, git gc will eventually collect the
     // WIP commits and a later restore will silently fail.
     let ref_name = archived_worktree_ref_name(archived_worktree_id);
-    let (main_repo, _temp_project) = find_or_create_repository(&root.main_repo_path, cx)
-        .await
-        .context("could not open main repo to create archive ref")?;
+    let (main_repo, _temp_project) =
+        find_or_create_repository(&root.main_repo_path, root.remote_connection.as_ref(), cx)
+            .await
+            .context("could not open main repo to create archive ref")?;
     let rx = main_repo.update(cx, |repo, _cx| {
         repo.update_ref(ref_name.clone(), unstaged_commit_hash.clone())
     });
@@ -524,6 +582,8 @@ pub async fn persist_worktree_state(root: &RootPlan, cx: &mut AsyncApp) -> Resul
         .map_err(|_| anyhow!("update_ref canceled"))
         .and_then(|r| r)
         .with_context(|| format!("failed to create ref {ref_name} on main repo"))?;
+    // See note in `remove_root_after_worktree_removal`: this may be a live
+    // or temporary project; dropping only matters in the temporary case.
     drop(_temp_project);
 
     Ok(archived_worktree_id)
@@ -536,11 +596,14 @@ pub async fn persist_worktree_state(root: &RootPlan, cx: &mut AsyncApp) -> Resul
 pub async fn rollback_persist(archived_worktree_id: i64, root: &RootPlan, cx: &mut AsyncApp) {
     // Delete the git ref on main repo
     if let Ok((main_repo, _temp_project)) =
-        find_or_create_repository(&root.main_repo_path, cx).await
+        find_or_create_repository(&root.main_repo_path, root.remote_connection.as_ref(), cx).await
     {
         let ref_name = archived_worktree_ref_name(archived_worktree_id);
         let rx = main_repo.update(cx, |repo, _cx| repo.delete_ref(ref_name));
         rx.await.ok().and_then(|r| r.log_err());
+        // See note in `remove_root_after_worktree_removal`: this may be a
+        // live or temporary project; dropping only matters in the temporary
+        // case.
         drop(_temp_project);
     }
 
@@ -564,9 +627,11 @@ pub async fn rollback_persist(archived_worktree_id: i64, root: &RootPlan, cx: &m
 /// unstaged state from the WIP commit trees.
 pub async fn restore_worktree_via_git(
     row: &ArchivedGitWorktree,
+    remote_connection: Option<&RemoteConnectionOptions>,
     cx: &mut AsyncApp,
 ) -> Result<PathBuf> {
-    let (main_repo, _temp_project) = find_or_create_repository(&row.main_repo_path, cx).await?;
+    let (main_repo, _temp_project) =
+        find_or_create_repository(&row.main_repo_path, remote_connection, cx).await?;
 
     let worktree_path = &row.worktree_path;
     let app_state = current_app_state(cx).context("no app state available")?;
@@ -597,13 +662,15 @@ pub async fn restore_worktree_via_git(
         true
     };
 
-    let (wt_repo, _temp_wt_project) = match find_or_create_repository(worktree_path, cx).await {
-        Ok(result) => result,
-        Err(error) => {
-            remove_new_worktree_on_error(created_new_worktree, &main_repo, worktree_path, cx).await;
-            return Err(error);
-        }
-    };
+    let (wt_repo, _temp_wt_project) =
+        match find_or_create_repository(worktree_path, remote_connection, cx).await {
+            Ok(result) => result,
+            Err(error) => {
+                remove_new_worktree_on_error(created_new_worktree, &main_repo, worktree_path, cx)
+                    .await;
+                return Err(error);
+            }
+        };
 
     if let Some(branch_name) = &row.branch_name {
         // Attempt to check out the branch the worktree was previously on.
@@ -724,9 +791,14 @@ async fn remove_new_worktree_on_error(
 
 /// Deletes the git ref and DB records for a single archived worktree.
 /// Used when an archived worktree is no longer referenced by any thread.
-pub async fn cleanup_archived_worktree_record(row: &ArchivedGitWorktree, cx: &mut AsyncApp) {
+pub async fn cleanup_archived_worktree_record(
+    row: &ArchivedGitWorktree,
+    remote_connection: Option<&RemoteConnectionOptions>,
+    cx: &mut AsyncApp,
+) {
     // Delete the git ref from the main repo
-    if let Ok((main_repo, _temp_project)) = find_or_create_repository(&row.main_repo_path, cx).await
+    if let Ok((main_repo, _temp_project)) =
+        find_or_create_repository(&row.main_repo_path, remote_connection, cx).await
     {
         let ref_name = archived_worktree_ref_name(row.id);
         let rx = main_repo.update(cx, |repo, _cx| repo.delete_ref(ref_name));
@@ -735,7 +807,9 @@ pub async fn cleanup_archived_worktree_record(row: &ArchivedGitWorktree, cx: &mu
             Ok(Err(error)) => log::warn!("Failed to delete archive ref: {error}"),
             Err(_) => log::warn!("Archive ref deletion was canceled"),
         }
-        // Keep _temp_project alive until after the await so the headless project isn't dropped mid-operation
+        // See note in `remove_root_after_worktree_removal`: this may be a
+        // live or temporary project; dropping only matters in the temporary
+        // case.
         drop(_temp_project);
     }
 
@@ -754,6 +828,11 @@ pub async fn cleanup_archived_worktree_record(row: &ArchivedGitWorktree, cx: &mu
 /// deletes the git ref and DB records.
 pub async fn cleanup_thread_archived_worktrees(thread_id: ThreadId, cx: &mut AsyncApp) {
     let store = cx.update(|cx| ThreadMetadataStore::global(cx));
+    let remote_connection = store.read_with(cx, |store, _cx| {
+        store
+            .entry(thread_id)
+            .and_then(|t| t.remote_connection.clone())
+    });
 
     let archived_worktrees = store
         .read_with(cx, |store, cx| {
@@ -791,7 +870,7 @@ pub async fn cleanup_thread_archived_worktrees(thread_id: ThreadId, cx: &mut Asy
         match still_referenced {
             Ok(true) => {}
             Ok(false) => {
-                cleanup_archived_worktree_record(row, cx).await;
+                cleanup_archived_worktree_record(row, remote_connection.as_ref(), cx).await;
             }
             Err(error) => {
                 log::error!(
@@ -828,7 +907,7 @@ fn current_app_state(cx: &mut AsyncApp) -> Option<Arc<AppState>> {
 #[cfg(test)]
 mod tests {
     use super::*;
-    use fs::FakeFs;
+    use fs::{FakeFs, Fs as _};
     use git::repository::Worktree as GitWorktree;
     use gpui::{BorrowAppContext, TestAppContext};
     use project::Project;
@@ -985,7 +1064,12 @@ mod tests {
 
         // The main worktree should NOT produce a root plan.
         workspace.read_with(cx, |_workspace, cx| {
-            let plan = build_root_plan(Path::new("/project"), std::slice::from_ref(&workspace), cx);
+            let plan = build_root_plan(
+                Path::new("/project"),
+                None,
+                std::slice::from_ref(&workspace),
+                cx,
+            );
             assert!(
                 plan.is_none(),
                 "build_root_plan should return None for a main worktree",
@@ -1047,6 +1131,7 @@ mod tests {
             // The linked worktree SHOULD produce a root plan.
             let plan = build_root_plan(
                 Path::new("/worktrees/project/feature/project"),
+                None,
                 std::slice::from_ref(&workspace),
                 cx,
             );
@@ -1062,8 +1147,12 @@ mod tests {
             assert_eq!(plan.main_repo_path, PathBuf::from("/project"));
 
             // The main worktree should still return None.
-            let main_plan =
-                build_root_plan(Path::new("/project"), std::slice::from_ref(&workspace), cx);
+            let main_plan = build_root_plan(
+                Path::new("/project"),
+                None,
+                std::slice::from_ref(&workspace),
+                cx,
+            );
             assert!(
                 main_plan.is_none(),
                 "build_root_plan should return None for the main worktree \
@@ -1124,6 +1213,7 @@ mod tests {
         workspace.read_with(cx, |_workspace, cx| {
             let plan = build_root_plan(
                 Path::new("/external-worktree"),
+                None,
                 std::slice::from_ref(&workspace),
                 cx,
             );
@@ -1218,6 +1308,7 @@ mod tests {
             // Worktree inside the custom managed directory SHOULD be archivable.
             let plan = build_root_plan(
                 Path::new("/custom-worktrees/project/feature/project"),
+                None,
                 std::slice::from_ref(&workspace),
                 cx,
             );
@@ -1231,6 +1322,7 @@ mod tests {
             // because the setting points elsewhere.
             let plan = build_root_plan(
                 Path::new("/worktrees/project/feature2/project"),
+                None,
                 std::slice::from_ref(&workspace),
                 cx,
             );
@@ -1297,6 +1389,7 @@ mod tests {
             .read_with(cx, |_workspace, cx| {
                 build_root_plan(
                     Path::new("/worktrees/project/feature/project"),
+                    None,
                     std::slice::from_ref(&workspace),
                     cx,
                 )
@@ -1309,8 +1402,7 @@ mod tests {
         );
 
         // Remove the root.
-        let fs_clone = fs.clone();
-        let task = cx.update(|cx| cx.spawn(async move |cx| remove_root(root, fs_clone, cx).await));
+        let task = cx.update(|cx| cx.spawn(async move |cx| remove_root(root, cx).await));
         task.await.expect("remove_root should succeed");
 
         cx.run_until_parked();
@@ -1377,6 +1469,7 @@ mod tests {
             .read_with(cx, |_workspace, cx| {
                 build_root_plan(
                     Path::new("/worktrees/project/feature/project"),
+                    None,
                     std::slice::from_ref(&workspace),
                     cx,
                 )
@@ -1404,8 +1497,7 @@ mod tests {
         // remove_root should still succeed — fs.remove_dir with
         // ignore_if_not_exists handles NotFound, and git worktree remove
         // handles a missing working tree directory.
-        let fs_clone = fs.clone();
-        let task = cx.update(|cx| cx.spawn(async move |cx| remove_root(root, fs_clone, cx).await));
+        let task = cx.update(|cx| cx.spawn(async move |cx| remove_root(root, cx).await));
         task.await
             .expect("remove_root should succeed even when directory is already gone");
     }
@@ -1466,6 +1558,7 @@ mod tests {
             .read_with(cx, |_workspace, cx| {
                 build_root_plan(
                     Path::new("/worktrees/project/feature/project"),
+                    None,
                     std::slice::from_ref(&workspace),
                     cx,
                 )
@@ -1492,8 +1585,7 @@ mod tests {
             "path should now be a file, not a directory"
         );
 
-        let fs_clone = fs.clone();
-        let task = cx.update(|cx| cx.spawn(async move |cx| remove_root(root, fs_clone, cx).await));
+        let task = cx.update(|cx| cx.spawn(async move |cx| remove_root(root, cx).await));
         let result = task.await;
 
         assert!(

crates/project/src/git_store.rs 🔗

@@ -17,7 +17,7 @@ use buffer_diff::{BufferDiff, BufferDiffEvent};
 use client::ProjectId;
 use collections::HashMap;
 pub use conflict_set::{ConflictRegion, ConflictSet, ConflictSetSnapshot, ConflictSetUpdate};
-use fs::Fs;
+use fs::{Fs, RemoveOptions};
 use futures::{
     FutureExt, StreamExt,
     channel::{
@@ -6350,7 +6350,34 @@ impl Repository {
             Some(format!("git worktree remove: {}", path.display()).into()),
             move |repo, _cx| async move {
                 match repo {
-                    RepositoryState::Local(LocalRepositoryState { backend, .. }) => {
+                    RepositoryState::Local(LocalRepositoryState { backend, fs, .. }) => {
+                        // When forcing, delete the worktree directory ourselves before
+                        // invoking git. `git worktree remove` can remove the admin
+                        // metadata in `.git/worktrees/<name>` but fail to delete the
+                        // working directory (it continues past directory-removal errors),
+                        // leaving an orphaned folder on disk. Deleting first guarantees
+                        // the directory is gone, and `git worktree remove --force`
+                        // tolerates a missing working tree while cleaning up the admin
+                        // entry. We keep this inside the `Local` arm so that for remote
+                        // projects the deletion runs on the remote machine (where the
+                        // `GitRemoveWorktree` RPC is handled against the local repo on
+                        // the headless server) using its own filesystem.
+                        //
+                        // Non-force removals are left untouched: `git worktree remove`
+                        // must see the dirty working tree to refuse the operation.
+                        if force {
+                            fs.remove_dir(
+                                &path,
+                                RemoveOptions {
+                                    recursive: true,
+                                    ignore_if_not_exists: true,
+                                },
+                            )
+                            .await
+                            .with_context(|| {
+                                format!("failed to delete worktree directory '{}'", path.display())
+                            })?;
+                        }
                         backend.remove_worktree(path, force).await
                     }
                     RepositoryState::Remote(RemoteRepositoryState { project_id, client }) => {

crates/remote_connection/src/remote_connection.rs 🔗

@@ -594,7 +594,7 @@ pub fn dismiss_connection_modal(workspace: &Entity<Workspace>, cx: &mut gpui::As
 /// Creates a [`RemoteClient`] by reusing an existing connection from the
 /// global pool. No interactive UI is shown. This should only be called
 /// when [`remote::has_active_connection`] returns `true`.
-fn connect_reusing_pool(
+pub fn connect_reusing_pool(
     connection_options: RemoteConnectionOptions,
     cx: &mut App,
 ) -> Task<Result<Option<Entity<RemoteClient>>>> {

crates/sidebar/src/sidebar.rs 🔗

@@ -2518,10 +2518,18 @@ impl Sidebar {
 
                 let mut path_replacements: Vec<(PathBuf, PathBuf)> = Vec::new();
                 for row in &archived_worktrees {
-                    match thread_worktree_archive::restore_worktree_via_git(row, &mut *cx).await {
+                    match thread_worktree_archive::restore_worktree_via_git(
+                        row,
+                        metadata.remote_connection.as_ref(),
+                        &mut *cx,
+                    )
+                    .await
+                    {
                         Ok(restored_path) => {
                             thread_worktree_archive::cleanup_archived_worktree_record(
-                                row, &mut *cx,
+                                row,
+                                metadata.remote_connection.as_ref(),
+                                &mut *cx,
                             )
                             .await;
                             path_replacements.push((row.worktree_path.clone(), restored_path));
@@ -2798,7 +2806,12 @@ impl Sidebar {
                     .folder_paths()
                     .ordered_paths()
                     .filter_map(|path| {
-                        thread_worktree_archive::build_root_plan(path, &workspaces, cx)
+                        thread_worktree_archive::build_root_plan(
+                            path,
+                            metadata.remote_connection.as_ref(),
+                            &workspaces,
+                            cx,
+                        )
                     })
                     .filter(|plan| {
                         thread_id.map_or(true, |tid| {
@@ -3189,14 +3202,9 @@ impl Sidebar {
             return None;
         }
 
-        let fs = self
-            .multi_workspace
-            .upgrade()
-            .map(|mw| mw.read(cx).workspace().read(cx).app_state().fs.clone())?;
-
         let (cancel_tx, cancel_rx) = smol::channel::bounded::<()>(1);
         let task = cx.spawn(async move |_this, cx| {
-            match Self::archive_worktree_roots(roots, fs, cancel_rx, cx).await {
+            match Self::archive_worktree_roots(roots, cancel_rx, cx).await {
                 Ok(ArchiveWorktreeOutcome::Success) => {
                     cx.update(|cx| {
                         ThreadMetadataStore::global(cx).update(cx, |store, _cx| {
@@ -3221,7 +3229,6 @@ impl Sidebar {
 
     async fn archive_worktree_roots(
         roots: Vec<thread_worktree_archive::RootPlan>,
-        fs: Arc<dyn fs::Fs>,
         cancel_rx: smol::channel::Receiver<()>,
         cx: &mut gpui::AsyncApp,
     ) -> anyhow::Result<ArchiveWorktreeOutcome> {
@@ -3254,9 +3261,7 @@ impl Sidebar {
                 return Ok(ArchiveWorktreeOutcome::Cancelled);
             }
 
-            if let Err(error) =
-                thread_worktree_archive::remove_root(root.clone(), fs.clone(), cx).await
-            {
+            if let Err(error) = thread_worktree_archive::remove_root(root.clone(), cx).await {
                 if let Some(&(id, ref completed_root)) = completed_persists.last() {
                     if completed_root.root_path == root.root_path {
                         thread_worktree_archive::rollback_persist(id, completed_root, cx).await;

crates/sidebar/src/sidebar_tests.rs 🔗

@@ -246,6 +246,90 @@ async fn save_named_thread_metadata(
     cx.run_until_parked();
 }
 
+/// Spins up a fresh remote project backed by a headless server sharing
+/// `server_fs`, opens the given worktree path on it, and returns the
+/// project together with the headless entity (which the caller must keep
+/// alive for the duration of the test) and the `RemoteConnectionOptions`
+/// used for the fake server. Passing those options back into
+/// `reuse_opts` on a subsequent call makes the new project share the
+/// same `RemoteConnectionIdentity`, matching how Zed treats multiple
+/// projects on the same SSH host.
+async fn start_remote_project(
+    server_fs: &Arc<FakeFs>,
+    worktree_path: &Path,
+    app_state: &Arc<workspace::AppState>,
+    reuse_opts: Option<&remote::RemoteConnectionOptions>,
+    cx: &mut TestAppContext,
+    server_cx: &mut TestAppContext,
+) -> (
+    Entity<project::Project>,
+    Entity<remote_server::HeadlessProject>,
+    remote::RemoteConnectionOptions,
+) {
+    // Bind the returned guard to `_` so it is dropped immediately:
+    // `connect_mock` below blocks until the guard is released, so keeping
+    // it alive here would deadlock the mock handshake.
+    let (opts, server_session) = match reuse_opts {
+        Some(existing) => {
+            let (session, _) = remote::RemoteClient::fake_server_with_opts(existing, cx, server_cx);
+            (existing.clone(), session)
+        }
+        None => {
+            let (opts, session, _) = remote::RemoteClient::fake_server(cx, server_cx);
+            (opts, session)
+        }
+    };
+
+    server_cx.update(remote_server::HeadlessProject::init);
+    let server_executor = server_cx.executor();
+    let fs = server_fs.clone();
+    let headless = server_cx.new(|cx| {
+        remote_server::HeadlessProject::new(
+            remote_server::HeadlessAppState {
+                session: server_session,
+                fs,
+                http_client: Arc::new(http_client::BlockedHttpClient),
+                node_runtime: node_runtime::NodeRuntime::unavailable(),
+                languages: Arc::new(language::LanguageRegistry::new(server_executor.clone())),
+                extension_host_proxy: Arc::new(extension::ExtensionHostProxy::new()),
+                startup_time: std::time::Instant::now(),
+            },
+            false,
+            cx,
+        )
+    });
+
+    let remote_client = remote::RemoteClient::connect_mock(opts.clone(), cx).await;
+    let project = cx.update(|cx| {
+        let project_client = client::Client::new(
+            Arc::new(clock::FakeSystemClock::new()),
+            http_client::FakeHttpClient::with_404_response(),
+            cx,
+        );
+        let user_store = cx.new(|cx| client::UserStore::new(project_client.clone(), cx));
+        project::Project::remote(
+            remote_client,
+            project_client,
+            node_runtime::NodeRuntime::unavailable(),
+            user_store,
+            app_state.languages.clone(),
+            app_state.fs.clone(),
+            false,
+            cx,
+        )
+    });
+
+    project
+        .update(cx, |project, cx| {
+            project.find_or_create_worktree(worktree_path, true, cx)
+        })
+        .await
+        .expect("should open remote worktree");
+    cx.run_until_parked();
+
+    (project, headless, opts)
+}
+
 fn save_thread_metadata(
     session_id: acp::SessionId,
     title: Option<SharedString>,
@@ -4777,6 +4861,7 @@ async fn test_restore_worktree_when_branch_has_moved(cx: &mut TestAppContext) {
                     unstaged_commit_hash: unstaged_hash,
                     original_commit_hash: "original-sha".to_string(),
                 },
+                None,
                 &mut cx,
             )
             .await
@@ -4885,6 +4970,7 @@ async fn test_restore_worktree_when_branch_has_not_moved(cx: &mut TestAppContext
                     unstaged_commit_hash: unstaged_hash,
                     original_commit_hash: "original-sha".to_string(),
                 },
+                None,
                 &mut cx,
             )
             .await
@@ -4985,6 +5071,7 @@ async fn test_restore_worktree_when_branch_does_not_exist(cx: &mut TestAppContex
                     unstaged_commit_hash: unstaged_hash,
                     original_commit_hash: "original-sha".to_string(),
                 },
+                None,
                 &mut cx,
             )
             .await
@@ -10298,6 +10385,303 @@ fn test_worktree_info_missing_branch_returns_none() {
     assert_eq!(infos[0].name, SharedString::from("myapp"));
 }
 
+#[gpui::test]
+async fn test_remote_archive_thread_with_active_connection(
+    cx: &mut TestAppContext,
+    server_cx: &mut TestAppContext,
+) {
+    // End-to-end test of archiving a remote thread tied to a linked git
+    // worktree. Archival should:
+    //  1. Persist the worktree's git state via the remote repository RPCs
+    //     (head_sha / create_archive_checkpoint / update_ref).
+    //  2. Remove the linked worktree directory from the *remote* filesystem
+    //     via the GitRemoveWorktree RPC.
+    //  3. Mark the thread metadata archived and hide it from the sidebar.
+    //
+    // The mock remote transport only supports one live `RemoteClient` per
+    // connection at a time (each client's `start_proxy` replaces the
+    // previous server channel), so we can't split the main repo and the
+    // linked worktree across two remote projects the way Zed does in
+    // production. Opening both as visible worktrees of a single remote
+    // project still exercises every interesting path of the archive flow
+    // while staying within the mock's multiplexing limits.
+    init_test(cx);
+
+    cx.update(|cx| {
+        release_channel::init(semver::Version::new(0, 0, 0), cx);
+    });
+
+    let app_state = cx.update(|cx| {
+        let app_state = workspace::AppState::test(cx);
+        workspace::init(app_state.clone(), cx);
+        app_state
+    });
+
+    server_cx.update(|cx| {
+        release_channel::init(semver::Version::new(0, 0, 0), cx);
+    });
+
+    // Set up the remote filesystem with a main repo and one linked worktree.
+    let server_fs = FakeFs::new(server_cx.executor());
+    server_fs
+        .insert_tree(
+            "/project",
+            serde_json::json!({
+                ".git": {
+                    "worktrees": {
+                        "feature-a": {
+                            "commondir": "../../",
+                            "HEAD": "ref: refs/heads/feature-a",
+                        },
+                    },
+                },
+                "src": { "main.rs": "fn main() {}" },
+            }),
+        )
+        .await;
+    server_fs
+        .insert_tree(
+            "/worktrees/project/feature-a/project",
+            serde_json::json!({
+                ".git": "gitdir: /project/.git/worktrees/feature-a",
+                "src": { "lib.rs": "// feature" },
+            }),
+        )
+        .await;
+    server_fs
+        .add_linked_worktree_for_repo(
+            Path::new("/project/.git"),
+            false,
+            git::repository::Worktree {
+                path: PathBuf::from("/worktrees/project/feature-a/project"),
+                ref_name: Some("refs/heads/feature-a".into()),
+                sha: "abc".into(),
+                is_main: false,
+                is_bare: false,
+            },
+        )
+        .await;
+    server_fs.set_branch_name(Path::new("/project/.git"), Some("main"));
+    server_fs.set_head_for_repo(
+        Path::new("/project/.git"),
+        &[("src/main.rs", "fn main() {}".into())],
+        "head-sha",
+    );
+
+    // Open a single remote project with both the main repo and the linked
+    // worktree as visible worktrees. The mock transport doesn't multiplex
+    // multiple `RemoteClient`s over one pooled connection cleanly (each
+    // client's `start_proxy` clobbers the previous one's server channel),
+    // so we can't build two separate `Project::remote` instances in this
+    // test. Folding both worktrees into one project still exercises the
+    // archive flow's interesting paths: `build_root_plan` classifies the
+    // linked worktree correctly, and `find_or_create_repository` finds
+    // the main repo live on that same project — avoiding the temp-project
+    // fallback that would also run into the multiplexing limitation.
+    let (project, _headless, _opts) = start_remote_project(
+        &server_fs,
+        Path::new("/project"),
+        &app_state,
+        None,
+        cx,
+        server_cx,
+    )
+    .await;
+    project
+        .update(cx, |project, cx| {
+            project.find_or_create_worktree(
+                Path::new("/worktrees/project/feature-a/project"),
+                true,
+                cx,
+            )
+        })
+        .await
+        .expect("should open linked worktree on remote");
+    project.update(cx, |p, cx| p.git_scans_complete(cx)).await;
+    cx.run_until_parked();
+
+    cx.update(|cx| <dyn fs::Fs>::set_global(app_state.fs.clone(), cx));
+
+    let (multi_workspace, cx) =
+        cx.add_window_view(|window, cx| MultiWorkspace::test_new(project.clone(), window, cx));
+    let sidebar = setup_sidebar(&multi_workspace, cx);
+
+    // The thread's (main_worktree_path, folder_path) pair maps the folder
+    // path to the linked-worktree checkout and the main path to the parent
+    // repo, so `build_root_plan` targets the linked worktree specifically
+    // and knows which main repo owns it.
+    let remote_connection = project.read_with(cx, |p, cx| p.remote_connection_options(cx));
+    let wt_thread_id = acp::SessionId::new(Arc::from("worktree-thread"));
+    cx.update(|_window, cx| {
+        let metadata = ThreadMetadata {
+            thread_id: ThreadId::new(),
+            session_id: Some(wt_thread_id.clone()),
+            agent_id: agent::ZED_AGENT_ID.clone(),
+            title: Some("Worktree Thread".into()),
+            updated_at: chrono::TimeZone::with_ymd_and_hms(&chrono::Utc, 2024, 1, 1, 0, 0, 0)
+                .unwrap(),
+            created_at: None,
+            worktree_paths: WorktreePaths::from_path_lists(
+                PathList::new(&[PathBuf::from("/project")]),
+                PathList::new(&[PathBuf::from("/worktrees/project/feature-a/project")]),
+            )
+            .unwrap(),
+            archived: false,
+            remote_connection,
+        };
+        ThreadMetadataStore::global(cx).update(cx, |store, cx| store.save(metadata, cx));
+    });
+    cx.run_until_parked();
+
+    assert!(
+        server_fs
+            .is_dir(Path::new("/worktrees/project/feature-a/project"))
+            .await,
+        "linked worktree directory should exist on remote before archiving"
+    );
+
+    sidebar.update_in(cx, |sidebar: &mut Sidebar, window, cx| {
+        sidebar.archive_thread(&wt_thread_id, window, cx);
+    });
+    cx.run_until_parked();
+    server_cx.run_until_parked();
+
+    let is_archived = cx.update(|_window, cx| {
+        ThreadMetadataStore::global(cx)
+            .read(cx)
+            .entry_by_session(&wt_thread_id)
+            .map(|t| t.archived)
+            .unwrap_or(false)
+    });
+    assert!(is_archived, "worktree thread should be archived");
+
+    assert!(
+        !server_fs
+            .is_dir(Path::new("/worktrees/project/feature-a/project"))
+            .await,
+        "linked worktree directory should be removed from remote fs \
+         (the GitRemoveWorktree RPC runs `Repository::remove_worktree` \
+         on the headless server, which deletes the directory via `Fs::remove_dir` \
+         before running `git worktree remove --force`)"
+    );
+
+    let entries = visible_entries_as_strings(&sidebar, cx);
+    assert!(
+        !entries.iter().any(|e| e.contains("Worktree Thread")),
+        "archived worktree thread should be hidden from sidebar: {entries:?}"
+    );
+}
+
+#[gpui::test]
+async fn test_remote_archive_thread_with_disconnected_remote(
+    cx: &mut TestAppContext,
+    server_cx: &mut TestAppContext,
+) {
+    // When a remote thread has no linked-worktree state to archive (only
+    // a main worktree), archival is a pure metadata operation: no RPCs
+    // are issued against the remote server. This must succeed even when
+    // the connection has dropped out, because losing connectivity should
+    // not block users from cleaning up their thread list.
+    //
+    // Threads that *do* have linked-worktree state require a live
+    // connection to run the git worktree removal on the server; that
+    // path is covered by `test_remote_archive_thread_with_active_connection`.
+    init_test(cx);
+
+    cx.update(|cx| {
+        release_channel::init(semver::Version::new(0, 0, 0), cx);
+    });
+
+    let app_state = cx.update(|cx| {
+        let app_state = workspace::AppState::test(cx);
+        workspace::init(app_state.clone(), cx);
+        app_state
+    });
+
+    server_cx.update(|cx| {
+        release_channel::init(semver::Version::new(0, 0, 0), cx);
+    });
+
+    let server_fs = FakeFs::new(server_cx.executor());
+    server_fs
+        .insert_tree(
+            "/project",
+            serde_json::json!({
+                ".git": {},
+                "src": { "main.rs": "fn main() {}" },
+            }),
+        )
+        .await;
+    server_fs.set_branch_name(Path::new("/project/.git"), Some("main"));
+
+    let (project, _headless, _opts) = start_remote_project(
+        &server_fs,
+        Path::new("/project"),
+        &app_state,
+        None,
+        cx,
+        server_cx,
+    )
+    .await;
+    let remote_client = project
+        .read_with(cx, |project, _cx| project.remote_client())
+        .expect("remote project should expose its client");
+
+    cx.update(|cx| <dyn fs::Fs>::set_global(app_state.fs.clone(), cx));
+
+    let (multi_workspace, cx) =
+        cx.add_window_view(|window, cx| MultiWorkspace::test_new(project.clone(), window, cx));
+    let sidebar = setup_sidebar(&multi_workspace, cx);
+
+    let thread_id = acp::SessionId::new(Arc::from("remote-thread"));
+    save_thread_metadata(
+        thread_id.clone(),
+        Some("Remote Thread".into()),
+        chrono::TimeZone::with_ymd_and_hms(&chrono::Utc, 2024, 1, 1, 0, 0, 0).unwrap(),
+        None,
+        &project,
+        cx,
+    );
+    cx.run_until_parked();
+
+    // Sanity-check: there is nothing on the remote fs outside the main
+    // repo, so archival should not need to touch the server.
+    assert!(
+        !server_fs.is_dir(Path::new("/worktrees")).await,
+        "no linked worktrees on the server before archiving"
+    );
+
+    // Disconnect the remote connection before archiving. We don't
+    // `run_until_parked` here because the disconnect itself triggers
+    // reconnection work that can't complete in the test environment.
+    remote_client.update_in(cx, |client, _window, cx| {
+        client.simulate_disconnect(cx).detach();
+    });
+
+    sidebar.update_in(cx, |sidebar, window, cx| {
+        sidebar.archive_thread(&thread_id, window, cx);
+    });
+    cx.run_until_parked();
+
+    let is_archived = cx.update(|_window, cx| {
+        ThreadMetadataStore::global(cx)
+            .read(cx)
+            .entry_by_session(&thread_id)
+            .map(|t| t.archived)
+            .unwrap_or(false)
+    });
+    assert!(
+        is_archived,
+        "thread should be archived even when remote is disconnected"
+    );
+
+    let entries = visible_entries_as_strings(&sidebar, cx);
+    assert!(
+        !entries.iter().any(|e| e.contains("Remote Thread")),
+        "archived thread should be hidden from sidebar: {entries:?}"
+    );
+}
+
 #[gpui::test]
 async fn test_collab_guest_move_thread_paths_is_noop(cx: &mut TestAppContext) {
     init_test(cx);