sidebar: Filter thread groups by remote host identity (#53918)

Created by Anthony Eid, Max Brunsfeld, and Richard Feldman

### Summary 

Follow up on: https://github.com/zed-industries/zed/pull/53550

Threads in the sidebar are now filtered by remote connection in addition
to path list. Previously, two different machines with the same absolute
project paths (e.g. both at `/home/user/project`) would have their
threads merged into a single sidebar group. Now each remote host gets
its own group.

To support this, this PR introduces `RemoteConnectionIdentity`, a
normalized subset of `RemoteConnectionOptions` that includes only the
fields relevant for identity matching (e.g. SSH host + username + port,
but not nickname or connection timeout). All remote host comparisons in
the sidebar, thread metadata store, and workspace persistence now go
through `same_remote_connection_identity()` instead of raw `==` on
`RemoteConnectionOptions`.

#### Follow-up work on remoting for the sidebar

1. Make the archive-thread neighbor fall back to the main git worktree
instead of the next thread
2. Start implementing the remote git operations that the archive feature
depends on
3. Make remote connection pool equality use `RemoteConnectionIdentity`

Self-Review Checklist:

- [x] I've reviewed my own diff for quality, security, and reliability
- [x] Unsafe blocks (if any) have justifying comments
- [x] The content is consistent with the [UI/UX
checklist](https://github.com/zed-industries/zed/blob/main/CONTRIBUTING.md#uiux-checklist)
- [x] Tests cover the new/changed behavior
- [x] Performance impact has been considered and is acceptable

Release Notes:

- N/A

---------

Co-authored-by: Max Brunsfeld <maxbrunsfeld@gmail.com>
Co-authored-by: Richard Feldman <oss@rtfeldman.com>

Change summary

crates/agent_ui/src/agent_panel.rs             |   1 
crates/agent_ui/src/thread_metadata_store.rs   | 223 +++++++++++++++--
crates/agent_ui/src/thread_worktree_archive.rs |  22 -
crates/remote/src/remote.rs                    |   4 
crates/remote/src/remote_identity.rs           | 168 ++++++++++++++
crates/sidebar/src/sidebar.rs                  |  96 ++++++--
crates/sidebar/src/sidebar_tests.rs            | 240 ++++++++++++++++---
crates/workspace/src/multi_workspace.rs        |   3 
crates/workspace/src/multi_workspace_tests.rs  |   1 
crates/workspace/src/persistence.rs            |  54 +++-
crates/workspace/src/workspace.rs              |   3 
11 files changed, 679 insertions(+), 136 deletions(-)

Detailed changes

crates/agent_ui/src/thread_metadata_store.rs 🔗

@@ -1,4 +1,7 @@
-use std::{path::PathBuf, sync::Arc};
+use std::{
+    path::{Path, PathBuf},
+    sync::Arc,
+};
 
 use agent::{ThreadStore, ZED_AGENT_ID};
 use agent_client_protocol as acp;
@@ -20,7 +23,7 @@ use futures::{FutureExt, future::Shared};
 use gpui::{AppContext as _, Entity, Global, Subscription, Task};
 use project::AgentId;
 pub use project::WorktreePaths;
-use remote::RemoteConnectionOptions;
+use remote::{RemoteConnectionOptions, same_remote_connection_identity};
 use ui::{App, Context, SharedString};
 use util::ResultExt as _;
 use workspace::{PathList, SerializedWorkspaceLocation, WorkspaceDb};
@@ -482,32 +485,50 @@ impl ThreadMetadataStore {
         self.entries().filter(|t| t.archived)
     }
 
-    /// Returns all threads for the given path list, excluding archived threads.
-    pub fn entries_for_path(
-        &self,
+    /// Returns all threads for the given path list and remote connection,
+    /// excluding archived threads.
+    ///
+    /// When `remote_connection` is `Some`, only threads whose persisted
+    /// `remote_connection` matches by normalized identity are returned.
+    /// When `None`, only local (non-remote) threads are returned.
+    pub fn entries_for_path<'a>(
+        &'a self,
         path_list: &PathList,
-    ) -> impl Iterator<Item = &ThreadMetadata> + '_ {
+        remote_connection: Option<&'a RemoteConnectionOptions>,
+    ) -> impl Iterator<Item = &'a ThreadMetadata> + 'a {
         self.threads_by_paths
             .get(path_list)
             .into_iter()
             .flatten()
             .filter_map(|s| self.threads.get(s))
             .filter(|s| !s.archived)
+            .filter(move |s| {
+                same_remote_connection_identity(s.remote_connection.as_ref(), remote_connection)
+            })
     }
 
-    /// Returns threads whose `main_worktree_paths` matches the given path list,
-    /// excluding archived threads. This finds threads that were opened in a
-    /// linked worktree but are associated with the given main worktree.
-    pub fn entries_for_main_worktree_path(
-        &self,
+    /// Returns threads whose `main_worktree_paths` matches the given path list
+    /// and remote connection, excluding archived threads. This finds threads
+    /// that were opened in a linked worktree but are associated with the given
+    /// main worktree.
+    ///
+    /// When `remote_connection` is `Some`, only threads whose persisted
+    /// `remote_connection` matches by normalized identity are returned.
+    /// When `None`, only local (non-remote) threads are returned.
+    pub fn entries_for_main_worktree_path<'a>(
+        &'a self,
         path_list: &PathList,
-    ) -> impl Iterator<Item = &ThreadMetadata> + '_ {
+        remote_connection: Option<&'a RemoteConnectionOptions>,
+    ) -> impl Iterator<Item = &'a ThreadMetadata> + 'a {
         self.threads_by_main_paths
             .get(path_list)
             .into_iter()
             .flatten()
             .filter_map(|s| self.threads.get(s))
             .filter(|s| !s.archived)
+            .filter(move |s| {
+                same_remote_connection_identity(s.remote_connection.as_ref(), remote_connection)
+            })
     }
 
     fn reload(&mut self, cx: &mut Context<Self>) -> Shared<Task<()>> {
@@ -677,6 +698,30 @@ impl ThreadMetadataStore {
         self.in_flight_archives.remove(&thread_id);
     }
 
+    /// Returns `true` if any unarchived thread other than `thread_id`
+    /// references `path` in its folder paths. Used to determine whether a
+    /// worktree can safely be removed from disk.
+    pub fn path_is_referenced_by_other_unarchived_threads(
+        &self,
+        thread_id: ThreadId,
+        path: &Path,
+        remote_connection: Option<&RemoteConnectionOptions>,
+    ) -> bool {
+        self.entries().any(|thread| {
+            thread.thread_id != thread_id
+                && !thread.archived
+                && same_remote_connection_identity(
+                    thread.remote_connection.as_ref(),
+                    remote_connection,
+                )
+                && thread
+                    .folder_paths()
+                    .paths()
+                    .iter()
+                    .any(|other_path| other_path.as_path() == path)
+        })
+    }
+
     /// Updates a thread's `folder_paths` after an archived worktree has been
     /// restored to disk. The restored worktree may land at a different path
     /// than it had before archival, so each `(old_path, new_path)` pair in
@@ -752,12 +797,12 @@ impl ThreadMetadataStore {
             .into_iter()
             .flatten()
             .filter(|id| {
-                remote_connection.is_none()
-                    || self
-                        .threads
+                same_remote_connection_identity(
+                    self.threads
                         .get(id)
-                        .and_then(|t| t.remote_connection.as_ref())
-                        == remote_connection
+                        .and_then(|t| t.remote_connection.as_ref()),
+                    remote_connection,
+                )
             })
             .copied()
             .collect();
@@ -784,12 +829,12 @@ impl ThreadMetadataStore {
             .into_iter()
             .flatten()
             .filter(|id| {
-                remote_connection.is_none()
-                    || self
-                        .threads
+                same_remote_connection_identity(
+                    self.threads
                         .get(id)
-                        .and_then(|t| t.remote_connection.as_ref())
-                        == remote_connection
+                        .and_then(|t| t.remote_connection.as_ref()),
+                    remote_connection,
+                )
             })
             .copied()
             .collect();
@@ -1639,13 +1684,13 @@ mod tests {
             );
 
             let first_path_entries: Vec<_> = store
-                .entries_for_path(&first_paths)
+                .entries_for_path(&first_paths, None)
                 .filter_map(|entry| entry.session_id.as_ref().map(|s| s.0.to_string()))
                 .collect();
             assert_eq!(first_path_entries, vec!["session-1"]);
 
             let second_path_entries: Vec<_> = store
-                .entries_for_path(&second_paths)
+                .entries_for_path(&second_paths, None)
                 .filter_map(|entry| entry.session_id.as_ref().map(|s| s.0.to_string()))
                 .collect();
             assert_eq!(second_path_entries, vec!["session-2"]);
@@ -1692,13 +1737,13 @@ mod tests {
             let store = store.read(cx);
 
             let first_path_entries: Vec<_> = store
-                .entries_for_path(&first_paths)
+                .entries_for_path(&first_paths, None)
                 .filter_map(|entry| entry.session_id.as_ref().map(|s| s.0.to_string()))
                 .collect();
             assert_eq!(first_path_entries, vec!["session-1"]);
 
             let second_path_entries: Vec<_> = store
-                .entries_for_path(&second_paths)
+                .entries_for_path(&second_paths, None)
                 .filter_map(|entry| entry.session_id.as_ref().map(|s| s.0.to_string()))
                 .collect();
             assert_eq!(second_path_entries, vec!["session-2"]);
@@ -1742,13 +1787,13 @@ mod tests {
             );
 
             let first_path_entries: Vec<_> = store
-                .entries_for_path(&first_paths)
+                .entries_for_path(&first_paths, None)
                 .filter_map(|entry| entry.session_id.as_ref().map(|s| s.0.to_string()))
                 .collect();
             assert!(first_path_entries.is_empty());
 
             let second_path_entries: Vec<_> = store
-                .entries_for_path(&second_paths)
+                .entries_for_path(&second_paths, None)
                 .filter_map(|entry| entry.session_id.as_ref().map(|s| s.0.to_string()))
                 .collect();
             assert_eq!(second_path_entries.len(), 2);
@@ -1772,7 +1817,7 @@ mod tests {
             assert_eq!(store.entry_ids().count(), 1);
 
             let second_path_entries: Vec<_> = store
-                .entries_for_path(&second_paths)
+                .entries_for_path(&second_paths, None)
                 .filter_map(|entry| entry.session_id.as_ref().map(|s| s.0.to_string()))
                 .collect();
             assert_eq!(second_path_entries, vec!["session-1"]);
@@ -2434,7 +2479,7 @@ mod tests {
             let store = store.read(cx);
 
             let path_entries: Vec<_> = store
-                .entries_for_path(&paths)
+                .entries_for_path(&paths, None)
                 .filter_map(|e| e.session_id.as_ref().map(|s| s.0.to_string()))
                 .collect();
             assert_eq!(path_entries, vec!["session-1"]);
@@ -2457,7 +2502,7 @@ mod tests {
             let store = store.read(cx);
 
             let path_entries: Vec<_> = store
-                .entries_for_path(&paths)
+                .entries_for_path(&paths, None)
                 .filter_map(|e| e.session_id.as_ref().map(|s| s.0.to_string()))
                 .collect();
             assert!(path_entries.is_empty());
@@ -2485,7 +2530,7 @@ mod tests {
             let store = store.read(cx);
 
             let path_entries: Vec<_> = store
-                .entries_for_path(&paths)
+                .entries_for_path(&paths, None)
                 .filter_map(|e| e.session_id.as_ref().map(|s| s.0.to_string()))
                 .collect();
             assert_eq!(path_entries, vec!["session-1"]);
@@ -2534,7 +2579,7 @@ mod tests {
             let store = store.read(cx);
 
             let path_entries: Vec<_> = store
-                .entries_for_path(&paths)
+                .entries_for_path(&paths, None)
                 .filter_map(|e| e.session_id.as_ref().map(|s| s.0.to_string()))
                 .collect();
             assert_eq!(path_entries, vec!["session-1"]);
@@ -2549,6 +2594,116 @@ mod tests {
         });
     }
 
+    #[gpui::test]
+    async fn test_entries_filter_by_remote_connection(cx: &mut TestAppContext) {
+        init_test(cx);
+
+        let main_paths = PathList::new(&[Path::new("/project-a")]);
+        let linked_paths = PathList::new(&[Path::new("/wt-feature")]);
+        let now = Utc::now();
+
+        let remote_a = RemoteConnectionOptions::Mock(remote::MockConnectionOptions { id: 1 });
+        let remote_b = RemoteConnectionOptions::Mock(remote::MockConnectionOptions { id: 2 });
+
+        // Three threads at the same folder_paths but different hosts.
+        let local_thread = make_metadata("local-session", "Local Thread", now, main_paths.clone());
+
+        let mut remote_a_thread = make_metadata(
+            "remote-a-session",
+            "Remote A Thread",
+            now - chrono::Duration::seconds(1),
+            main_paths.clone(),
+        );
+        remote_a_thread.remote_connection = Some(remote_a.clone());
+
+        let mut remote_b_thread = make_metadata(
+            "remote-b-session",
+            "Remote B Thread",
+            now - chrono::Duration::seconds(2),
+            main_paths.clone(),
+        );
+        remote_b_thread.remote_connection = Some(remote_b.clone());
+
+        let linked_worktree_paths =
+            WorktreePaths::from_path_lists(main_paths.clone(), linked_paths).unwrap();
+
+        let local_linked_thread = ThreadMetadata {
+            thread_id: ThreadId::new(),
+            archived: false,
+            session_id: Some(acp::SessionId::new("local-linked")),
+            agent_id: agent::ZED_AGENT_ID.clone(),
+            title: Some("Local Linked".into()),
+            updated_at: now,
+            created_at: Some(now),
+            worktree_paths: linked_worktree_paths.clone(),
+            remote_connection: None,
+        };
+
+        let remote_linked_thread = ThreadMetadata {
+            thread_id: ThreadId::new(),
+            archived: false,
+            session_id: Some(acp::SessionId::new("remote-linked")),
+            agent_id: agent::ZED_AGENT_ID.clone(),
+            title: Some("Remote Linked".into()),
+            updated_at: now - chrono::Duration::seconds(1),
+            created_at: Some(now - chrono::Duration::seconds(1)),
+            worktree_paths: linked_worktree_paths,
+            remote_connection: Some(remote_a.clone()),
+        };
+
+        cx.update(|cx| {
+            let store = ThreadMetadataStore::global(cx);
+            store.update(cx, |store, cx| {
+                store.save(local_thread, cx);
+                store.save(remote_a_thread, cx);
+                store.save(remote_b_thread, cx);
+                store.save(local_linked_thread, cx);
+                store.save(remote_linked_thread, cx);
+            });
+        });
+        cx.run_until_parked();
+
+        cx.update(|cx| {
+            let store = ThreadMetadataStore::global(cx);
+            let store = store.read(cx);
+
+            let local_entries: Vec<_> = store
+                .entries_for_path(&main_paths, None)
+                .filter_map(|e| e.session_id.as_ref().map(|s| s.0.to_string()))
+                .collect();
+            assert_eq!(local_entries, vec!["local-session"]);
+
+            let remote_a_entries: Vec<_> = store
+                .entries_for_path(&main_paths, Some(&remote_a))
+                .filter_map(|e| e.session_id.as_ref().map(|s| s.0.to_string()))
+                .collect();
+            assert_eq!(remote_a_entries, vec!["remote-a-session"]);
+
+            let remote_b_entries: Vec<_> = store
+                .entries_for_path(&main_paths, Some(&remote_b))
+                .filter_map(|e| e.session_id.as_ref().map(|s| s.0.to_string()))
+                .collect();
+            assert_eq!(remote_b_entries, vec!["remote-b-session"]);
+
+            let mut local_main_entries: Vec<_> = store
+                .entries_for_main_worktree_path(&main_paths, None)
+                .filter_map(|e| e.session_id.as_ref().map(|s| s.0.to_string()))
+                .collect();
+            local_main_entries.sort();
+            assert_eq!(local_main_entries, vec!["local-linked", "local-session"]);
+
+            let mut remote_main_entries: Vec<_> = store
+                .entries_for_main_worktree_path(&main_paths, Some(&remote_a))
+                .filter_map(|e| e.session_id.as_ref().map(|s| s.0.to_string()))
+                .collect();
+            remote_main_entries.sort();
+            assert_eq!(
+                remote_main_entries,
+                vec!["remote-a-session", "remote-linked"]
+            );
+        });
+    }
+
     #[gpui::test]
     async fn test_save_all_persists_multiple_threads(cx: &mut TestAppContext) {
         init_test(cx);
@@ -2650,7 +2805,7 @@ mod tests {
             assert!(thread.archived);
 
             let path_entries: Vec<_> = store
-                .entries_for_path(&paths)
+                .entries_for_path(&paths, None)
                 .filter_map(|e| e.session_id.as_ref().map(|s| s.0.to_string()))
                 .collect();
             assert!(path_entries.is_empty());

crates/agent_ui/src/thread_worktree_archive.rs 🔗

@@ -147,28 +147,6 @@ pub fn build_root_plan(
     })
 }
 
-/// Returns `true` if any unarchived thread other than `current_session_id`
-/// references `path` in its folder paths. Used to determine whether a
-/// worktree can safely be removed from disk.
-pub fn path_is_referenced_by_other_unarchived_threads(
-    current_thread_id: ThreadId,
-    path: &Path,
-    cx: &App,
-) -> bool {
-    ThreadMetadataStore::global(cx)
-        .read(cx)
-        .entries()
-        .filter(|thread| thread.thread_id != current_thread_id)
-        .filter(|thread| !thread.archived)
-        .any(|thread| {
-            thread
-                .folder_paths()
-                .paths()
-                .iter()
-                .any(|other_path| other_path.as_path() == path)
-        })
-}
-
 /// Removes a worktree from all affected projects and deletes it from disk
 /// via `git worktree remove`.
 ///

crates/remote/src/remote.rs 🔗

@@ -2,6 +2,7 @@ pub mod json_log;
 pub mod protocol;
 pub mod proxy;
 pub mod remote_client;
+pub mod remote_identity;
 mod transport;
 
 #[cfg(target_os = "windows")]
@@ -11,6 +12,9 @@ pub use remote_client::{
     RemoteClientDelegate, RemoteClientEvent, RemoteConnection, RemoteConnectionOptions, RemoteOs,
     RemotePlatform, connect, has_active_connection,
 };
+pub use remote_identity::{
+    RemoteConnectionIdentity, remote_connection_identity, same_remote_connection_identity,
+};
 pub use transport::docker::DockerConnectionOptions;
 pub use transport::ssh::{SshConnectionOptions, SshPortForwardOption};
 pub use transport::wsl::WslConnectionOptions;

crates/remote/src/remote_identity.rs 🔗

@@ -0,0 +1,168 @@
+use crate::RemoteConnectionOptions;
+
+/// A normalized remote identity for matching live remote hosts against
+/// persisted remote metadata.
+///
+/// This mirrors workspace persistence identity semantics rather than full
+/// `RemoteConnectionOptions` equality, so runtime-only fields like SSH
+/// nicknames or Docker environment overrides do not affect matching.
+#[derive(Debug, Clone, PartialEq, Eq, Hash)]
+pub enum RemoteConnectionIdentity {
+    Ssh {
+        host: String,
+        username: Option<String>,
+        port: Option<u16>,
+    },
+    Wsl {
+        distro_name: String,
+        user: Option<String>,
+    },
+    Docker {
+        container_id: String,
+        name: String,
+        remote_user: String,
+    },
+    #[cfg(any(test, feature = "test-support"))]
+    Mock { id: u64 },
+}
+
+impl From<&RemoteConnectionOptions> for RemoteConnectionIdentity {
+    fn from(options: &RemoteConnectionOptions) -> Self {
+        match options {
+            RemoteConnectionOptions::Ssh(options) => Self::Ssh {
+                host: options.host.to_string(),
+                username: options.username.clone(),
+                port: options.port,
+            },
+            RemoteConnectionOptions::Wsl(options) => Self::Wsl {
+                distro_name: options.distro_name.clone(),
+                user: options.user.clone(),
+            },
+            RemoteConnectionOptions::Docker(options) => Self::Docker {
+                container_id: options.container_id.clone(),
+                name: options.name.clone(),
+                remote_user: options.remote_user.clone(),
+            },
+            #[cfg(any(test, feature = "test-support"))]
+            RemoteConnectionOptions::Mock(options) => Self::Mock { id: options.id },
+        }
+    }
+}
+
+pub fn remote_connection_identity(options: &RemoteConnectionOptions) -> RemoteConnectionIdentity {
+    options.into()
+}
+
+pub fn same_remote_connection_identity(
+    left: Option<&RemoteConnectionOptions>,
+    right: Option<&RemoteConnectionOptions>,
+) -> bool {
+    match (left, right) {
+        (Some(left), Some(right)) => {
+            remote_connection_identity(left) == remote_connection_identity(right)
+        }
+        (None, None) => true,
+        _ => false,
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use std::collections::BTreeMap;
+
+    use super::*;
+    use crate::{DockerConnectionOptions, SshConnectionOptions, WslConnectionOptions};
+
+    #[test]
+    fn ssh_identity_ignores_non_persisted_runtime_fields() {
+        let left = RemoteConnectionOptions::Ssh(SshConnectionOptions {
+            host: "example.com".into(),
+            username: Some("anth".to_string()),
+            port: Some(2222),
+            password: Some("secret".to_string()),
+            args: Some(vec!["-v".to_string()]),
+            connection_timeout: Some(30),
+            nickname: Some("work".to_string()),
+            upload_binary_over_ssh: true,
+            ..Default::default()
+        });
+        let right = RemoteConnectionOptions::Ssh(SshConnectionOptions {
+            host: "example.com".into(),
+            username: Some("anth".to_string()),
+            port: Some(2222),
+            password: None,
+            args: None,
+            connection_timeout: None,
+            nickname: None,
+            upload_binary_over_ssh: false,
+            ..Default::default()
+        });
+
+        assert!(same_remote_connection_identity(Some(&left), Some(&right),));
+    }
+
+    #[test]
+    fn ssh_identity_distinguishes_persistence_key_fields() {
+        let left = RemoteConnectionOptions::Ssh(SshConnectionOptions {
+            host: "example.com".into(),
+            username: Some("anth".to_string()),
+            port: Some(2222),
+            ..Default::default()
+        });
+        let right = RemoteConnectionOptions::Ssh(SshConnectionOptions {
+            host: "example.com".into(),
+            username: Some("anth".to_string()),
+            port: Some(2223),
+            ..Default::default()
+        });
+
+        assert!(!same_remote_connection_identity(Some(&left), Some(&right),));
+    }
+
+    #[test]
+    fn wsl_identity_includes_user() {
+        let left = RemoteConnectionOptions::Wsl(WslConnectionOptions {
+            distro_name: "Ubuntu".to_string(),
+            user: Some("anth".to_string()),
+        });
+        let right = RemoteConnectionOptions::Wsl(WslConnectionOptions {
+            distro_name: "Ubuntu".to_string(),
+            user: Some("root".to_string()),
+        });
+
+        assert!(!same_remote_connection_identity(Some(&left), Some(&right),));
+    }
+
+    #[test]
+    fn docker_identity_ignores_non_persisted_runtime_fields() {
+        let left = RemoteConnectionOptions::Docker(DockerConnectionOptions {
+            name: "zed-dev".to_string(),
+            container_id: "container-123".to_string(),
+            remote_user: "anth".to_string(),
+            upload_binary_over_docker_exec: true,
+            use_podman: true,
+            remote_env: BTreeMap::from([("FOO".to_string(), "BAR".to_string())]),
+        });
+        let right = RemoteConnectionOptions::Docker(DockerConnectionOptions {
+            name: "zed-dev".to_string(),
+            container_id: "container-123".to_string(),
+            remote_user: "anth".to_string(),
+            upload_binary_over_docker_exec: false,
+            use_podman: false,
+            remote_env: BTreeMap::new(),
+        });
+
+        assert!(same_remote_connection_identity(Some(&left), Some(&right),));
+    }
+
+    #[test]
+    fn local_identity_matches_only_local_identity() {
+        let remote = RemoteConnectionOptions::Wsl(WslConnectionOptions {
+            distro_name: "Ubuntu".to_string(),
+            user: Some("anth".to_string()),
+        });
+
+        assert!(same_remote_connection_identity(None, None));
+        assert!(!same_remote_connection_identity(None, Some(&remote)));
+    }
+}

crates/sidebar/src/sidebar.rs 🔗

@@ -27,7 +27,7 @@ use project::{
     AgentId, AgentRegistryStore, Event as ProjectEvent, WorktreeId, linked_worktree_short_name,
 };
 use recent_projects::sidebar_recent_projects::SidebarRecentProjects;
-use remote::RemoteConnectionOptions;
+use remote::{RemoteConnectionOptions, same_remote_connection_identity};
 use ui::utils::platform_title_bar_height;
 
 use serde::{Deserialize, Serialize};
@@ -962,6 +962,7 @@ impl Sidebar {
                 host,
                 provisional_key,
                 |options, window, cx| connect_remote(active_workspace, options, window, cx),
+                &[],
                 window,
                 cx,
             )
@@ -997,6 +998,7 @@ impl Sidebar {
                 host,
                 provisional_key,
                 |options, window, cx| connect_remote(active_workspace, options, window, cx),
+                &[],
                 window,
                 cx,
             )
@@ -1118,6 +1120,7 @@ impl Sidebar {
             let mut threads: Vec<ThreadEntry> = Vec::new();
             let mut has_running_threads = false;
             let mut waiting_thread_count: usize = 0;
+            let group_host = group_key.host();
 
             if should_load_threads {
                 let thread_store = ThreadMetadataStore::global(cx);
@@ -1172,7 +1175,7 @@ impl Sidebar {
                 // linked worktree the thread was opened in.
                 for row in thread_store
                     .read(cx)
-                    .entries_for_main_worktree_path(group_key.path_list())
+                    .entries_for_main_worktree_path(group_key.path_list(), group_host.as_ref())
                     .cloned()
                 {
                     if !seen_thread_ids.insert(row.thread_id) {
@@ -1188,7 +1191,7 @@ impl Sidebar {
                 // Load any legacy threads for the main worktrees of this project group.
                 for row in thread_store
                     .read(cx)
-                    .entries_for_path(group_key.path_list())
+                    .entries_for_path(group_key.path_list(), group_host.as_ref())
                     .cloned()
                 {
                     if !seen_thread_ids.insert(row.thread_id) {
@@ -1214,7 +1217,7 @@ impl Sidebar {
                     let worktree_path_list = PathList::new(std::slice::from_ref(&path));
                     for row in thread_store
                         .read(cx)
-                        .entries_for_path(&worktree_path_list)
+                        .entries_for_path(&worktree_path_list, group_host.as_ref())
                         .cloned()
                     {
                         if !seen_thread_ids.insert(row.thread_id) {
@@ -1295,11 +1298,11 @@ impl Sidebar {
             } else {
                 let store = ThreadMetadataStore::global(cx).read(cx);
                 store
-                    .entries_for_main_worktree_path(group_key.path_list())
+                    .entries_for_main_worktree_path(group_key.path_list(), group_host.as_ref())
                     .next()
                     .is_some()
                     || store
-                        .entries_for_path(group_key.path_list())
+                        .entries_for_path(group_key.path_list(), group_host.as_ref())
                         .next()
                         .is_some()
             };
@@ -2572,6 +2575,7 @@ impl Sidebar {
                 host,
                 provisional_key,
                 |options, window, cx| connect_remote(active_workspace, options, window, cx),
+                &[],
                 window,
                 cx,
             )
@@ -2604,20 +2608,40 @@ impl Sidebar {
     fn find_current_workspace_for_path_list(
         &self,
         path_list: &PathList,
+        remote_connection: Option<&RemoteConnectionOptions>,
         cx: &App,
     ) -> Option<Entity<Workspace>> {
         self.find_workspace_in_current_window(cx, |workspace, cx| {
             workspace_path_list(workspace, cx).paths() == path_list.paths()
+                && same_remote_connection_identity(
+                    workspace
+                        .read(cx)
+                        .project()
+                        .read(cx)
+                        .remote_connection_options(cx)
+                        .as_ref(),
+                    remote_connection,
+                )
         })
     }
 
     fn find_open_workspace_for_path_list(
         &self,
         path_list: &PathList,
+        remote_connection: Option<&RemoteConnectionOptions>,
         cx: &App,
     ) -> Option<(WindowHandle<MultiWorkspace>, Entity<Workspace>)> {
         self.find_workspace_across_windows(cx, |workspace, cx| {
             workspace_path_list(workspace, cx).paths() == path_list.paths()
+                && same_remote_connection_identity(
+                    workspace
+                        .read(cx)
+                        .project()
+                        .read(cx)
+                        .remote_connection_options(cx)
+                        .as_ref(),
+                    remote_connection,
+                )
         })
     }
 
@@ -2645,12 +2669,15 @@ impl Sidebar {
                 self.activate_thread_locally(&metadata, &workspace, false, window, cx);
             } else {
                 let path_list = metadata.folder_paths().clone();
-                if let Some((target_window, workspace)) =
-                    self.find_open_workspace_for_path_list(&path_list, cx)
-                {
+                if let Some((target_window, workspace)) = self.find_open_workspace_for_path_list(
+                    &path_list,
+                    metadata.remote_connection.as_ref(),
+                    cx,
+                ) {
                     self.activate_thread_in_other_window(metadata, workspace, target_window, cx);
                 } else {
-                    let key = ProjectGroupKey::new(None, path_list.clone());
+                    let key =
+                        ProjectGroupKey::new(metadata.remote_connection.clone(), path_list.clone());
                     self.open_workspace_and_activate_thread(metadata, path_list, &key, window, cx);
                 }
             }
@@ -2674,12 +2701,18 @@ impl Sidebar {
                         ThreadMetadataStore::global(cx)
                             .update(cx, |store, cx| store.unarchive(thread_id, cx));
 
-                        if let Some(workspace) =
-                            this.find_current_workspace_for_path_list(&path_list, cx)
-                        {
+                        if let Some(workspace) = this.find_current_workspace_for_path_list(
+                            &path_list,
+                            metadata.remote_connection.as_ref(),
+                            cx,
+                        ) {
                             this.activate_thread_locally(&metadata, &workspace, false, window, cx);
-                        } else if let Some((target_window, workspace)) =
-                            this.find_open_workspace_for_path_list(&path_list, cx)
+                        } else if let Some((target_window, workspace)) = this
+                            .find_open_workspace_for_path_list(
+                                &path_list,
+                                metadata.remote_connection.as_ref(),
+                                cx,
+                            )
                         {
                             this.activate_thread_in_other_window(
                                 metadata,
@@ -2688,7 +2721,10 @@ impl Sidebar {
                                 cx,
                             );
                         } else {
-                            let key = ProjectGroupKey::new(None, path_list.clone());
+                            let key = ProjectGroupKey::new(
+                                metadata.remote_connection.clone(),
+                                path_list.clone(),
+                            );
                             this.open_workspace_and_activate_thread(
                                 metadata, path_list, &key, window, cx,
                             );
@@ -2763,7 +2799,10 @@ impl Sidebar {
 
                         this.update_in(cx, |this, window, cx| {
                             this.restoring_tasks.remove(&thread_id);
-                            let key = ProjectGroupKey::new(None, new_paths.clone());
+                            let key = ProjectGroupKey::new(
+                                updated_metadata.remote_connection.clone(),
+                                new_paths.clone(),
+                            );
                             this.open_workspace_and_activate_thread(
                                 updated_metadata,
                                 new_paths,
@@ -2981,11 +3020,13 @@ impl Sidebar {
                     })
                     .filter(|plan| {
                         thread_id.map_or(true, |tid| {
-                            !thread_worktree_archive::path_is_referenced_by_other_unarchived_threads(
-                                tid,
-                                &plan.root_path,
-                                cx,
-                            )
+                            !store
+                                .read(cx)
+                                .path_is_referenced_by_other_unarchived_threads(
+                                    tid,
+                                    &plan.root_path,
+                                    metadata.remote_connection.as_ref(),
+                                )
                         })
                     })
                     .collect::<Vec<_>>()
@@ -3035,9 +3076,11 @@ impl Sidebar {
                 return None;
             }
 
+            let thread_remote_connection =
+                metadata.as_ref().and_then(|m| m.remote_connection.as_ref());
             let remaining = ThreadMetadataStore::global(cx)
                 .read(cx)
-                .entries_for_path(folder_paths)
+                .entries_for_path(folder_paths, thread_remote_connection)
                 .filter(|t| t.session_id.as_ref() != Some(session_id))
                 .count();
 
@@ -3156,9 +3199,14 @@ impl Sidebar {
                 mw.remove(
                     workspaces_to_remove,
                     move |this, window, cx| {
-                        this.find_or_create_local_workspace(
+                        let active_workspace = this.workspace().clone();
+                        this.find_or_create_workspace(
                             fallback_paths,
+                            project_group_key.host(),
                             Some(project_group_key),
+                            |options, window, cx| {
+                                connect_remote(active_workspace, options, window, cx)
+                            },
                             &excluded,
                             window,
                             cx,

crates/sidebar/src/sidebar_tests.rs 🔗

@@ -262,6 +262,7 @@ fn save_thread_metadata(
 ) {
     cx.update(|cx| {
         let worktree_paths = project.read(cx).worktree_paths(cx);
+        let remote_connection = project.read(cx).remote_connection_options(cx);
         let thread_id = ThreadMetadataStore::global(cx)
             .read(cx)
             .entries()
@@ -277,7 +278,7 @@ fn save_thread_metadata(
             created_at,
             worktree_paths,
             archived: false,
-            remote_connection: None,
+            remote_connection,
         };
         ThreadMetadataStore::global(cx).update(cx, |store, cx| store.save(metadata, cx));
     });
@@ -2419,8 +2420,6 @@ async fn test_confirm_on_historical_thread_preserves_historical_timestamp_and_or
         sidebar.confirm(&Confirm, window, cx);
     });
     cx.run_until_parked();
-    cx.run_until_parked();
-    cx.run_until_parked();
 
     let older_metadata = cx.update(|_, cx| {
         ThreadMetadataStore::global(cx)
@@ -2523,8 +2522,7 @@ async fn test_confirm_on_historical_thread_in_new_project_group_opens_real_threa
         sidebar.selection = Some(2);
         sidebar.confirm(&Confirm, window, cx);
     });
-    cx.run_until_parked();
-    cx.run_until_parked();
+
     cx.run_until_parked();
 
     assert_eq!(
@@ -5373,8 +5371,7 @@ async fn test_archive_last_worktree_thread_removes_workspace(cx: &mut TestAppCon
     // removal → git persist → disk removal), each of which may spawn
-    // further background work. Each run_until_parked() call drives one
-    // layer of pending work.
-    cx.run_until_parked();
-    cx.run_until_parked();
+    // further background work; a single run_until_parked() call drives
+    // all of that pending work to completion.
+
     cx.run_until_parked();
 
     // The linked worktree workspace should have been removed.
@@ -5402,6 +5399,170 @@ async fn test_archive_last_worktree_thread_removes_workspace(cx: &mut TestAppCon
     );
 }
 
+#[gpui::test]
+async fn test_archive_last_worktree_thread_not_blocked_by_remote_thread_at_same_path(
+    cx: &mut TestAppContext,
+) {
+    // A remote thread at the same path as a local linked worktree thread
+    // should not prevent the local workspace from being removed when the
+    // local thread is archived (the last local thread for that worktree).
+    init_test(cx);
+    let fs = FakeFs::new(cx.executor());
+
+    fs.insert_tree(
+        "/project",
+        serde_json::json!({
+            ".git": {
+                "worktrees": {
+                    "feature-a": {
+                        "commondir": "../../",
+                        "HEAD": "ref: refs/heads/feature-a",
+                    },
+                },
+            },
+            "src": {},
+        }),
+    )
+    .await;
+
+    fs.insert_tree(
+        "/wt-feature-a",
+        serde_json::json!({
+            ".git": "gitdir: /project/.git/worktrees/feature-a",
+            "src": {},
+        }),
+    )
+    .await;
+
+    fs.add_linked_worktree_for_repo(
+        Path::new("/project/.git"),
+        false,
+        git::repository::Worktree {
+            path: PathBuf::from("/wt-feature-a"),
+            ref_name: Some("refs/heads/feature-a".into()),
+            sha: "abc".into(),
+            is_main: false,
+        },
+    )
+    .await;
+
+    cx.update(|cx| <dyn fs::Fs>::set_global(fs.clone(), cx));
+
+    let main_project = project::Project::test(fs.clone(), ["/project".as_ref()], cx).await;
+    let worktree_project = project::Project::test(fs.clone(), ["/wt-feature-a".as_ref()], cx).await;
+
+    main_project
+        .update(cx, |p, cx| p.git_scans_complete(cx))
+        .await;
+    worktree_project
+        .update(cx, |p, cx| p.git_scans_complete(cx))
+        .await;
+
+    let (multi_workspace, cx) =
+        cx.add_window_view(|window, cx| MultiWorkspace::test_new(main_project.clone(), window, cx));
+    let sidebar = setup_sidebar(&multi_workspace, cx);
+
+    let _worktree_workspace = multi_workspace.update_in(cx, |mw, window, cx| {
+        mw.test_add_workspace(worktree_project.clone(), window, cx)
+    });
+
+    // Save a thread for the main project.
+    save_thread_metadata(
+        acp::SessionId::new(Arc::from("main-thread")),
+        Some("Main Thread".into()),
+        chrono::TimeZone::with_ymd_and_hms(&Utc, 2024, 1, 2, 0, 0, 0).unwrap(),
+        None,
+        &main_project,
+        cx,
+    );
+
+    // Save a local thread for the linked worktree.
+    let wt_thread_id = acp::SessionId::new(Arc::from("worktree-thread"));
+    save_thread_metadata(
+        wt_thread_id.clone(),
+        Some("Local Worktree Thread".into()),
+        chrono::TimeZone::with_ymd_and_hms(&Utc, 2024, 1, 1, 0, 0, 0).unwrap(),
+        None,
+        &worktree_project,
+        cx,
+    );
+
+    // Save a remote thread at the same /wt-feature-a path but on a
+    // different host. This should NOT count as a remaining thread for
+    // the local linked worktree workspace.
+    let remote_host =
+        remote::RemoteConnectionOptions::Mock(remote::MockConnectionOptions { id: 99 });
+    cx.update(|_window, cx| {
+        let metadata = ThreadMetadata {
+            thread_id: ThreadId::new(),
+            session_id: Some(acp::SessionId::new(Arc::from("remote-wt-thread"))),
+            agent_id: agent::ZED_AGENT_ID.clone(),
+            title: Some("Remote Worktree Thread".into()),
+            updated_at: chrono::TimeZone::with_ymd_and_hms(&Utc, 2024, 1, 1, 0, 0, 0).unwrap(),
+            created_at: None,
+            worktree_paths: WorktreePaths::from_folder_paths(&PathList::new(&[PathBuf::from(
+                "/wt-feature-a",
+            )])),
+            archived: false,
+            remote_connection: Some(remote_host),
+        };
+        ThreadMetadataStore::global(cx).update(cx, |store, cx| {
+            store.save(metadata, cx);
+        });
+    });
+    cx.run_until_parked();
+
+    multi_workspace.update_in(cx, |_, _window, cx| cx.notify());
+    cx.run_until_parked();
+
+    assert_eq!(
+        multi_workspace.read_with(cx, |mw, _| mw.workspaces().count()),
+        2,
+        "should start with 2 workspaces (main + linked worktree)"
+    );
+
+    // The remote thread should NOT appear in the sidebar (it belongs
+    // to a different host and no matching remote project group exists).
+    let entries_before = visible_entries_as_strings(&sidebar, cx);
+    assert!(
+        !entries_before
+            .iter()
+            .any(|e| e.contains("Remote Worktree Thread")),
+        "remote thread should not appear in local sidebar: {entries_before:?}"
+    );
+
+    // Archive the local worktree thread.
+    sidebar.update_in(cx, |sidebar: &mut Sidebar, window, cx| {
+        sidebar.archive_thread(&wt_thread_id, window, cx);
+    });
+
+    cx.run_until_parked();
+
+    // The linked worktree workspace should be removed because the
+    // only *local* thread for it was archived. The remote thread at
+    // the same path should not have prevented removal.
+    assert_eq!(
+        multi_workspace.read_with(cx, |mw, _| mw.workspaces().count()),
+        1,
+        "linked worktree workspace should be removed; the remote thread at the same path \
+         should not count as a remaining local thread"
+    );
+
+    let entries = visible_entries_as_strings(&sidebar, cx);
+    assert!(
+        entries.iter().any(|e| e.contains("Main Thread")),
+        "main thread should still be visible: {entries:?}"
+    );
+    assert!(
+        !entries.iter().any(|e| e.contains("Local Worktree Thread")),
+        "archived local worktree thread should not be visible: {entries:?}"
+    );
+    assert!(
+        !entries.iter().any(|e| e.contains("Remote Worktree Thread")),
+        "remote thread should still not appear in local sidebar: {entries:?}"
+    );
+}
+
 #[gpui::test]
 async fn test_linked_worktree_threads_not_duplicated_across_groups(cx: &mut TestAppContext) {
     // When a multi-root workspace (e.g. [/other, /project]) shares a
@@ -6214,8 +6375,7 @@ async fn test_unarchive_into_new_workspace_does_not_create_duplicate_real_thread
     sidebar.update_in(cx, |sidebar, window, cx| {
         sidebar.activate_archived_thread(metadata, window, cx);
     });
-    cx.run_until_parked();
-    cx.run_until_parked();
+
     cx.run_until_parked();
 
     assert_eq!(
@@ -6454,8 +6614,6 @@ async fn test_unarchive_into_inactive_existing_workspace_does_not_leave_active_d
         panel_b_before_settle.read_with(cx, |panel, cx| panel.draft_thread_ids(cx));
 
     cx.run_until_parked();
-    cx.run_until_parked();
-    cx.run_until_parked();
 
     sidebar.read_with(cx, |sidebar, _cx| {
         assert_active_thread(
@@ -6547,8 +6705,7 @@ async fn test_unarchive_after_removing_parent_project_group_restores_real_thread
     sidebar.update_in(cx, |sidebar, window, cx| {
         sidebar.archive_thread(&session_id, window, cx);
     });
-    cx.run_until_parked();
-    cx.run_until_parked();
+
     cx.run_until_parked();
 
     let archived_metadata = cx.update(|_, cx| {
@@ -6578,7 +6735,6 @@ async fn test_unarchive_after_removing_parent_project_group_restores_real_thread
         .await
         .expect("remove project group task should complete");
     cx.run_until_parked();
-    cx.run_until_parked();
 
     assert_eq!(
         multi_workspace.read_with(cx, |mw, _| mw.workspaces().count()),
@@ -6590,8 +6746,6 @@ async fn test_unarchive_after_removing_parent_project_group_restores_real_thread
         sidebar.activate_archived_thread(archived_metadata.clone(), window, cx);
     });
     cx.run_until_parked();
-    cx.run_until_parked();
-    cx.run_until_parked();
 
     let restored_workspace = multi_workspace.read_with(cx, |mw, cx| {
         mw.workspaces()
@@ -7290,9 +7444,6 @@ async fn test_unarchive_linked_worktree_thread_into_project_group_shows_only_res
     sidebar.update_in(cx, |sidebar, window, cx| {
         sidebar.activate_archived_thread(metadata, window, cx);
     });
-
-    cx.run_until_parked();
-    cx.run_until_parked();
     cx.run_until_parked();
 
     assert_eq!(
@@ -7357,7 +7508,6 @@ async fn test_unarchive_linked_worktree_thread_into_project_group_shows_only_res
 
     // The reported bug may only appear after an extra scheduling turn.
     cx.run_until_parked();
-    cx.run_until_parked();
 
     let entries_after_extra_turns = visible_entries_as_strings(&sidebar, cx);
     assert_no_extra_rows(&entries_after_extra_turns);
@@ -8495,7 +8645,9 @@ async fn test_non_archive_thread_paths_migrate_on_worktree_add_and_remove(cx: &m
     cx.update(|_window, cx| {
         let store = ThreadMetadataStore::global(cx).read(cx);
         assert_eq!(
-            store.entries_for_main_worktree_path(&old_key_paths).count(),
+            store
+                .entries_for_main_worktree_path(&old_key_paths, None)
+                .count(),
             2,
             "should have 2 historical threads under old key before worktree add"
         );
@@ -8518,12 +8670,16 @@ async fn test_non_archive_thread_paths_migrate_on_worktree_add_and_remove(cx: &m
     cx.update(|_window, cx| {
         let store = ThreadMetadataStore::global(cx).read(cx);
         assert_eq!(
-            store.entries_for_main_worktree_path(&old_key_paths).count(),
+            store
+                .entries_for_main_worktree_path(&old_key_paths, None)
+                .count(),
             0,
             "should have 0 historical threads under old key after worktree add"
         );
         assert_eq!(
-            store.entries_for_main_worktree_path(&new_key_paths).count(),
+            store
+                .entries_for_main_worktree_path(&new_key_paths, None)
+                .count(),
             2,
             "should have 2 historical threads under new key after worktree add"
         );
@@ -8558,12 +8714,16 @@ async fn test_non_archive_thread_paths_migrate_on_worktree_add_and_remove(cx: &m
     cx.update(|_window, cx| {
         let store = ThreadMetadataStore::global(cx).read(cx);
         assert_eq!(
-            store.entries_for_main_worktree_path(&new_key_paths).count(),
+            store
+                .entries_for_main_worktree_path(&new_key_paths, None)
+                .count(),
             0,
             "should have 0 historical threads under new key after worktree remove"
         );
         assert_eq!(
-            store.entries_for_main_worktree_path(&old_key_paths).count(),
+            store
+                .entries_for_main_worktree_path(&old_key_paths, None)
+                .count(),
             2,
             "should have 2 historical threads under old key after worktree remove"
         );
@@ -8661,12 +8821,12 @@ async fn test_worktree_add_only_migrates_threads_for_same_folder_paths(cx: &mut
     cx.update(|_window, cx| {
         let store = ThreadMetadataStore::global(cx).read(cx);
         assert_eq!(
-            store.entries_for_path(&folder_paths_main).count(),
+            store.entries_for_path(&folder_paths_main, None).count(),
             1,
             "one thread under [/project]"
         );
         assert_eq!(
-            store.entries_for_path(&folder_paths_wt).count(),
+            store.entries_for_path(&folder_paths_wt, None).count(),
             1,
             "one thread under [/wt-feature]"
         );
@@ -8688,17 +8848,17 @@ async fn test_worktree_add_only_migrates_threads_for_same_folder_paths(cx: &mut
     cx.update(|_window, cx| {
         let store = ThreadMetadataStore::global(cx).read(cx);
         assert_eq!(
-            store.entries_for_path(&folder_paths_main).count(),
+            store.entries_for_path(&folder_paths_main, None).count(),
             0,
             "main thread should no longer be under old folder paths [/project]"
         );
         assert_eq!(
-            store.entries_for_path(&folder_paths_main_b).count(),
+            store.entries_for_path(&folder_paths_main_b, None).count(),
             1,
             "main thread should now be under [/project, /project-b]"
         );
         assert_eq!(
-            store.entries_for_path(&folder_paths_wt).count(),
+            store.entries_for_path(&folder_paths_wt, None).count(),
             1,
             "worktree thread should remain unchanged under [/wt-feature]"
         );
@@ -9397,13 +9557,13 @@ mod property_test {
             // panel's draft_thread_ids, not by session_id matching.
             for metadata in thread_store
                 .read(cx)
-                .entries_for_main_worktree_path(&path_list)
+                .entries_for_main_worktree_path(&path_list, None)
             {
                 if let Some(sid) = metadata.session_id.clone() {
                     metadata_thread_ids.insert(sid);
                 }
             }
-            for metadata in thread_store.read(cx).entries_for_path(&path_list) {
+            for metadata in thread_store.read(cx).entries_for_path(&path_list, None) {
                 if let Some(sid) = metadata.session_id.clone() {
                     metadata_thread_ids.insert(sid);
                 }
@@ -9423,7 +9583,7 @@ mod property_test {
             for workspace in group_workspaces {
                 let ws_path_list = workspace_path_list(workspace, cx);
                 if ws_path_list != path_list {
-                    for metadata in thread_store.read(cx).entries_for_path(&ws_path_list) {
+                    for metadata in thread_store.read(cx).entries_for_path(&ws_path_list, None) {
                         if let Some(sid) = metadata.session_id.clone() {
                             metadata_thread_ids.insert(sid);
                         }
@@ -9444,7 +9604,9 @@ mod property_test {
                         }
                         let worktree_path_list =
                             PathList::new(std::slice::from_ref(&linked_worktree.path));
-                        for metadata in thread_store.read(cx).entries_for_path(&worktree_path_list)
+                        for metadata in thread_store
+                            .read(cx)
+                            .entries_for_path(&worktree_path_list, None)
                         {
                             if let Some(sid) = metadata.session_id.clone() {
                                 metadata_thread_ids.insert(sid);
@@ -9840,8 +10002,12 @@ async fn test_remote_project_integration_does_not_briefly_render_as_separate_pro
     // in the sidebar under the same remote group. This simulates a
     // linked worktree workspace that was closed.
     let remote_thread_id = acp::SessionId::new(Arc::from("remote-thread"));
-    let main_worktree_paths =
-        project.read_with(cx, |p, cx| p.project_group_key(cx).path_list().clone());
+    let (main_worktree_paths, remote_connection) = project.read_with(cx, |p, cx| {
+        (
+            p.project_group_key(cx).path_list().clone(),
+            p.remote_connection_options(cx),
+        )
+    });
     cx.update(|_window, cx| {
         let metadata = ThreadMetadata {
             thread_id: ThreadId::new(),
@@ -9856,7 +10022,7 @@ async fn test_remote_project_integration_does_not_briefly_render_as_separate_pro
             )
             .unwrap(),
             archived: false,
-            remote_connection: None,
+            remote_connection,
         };
         ThreadMetadataStore::global(cx).update(cx, |store, cx| store.save(metadata, cx));
     });

crates/workspace/src/multi_workspace.rs 🔗

@@ -1127,6 +1127,7 @@ impl MultiWorkspace {
             &mut Context<Self>,
         ) -> Task<Result<Option<Entity<remote::RemoteClient>>>>
         + 'static,
+        excluding: &[Entity<Workspace>],
         window: &mut Window,
         cx: &mut Context<Self>,
     ) -> Task<Result<Entity<Workspace>>> {
@@ -1139,7 +1140,7 @@ impl MultiWorkspace {
             return self.find_or_create_local_workspace(
                 paths,
                 provisional_project_group_key,
-                &[],
+                excluding,
                 window,
                 cx,
             );

crates/workspace/src/multi_workspace_tests.rs 🔗

@@ -497,6 +497,7 @@ async fn test_find_or_create_workspace_uses_project_group_key_when_paths_are_mis
                 None,
                 Some(project_group_key.clone()),
                 |_options, _window, _cx| Task::ready(Ok(None)),
+                &[],
                 window,
                 cx,
             )

crates/workspace/src/persistence.rs 🔗

@@ -27,7 +27,8 @@ use project::{
 
 use language::{LanguageName, Toolchain, ToolchainScope};
 use remote::{
-    DockerConnectionOptions, RemoteConnectionOptions, SshConnectionOptions, WslConnectionOptions,
+    DockerConnectionOptions, RemoteConnectionIdentity, RemoteConnectionOptions,
+    SshConnectionOptions, WslConnectionOptions, remote_connection_identity,
 };
 use serde::{Deserialize, Serialize};
 use sqlez::{
@@ -1500,6 +1501,7 @@ impl WorkspaceDb {
         this: &Connection,
         options: RemoteConnectionOptions,
     ) -> Result<RemoteConnectionId> {
+        let identity = remote_connection_identity(&options);
         let kind;
         let user: Option<String>;
         let mut host = None;
@@ -1509,33 +1511,49 @@ impl WorkspaceDb {
         let mut container_id = None;
         let mut use_podman = None;
         let mut remote_env = None;
-        match options {
-            RemoteConnectionOptions::Ssh(options) => {
+
+        match identity {
+            RemoteConnectionIdentity::Ssh {
+                host: identity_host,
+                username,
+                port: identity_port,
+            } => {
                 kind = RemoteConnectionKind::Ssh;
-                host = Some(options.host.to_string());
-                port = options.port;
-                user = options.username;
+                host = Some(identity_host);
+                port = identity_port;
+                user = username;
             }
-            RemoteConnectionOptions::Wsl(options) => {
+            RemoteConnectionIdentity::Wsl {
+                distro_name,
+                user: identity_user,
+            } => {
                 kind = RemoteConnectionKind::Wsl;
-                distro = Some(options.distro_name);
-                user = options.user;
+                distro = Some(distro_name);
+                user = identity_user;
             }
-            RemoteConnectionOptions::Docker(options) => {
+            RemoteConnectionIdentity::Docker {
+                container_id: identity_container_id,
+                name: identity_name,
+                remote_user,
+            } => {
                 kind = RemoteConnectionKind::Docker;
-                container_id = Some(options.container_id);
-                name = Some(options.name);
-                use_podman = Some(options.use_podman);
-                user = Some(options.remote_user);
-                remote_env = serde_json::to_string(&options.remote_env).ok();
+                container_id = Some(identity_container_id);
+                name = Some(identity_name);
+                user = Some(remote_user);
             }
             #[cfg(any(test, feature = "test-support"))]
-            RemoteConnectionOptions::Mock(options) => {
+            RemoteConnectionIdentity::Mock { id } => {
                 kind = RemoteConnectionKind::Ssh;
-                host = Some(format!("mock-{}", options.id));
-                user = Some(format!("mock-user-{}", options.id));
+                host = Some(format!("mock-{}", id));
+                user = Some(format!("mock-user-{}", id));
             }
         }
+
+        if let RemoteConnectionOptions::Docker(options) = options {
+            use_podman = Some(options.use_podman);
+            remote_env = serde_json::to_string(&options.remote_env).ok();
+        }
+
         Self::get_or_create_remote_connection_query(
             this,
             kind,

crates/workspace/src/workspace.rs 🔗

@@ -38,6 +38,9 @@ pub use multi_workspace::{
     ToggleWorkspaceSidebar, sidebar_side_context_menu,
 };
 pub use path_list::{PathList, SerializedPathList};
+pub use remote::{
+    RemoteConnectionIdentity, remote_connection_identity, same_remote_connection_identity,
+};
 pub use toast_layer::{ToastAction, ToastLayer, ToastView};
 
 use anyhow::{Context as _, Result, anyhow};