Merge pull request #347 from zed-industries/fix-sharing-after-unsharing

Created by Antonio Scandurra

Cleanup worktrees' shared state when unsharing

Change summary

crates/project/src/project.rs  |  5 ++++
crates/project/src/worktree.rs | 43 ++++++++++++++++++++---------------
crates/server/src/rpc.rs       | 25 ++++++++++++++++++++
crates/server/src/rpc/store.rs | 10 +++++--
4 files changed, 62 insertions(+), 21 deletions(-)

Detailed changes

crates/project/src/project.rs 🔗

@@ -428,6 +428,11 @@ impl Project {
             rpc.send(proto::UnshareProject { project_id }).await?;
             this.update(&mut cx, |this, cx| {
                 this.collaborators.clear();
+                for worktree in &this.worktrees {
+                    worktree.update(cx, |worktree, _| {
+                        worktree.as_local_mut().unwrap().unshare();
+                    });
+                }
                 cx.notify()
             });
             Ok(())

crates/project/src/worktree.rs 🔗

@@ -1009,6 +1009,7 @@ pub struct LocalWorktree {
 struct ShareState {
     project_id: u64,
     snapshots_tx: Sender<Snapshot>,
+    _maintain_remote_snapshot: Option<Task<()>>,
 }
 
 pub struct RemoteWorktree {
@@ -1565,29 +1566,27 @@ impl LocalWorktree {
         let rpc = self.client.clone();
         let worktree_id = cx.model_id() as u64;
         let (snapshots_to_send_tx, snapshots_to_send_rx) = smol::channel::unbounded::<Snapshot>();
+        let maintain_remote_snapshot = cx.background().spawn({
+            let rpc = rpc.clone();
+            let snapshot = snapshot.clone();
+            async move {
+                let mut prev_snapshot = snapshot;
+                while let Ok(snapshot) = snapshots_to_send_rx.recv().await {
+                    let message =
+                        snapshot.build_update(&prev_snapshot, project_id, worktree_id, false);
+                    match rpc.send(message).await {
+                        Ok(()) => prev_snapshot = snapshot,
+                        Err(err) => log::error!("error sending snapshot diff {}", err),
+                    }
+                }
+            }
+        });
         self.share = Some(ShareState {
             project_id,
             snapshots_tx: snapshots_to_send_tx,
+            _maintain_remote_snapshot: Some(maintain_remote_snapshot),
         });
 
-        cx.background()
-            .spawn({
-                let rpc = rpc.clone();
-                let snapshot = snapshot.clone();
-                async move {
-                    let mut prev_snapshot = snapshot;
-                    while let Ok(snapshot) = snapshots_to_send_rx.recv().await {
-                        let message =
-                            snapshot.build_update(&prev_snapshot, project_id, worktree_id, false);
-                        match rpc.send(message).await {
-                            Ok(()) => prev_snapshot = snapshot,
-                            Err(err) => log::error!("error sending snapshot diff {}", err),
-                        }
-                    }
-                }
-            })
-            .detach();
-
         let diagnostic_summaries = self.diagnostic_summaries.clone();
         let share_message = cx.background().spawn(async move {
             proto::ShareWorktree {
@@ -1601,6 +1600,14 @@ impl LocalWorktree {
             Ok(())
         })
     }
+
+    pub fn unshare(&mut self) {
+        self.share.take();
+    }
+
+    pub fn is_shared(&self) -> bool {
+        self.share.is_some()
+    }
 }
 
 fn build_gitignore(abs_path: &Path, fs: &dyn Fs) -> Result<Gitignore> {

crates/server/src/rpc.rs 🔗

@@ -1305,6 +1305,7 @@ mod tests {
             .update(&mut cx_a, |project, cx| project.share(cx))
             .await
             .unwrap();
+        assert!(worktree_a.read_with(&cx_a, |tree, _| tree.as_local().unwrap().is_shared()));
 
         // Join that project as client B
         let project_b = Project::remote(
@@ -1331,6 +1332,30 @@ mod tests {
         project_b
             .condition(&mut cx_b, |project, _| project.is_read_only())
             .await;
+        assert!(worktree_a.read_with(&cx_a, |tree, _| !tree.as_local().unwrap().is_shared()));
+        drop(project_b);
+
+        // Share the project again and ensure guests can still join.
+        project_a
+            .update(&mut cx_a, |project, cx| project.share(cx))
+            .await
+            .unwrap();
+        assert!(worktree_a.read_with(&cx_a, |tree, _| tree.as_local().unwrap().is_shared()));
+        let project_c = Project::remote(
+            project_id,
+            client_b.clone(),
+            client_b.user_store.clone(),
+            lang_registry.clone(),
+            fs.clone(),
+            &mut cx_b.to_async(),
+        )
+        .await
+        .unwrap();
+        let worktree_c = project_c.read_with(&cx_b, |p, _| p.worktrees()[0].clone());
+        worktree_c
+            .update(&mut cx_b, |tree, cx| tree.open_buffer("a.txt", cx))
+            .await
+            .unwrap();
     }
 
     #[gpui::test]

crates/server/src/rpc/store.rs 🔗

@@ -63,7 +63,7 @@ pub struct JoinedProject<'a> {
     pub project: &'a Project,
 }
 
-pub struct UnsharedWorktree {
+pub struct UnsharedProject {
     pub connection_ids: Vec<ConnectionId>,
     pub authorized_user_ids: Vec<UserId>,
 }
@@ -348,7 +348,7 @@ impl Store {
         &mut self,
         project_id: u64,
         acting_connection_id: ConnectionId,
-    ) -> tide::Result<UnsharedWorktree> {
+    ) -> tide::Result<UnsharedProject> {
         let project = if let Some(project) = self.projects.get_mut(&project_id) {
             project
         } else {
@@ -368,10 +368,14 @@ impl Store {
                 }
             }
 
+            for worktree in project.worktrees.values_mut() {
+                worktree.share.take();
+            }
+
             #[cfg(test)]
             self.check_invariants();
 
-            Ok(UnsharedWorktree {
+            Ok(UnsharedProject {
                 connection_ids,
                 authorized_user_ids,
             })