Fix expansion of ancestor directories when refreshing a path

Created by Max Brunsfeld

Change summary

crates/fs/src/fs.rs                  |   9 
crates/project/src/worktree.rs       | 263 ++++++++++++++---------------
crates/project/src/worktree_tests.rs | 168 ++++++++++++++----
3 files changed, 265 insertions(+), 175 deletions(-)

Detailed changes

crates/fs/src/fs.rs 🔗

@@ -388,6 +388,7 @@ struct FakeFsState {
     event_txs: Vec<smol::channel::Sender<Vec<fsevent::Event>>>,
     events_paused: bool,
     buffered_events: Vec<fsevent::Event>,
+    read_dir_call_count: usize,
 }
 
 #[cfg(any(test, feature = "test-support"))]
@@ -536,6 +537,7 @@ impl FakeFs {
                 event_txs: Default::default(),
                 buffered_events: Vec::new(),
                 events_paused: false,
+                read_dir_call_count: 0,
             }),
         })
     }
@@ -772,6 +774,10 @@ impl FakeFs {
         result
     }
 
+    pub fn read_dir_call_count(&self) -> usize {
+        self.state.lock().read_dir_call_count
+    }
+
     async fn simulate_random_delay(&self) {
         self.executor
             .upgrade()
@@ -1146,7 +1152,8 @@ impl Fs for FakeFs {
     ) -> Result<Pin<Box<dyn Send + Stream<Item = Result<PathBuf>>>>> {
         self.simulate_random_delay().await;
         let path = normalize_path(path);
-        let state = self.state.lock();
+        let mut state = self.state.lock();
+        state.read_dir_call_count += 1;
         let entry = state.read_path(&path)?;
         let mut entry = entry.lock();
         let children = entry.dir_entries(&path)?;

crates/project/src/worktree.rs 🔗

@@ -84,15 +84,9 @@ pub struct LocalWorktree {
     visible: bool,
 }
 
-enum ScanRequest {
-    RescanPaths {
-        relative_paths: Vec<Arc<Path>>,
-        done: barrier::Sender,
-    },
-    ExpandPath {
-        path: Arc<Path>,
-        done: barrier::Sender,
-    },
+struct ScanRequest {
+    relative_paths: Vec<Arc<Path>>,
+    done: barrier::Sender,
 }
 
 pub struct RemoteWorktree {
@@ -226,6 +220,7 @@ pub struct LocalSnapshot {
 struct BackgroundScannerState {
     snapshot: LocalSnapshot,
     expanded_dirs: HashSet<ProjectEntryId>,
+    expanded_paths: HashSet<Arc<Path>>,
     /// The ids of all of the entries that were removed from the snapshot
     /// as part of the current update. These entry ids may be re-used
     /// if the same inode is discovered at a new path, or if the given
@@ -882,14 +877,11 @@ impl LocalWorktree {
         let path = Arc::from(path);
         let abs_path = self.absolutize(&path);
         let fs = self.fs.clone();
-        let expand = path
-            .parent()
-            .map(|path| self.expand_entry_for_path(path, cx));
+        let entry = self.refresh_entry(path.clone(), None, cx);
 
-        cx.spawn(|this, mut cx| async move {
-            if let Some(mut expand) = expand {
-                expand.recv().await;
-            }
+        cx.spawn(|this, cx| async move {
+            let text = fs.load(&abs_path).await?;
+            let entry = entry.await?;
 
             let mut index_task = None;
             let snapshot = this.read_with(&cx, |this, _| this.as_local().unwrap().snapshot());
@@ -904,21 +896,12 @@ impl LocalWorktree {
                 }
             }
 
-            let text = fs.load(&abs_path).await?;
-
             let diff_base = if let Some(index_task) = index_task {
                 index_task.await
             } else {
                 None
             };
 
-            // Eagerly populate the snapshot with an updated entry for the loaded file
-            let entry = this
-                .update(&mut cx, |this, cx| {
-                    this.as_local().unwrap().refresh_entry(path, None, cx)
-                })
-                .await?;
-
             Ok((
                 File {
                     entry_id: entry.id,
@@ -1082,7 +1065,7 @@ impl LocalWorktree {
                 this.as_local_mut()
                     .unwrap()
                     .scan_requests_tx
-                    .try_send(ScanRequest::RescanPaths {
+                    .try_send(ScanRequest {
                         relative_paths: vec![path],
                         done: tx,
                     })
@@ -1159,8 +1142,8 @@ impl LocalWorktree {
         let (tx, rx) = barrier::channel();
         if let Some(entry) = self.entry_for_id(entry_id) {
             self.scan_requests_tx
-                .try_send(ScanRequest::ExpandPath {
-                    path: entry.path.clone(),
+                .try_send(ScanRequest {
+                    relative_paths: vec![entry.path.clone()],
                     done: tx,
                 })
                 .ok();
@@ -1168,15 +1151,11 @@ impl LocalWorktree {
         rx
     }
 
-    pub fn expand_entry_for_path(
-        &self,
-        path: impl Into<Arc<Path>>,
-        _cx: &mut ModelContext<Worktree>,
-    ) -> barrier::Receiver {
+    pub fn refresh_entries_for_paths(&self, paths: Vec<Arc<Path>>) -> barrier::Receiver {
         let (tx, rx) = barrier::channel();
         self.scan_requests_tx
-            .try_send(ScanRequest::ExpandPath {
-                path: path.into(),
+            .try_send(ScanRequest {
+                relative_paths: paths,
                 done: tx,
             })
             .ok();
@@ -1189,20 +1168,14 @@ impl LocalWorktree {
         old_path: Option<Arc<Path>>,
         cx: &mut ModelContext<Worktree>,
     ) -> Task<Result<Entry>> {
-        let path_changes_tx = self.scan_requests_tx.clone();
+        let paths = if let Some(old_path) = old_path.as_ref() {
+            vec![old_path.clone(), path.clone()]
+        } else {
+            vec![path.clone()]
+        };
+        let mut refresh = self.refresh_entries_for_paths(paths);
         cx.spawn_weak(move |this, mut cx| async move {
-            let relative_paths = if let Some(old_path) = old_path.as_ref() {
-                vec![old_path.clone(), path.clone()]
-            } else {
-                vec![path.clone()]
-            };
-
-            let (tx, mut rx) = barrier::channel();
-            path_changes_tx.try_send(ScanRequest::RescanPaths {
-                relative_paths,
-                done: tx,
-            })?;
-            rx.recv().await;
+            refresh.recv().await;
             this.upgrade(&cx)
                 .ok_or_else(|| anyhow!("worktree was dropped"))?
                 .update(&mut cx, |this, _| {
@@ -2138,9 +2111,14 @@ impl LocalSnapshot {
 
         ignore_stack
     }
-}
 
-impl LocalSnapshot {
+    #[cfg(test)]
+    pub(crate) fn expanded_entries(&self) -> impl Iterator<Item = &Entry> {
+        self.entries_by_path
+            .cursor::<()>()
+            .filter(|entry| entry.kind == EntryKind::Dir && (entry.is_external || entry.is_ignored))
+    }
+
     #[cfg(test)]
     pub fn check_invariants(&self) {
         assert_eq!(
@@ -2217,6 +2195,14 @@ impl LocalSnapshot {
 }
 
 impl BackgroundScannerState {
+    fn is_path_expanded(&self, path: &Path) -> bool {
+        self.expanded_paths.iter().any(|p| p.starts_with(path))
+            || self
+                .snapshot
+                .entry_for_path(&path)
+                .map_or(true, |entry| self.expanded_dirs.contains(&entry.id))
+    }
+
     fn reuse_entry_id(&mut self, entry: &mut Entry) {
         if let Some(removed_entry_id) = self.removed_entry_ids.remove(&entry.inode) {
             entry.id = removed_entry_id;
@@ -2271,6 +2257,7 @@ impl BackgroundScannerState {
                 .insert(abs_parent_path, (ignore, false));
         }
 
+        self.expanded_dirs.insert(parent_entry.id);
         let mut entries_by_path_edits = vec![Edit::Insert(parent_entry)];
         let mut entries_by_id_edits = Vec::new();
         let mut dotgit_path = None;
@@ -2883,6 +2870,7 @@ impl BackgroundScanner {
                 expanded_dirs: Default::default(),
                 removed_entry_ids: Default::default(),
                 changed_paths: Default::default(),
+                expanded_paths: Default::default(),
             }),
             phase: BackgroundScannerPhase::InitialScan,
         }
@@ -2986,7 +2974,7 @@ impl BackgroundScanner {
     }
 
     async fn process_scan_request(&self, request: ScanRequest) -> bool {
-        let root_path = self.state.lock().snapshot.abs_path.clone();
+        let root_path = self.expand_paths(&request.relative_paths).await;
         let root_canonical_path = match self.fs.canonicalize(&root_path).await {
             Ok(path) => path,
             Err(err) => {
@@ -2995,57 +2983,20 @@ impl BackgroundScanner {
             }
         };
 
-        match request {
-            ScanRequest::RescanPaths {
-                relative_paths,
-                done,
-            } => {
-                let abs_paths = relative_paths
-                    .into_iter()
-                    .map(|path| {
-                        if path.file_name().is_some() {
-                            root_canonical_path.join(path)
-                        } else {
-                            root_canonical_path.clone()
-                        }
-                    })
-                    .collect::<Vec<_>>();
-                self.reload_entries_for_paths(
-                    root_path,
-                    root_canonical_path,
-                    abs_paths,
-                    false,
-                    None,
-                )
-                .await;
-                self.send_status_update(false, Some(done))
-            }
-            ScanRequest::ExpandPath { path, done } => {
-                let (scan_job_tx, mut scan_job_rx) = channel::unbounded();
-                let mut abs_path = root_canonical_path.clone();
-                for component in path.iter() {
-                    abs_path.push(component);
-                    self.reload_entries_for_paths(
-                        root_path.clone(),
-                        root_canonical_path.clone(),
-                        vec![abs_path.clone()],
-                        true,
-                        Some(scan_job_tx.clone()),
-                    )
-                    .await;
-                }
-                drop(scan_job_tx);
-
-                // Scan the expanded directories serially. This is ok, because only
-                // the direct ancestors of the expanded path need to be scanned.
-                while let Some(job) = scan_job_rx.next().await {
-                    self.scan_dir(&job).await.log_err();
+        let abs_paths = request
+            .relative_paths
+            .into_iter()
+            .map(|path| {
+                if path.file_name().is_some() {
+                    root_canonical_path.join(path)
+                } else {
+                    root_canonical_path.clone()
                 }
-
-                self.send_status_update(false, Some(done));
-                true
-            }
-        }
+            })
+            .collect::<Vec<_>>();
+        self.reload_entries_for_paths(root_path, root_canonical_path, abs_paths, None)
+            .await;
+        self.send_status_update(false, Some(request.done))
     }
 
     async fn process_events(&mut self, abs_paths: Vec<PathBuf>) {
@@ -3060,15 +3011,8 @@ impl BackgroundScanner {
 
         let (scan_job_tx, scan_job_rx) = channel::unbounded();
         let paths = self
-            .reload_entries_for_paths(
-                root_path,
-                root_canonical_path,
-                abs_paths,
-                false,
-                Some(scan_job_tx.clone()),
-            )
+            .reload_entries_for_paths(root_path, root_canonical_path, abs_paths, Some(scan_job_tx))
             .await;
-        drop(scan_job_tx);
         self.scan_dirs(false, scan_job_rx).await;
 
         self.update_ignore_statuses().await;
@@ -3108,6 +3052,46 @@ impl BackgroundScanner {
         self.send_status_update(false, None);
     }
 
+    async fn expand_paths(&self, paths: &[Arc<Path>]) -> Arc<Path> {
+        let root_path;
+        let (scan_job_tx, mut scan_job_rx) = channel::unbounded();
+        {
+            let mut state = self.state.lock();
+            root_path = state.snapshot.abs_path.clone();
+            for path in paths {
+                for ancestor in path.ancestors() {
+                    if let Some(entry) = state.snapshot.entry_for_path(ancestor) {
+                        if entry.kind == EntryKind::PendingDir {
+                            let abs_path = root_path.join(ancestor);
+                            let ignore_stack =
+                                state.snapshot.ignore_stack_for_abs_path(&abs_path, true);
+                            let ancestor_inodes =
+                                state.snapshot.ancestor_inodes_for_path(&ancestor);
+                            scan_job_tx
+                                .try_send(ScanJob {
+                                    abs_path: abs_path.into(),
+                                    path: ancestor.into(),
+                                    ignore_stack,
+                                    scan_queue: scan_job_tx.clone(),
+                                    ancestor_inodes,
+                                    is_external: entry.is_external,
+                                })
+                                .unwrap();
+                            state.expanded_paths.insert(path.clone());
+                            break;
+                        }
+                    }
+                }
+            }
+            drop(scan_job_tx);
+        }
+        while let Some(job) = scan_job_rx.next().await {
+            self.scan_dir(&job).await.log_err();
+        }
+        self.state.lock().expanded_paths.clear();
+        root_path
+    }
+
     async fn scan_dirs(
         &self,
         enable_progress_updates: bool,
@@ -3376,10 +3360,7 @@ impl BackgroundScanner {
                 // If a subdirectory is ignored, or is external to the worktree, don't scan
                 // it unless it is marked as expanded.
                 if (new_job.is_external || new_job.ignore_stack.is_all())
-                    && !state
-                        .snapshot
-                        .entry_for_path(&new_job.path)
-                        .map_or(true, |entry| state.expanded_dirs.contains(&entry.id))
+                    && !state.is_path_expanded(&new_job.path)
                 {
                     log::debug!("defer scanning directory {:?}", new_job.path);
                     continue;
@@ -3399,12 +3380,40 @@ impl BackgroundScanner {
         root_abs_path: Arc<Path>,
         root_canonical_path: PathBuf,
         mut abs_paths: Vec<PathBuf>,
-        expand: bool,
         scan_queue_tx: Option<Sender<ScanJob>>,
     ) -> Option<Vec<Arc<Path>>> {
-        let doing_recursive_update = scan_queue_tx.is_some();
+        let mut event_paths = Vec::<Arc<Path>>::with_capacity(abs_paths.len());
         abs_paths.sort_unstable();
         abs_paths.dedup_by(|a, b| a.starts_with(&b));
+        {
+            let state = self.state.lock();
+            abs_paths.retain(|abs_path| {
+                let path = if let Ok(path) = abs_path.strip_prefix(&root_canonical_path) {
+                    path
+                } else {
+                    log::error!(
+                        "unexpected event {:?} for root path {:?}",
+                        abs_path,
+                        root_canonical_path
+                    );
+                    return false;
+                };
+
+                if let Some(parent) = path.parent() {
+                    if state
+                        .snapshot
+                        .entry_for_path(parent)
+                        .map_or(true, |entry| entry.kind != EntryKind::Dir)
+                    {
+                        log::info!("ignoring event within unloaded directory {:?}", parent);
+                        return false;
+                    }
+                }
+
+                event_paths.push(path.into());
+                return true;
+            });
+        }
 
         let metadata = futures::future::join_all(
             abs_paths
@@ -3424,6 +3433,7 @@ impl BackgroundScanner {
 
         let mut state = self.state.lock();
         let snapshot = &mut state.snapshot;
+        let doing_recursive_update = scan_queue_tx.is_some();
         let is_idle = snapshot.completed_scan_id == snapshot.scan_id;
         snapshot.scan_id += 1;
         if is_idle && !doing_recursive_update {
@@ -3433,25 +3443,13 @@ impl BackgroundScanner {
         // Remove any entries for paths that no longer exist or are being recursively
         // refreshed. Do this before adding any new entries, so that renames can be
         // detected regardless of the order of the paths.
-        let mut event_paths = Vec::<Arc<Path>>::with_capacity(abs_paths.len());
-        let mut event_metadata = Vec::<_>::with_capacity(abs_paths.len());
-        for (abs_path, metadata) in abs_paths.iter().zip(metadata.iter()) {
-            if let Ok(path) = abs_path.strip_prefix(&root_canonical_path) {
-                if matches!(metadata, Ok(None)) || doing_recursive_update {
-                    state.remove_path(path);
-                }
-                event_paths.push(path.into());
-                event_metadata.push(metadata);
-            } else {
-                log::error!(
-                    "unexpected event {:?} for root path {:?}",
-                    abs_path,
-                    root_canonical_path
-                );
+        for (path, metadata) in event_paths.iter().zip(metadata.iter()) {
+            if matches!(metadata, Ok(None)) || doing_recursive_update {
+                state.remove_path(path);
             }
         }
 
-        for (path, metadata) in event_paths.iter().cloned().zip(event_metadata.into_iter()) {
+        for (path, metadata) in event_paths.iter().cloned().zip(metadata.into_iter()) {
             let abs_path: Arc<Path> = root_abs_path.join(&path).into();
 
             match metadata {
@@ -3487,9 +3485,6 @@ impl BackgroundScanner {
                     }
 
                     let fs_entry = state.insert_entry(fs_entry, self.fs.as_ref());
-                    if expand {
-                        state.expanded_dirs.insert(fs_entry.id);
-                    }
 
                     if let Some(scan_queue_tx) = &scan_queue_tx {
                         let mut ancestor_inodes = state.snapshot.ancestor_inodes_for_path(&path);

crates/project/src/worktree_tests.rs 🔗

@@ -1,6 +1,6 @@
 use crate::{
     worktree::{Event, Snapshot, WorktreeHandle},
-    EntryKind, PathChange, Worktree,
+    Entry, EntryKind, PathChange, Worktree,
 };
 use anyhow::Result;
 use client::Client;
@@ -158,9 +158,10 @@ async fn test_descendent_entries(cx: &mut TestAppContext) {
     });
 
     // Expand gitignored directory.
-    tree.update(cx, |tree, cx| {
-        let tree = tree.as_local_mut().unwrap();
-        tree.expand_entry_for_path("i/j".as_ref(), cx)
+    tree.read_with(cx, |tree, _| {
+        tree.as_local()
+            .unwrap()
+            .refresh_entries_for_paths(vec![Path::new("i/j").into()])
     })
     .recv()
     .await;
@@ -336,12 +337,18 @@ async fn test_symlinks_pointing_outside(cx: &mut TestAppContext) {
                 (Path::new("src/b.rs"), false),
             ]
         );
+
+        assert_eq!(
+            tree.entry_for_path("deps/dep-dir2").unwrap().kind,
+            EntryKind::PendingDir
+        );
     });
 
     // Expand one of the symlinked directories.
-    tree.update(cx, |tree, cx| {
-        let tree = tree.as_local_mut().unwrap();
-        tree.expand_entry_for_path("deps/dep-dir3".as_ref(), cx)
+    tree.read_with(cx, |tree, _| {
+        tree.as_local()
+            .unwrap()
+            .refresh_entries_for_paths(vec![Path::new("deps/dep-dir3").into()])
     })
     .recv()
     .await;
@@ -368,9 +375,10 @@ async fn test_symlinks_pointing_outside(cx: &mut TestAppContext) {
     });
 
     // Expand a subdirectory of one of the symlinked directories.
-    tree.update(cx, |tree, cx| {
-        let tree = tree.as_local_mut().unwrap();
-        tree.expand_entry_for_path("deps/dep-dir3/src".as_ref(), cx)
+    tree.read_with(cx, |tree, _| {
+        tree.as_local()
+            .unwrap()
+            .refresh_entries_for_paths(vec![Path::new("deps/dep-dir3/src").into()])
     })
     .recv()
     .await;
@@ -405,17 +413,19 @@ async fn test_open_gitignored_files(cx: &mut TestAppContext) {
         "/root",
         json!({
             ".gitignore": "node_modules\n",
-            "node_modules": {
-                "a": {
-                    "a1.js": "a1",
-                    "a2.js": "a2",
-                },
-                "b": {
-                    "b1.js": "b1",
-                    "b2.js": "b2",
+            "one": {
+                "node_modules": {
+                    "a": {
+                        "a1.js": "a1",
+                        "a2.js": "a2",
+                    },
+                    "b": {
+                        "b1.js": "b1",
+                        "b2.js": "b2",
+                    },
                 },
             },
-            "src": {
+            "two": {
                 "x.js": "",
                 "y.js": "",
             },
@@ -446,19 +456,23 @@ async fn test_open_gitignored_files(cx: &mut TestAppContext) {
             vec![
                 (Path::new(""), false),
                 (Path::new(".gitignore"), false),
-                (Path::new("node_modules"), true),
-                (Path::new("src"), false),
-                (Path::new("src/x.js"), false),
-                (Path::new("src/y.js"), false),
+                (Path::new("one"), false),
+                (Path::new("one/node_modules"), true),
+                (Path::new("two"), false),
+                (Path::new("two/x.js"), false),
+                (Path::new("two/y.js"), false),
             ]
         );
     });
 
+    // Open a file that is nested inside of a gitignored directory that
+    // has not yet been expanded.
+    let prev_read_dir_count = fs.read_dir_call_count();
     let buffer = tree
         .update(cx, |tree, cx| {
             tree.as_local_mut()
                 .unwrap()
-                .load_buffer(0, "node_modules/b/b1.js".as_ref(), cx)
+                .load_buffer(0, "one/node_modules/b/b1.js".as_ref(), cx)
         })
         .await
         .unwrap();
@@ -471,22 +485,68 @@ async fn test_open_gitignored_files(cx: &mut TestAppContext) {
             vec![
                 (Path::new(""), false),
                 (Path::new(".gitignore"), false),
-                (Path::new("node_modules"), true),
-                (Path::new("node_modules/a"), true),
-                (Path::new("node_modules/b"), true),
-                (Path::new("node_modules/b/b1.js"), true),
-                (Path::new("node_modules/b/b2.js"), true),
-                (Path::new("src"), false),
-                (Path::new("src/x.js"), false),
-                (Path::new("src/y.js"), false),
+                (Path::new("one"), false),
+                (Path::new("one/node_modules"), true),
+                (Path::new("one/node_modules/a"), true),
+                (Path::new("one/node_modules/b"), true),
+                (Path::new("one/node_modules/b/b1.js"), true),
+                (Path::new("one/node_modules/b/b2.js"), true),
+                (Path::new("two"), false),
+                (Path::new("two/x.js"), false),
+                (Path::new("two/y.js"), false),
+            ]
+        );
+
+        assert_eq!(
+            buffer.read(cx).file().unwrap().path().as_ref(),
+            Path::new("one/node_modules/b/b1.js")
+        );
+
+        // Only the newly-expanded directories are scanned.
+        assert_eq!(fs.read_dir_call_count() - prev_read_dir_count, 2);
+    });
+
+    // Open another file in a different subdirectory of the same
+    // gitignored directory.
+    let prev_read_dir_count = fs.read_dir_call_count();
+    let buffer = tree
+        .update(cx, |tree, cx| {
+            tree.as_local_mut()
+                .unwrap()
+                .load_buffer(0, "one/node_modules/a/a2.js".as_ref(), cx)
+        })
+        .await
+        .unwrap();
+
+    tree.read_with(cx, |tree, cx| {
+        assert_eq!(
+            tree.entries(true)
+                .map(|entry| (entry.path.as_ref(), entry.is_ignored))
+                .collect::<Vec<_>>(),
+            vec![
+                (Path::new(""), false),
+                (Path::new(".gitignore"), false),
+                (Path::new("one"), false),
+                (Path::new("one/node_modules"), true),
+                (Path::new("one/node_modules/a"), true),
+                (Path::new("one/node_modules/a/a1.js"), true),
+                (Path::new("one/node_modules/a/a2.js"), true),
+                (Path::new("one/node_modules/b"), true),
+                (Path::new("one/node_modules/b/b1.js"), true),
+                (Path::new("one/node_modules/b/b2.js"), true),
+                (Path::new("two"), false),
+                (Path::new("two/x.js"), false),
+                (Path::new("two/y.js"), false),
             ]
         );
 
-        let buffer = buffer.read(cx);
         assert_eq!(
-            buffer.file().unwrap().path().as_ref(),
-            Path::new("node_modules/b/b1.js")
+            buffer.read(cx).file().unwrap().path().as_ref(),
+            Path::new("one/node_modules/a/a2.js")
         );
+
+        // Only the newly-expanded directory is scanned.
+        assert_eq!(fs.read_dir_call_count() - prev_read_dir_count, 1);
     });
 }
 
@@ -525,9 +585,10 @@ async fn test_rescan_with_gitignore(cx: &mut TestAppContext) {
     cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
         .await;
 
-    tree.update(cx, |tree, cx| {
-        let tree = tree.as_local_mut().unwrap();
-        tree.expand_entry_for_path("ignored-dir".as_ref(), cx)
+    tree.read_with(cx, |tree, _| {
+        tree.as_local()
+            .unwrap()
+            .refresh_entries_for_paths(vec![Path::new("ignored-dir").into()])
     })
     .recv()
     .await;
@@ -868,8 +929,13 @@ async fn test_random_worktree_changes(cx: &mut TestAppContext, mut rng: StdRng)
     log::info!("quiescing");
     fs.as_fake().flush_events(usize::MAX);
     cx.foreground().run_until_parked();
+
     let snapshot = worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
     snapshot.check_invariants();
+    let expanded_paths = snapshot
+        .expanded_entries()
+        .map(|e| e.path.clone())
+        .collect::<Vec<_>>();
 
     {
         let new_worktree = Worktree::local(
@@ -885,6 +951,14 @@ async fn test_random_worktree_changes(cx: &mut TestAppContext, mut rng: StdRng)
         new_worktree
             .update(cx, |tree, _| tree.as_local_mut().unwrap().scan_complete())
             .await;
+        new_worktree
+            .update(cx, |tree, _| {
+                tree.as_local_mut()
+                    .unwrap()
+                    .refresh_entries_for_paths(expanded_paths)
+            })
+            .recv()
+            .await;
         let new_snapshot =
             new_worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
         assert_eq!(
@@ -901,11 +975,25 @@ async fn test_random_worktree_changes(cx: &mut TestAppContext, mut rng: StdRng)
         }
 
         assert_eq!(
-            prev_snapshot.entries(true).collect::<Vec<_>>(),
-            snapshot.entries(true).collect::<Vec<_>>(),
+            prev_snapshot
+                .entries(true)
+                .map(ignore_pending_dir)
+                .collect::<Vec<_>>(),
+            snapshot
+                .entries(true)
+                .map(ignore_pending_dir)
+                .collect::<Vec<_>>(),
             "wrong updates after snapshot {i}: {updates:#?}",
         );
     }
+
+    fn ignore_pending_dir(entry: &Entry) -> Entry {
+        let mut entry = entry.clone();
+        if entry.kind == EntryKind::PendingDir {
+            entry.kind = EntryKind::Dir
+        }
+        entry
+    }
 }
 
 // The worktree's `UpdatedEntries` event can be used to follow along with