From 3e6aedfc694894966ae89b2c85ab338c83b8f7a9 Mon Sep 17 00:00:00 2001
From: Max Brunsfeld
Date: Fri, 16 Jun 2023 15:49:27 -0700
Subject: [PATCH] Expand dirs on-demand when opening buffers inside unloaded
 dirs

---
 crates/project/src/project.rs        |  10 +-
 crates/project/src/worktree.rs       | 164 ++++++++++++++++-----------
 crates/project/src/worktree_tests.rs | 100 +++++++++++++++-
 3 files changed, 203 insertions(+), 71 deletions(-)

diff --git a/crates/project/src/project.rs b/crates/project/src/project.rs
index a360e5dd7256273905b890b5107a6ee43f3f12af..5c7120295675c019b9e3d83cc3346c68d6541aa0 100644
--- a/crates/project/src/project.rs
+++ b/crates/project/src/project.rs
@@ -5413,7 +5413,10 @@ impl Project {
         if self.is_local() {
             let worktree = self.worktree_for_id(worktree_id, cx)?;
             worktree.update(cx, |worktree, cx| {
-                worktree.as_local_mut().unwrap().expand_dir(entry_id, cx);
+                worktree
+                    .as_local_mut()
+                    .unwrap()
+                    .expand_entry_for_id(entry_id, cx);
             });
         } else if let Some(project_id) = self.remote_id() {
             cx.background()
@@ -5739,7 +5742,10 @@ impl Project {
             .read_with(&cx, |this, cx| this.worktree_for_entry(entry_id, cx))
             .ok_or_else(|| anyhow!("invalid request"))?;
         worktree.update(&mut cx, |worktree, cx| {
-            worktree.as_local_mut().unwrap().expand_dir(entry_id, cx)
+            worktree
+                .as_local_mut()
+                .unwrap()
+                .expand_entry_for_id(entry_id, cx)
         });
         Ok(proto::Ack {})
     }
diff --git a/crates/project/src/worktree.rs b/crates/project/src/worktree.rs
index 6be61e8b9c9d4b6f4bb4033307ee3646ce750ea1..bbf63901d189f8991f71a0afd4aa83c818a8052b 100644
--- a/crates/project/src/worktree.rs
+++ b/crates/project/src/worktree.rs
@@ -89,8 +89,8 @@ enum ScanRequest {
         relative_paths: Vec<Arc<Path>>,
         done: barrier::Sender,
     },
-    ExpandDir {
-        entry_id: ProjectEntryId,
+    ExpandPath {
+        path: Arc<Path>,
         done: barrier::Sender,
     },
 }
@@ -879,26 +879,31 @@ impl LocalWorktree {
         path: &Path,
         cx: &mut ModelContext<Worktree>,
     ) -> Task<Result<(File, String, Option<String>)>> {
-        let handle = cx.handle();
         let path = Arc::from(path);
         let abs_path = self.absolutize(&path);
         let fs = self.fs.clone();
-        let snapshot = self.snapshot();
+        let expand = path
+            .parent()
+            .map(|path| self.expand_entry_for_path(path, cx));

-        let mut index_task = None;
+        cx.spawn(|this, mut cx| async move {
+            if let Some(mut expand) = expand {
+                expand.recv().await;
+            }

-        if let Some(repo) = snapshot.repository_for_path(&path) {
-            let repo_path = repo.work_directory.relativize(self, &path).unwrap();
-            if let Some(repo) = self.git_repositories.get(&*repo.work_directory) {
-                let repo = repo.repo_ptr.to_owned();
-                index_task = Some(
-                    cx.background()
-                        .spawn(async move { repo.lock().load_index_text(&repo_path) }),
-                );
+            let mut index_task = None;
+            let snapshot = this.read_with(&cx, |this, _| this.as_local().unwrap().snapshot());
+            if let Some(repo) = snapshot.repository_for_path(&path) {
+                let repo_path = repo.work_directory.relativize(&snapshot, &path).unwrap();
+                if let Some(repo) = snapshot.git_repositories.get(&*repo.work_directory) {
+                    let repo = repo.repo_ptr.clone();
+                    index_task = Some(
+                        cx.background()
+                            .spawn(async move { repo.lock().load_index_text(&repo_path) }),
+                    );
+                }
             }
-        }

-        cx.spawn(|this, mut cx| async move {
             let text = fs.load(&abs_path).await?;

             let diff_base = if let Some(index_task) = index_task {
@@ -917,7 +922,7 @@ impl LocalWorktree {
             Ok((
                 File {
                     entry_id: entry.id,
-                    worktree: handle,
+                    worktree: this,
                     path: entry.path,
                     mtime: entry.mtime,
                     is_local: true,
@@ -1146,14 +1151,34 @@ impl LocalWorktree {
         }))
     }

-    pub fn expand_dir(
+    pub fn expand_entry_for_id(
         &mut self,
         entry_id: ProjectEntryId,
         _cx: &mut ModelContext<Worktree>,
+    ) -> barrier::Receiver {
+        let (tx, rx) = barrier::channel();
+        if let Some(entry) = self.entry_for_id(entry_id) {
+            self.scan_requests_tx
+                .try_send(ScanRequest::ExpandPath {
+                    path: entry.path.clone(),
+                    done: tx,
+                })
+                .ok();
+        }
+        rx
+    }
+
+    pub fn expand_entry_for_path(
+        &self,
+        path: impl Into<Arc<Path>>,
+        _cx: &mut ModelContext<Worktree>,
     ) -> barrier::Receiver {
         let (tx, rx) = barrier::channel();
         self.scan_requests_tx
-            .try_send(ScanRequest::ExpandDir { entry_id, done: tx })
+            .try_send(ScanRequest::ExpandPath {
+                path: path.into(),
+                done: tx,
+            })
             .ok();
         rx
     }
@@ -2192,10 +2217,6 @@ impl LocalSnapshot {
 }

 impl BackgroundScannerState {
-    fn is_entry_expanded(&self, entry: &Entry) -> bool {
-        self.expanded_dirs.contains(&entry.id)
-    }
-
     fn reuse_entry_id(&mut self, entry: &mut Entry) {
         if let Some(removed_entry_id) = self.removed_entry_ids.remove(&entry.inode) {
             entry.id = removed_entry_id;
@@ -2989,34 +3010,39 @@ impl BackgroundScanner {
                     }
                 })
                 .collect::<Vec<_>>();
-                self.reload_entries_for_paths(root_path, root_canonical_path, abs_paths, None)
-                    .await;
-                self.send_status_update(false, Some(done))
-            }
-            ScanRequest::ExpandDir { entry_id, done } => {
-                let abs_path;
-                {
-                    let mut state = self.state.lock();
-                    if let Some(entry) = state.snapshot.entry_for_id(entry_id) {
-                        abs_path = root_canonical_path.join(&entry.path);
-                        state.expanded_dirs.insert(entry_id);
-                    } else {
-                        return true;
-                    }
-                };
-
-                let (scan_job_tx, mut scan_job_rx) = channel::unbounded();
                 self.reload_entries_for_paths(
                     root_path,
                     root_canonical_path,
-                    vec![abs_path],
-                    Some(scan_job_tx),
+                    abs_paths,
+                    false,
+                    None,
                 )
                 .await;
-                if let Some(job) = scan_job_rx.next().await {
+                self.send_status_update(false, Some(done))
+            }
+            ScanRequest::ExpandPath { path, done } => {
+                let (scan_job_tx, mut scan_job_rx) = channel::unbounded();
+                let mut abs_path = root_canonical_path.clone();
+                for component in path.iter() {
+                    abs_path.push(component);
+                    self.reload_entries_for_paths(
+                        root_path.clone(),
+                        root_canonical_path.clone(),
+                        vec![abs_path.clone()],
+                        true,
+                        Some(scan_job_tx.clone()),
+                    )
+                    .await;
+                }
+                drop(scan_job_tx);
+
+                // Scan the expanded directories serially. This is ok, because only
+                // the direct ancestors of the expanded path need to be scanned.
+                while let Some(job) = scan_job_rx.next().await {
                     self.scan_dir(&job).await.log_err();
-                    self.send_status_update(false, Some(done));
                 }
+
+                self.send_status_update(false, Some(done));
                 true
             }
         }
@@ -3038,6 +3064,7 @@ impl BackgroundScanner {
                 root_path,
                 root_canonical_path,
                 abs_paths,
+                false,
                 Some(scan_job_tx.clone()),
             )
             .await;
@@ -3176,22 +3203,18 @@ impl BackgroundScanner {
     }

     async fn scan_dir(&self, job: &ScanJob) -> Result<()> {
+        log::debug!(
+            "scan directory {:?}. external: {}",
+            job.path,
+            job.is_external
+        );
+
         let mut new_entries: Vec<Entry> = Vec::new();
         let mut new_jobs: Vec<Option<ScanJob>> = Vec::new();
         let mut ignore_stack = job.ignore_stack.clone();
         let mut new_ignore = None;
         let (root_abs_path, root_char_bag, next_entry_id, repository) = {
-            let state = self.state.lock();
-            if job.is_external || job.ignore_stack.is_all() {
-                if let Some(entry) = state.snapshot.entry_for_path(&job.path) {
-                    if !state.is_entry_expanded(entry) {
-                        log::debug!("defer scanning directory {:?}", job.path);
-                        return Ok(());
-                    }
-                }
-            }
-
-            let snapshot = &state.snapshot;
+            let snapshot = &self.state.lock().snapshot;
             (
                 snapshot.abs_path().clone(),
                 snapshot.root_char_bag,
                 snapshot.next_entry_id.clone(),
                 snapshot
                     .repository_for_path(&job.path)
                     .map(|repo| (repo.work_directory.clone(), repo.repository.clone())),
             )
         };

-        log::debug!(
-            "scan directory {:?}. external: {}",
-            job.path,
-            job.is_external
-        );
-
         let mut root_canonical_path = None;
         let mut child_paths = self.fs.read_dir(&job.abs_path).await?;
         while let Some(child_abs_path) = child_paths.next().await {
@@ -3258,7 +3275,7 @@ impl BackgroundScanner {
                 ignore_stack.is_abs_path_ignored(&entry_abs_path, entry.is_dir());
             if entry.is_dir() {
-                if let Some(job) = new_jobs.next().expect("Missing scan job for entry") {
+                if let Some(job) = new_jobs.next().expect("missing scan job for entry") {
                     job.ignore_stack = if entry.is_ignored {
                         IgnoreStack::all()
                     } else {
@@ -3351,13 +3368,26 @@ impl BackgroundScanner {
            new_entries.push(child_entry);
        }

-        self.state
-            .lock()
-            .populate_dir(&job.path, new_entries, new_ignore, self.fs.as_ref());
+        let mut state = self.state.lock();
+        state.populate_dir(&job.path, new_entries, new_ignore, self.fs.as_ref());

        for new_job in new_jobs {
            if let Some(new_job) = new_job {
-                job.scan_queue.send(new_job).await.ok();
+                // If a subdirectory is ignored, or is external to the worktree, don't scan
+                // it unless it is marked as expanded.
+                if (new_job.is_external || new_job.ignore_stack.is_all())
+                    && !state
+                        .snapshot
+                        .entry_for_path(&new_job.path)
+                        .map_or(true, |entry| state.expanded_dirs.contains(&entry.id))
+                {
+                    log::debug!("defer scanning directory {:?}", new_job.path);
+                    continue;
+                }
+
+                job.scan_queue
+                    .try_send(new_job)
+                    .expect("channel is unbounded");
            }
        }

@@ -3369,6 +3399,7 @@ impl BackgroundScanner {
         root_abs_path: Arc<Path>,
         root_canonical_path: PathBuf,
         mut abs_paths: Vec<PathBuf>,
+        expand: bool,
         scan_queue_tx: Option<Sender<ScanJob>>,
     ) -> Option<Vec<Arc<Path>>> {
         let doing_recursive_update = scan_queue_tx.is_some();
@@ -3456,6 +3487,9 @@ impl BackgroundScanner {
             }

             let fs_entry = state.insert_entry(fs_entry, self.fs.as_ref());
+            if expand {
+                state.expanded_dirs.insert(fs_entry.id);
+            }

             if let Some(scan_queue_tx) = &scan_queue_tx {
                 let mut ancestor_inodes = state.snapshot.ancestor_inodes_for_path(&path);
diff --git a/crates/project/src/worktree_tests.rs b/crates/project/src/worktree_tests.rs
index 83f416d7843a81fe2a6346c8fda602c9b2d21dab..92eded0a99a843f7514d94366ffa76848b91a5e9 100644
--- a/crates/project/src/worktree_tests.rs
+++ b/crates/project/src/worktree_tests.rs
@@ -160,7 +160,7 @@ async fn test_descendent_entries(cx: &mut TestAppContext) {
     // Expand gitignored directory.
     tree.update(cx, |tree, cx| {
         let tree = tree.as_local_mut().unwrap();
-        tree.expand_dir(tree.entry_for_path("i/j").unwrap().id, cx)
+        tree.expand_entry_for_path("i/j".as_ref(), cx)
     })
     .recv()
     .await;
@@ -341,7 +341,7 @@ async fn test_symlinks_pointing_outside(cx: &mut TestAppContext) {
     // Expand one of the symlinked directories.
     tree.update(cx, |tree, cx| {
         let tree = tree.as_local_mut().unwrap();
-        tree.expand_dir(tree.entry_for_path("deps/dep-dir3").unwrap().id, cx)
+        tree.expand_entry_for_path("deps/dep-dir3".as_ref(), cx)
     })
     .recv()
     .await;
@@ -370,7 +370,7 @@ async fn test_symlinks_pointing_outside(cx: &mut TestAppContext) {

     // Expand a subdirectory of one of the symlinked directories.
     tree.update(cx, |tree, cx| {
         let tree = tree.as_local_mut().unwrap();
-        tree.expand_dir(tree.entry_for_path("deps/dep-dir3/src").unwrap().id, cx)
+        tree.expand_entry_for_path("deps/dep-dir3/src".as_ref(), cx)
     })
     .recv()
     .await;
@@ -398,6 +398,98 @@ async fn test_symlinks_pointing_outside(cx: &mut TestAppContext) {
     });
 }

+#[gpui::test]
+async fn test_open_gitignored_files(cx: &mut TestAppContext) {
+    let fs = FakeFs::new(cx.background());
+    fs.insert_tree(
+        "/root",
+        json!({
+            ".gitignore": "node_modules\n",
+            "node_modules": {
+                "a": {
+                    "a1.js": "a1",
+                    "a2.js": "a2",
+                },
+                "b": {
+                    "b1.js": "b1",
+                    "b2.js": "b2",
+                },
+            },
+            "src": {
+                "x.js": "",
+                "y.js": "",
+            },
+        }),
+    )
+    .await;
+
+    let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
+    let tree = Worktree::local(
+        client,
+        Path::new("/root"),
+        true,
+        fs.clone(),
+        Default::default(),
+        &mut cx.to_async(),
+    )
+    .await
+    .unwrap();
+
+    cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
+        .await;
+
+    tree.read_with(cx, |tree, _| {
+        assert_eq!(
+            tree.entries(true)
+                .map(|entry| (entry.path.as_ref(), entry.is_ignored))
+                .collect::<Vec<_>>(),
+            vec![
+                (Path::new(""), false),
+                (Path::new(".gitignore"), false),
+                (Path::new("node_modules"), true),
+                (Path::new("src"), false),
+                (Path::new("src/x.js"), false),
+                (Path::new("src/y.js"), false),
+            ]
+        );
+    });
+
+    let buffer = tree
+        .update(cx, |tree, cx| {
+            tree.as_local_mut()
+                .unwrap()
+                .load_buffer(0, "node_modules/b/b1.js".as_ref(), cx)
+        })
+        .await
+        .unwrap();
+
+    tree.read_with(cx, |tree, cx| {
+        assert_eq!(
+            tree.entries(true)
+                .map(|entry| (entry.path.as_ref(), entry.is_ignored))
+                .collect::<Vec<_>>(),
+            vec![
+                (Path::new(""), false),
+                (Path::new(".gitignore"), false),
+                (Path::new("node_modules"), true),
+                (Path::new("node_modules/a"), true),
+                (Path::new("node_modules/b"), true),
+                (Path::new("node_modules/b/b1.js"), true),
+                (Path::new("node_modules/b/b2.js"), true),
+                (Path::new("src"), false),
+                (Path::new("src/x.js"), false),
+                (Path::new("src/y.js"), false),
+            ]
+        );
+
+        let buffer = buffer.read(cx);
+        assert_eq!(
+            buffer.file().unwrap().path().as_ref(),
+            Path::new("node_modules/b/b1.js")
+        );
+    });
+}
+
 #[gpui::test]
 async fn test_rescan_with_gitignore(cx: &mut TestAppContext) {
     // .gitignores are handled explicitly by Zed and do not use the git
@@ -435,7 +527,7 @@ async fn test_rescan_with_gitignore(cx: &mut TestAppContext) {

     tree.update(cx, |tree, cx| {
         let tree = tree.as_local_mut().unwrap();
-        tree.expand_dir(tree.entry_for_path("ignored-dir").unwrap().id, cx)
+        tree.expand_entry_for_path("ignored-dir".as_ref(), cx)
     })
     .recv()
     .await;