diff --git a/crates/collab/migrations.sqlite/20221109000000_test_schema.sql b/crates/collab/migrations.sqlite/20221109000000_test_schema.sql index 595d841d075773699d88e4556354f4ef43b2b410..c690b6148ad120c41f9441bb95b5911b7e83e0ca 100644 --- a/crates/collab/migrations.sqlite/20221109000000_test_schema.sql +++ b/crates/collab/migrations.sqlite/20221109000000_test_schema.sql @@ -74,6 +74,7 @@ CREATE TABLE "worktree_entries" ( "mtime_seconds" INTEGER NOT NULL, "mtime_nanos" INTEGER NOT NULL, "is_symlink" BOOL NOT NULL, + "is_external" BOOL NOT NULL, "is_ignored" BOOL NOT NULL, "is_deleted" BOOL NOT NULL, "git_status" INTEGER, diff --git a/crates/collab/migrations/20230616134535_add_is_external_to_worktree_entries.sql b/crates/collab/migrations/20230616134535_add_is_external_to_worktree_entries.sql new file mode 100644 index 0000000000000000000000000000000000000000..e4348af0cc5c12a43fac3adecc106eb16a6de005 --- /dev/null +++ b/crates/collab/migrations/20230616134535_add_is_external_to_worktree_entries.sql @@ -0,0 +1,2 @@ +ALTER TABLE "worktree_entries" +ADD "is_external" BOOL NOT NULL DEFAULT FALSE; diff --git a/crates/collab/src/db.rs b/crates/collab/src/db.rs index 7e2c376bc2c9eef6b030fdc7b2b4017d157f9216..208da22efe4b35564f756b8c481a31cf2047481b 100644 --- a/crates/collab/src/db.rs +++ b/crates/collab/src/db.rs @@ -1539,6 +1539,7 @@ impl Database { }), is_symlink: db_entry.is_symlink, is_ignored: db_entry.is_ignored, + is_external: db_entry.is_external, git_status: db_entry.git_status.map(|status| status as i32), }); } @@ -2349,6 +2350,7 @@ impl Database { mtime_nanos: ActiveValue::set(mtime.nanos as i32), is_symlink: ActiveValue::set(entry.is_symlink), is_ignored: ActiveValue::set(entry.is_ignored), + is_external: ActiveValue::set(entry.is_external), git_status: ActiveValue::set(entry.git_status.map(|status| status as i64)), is_deleted: ActiveValue::set(false), scan_id: ActiveValue::set(update.scan_id as i64), @@ -2705,6 +2707,7 @@ impl Database { }), is_symlink: 
db_entry.is_symlink, is_ignored: db_entry.is_ignored, + is_external: db_entry.is_external, git_status: db_entry.git_status.map(|status| status as i32), }); } diff --git a/crates/collab/src/db/worktree_entry.rs b/crates/collab/src/db/worktree_entry.rs index f2df808ee3ca7d4792d649896b23c867e9fd444b..cf5090ab6d09ade92e0524770200724f5bdf2d7f 100644 --- a/crates/collab/src/db/worktree_entry.rs +++ b/crates/collab/src/db/worktree_entry.rs @@ -18,6 +18,7 @@ pub struct Model { pub git_status: Option, pub is_symlink: bool, pub is_ignored: bool, + pub is_external: bool, pub is_deleted: bool, pub scan_id: i64, } diff --git a/crates/collab/src/rpc.rs b/crates/collab/src/rpc.rs index 8d210513c2d3dc32fdac676b4953147cc4f0208e..a5be6e7d62fd7e0a2da1a66f63e5aba5eff98954 100644 --- a/crates/collab/src/rpc.rs +++ b/crates/collab/src/rpc.rs @@ -224,6 +224,7 @@ impl Server { .add_request_handler(forward_project_request::) .add_request_handler(forward_project_request::) .add_request_handler(forward_project_request::) + .add_request_handler(forward_project_request::) .add_request_handler(forward_project_request::) .add_message_handler(create_buffer_for_peer) .add_request_handler(update_buffer) diff --git a/crates/collab/src/tests/integration_tests.rs b/crates/collab/src/tests/integration_tests.rs index 92b63478cbf1d20bf0597857a96ef88e0590efe2..2211e53263226c74c840be49a657004c172d4a97 100644 --- a/crates/collab/src/tests/integration_tests.rs +++ b/crates/collab/src/tests/integration_tests.rs @@ -1266,6 +1266,27 @@ async fn test_share_project( let client_b_collaborator = project.collaborators().get(&client_b_peer_id).unwrap(); assert_eq!(client_b_collaborator.replica_id, replica_id_b); }); + project_b.read_with(cx_b, |project, cx| { + let worktree = project.worktrees(cx).next().unwrap().read(cx); + assert_eq!( + worktree.paths().map(AsRef::as_ref).collect::>(), + [ + Path::new(".gitignore"), + Path::new("a.txt"), + Path::new("b.txt"), + Path::new("ignored-dir"), + ] + ); + }); + + project_b 
+ .update(cx_b, |project, cx| { + let worktree = project.worktrees(cx).next().unwrap(); + let entry = worktree.read(cx).entry_for_path("ignored-dir").unwrap(); + project.expand_entry(worktree_id, entry.id, cx).unwrap() + }) + .await + .unwrap(); project_b.read_with(cx_b, |project, cx| { let worktree = project.worktrees(cx).next().unwrap().read(cx); assert_eq!( diff --git a/crates/fs/Cargo.toml b/crates/fs/Cargo.toml index 7dda3f7273ee8513b51119513d6d7e2a3b9bf4f9..cb738f567c31ce0a81b075e7697e384042003faa 100644 --- a/crates/fs/Cargo.toml +++ b/crates/fs/Cargo.toml @@ -32,5 +32,8 @@ serde_json.workspace = true log.workspace = true libc = "0.2" +[dev-dependencies] +gpui = { path = "../gpui", features = ["test-support"] } + [features] test-support = [] diff --git a/crates/fs/src/fs.rs b/crates/fs/src/fs.rs index fee7765d497c865903bebe10cdb4a776e565559a..e487b64c4e97591c8bb56a17ed01a0d091a2f57c 100644 --- a/crates/fs/src/fs.rs +++ b/crates/fs/src/fs.rs @@ -108,6 +108,7 @@ pub trait Fs: Send + Sync { async fn canonicalize(&self, path: &Path) -> Result; async fn is_file(&self, path: &Path) -> bool; async fn metadata(&self, path: &Path) -> Result>; + async fn read_link(&self, path: &Path) -> Result; async fn read_dir( &self, path: &Path, @@ -323,6 +324,11 @@ impl Fs for RealFs { })) } + async fn read_link(&self, path: &Path) -> Result { + let path = smol::fs::read_link(path).await?; + Ok(path) + } + async fn read_dir( &self, path: &Path, @@ -382,6 +388,7 @@ struct FakeFsState { event_txs: Vec>>, events_paused: bool, buffered_events: Vec, + read_dir_call_count: usize, } #[cfg(any(test, feature = "test-support"))] @@ -407,46 +414,51 @@ enum FakeFsEntry { impl FakeFsState { fn read_path<'a>(&'a self, target: &Path) -> Result>> { Ok(self - .try_read_path(target) + .try_read_path(target, true) .ok_or_else(|| anyhow!("path does not exist: {}", target.display()))? 
.0) } - fn try_read_path<'a>(&'a self, target: &Path) -> Option<(Arc>, PathBuf)> { + fn try_read_path<'a>( + &'a self, + target: &Path, + follow_symlink: bool, + ) -> Option<(Arc>, PathBuf)> { let mut path = target.to_path_buf(); - let mut real_path = PathBuf::new(); + let mut canonical_path = PathBuf::new(); let mut entry_stack = Vec::new(); 'outer: loop { - let mut path_components = path.components().collect::>(); - while let Some(component) = path_components.pop_front() { + let mut path_components = path.components().peekable(); + while let Some(component) = path_components.next() { match component { Component::Prefix(_) => panic!("prefix paths aren't supported"), Component::RootDir => { entry_stack.clear(); entry_stack.push(self.root.clone()); - real_path.clear(); - real_path.push("/"); + canonical_path.clear(); + canonical_path.push("/"); } Component::CurDir => {} Component::ParentDir => { entry_stack.pop()?; - real_path.pop(); + canonical_path.pop(); } Component::Normal(name) => { let current_entry = entry_stack.last().cloned()?; let current_entry = current_entry.lock(); if let FakeFsEntry::Dir { entries, .. } = &*current_entry { let entry = entries.get(name.to_str().unwrap()).cloned()?; - let _entry = entry.lock(); - if let FakeFsEntry::Symlink { target, .. } = &*_entry { - let mut target = target.clone(); - target.extend(path_components); - path = target; - continue 'outer; - } else { - entry_stack.push(entry.clone()); - real_path.push(name); + if path_components.peek().is_some() || follow_symlink { + let entry = entry.lock(); + if let FakeFsEntry::Symlink { target, .. 
} = &*entry { + let mut target = target.clone(); + target.extend(path_components); + path = target; + continue 'outer; + } } + entry_stack.push(entry.clone()); + canonical_path.push(name); } else { return None; } @@ -455,7 +467,7 @@ impl FakeFsState { } break; } - entry_stack.pop().map(|entry| (entry, real_path)) + Some((entry_stack.pop()?, canonical_path)) } fn write_path(&self, path: &Path, callback: Fn) -> Result @@ -525,6 +537,7 @@ impl FakeFs { event_txs: Default::default(), buffered_events: Vec::new(), events_paused: false, + read_dir_call_count: 0, }), }) } @@ -761,6 +774,10 @@ impl FakeFs { result } + pub fn read_dir_call_count(&self) -> usize { + self.state.lock().read_dir_call_count + } + async fn simulate_random_delay(&self) { self.executor .upgrade() @@ -776,6 +793,10 @@ impl FakeFsEntry { matches!(self, Self::File { .. }) } + fn is_symlink(&self) -> bool { + matches!(self, Self::Symlink { .. }) + } + fn file_content(&self, path: &Path) -> Result<&String> { if let Self::File { content, .. 
} = self { Ok(content) @@ -1056,8 +1077,8 @@ impl Fs for FakeFs { let path = normalize_path(path); self.simulate_random_delay().await; let state = self.state.lock(); - if let Some((_, real_path)) = state.try_read_path(&path) { - Ok(real_path) + if let Some((_, canonical_path)) = state.try_read_path(&path, true) { + Ok(canonical_path) } else { Err(anyhow!("path does not exist: {}", path.display())) } @@ -1067,7 +1088,7 @@ impl Fs for FakeFs { let path = normalize_path(path); self.simulate_random_delay().await; let state = self.state.lock(); - if let Some((entry, _)) = state.try_read_path(&path) { + if let Some((entry, _)) = state.try_read_path(&path, true) { entry.lock().is_file() } else { false @@ -1078,10 +1099,17 @@ impl Fs for FakeFs { self.simulate_random_delay().await; let path = normalize_path(path); let state = self.state.lock(); - if let Some((entry, real_path)) = state.try_read_path(&path) { - let entry = entry.lock(); - let is_symlink = real_path != path; + if let Some((mut entry, _)) = state.try_read_path(&path, false) { + let is_symlink = entry.lock().is_symlink(); + if is_symlink { + if let Some(e) = state.try_read_path(&path, true).map(|e| e.0) { + entry = e; + } else { + return Ok(None); + } + } + let entry = entry.lock(); Ok(Some(match &*entry { FakeFsEntry::File { inode, mtime, .. 
} => Metadata { inode: *inode, @@ -1102,13 +1130,30 @@ impl Fs for FakeFs { } } + async fn read_link(&self, path: &Path) -> Result { + self.simulate_random_delay().await; + let path = normalize_path(path); + let state = self.state.lock(); + if let Some((entry, _)) = state.try_read_path(&path, false) { + let entry = entry.lock(); + if let FakeFsEntry::Symlink { target } = &*entry { + Ok(target.clone()) + } else { + Err(anyhow!("not a symlink: {}", path.display())) + } + } else { + Err(anyhow!("path does not exist: {}", path.display())) + } + } + async fn read_dir( &self, path: &Path, ) -> Result>>>> { self.simulate_random_delay().await; let path = normalize_path(path); - let state = self.state.lock(); + let mut state = self.state.lock(); + state.read_dir_call_count += 1; let entry = state.read_path(&path)?; let mut entry = entry.lock(); let children = entry.dir_entries(&path)?; diff --git a/crates/project/src/project.rs b/crates/project/src/project.rs index 27d424879f1d566d1bf3d551912a00de89435840..91c92edb6ad7c82b60b48ba2fb92fb48f3d2ed19 100644 --- a/crates/project/src/project.rs +++ b/crates/project/src/project.rs @@ -64,7 +64,7 @@ use std::{ mem, num::NonZeroU32, ops::Range, - path::{Component, Path, PathBuf}, + path::{self, Component, Path, PathBuf}, rc::Rc, str, sync::{ @@ -478,6 +478,7 @@ impl Project { client.add_model_request_handler(Self::handle_rename_project_entry); client.add_model_request_handler(Self::handle_copy_project_entry); client.add_model_request_handler(Self::handle_delete_project_entry); + client.add_model_request_handler(Self::handle_expand_project_entry); client.add_model_request_handler(Self::handle_apply_additional_edits_for_completion); client.add_model_request_handler(Self::handle_apply_code_action); client.add_model_request_handler(Self::handle_on_type_formatting); @@ -1072,6 +1073,40 @@ impl Project { } } + pub fn expand_entry( + &mut self, + worktree_id: WorktreeId, + entry_id: ProjectEntryId, + cx: &mut ModelContext, + ) -> Option>> 
{ + let worktree = self.worktree_for_id(worktree_id, cx)?; + if self.is_local() { + worktree.update(cx, |worktree, cx| { + worktree.as_local_mut().unwrap().expand_entry(entry_id, cx) + }) + } else { + let worktree = worktree.downgrade(); + let request = self.client.request(proto::ExpandProjectEntry { + project_id: self.remote_id().unwrap(), + entry_id: entry_id.to_proto(), + }); + Some(cx.spawn_weak(|_, mut cx| async move { + let response = request.await?; + if let Some(worktree) = worktree.upgrade(&cx) { + worktree + .update(&mut cx, |worktree, _| { + worktree + .as_remote_mut() + .unwrap() + .wait_for_snapshot(response.worktree_scan_id as usize) + }) + .await?; + } + Ok(()) + })) + } + } + pub fn shared(&mut self, project_id: u64, cx: &mut ModelContext) -> Result<()> { if self.client_state.is_some() { return Err(anyhow!("project was already shared")); @@ -3081,23 +3116,44 @@ impl Project { for watcher in params.watchers { for worktree in &self.worktrees { if let Some(worktree) = worktree.upgrade(cx) { - let worktree = worktree.read(cx); - if let Some(abs_path) = worktree.abs_path().to_str() { - if let Some(suffix) = match &watcher.glob_pattern { - lsp::GlobPattern::String(s) => s, - lsp::GlobPattern::Relative(rp) => &rp.pattern, - } - .strip_prefix(abs_path) - .and_then(|s| s.strip_prefix(std::path::MAIN_SEPARATOR)) - { - if let Some(glob) = Glob::new(suffix).log_err() { - builders - .entry(worktree.id()) - .or_insert_with(|| GlobSetBuilder::new()) - .add(glob); + let glob_is_inside_worktree = worktree.update(cx, |tree, _| { + if let Some(abs_path) = tree.abs_path().to_str() { + let relative_glob_pattern = match &watcher.glob_pattern { + lsp::GlobPattern::String(s) => s + .strip_prefix(abs_path) + .and_then(|s| s.strip_prefix(std::path::MAIN_SEPARATOR)), + lsp::GlobPattern::Relative(rp) => { + let base_uri = match &rp.base_uri { + lsp::OneOf::Left(workspace_folder) => { + &workspace_folder.uri + } + lsp::OneOf::Right(base_uri) => base_uri, + }; + 
base_uri.to_file_path().ok().and_then(|file_path| { + (file_path.to_str() == Some(abs_path)) + .then_some(rp.pattern.as_str()) + }) + } + }; + if let Some(relative_glob_pattern) = relative_glob_pattern { + let literal_prefix = + glob_literal_prefix(&relative_glob_pattern); + tree.as_local_mut() + .unwrap() + .add_path_prefix_to_scan(Path::new(literal_prefix).into()); + if let Some(glob) = Glob::new(relative_glob_pattern).log_err() { + builders + .entry(tree.id()) + .or_insert_with(|| GlobSetBuilder::new()) + .add(glob); + } + return true; } - break; } + false + }); + if glob_is_inside_worktree { + break; } } } @@ -5705,6 +5761,29 @@ impl Project { }) } + async fn handle_expand_project_entry( + this: ModelHandle, + envelope: TypedEnvelope, + _: Arc, + mut cx: AsyncAppContext, + ) -> Result { + let entry_id = ProjectEntryId::from_proto(envelope.payload.entry_id); + let worktree = this + .read_with(&cx, |this, cx| this.worktree_for_entry(entry_id, cx)) + .ok_or_else(|| anyhow!("invalid request"))?; + worktree + .update(&mut cx, |worktree, cx| { + worktree + .as_local_mut() + .unwrap() + .expand_entry(entry_id, cx) + .ok_or_else(|| anyhow!("invalid entry")) + })? 
+ .await?; + let worktree_scan_id = worktree.read_with(&cx, |worktree, _| worktree.scan_id()) as u64; + Ok(proto::ExpandProjectEntryResponse { worktree_scan_id }) + } + async fn handle_update_diagnostic_summary( this: ModelHandle, envelope: TypedEnvelope, @@ -7047,6 +7126,22 @@ impl Project { } } +fn glob_literal_prefix<'a>(glob: &'a str) -> &'a str { + let mut literal_end = 0; + for (i, part) in glob.split(path::MAIN_SEPARATOR).enumerate() { + if part.contains(&['*', '?', '{', '}']) { + break; + } else { + if i > 0 { + // Account for separator prior to this part + literal_end += path::MAIN_SEPARATOR.len_utf8(); + } + literal_end += part.len(); + } + } + &glob[..literal_end] +} + impl WorktreeHandle { pub fn upgrade(&self, cx: &AppContext) -> Option> { match self { diff --git a/crates/project/src/project_tests.rs b/crates/project/src/project_tests.rs index 3c23c30ab973ad76c003e467503ed5dae47f1d65..478fad74a9d2a1b6d33c5c29117ab5667eafe64b 100644 --- a/crates/project/src/project_tests.rs +++ b/crates/project/src/project_tests.rs @@ -535,8 +535,28 @@ async fn test_reporting_fs_changes_to_language_servers(cx: &mut gpui::TestAppCon fs.insert_tree( "/the-root", json!({ - "a.rs": "", - "b.rs": "", + ".gitignore": "target\n", + "src": { + "a.rs": "", + "b.rs": "", + }, + "target": { + "x": { + "out": { + "x.rs": "" + } + }, + "y": { + "out": { + "y.rs": "", + } + }, + "z": { + "out": { + "z.rs": "" + } + } + } }), ) .await; @@ -550,11 +570,32 @@ async fn test_reporting_fs_changes_to_language_servers(cx: &mut gpui::TestAppCon // Start the language server by opening a buffer with a compatible file extension. let _buffer = project .update(cx, |project, cx| { - project.open_local_buffer("/the-root/a.rs", cx) + project.open_local_buffer("/the-root/src/a.rs", cx) }) .await .unwrap(); + // Initially, we don't load ignored files because the language server has not explicitly asked us to watch them.
+ project.read_with(cx, |project, cx| { + let worktree = project.worktrees(cx).next().unwrap(); + assert_eq!( + worktree + .read(cx) + .snapshot() + .entries(true) + .map(|entry| (entry.path.as_ref(), entry.is_ignored)) + .collect::>(), + &[ + (Path::new(""), false), + (Path::new(".gitignore"), false), + (Path::new("src"), false), + (Path::new("src/a.rs"), false), + (Path::new("src/b.rs"), false), + (Path::new("target"), true), + ] + ); + }); + // Keep track of the FS events reported to the language server. let fake_server = fake_servers.next().await.unwrap(); let file_changes = Arc::new(Mutex::new(Vec::new())); @@ -565,12 +606,20 @@ async fn test_reporting_fs_changes_to_language_servers(cx: &mut gpui::TestAppCon method: "workspace/didChangeWatchedFiles".to_string(), register_options: serde_json::to_value( lsp::DidChangeWatchedFilesRegistrationOptions { - watchers: vec![lsp::FileSystemWatcher { - glob_pattern: lsp::GlobPattern::String( - "/the-root/*.{rs,c}".to_string(), - ), - kind: None, - }], + watchers: vec![ + lsp::FileSystemWatcher { + glob_pattern: lsp::GlobPattern::String( + "/the-root/src/*.{rs,c}".to_string(), + ), + kind: None, + }, + lsp::FileSystemWatcher { + glob_pattern: lsp::GlobPattern::String( + "/the-root/target/y/**/*.rs".to_string(), + ), + kind: None, + }, + ], }, ) .ok(), @@ -588,17 +637,50 @@ async fn test_reporting_fs_changes_to_language_servers(cx: &mut gpui::TestAppCon }); cx.foreground().run_until_parked(); - assert_eq!(file_changes.lock().len(), 0); + assert_eq!(mem::take(&mut *file_changes.lock()), &[]); + + // Now the language server has asked us to watch an ignored directory path, + // so we recursively load it. 
+ project.read_with(cx, |project, cx| { + let worktree = project.worktrees(cx).next().unwrap(); + assert_eq!( + worktree + .read(cx) + .snapshot() + .entries(true) + .map(|entry| (entry.path.as_ref(), entry.is_ignored)) + .collect::>(), + &[ + (Path::new(""), false), + (Path::new(".gitignore"), false), + (Path::new("src"), false), + (Path::new("src/a.rs"), false), + (Path::new("src/b.rs"), false), + (Path::new("target"), true), + (Path::new("target/x"), true), + (Path::new("target/y"), true), + (Path::new("target/y/out"), true), + (Path::new("target/y/out/y.rs"), true), + (Path::new("target/z"), true), + ] + ); + }); // Perform some file system mutations, two of which match the watched patterns, // and one of which does not. - fs.create_file("/the-root/c.rs".as_ref(), Default::default()) + fs.create_file("/the-root/src/c.rs".as_ref(), Default::default()) + .await + .unwrap(); + fs.create_file("/the-root/src/d.txt".as_ref(), Default::default()) + .await + .unwrap(); + fs.remove_file("/the-root/src/b.rs".as_ref(), Default::default()) .await .unwrap(); - fs.create_file("/the-root/d.txt".as_ref(), Default::default()) + fs.create_file("/the-root/target/x/out/x2.rs".as_ref(), Default::default()) .await .unwrap(); - fs.remove_file("/the-root/b.rs".as_ref(), Default::default()) + fs.create_file("/the-root/target/y/out/y2.rs".as_ref(), Default::default()) .await .unwrap(); @@ -608,11 +690,15 @@ async fn test_reporting_fs_changes_to_language_servers(cx: &mut gpui::TestAppCon &*file_changes.lock(), &[ lsp::FileEvent { - uri: lsp::Url::from_file_path("/the-root/b.rs").unwrap(), + uri: lsp::Url::from_file_path("/the-root/src/b.rs").unwrap(), typ: lsp::FileChangeType::DELETED, }, lsp::FileEvent { - uri: lsp::Url::from_file_path("/the-root/c.rs").unwrap(), + uri: lsp::Url::from_file_path("/the-root/src/c.rs").unwrap(), + typ: lsp::FileChangeType::CREATED, + }, + lsp::FileEvent { + uri: lsp::Url::from_file_path("/the-root/target/y/out/y2.rs").unwrap(), typ: 
lsp::FileChangeType::CREATED, }, ] @@ -3846,6 +3932,14 @@ async fn test_search_with_exclusions_and_inclusions(cx: &mut gpui::TestAppContex ); } +#[test] +fn test_glob_literal_prefix() { + assert_eq!(glob_literal_prefix("**/*.js"), ""); + assert_eq!(glob_literal_prefix("node_modules/**/*.js"), "node_modules"); + assert_eq!(glob_literal_prefix("foo/{bar,baz}.js"), "foo"); + assert_eq!(glob_literal_prefix("foo/bar/baz.js"), "foo/bar/baz.js"); +} + async fn search( project: &ModelHandle, query: SearchQuery, diff --git a/crates/project/src/worktree.rs b/crates/project/src/worktree.rs index 2b0ba3d5218ea86d69dfdac2381bf2a7d62290be..be3bcd05fa2e10241a1bc614599dc23014220260 100644 --- a/crates/project/src/worktree.rs +++ b/crates/project/src/worktree.rs @@ -5,7 +5,7 @@ use ::ignore::gitignore::{Gitignore, GitignoreBuilder}; use anyhow::{anyhow, Context, Result}; use client::{proto, Client}; use clock::ReplicaId; -use collections::{HashMap, VecDeque}; +use collections::{HashMap, HashSet, VecDeque}; use fs::{ repository::{GitFileStatus, GitRepository, RepoPath}, Fs, LineEnding, @@ -67,7 +67,8 @@ pub enum Worktree { pub struct LocalWorktree { snapshot: LocalSnapshot, - path_changes_tx: channel::Sender<(Vec, barrier::Sender)>, + scan_requests_tx: channel::Sender, + path_prefixes_to_scan_tx: channel::Sender>, is_scanning: (watch::Sender, watch::Receiver), _background_scanner_task: Task<()>, share: Option, @@ -84,6 +85,11 @@ pub struct LocalWorktree { visible: bool, } +struct ScanRequest { + relative_paths: Vec>, + done: barrier::Sender, +} + pub struct RemoteWorktree { snapshot: Snapshot, background_snapshot: Arc>, @@ -214,6 +220,9 @@ pub struct LocalSnapshot { struct BackgroundScannerState { snapshot: LocalSnapshot, + scanned_dirs: HashSet, + path_prefixes_to_scan: HashSet>, + paths_to_scan: HashSet>, /// The ids of all of the entries that were removed from the snapshot /// as part of the current update. 
These entry ids may be re-used /// if the same inode is discovered at a new path, or if the given @@ -232,13 +241,6 @@ pub struct LocalRepositoryEntry { pub(crate) git_dir_path: Arc, } -impl LocalRepositoryEntry { - // Note that this path should be relative to the worktree root. - pub(crate) fn in_dot_git(&self, path: &Path) -> bool { - path.starts_with(self.git_dir_path.as_ref()) - } -} - impl Deref for LocalSnapshot { type Target = Snapshot; @@ -330,7 +332,8 @@ impl Worktree { ); } - let (path_changes_tx, path_changes_rx) = channel::unbounded(); + let (scan_requests_tx, scan_requests_rx) = channel::unbounded(); + let (path_prefixes_to_scan_tx, path_prefixes_to_scan_rx) = channel::unbounded(); let (scan_states_tx, mut scan_states_rx) = mpsc::unbounded(); cx.spawn_weak(|this, mut cx| async move { @@ -370,7 +373,8 @@ impl Worktree { fs, scan_states_tx, background, - path_changes_rx, + scan_requests_rx, + path_prefixes_to_scan_rx, ) .run(events) .await; @@ -381,7 +385,8 @@ impl Worktree { snapshot, is_scanning: watch::channel_with(true), share: None, - path_changes_tx, + scan_requests_tx, + path_prefixes_to_scan_tx, _background_scanner_task: background_scanner_task, diagnostics: Default::default(), diagnostic_summaries: Default::default(), @@ -867,27 +872,27 @@ impl LocalWorktree { path: &Path, cx: &mut ModelContext, ) -> Task)>> { - let handle = cx.handle(); let path = Arc::from(path); let abs_path = self.absolutize(&path); let fs = self.fs.clone(); - let snapshot = self.snapshot(); - - let mut index_task = None; - - if let Some(repo) = snapshot.repository_for_path(&path) { - let repo_path = repo.work_directory.relativize(self, &path).unwrap(); - if let Some(repo) = self.git_repositories.get(&*repo.work_directory) { - let repo = repo.repo_ptr.to_owned(); - index_task = Some( - cx.background() - .spawn(async move { repo.lock().load_index_text(&repo_path) }), - ); - } - } + let entry = self.refresh_entry(path.clone(), None, cx); - cx.spawn(|this, mut cx| async move { 
+ cx.spawn(|this, cx| async move { let text = fs.load(&abs_path).await?; + let entry = entry.await?; + + let mut index_task = None; + let snapshot = this.read_with(&cx, |this, _| this.as_local().unwrap().snapshot()); + if let Some(repo) = snapshot.repository_for_path(&path) { + let repo_path = repo.work_directory.relativize(&snapshot, &path).unwrap(); + if let Some(repo) = snapshot.git_repositories.get(&*repo.work_directory) { + let repo = repo.repo_ptr.clone(); + index_task = Some( + cx.background() + .spawn(async move { repo.lock().load_index_text(&repo_path) }), + ); + } + } let diff_base = if let Some(index_task) = index_task { index_task.await @@ -895,17 +900,10 @@ impl LocalWorktree { None }; - // Eagerly populate the snapshot with an updated entry for the loaded file - let entry = this - .update(&mut cx, |this, cx| { - this.as_local().unwrap().refresh_entry(path, None, cx) - }) - .await?; - Ok(( File { entry_id: entry.id, - worktree: handle, + worktree: this, path: entry.path, mtime: entry.mtime, is_local: true, @@ -1039,14 +1037,10 @@ impl LocalWorktree { cx: &mut ModelContext, ) -> Option>> { let entry = self.entry_for_id(entry_id)?.clone(); - let abs_path = self.abs_path.clone(); + let abs_path = self.absolutize(&entry.path); let fs = self.fs.clone(); let delete = cx.background().spawn(async move { - let mut abs_path = fs.canonicalize(&abs_path).await?; - if entry.path.file_name().is_some() { - abs_path = abs_path.join(&entry.path); - } if entry.is_file() { fs.remove_file(&abs_path, Default::default()).await?; } else { @@ -1059,19 +1053,18 @@ impl LocalWorktree { ) .await?; } - anyhow::Ok(abs_path) + anyhow::Ok(entry.path) }); Some(cx.spawn(|this, mut cx| async move { - let abs_path = delete.await?; - let (tx, mut rx) = barrier::channel(); + let path = delete.await?; this.update(&mut cx, |this, _| { this.as_local_mut() .unwrap() - .path_changes_tx - .try_send((vec![abs_path], tx)) - })?; - rx.recv().await; + .refresh_entries_for_paths(vec![path]) + }) + 
.recv() + .await; Ok(()) })) } @@ -1135,34 +1128,48 @@ impl LocalWorktree { })) } + pub fn expand_entry( + &mut self, + entry_id: ProjectEntryId, + cx: &mut ModelContext, + ) -> Option>> { + let path = self.entry_for_id(entry_id)?.path.clone(); + let mut refresh = self.refresh_entries_for_paths(vec![path]); + Some(cx.background().spawn(async move { + refresh.next().await; + Ok(()) + })) + } + + pub fn refresh_entries_for_paths(&self, paths: Vec>) -> barrier::Receiver { + let (tx, rx) = barrier::channel(); + self.scan_requests_tx + .try_send(ScanRequest { + relative_paths: paths, + done: tx, + }) + .ok(); + rx + } + + pub fn add_path_prefix_to_scan(&self, path_prefix: Arc) { + self.path_prefixes_to_scan_tx.try_send(path_prefix).ok(); + } + fn refresh_entry( &self, path: Arc, old_path: Option>, cx: &mut ModelContext, ) -> Task> { - let fs = self.fs.clone(); - let abs_root_path = self.abs_path.clone(); - let path_changes_tx = self.path_changes_tx.clone(); + let paths = if let Some(old_path) = old_path.as_ref() { + vec![old_path.clone(), path.clone()] + } else { + vec![path.clone()] + }; + let mut refresh = self.refresh_entries_for_paths(paths); cx.spawn_weak(move |this, mut cx| async move { - let abs_path = fs.canonicalize(&abs_root_path).await?; - let mut paths = Vec::with_capacity(2); - paths.push(if path.file_name().is_some() { - abs_path.join(&path) - } else { - abs_path.clone() - }); - if let Some(old_path) = old_path { - paths.push(if old_path.file_name().is_some() { - abs_path.join(&old_path) - } else { - abs_path.clone() - }); - } - - let (tx, mut rx) = barrier::channel(); - path_changes_tx.try_send((paths, tx))?; - rx.recv().await; + refresh.recv().await; this.upgrade(&cx) .ok_or_else(|| anyhow!("worktree was dropped"))? 
.update(&mut cx, |this, _| { @@ -1331,7 +1338,7 @@ impl RemoteWorktree { self.completed_scan_id >= scan_id } - fn wait_for_snapshot(&mut self, scan_id: usize) -> impl Future> { + pub(crate) fn wait_for_snapshot(&mut self, scan_id: usize) -> impl Future> { let (tx, rx) = oneshot::channel(); if self.observed_snapshot(scan_id) { let _ = tx.send(()); @@ -1568,7 +1575,7 @@ impl Snapshot { } pub fn visible_file_count(&self) -> usize { - self.entries_by_path.summary().visible_file_count + self.entries_by_path.summary().non_ignored_file_count } fn traverse_from_offset( @@ -1837,15 +1844,6 @@ impl LocalSnapshot { Some((path, self.git_repositories.get(&repo.work_directory_id())?)) } - pub(crate) fn repo_for_metadata( - &self, - path: &Path, - ) -> Option<(&ProjectEntryId, &LocalRepositoryEntry)> { - self.git_repositories - .iter() - .find(|(_, repo)| repo.in_dot_git(path)) - } - fn build_update( &self, project_id: u64, @@ -1981,57 +1979,6 @@ impl LocalSnapshot { entry } - #[must_use = "Changed paths must be used for diffing later"] - fn build_repo(&mut self, parent_path: Arc, fs: &dyn Fs) -> Option>> { - let abs_path = self.abs_path.join(&parent_path); - let work_dir: Arc = parent_path.parent().unwrap().into(); - - // Guard against repositories inside the repository metadata - if work_dir - .components() - .find(|component| component.as_os_str() == *DOT_GIT) - .is_some() - { - return None; - }; - - let work_dir_id = self - .entry_for_path(work_dir.clone()) - .map(|entry| entry.id)?; - - if self.git_repositories.get(&work_dir_id).is_some() { - return None; - } - - let repo = fs.open_repo(abs_path.as_path())?; - let work_directory = RepositoryWorkDirectory(work_dir.clone()); - - let repo_lock = repo.lock(); - - self.repository_entries.insert( - work_directory.clone(), - RepositoryEntry { - work_directory: work_dir_id.into(), - branch: repo_lock.branch_name().map(Into::into), - }, - ); - - let changed_paths = self.scan_statuses(repo_lock.deref(), &work_directory); - - 
drop(repo_lock); - - self.git_repositories.insert( - work_dir_id, - LocalRepositoryEntry { - git_dir_scan_id: 0, - repo_ptr: repo, - git_dir_path: parent_path.clone(), - }, - ); - - Some(changed_paths) - } - #[must_use = "Changed paths must be used for diffing later"] fn scan_statuses( &mut self, @@ -2098,11 +2045,18 @@ impl LocalSnapshot { ignore_stack } -} -impl LocalSnapshot { #[cfg(test)] - pub fn check_invariants(&self) { + pub(crate) fn expanded_entries(&self) -> impl Iterator { + self.entries_by_path + .cursor::<()>() + .filter(|entry| entry.kind == EntryKind::Dir && (entry.is_external || entry.is_ignored)) + } + + #[cfg(test)] + pub fn check_invariants(&self, git_state: bool) { + use pretty_assertions::assert_eq; + assert_eq!( self.entries_by_path .cursor::<()>() @@ -2122,7 +2076,7 @@ impl LocalSnapshot { for entry in self.entries_by_path.cursor::<()>() { if entry.is_file() { assert_eq!(files.next().unwrap().inode, entry.inode); - if !entry.is_ignored { + if !entry.is_ignored && !entry.is_external { assert_eq!(visible_files.next().unwrap().inode, entry.inode); } } @@ -2132,7 +2086,11 @@ impl LocalSnapshot { assert!(visible_files.next().is_none()); let mut bfs_paths = Vec::new(); - let mut stack = vec![Path::new("")]; + let mut stack = self + .root_entry() + .map(|e| e.path.as_ref()) + .into_iter() + .collect::>(); while let Some(path) = stack.pop() { bfs_paths.push(path); let ix = stack.len(); @@ -2154,12 +2112,15 @@ impl LocalSnapshot { .collect::>(); assert_eq!(dfs_paths_via_traversal, dfs_paths_via_iter); - for ignore_parent_abs_path in self.ignores_by_parent_abs_path.keys() { - let ignore_parent_path = ignore_parent_abs_path.strip_prefix(&self.abs_path).unwrap(); - assert!(self.entry_for_path(&ignore_parent_path).is_some()); - assert!(self - .entry_for_path(ignore_parent_path.join(&*GITIGNORE)) - .is_some()); + if git_state { + for ignore_parent_abs_path in self.ignores_by_parent_abs_path.keys() { + let ignore_parent_path = + 
ignore_parent_abs_path.strip_prefix(&self.abs_path).unwrap(); + assert!(self.entry_for_path(&ignore_parent_path).is_some()); + assert!(self + .entry_for_path(ignore_parent_path.join(&*GITIGNORE)) + .is_some()); + } } } @@ -2177,6 +2138,19 @@ impl LocalSnapshot { } impl BackgroundScannerState { + fn should_scan_directory(&self, entry: &Entry) -> bool { + (!entry.is_external && !entry.is_ignored) + || self.scanned_dirs.contains(&entry.id) // If we've ever scanned it, keep scanning + || self + .paths_to_scan + .iter() + .any(|p| p.starts_with(&entry.path)) + || self + .path_prefixes_to_scan + .iter() + .any(|p| entry.path.starts_with(p)) + } + fn reuse_entry_id(&mut self, entry: &mut Entry) { if let Some(removed_entry_id) = self.removed_entry_ids.remove(&entry.inode) { entry.id = removed_entry_id; @@ -2187,17 +2161,24 @@ impl BackgroundScannerState { fn insert_entry(&mut self, mut entry: Entry, fs: &dyn Fs) -> Entry { self.reuse_entry_id(&mut entry); - self.snapshot.insert_entry(entry, fs) + let entry = self.snapshot.insert_entry(entry, fs); + if entry.path.file_name() == Some(&DOT_GIT) { + self.build_repository(entry.path.clone(), fs); + } + + #[cfg(test)] + self.snapshot.check_invariants(false); + + entry } - #[must_use = "Changed paths must be used for diffing later"] fn populate_dir( &mut self, - parent_path: Arc, + parent_path: &Arc, entries: impl IntoIterator, ignore: Option>, fs: &dyn Fs, - ) -> Option>> { + ) { let mut parent_entry = if let Some(parent_entry) = self .snapshot .entries_by_path @@ -2209,15 +2190,13 @@ impl BackgroundScannerState { "populating a directory {:?} that has been removed", parent_path ); - return None; + return; }; match parent_entry.kind { - EntryKind::PendingDir => { - parent_entry.kind = EntryKind::Dir; - } + EntryKind::PendingDir | EntryKind::UnloadedDir => parent_entry.kind = EntryKind::Dir, EntryKind::Dir => {} - _ => return None, + _ => return, } if let Some(ignore) = ignore { @@ -2227,11 +2206,16 @@ impl BackgroundScannerState 
{ .insert(abs_parent_path, (ignore, false)); } + self.scanned_dirs.insert(parent_entry.id); let mut entries_by_path_edits = vec![Edit::Insert(parent_entry)]; let mut entries_by_id_edits = Vec::new(); + let mut dotgit_path = None; + + for entry in entries { + if entry.path.file_name() == Some(&DOT_GIT) { + dotgit_path = Some(entry.path.clone()); + } - for mut entry in entries { - self.reuse_entry_id(&mut entry); entries_by_id_edits.push(Edit::Insert(PathEntry { id: entry.id, path: entry.path.clone(), @@ -2246,10 +2230,15 @@ impl BackgroundScannerState { .edit(entries_by_path_edits, &()); self.snapshot.entries_by_id.edit(entries_by_id_edits, &()); - if parent_path.file_name() == Some(&DOT_GIT) { - return self.snapshot.build_repo(parent_path, fs); + if let Some(dotgit_path) = dotgit_path { + self.build_repository(dotgit_path, fs); } - None + if let Err(ix) = self.changed_paths.binary_search(parent_path) { + self.changed_paths.insert(ix, parent_path.clone()); + } + + #[cfg(test)] + self.snapshot.check_invariants(false); } fn remove_path(&mut self, path: &Path) { @@ -2284,6 +2273,137 @@ impl BackgroundScannerState { *needs_update = true; } } + + #[cfg(test)] + self.snapshot.check_invariants(false); + } + + fn reload_repositories(&mut self, changed_paths: &[Arc], fs: &dyn Fs) { + let scan_id = self.snapshot.scan_id; + + // Find each of the .git directories that contain any of the given paths. + let mut prev_dot_git_dir = None; + for changed_path in changed_paths { + let Some(dot_git_dir) = changed_path + .ancestors() + .find(|ancestor| ancestor.file_name() == Some(&*DOT_GIT)) else { + continue; + }; + + // Avoid processing the same repository multiple times, if multiple paths + // within it have changed. + if prev_dot_git_dir == Some(dot_git_dir) { + continue; + } + prev_dot_git_dir = Some(dot_git_dir); + + // If there is already a repository for this .git directory, reload + // the status for all of its files. 
+ let repository = self + .snapshot + .git_repositories + .iter() + .find_map(|(entry_id, repo)| { + (repo.git_dir_path.as_ref() == dot_git_dir).then(|| (*entry_id, repo.clone())) + }); + match repository { + None => { + self.build_repository(dot_git_dir.into(), fs); + } + Some((entry_id, repository)) => { + if repository.git_dir_scan_id == scan_id { + continue; + } + let Some(work_dir) = self + .snapshot + .entry_for_id(entry_id) + .map(|entry| RepositoryWorkDirectory(entry.path.clone())) else { continue }; + + let repository = repository.repo_ptr.lock(); + let branch = repository.branch_name(); + repository.reload_index(); + + self.snapshot + .git_repositories + .update(&entry_id, |entry| entry.git_dir_scan_id = scan_id); + self.snapshot + .snapshot + .repository_entries + .update(&work_dir, |entry| entry.branch = branch.map(Into::into)); + + let changed_paths = self.snapshot.scan_statuses(&*repository, &work_dir); + util::extend_sorted( + &mut self.changed_paths, + changed_paths, + usize::MAX, + Ord::cmp, + ) + } + } + } + + // Remove any git repositories whose .git entry no longer exists. 
+ let mut snapshot = &mut self.snapshot; + let mut repositories = mem::take(&mut snapshot.git_repositories); + let mut repository_entries = mem::take(&mut snapshot.repository_entries); + repositories.retain(|work_directory_id, _| { + snapshot + .entry_for_id(*work_directory_id) + .map_or(false, |entry| { + snapshot.entry_for_path(entry.path.join(*DOT_GIT)).is_some() + }) + }); + repository_entries.retain(|_, entry| repositories.get(&entry.work_directory.0).is_some()); + snapshot.git_repositories = repositories; + snapshot.repository_entries = repository_entries; + } + + fn build_repository(&mut self, dot_git_path: Arc, fs: &dyn Fs) -> Option<()> { + let work_dir_path: Arc = dot_git_path.parent().unwrap().into(); + + // Guard against repositories inside the repository metadata + if work_dir_path.iter().any(|component| component == *DOT_GIT) { + return None; + }; + + let work_dir_id = self + .snapshot + .entry_for_path(work_dir_path.clone()) + .map(|entry| entry.id)?; + + if self.snapshot.git_repositories.get(&work_dir_id).is_some() { + return None; + } + + let abs_path = self.snapshot.abs_path.join(&dot_git_path); + let repository = fs.open_repo(abs_path.as_path())?; + let work_directory = RepositoryWorkDirectory(work_dir_path.clone()); + + let repo_lock = repository.lock(); + self.snapshot.repository_entries.insert( + work_directory.clone(), + RepositoryEntry { + work_directory: work_dir_id.into(), + branch: repo_lock.branch_name().map(Into::into), + }, + ); + + let changed_paths = self + .snapshot + .scan_statuses(repo_lock.deref(), &work_directory); + drop(repo_lock); + + self.snapshot.git_repositories.insert( + work_dir_id, + LocalRepositoryEntry { + git_dir_scan_id: 0, + repo_ptr: repository, + git_dir_path: dot_git_path.clone(), + }, + ); + + util::extend_sorted(&mut self.changed_paths, changed_paths, usize::MAX, Ord::cmp); + Some(()) } } @@ -2570,12 +2690,27 @@ pub struct Entry { pub inode: u64, pub mtime: SystemTime, pub is_symlink: bool, + + /// Whether 
this entry is ignored by Git. + /// + /// We only scan ignored entries once the directory is expanded and + /// exclude them from searches. pub is_ignored: bool, + + /// Whether this entry's canonical path is outside of the worktree. + /// This means the entry is only accessible from the worktree root via a + /// symlink. + /// + /// We only scan entries outside of the worktree once the symlinked + /// directory is expanded. External entries are treated like gitignored + /// entries in that they are not included in searches. + pub is_external: bool, pub git_status: Option, } #[derive(Clone, Copy, Debug, PartialEq, Eq)] pub enum EntryKind { + UnloadedDir, PendingDir, Dir, File(CharBag), @@ -2624,16 +2759,17 @@ impl Entry { mtime: metadata.mtime, is_symlink: metadata.is_symlink, is_ignored: false, + is_external: false, git_status: None, } } pub fn is_dir(&self) -> bool { - matches!(self.kind, EntryKind::Dir | EntryKind::PendingDir) + self.kind.is_dir() } pub fn is_file(&self) -> bool { - matches!(self.kind, EntryKind::File(_)) + self.kind.is_file() } pub fn git_status(&self) -> Option { @@ -2641,19 +2777,40 @@ impl Entry { } } +impl EntryKind { + pub fn is_dir(&self) -> bool { + matches!( + self, + EntryKind::Dir | EntryKind::PendingDir | EntryKind::UnloadedDir + ) + } + + pub fn is_unloaded(&self) -> bool { + matches!(self, EntryKind::UnloadedDir) + } + + pub fn is_file(&self) -> bool { + matches!(self, EntryKind::File(_)) + } +} + impl sum_tree::Item for Entry { type Summary = EntrySummary; fn summary(&self) -> Self::Summary { - let visible_count = if self.is_ignored { 0 } else { 1 }; + let non_ignored_count = if self.is_ignored || self.is_external { + 0 + } else { + 1 + }; let file_count; - let visible_file_count; + let non_ignored_file_count; if self.is_file() { file_count = 1; - visible_file_count = visible_count; + non_ignored_file_count = non_ignored_count; } else { file_count = 0; - visible_file_count = 0; + non_ignored_file_count = 0; } let mut statuses = 
GitStatuses::default(); @@ -2669,9 +2826,9 @@ impl sum_tree::Item for Entry { EntrySummary { max_path: self.path.clone(), count: 1, - visible_count, + non_ignored_count, file_count, - visible_file_count, + non_ignored_file_count, statuses, } } @@ -2689,9 +2846,9 @@ impl sum_tree::KeyedItem for Entry { pub struct EntrySummary { max_path: Arc, count: usize, - visible_count: usize, + non_ignored_count: usize, file_count: usize, - visible_file_count: usize, + non_ignored_file_count: usize, statuses: GitStatuses, } @@ -2700,9 +2857,9 @@ impl Default for EntrySummary { Self { max_path: Arc::from(Path::new("")), count: 0, - visible_count: 0, + non_ignored_count: 0, file_count: 0, - visible_file_count: 0, + non_ignored_file_count: 0, statuses: Default::default(), } } @@ -2714,9 +2871,9 @@ impl sum_tree::Summary for EntrySummary { fn add_summary(&mut self, rhs: &Self, _: &()) { self.max_path = rhs.max_path.clone(); self.count += rhs.count; - self.visible_count += rhs.visible_count; + self.non_ignored_count += rhs.non_ignored_count; self.file_count += rhs.file_count; - self.visible_file_count += rhs.visible_file_count; + self.non_ignored_file_count += rhs.non_ignored_file_count; self.statuses += rhs.statuses; } } @@ -2784,7 +2941,8 @@ struct BackgroundScanner { fs: Arc, status_updates_tx: UnboundedSender, executor: Arc, - refresh_requests_rx: channel::Receiver<(Vec, barrier::Sender)>, + scan_requests_rx: channel::Receiver, + path_prefixes_to_scan_rx: channel::Receiver>, next_entry_id: Arc, phase: BackgroundScannerPhase, } @@ -2803,17 +2961,22 @@ impl BackgroundScanner { fs: Arc, status_updates_tx: UnboundedSender, executor: Arc, - refresh_requests_rx: channel::Receiver<(Vec, barrier::Sender)>, + scan_requests_rx: channel::Receiver, + path_prefixes_to_scan_rx: channel::Receiver>, ) -> Self { Self { fs, status_updates_tx, executor, - refresh_requests_rx, + scan_requests_rx, + path_prefixes_to_scan_rx, next_entry_id, state: Mutex::new(BackgroundScannerState { prev_snapshot: 
snapshot.snapshot.clone(), snapshot, + scanned_dirs: Default::default(), + path_prefixes_to_scan: Default::default(), + paths_to_scan: Default::default(), removed_entry_ids: Default::default(), changed_paths: Default::default(), }), @@ -2823,7 +2986,7 @@ impl BackgroundScanner { async fn run( &mut self, - mut events_rx: Pin>>>, + mut fs_events_rx: Pin>>>, ) { use futures::FutureExt as _; @@ -2868,6 +3031,7 @@ impl BackgroundScanner { path: Arc::from(Path::new("")), ignore_stack, ancestor_inodes: TreeSet::from_ordered_entries(root_inode), + is_external: false, scan_queue: scan_job_tx.clone(), })) .unwrap(); @@ -2884,9 +3048,9 @@ impl BackgroundScanner { // For these events, update events cannot be as precise, because we didn't // have the previous state loaded yet. self.phase = BackgroundScannerPhase::EventsReceivedDuringInitialScan; - if let Poll::Ready(Some(events)) = futures::poll!(events_rx.next()) { + if let Poll::Ready(Some(events)) = futures::poll!(fs_events_rx.next()) { let mut paths = events.into_iter().map(|e| e.path).collect::>(); - while let Poll::Ready(Some(more_events)) = futures::poll!(events_rx.next()) { + while let Poll::Ready(Some(more_events)) = futures::poll!(fs_events_rx.next()) { paths.extend(more_events.into_iter().map(|e| e.path)); } self.process_events(paths).await; @@ -2898,17 +3062,33 @@ impl BackgroundScanner { select_biased! { // Process any path refresh requests from the worktree. Prioritize // these before handling changes reported by the filesystem. 
- request = self.refresh_requests_rx.recv().fuse() => { - let Ok((paths, barrier)) = request else { break }; - if !self.process_refresh_request(paths.clone(), barrier).await { + request = self.scan_requests_rx.recv().fuse() => { + let Ok(request) = request else { break }; + if !self.process_scan_request(request, false).await { return; } } - events = events_rx.next().fuse() => { + path_prefix = self.path_prefixes_to_scan_rx.recv().fuse() => { + let Ok(path_prefix) = path_prefix else { break }; + + self.forcibly_load_paths(&[path_prefix.clone()]).await; + + let abs_path = + { + let mut state = self.state.lock(); + state.path_prefixes_to_scan.insert(path_prefix.clone()); + state.snapshot.abs_path.join(path_prefix) + }; + if let Some(abs_path) = self.fs.canonicalize(&abs_path).await.log_err() { + self.process_events(vec![abs_path]).await; + } + } + + events = fs_events_rx.next().fuse() => { let Some(events) = events else { break }; let mut paths = events.into_iter().map(|e| e.path).collect::>(); - while let Poll::Ready(Some(more_events)) = futures::poll!(events_rx.next()) { + while let Poll::Ready(Some(more_events)) = futures::poll!(fs_events_rx.next()) { paths.extend(more_events.into_iter().map(|e| e.path)); } self.process_events(paths.clone()).await; @@ -2917,56 +3097,114 @@ impl BackgroundScanner { } } - async fn process_refresh_request(&self, paths: Vec, barrier: barrier::Sender) -> bool { - self.reload_entries_for_paths(paths, None).await; - self.send_status_update(false, Some(barrier)) + async fn process_scan_request(&self, request: ScanRequest, scanning: bool) -> bool { + log::debug!("rescanning paths {:?}", request.relative_paths); + + let root_path = self.forcibly_load_paths(&request.relative_paths).await; + let root_canonical_path = match self.fs.canonicalize(&root_path).await { + Ok(path) => path, + Err(err) => { + log::error!("failed to canonicalize root path: {}", err); + return false; + } + }; + + let abs_paths = request + .relative_paths + .into_iter() + 
.map(|path| { + if path.file_name().is_some() { + root_canonical_path.join(path) + } else { + root_canonical_path.clone() + } + }) + .collect::>(); + self.reload_entries_for_paths(root_path, root_canonical_path, abs_paths, None) + .await; + self.send_status_update(scanning, Some(request.done)) } - async fn process_events(&mut self, paths: Vec) { + async fn process_events(&mut self, abs_paths: Vec) { + log::debug!("received fs events {:?}", abs_paths); + + let root_path = self.state.lock().snapshot.abs_path.clone(); + let root_canonical_path = match self.fs.canonicalize(&root_path).await { + Ok(path) => path, + Err(err) => { + log::error!("failed to canonicalize root path: {}", err); + return; + } + }; + let (scan_job_tx, scan_job_rx) = channel::unbounded(); let paths = self - .reload_entries_for_paths(paths, Some(scan_job_tx.clone())) + .reload_entries_for_paths( + root_path, + root_canonical_path, + abs_paths, + Some(scan_job_tx.clone()), + ) .await; drop(scan_job_tx); self.scan_dirs(false, scan_job_rx).await; - self.update_ignore_statuses().await; + let (scan_job_tx, scan_job_rx) = channel::unbounded(); + self.update_ignore_statuses(scan_job_tx).await; + self.scan_dirs(false, scan_job_rx).await; { let mut state = self.state.lock(); - - if let Some(paths) = paths { - for path in paths { - self.reload_git_repo(&path, &mut *state, self.fs.as_ref()); - } + state.reload_repositories(&paths, self.fs.as_ref()); + state.snapshot.completed_scan_id = state.snapshot.scan_id; + for (_, entry_id) in mem::take(&mut state.removed_entry_ids) { + state.scanned_dirs.remove(&entry_id); } - - let mut snapshot = &mut state.snapshot; - - let mut git_repositories = mem::take(&mut snapshot.git_repositories); - git_repositories.retain(|work_directory_id, _| { - snapshot - .entry_for_id(*work_directory_id) - .map_or(false, |entry| { - snapshot.entry_for_path(entry.path.join(*DOT_GIT)).is_some() - }) - }); - snapshot.git_repositories = git_repositories; - - let mut git_repository_entries = 
mem::take(&mut snapshot.snapshot.repository_entries); - git_repository_entries.retain(|_, entry| { - snapshot - .git_repositories - .get(&entry.work_directory.0) - .is_some() - }); - snapshot.snapshot.repository_entries = git_repository_entries; - snapshot.completed_scan_id = snapshot.scan_id; } self.send_status_update(false, None); } + async fn forcibly_load_paths(&self, paths: &[Arc]) -> Arc { + let root_path; + let (scan_job_tx, mut scan_job_rx) = channel::unbounded(); + { + let mut state = self.state.lock(); + root_path = state.snapshot.abs_path.clone(); + for path in paths { + for ancestor in path.ancestors() { + if let Some(entry) = state.snapshot.entry_for_path(ancestor) { + if entry.kind == EntryKind::UnloadedDir { + let abs_path = root_path.join(ancestor); + let ignore_stack = + state.snapshot.ignore_stack_for_abs_path(&abs_path, true); + let ancestor_inodes = + state.snapshot.ancestor_inodes_for_path(&ancestor); + scan_job_tx + .try_send(ScanJob { + abs_path: abs_path.into(), + path: ancestor.into(), + ignore_stack, + scan_queue: scan_job_tx.clone(), + ancestor_inodes, + is_external: entry.is_external, + }) + .unwrap(); + state.paths_to_scan.insert(path.clone()); + break; + } + } + } + } + drop(scan_job_tx); + } + while let Some(job) = scan_job_rx.next().await { + self.scan_dir(&job).await.log_err(); + } + self.state.lock().paths_to_scan.clear(); + root_path + } + async fn scan_dirs( &self, enable_progress_updates: bool, @@ -2995,9 +3233,9 @@ impl BackgroundScanner { select_biased! { // Process any path refresh requests before moving on to process // the scan queue, so that user operations are prioritized. 
- request = self.refresh_requests_rx.recv().fuse() => { - let Ok((paths, barrier)) = request else { break }; - if !self.process_refresh_request(paths, barrier).await { + request = self.scan_requests_rx.recv().fuse() => { + let Ok(request) = request else { break }; + if !self.process_scan_request(request, true).await { return; } } @@ -3062,8 +3300,8 @@ impl BackgroundScanner { } async fn scan_dir(&self, job: &ScanJob) -> Result<()> { - let mut new_entries: Vec = Vec::new(); - let mut new_jobs: Vec> = Vec::new(); + log::debug!("scan directory {:?}", job.path); + let mut ignore_stack = job.ignore_stack.clone(); let mut new_ignore = None; let (root_abs_path, root_char_bag, next_entry_id, repository) = { @@ -3078,6 +3316,9 @@ impl BackgroundScanner { ) }; + let mut root_canonical_path = None; + let mut new_entries: Vec = Vec::new(); + let mut new_jobs: Vec> = Vec::new(); let mut child_paths = self.fs.read_dir(&job.abs_path).await?; while let Some(child_abs_path) = child_paths.next().await { let child_abs_path: Arc = match child_abs_path { @@ -3127,7 +3368,7 @@ impl BackgroundScanner { ignore_stack.is_abs_path_ignored(&entry_abs_path, entry.is_dir()); if entry.is_dir() { - if let Some(job) = new_jobs.next().expect("Missing scan job for entry") { + if let Some(job) = new_jobs.next().expect("missing scan job for entry") { job.ignore_stack = if entry.is_ignored { IgnoreStack::all() } else { @@ -3145,9 +3386,41 @@ impl BackgroundScanner { root_char_bag, ); + if job.is_external { + child_entry.is_external = true; + } else if child_metadata.is_symlink { + let canonical_path = match self.fs.canonicalize(&child_abs_path).await { + Ok(path) => path, + Err(err) => { + log::error!( + "error reading target of symlink {:?}: {:?}", + child_abs_path, + err + ); + continue; + } + }; + + // lazily canonicalize the root path in order to determine if + // symlinks point outside of the worktree. 
+ let root_canonical_path = match &root_canonical_path { + Some(path) => path, + None => match self.fs.canonicalize(&root_abs_path).await { + Ok(path) => root_canonical_path.insert(path), + Err(err) => { + log::error!("error canonicalizing root {:?}: {:?}", root_abs_path, err); + continue; + } + }, + }; + + if !canonical_path.starts_with(root_canonical_path) { + child_entry.is_external = true; + } + } + if child_entry.is_dir() { - let is_ignored = ignore_stack.is_abs_path_ignored(&child_abs_path, true); - child_entry.is_ignored = is_ignored; + child_entry.is_ignored = ignore_stack.is_abs_path_ignored(&child_abs_path, true); // Avoid recursing until crash in the case of a recursive symlink if !job.ancestor_inodes.contains(&child_entry.inode) { @@ -3157,7 +3430,8 @@ impl BackgroundScanner { new_jobs.push(Some(ScanJob { abs_path: child_abs_path, path: child_path, - ignore_stack: if is_ignored { + is_external: child_entry.is_external, + ignore_stack: if child_entry.is_ignored { IgnoreStack::all() } else { ignore_stack.clone() @@ -3187,48 +3461,67 @@ impl BackgroundScanner { new_entries.push(child_entry); } - { - let mut state = self.state.lock(); - let changed_paths = - state.populate_dir(job.path.clone(), new_entries, new_ignore, self.fs.as_ref()); - if let Err(ix) = state.changed_paths.binary_search(&job.path) { - state.changed_paths.insert(ix, job.path.clone()); - } - if let Some(changed_paths) = changed_paths { - util::extend_sorted( - &mut state.changed_paths, - changed_paths, - usize::MAX, - Ord::cmp, - ) - } - } + let mut state = self.state.lock(); + let mut new_jobs = new_jobs.into_iter(); + for entry in &mut new_entries { + state.reuse_entry_id(entry); - for new_job in new_jobs { - if let Some(new_job) = new_job { - job.scan_queue.send(new_job).await.unwrap(); + if entry.is_dir() { + let new_job = new_jobs.next().expect("missing scan job for entry"); + if state.should_scan_directory(&entry) { + if let Some(new_job) = new_job { + job.scan_queue + 
.try_send(new_job) + .expect("channel is unbounded"); + } + } else { + log::debug!("defer scanning directory {:?} {:?}", entry.path, entry.kind); + entry.kind = EntryKind::UnloadedDir; + } } } + assert!(new_jobs.next().is_none()); + state.populate_dir(&job.path, new_entries, new_ignore, self.fs.as_ref()); Ok(()) } async fn reload_entries_for_paths( &self, + root_abs_path: Arc, + root_canonical_path: PathBuf, mut abs_paths: Vec, scan_queue_tx: Option>, - ) -> Option>> { - let doing_recursive_update = scan_queue_tx.is_some(); - + ) -> Vec> { + let mut event_paths = Vec::>::with_capacity(abs_paths.len()); abs_paths.sort_unstable(); abs_paths.dedup_by(|a, b| a.starts_with(&b)); + abs_paths.retain(|abs_path| { + if let Ok(path) = abs_path.strip_prefix(&root_canonical_path) { + event_paths.push(path.into()); + true + } else { + log::error!( + "unexpected event {:?} for root path {:?}", + abs_path, + root_canonical_path + ); + false + } + }); - let root_abs_path = self.state.lock().snapshot.abs_path.clone(); - let root_canonical_path = self.fs.canonicalize(&root_abs_path).await.log_err()?; let metadata = futures::future::join_all( abs_paths .iter() - .map(|abs_path| self.fs.metadata(&abs_path)) + .map(|abs_path| async move { + let metadata = self.fs.metadata(&abs_path).await?; + if let Some(metadata) = metadata { + let canonical_path = self.fs.canonicalize(&abs_path).await?; + anyhow::Ok(Some((metadata, canonical_path))) + } else { + Ok(None) + } + }) .collect::>(), ) .await; @@ -3236,6 +3529,7 @@ impl BackgroundScanner { let mut state = self.state.lock(); let snapshot = &mut state.snapshot; let is_idle = snapshot.completed_scan_id == snapshot.scan_id; + let doing_recursive_update = scan_queue_tx.is_some(); snapshot.scan_id += 1; if is_idle && !doing_recursive_update { snapshot.completed_scan_id = snapshot.scan_id; @@ -3244,40 +3538,44 @@ impl BackgroundScanner { // Remove any entries for paths that no longer exist or are being recursively // refreshed. 
Do this before adding any new entries, so that renames can be // detected regardless of the order of the paths. - let mut event_paths = Vec::>::with_capacity(abs_paths.len()); - let mut event_metadata = Vec::<_>::with_capacity(abs_paths.len()); - for (abs_path, metadata) in abs_paths.iter().zip(metadata.iter()) { - if let Ok(path) = abs_path.strip_prefix(&root_canonical_path) { - if matches!(metadata, Ok(None)) || doing_recursive_update { - state.remove_path(path); - } - event_paths.push(path.into()); - event_metadata.push(metadata); - } else { - log::error!( - "unexpected event {:?} for root path {:?}", - abs_path, - root_canonical_path - ); + for (path, metadata) in event_paths.iter().zip(metadata.iter()) { + if matches!(metadata, Ok(None)) || doing_recursive_update { + log::trace!("remove path {:?}", path); + state.remove_path(path); } } - for (path, metadata) in event_paths.iter().cloned().zip(event_metadata.into_iter()) { + for (path, metadata) in event_paths.iter().zip(metadata.iter()) { + if let (Some(parent), true) = (path.parent(), doing_recursive_update) { + if state + .snapshot + .entry_for_path(parent) + .map_or(true, |entry| entry.kind != EntryKind::Dir) + { + log::debug!( + "ignoring event {path:?} within unloaded directory {:?}", + parent + ); + continue; + } + } + let abs_path: Arc = root_abs_path.join(&path).into(); match metadata { - Ok(Some(metadata)) => { + Ok(Some((metadata, canonical_path))) => { let ignore_stack = state .snapshot .ignore_stack_for_abs_path(&abs_path, metadata.is_dir); let mut fs_entry = Entry::new( path.clone(), - &metadata, + metadata, self.next_entry_id.as_ref(), state.snapshot.root_char_bag, ); fs_entry.is_ignored = ignore_stack.is_all(); + fs_entry.is_external = !canonical_path.starts_with(&root_canonical_path); if !fs_entry.is_ignored { if !fs_entry.is_dir() { @@ -3296,7 +3594,7 @@ impl BackgroundScanner { } } - state.insert_entry(fs_entry, self.fs.as_ref()); + let fs_entry = state.insert_entry(fs_entry, 
self.fs.as_ref()); if let Some(scan_queue_tx) = &scan_queue_tx { let mut ancestor_inodes = state.snapshot.ancestor_inodes_for_path(&path); @@ -3304,9 +3602,10 @@ impl BackgroundScanner { ancestor_inodes.insert(metadata.inode); smol::block_on(scan_queue_tx.send(ScanJob { abs_path, - path, + path: path.clone(), ignore_stack, ancestor_inodes, + is_external: fs_entry.is_external, scan_queue: scan_queue_tx.clone(), })) .unwrap(); @@ -3330,7 +3629,7 @@ impl BackgroundScanner { Ord::cmp, ); - Some(event_paths) + event_paths } fn remove_repo_path(&self, path: &Path, snapshot: &mut LocalSnapshot) -> Option<()> { @@ -3355,78 +3654,7 @@ impl BackgroundScanner { Some(()) } - fn reload_git_repo( - &self, - path: &Path, - state: &mut BackgroundScannerState, - fs: &dyn Fs, - ) -> Option<()> { - let scan_id = state.snapshot.scan_id; - - if path - .components() - .any(|component| component.as_os_str() == *DOT_GIT) - { - let (entry_id, repo_ptr) = { - let Some((entry_id, repo)) = state.snapshot.repo_for_metadata(&path) else { - let dot_git_dir = path.ancestors() - .skip_while(|ancestor| ancestor.file_name() != Some(&*DOT_GIT)) - .next()?; - - let changed_paths = state.snapshot.build_repo(dot_git_dir.into(), fs); - if let Some(changed_paths) = changed_paths { - util::extend_sorted( - &mut state.changed_paths, - changed_paths, - usize::MAX, - Ord::cmp, - ); - } - - return None; - }; - if repo.git_dir_scan_id == scan_id { - return None; - } - - (*entry_id, repo.repo_ptr.to_owned()) - }; - - let work_dir = state - .snapshot - .entry_for_id(entry_id) - .map(|entry| RepositoryWorkDirectory(entry.path.clone()))?; - - let repo = repo_ptr.lock(); - repo.reload_index(); - let branch = repo.branch_name(); - - state.snapshot.git_repositories.update(&entry_id, |entry| { - entry.git_dir_scan_id = scan_id; - }); - - state - .snapshot - .snapshot - .repository_entries - .update(&work_dir, |entry| { - entry.branch = branch.map(Into::into); - }); - - let changed_paths = 
state.snapshot.scan_statuses(repo.deref(), &work_dir); - - util::extend_sorted( - &mut state.changed_paths, - changed_paths, - usize::MAX, - Ord::cmp, - ) - } - - Some(()) - } - - async fn update_ignore_statuses(&self) { + async fn update_ignore_statuses(&self, scan_job_tx: Sender) { use futures::FutureExt as _; let mut snapshot = self.state.lock().snapshot.clone(); @@ -3474,6 +3702,7 @@ impl BackgroundScanner { abs_path: parent_abs_path, ignore_stack, ignore_queue: ignore_queue_tx.clone(), + scan_queue: scan_job_tx.clone(), })) .unwrap(); } @@ -3487,9 +3716,9 @@ impl BackgroundScanner { select_biased! { // Process any path refresh requests before moving on to process // the queue of ignore statuses. - request = self.refresh_requests_rx.recv().fuse() => { - let Ok((paths, barrier)) = request else { break }; - if !self.process_refresh_request(paths, barrier).await { + request = self.scan_requests_rx.recv().fuse() => { + let Ok(request) = request else { break }; + if !self.process_scan_request(request, true).await { return; } } @@ -3508,6 +3737,8 @@ impl BackgroundScanner { } async fn update_ignore_status(&self, job: UpdateIgnoreStatusJob, snapshot: &LocalSnapshot) { + log::trace!("update ignore status {:?}", job.abs_path); + let mut ignore_stack = job.ignore_stack; if let Some((ignore, _)) = snapshot.ignores_by_parent_abs_path.get(&job.abs_path) { ignore_stack = ignore_stack.append(job.abs_path.clone(), ignore.clone()); @@ -3518,7 +3749,7 @@ impl BackgroundScanner { let path = job.abs_path.strip_prefix(&snapshot.abs_path).unwrap(); for mut entry in snapshot.child_entries(path).cloned() { let was_ignored = entry.is_ignored; - let abs_path = snapshot.abs_path().join(&entry.path); + let abs_path: Arc = snapshot.abs_path().join(&entry.path).into(); entry.is_ignored = ignore_stack.is_abs_path_ignored(&abs_path, entry.is_dir()); if entry.is_dir() { let child_ignore_stack = if entry.is_ignored { @@ -3526,11 +3757,36 @@ impl BackgroundScanner { } else { ignore_stack.clone() 
}; + + // Scan any directories that were previously ignored and weren't + // previously scanned. + if was_ignored + && !entry.is_ignored + && !entry.is_external + && entry.kind == EntryKind::UnloadedDir + { + job.scan_queue + .try_send(ScanJob { + abs_path: abs_path.clone(), + path: entry.path.clone(), + ignore_stack: child_ignore_stack.clone(), + scan_queue: job.scan_queue.clone(), + ancestor_inodes: self + .state + .lock() + .snapshot + .ancestor_inodes_for_path(&entry.path), + is_external: false, + }) + .unwrap(); + } + job.ignore_queue .send(UpdateIgnoreStatusJob { - abs_path: abs_path.into(), + abs_path: abs_path.clone(), ignore_stack: child_ignore_stack, ignore_queue: job.ignore_queue.clone(), + scan_queue: job.scan_queue.clone(), }) .await .unwrap(); @@ -3575,6 +3831,7 @@ impl BackgroundScanner { let mut changes = Vec::new(); let mut old_paths = old_snapshot.entries_by_path.cursor::(); let mut new_paths = new_snapshot.entries_by_path.cursor::(); + let mut last_newly_loaded_dir_path = None; old_paths.next(&()); new_paths.next(&()); for path in event_paths { @@ -3622,20 +3879,33 @@ impl BackgroundScanner { changes.push((old_entry.path.clone(), old_entry.id, Removed)); changes.push((new_entry.path.clone(), new_entry.id, Added)); } else if old_entry != new_entry { - changes.push((new_entry.path.clone(), new_entry.id, Updated)); + if old_entry.kind.is_unloaded() { + last_newly_loaded_dir_path = Some(&new_entry.path); + changes.push(( + new_entry.path.clone(), + new_entry.id, + Loaded, + )); + } else { + changes.push(( + new_entry.path.clone(), + new_entry.id, + Updated, + )); + } } old_paths.next(&()); new_paths.next(&()); } Ordering::Greater => { + let is_newly_loaded = self.phase == InitialScan + || last_newly_loaded_dir_path + .as_ref() + .map_or(false, |dir| new_entry.path.starts_with(&dir)); changes.push(( new_entry.path.clone(), new_entry.id, - if self.phase == InitialScan { - Loaded - } else { - Added - }, + if is_newly_loaded { Loaded } else { Added }, 
)); new_paths.next(&()); } @@ -3646,14 +3916,14 @@ impl BackgroundScanner { old_paths.next(&()); } (None, Some(new_entry)) => { + let is_newly_loaded = self.phase == InitialScan + || last_newly_loaded_dir_path + .as_ref() + .map_or(false, |dir| new_entry.path.starts_with(&dir)); changes.push(( new_entry.path.clone(), new_entry.id, - if self.phase == InitialScan { - Loaded - } else { - Added - }, + if is_newly_loaded { Loaded } else { Added }, )); new_paths.next(&()); } @@ -3695,12 +3965,14 @@ struct ScanJob { ignore_stack: Arc, scan_queue: Sender, ancestor_inodes: TreeSet, + is_external: bool, } struct UpdateIgnoreStatusJob { abs_path: Arc, ignore_stack: Arc, ignore_queue: Sender, + scan_queue: Sender, } pub trait WorktreeHandle { @@ -3754,9 +4026,9 @@ impl WorktreeHandle for ModelHandle { struct TraversalProgress<'a> { max_path: &'a Path, count: usize, - visible_count: usize, + non_ignored_count: usize, file_count: usize, - visible_file_count: usize, + non_ignored_file_count: usize, } impl<'a> TraversalProgress<'a> { @@ -3764,8 +4036,8 @@ impl<'a> TraversalProgress<'a> { match (include_ignored, include_dirs) { (true, true) => self.count, (true, false) => self.file_count, - (false, true) => self.visible_count, - (false, false) => self.visible_file_count, + (false, true) => self.non_ignored_count, + (false, false) => self.non_ignored_file_count, } } } @@ -3774,9 +4046,9 @@ impl<'a> sum_tree::Dimension<'a, EntrySummary> for TraversalProgress<'a> { fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) { self.max_path = summary.max_path.as_ref(); self.count += summary.count; - self.visible_count += summary.visible_count; + self.non_ignored_count += summary.non_ignored_count; self.file_count += summary.file_count; - self.visible_file_count += summary.visible_file_count; + self.non_ignored_file_count += summary.non_ignored_file_count; } } @@ -3785,9 +4057,9 @@ impl<'a> Default for TraversalProgress<'a> { Self { max_path: Path::new(""), count: 0, - visible_count: 
0, + non_ignored_count: 0, file_count: 0, - visible_file_count: 0, + non_ignored_file_count: 0, } } } @@ -3982,6 +4254,7 @@ impl<'a> From<&'a Entry> for proto::Entry { mtime: Some(entry.mtime.into()), is_symlink: entry.is_symlink, is_ignored: entry.is_ignored, + is_external: entry.is_external, git_status: entry.git_status.map(|status| status.to_proto()), } } @@ -4008,6 +4281,7 @@ impl<'a> TryFrom<(&'a CharBag, proto::Entry)> for Entry { mtime: mtime.into(), is_symlink: entry.is_symlink, is_ignored: entry.is_ignored, + is_external: entry.is_external, git_status: GitFileStatus::from_proto(entry.git_status), }) } else { diff --git a/crates/project/src/worktree_tests.rs b/crates/project/src/worktree_tests.rs index 3abf660282a8663626fffb1bfbb9db7fe2147a72..553c5e2ccafa754623fb0ee85d0c63eccc8ea9e9 100644 --- a/crates/project/src/worktree_tests.rs +++ b/crates/project/src/worktree_tests.rs @@ -1,6 +1,6 @@ use crate::{ worktree::{Event, Snapshot, WorktreeHandle}, - EntryKind, PathChange, Worktree, + Entry, EntryKind, PathChange, Worktree, }; use anyhow::Result; use client::Client; @@ -8,12 +8,14 @@ use fs::{repository::GitFileStatus, FakeFs, Fs, RealFs, RemoveOptions}; use git::GITIGNORE; use gpui::{executor::Deterministic, ModelContext, Task, TestAppContext}; use parking_lot::Mutex; +use postage::stream::Stream; use pretty_assertions::assert_eq; use rand::prelude::*; use serde_json::json; use std::{ env, fmt::Write, + mem, path::{Path, PathBuf}, sync::Arc, }; @@ -34,11 +36,8 @@ async fn test_traversal(cx: &mut TestAppContext) { ) .await; - let http_client = FakeHttpClient::with_404_response(); - let client = cx.read(|cx| Client::new(http_client, cx)); - let tree = Worktree::local( - client, + build_client(cx), Path::new("/root"), true, fs, @@ -107,11 +106,8 @@ async fn test_descendent_entries(cx: &mut TestAppContext) { ) .await; - let http_client = FakeHttpClient::with_404_response(); - let client = cx.read(|cx| Client::new(http_client, cx)); - let tree = Worktree::local( 
- client, + build_client(cx), Path::new("/root"), true, fs, @@ -154,7 +150,18 @@ async fn test_descendent_entries(cx: &mut TestAppContext) { .collect::>(), vec![Path::new("g"), Path::new("g/h"),] ); + }); + + // Expand gitignored directory. + tree.read_with(cx, |tree, _| { + tree.as_local() + .unwrap() + .refresh_entries_for_paths(vec![Path::new("i/j").into()]) + }) + .recv() + .await; + tree.read_with(cx, |tree, _| { assert_eq!( tree.descendent_entries(false, false, Path::new("i")) .map(|entry| entry.path.as_ref()) @@ -196,9 +203,8 @@ async fn test_circular_symlinks(executor: Arc, cx: &mut TestAppCo fs.insert_symlink("/root/lib/a/lib", "..".into()).await; fs.insert_symlink("/root/lib/b/lib", "..".into()).await; - let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx)); let tree = Worktree::local( - client, + build_client(cx), Path::new("/root"), true, fs.clone(), @@ -257,40 +263,489 @@ async fn test_circular_symlinks(executor: Arc, cx: &mut TestAppCo } #[gpui::test] -async fn test_rescan_with_gitignore(cx: &mut TestAppContext) { - // .gitignores are handled explicitly by Zed and do not use the git - // machinery that the git_tests module checks - let parent_dir = temp_tree(json!({ - ".gitignore": "ancestor-ignored-file1\nancestor-ignored-file2\n", - "tree": { - ".git": {}, - ".gitignore": "ignored-dir\n", - "tracked-dir": { - "tracked-file1": "", - "ancestor-ignored-file1": "", +async fn test_symlinks_pointing_outside(cx: &mut TestAppContext) { + let fs = FakeFs::new(cx.background()); + fs.insert_tree( + "/root", + json!({ + "dir1": { + "deps": { + // symlinks here + }, + "src": { + "a.rs": "", + "b.rs": "", + }, }, - "ignored-dir": { - "ignored-file1": "" + "dir2": { + "src": { + "c.rs": "", + "d.rs": "", + } + }, + "dir3": { + "deps": {}, + "src": { + "e.rs": "", + "f.rs": "", + }, } - } - })); - let dir = parent_dir.path().join("tree"); + }), + ) + .await; - let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx)); 
+ // These symlinks point to directories outside of the worktree's root, dir1. + fs.insert_symlink("/root/dir1/deps/dep-dir2", "../../dir2".into()) + .await; + fs.insert_symlink("/root/dir1/deps/dep-dir3", "../../dir3".into()) + .await; let tree = Worktree::local( - client, - dir.as_path(), + build_client(cx), + Path::new("/root/dir1"), true, - Arc::new(RealFs), + fs.clone(), Default::default(), &mut cx.to_async(), ) .await .unwrap(); + cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete()) .await; - tree.flush_fs_events(cx).await; + + let tree_updates = Arc::new(Mutex::new(Vec::new())); + tree.update(cx, |_, cx| { + let tree_updates = tree_updates.clone(); + cx.subscribe(&tree, move |_, _, event, _| { + if let Event::UpdatedEntries(update) = event { + tree_updates.lock().extend( + update + .iter() + .map(|(path, _, change)| (path.clone(), *change)), + ); + } + }) + .detach(); + }); + + // The symlinked directories are not scanned by default. + tree.read_with(cx, |tree, _| { + assert_eq!( + tree.entries(true) + .map(|entry| (entry.path.as_ref(), entry.is_external)) + .collect::>(), + vec![ + (Path::new(""), false), + (Path::new("deps"), false), + (Path::new("deps/dep-dir2"), true), + (Path::new("deps/dep-dir3"), true), + (Path::new("src"), false), + (Path::new("src/a.rs"), false), + (Path::new("src/b.rs"), false), + ] + ); + + assert_eq!( + tree.entry_for_path("deps/dep-dir2").unwrap().kind, + EntryKind::UnloadedDir + ); + }); + + // Expand one of the symlinked directories. + tree.read_with(cx, |tree, _| { + tree.as_local() + .unwrap() + .refresh_entries_for_paths(vec![Path::new("deps/dep-dir3").into()]) + }) + .recv() + .await; + + // The expanded directory's contents are loaded. Subdirectories are + // not scanned yet. 
+ tree.read_with(cx, |tree, _| { + assert_eq!( + tree.entries(true) + .map(|entry| (entry.path.as_ref(), entry.is_external)) + .collect::>(), + vec![ + (Path::new(""), false), + (Path::new("deps"), false), + (Path::new("deps/dep-dir2"), true), + (Path::new("deps/dep-dir3"), true), + (Path::new("deps/dep-dir3/deps"), true), + (Path::new("deps/dep-dir3/src"), true), + (Path::new("src"), false), + (Path::new("src/a.rs"), false), + (Path::new("src/b.rs"), false), + ] + ); + }); + assert_eq!( + mem::take(&mut *tree_updates.lock()), + &[ + (Path::new("deps/dep-dir3").into(), PathChange::Loaded), + (Path::new("deps/dep-dir3/deps").into(), PathChange::Loaded), + (Path::new("deps/dep-dir3/src").into(), PathChange::Loaded) + ] + ); + + // Expand a subdirectory of one of the symlinked directories. + tree.read_with(cx, |tree, _| { + tree.as_local() + .unwrap() + .refresh_entries_for_paths(vec![Path::new("deps/dep-dir3/src").into()]) + }) + .recv() + .await; + + // The expanded subdirectory's contents are loaded. 
+ tree.read_with(cx, |tree, _| { + assert_eq!( + tree.entries(true) + .map(|entry| (entry.path.as_ref(), entry.is_external)) + .collect::>(), + vec![ + (Path::new(""), false), + (Path::new("deps"), false), + (Path::new("deps/dep-dir2"), true), + (Path::new("deps/dep-dir3"), true), + (Path::new("deps/dep-dir3/deps"), true), + (Path::new("deps/dep-dir3/src"), true), + (Path::new("deps/dep-dir3/src/e.rs"), true), + (Path::new("deps/dep-dir3/src/f.rs"), true), + (Path::new("src"), false), + (Path::new("src/a.rs"), false), + (Path::new("src/b.rs"), false), + ] + ); + }); + + assert_eq!( + mem::take(&mut *tree_updates.lock()), + &[ + (Path::new("deps/dep-dir3/src").into(), PathChange::Loaded), + ( + Path::new("deps/dep-dir3/src/e.rs").into(), + PathChange::Loaded + ), + ( + Path::new("deps/dep-dir3/src/f.rs").into(), + PathChange::Loaded + ) + ] + ); +} + +#[gpui::test] +async fn test_open_gitignored_files(cx: &mut TestAppContext) { + let fs = FakeFs::new(cx.background()); + fs.insert_tree( + "/root", + json!({ + ".gitignore": "node_modules\n", + "one": { + "node_modules": { + "a": { + "a1.js": "a1", + "a2.js": "a2", + }, + "b": { + "b1.js": "b1", + "b2.js": "b2", + }, + }, + }, + "two": { + "x.js": "", + "y.js": "", + }, + }), + ) + .await; + + let tree = Worktree::local( + build_client(cx), + Path::new("/root"), + true, + fs.clone(), + Default::default(), + &mut cx.to_async(), + ) + .await + .unwrap(); + + cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete()) + .await; + + tree.read_with(cx, |tree, _| { + assert_eq!( + tree.entries(true) + .map(|entry| (entry.path.as_ref(), entry.is_ignored)) + .collect::>(), + vec![ + (Path::new(""), false), + (Path::new(".gitignore"), false), + (Path::new("one"), false), + (Path::new("one/node_modules"), true), + (Path::new("two"), false), + (Path::new("two/x.js"), false), + (Path::new("two/y.js"), false), + ] + ); + }); + + // Open a file that is nested inside of a gitignored directory that + // has not yet been expanded. 
+ let prev_read_dir_count = fs.read_dir_call_count(); + let buffer = tree + .update(cx, |tree, cx| { + tree.as_local_mut() + .unwrap() + .load_buffer(0, "one/node_modules/b/b1.js".as_ref(), cx) + }) + .await + .unwrap(); + + tree.read_with(cx, |tree, cx| { + assert_eq!( + tree.entries(true) + .map(|entry| (entry.path.as_ref(), entry.is_ignored)) + .collect::>(), + vec![ + (Path::new(""), false), + (Path::new(".gitignore"), false), + (Path::new("one"), false), + (Path::new("one/node_modules"), true), + (Path::new("one/node_modules/a"), true), + (Path::new("one/node_modules/b"), true), + (Path::new("one/node_modules/b/b1.js"), true), + (Path::new("one/node_modules/b/b2.js"), true), + (Path::new("two"), false), + (Path::new("two/x.js"), false), + (Path::new("two/y.js"), false), + ] + ); + + assert_eq!( + buffer.read(cx).file().unwrap().path().as_ref(), + Path::new("one/node_modules/b/b1.js") + ); + + // Only the newly-expanded directories are scanned. + assert_eq!(fs.read_dir_call_count() - prev_read_dir_count, 2); + }); + + // Open another file in a different subdirectory of the same + // gitignored directory. 
+ let prev_read_dir_count = fs.read_dir_call_count(); + let buffer = tree + .update(cx, |tree, cx| { + tree.as_local_mut() + .unwrap() + .load_buffer(0, "one/node_modules/a/a2.js".as_ref(), cx) + }) + .await + .unwrap(); + + tree.read_with(cx, |tree, cx| { + assert_eq!( + tree.entries(true) + .map(|entry| (entry.path.as_ref(), entry.is_ignored)) + .collect::>(), + vec![ + (Path::new(""), false), + (Path::new(".gitignore"), false), + (Path::new("one"), false), + (Path::new("one/node_modules"), true), + (Path::new("one/node_modules/a"), true), + (Path::new("one/node_modules/a/a1.js"), true), + (Path::new("one/node_modules/a/a2.js"), true), + (Path::new("one/node_modules/b"), true), + (Path::new("one/node_modules/b/b1.js"), true), + (Path::new("one/node_modules/b/b2.js"), true), + (Path::new("two"), false), + (Path::new("two/x.js"), false), + (Path::new("two/y.js"), false), + ] + ); + + assert_eq!( + buffer.read(cx).file().unwrap().path().as_ref(), + Path::new("one/node_modules/a/a2.js") + ); + + // Only the newly-expanded directory is scanned. + assert_eq!(fs.read_dir_call_count() - prev_read_dir_count, 1); + }); +} + +#[gpui::test] +async fn test_dirs_no_longer_ignored(cx: &mut TestAppContext) { + let fs = FakeFs::new(cx.background()); + fs.insert_tree( + "/root", + json!({ + ".gitignore": "node_modules\n", + "a": { + "a.js": "", + }, + "b": { + "b.js": "", + }, + "node_modules": { + "c": { + "c.js": "", + }, + "d": { + "d.js": "", + "e": { + "e1.js": "", + "e2.js": "", + }, + "f": { + "f1.js": "", + "f2.js": "", + } + }, + }, + }), + ) + .await; + + let tree = Worktree::local( + build_client(cx), + Path::new("/root"), + true, + fs.clone(), + Default::default(), + &mut cx.to_async(), + ) + .await + .unwrap(); + + cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete()) + .await; + + // Open a file within the gitignored directory, forcing some of its + // subdirectories to be read, but not all. 
+ let read_dir_count_1 = fs.read_dir_call_count(); + tree.read_with(cx, |tree, _| { + tree.as_local() + .unwrap() + .refresh_entries_for_paths(vec![Path::new("node_modules/d/d.js").into()]) + }) + .recv() + .await; + + // Those subdirectories are now loaded. + tree.read_with(cx, |tree, _| { + assert_eq!( + tree.entries(true) + .map(|e| (e.path.as_ref(), e.is_ignored)) + .collect::>(), + &[ + (Path::new(""), false), + (Path::new(".gitignore"), false), + (Path::new("a"), false), + (Path::new("a/a.js"), false), + (Path::new("b"), false), + (Path::new("b/b.js"), false), + (Path::new("node_modules"), true), + (Path::new("node_modules/c"), true), + (Path::new("node_modules/d"), true), + (Path::new("node_modules/d/d.js"), true), + (Path::new("node_modules/d/e"), true), + (Path::new("node_modules/d/f"), true), + ] + ); + }); + let read_dir_count_2 = fs.read_dir_call_count(); + assert_eq!(read_dir_count_2 - read_dir_count_1, 2); + + // Update the gitignore so that node_modules is no longer ignored, + // but a subdirectory is ignored + fs.save("/root/.gitignore".as_ref(), &"e".into(), Default::default()) + .await + .unwrap(); + cx.foreground().run_until_parked(); + + // All of the directories that are no longer ignored are now loaded. 
+ tree.read_with(cx, |tree, _| { + assert_eq!( + tree.entries(true) + .map(|e| (e.path.as_ref(), e.is_ignored)) + .collect::>(), + &[ + (Path::new(""), false), + (Path::new(".gitignore"), false), + (Path::new("a"), false), + (Path::new("a/a.js"), false), + (Path::new("b"), false), + (Path::new("b/b.js"), false), + // This directory is no longer ignored + (Path::new("node_modules"), false), + (Path::new("node_modules/c"), false), + (Path::new("node_modules/c/c.js"), false), + (Path::new("node_modules/d"), false), + (Path::new("node_modules/d/d.js"), false), + // This subdirectory is now ignored + (Path::new("node_modules/d/e"), true), + (Path::new("node_modules/d/f"), false), + (Path::new("node_modules/d/f/f1.js"), false), + (Path::new("node_modules/d/f/f2.js"), false), + ] + ); + }); + + // Each of the newly-loaded directories is scanned only once. + let read_dir_count_3 = fs.read_dir_call_count(); + assert_eq!(read_dir_count_3 - read_dir_count_2, 2); +} + +#[gpui::test(iterations = 10)] +async fn test_rescan_with_gitignore(cx: &mut TestAppContext) { + let fs = FakeFs::new(cx.background()); + fs.insert_tree( + "/root", + json!({ + ".gitignore": "ancestor-ignored-file1\nancestor-ignored-file2\n", + "tree": { + ".git": {}, + ".gitignore": "ignored-dir\n", + "tracked-dir": { + "tracked-file1": "", + "ancestor-ignored-file1": "", + }, + "ignored-dir": { + "ignored-file1": "" + } + } + }), + ) + .await; + + let tree = Worktree::local( + build_client(cx), + "/root/tree".as_ref(), + true, + fs.clone(), + Default::default(), + &mut cx.to_async(), + ) + .await + .unwrap(); + cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete()) + .await; + + tree.read_with(cx, |tree, _| { + tree.as_local() + .unwrap() + .refresh_entries_for_paths(vec![Path::new("ignored-dir").into()]) + }) + .recv() + .await; + cx.read(|cx| { let tree = tree.read(cx); assert!( @@ -311,10 +766,26 @@ async fn test_rescan_with_gitignore(cx: &mut TestAppContext) { ); }); - 
std::fs::write(dir.join("tracked-dir/tracked-file2"), "").unwrap(); - std::fs::write(dir.join("tracked-dir/ancestor-ignored-file2"), "").unwrap(); - std::fs::write(dir.join("ignored-dir/ignored-file2"), "").unwrap(); - tree.flush_fs_events(cx).await; + fs.create_file( + "/root/tree/tracked-dir/tracked-file2".as_ref(), + Default::default(), + ) + .await + .unwrap(); + fs.create_file( + "/root/tree/tracked-dir/ancestor-ignored-file2".as_ref(), + Default::default(), + ) + .await + .unwrap(); + fs.create_file( + "/root/tree/ignored-dir/ignored-file2".as_ref(), + Default::default(), + ) + .await + .unwrap(); + + cx.foreground().run_until_parked(); cx.read(|cx| { let tree = tree.read(cx); assert!( @@ -346,10 +817,8 @@ async fn test_write_file(cx: &mut TestAppContext) { "ignored-dir": {} })); - let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx)); - let tree = Worktree::local( - client, + build_client(cx), dir.path(), true, Arc::new(RealFs), @@ -393,8 +862,6 @@ async fn test_write_file(cx: &mut TestAppContext) { #[gpui::test(iterations = 30)] async fn test_create_directory_during_initial_scan(cx: &mut TestAppContext) { - let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx)); - let fs = FakeFs::new(cx.background()); fs.insert_tree( "/root", @@ -407,7 +874,7 @@ async fn test_create_directory_during_initial_scan(cx: &mut TestAppContext) { .await; let tree = Worktree::local( - client, + build_client(cx), "/root".as_ref(), true, fs, @@ -472,9 +939,8 @@ async fn test_random_worktree_operations_during_initial_scan( } log::info!("generated initial tree"); - let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx)); let worktree = Worktree::local( - client.clone(), + build_client(cx), root_dir, true, fs.clone(), @@ -506,7 +972,7 @@ async fn test_random_worktree_operations_during_initial_scan( .await .log_err(); worktree.read_with(cx, |tree, _| { - tree.as_local().unwrap().snapshot().check_invariants() + 
tree.as_local().unwrap().snapshot().check_invariants(true) }); if rng.gen_bool(0.6) { @@ -523,7 +989,7 @@ async fn test_random_worktree_operations_during_initial_scan( let final_snapshot = worktree.read_with(cx, |tree, _| { let tree = tree.as_local().unwrap(); let snapshot = tree.snapshot(); - snapshot.check_invariants(); + snapshot.check_invariants(true); snapshot }); @@ -562,9 +1028,8 @@ async fn test_random_worktree_changes(cx: &mut TestAppContext, mut rng: StdRng) } log::info!("generated initial tree"); - let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx)); let worktree = Worktree::local( - client.clone(), + build_client(cx), root_dir, true, fs.clone(), @@ -627,12 +1092,17 @@ async fn test_random_worktree_changes(cx: &mut TestAppContext, mut rng: StdRng) log::info!("quiescing"); fs.as_fake().flush_events(usize::MAX); cx.foreground().run_until_parked(); + let snapshot = worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot()); - snapshot.check_invariants(); + snapshot.check_invariants(true); + let expanded_paths = snapshot + .expanded_entries() + .map(|e| e.path.clone()) + .collect::>(); { let new_worktree = Worktree::local( - client.clone(), + build_client(cx), root_dir, true, fs.clone(), @@ -644,6 +1114,14 @@ async fn test_random_worktree_changes(cx: &mut TestAppContext, mut rng: StdRng) new_worktree .update(cx, |tree, _| tree.as_local_mut().unwrap().scan_complete()) .await; + new_worktree + .update(cx, |tree, _| { + tree.as_local_mut() + .unwrap() + .refresh_entries_for_paths(expanded_paths) + }) + .recv() + .await; let new_snapshot = new_worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot()); assert_eq!( @@ -660,11 +1138,25 @@ async fn test_random_worktree_changes(cx: &mut TestAppContext, mut rng: StdRng) } assert_eq!( - prev_snapshot.entries(true).collect::>(), - snapshot.entries(true).collect::>(), + prev_snapshot + .entries(true) + .map(ignore_pending_dir) + .collect::>(), + snapshot + .entries(true) 
+ .map(ignore_pending_dir) + .collect::>(), "wrong updates after snapshot {i}: {updates:#?}", ); } + + fn ignore_pending_dir(entry: &Entry) -> Entry { + let mut entry = entry.clone(); + if entry.kind.is_dir() { + entry.kind = EntryKind::Dir + } + entry + } } // The worktree's `UpdatedEntries` event can be used to follow along with @@ -679,7 +1171,6 @@ fn check_worktree_change_events(tree: &mut Worktree, cx: &mut ModelContext ix, }; match change_type { - PathChange::Loaded => entries.insert(ix, entry.unwrap()), PathChange::Added => entries.insert(ix, entry.unwrap()), PathChange::Removed => drop(entries.remove(ix)), PathChange::Updated => { @@ -688,7 +1179,7 @@ fn check_worktree_change_events(tree: &mut Worktree, cx: &mut ModelContext { + PathChange::AddedOrUpdated | PathChange::Loaded => { let entry = entry.unwrap(); if entries.get(ix).map(|e| &e.path) == Some(&entry.path) { *entries.get_mut(ix).unwrap() = entry; @@ -947,10 +1438,8 @@ async fn test_rename_work_directory(cx: &mut TestAppContext) { })); let root_path = root.path(); - let http_client = FakeHttpClient::with_404_response(); - let client = cx.read(|cx| Client::new(http_client, cx)); let tree = Worktree::local( - client, + build_client(cx), root_path, true, Arc::new(RealFs), @@ -1026,10 +1515,8 @@ async fn test_git_repository_for_path(cx: &mut TestAppContext) { }, })); - let http_client = FakeHttpClient::with_404_response(); - let client = cx.read(|cx| Client::new(http_client, cx)); let tree = Worktree::local( - client, + build_client(cx), root.path(), true, Arc::new(RealFs), @@ -1150,10 +1637,8 @@ async fn test_git_status(deterministic: Arc, cx: &mut TestAppCont })); - let http_client = FakeHttpClient::with_404_response(); - let client = cx.read(|cx| Client::new(http_client, cx)); let tree = Worktree::local( - client, + build_client(cx), root.path(), true, Arc::new(RealFs), @@ -1357,10 +1842,8 @@ async fn test_propagate_git_statuses(cx: &mut TestAppContext) { ], ); - let http_client = 
FakeHttpClient::with_404_response(); - let client = cx.read(|cx| Client::new(http_client, cx)); let tree = Worktree::local( - client, + build_client(cx), Path::new("/root"), true, fs.clone(), @@ -1439,6 +1922,11 @@ async fn test_propagate_git_statuses(cx: &mut TestAppContext) { } } +fn build_client(cx: &mut TestAppContext) -> Arc { + let http_client = FakeHttpClient::with_404_response(); + cx.read(|cx| Client::new(http_client, cx)) +} + #[track_caller] fn git_init(path: &Path) -> git2::Repository { git2::Repository::init(path).expect("Failed to initialize git repository") diff --git a/crates/project_panel/src/project_panel.rs b/crates/project_panel/src/project_panel.rs index 110bdc846bbc4ff602aea28f17d101f86e76bf25..3f80e023176003646657f10e1fbb60262de399f0 100644 --- a/crates/project_panel/src/project_panel.rs +++ b/crates/project_panel/src/project_panel.rs @@ -411,17 +411,23 @@ impl ProjectPanel { fn expand_selected_entry(&mut self, _: &ExpandSelectedEntry, cx: &mut ViewContext) { if let Some((worktree, entry)) = self.selected_entry(cx) { if entry.is_dir() { + let worktree_id = worktree.id(); + let entry_id = entry.id; let expanded_dir_ids = - if let Some(expanded_dir_ids) = self.expanded_dir_ids.get_mut(&worktree.id()) { + if let Some(expanded_dir_ids) = self.expanded_dir_ids.get_mut(&worktree_id) { expanded_dir_ids } else { return; }; - match expanded_dir_ids.binary_search(&entry.id) { + match expanded_dir_ids.binary_search(&entry_id) { Ok(_) => self.select_next(&SelectNext, cx), Err(ix) => { - expanded_dir_ids.insert(ix, entry.id); + self.project.update(cx, |project, cx| { + project.expand_entry(worktree_id, entry_id, cx); + }); + + expanded_dir_ids.insert(ix, entry_id); self.update_visible_entries(None, cx); cx.notify(); } @@ -432,18 +438,20 @@ impl ProjectPanel { fn collapse_selected_entry(&mut self, _: &CollapseSelectedEntry, cx: &mut ViewContext) { if let Some((worktree, mut entry)) = self.selected_entry(cx) { + let worktree_id = worktree.id(); let 
expanded_dir_ids = - if let Some(expanded_dir_ids) = self.expanded_dir_ids.get_mut(&worktree.id()) { + if let Some(expanded_dir_ids) = self.expanded_dir_ids.get_mut(&worktree_id) { expanded_dir_ids } else { return; }; loop { - match expanded_dir_ids.binary_search(&entry.id) { + let entry_id = entry.id; + match expanded_dir_ids.binary_search(&entry_id) { Ok(ix) => { expanded_dir_ids.remove(ix); - self.update_visible_entries(Some((worktree.id(), entry.id)), cx); + self.update_visible_entries(Some((worktree_id, entry_id)), cx); cx.notify(); break; } @@ -464,14 +472,17 @@ impl ProjectPanel { fn toggle_expanded(&mut self, entry_id: ProjectEntryId, cx: &mut ViewContext) { if let Some(worktree_id) = self.project.read(cx).worktree_id_for_entry(entry_id, cx) { if let Some(expanded_dir_ids) = self.expanded_dir_ids.get_mut(&worktree_id) { - match expanded_dir_ids.binary_search(&entry_id) { - Ok(ix) => { - expanded_dir_ids.remove(ix); - } - Err(ix) => { - expanded_dir_ids.insert(ix, entry_id); + self.project.update(cx, |project, cx| { + match expanded_dir_ids.binary_search(&entry_id) { + Ok(ix) => { + expanded_dir_ids.remove(ix); + } + Err(ix) => { + project.expand_entry(worktree_id, entry_id, cx); + expanded_dir_ids.insert(ix, entry_id); + } } - } + }); self.update_visible_entries(Some((worktree_id, entry_id)), cx); cx.focus_self(); cx.notify(); @@ -939,10 +950,19 @@ impl ProjectPanel { } fn selected_entry<'a>(&self, cx: &'a AppContext) -> Option<(&'a Worktree, &'a project::Entry)> { + let (worktree, entry) = self.selected_entry_handle(cx)?; + Some((worktree.read(cx), entry)) + } + + fn selected_entry_handle<'a>( + &self, + cx: &'a AppContext, + ) -> Option<(ModelHandle, &'a project::Entry)> { let selection = self.selection?; let project = self.project.read(cx); - let worktree = project.worktree_for_id(selection.worktree_id, cx)?.read(cx); - Some((worktree, worktree.entry_for_id(selection.entry_id)?)) + let worktree = project.worktree_for_id(selection.worktree_id, cx)?; + let 
entry = worktree.read(cx).entry_for_id(selection.entry_id)?; + Some((worktree, entry)) } fn update_visible_entries( @@ -1003,6 +1023,7 @@ impl ProjectPanel { mtime: entry.mtime, is_symlink: false, is_ignored: false, + is_external: false, git_status: entry.git_status, }); } @@ -1059,29 +1080,31 @@ impl ProjectPanel { entry_id: ProjectEntryId, cx: &mut ViewContext, ) { - let project = self.project.read(cx); - if let Some((worktree, expanded_dir_ids)) = project - .worktree_for_id(worktree_id, cx) - .zip(self.expanded_dir_ids.get_mut(&worktree_id)) - { - let worktree = worktree.read(cx); + self.project.update(cx, |project, cx| { + if let Some((worktree, expanded_dir_ids)) = project + .worktree_for_id(worktree_id, cx) + .zip(self.expanded_dir_ids.get_mut(&worktree_id)) + { + project.expand_entry(worktree_id, entry_id, cx); + let worktree = worktree.read(cx); - if let Some(mut entry) = worktree.entry_for_id(entry_id) { - loop { - if let Err(ix) = expanded_dir_ids.binary_search(&entry.id) { - expanded_dir_ids.insert(ix, entry.id); - } + if let Some(mut entry) = worktree.entry_for_id(entry_id) { + loop { + if let Err(ix) = expanded_dir_ids.binary_search(&entry.id) { + expanded_dir_ids.insert(ix, entry.id); + } - if let Some(parent_entry) = - entry.path.parent().and_then(|p| worktree.entry_for_path(p)) - { - entry = parent_entry; - } else { - break; + if let Some(parent_entry) = + entry.path.parent().and_then(|p| worktree.entry_for_path(p)) + { + entry = parent_entry; + } else { + break; + } } } } - } + }); } fn for_each_visible_entry( @@ -1191,7 +1214,7 @@ impl ProjectPanel { Flex::row() .with_child( - if kind == EntryKind::Dir { + if kind.is_dir() { if details.is_expanded { Svg::new("icons/chevron_down_8.svg").with_color(style.icon_color) } else { @@ -1288,7 +1311,7 @@ impl ProjectPanel { }) .on_click(MouseButton::Left, move |event, this, cx| { if !show_editor { - if kind == EntryKind::Dir { + if kind.is_dir() { this.toggle_expanded(entry_id, cx); } else { 
this.open_entry(entry_id, event.click_count > 1, cx); @@ -2349,7 +2372,7 @@ mod tests { } let indent = " ".repeat(details.depth); - let icon = if matches!(details.kind, EntryKind::Dir | EntryKind::PendingDir) { + let icon = if details.kind.is_dir() { if details.is_expanded { "v " } else { diff --git a/crates/rpc/proto/zed.proto b/crates/rpc/proto/zed.proto index ce4dd7f7cf5514fa56aa4a62f1ed4553a9273b54..2bce1ce1e328bf2302059ad476bc37adb30e079a 100644 --- a/crates/rpc/proto/zed.proto +++ b/crates/rpc/proto/zed.proto @@ -63,6 +63,8 @@ message Envelope { CopyProjectEntry copy_project_entry = 47; DeleteProjectEntry delete_project_entry = 48; ProjectEntryResponse project_entry_response = 49; + ExpandProjectEntry expand_project_entry = 114; + ExpandProjectEntryResponse expand_project_entry_response = 115; UpdateDiagnosticSummary update_diagnostic_summary = 50; StartLanguageServer start_language_server = 51; @@ -372,6 +374,15 @@ message DeleteProjectEntry { uint64 entry_id = 2; } +message ExpandProjectEntry { + uint64 project_id = 1; + uint64 entry_id = 2; +} + +message ExpandProjectEntryResponse { + uint64 worktree_scan_id = 1; +} + message ProjectEntryResponse { Entry entry = 1; uint64 worktree_scan_id = 2; @@ -1005,7 +1016,8 @@ message Entry { Timestamp mtime = 5; bool is_symlink = 6; bool is_ignored = 7; - optional GitStatus git_status = 8; + bool is_external = 8; + optional GitStatus git_status = 9; } message RepositoryEntry { diff --git a/crates/rpc/src/proto.rs b/crates/rpc/src/proto.rs index 13794ea64dad1446e579dd00806ccb4afda4758b..4532e798e72d324c807de07dcb7166edba3e9bac 100644 --- a/crates/rpc/src/proto.rs +++ b/crates/rpc/src/proto.rs @@ -150,6 +150,7 @@ messages!( (DeclineCall, Foreground), (DeleteProjectEntry, Foreground), (Error, Foreground), + (ExpandProjectEntry, Foreground), (Follow, Foreground), (FollowResponse, Foreground), (FormatBuffers, Foreground), @@ -200,6 +201,7 @@ messages!( (Ping, Foreground), (PrepareRename, Background), 
(PrepareRenameResponse, Background), + (ExpandProjectEntryResponse, Foreground), (ProjectEntryResponse, Foreground), (RejoinRoom, Foreground), (RejoinRoomResponse, Foreground), @@ -255,6 +257,7 @@ request_messages!( (CreateRoom, CreateRoomResponse), (DeclineCall, Ack), (DeleteProjectEntry, ProjectEntryResponse), + (ExpandProjectEntry, ExpandProjectEntryResponse), (Follow, FollowResponse), (FormatBuffers, FormatBuffersResponse), (GetChannelMessages, GetChannelMessagesResponse), @@ -311,6 +314,7 @@ entity_messages!( CreateBufferForPeer, CreateProjectEntry, DeleteProjectEntry, + ExpandProjectEntry, Follow, FormatBuffers, GetCodeActions, diff --git a/crates/rpc/src/rpc.rs b/crates/rpc/src/rpc.rs index 8b101670918ad42dc58e9039f8150fb005a1595b..6b430d90e46072a6f885b60a4e912978ed26c6a2 100644 --- a/crates/rpc/src/rpc.rs +++ b/crates/rpc/src/rpc.rs @@ -6,4 +6,4 @@ pub use conn::Connection; pub use peer::*; mod macros; -pub const PROTOCOL_VERSION: u32 = 58; +pub const PROTOCOL_VERSION: u32 = 59;