Detailed changes
@@ -74,6 +74,7 @@ CREATE TABLE "worktree_entries" (
"mtime_seconds" INTEGER NOT NULL,
"mtime_nanos" INTEGER NOT NULL,
"is_symlink" BOOL NOT NULL,
+ "is_external" BOOL NOT NULL,
"is_ignored" BOOL NOT NULL,
"is_deleted" BOOL NOT NULL,
"git_status" INTEGER,
@@ -0,0 +1,2 @@
+ALTER TABLE "worktree_entries"
+ADD "is_external" BOOL NOT NULL DEFAULT FALSE;
@@ -1539,6 +1539,7 @@ impl Database {
}),
is_symlink: db_entry.is_symlink,
is_ignored: db_entry.is_ignored,
+ is_external: db_entry.is_external,
git_status: db_entry.git_status.map(|status| status as i32),
});
}
@@ -2349,6 +2350,7 @@ impl Database {
mtime_nanos: ActiveValue::set(mtime.nanos as i32),
is_symlink: ActiveValue::set(entry.is_symlink),
is_ignored: ActiveValue::set(entry.is_ignored),
+ is_external: ActiveValue::set(entry.is_external),
git_status: ActiveValue::set(entry.git_status.map(|status| status as i64)),
is_deleted: ActiveValue::set(false),
scan_id: ActiveValue::set(update.scan_id as i64),
@@ -2705,6 +2707,7 @@ impl Database {
}),
is_symlink: db_entry.is_symlink,
is_ignored: db_entry.is_ignored,
+ is_external: db_entry.is_external,
git_status: db_entry.git_status.map(|status| status as i32),
});
}
@@ -18,6 +18,7 @@ pub struct Model {
pub git_status: Option<i64>,
pub is_symlink: bool,
pub is_ignored: bool,
+ pub is_external: bool,
pub is_deleted: bool,
pub scan_id: i64,
}
@@ -224,6 +224,7 @@ impl Server {
.add_request_handler(forward_project_request::<proto::RenameProjectEntry>)
.add_request_handler(forward_project_request::<proto::CopyProjectEntry>)
.add_request_handler(forward_project_request::<proto::DeleteProjectEntry>)
+ .add_request_handler(forward_project_request::<proto::ExpandProjectEntry>)
.add_request_handler(forward_project_request::<proto::OnTypeFormatting>)
.add_message_handler(create_buffer_for_peer)
.add_request_handler(update_buffer)
@@ -1266,6 +1266,27 @@ async fn test_share_project(
let client_b_collaborator = project.collaborators().get(&client_b_peer_id).unwrap();
assert_eq!(client_b_collaborator.replica_id, replica_id_b);
});
+ project_b.read_with(cx_b, |project, cx| {
+ let worktree = project.worktrees(cx).next().unwrap().read(cx);
+ assert_eq!(
+ worktree.paths().map(AsRef::as_ref).collect::<Vec<_>>(),
+ [
+ Path::new(".gitignore"),
+ Path::new("a.txt"),
+ Path::new("b.txt"),
+ Path::new("ignored-dir"),
+ ]
+ );
+ });
+
+ project_b
+ .update(cx_b, |project, cx| {
+ let worktree = project.worktrees(cx).next().unwrap();
+ let entry = worktree.read(cx).entry_for_path("ignored-dir").unwrap();
+ project.expand_entry(worktree_id, entry.id, cx).unwrap()
+ })
+ .await
+ .unwrap();
project_b.read_with(cx_b, |project, cx| {
let worktree = project.worktrees(cx).next().unwrap().read(cx);
assert_eq!(
@@ -32,5 +32,8 @@ serde_json.workspace = true
log.workspace = true
libc = "0.2"
+[dev-dependencies]
+gpui = { path = "../gpui", features = ["test-support"] }
+
[features]
test-support = []
@@ -108,6 +108,7 @@ pub trait Fs: Send + Sync {
async fn canonicalize(&self, path: &Path) -> Result<PathBuf>;
async fn is_file(&self, path: &Path) -> bool;
async fn metadata(&self, path: &Path) -> Result<Option<Metadata>>;
+ async fn read_link(&self, path: &Path) -> Result<PathBuf>;
async fn read_dir(
&self,
path: &Path,
@@ -323,6 +324,11 @@ impl Fs for RealFs {
}))
}
+ async fn read_link(&self, path: &Path) -> Result<PathBuf> {
+ let path = smol::fs::read_link(path).await?;
+ Ok(path)
+ }
+
async fn read_dir(
&self,
path: &Path,
@@ -382,6 +388,7 @@ struct FakeFsState {
event_txs: Vec<smol::channel::Sender<Vec<fsevent::Event>>>,
events_paused: bool,
buffered_events: Vec<fsevent::Event>,
+ read_dir_call_count: usize,
}
#[cfg(any(test, feature = "test-support"))]
@@ -407,46 +414,51 @@ enum FakeFsEntry {
impl FakeFsState {
fn read_path<'a>(&'a self, target: &Path) -> Result<Arc<Mutex<FakeFsEntry>>> {
Ok(self
- .try_read_path(target)
+ .try_read_path(target, true)
.ok_or_else(|| anyhow!("path does not exist: {}", target.display()))?
.0)
}
- fn try_read_path<'a>(&'a self, target: &Path) -> Option<(Arc<Mutex<FakeFsEntry>>, PathBuf)> {
+ fn try_read_path<'a>(
+ &'a self,
+ target: &Path,
+ follow_symlink: bool,
+ ) -> Option<(Arc<Mutex<FakeFsEntry>>, PathBuf)> {
let mut path = target.to_path_buf();
- let mut real_path = PathBuf::new();
+ let mut canonical_path = PathBuf::new();
let mut entry_stack = Vec::new();
'outer: loop {
- let mut path_components = path.components().collect::<collections::VecDeque<_>>();
- while let Some(component) = path_components.pop_front() {
+ let mut path_components = path.components().peekable();
+ while let Some(component) = path_components.next() {
match component {
Component::Prefix(_) => panic!("prefix paths aren't supported"),
Component::RootDir => {
entry_stack.clear();
entry_stack.push(self.root.clone());
- real_path.clear();
- real_path.push("/");
+ canonical_path.clear();
+ canonical_path.push("/");
}
Component::CurDir => {}
Component::ParentDir => {
entry_stack.pop()?;
- real_path.pop();
+ canonical_path.pop();
}
Component::Normal(name) => {
let current_entry = entry_stack.last().cloned()?;
let current_entry = current_entry.lock();
if let FakeFsEntry::Dir { entries, .. } = &*current_entry {
let entry = entries.get(name.to_str().unwrap()).cloned()?;
- let _entry = entry.lock();
- if let FakeFsEntry::Symlink { target, .. } = &*_entry {
- let mut target = target.clone();
- target.extend(path_components);
- path = target;
- continue 'outer;
- } else {
- entry_stack.push(entry.clone());
- real_path.push(name);
+ if path_components.peek().is_some() || follow_symlink {
+ let entry = entry.lock();
+ if let FakeFsEntry::Symlink { target, .. } = &*entry {
+ let mut target = target.clone();
+ target.extend(path_components);
+ path = target;
+ continue 'outer;
+ }
}
+ entry_stack.push(entry.clone());
+ canonical_path.push(name);
} else {
return None;
}
@@ -455,7 +467,7 @@ impl FakeFsState {
}
break;
}
- entry_stack.pop().map(|entry| (entry, real_path))
+ Some((entry_stack.pop()?, canonical_path))
}
fn write_path<Fn, T>(&self, path: &Path, callback: Fn) -> Result<T>
@@ -525,6 +537,7 @@ impl FakeFs {
event_txs: Default::default(),
buffered_events: Vec::new(),
events_paused: false,
+ read_dir_call_count: 0,
}),
})
}
@@ -761,6 +774,10 @@ impl FakeFs {
result
}
+ pub fn read_dir_call_count(&self) -> usize {
+ self.state.lock().read_dir_call_count
+ }
+
async fn simulate_random_delay(&self) {
self.executor
.upgrade()
@@ -776,6 +793,10 @@ impl FakeFsEntry {
matches!(self, Self::File { .. })
}
+ fn is_symlink(&self) -> bool {
+ matches!(self, Self::Symlink { .. })
+ }
+
fn file_content(&self, path: &Path) -> Result<&String> {
if let Self::File { content, .. } = self {
Ok(content)
@@ -1056,8 +1077,8 @@ impl Fs for FakeFs {
let path = normalize_path(path);
self.simulate_random_delay().await;
let state = self.state.lock();
- if let Some((_, real_path)) = state.try_read_path(&path) {
- Ok(real_path)
+ if let Some((_, canonical_path)) = state.try_read_path(&path, true) {
+ Ok(canonical_path)
} else {
Err(anyhow!("path does not exist: {}", path.display()))
}
@@ -1067,7 +1088,7 @@ impl Fs for FakeFs {
let path = normalize_path(path);
self.simulate_random_delay().await;
let state = self.state.lock();
- if let Some((entry, _)) = state.try_read_path(&path) {
+ if let Some((entry, _)) = state.try_read_path(&path, true) {
entry.lock().is_file()
} else {
false
@@ -1078,10 +1099,17 @@ impl Fs for FakeFs {
self.simulate_random_delay().await;
let path = normalize_path(path);
let state = self.state.lock();
- if let Some((entry, real_path)) = state.try_read_path(&path) {
- let entry = entry.lock();
- let is_symlink = real_path != path;
+ if let Some((mut entry, _)) = state.try_read_path(&path, false) {
+ let is_symlink = entry.lock().is_symlink();
+ if is_symlink {
+ if let Some(e) = state.try_read_path(&path, true).map(|e| e.0) {
+ entry = e;
+ } else {
+ return Ok(None);
+ }
+ }
+ let entry = entry.lock();
Ok(Some(match &*entry {
FakeFsEntry::File { inode, mtime, .. } => Metadata {
inode: *inode,
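
The rewritten `FakeFs::metadata` above first resolves the path without following a final symlink (to learn whether the entry itself is a symlink), then re-resolves it with symlink-following enabled to report the target's metadata. This mirrors the `lstat`/`stat` split on a real filesystem; a minimal standalone sketch of that distinction using `std::fs` (an illustration, not the project's async `Fs` trait):

```rust
use std::{fs, io, path::Path};

// symlink_metadata() does not follow a final symlink (like lstat), while
// metadata() does (like stat). This is the same split as calling
// try_read_path(path, false) versus try_read_path(path, true) above.
fn describe(path: &Path) -> io::Result<(bool, u64)> {
    let is_symlink = fs::symlink_metadata(path)?.file_type().is_symlink();
    let resolved_len = fs::metadata(path)?.len(); // follows the link
    Ok((is_symlink, resolved_len))
}
```
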
@@ -1102,13 +1130,30 @@ impl Fs for FakeFs {
}
}
+ async fn read_link(&self, path: &Path) -> Result<PathBuf> {
+ self.simulate_random_delay().await;
+ let path = normalize_path(path);
+ let state = self.state.lock();
+ if let Some((entry, _)) = state.try_read_path(&path, false) {
+ let entry = entry.lock();
+ if let FakeFsEntry::Symlink { target } = &*entry {
+ Ok(target.clone())
+ } else {
+ Err(anyhow!("not a symlink: {}", path.display()))
+ }
+ } else {
+ Err(anyhow!("path does not exist: {}", path.display()))
+ }
+ }
+
async fn read_dir(
&self,
path: &Path,
) -> Result<Pin<Box<dyn Send + Stream<Item = Result<PathBuf>>>>> {
self.simulate_random_delay().await;
let path = normalize_path(path);
- let state = self.state.lock();
+ let mut state = self.state.lock();
+ state.read_dir_call_count += 1;
let entry = state.read_path(&path)?;
let mut entry = entry.lock();
let children = entry.dir_entries(&path)?;
@@ -64,7 +64,7 @@ use std::{
mem,
num::NonZeroU32,
ops::Range,
- path::{Component, Path, PathBuf},
+ path::{self, Component, Path, PathBuf},
rc::Rc,
str,
sync::{
@@ -478,6 +478,7 @@ impl Project {
client.add_model_request_handler(Self::handle_rename_project_entry);
client.add_model_request_handler(Self::handle_copy_project_entry);
client.add_model_request_handler(Self::handle_delete_project_entry);
+ client.add_model_request_handler(Self::handle_expand_project_entry);
client.add_model_request_handler(Self::handle_apply_additional_edits_for_completion);
client.add_model_request_handler(Self::handle_apply_code_action);
client.add_model_request_handler(Self::handle_on_type_formatting);
@@ -1072,6 +1073,40 @@ impl Project {
}
}
+ pub fn expand_entry(
+ &mut self,
+ worktree_id: WorktreeId,
+ entry_id: ProjectEntryId,
+ cx: &mut ModelContext<Self>,
+ ) -> Option<Task<Result<()>>> {
+ let worktree = self.worktree_for_id(worktree_id, cx)?;
+ if self.is_local() {
+ worktree.update(cx, |worktree, cx| {
+ worktree.as_local_mut().unwrap().expand_entry(entry_id, cx)
+ })
+ } else {
+ let worktree = worktree.downgrade();
+ let request = self.client.request(proto::ExpandProjectEntry {
+ project_id: self.remote_id().unwrap(),
+ entry_id: entry_id.to_proto(),
+ });
+ Some(cx.spawn_weak(|_, mut cx| async move {
+ let response = request.await?;
+ if let Some(worktree) = worktree.upgrade(&cx) {
+ worktree
+ .update(&mut cx, |worktree, _| {
+ worktree
+ .as_remote_mut()
+ .unwrap()
+ .wait_for_snapshot(response.worktree_scan_id as usize)
+ })
+ .await?;
+ }
+ Ok(())
+ }))
+ }
+ }
+
pub fn shared(&mut self, project_id: u64, cx: &mut ModelContext<Self>) -> Result<()> {
if self.client_state.is_some() {
return Err(anyhow!("project was already shared"));
@@ -3081,23 +3116,44 @@ impl Project {
for watcher in params.watchers {
for worktree in &self.worktrees {
if let Some(worktree) = worktree.upgrade(cx) {
- let worktree = worktree.read(cx);
- if let Some(abs_path) = worktree.abs_path().to_str() {
- if let Some(suffix) = match &watcher.glob_pattern {
- lsp::GlobPattern::String(s) => s,
- lsp::GlobPattern::Relative(rp) => &rp.pattern,
- }
- .strip_prefix(abs_path)
- .and_then(|s| s.strip_prefix(std::path::MAIN_SEPARATOR))
- {
- if let Some(glob) = Glob::new(suffix).log_err() {
- builders
- .entry(worktree.id())
- .or_insert_with(|| GlobSetBuilder::new())
- .add(glob);
+ let glob_is_inside_worktree = worktree.update(cx, |tree, _| {
+ if let Some(abs_path) = tree.abs_path().to_str() {
+ let relative_glob_pattern = match &watcher.glob_pattern {
+ lsp::GlobPattern::String(s) => s
+ .strip_prefix(abs_path)
+ .and_then(|s| s.strip_prefix(std::path::MAIN_SEPARATOR)),
+ lsp::GlobPattern::Relative(rp) => {
+ let base_uri = match &rp.base_uri {
+ lsp::OneOf::Left(workspace_folder) => {
+ &workspace_folder.uri
+ }
+ lsp::OneOf::Right(base_uri) => base_uri,
+ };
+ base_uri.to_file_path().ok().and_then(|file_path| {
+ (file_path.to_str() == Some(abs_path))
+ .then_some(rp.pattern.as_str())
+ })
+ }
+ };
+ if let Some(relative_glob_pattern) = relative_glob_pattern {
+ let literal_prefix =
+ glob_literal_prefix(&relative_glob_pattern);
+ tree.as_local_mut()
+ .unwrap()
+ .add_path_prefix_to_scan(Path::new(literal_prefix).into());
+ if let Some(glob) = Glob::new(relative_glob_pattern).log_err() {
+ builders
+ .entry(tree.id())
+ .or_insert_with(|| GlobSetBuilder::new())
+ .add(glob);
+ }
+ return true;
}
- break;
}
+ false
+ });
+ if glob_is_inside_worktree {
+ break;
}
}
}
@@ -5705,6 +5761,29 @@ impl Project {
})
}
+ async fn handle_expand_project_entry(
+ this: ModelHandle<Self>,
+ envelope: TypedEnvelope<proto::ExpandProjectEntry>,
+ _: Arc<Client>,
+ mut cx: AsyncAppContext,
+ ) -> Result<proto::ExpandProjectEntryResponse> {
+ let entry_id = ProjectEntryId::from_proto(envelope.payload.entry_id);
+ let worktree = this
+ .read_with(&cx, |this, cx| this.worktree_for_entry(entry_id, cx))
+ .ok_or_else(|| anyhow!("invalid request"))?;
+ worktree
+ .update(&mut cx, |worktree, cx| {
+ worktree
+ .as_local_mut()
+ .unwrap()
+ .expand_entry(entry_id, cx)
+ .ok_or_else(|| anyhow!("invalid entry"))
+ })?
+ .await?;
+ let worktree_scan_id = worktree.read_with(&cx, |worktree, _| worktree.scan_id()) as u64;
+ Ok(proto::ExpandProjectEntryResponse { worktree_scan_id })
+ }
+
async fn handle_update_diagnostic_summary(
this: ModelHandle<Self>,
envelope: TypedEnvelope<proto::UpdateDiagnosticSummary>,
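
This handler is the host-side half of `Project::expand_entry` above: the host expands the directory, replies with the scan id at which the expansion completed, and the guest then waits until its replicated snapshot has observed that scan id. A small standalone model of that wait, assuming a plain `Mutex`/`Condvar` rather than the `oneshot`-based `wait_for_snapshot` machinery in the worktree:

```rust
use std::sync::{Condvar, Mutex};

// Model of "block until the replicated snapshot reaches scan id N".
struct ObservedScanId {
    current: Mutex<usize>,
    changed: Condvar,
}

impl ObservedScanId {
    // Called whenever a newer snapshot arrives from the host.
    fn advance_to(&self, scan_id: usize) {
        let mut current = self.current.lock().unwrap();
        if scan_id > *current {
            *current = scan_id;
            self.changed.notify_all();
        }
    }

    // Called after receiving ExpandProjectEntryResponse { worktree_scan_id }.
    fn wait_for(&self, target: usize) {
        let mut current = self.current.lock().unwrap();
        while *current < target {
            current = self.changed.wait(current).unwrap();
        }
    }
}
```
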
@@ -7047,6 +7126,22 @@ impl Project {
}
}
+fn glob_literal_prefix<'a>(glob: &'a str) -> &'a str {
+ let mut literal_end = 0;
+ for (i, part) in glob.split(path::MAIN_SEPARATOR).enumerate() {
+ if part.contains(&['*', '?', '{', '}']) {
+ break;
+ } else {
+ if i > 0 {
+ // Account for separator prior to this part
+ literal_end += path::MAIN_SEPARATOR.len_utf8();
+ }
+ literal_end += part.len();
+ }
+ }
+ &glob[..literal_end]
+}
+
impl WorktreeHandle {
pub fn upgrade(&self, cx: &AppContext) -> Option<ModelHandle<Worktree>> {
match self {
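
For example, the watcher pattern registered in the updated language-server test below, `/the-root/target/y/**/*.rs`, is first stripped to a worktree-relative glob and then reduced to its literal prefix, which is the path handed to `add_path_prefix_to_scan`:

```rust
// Worked examples of the prefix extraction (see also the new unit test below):
assert_eq!(glob_literal_prefix("target/y/**/*.rs"), "target/y");
assert_eq!(glob_literal_prefix("**/*.js"), ""); // nothing literal to pre-scan
```
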
@@ -535,8 +535,28 @@ async fn test_reporting_fs_changes_to_language_servers(cx: &mut gpui::TestAppCon
fs.insert_tree(
"/the-root",
json!({
- "a.rs": "",
- "b.rs": "",
+ ".gitignore": "target\n",
+ "src": {
+ "a.rs": "",
+ "b.rs": "",
+ },
+ "target": {
+ "x": {
+ "out": {
+ "x.rs": ""
+ }
+ },
+ "y": {
+ "out": {
+ "y.rs": "",
+ }
+ },
+ "z": {
+ "out": {
+ "z.rs": ""
+ }
+ }
+ }
}),
)
.await;
@@ -550,11 +570,32 @@ async fn test_reporting_fs_changes_to_language_servers(cx: &mut gpui::TestAppCon
// Start the language server by opening a buffer with a compatible file extension.
let _buffer = project
.update(cx, |project, cx| {
- project.open_local_buffer("/the-root/a.rs", cx)
+ project.open_local_buffer("/the-root/src/a.rs", cx)
})
.await
.unwrap();
+ // Initially, we don't load ignored files because the language server has not explicitly asked us to watch them.
+ project.read_with(cx, |project, cx| {
+ let worktree = project.worktrees(cx).next().unwrap();
+ assert_eq!(
+ worktree
+ .read(cx)
+ .snapshot()
+ .entries(true)
+ .map(|entry| (entry.path.as_ref(), entry.is_ignored))
+ .collect::<Vec<_>>(),
+ &[
+ (Path::new(""), false),
+ (Path::new(".gitignore"), false),
+ (Path::new("src"), false),
+ (Path::new("src/a.rs"), false),
+ (Path::new("src/b.rs"), false),
+ (Path::new("target"), true),
+ ]
+ );
+ });
+
// Keep track of the FS events reported to the language server.
let fake_server = fake_servers.next().await.unwrap();
let file_changes = Arc::new(Mutex::new(Vec::new()));
@@ -565,12 +606,20 @@ async fn test_reporting_fs_changes_to_language_servers(cx: &mut gpui::TestAppCon
method: "workspace/didChangeWatchedFiles".to_string(),
register_options: serde_json::to_value(
lsp::DidChangeWatchedFilesRegistrationOptions {
- watchers: vec![lsp::FileSystemWatcher {
- glob_pattern: lsp::GlobPattern::String(
- "/the-root/*.{rs,c}".to_string(),
- ),
- kind: None,
- }],
+ watchers: vec![
+ lsp::FileSystemWatcher {
+ glob_pattern: lsp::GlobPattern::String(
+ "/the-root/src/*.{rs,c}".to_string(),
+ ),
+ kind: None,
+ },
+ lsp::FileSystemWatcher {
+ glob_pattern: lsp::GlobPattern::String(
+ "/the-root/target/y/**/*.rs".to_string(),
+ ),
+ kind: None,
+ },
+ ],
},
)
.ok(),
@@ -588,17 +637,50 @@ async fn test_reporting_fs_changes_to_language_servers(cx: &mut gpui::TestAppCon
});
cx.foreground().run_until_parked();
- assert_eq!(file_changes.lock().len(), 0);
+ assert_eq!(mem::take(&mut *file_changes.lock()), &[]);
+
+ // Now the language server has asked us to watch an ignored directory path,
+ // so we recursively load it.
+ project.read_with(cx, |project, cx| {
+ let worktree = project.worktrees(cx).next().unwrap();
+ assert_eq!(
+ worktree
+ .read(cx)
+ .snapshot()
+ .entries(true)
+ .map(|entry| (entry.path.as_ref(), entry.is_ignored))
+ .collect::<Vec<_>>(),
+ &[
+ (Path::new(""), false),
+ (Path::new(".gitignore"), false),
+ (Path::new("src"), false),
+ (Path::new("src/a.rs"), false),
+ (Path::new("src/b.rs"), false),
+ (Path::new("target"), true),
+ (Path::new("target/x"), true),
+ (Path::new("target/y"), true),
+ (Path::new("target/y/out"), true),
+ (Path::new("target/y/out/y.rs"), true),
+ (Path::new("target/z"), true),
+ ]
+ );
+ });
- // Perform some file system mutations, two of which match the watched patterns,
- // and one of which does not.
+ // Perform some file system mutations, three of which match the watched patterns,
+ // and two of which do not.
- fs.create_file("/the-root/c.rs".as_ref(), Default::default())
+ fs.create_file("/the-root/src/c.rs".as_ref(), Default::default())
+ .await
+ .unwrap();
+ fs.create_file("/the-root/src/d.txt".as_ref(), Default::default())
+ .await
+ .unwrap();
+ fs.remove_file("/the-root/src/b.rs".as_ref(), Default::default())
.await
.unwrap();
- fs.create_file("/the-root/d.txt".as_ref(), Default::default())
+ fs.create_file("/the-root/target/x/out/x2.rs".as_ref(), Default::default())
.await
.unwrap();
- fs.remove_file("/the-root/b.rs".as_ref(), Default::default())
+ fs.create_file("/the-root/target/y/out/y2.rs".as_ref(), Default::default())
.await
.unwrap();
@@ -608,11 +690,15 @@ async fn test_reporting_fs_changes_to_language_servers(cx: &mut gpui::TestAppCon
&*file_changes.lock(),
&[
lsp::FileEvent {
- uri: lsp::Url::from_file_path("/the-root/b.rs").unwrap(),
+ uri: lsp::Url::from_file_path("/the-root/src/b.rs").unwrap(),
typ: lsp::FileChangeType::DELETED,
},
lsp::FileEvent {
- uri: lsp::Url::from_file_path("/the-root/c.rs").unwrap(),
+ uri: lsp::Url::from_file_path("/the-root/src/c.rs").unwrap(),
+ typ: lsp::FileChangeType::CREATED,
+ },
+ lsp::FileEvent {
+ uri: lsp::Url::from_file_path("/the-root/target/y/out/y2.rs").unwrap(),
typ: lsp::FileChangeType::CREATED,
},
]
@@ -3846,6 +3932,14 @@ async fn test_search_with_exclusions_and_inclusions(cx: &mut gpui::TestAppContex
);
}
+#[test]
+fn test_glob_literal_prefix() {
+ assert_eq!(glob_literal_prefix("**/*.js"), "");
+ assert_eq!(glob_literal_prefix("node_modules/**/*.js"), "node_modules");
+ assert_eq!(glob_literal_prefix("foo/{bar,baz}.js"), "foo");
+ assert_eq!(glob_literal_prefix("foo/bar/baz.js"), "foo/bar/baz.js");
+}
+
async fn search(
project: &ModelHandle<Project>,
query: SearchQuery,
@@ -5,7 +5,7 @@ use ::ignore::gitignore::{Gitignore, GitignoreBuilder};
use anyhow::{anyhow, Context, Result};
use client::{proto, Client};
use clock::ReplicaId;
-use collections::{HashMap, VecDeque};
+use collections::{HashMap, HashSet, VecDeque};
use fs::{
repository::{GitFileStatus, GitRepository, RepoPath},
Fs, LineEnding,
@@ -67,7 +67,8 @@ pub enum Worktree {
pub struct LocalWorktree {
snapshot: LocalSnapshot,
- path_changes_tx: channel::Sender<(Vec<PathBuf>, barrier::Sender)>,
+ scan_requests_tx: channel::Sender<ScanRequest>,
+ path_prefixes_to_scan_tx: channel::Sender<Arc<Path>>,
is_scanning: (watch::Sender<bool>, watch::Receiver<bool>),
_background_scanner_task: Task<()>,
share: Option<ShareState>,
@@ -84,6 +85,11 @@ pub struct LocalWorktree {
visible: bool,
}
+struct ScanRequest {
+ relative_paths: Vec<Arc<Path>>,
+ done: barrier::Sender,
+}
+
pub struct RemoteWorktree {
snapshot: Snapshot,
background_snapshot: Arc<Mutex<Snapshot>>,
@@ -214,6 +220,9 @@ pub struct LocalSnapshot {
struct BackgroundScannerState {
snapshot: LocalSnapshot,
+ scanned_dirs: HashSet<ProjectEntryId>,
+ path_prefixes_to_scan: HashSet<Arc<Path>>,
+ paths_to_scan: HashSet<Arc<Path>>,
/// The ids of all of the entries that were removed from the snapshot
/// as part of the current update. These entry ids may be re-used
/// if the same inode is discovered at a new path, or if the given
@@ -232,13 +241,6 @@ pub struct LocalRepositoryEntry {
pub(crate) git_dir_path: Arc<Path>,
}
-impl LocalRepositoryEntry {
- // Note that this path should be relative to the worktree root.
- pub(crate) fn in_dot_git(&self, path: &Path) -> bool {
- path.starts_with(self.git_dir_path.as_ref())
- }
-}
-
impl Deref for LocalSnapshot {
type Target = Snapshot;
@@ -330,7 +332,8 @@ impl Worktree {
);
}
- let (path_changes_tx, path_changes_rx) = channel::unbounded();
+ let (scan_requests_tx, scan_requests_rx) = channel::unbounded();
+ let (path_prefixes_to_scan_tx, path_prefixes_to_scan_rx) = channel::unbounded();
let (scan_states_tx, mut scan_states_rx) = mpsc::unbounded();
cx.spawn_weak(|this, mut cx| async move {
@@ -370,7 +373,8 @@ impl Worktree {
fs,
scan_states_tx,
background,
- path_changes_rx,
+ scan_requests_rx,
+ path_prefixes_to_scan_rx,
)
.run(events)
.await;
@@ -381,7 +385,8 @@ impl Worktree {
snapshot,
is_scanning: watch::channel_with(true),
share: None,
- path_changes_tx,
+ scan_requests_tx,
+ path_prefixes_to_scan_tx,
_background_scanner_task: background_scanner_task,
diagnostics: Default::default(),
diagnostic_summaries: Default::default(),
@@ -867,27 +872,27 @@ impl LocalWorktree {
path: &Path,
cx: &mut ModelContext<Worktree>,
) -> Task<Result<(File, String, Option<String>)>> {
- let handle = cx.handle();
let path = Arc::from(path);
let abs_path = self.absolutize(&path);
let fs = self.fs.clone();
- let snapshot = self.snapshot();
-
- let mut index_task = None;
-
- if let Some(repo) = snapshot.repository_for_path(&path) {
- let repo_path = repo.work_directory.relativize(self, &path).unwrap();
- if let Some(repo) = self.git_repositories.get(&*repo.work_directory) {
- let repo = repo.repo_ptr.to_owned();
- index_task = Some(
- cx.background()
- .spawn(async move { repo.lock().load_index_text(&repo_path) }),
- );
- }
- }
+ let entry = self.refresh_entry(path.clone(), None, cx);
- cx.spawn(|this, mut cx| async move {
+ cx.spawn(|this, cx| async move {
let text = fs.load(&abs_path).await?;
+ let entry = entry.await?;
+
+ let mut index_task = None;
+ let snapshot = this.read_with(&cx, |this, _| this.as_local().unwrap().snapshot());
+ if let Some(repo) = snapshot.repository_for_path(&path) {
+ let repo_path = repo.work_directory.relativize(&snapshot, &path).unwrap();
+ if let Some(repo) = snapshot.git_repositories.get(&*repo.work_directory) {
+ let repo = repo.repo_ptr.clone();
+ index_task = Some(
+ cx.background()
+ .spawn(async move { repo.lock().load_index_text(&repo_path) }),
+ );
+ }
+ }
let diff_base = if let Some(index_task) = index_task {
index_task.await
@@ -895,17 +900,10 @@ impl LocalWorktree {
None
};
- // Eagerly populate the snapshot with an updated entry for the loaded file
- let entry = this
- .update(&mut cx, |this, cx| {
- this.as_local().unwrap().refresh_entry(path, None, cx)
- })
- .await?;
-
Ok((
File {
entry_id: entry.id,
- worktree: handle,
+ worktree: this,
path: entry.path,
mtime: entry.mtime,
is_local: true,
@@ -1039,14 +1037,10 @@ impl LocalWorktree {
cx: &mut ModelContext<Worktree>,
) -> Option<Task<Result<()>>> {
let entry = self.entry_for_id(entry_id)?.clone();
- let abs_path = self.abs_path.clone();
+ let abs_path = self.absolutize(&entry.path);
let fs = self.fs.clone();
let delete = cx.background().spawn(async move {
- let mut abs_path = fs.canonicalize(&abs_path).await?;
- if entry.path.file_name().is_some() {
- abs_path = abs_path.join(&entry.path);
- }
if entry.is_file() {
fs.remove_file(&abs_path, Default::default()).await?;
} else {
@@ -1059,19 +1053,18 @@ impl LocalWorktree {
)
.await?;
}
- anyhow::Ok(abs_path)
+ anyhow::Ok(entry.path)
});
Some(cx.spawn(|this, mut cx| async move {
- let abs_path = delete.await?;
- let (tx, mut rx) = barrier::channel();
+ let path = delete.await?;
this.update(&mut cx, |this, _| {
this.as_local_mut()
.unwrap()
- .path_changes_tx
- .try_send((vec![abs_path], tx))
- })?;
- rx.recv().await;
+ .refresh_entries_for_paths(vec![path])
+ })
+ .recv()
+ .await;
Ok(())
}))
}
@@ -1135,34 +1128,48 @@ impl LocalWorktree {
}))
}
+ pub fn expand_entry(
+ &mut self,
+ entry_id: ProjectEntryId,
+ cx: &mut ModelContext<Worktree>,
+ ) -> Option<Task<Result<()>>> {
+ let path = self.entry_for_id(entry_id)?.path.clone();
+ let mut refresh = self.refresh_entries_for_paths(vec![path]);
+ Some(cx.background().spawn(async move {
+ refresh.next().await;
+ Ok(())
+ }))
+ }
+
+ pub fn refresh_entries_for_paths(&self, paths: Vec<Arc<Path>>) -> barrier::Receiver {
+ let (tx, rx) = barrier::channel();
+ self.scan_requests_tx
+ .try_send(ScanRequest {
+ relative_paths: paths,
+ done: tx,
+ })
+ .ok();
+ rx
+ }
+
+ pub fn add_path_prefix_to_scan(&self, path_prefix: Arc<Path>) {
+ self.path_prefixes_to_scan_tx.try_send(path_prefix).ok();
+ }
+
fn refresh_entry(
&self,
path: Arc<Path>,
old_path: Option<Arc<Path>>,
cx: &mut ModelContext<Worktree>,
) -> Task<Result<Entry>> {
- let fs = self.fs.clone();
- let abs_root_path = self.abs_path.clone();
- let path_changes_tx = self.path_changes_tx.clone();
+ let paths = if let Some(old_path) = old_path.as_ref() {
+ vec![old_path.clone(), path.clone()]
+ } else {
+ vec![path.clone()]
+ };
+ let mut refresh = self.refresh_entries_for_paths(paths);
cx.spawn_weak(move |this, mut cx| async move {
- let abs_path = fs.canonicalize(&abs_root_path).await?;
- let mut paths = Vec::with_capacity(2);
- paths.push(if path.file_name().is_some() {
- abs_path.join(&path)
- } else {
- abs_path.clone()
- });
- if let Some(old_path) = old_path {
- paths.push(if old_path.file_name().is_some() {
- abs_path.join(&old_path)
- } else {
- abs_path.clone()
- });
- }
-
- let (tx, mut rx) = barrier::channel();
- path_changes_tx.try_send((paths, tx))?;
- rx.recv().await;
+ refresh.recv().await;
this.upgrade(&cx)
.ok_or_else(|| anyhow!("worktree was dropped"))?
.update(&mut cx, |this, _| {
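
`refresh_entries_for_paths` signals completion through a `barrier` channel: `recv()` on the returned receiver resolves once the scanner has processed the request and dropped the paired `done` sender, with no payload involved. A minimal sketch of the same pattern using `std::sync::mpsc` instead of the worktree's `barrier` type:

```rust
use std::{sync::mpsc, thread};

fn main() {
    // The sender is the "barrier": dropping it is the completion signal.
    let (done_tx, done_rx) = mpsc::channel::<()>();
    let scanner = thread::spawn(move || {
        // ... rescan the requested paths ...
        drop(done_tx);
    });
    // recv() returns Err(RecvError) once the sender is gone, i.e. "done".
    let _ = done_rx.recv();
    scanner.join().unwrap();
}
```
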
@@ -1331,7 +1338,7 @@ impl RemoteWorktree {
self.completed_scan_id >= scan_id
}
- fn wait_for_snapshot(&mut self, scan_id: usize) -> impl Future<Output = Result<()>> {
+ pub(crate) fn wait_for_snapshot(&mut self, scan_id: usize) -> impl Future<Output = Result<()>> {
let (tx, rx) = oneshot::channel();
if self.observed_snapshot(scan_id) {
let _ = tx.send(());
@@ -1568,7 +1575,7 @@ impl Snapshot {
}
pub fn visible_file_count(&self) -> usize {
- self.entries_by_path.summary().visible_file_count
+ self.entries_by_path.summary().non_ignored_file_count
}
fn traverse_from_offset(
@@ -1837,15 +1844,6 @@ impl LocalSnapshot {
Some((path, self.git_repositories.get(&repo.work_directory_id())?))
}
- pub(crate) fn repo_for_metadata(
- &self,
- path: &Path,
- ) -> Option<(&ProjectEntryId, &LocalRepositoryEntry)> {
- self.git_repositories
- .iter()
- .find(|(_, repo)| repo.in_dot_git(path))
- }
-
fn build_update(
&self,
project_id: u64,
@@ -1981,57 +1979,6 @@ impl LocalSnapshot {
entry
}
- #[must_use = "Changed paths must be used for diffing later"]
- fn build_repo(&mut self, parent_path: Arc<Path>, fs: &dyn Fs) -> Option<Vec<Arc<Path>>> {
- let abs_path = self.abs_path.join(&parent_path);
- let work_dir: Arc<Path> = parent_path.parent().unwrap().into();
-
- // Guard against repositories inside the repository metadata
- if work_dir
- .components()
- .find(|component| component.as_os_str() == *DOT_GIT)
- .is_some()
- {
- return None;
- };
-
- let work_dir_id = self
- .entry_for_path(work_dir.clone())
- .map(|entry| entry.id)?;
-
- if self.git_repositories.get(&work_dir_id).is_some() {
- return None;
- }
-
- let repo = fs.open_repo(abs_path.as_path())?;
- let work_directory = RepositoryWorkDirectory(work_dir.clone());
-
- let repo_lock = repo.lock();
-
- self.repository_entries.insert(
- work_directory.clone(),
- RepositoryEntry {
- work_directory: work_dir_id.into(),
- branch: repo_lock.branch_name().map(Into::into),
- },
- );
-
- let changed_paths = self.scan_statuses(repo_lock.deref(), &work_directory);
-
- drop(repo_lock);
-
- self.git_repositories.insert(
- work_dir_id,
- LocalRepositoryEntry {
- git_dir_scan_id: 0,
- repo_ptr: repo,
- git_dir_path: parent_path.clone(),
- },
- );
-
- Some(changed_paths)
- }
-
#[must_use = "Changed paths must be used for diffing later"]
fn scan_statuses(
&mut self,
@@ -2098,11 +2045,18 @@ impl LocalSnapshot {
ignore_stack
}
-}
-impl LocalSnapshot {
#[cfg(test)]
- pub fn check_invariants(&self) {
+ pub(crate) fn expanded_entries(&self) -> impl Iterator<Item = &Entry> {
+ self.entries_by_path
+ .cursor::<()>()
+ .filter(|entry| entry.kind == EntryKind::Dir && (entry.is_external || entry.is_ignored))
+ }
+
+ #[cfg(test)]
+ pub fn check_invariants(&self, git_state: bool) {
+ use pretty_assertions::assert_eq;
+
assert_eq!(
self.entries_by_path
.cursor::<()>()
@@ -2122,7 +2076,7 @@ impl LocalSnapshot {
for entry in self.entries_by_path.cursor::<()>() {
if entry.is_file() {
assert_eq!(files.next().unwrap().inode, entry.inode);
- if !entry.is_ignored {
+ if !entry.is_ignored && !entry.is_external {
assert_eq!(visible_files.next().unwrap().inode, entry.inode);
}
}
@@ -2132,7 +2086,11 @@ impl LocalSnapshot {
assert!(visible_files.next().is_none());
let mut bfs_paths = Vec::new();
- let mut stack = vec![Path::new("")];
+ let mut stack = self
+ .root_entry()
+ .map(|e| e.path.as_ref())
+ .into_iter()
+ .collect::<Vec<_>>();
while let Some(path) = stack.pop() {
bfs_paths.push(path);
let ix = stack.len();
@@ -2154,12 +2112,15 @@ impl LocalSnapshot {
.collect::<Vec<_>>();
assert_eq!(dfs_paths_via_traversal, dfs_paths_via_iter);
- for ignore_parent_abs_path in self.ignores_by_parent_abs_path.keys() {
- let ignore_parent_path = ignore_parent_abs_path.strip_prefix(&self.abs_path).unwrap();
- assert!(self.entry_for_path(&ignore_parent_path).is_some());
- assert!(self
- .entry_for_path(ignore_parent_path.join(&*GITIGNORE))
- .is_some());
+ if git_state {
+ for ignore_parent_abs_path in self.ignores_by_parent_abs_path.keys() {
+ let ignore_parent_path =
+ ignore_parent_abs_path.strip_prefix(&self.abs_path).unwrap();
+ assert!(self.entry_for_path(&ignore_parent_path).is_some());
+ assert!(self
+ .entry_for_path(ignore_parent_path.join(&*GITIGNORE))
+ .is_some());
+ }
}
}
@@ -2177,6 +2138,19 @@ impl LocalSnapshot {
}
impl BackgroundScannerState {
+ fn should_scan_directory(&self, entry: &Entry) -> bool {
+ (!entry.is_external && !entry.is_ignored)
+ || self.scanned_dirs.contains(&entry.id) // If we've ever scanned it, keep scanning
+ || self
+ .paths_to_scan
+ .iter()
+ .any(|p| p.starts_with(&entry.path))
+ || self
+ .path_prefixes_to_scan
+ .iter()
+ .any(|p| entry.path.starts_with(p))
+ }
+
fn reuse_entry_id(&mut self, entry: &mut Entry) {
if let Some(removed_entry_id) = self.removed_entry_ids.remove(&entry.inode) {
entry.id = removed_entry_id;
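
`should_scan_directory` above encodes the new lazy-loading policy: ordinary directories are always scanned, while ignored or external directories are scanned only if they have been scanned before, or if an explicit scan request or registered path prefix covers them. A standalone restatement of the predicate over plain `std` types (the real code consults `Entry` and the scanner state introduced earlier):

```rust
use std::collections::HashSet;
use std::path::{Path, PathBuf};

struct ScanGate {
    scanned_dirs: HashSet<u64>,              // directories already loaded once
    paths_to_scan: HashSet<PathBuf>,         // explicit scan requests
    path_prefixes_to_scan: HashSet<PathBuf>, // e.g. LSP watcher prefixes
}

impl ScanGate {
    fn should_scan(&self, id: u64, path: &Path, ignored: bool, external: bool) -> bool {
        (!external && !ignored)
            || self.scanned_dirs.contains(&id)
            || self.paths_to_scan.iter().any(|p| p.starts_with(path))
            || self.path_prefixes_to_scan.iter().any(|p| path.starts_with(p))
    }
}
```
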
@@ -2187,17 +2161,24 @@ impl BackgroundScannerState {
fn insert_entry(&mut self, mut entry: Entry, fs: &dyn Fs) -> Entry {
self.reuse_entry_id(&mut entry);
- self.snapshot.insert_entry(entry, fs)
+ let entry = self.snapshot.insert_entry(entry, fs);
+ if entry.path.file_name() == Some(&DOT_GIT) {
+ self.build_repository(entry.path.clone(), fs);
+ }
+
+ #[cfg(test)]
+ self.snapshot.check_invariants(false);
+
+ entry
}
- #[must_use = "Changed paths must be used for diffing later"]
fn populate_dir(
&mut self,
- parent_path: Arc<Path>,
+ parent_path: &Arc<Path>,
entries: impl IntoIterator<Item = Entry>,
ignore: Option<Arc<Gitignore>>,
fs: &dyn Fs,
- ) -> Option<Vec<Arc<Path>>> {
+ ) {
let mut parent_entry = if let Some(parent_entry) = self
.snapshot
.entries_by_path
@@ -2209,15 +2190,13 @@ impl BackgroundScannerState {
"populating a directory {:?} that has been removed",
parent_path
);
- return None;
+ return;
};
match parent_entry.kind {
- EntryKind::PendingDir => {
- parent_entry.kind = EntryKind::Dir;
- }
+ EntryKind::PendingDir | EntryKind::UnloadedDir => parent_entry.kind = EntryKind::Dir,
EntryKind::Dir => {}
- _ => return None,
+ _ => return,
}
if let Some(ignore) = ignore {
@@ -2227,11 +2206,16 @@ impl BackgroundScannerState {
.insert(abs_parent_path, (ignore, false));
}
+ self.scanned_dirs.insert(parent_entry.id);
let mut entries_by_path_edits = vec![Edit::Insert(parent_entry)];
let mut entries_by_id_edits = Vec::new();
+ let mut dotgit_path = None;
+
+ for entry in entries {
+ if entry.path.file_name() == Some(&DOT_GIT) {
+ dotgit_path = Some(entry.path.clone());
+ }
- for mut entry in entries {
- self.reuse_entry_id(&mut entry);
entries_by_id_edits.push(Edit::Insert(PathEntry {
id: entry.id,
path: entry.path.clone(),
@@ -2246,10 +2230,15 @@ impl BackgroundScannerState {
.edit(entries_by_path_edits, &());
self.snapshot.entries_by_id.edit(entries_by_id_edits, &());
- if parent_path.file_name() == Some(&DOT_GIT) {
- return self.snapshot.build_repo(parent_path, fs);
+ if let Some(dotgit_path) = dotgit_path {
+ self.build_repository(dotgit_path, fs);
}
- None
+ if let Err(ix) = self.changed_paths.binary_search(parent_path) {
+ self.changed_paths.insert(ix, parent_path.clone());
+ }
+
+ #[cfg(test)]
+ self.snapshot.check_invariants(false);
}
fn remove_path(&mut self, path: &Path) {
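
`populate_dir` now records the populated parent in `changed_paths`, which is kept sorted so that later diffing can merge it cheaply; `binary_search` returning `Err(ix)` gives exactly the insertion point that preserves the ordering:

```rust
fn main() {
    // The sorted-insert idiom used for `changed_paths`, shown standalone.
    let mut changed_paths = vec!["a/one", "c/three"];
    if let Err(ix) = changed_paths.binary_search(&"b/two") {
        changed_paths.insert(ix, "b/two");
    }
    assert_eq!(changed_paths, ["a/one", "b/two", "c/three"]);
}
```
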
@@ -2284,6 +2273,137 @@ impl BackgroundScannerState {
*needs_update = true;
}
}
+
+ #[cfg(test)]
+ self.snapshot.check_invariants(false);
+ }
+
+ fn reload_repositories(&mut self, changed_paths: &[Arc<Path>], fs: &dyn Fs) {
+ let scan_id = self.snapshot.scan_id;
+
+ // Find each of the .git directories that contain any of the given paths.
+ let mut prev_dot_git_dir = None;
+ for changed_path in changed_paths {
+ let Some(dot_git_dir) = changed_path
+ .ancestors()
+ .find(|ancestor| ancestor.file_name() == Some(&*DOT_GIT)) else {
+ continue;
+ };
+
+ // Avoid processing the same repository multiple times, if multiple paths
+ // within it have changed.
+ if prev_dot_git_dir == Some(dot_git_dir) {
+ continue;
+ }
+ prev_dot_git_dir = Some(dot_git_dir);
+
+ // If there is already a repository for this .git directory, reload
+ // the status for all of its files.
+ let repository = self
+ .snapshot
+ .git_repositories
+ .iter()
+ .find_map(|(entry_id, repo)| {
+ (repo.git_dir_path.as_ref() == dot_git_dir).then(|| (*entry_id, repo.clone()))
+ });
+ match repository {
+ None => {
+ self.build_repository(dot_git_dir.into(), fs);
+ }
+ Some((entry_id, repository)) => {
+ if repository.git_dir_scan_id == scan_id {
+ continue;
+ }
+ let Some(work_dir) = self
+ .snapshot
+ .entry_for_id(entry_id)
+ .map(|entry| RepositoryWorkDirectory(entry.path.clone())) else { continue };
+
+ let repository = repository.repo_ptr.lock();
+ let branch = repository.branch_name();
+ repository.reload_index();
+
+ self.snapshot
+ .git_repositories
+ .update(&entry_id, |entry| entry.git_dir_scan_id = scan_id);
+ self.snapshot
+ .snapshot
+ .repository_entries
+ .update(&work_dir, |entry| entry.branch = branch.map(Into::into));
+
+ let changed_paths = self.snapshot.scan_statuses(&*repository, &work_dir);
+ util::extend_sorted(
+ &mut self.changed_paths,
+ changed_paths,
+ usize::MAX,
+ Ord::cmp,
+ )
+ }
+ }
+ }
+
+ // Remove any git repositories whose .git entry no longer exists.
+ let mut snapshot = &mut self.snapshot;
+ let mut repositories = mem::take(&mut snapshot.git_repositories);
+ let mut repository_entries = mem::take(&mut snapshot.repository_entries);
+ repositories.retain(|work_directory_id, _| {
+ snapshot
+ .entry_for_id(*work_directory_id)
+ .map_or(false, |entry| {
+ snapshot.entry_for_path(entry.path.join(*DOT_GIT)).is_some()
+ })
+ });
+ repository_entries.retain(|_, entry| repositories.get(&entry.work_directory.0).is_some());
+ snapshot.git_repositories = repositories;
+ snapshot.repository_entries = repository_entries;
+ }
+
+ fn build_repository(&mut self, dot_git_path: Arc<Path>, fs: &dyn Fs) -> Option<()> {
+ let work_dir_path: Arc<Path> = dot_git_path.parent().unwrap().into();
+
+ // Guard against repositories inside the repository metadata
+ if work_dir_path.iter().any(|component| component == *DOT_GIT) {
+ return None;
+ };
+
+ let work_dir_id = self
+ .snapshot
+ .entry_for_path(work_dir_path.clone())
+ .map(|entry| entry.id)?;
+
+ if self.snapshot.git_repositories.get(&work_dir_id).is_some() {
+ return None;
+ }
+
+ let abs_path = self.snapshot.abs_path.join(&dot_git_path);
+ let repository = fs.open_repo(abs_path.as_path())?;
+ let work_directory = RepositoryWorkDirectory(work_dir_path.clone());
+
+ let repo_lock = repository.lock();
+ self.snapshot.repository_entries.insert(
+ work_directory.clone(),
+ RepositoryEntry {
+ work_directory: work_dir_id.into(),
+ branch: repo_lock.branch_name().map(Into::into),
+ },
+ );
+
+ let changed_paths = self
+ .snapshot
+ .scan_statuses(repo_lock.deref(), &work_directory);
+ drop(repo_lock);
+
+ self.snapshot.git_repositories.insert(
+ work_dir_id,
+ LocalRepositoryEntry {
+ git_dir_scan_id: 0,
+ repo_ptr: repository,
+ git_dir_path: dot_git_path.clone(),
+ },
+ );
+
+ util::extend_sorted(&mut self.changed_paths, changed_paths, usize::MAX, Ord::cmp);
+ Some(())
}
}
@@ -2570,12 +2690,27 @@ pub struct Entry {
pub inode: u64,
pub mtime: SystemTime,
pub is_symlink: bool,
+
+ /// Whether this entry is ignored by Git.
+ ///
+ /// We only scan ignored entries once the directory is expanded and
+ /// exclude them from searches.
pub is_ignored: bool,
+
+ /// Whether this entry's canonical path is outside of the worktree.
+ /// This means the entry is only accessible from the worktree root via a
+ /// symlink.
+ ///
+ /// We only scan entries outside of the worktree once the symlinked
+ /// directory is expanded. External entries are treated like gitignored
+ /// entries in that they are not included in searches.
+ pub is_external: bool,
pub git_status: Option<GitFileStatus>,
}
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum EntryKind {
+ UnloadedDir,
PendingDir,
Dir,
File(CharBag),
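
The doc comment above defines "external" in terms of the entry's canonical path. A standalone sketch of that check, using synchronous `std::fs` in place of the project's `Fs` trait:

```rust
use std::{io, path::Path};

// An entry is external when canonicalizing it (resolving any symlinks)
// yields a path that no longer lies inside the worktree root.
fn is_external(worktree_root: &Path, entry_abs_path: &Path) -> io::Result<bool> {
    let root = std::fs::canonicalize(worktree_root)?;
    let entry = std::fs::canonicalize(entry_abs_path)?;
    Ok(!entry.starts_with(&root))
}
```
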
@@ -2624,16 +2759,17 @@ impl Entry {
mtime: metadata.mtime,
is_symlink: metadata.is_symlink,
is_ignored: false,
+ is_external: false,
git_status: None,
}
}
pub fn is_dir(&self) -> bool {
- matches!(self.kind, EntryKind::Dir | EntryKind::PendingDir)
+ self.kind.is_dir()
}
pub fn is_file(&self) -> bool {
- matches!(self.kind, EntryKind::File(_))
+ self.kind.is_file()
}
pub fn git_status(&self) -> Option<GitFileStatus> {
@@ -2641,19 +2777,40 @@ impl Entry {
}
}
+impl EntryKind {
+ pub fn is_dir(&self) -> bool {
+ matches!(
+ self,
+ EntryKind::Dir | EntryKind::PendingDir | EntryKind::UnloadedDir
+ )
+ }
+
+ pub fn is_unloaded(&self) -> bool {
+ matches!(self, EntryKind::UnloadedDir)
+ }
+
+ pub fn is_file(&self) -> bool {
+ matches!(self, EntryKind::File(_))
+ }
+}
+
impl sum_tree::Item for Entry {
type Summary = EntrySummary;
fn summary(&self) -> Self::Summary {
- let visible_count = if self.is_ignored { 0 } else { 1 };
+ let non_ignored_count = if self.is_ignored || self.is_external {
+ 0
+ } else {
+ 1
+ };
let file_count;
- let visible_file_count;
+ let non_ignored_file_count;
if self.is_file() {
file_count = 1;
- visible_file_count = visible_count;
+ non_ignored_file_count = non_ignored_count;
} else {
file_count = 0;
- visible_file_count = 0;
+ non_ignored_file_count = 0;
}
let mut statuses = GitStatuses::default();
@@ -2669,9 +2826,9 @@ impl sum_tree::Item for Entry {
EntrySummary {
max_path: self.path.clone(),
count: 1,
- visible_count,
+ non_ignored_count,
file_count,
- visible_file_count,
+ non_ignored_file_count,
statuses,
}
}
@@ -2689,9 +2846,9 @@ impl sum_tree::KeyedItem for Entry {
pub struct EntrySummary {
max_path: Arc<Path>,
count: usize,
- visible_count: usize,
+ non_ignored_count: usize,
file_count: usize,
- visible_file_count: usize,
+ non_ignored_file_count: usize,
statuses: GitStatuses,
}
@@ -2700,9 +2857,9 @@ impl Default for EntrySummary {
Self {
max_path: Arc::from(Path::new("")),
count: 0,
- visible_count: 0,
+ non_ignored_count: 0,
file_count: 0,
- visible_file_count: 0,
+ non_ignored_file_count: 0,
statuses: Default::default(),
}
}
@@ -2714,9 +2871,9 @@ impl sum_tree::Summary for EntrySummary {
fn add_summary(&mut self, rhs: &Self, _: &()) {
self.max_path = rhs.max_path.clone();
self.count += rhs.count;
- self.visible_count += rhs.visible_count;
+ self.non_ignored_count += rhs.non_ignored_count;
self.file_count += rhs.file_count;
- self.visible_file_count += rhs.visible_file_count;
+ self.non_ignored_file_count += rhs.non_ignored_file_count;
self.statuses += rhs.statuses;
}
}
@@ -2784,7 +2941,8 @@ struct BackgroundScanner {
fs: Arc<dyn Fs>,
status_updates_tx: UnboundedSender<ScanState>,
executor: Arc<executor::Background>,
- refresh_requests_rx: channel::Receiver<(Vec<PathBuf>, barrier::Sender)>,
+ scan_requests_rx: channel::Receiver<ScanRequest>,
+ path_prefixes_to_scan_rx: channel::Receiver<Arc<Path>>,
next_entry_id: Arc<AtomicUsize>,
phase: BackgroundScannerPhase,
}
@@ -2803,17 +2961,22 @@ impl BackgroundScanner {
fs: Arc<dyn Fs>,
status_updates_tx: UnboundedSender<ScanState>,
executor: Arc<executor::Background>,
- refresh_requests_rx: channel::Receiver<(Vec<PathBuf>, barrier::Sender)>,
+ scan_requests_rx: channel::Receiver<ScanRequest>,
+ path_prefixes_to_scan_rx: channel::Receiver<Arc<Path>>,
) -> Self {
Self {
fs,
status_updates_tx,
executor,
- refresh_requests_rx,
+ scan_requests_rx,
+ path_prefixes_to_scan_rx,
next_entry_id,
state: Mutex::new(BackgroundScannerState {
prev_snapshot: snapshot.snapshot.clone(),
snapshot,
+ scanned_dirs: Default::default(),
+ path_prefixes_to_scan: Default::default(),
+ paths_to_scan: Default::default(),
removed_entry_ids: Default::default(),
changed_paths: Default::default(),
}),
@@ -2823,7 +2986,7 @@ impl BackgroundScanner {
async fn run(
&mut self,
- mut events_rx: Pin<Box<dyn Send + Stream<Item = Vec<fsevent::Event>>>>,
+ mut fs_events_rx: Pin<Box<dyn Send + Stream<Item = Vec<fsevent::Event>>>>,
) {
use futures::FutureExt as _;
@@ -2868,6 +3031,7 @@ impl BackgroundScanner {
path: Arc::from(Path::new("")),
ignore_stack,
ancestor_inodes: TreeSet::from_ordered_entries(root_inode),
+ is_external: false,
scan_queue: scan_job_tx.clone(),
}))
.unwrap();
@@ -2884,9 +3048,9 @@ impl BackgroundScanner {
// For these events, update events cannot be as precise, because we didn't
// have the previous state loaded yet.
self.phase = BackgroundScannerPhase::EventsReceivedDuringInitialScan;
- if let Poll::Ready(Some(events)) = futures::poll!(events_rx.next()) {
+ if let Poll::Ready(Some(events)) = futures::poll!(fs_events_rx.next()) {
let mut paths = events.into_iter().map(|e| e.path).collect::<Vec<_>>();
- while let Poll::Ready(Some(more_events)) = futures::poll!(events_rx.next()) {
+ while let Poll::Ready(Some(more_events)) = futures::poll!(fs_events_rx.next()) {
paths.extend(more_events.into_iter().map(|e| e.path));
}
self.process_events(paths).await;
@@ -2898,17 +3062,33 @@ impl BackgroundScanner {
select_biased! {
// Process any path refresh requests from the worktree. Prioritize
// these before handling changes reported by the filesystem.
- request = self.refresh_requests_rx.recv().fuse() => {
- let Ok((paths, barrier)) = request else { break };
- if !self.process_refresh_request(paths.clone(), barrier).await {
+ request = self.scan_requests_rx.recv().fuse() => {
+ let Ok(request) = request else { break };
+ if !self.process_scan_request(request, false).await {
return;
}
}
- events = events_rx.next().fuse() => {
+ path_prefix = self.path_prefixes_to_scan_rx.recv().fuse() => {
+ let Ok(path_prefix) = path_prefix else { break };
+
+ self.forcibly_load_paths(&[path_prefix.clone()]).await;
+
+ let abs_path =
+ {
+ let mut state = self.state.lock();
+ state.path_prefixes_to_scan.insert(path_prefix.clone());
+ state.snapshot.abs_path.join(path_prefix)
+ };
+ if let Some(abs_path) = self.fs.canonicalize(&abs_path).await.log_err() {
+ self.process_events(vec![abs_path]).await;
+ }
+ }
+
+ events = fs_events_rx.next().fuse() => {
let Some(events) = events else { break };
let mut paths = events.into_iter().map(|e| e.path).collect::<Vec<_>>();
- while let Poll::Ready(Some(more_events)) = futures::poll!(events_rx.next()) {
+ while let Poll::Ready(Some(more_events)) = futures::poll!(fs_events_rx.next()) {
paths.extend(more_events.into_iter().map(|e| e.path));
}
self.process_events(paths.clone()).await;
@@ -2917,56 +3097,114 @@ impl BackgroundScanner {
}
}
- async fn process_refresh_request(&self, paths: Vec<PathBuf>, barrier: barrier::Sender) -> bool {
- self.reload_entries_for_paths(paths, None).await;
- self.send_status_update(false, Some(barrier))
+ async fn process_scan_request(&self, request: ScanRequest, scanning: bool) -> bool {
+ log::debug!("rescanning paths {:?}", request.relative_paths);
+
+ let root_path = self.forcibly_load_paths(&request.relative_paths).await;
+ let root_canonical_path = match self.fs.canonicalize(&root_path).await {
+ Ok(path) => path,
+ Err(err) => {
+ log::error!("failed to canonicalize root path: {}", err);
+ return false;
+ }
+ };
+
+ let abs_paths = request
+ .relative_paths
+ .into_iter()
+ .map(|path| {
+ if path.file_name().is_some() {
+ root_canonical_path.join(path)
+ } else {
+ root_canonical_path.clone()
+ }
+ })
+ .collect::<Vec<_>>();
+ self.reload_entries_for_paths(root_path, root_canonical_path, abs_paths, None)
+ .await;
+ self.send_status_update(scanning, Some(request.done))
}
- async fn process_events(&mut self, paths: Vec<PathBuf>) {
+ async fn process_events(&mut self, abs_paths: Vec<PathBuf>) {
+ log::debug!("received fs events {:?}", abs_paths);
+
+ let root_path = self.state.lock().snapshot.abs_path.clone();
+ let root_canonical_path = match self.fs.canonicalize(&root_path).await {
+ Ok(path) => path,
+ Err(err) => {
+ log::error!("failed to canonicalize root path: {}", err);
+ return;
+ }
+ };
+
let (scan_job_tx, scan_job_rx) = channel::unbounded();
let paths = self
- .reload_entries_for_paths(paths, Some(scan_job_tx.clone()))
+ .reload_entries_for_paths(
+ root_path,
+ root_canonical_path,
+ abs_paths,
+ Some(scan_job_tx.clone()),
+ )
.await;
drop(scan_job_tx);
self.scan_dirs(false, scan_job_rx).await;
- self.update_ignore_statuses().await;
+ let (scan_job_tx, scan_job_rx) = channel::unbounded();
+ self.update_ignore_statuses(scan_job_tx).await;
+ self.scan_dirs(false, scan_job_rx).await;
{
let mut state = self.state.lock();
-
- if let Some(paths) = paths {
- for path in paths {
- self.reload_git_repo(&path, &mut *state, self.fs.as_ref());
- }
+ state.reload_repositories(&paths, self.fs.as_ref());
+ state.snapshot.completed_scan_id = state.snapshot.scan_id;
+ for (_, entry_id) in mem::take(&mut state.removed_entry_ids) {
+ state.scanned_dirs.remove(&entry_id);
}
-
- let mut snapshot = &mut state.snapshot;
-
- let mut git_repositories = mem::take(&mut snapshot.git_repositories);
- git_repositories.retain(|work_directory_id, _| {
- snapshot
- .entry_for_id(*work_directory_id)
- .map_or(false, |entry| {
- snapshot.entry_for_path(entry.path.join(*DOT_GIT)).is_some()
- })
- });
- snapshot.git_repositories = git_repositories;
-
- let mut git_repository_entries = mem::take(&mut snapshot.snapshot.repository_entries);
- git_repository_entries.retain(|_, entry| {
- snapshot
- .git_repositories
- .get(&entry.work_directory.0)
- .is_some()
- });
- snapshot.snapshot.repository_entries = git_repository_entries;
- snapshot.completed_scan_id = snapshot.scan_id;
}
self.send_status_update(false, None);
}
+ async fn forcibly_load_paths(&self, paths: &[Arc<Path>]) -> Arc<Path> {
+ let root_path;
+ let (scan_job_tx, mut scan_job_rx) = channel::unbounded();
+ {
+ let mut state = self.state.lock();
+ root_path = state.snapshot.abs_path.clone();
+ for path in paths {
+ for ancestor in path.ancestors() {
+ if let Some(entry) = state.snapshot.entry_for_path(ancestor) {
+ if entry.kind == EntryKind::UnloadedDir {
+ let abs_path = root_path.join(ancestor);
+ let ignore_stack =
+ state.snapshot.ignore_stack_for_abs_path(&abs_path, true);
+ let ancestor_inodes =
+ state.snapshot.ancestor_inodes_for_path(&ancestor);
+ scan_job_tx
+ .try_send(ScanJob {
+ abs_path: abs_path.into(),
+ path: ancestor.into(),
+ ignore_stack,
+ scan_queue: scan_job_tx.clone(),
+ ancestor_inodes,
+ is_external: entry.is_external,
+ })
+ .unwrap();
+ state.paths_to_scan.insert(path.clone());
+ break;
+ }
+ }
+ }
+ }
+ drop(scan_job_tx);
+ }
+ while let Some(job) = scan_job_rx.next().await {
+ self.scan_dir(&job).await.log_err();
+ }
+ self.state.lock().paths_to_scan.clear();
+ root_path
+ }
+
async fn scan_dirs(
&self,
enable_progress_updates: bool,
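
`forcibly_load_paths` above walks each requested path's ancestors from deepest to shallowest; because an unloaded directory has no descendants in the snapshot, the first ancestor found with `EntryKind::UnloadedDir` is the unique point where loading stopped, and that is where a scan job is queued. A small sketch of the ancestor walk, assuming a caller-supplied predicate in place of the snapshot lookup:

```rust
use std::path::Path;

// Path::ancestors() yields the path itself first, then its parents, so
// find() returns the deepest ancestor that is still an unloaded directory.
fn deepest_unloaded<'a>(
    path: &'a Path,
    is_unloaded_dir: impl Fn(&Path) -> bool,
) -> Option<&'a Path> {
    path.ancestors().find(|candidate| is_unloaded_dir(candidate))
}
```
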
@@ -1,6 +1,6 @@
use crate::{
worktree::{Event, Snapshot, WorktreeHandle},
- EntryKind, PathChange, Worktree,
+ Entry, EntryKind, PathChange, Worktree,
};
use anyhow::Result;
use client::Client;
@@ -8,12 +8,14 @@ use fs::{repository::GitFileStatus, FakeFs, Fs, RealFs, RemoveOptions};
use git::GITIGNORE;
use gpui::{executor::Deterministic, ModelContext, Task, TestAppContext};
use parking_lot::Mutex;
+use postage::stream::Stream;
use pretty_assertions::assert_eq;
use rand::prelude::*;
use serde_json::json;
use std::{
env,
fmt::Write,
+ mem,
path::{Path, PathBuf},
sync::Arc,
};
@@ -34,11 +36,8 @@ async fn test_traversal(cx: &mut TestAppContext) {
)
.await;
- let http_client = FakeHttpClient::with_404_response();
- let client = cx.read(|cx| Client::new(http_client, cx));
-
let tree = Worktree::local(
- client,
+ build_client(cx),
Path::new("/root"),
true,
fs,
@@ -107,11 +106,8 @@ async fn test_descendent_entries(cx: &mut TestAppContext) {
)
.await;
- let http_client = FakeHttpClient::with_404_response();
- let client = cx.read(|cx| Client::new(http_client, cx));
-
let tree = Worktree::local(
- client,
+ build_client(cx),
Path::new("/root"),
true,
fs,
@@ -154,7 +150,18 @@ async fn test_descendent_entries(cx: &mut TestAppContext) {
.collect::<Vec<_>>(),
vec![Path::new("g"), Path::new("g/h"),]
);
+ });
+
+ // Expand gitignored directory.
+ tree.read_with(cx, |tree, _| {
+ tree.as_local()
+ .unwrap()
+ .refresh_entries_for_paths(vec![Path::new("i/j").into()])
+ })
+ .recv()
+ .await;
+ tree.read_with(cx, |tree, _| {
assert_eq!(
tree.descendent_entries(false, false, Path::new("i"))
.map(|entry| entry.path.as_ref())
@@ -196,9 +203,8 @@ async fn test_circular_symlinks(executor: Arc<Deterministic>, cx: &mut TestAppCo
fs.insert_symlink("/root/lib/a/lib", "..".into()).await;
fs.insert_symlink("/root/lib/b/lib", "..".into()).await;
- let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
let tree = Worktree::local(
- client,
+ build_client(cx),
Path::new("/root"),
true,
fs.clone(),
@@ -257,40 +263,489 @@ async fn test_circular_symlinks(executor: Arc<Deterministic>, cx: &mut TestAppCo
}
#[gpui::test]
-async fn test_rescan_with_gitignore(cx: &mut TestAppContext) {
- // .gitignores are handled explicitly by Zed and do not use the git
- // machinery that the git_tests module checks
- let parent_dir = temp_tree(json!({
- ".gitignore": "ancestor-ignored-file1\nancestor-ignored-file2\n",
- "tree": {
- ".git": {},
- ".gitignore": "ignored-dir\n",
- "tracked-dir": {
- "tracked-file1": "",
- "ancestor-ignored-file1": "",
+async fn test_symlinks_pointing_outside(cx: &mut TestAppContext) {
+ let fs = FakeFs::new(cx.background());
+ fs.insert_tree(
+ "/root",
+ json!({
+ "dir1": {
+ "deps": {
+ // symlinks here
+ },
+ "src": {
+ "a.rs": "",
+ "b.rs": "",
+ },
},
- "ignored-dir": {
- "ignored-file1": ""
+ "dir2": {
+ "src": {
+ "c.rs": "",
+ "d.rs": "",
+ }
+ },
+ "dir3": {
+ "deps": {},
+ "src": {
+ "e.rs": "",
+ "f.rs": "",
+ },
}
- }
- }));
- let dir = parent_dir.path().join("tree");
+ }),
+ )
+ .await;
- let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
+ // These symlinks point to directories outside of the worktree's root, dir1.
+ fs.insert_symlink("/root/dir1/deps/dep-dir2", "../../dir2".into())
+ .await;
+ fs.insert_symlink("/root/dir1/deps/dep-dir3", "../../dir3".into())
+ .await;
let tree = Worktree::local(
- client,
- dir.as_path(),
+ build_client(cx),
+ Path::new("/root/dir1"),
true,
- Arc::new(RealFs),
+ fs.clone(),
Default::default(),
&mut cx.to_async(),
)
.await
.unwrap();
+
cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
.await;
- tree.flush_fs_events(cx).await;
+
+ let tree_updates = Arc::new(Mutex::new(Vec::new()));
+ tree.update(cx, |_, cx| {
+ let tree_updates = tree_updates.clone();
+ cx.subscribe(&tree, move |_, _, event, _| {
+ if let Event::UpdatedEntries(update) = event {
+ tree_updates.lock().extend(
+ update
+ .iter()
+ .map(|(path, _, change)| (path.clone(), *change)),
+ );
+ }
+ })
+ .detach();
+ });
+
+ // The symlinked directories are not scanned by default.
+ tree.read_with(cx, |tree, _| {
+ assert_eq!(
+ tree.entries(true)
+ .map(|entry| (entry.path.as_ref(), entry.is_external))
+ .collect::<Vec<_>>(),
+ vec![
+ (Path::new(""), false),
+ (Path::new("deps"), false),
+ (Path::new("deps/dep-dir2"), true),
+ (Path::new("deps/dep-dir3"), true),
+ (Path::new("src"), false),
+ (Path::new("src/a.rs"), false),
+ (Path::new("src/b.rs"), false),
+ ]
+ );
+
+ assert_eq!(
+ tree.entry_for_path("deps/dep-dir2").unwrap().kind,
+ EntryKind::UnloadedDir
+ );
+ });
+
+ // Expand one of the symlinked directories.
+ tree.read_with(cx, |tree, _| {
+ tree.as_local()
+ .unwrap()
+ .refresh_entries_for_paths(vec![Path::new("deps/dep-dir3").into()])
+ })
+ .recv()
+ .await;
+
+ // The expanded directory's contents are loaded. Subdirectories are
+ // not scanned yet.
+ tree.read_with(cx, |tree, _| {
+ assert_eq!(
+ tree.entries(true)
+ .map(|entry| (entry.path.as_ref(), entry.is_external))
+ .collect::<Vec<_>>(),
+ vec![
+ (Path::new(""), false),
+ (Path::new("deps"), false),
+ (Path::new("deps/dep-dir2"), true),
+ (Path::new("deps/dep-dir3"), true),
+ (Path::new("deps/dep-dir3/deps"), true),
+ (Path::new("deps/dep-dir3/src"), true),
+ (Path::new("src"), false),
+ (Path::new("src/a.rs"), false),
+ (Path::new("src/b.rs"), false),
+ ]
+ );
+ });
+ assert_eq!(
+ mem::take(&mut *tree_updates.lock()),
+ &[
+ (Path::new("deps/dep-dir3").into(), PathChange::Loaded),
+ (Path::new("deps/dep-dir3/deps").into(), PathChange::Loaded),
+ (Path::new("deps/dep-dir3/src").into(), PathChange::Loaded)
+ ]
+ );
+
+ // Expand a subdirectory of one of the symlinked directories.
+ tree.read_with(cx, |tree, _| {
+ tree.as_local()
+ .unwrap()
+ .refresh_entries_for_paths(vec![Path::new("deps/dep-dir3/src").into()])
+ })
+ .recv()
+ .await;
+
+ // The expanded subdirectory's contents are loaded.
+ tree.read_with(cx, |tree, _| {
+ assert_eq!(
+ tree.entries(true)
+ .map(|entry| (entry.path.as_ref(), entry.is_external))
+ .collect::<Vec<_>>(),
+ vec![
+ (Path::new(""), false),
+ (Path::new("deps"), false),
+ (Path::new("deps/dep-dir2"), true),
+ (Path::new("deps/dep-dir3"), true),
+ (Path::new("deps/dep-dir3/deps"), true),
+ (Path::new("deps/dep-dir3/src"), true),
+ (Path::new("deps/dep-dir3/src/e.rs"), true),
+ (Path::new("deps/dep-dir3/src/f.rs"), true),
+ (Path::new("src"), false),
+ (Path::new("src/a.rs"), false),
+ (Path::new("src/b.rs"), false),
+ ]
+ );
+ });
+
+ assert_eq!(
+ mem::take(&mut *tree_updates.lock()),
+ &[
+ (Path::new("deps/dep-dir3/src").into(), PathChange::Loaded),
+ (
+ Path::new("deps/dep-dir3/src/e.rs").into(),
+ PathChange::Loaded
+ ),
+ (
+ Path::new("deps/dep-dir3/src/f.rs").into(),
+ PathChange::Loaded
+ )
+ ]
+ );
+}
+
+#[gpui::test]
+async fn test_open_gitignored_files(cx: &mut TestAppContext) {
+ let fs = FakeFs::new(cx.background());
+ fs.insert_tree(
+ "/root",
+ json!({
+ ".gitignore": "node_modules\n",
+ "one": {
+ "node_modules": {
+ "a": {
+ "a1.js": "a1",
+ "a2.js": "a2",
+ },
+ "b": {
+ "b1.js": "b1",
+ "b2.js": "b2",
+ },
+ },
+ },
+ "two": {
+ "x.js": "",
+ "y.js": "",
+ },
+ }),
+ )
+ .await;
+
+ let tree = Worktree::local(
+ build_client(cx),
+ Path::new("/root"),
+ true,
+ fs.clone(),
+ Default::default(),
+ &mut cx.to_async(),
+ )
+ .await
+ .unwrap();
+
+ cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
+ .await;
+
+ tree.read_with(cx, |tree, _| {
+ assert_eq!(
+ tree.entries(true)
+ .map(|entry| (entry.path.as_ref(), entry.is_ignored))
+ .collect::<Vec<_>>(),
+ vec![
+ (Path::new(""), false),
+ (Path::new(".gitignore"), false),
+ (Path::new("one"), false),
+ (Path::new("one/node_modules"), true),
+ (Path::new("two"), false),
+ (Path::new("two/x.js"), false),
+ (Path::new("two/y.js"), false),
+ ]
+ );
+ });
+
+ // Open a file that is nested inside of a gitignored directory that
+ // has not yet been expanded.
+ let prev_read_dir_count = fs.read_dir_call_count();
+ let buffer = tree
+ .update(cx, |tree, cx| {
+ tree.as_local_mut()
+ .unwrap()
+ .load_buffer(0, "one/node_modules/b/b1.js".as_ref(), cx)
+ })
+ .await
+ .unwrap();
+
+ tree.read_with(cx, |tree, cx| {
+ assert_eq!(
+ tree.entries(true)
+ .map(|entry| (entry.path.as_ref(), entry.is_ignored))
+ .collect::<Vec<_>>(),
+ vec![
+ (Path::new(""), false),
+ (Path::new(".gitignore"), false),
+ (Path::new("one"), false),
+ (Path::new("one/node_modules"), true),
+ (Path::new("one/node_modules/a"), true),
+ (Path::new("one/node_modules/b"), true),
+ (Path::new("one/node_modules/b/b1.js"), true),
+ (Path::new("one/node_modules/b/b2.js"), true),
+ (Path::new("two"), false),
+ (Path::new("two/x.js"), false),
+ (Path::new("two/y.js"), false),
+ ]
+ );
+
+ assert_eq!(
+ buffer.read(cx).file().unwrap().path().as_ref(),
+ Path::new("one/node_modules/b/b1.js")
+ );
+
+ // Only the newly-expanded directories are scanned.
+ assert_eq!(fs.read_dir_call_count() - prev_read_dir_count, 2);
+ });
+
+ // Open another file in a different subdirectory of the same
+ // gitignored directory.
+ let prev_read_dir_count = fs.read_dir_call_count();
+ let buffer = tree
+ .update(cx, |tree, cx| {
+ tree.as_local_mut()
+ .unwrap()
+ .load_buffer(0, "one/node_modules/a/a2.js".as_ref(), cx)
+ })
+ .await
+ .unwrap();
+
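+ // The other subdirectory's contents are now loaded as well.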
+ tree.read_with(cx, |tree, cx| {
+ assert_eq!(
+ tree.entries(true)
+ .map(|entry| (entry.path.as_ref(), entry.is_ignored))
+ .collect::<Vec<_>>(),
+ vec![
+ (Path::new(""), false),
+ (Path::new(".gitignore"), false),
+ (Path::new("one"), false),
+ (Path::new("one/node_modules"), true),
+ (Path::new("one/node_modules/a"), true),
+ (Path::new("one/node_modules/a/a1.js"), true),
+ (Path::new("one/node_modules/a/a2.js"), true),
+ (Path::new("one/node_modules/b"), true),
+ (Path::new("one/node_modules/b/b1.js"), true),
+ (Path::new("one/node_modules/b/b2.js"), true),
+ (Path::new("two"), false),
+ (Path::new("two/x.js"), false),
+ (Path::new("two/y.js"), false),
+ ]
+ );
+
+ assert_eq!(
+ buffer.read(cx).file().unwrap().path().as_ref(),
+ Path::new("one/node_modules/a/a2.js")
+ );
+
+ // Only the newly-expanded directory is scanned.
+ assert_eq!(fs.read_dir_call_count() - prev_read_dir_count, 1);
+ });
+}
+
+#[gpui::test]
+async fn test_dirs_no_longer_ignored(cx: &mut TestAppContext) {
+ let fs = FakeFs::new(cx.background());
+ fs.insert_tree(
+ "/root",
+ json!({
+ ".gitignore": "node_modules\n",
+ "a": {
+ "a.js": "",
+ },
+ "b": {
+ "b.js": "",
+ },
+ "node_modules": {
+ "c": {
+ "c.js": "",
+ },
+ "d": {
+ "d.js": "",
+ "e": {
+ "e1.js": "",
+ "e2.js": "",
+ },
+ "f": {
+ "f1.js": "",
+ "f2.js": "",
+ }
+ },
+ },
+ }),
+ )
+ .await;
+
+ let tree = Worktree::local(
+ build_client(cx),
+ Path::new("/root"),
+ true,
+ fs.clone(),
+ Default::default(),
+ &mut cx.to_async(),
+ )
+ .await
+ .unwrap();
+
+ cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
+ .await;
+
+ // Refresh the entry for a file within the gitignored directory, forcing some
+ // of its subdirectories to be read, but not all.
+ let read_dir_count_1 = fs.read_dir_call_count();
+ tree.read_with(cx, |tree, _| {
+ tree.as_local()
+ .unwrap()
+ .refresh_entries_for_paths(vec![Path::new("node_modules/d/d.js").into()])
+ })
+ .recv()
+ .await;
+
+ // Those subdirectories are now loaded.
+ tree.read_with(cx, |tree, _| {
+ assert_eq!(
+ tree.entries(true)
+ .map(|e| (e.path.as_ref(), e.is_ignored))
+ .collect::<Vec<_>>(),
+ &[
+ (Path::new(""), false),
+ (Path::new(".gitignore"), false),
+ (Path::new("a"), false),
+ (Path::new("a/a.js"), false),
+ (Path::new("b"), false),
+ (Path::new("b/b.js"), false),
+ (Path::new("node_modules"), true),
+ (Path::new("node_modules/c"), true),
+ (Path::new("node_modules/d"), true),
+ (Path::new("node_modules/d/d.js"), true),
+ (Path::new("node_modules/d/e"), true),
+ (Path::new("node_modules/d/f"), true),
+ ]
+ );
+ });
+ let read_dir_count_2 = fs.read_dir_call_count();
+ assert_eq!(read_dir_count_2 - read_dir_count_1, 2);
+
+ // Update the gitignore so that node_modules is no longer ignored,
+ // but one of its subdirectories still is.
+ fs.save("/root/.gitignore".as_ref(), &"e".into(), Default::default())
+ .await
+ .unwrap();
+ cx.foreground().run_until_parked();
+
+ // All of the directories that are no longer ignored are now loaded.
+ tree.read_with(cx, |tree, _| {
+ assert_eq!(
+ tree.entries(true)
+ .map(|e| (e.path.as_ref(), e.is_ignored))
+ .collect::<Vec<_>>(),
+ &[
+ (Path::new(""), false),
+ (Path::new(".gitignore"), false),
+ (Path::new("a"), false),
+ (Path::new("a/a.js"), false),
+ (Path::new("b"), false),
+ (Path::new("b/b.js"), false),
+ // This directory is no longer ignored
+ (Path::new("node_modules"), false),
+ (Path::new("node_modules/c"), false),
+ (Path::new("node_modules/c/c.js"), false),
+ (Path::new("node_modules/d"), false),
+ (Path::new("node_modules/d/d.js"), false),
+ // This subdirectory is now ignored
+ (Path::new("node_modules/d/e"), true),
+ (Path::new("node_modules/d/f"), false),
+ (Path::new("node_modules/d/f/f1.js"), false),
+ (Path::new("node_modules/d/f/f2.js"), false),
+ ]
+ );
+ });
+
+ // Each of the newly-loaded directories is scanned only once.
+ let read_dir_count_3 = fs.read_dir_call_count();
+ assert_eq!(read_dir_count_3 - read_dir_count_2, 2);
+}
+
+#[gpui::test(iterations = 10)]
+async fn test_rescan_with_gitignore(cx: &mut TestAppContext) {
+ let fs = FakeFs::new(cx.background());
+ fs.insert_tree(
+ "/root",
+ json!({
+ ".gitignore": "ancestor-ignored-file1\nancestor-ignored-file2\n",
+ "tree": {
+ ".git": {},
+ ".gitignore": "ignored-dir\n",
+ "tracked-dir": {
+ "tracked-file1": "",
+ "ancestor-ignored-file1": "",
+ },
+ "ignored-dir": {
+ "ignored-file1": ""
+ }
+ }
+ }),
+ )
+ .await;
+
+ let tree = Worktree::local(
+ build_client(cx),
+ "/root/tree".as_ref(),
+ true,
+ fs.clone(),
+ Default::default(),
+ &mut cx.to_async(),
+ )
+ .await
+ .unwrap();
+ cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
+ .await;
+
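+ // Expand the gitignored directory so that its contents are included
+ // in the worktree.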
+ tree.read_with(cx, |tree, _| {
+ tree.as_local()
+ .unwrap()
+ .refresh_entries_for_paths(vec![Path::new("ignored-dir").into()])
+ })
+ .recv()
+ .await;
+
cx.read(|cx| {
let tree = tree.read(cx);
assert!(
@@ -311,10 +766,26 @@ async fn test_rescan_with_gitignore(cx: &mut TestAppContext) {
);
});
- std::fs::write(dir.join("tracked-dir/tracked-file2"), "").unwrap();
- std::fs::write(dir.join("tracked-dir/ancestor-ignored-file2"), "").unwrap();
- std::fs::write(dir.join("ignored-dir/ignored-file2"), "").unwrap();
- tree.flush_fs_events(cx).await;
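+ // Create the new files through the FakeFs so that the worktree
+ // observes the changes once the foreground executor runs.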
+ fs.create_file(
+ "/root/tree/tracked-dir/tracked-file2".as_ref(),
+ Default::default(),
+ )
+ .await
+ .unwrap();
+ fs.create_file(
+ "/root/tree/tracked-dir/ancestor-ignored-file2".as_ref(),
+ Default::default(),
+ )
+ .await
+ .unwrap();
+ fs.create_file(
+ "/root/tree/ignored-dir/ignored-file2".as_ref(),
+ Default::default(),
+ )
+ .await
+ .unwrap();
+
+ cx.foreground().run_until_parked();
cx.read(|cx| {
let tree = tree.read(cx);
assert!(
@@ -346,10 +817,8 @@ async fn test_write_file(cx: &mut TestAppContext) {
"ignored-dir": {}
}));
- let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
-
let tree = Worktree::local(
- client,
+ build_client(cx),
dir.path(),
true,
Arc::new(RealFs),
@@ -393,8 +862,6 @@ async fn test_write_file(cx: &mut TestAppContext) {
#[gpui::test(iterations = 30)]
async fn test_create_directory_during_initial_scan(cx: &mut TestAppContext) {
- let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
-
let fs = FakeFs::new(cx.background());
fs.insert_tree(
"/root",
@@ -407,7 +874,7 @@ async fn test_create_directory_during_initial_scan(cx: &mut TestAppContext) {
.await;
let tree = Worktree::local(
- client,
+ build_client(cx),
"/root".as_ref(),
true,
fs,
@@ -472,9 +939,8 @@ async fn test_random_worktree_operations_during_initial_scan(
}
log::info!("generated initial tree");
- let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
let worktree = Worktree::local(
- client.clone(),
+ build_client(cx),
root_dir,
true,
fs.clone(),
@@ -506,7 +972,7 @@ async fn test_random_worktree_operations_during_initial_scan(
.await
.log_err();
worktree.read_with(cx, |tree, _| {
- tree.as_local().unwrap().snapshot().check_invariants()
+ tree.as_local().unwrap().snapshot().check_invariants(true)
});
if rng.gen_bool(0.6) {
@@ -523,7 +989,7 @@ async fn test_random_worktree_operations_during_initial_scan(
let final_snapshot = worktree.read_with(cx, |tree, _| {
let tree = tree.as_local().unwrap();
let snapshot = tree.snapshot();
- snapshot.check_invariants();
+ snapshot.check_invariants(true);
snapshot
});
@@ -562,9 +1028,8 @@ async fn test_random_worktree_changes(cx: &mut TestAppContext, mut rng: StdRng)
}
log::info!("generated initial tree");
- let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
let worktree = Worktree::local(
- client.clone(),
+ build_client(cx),
root_dir,
true,
fs.clone(),
@@ -627,12 +1092,17 @@ async fn test_random_worktree_changes(cx: &mut TestAppContext, mut rng: StdRng)
log::info!("quiescing");
fs.as_fake().flush_events(usize::MAX);
cx.foreground().run_until_parked();
+
let snapshot = worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
- snapshot.check_invariants();
+ snapshot.check_invariants(true);
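+ // Record which paths have been expanded so that a newly-created
+ // worktree can be brought to an equivalent state below.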
+ let expanded_paths = snapshot
+ .expanded_entries()
+ .map(|e| e.path.clone())
+ .collect::<Vec<_>>();
{
let new_worktree = Worktree::local(
- client.clone(),
+ build_client(cx),
root_dir,
true,
fs.clone(),
@@ -644,6 +1114,14 @@ async fn test_random_worktree_changes(cx: &mut TestAppContext, mut rng: StdRng)
new_worktree
.update(cx, |tree, _| tree.as_local_mut().unwrap().scan_complete())
.await;
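+ // Expand the same paths that were expanded in the original worktree
+ // before comparing the two snapshots.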
+ new_worktree
+ .update(cx, |tree, _| {
+ tree.as_local_mut()
+ .unwrap()
+ .refresh_entries_for_paths(expanded_paths)
+ })
+ .recv()
+ .await;
let new_snapshot =
new_worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
assert_eq!(
@@ -660,11 +1138,25 @@ async fn test_random_worktree_changes(cx: &mut TestAppContext, mut rng: StdRng)
}
assert_eq!(
- prev_snapshot.entries(true).collect::<Vec<_>>(),
- snapshot.entries(true).collect::<Vec<_>>(),
+ prev_snapshot
+ .entries(true)
+ .map(ignore_pending_dir)
+ .collect::<Vec<_>>(),
+ snapshot
+ .entries(true)
+ .map(ignore_pending_dir)
+ .collect::<Vec<_>>(),
"wrong updates after snapshot {i}: {updates:#?}",
);
}
+
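+ // A directory may be a `PendingDir` in one snapshot and fully scanned
+ // in the other, so normalize the kind before comparing.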
+ fn ignore_pending_dir(entry: &Entry) -> Entry {
+ let mut entry = entry.clone();
+ if entry.kind.is_dir() {
+ entry.kind = EntryKind::Dir
+ }
+ entry
+ }
}
// The worktree's `UpdatedEntries` event can be used to follow along with
@@ -679,7 +1171,6 @@ fn check_worktree_change_events(tree: &mut Worktree, cx: &mut ModelContext<Workt
Ok(ix) | Err(ix) => ix,
};
match change_type {
- PathChange::Loaded => entries.insert(ix, entry.unwrap()),
PathChange::Added => entries.insert(ix, entry.unwrap()),
PathChange::Removed => drop(entries.remove(ix)),
PathChange::Updated => {
@@ -688,7 +1179,7 @@ fn check_worktree_change_events(tree: &mut Worktree, cx: &mut ModelContext<Workt
assert_eq!(existing_entry.path, entry.path);
*existing_entry = entry;
}
- PathChange::AddedOrUpdated => {
+ PathChange::AddedOrUpdated | PathChange::Loaded => {
let entry = entry.unwrap();
if entries.get(ix).map(|e| &e.path) == Some(&entry.path) {
*entries.get_mut(ix).unwrap() = entry;
@@ -947,10 +1438,8 @@ async fn test_rename_work_directory(cx: &mut TestAppContext) {
}));
let root_path = root.path();
- let http_client = FakeHttpClient::with_404_response();
- let client = cx.read(|cx| Client::new(http_client, cx));
let tree = Worktree::local(
- client,
+ build_client(cx),
root_path,
true,
Arc::new(RealFs),
@@ -1026,10 +1515,8 @@ async fn test_git_repository_for_path(cx: &mut TestAppContext) {
},
}));
- let http_client = FakeHttpClient::with_404_response();
- let client = cx.read(|cx| Client::new(http_client, cx));
let tree = Worktree::local(
- client,
+ build_client(cx),
root.path(),
true,
Arc::new(RealFs),
@@ -1150,10 +1637,8 @@ async fn test_git_status(deterministic: Arc<Deterministic>, cx: &mut TestAppCont
}));
- let http_client = FakeHttpClient::with_404_response();
- let client = cx.read(|cx| Client::new(http_client, cx));
let tree = Worktree::local(
- client,
+ build_client(cx),
root.path(),
true,
Arc::new(RealFs),
@@ -1357,10 +1842,8 @@ async fn test_propagate_git_statuses(cx: &mut TestAppContext) {
],
);
- let http_client = FakeHttpClient::with_404_response();
- let client = cx.read(|cx| Client::new(http_client, cx));
let tree = Worktree::local(
- client,
+ build_client(cx),
Path::new("/root"),
true,
fs.clone(),
@@ -1439,6 +1922,11 @@ async fn test_propagate_git_statuses(cx: &mut TestAppContext) {
}
}
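+// Shared by the tests above: builds a client whose fake HTTP client
+// responds to every request with a 404.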
+fn build_client(cx: &mut TestAppContext) -> Arc<Client> {
+ let http_client = FakeHttpClient::with_404_response();
+ cx.read(|cx| Client::new(http_client, cx))
+}
+
#[track_caller]
fn git_init(path: &Path) -> git2::Repository {
git2::Repository::init(path).expect("Failed to initialize git repository")
@@ -411,17 +411,23 @@ impl ProjectPanel {
fn expand_selected_entry(&mut self, _: &ExpandSelectedEntry, cx: &mut ViewContext<Self>) {
if let Some((worktree, entry)) = self.selected_entry(cx) {
if entry.is_dir() {
+ let worktree_id = worktree.id();
+ let entry_id = entry.id;
let expanded_dir_ids =
- if let Some(expanded_dir_ids) = self.expanded_dir_ids.get_mut(&worktree.id()) {
+ if let Some(expanded_dir_ids) = self.expanded_dir_ids.get_mut(&worktree_id) {
expanded_dir_ids
} else {
return;
};
- match expanded_dir_ids.binary_search(&entry.id) {
+ match expanded_dir_ids.binary_search(&entry_id) {
Ok(_) => self.select_next(&SelectNext, cx),
Err(ix) => {
- expanded_dir_ids.insert(ix, entry.id);
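+ // Ask the project to load the directory's contents before
+ // expanding it in the panel.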
+ self.project.update(cx, |project, cx| {
+ project.expand_entry(worktree_id, entry_id, cx);
+ });
+
+ expanded_dir_ids.insert(ix, entry_id);
self.update_visible_entries(None, cx);
cx.notify();
}
@@ -432,18 +438,20 @@ impl ProjectPanel {
fn collapse_selected_entry(&mut self, _: &CollapseSelectedEntry, cx: &mut ViewContext<Self>) {
if let Some((worktree, mut entry)) = self.selected_entry(cx) {
+ let worktree_id = worktree.id();
let expanded_dir_ids =
- if let Some(expanded_dir_ids) = self.expanded_dir_ids.get_mut(&worktree.id()) {
+ if let Some(expanded_dir_ids) = self.expanded_dir_ids.get_mut(&worktree_id) {
expanded_dir_ids
} else {
return;
};
loop {
- match expanded_dir_ids.binary_search(&entry.id) {
+ let entry_id = entry.id;
+ match expanded_dir_ids.binary_search(&entry_id) {
Ok(ix) => {
expanded_dir_ids.remove(ix);
- self.update_visible_entries(Some((worktree.id(), entry.id)), cx);
+ self.update_visible_entries(Some((worktree_id, entry_id)), cx);
cx.notify();
break;
}
@@ -464,14 +472,17 @@ impl ProjectPanel {
fn toggle_expanded(&mut self, entry_id: ProjectEntryId, cx: &mut ViewContext<Self>) {
if let Some(worktree_id) = self.project.read(cx).worktree_id_for_entry(entry_id, cx) {
if let Some(expanded_dir_ids) = self.expanded_dir_ids.get_mut(&worktree_id) {
- match expanded_dir_ids.binary_search(&entry_id) {
- Ok(ix) => {
- expanded_dir_ids.remove(ix);
- }
- Err(ix) => {
- expanded_dir_ids.insert(ix, entry_id);
+ self.project.update(cx, |project, cx| {
+ match expanded_dir_ids.binary_search(&entry_id) {
+ Ok(ix) => {
+ expanded_dir_ids.remove(ix);
+ }
+ Err(ix) => {
+ project.expand_entry(worktree_id, entry_id, cx);
+ expanded_dir_ids.insert(ix, entry_id);
+ }
}
- }
+ });
self.update_visible_entries(Some((worktree_id, entry_id)), cx);
cx.focus_self();
cx.notify();
@@ -939,10 +950,19 @@ impl ProjectPanel {
}
fn selected_entry<'a>(&self, cx: &'a AppContext) -> Option<(&'a Worktree, &'a project::Entry)> {
+ let (worktree, entry) = self.selected_entry_handle(cx)?;
+ Some((worktree.read(cx), entry))
+ }
+
+ fn selected_entry_handle<'a>(
+ &self,
+ cx: &'a AppContext,
+ ) -> Option<(ModelHandle<Worktree>, &'a project::Entry)> {
let selection = self.selection?;
let project = self.project.read(cx);
- let worktree = project.worktree_for_id(selection.worktree_id, cx)?.read(cx);
- Some((worktree, worktree.entry_for_id(selection.entry_id)?))
+ let worktree = project.worktree_for_id(selection.worktree_id, cx)?;
+ let entry = worktree.read(cx).entry_for_id(selection.entry_id)?;
+ Some((worktree, entry))
}
fn update_visible_entries(
@@ -1003,6 +1023,7 @@ impl ProjectPanel {
mtime: entry.mtime,
is_symlink: false,
is_ignored: false,
+ is_external: false,
git_status: entry.git_status,
});
}
@@ -1059,29 +1080,31 @@ impl ProjectPanel {
entry_id: ProjectEntryId,
cx: &mut ViewContext<Self>,
) {
- let project = self.project.read(cx);
- if let Some((worktree, expanded_dir_ids)) = project
- .worktree_for_id(worktree_id, cx)
- .zip(self.expanded_dir_ids.get_mut(&worktree_id))
- {
- let worktree = worktree.read(cx);
+ self.project.update(cx, |project, cx| {
+ if let Some((worktree, expanded_dir_ids)) = project
+ .worktree_for_id(worktree_id, cx)
+ .zip(self.expanded_dir_ids.get_mut(&worktree_id))
+ {
+ project.expand_entry(worktree_id, entry_id, cx);
+ let worktree = worktree.read(cx);
- if let Some(mut entry) = worktree.entry_for_id(entry_id) {
- loop {
- if let Err(ix) = expanded_dir_ids.binary_search(&entry.id) {
- expanded_dir_ids.insert(ix, entry.id);
- }
+ if let Some(mut entry) = worktree.entry_for_id(entry_id) {
+ loop {
+ if let Err(ix) = expanded_dir_ids.binary_search(&entry.id) {
+ expanded_dir_ids.insert(ix, entry.id);
+ }
- if let Some(parent_entry) =
- entry.path.parent().and_then(|p| worktree.entry_for_path(p))
- {
- entry = parent_entry;
- } else {
- break;
+ if let Some(parent_entry) =
+ entry.path.parent().and_then(|p| worktree.entry_for_path(p))
+ {
+ entry = parent_entry;
+ } else {
+ break;
+ }
}
}
}
- }
+ });
}
fn for_each_visible_entry(
@@ -1191,7 +1214,7 @@ impl ProjectPanel {
Flex::row()
.with_child(
- if kind == EntryKind::Dir {
+ if kind.is_dir() {
if details.is_expanded {
Svg::new("icons/chevron_down_8.svg").with_color(style.icon_color)
} else {
@@ -1288,7 +1311,7 @@ impl ProjectPanel {
})
.on_click(MouseButton::Left, move |event, this, cx| {
if !show_editor {
- if kind == EntryKind::Dir {
+ if kind.is_dir() {
this.toggle_expanded(entry_id, cx);
} else {
this.open_entry(entry_id, event.click_count > 1, cx);
@@ -2349,7 +2372,7 @@ mod tests {
}
let indent = " ".repeat(details.depth);
- let icon = if matches!(details.kind, EntryKind::Dir | EntryKind::PendingDir) {
+ let icon = if details.kind.is_dir() {
if details.is_expanded {
"v "
} else {
@@ -63,6 +63,8 @@ message Envelope {
CopyProjectEntry copy_project_entry = 47;
DeleteProjectEntry delete_project_entry = 48;
ProjectEntryResponse project_entry_response = 49;
+ ExpandProjectEntry expand_project_entry = 114;
+ ExpandProjectEntryResponse expand_project_entry_response = 115;
UpdateDiagnosticSummary update_diagnostic_summary = 50;
StartLanguageServer start_language_server = 51;
@@ -372,6 +374,15 @@ message DeleteProjectEntry {
uint64 entry_id = 2;
}
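+// Requests that the contents of the given directory entry be loaded.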
+message ExpandProjectEntry {
+ uint64 project_id = 1;
+ uint64 entry_id = 2;
+}
+
+message ExpandProjectEntryResponse {
+ uint64 worktree_scan_id = 1;
+}
+
message ProjectEntryResponse {
Entry entry = 1;
uint64 worktree_scan_id = 2;
@@ -1005,7 +1016,8 @@ message Entry {
Timestamp mtime = 5;
bool is_symlink = 6;
bool is_ignored = 7;
- optional GitStatus git_status = 8;
+ bool is_external = 8;
+ optional GitStatus git_status = 9;
}
message RepositoryEntry {
@@ -150,6 +150,7 @@ messages!(
(DeclineCall, Foreground),
(DeleteProjectEntry, Foreground),
(Error, Foreground),
+ (ExpandProjectEntry, Foreground),
(Follow, Foreground),
(FollowResponse, Foreground),
(FormatBuffers, Foreground),
@@ -200,6 +201,7 @@ messages!(
(Ping, Foreground),
(PrepareRename, Background),
(PrepareRenameResponse, Background),
+ (ExpandProjectEntryResponse, Foreground),
(ProjectEntryResponse, Foreground),
(RejoinRoom, Foreground),
(RejoinRoomResponse, Foreground),
@@ -255,6 +257,7 @@ request_messages!(
(CreateRoom, CreateRoomResponse),
(DeclineCall, Ack),
(DeleteProjectEntry, ProjectEntryResponse),
+ (ExpandProjectEntry, ExpandProjectEntryResponse),
(Follow, FollowResponse),
(FormatBuffers, FormatBuffersResponse),
(GetChannelMessages, GetChannelMessagesResponse),
@@ -311,6 +314,7 @@ entity_messages!(
CreateBufferForPeer,
CreateProjectEntry,
DeleteProjectEntry,
+ ExpandProjectEntry,
Follow,
FormatBuffers,
GetCodeActions,
@@ -6,4 +6,4 @@ pub use conn::Connection;
pub use peer::*;
mod macros;
-pub const PROTOCOL_VERSION: u32 = 58;
+pub const PROTOCOL_VERSION: u32 = 59;