Detailed changes
@@ -76,6 +76,7 @@ pub enum PathEventKind {
Removed,
Created,
Changed,
+ Rescan,
}
#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Debug)]
@@ -1737,6 +1738,10 @@ impl FakeFs {
self.state.lock().buffered_events.len()
}
+ pub fn clear_buffered_events(&self) {
+ self.state.lock().buffered_events.clear();
+ }
+
pub fn flush_events(&self, count: usize) {
self.state.lock().flush_events(count);
}
@@ -3,6 +3,7 @@ use parking_lot::Mutex;
use std::{
collections::{BTreeMap, HashMap},
ops::DerefMut,
+ path::Path,
sync::{Arc, OnceLock},
};
use util::{ResultExt, paths::SanitizedPath};
@@ -86,10 +87,12 @@ impl Watcher for FsWatcher {
#[cfg(target_os = "linux")]
let mode = notify::RecursiveMode::NonRecursive;
+ let registration_path = path.clone();
let registration_id = global({
- let path = path.clone();
+ let watch_path = path.clone();
+ let callback_path = path;
|g| {
- g.add(path, mode, move |event: &notify::Event| {
+ g.add(watch_path, mode, move |event: &notify::Event| {
log::trace!("watcher received event: {event:?}");
let kind = match event.kind {
EventKind::Create(_) => Some(PathEventKind::Created),
@@ -109,12 +112,27 @@ impl Watcher for FsWatcher {
})
.collect::<Vec<_>>();
+ let is_rescan_event = event.need_rescan();
+ if is_rescan_event {
+ log::warn!(
+ "filesystem watcher lost sync for {callback_path:?}; scheduling rescan"
+ );
+ // we only keep the first event per path below, this ensures it will be the rescan event
+ // we'll remove any existing pending events for the same reason once we have the lock below
+ path_events.retain(|p| &p.path != callback_path.as_ref());
+ path_events.push(PathEvent {
+ path: callback_path.to_path_buf(),
+ kind: Some(PathEventKind::Rescan),
+ });
+ }
+
if !path_events.is_empty() {
path_events.sort();
let mut pending_paths = pending_paths.lock();
if pending_paths.is_empty() {
tx.try_send(()).ok();
}
+ coalesce_pending_rescans(&mut pending_paths, &mut path_events);
util::extend_sorted(
&mut *pending_paths,
path_events,
@@ -126,7 +144,9 @@ impl Watcher for FsWatcher {
}
})??;
- self.registrations.lock().insert(path, registration_id);
+ self.registrations
+ .lock()
+ .insert(registration_path, registration_id);
Ok(())
}
@@ -141,6 +161,56 @@ impl Watcher for FsWatcher {
}
}
+/// Coalesces a newly received batch of filesystem events into the queue of
+/// pending events, collapsing redundant `Rescan` events: a rescan of a
+/// directory subsumes rescans (new or pending) of its strict descendants,
+/// and a new rescan replaces any pending event for the same path.
+///
+/// Mutates both vectors in place; does nothing when `path_events` contains
+/// no rescan event.
+fn coalesce_pending_rescans(pending_paths: &mut Vec<PathEvent>, path_events: &mut Vec<PathEvent>) {
+ // Fast path: nothing to coalesce unless the new batch carries a rescan.
+ if !path_events
+ .iter()
+ .any(|event| event.kind == Some(PathEventKind::Rescan))
+ {
+ return;
+ }
+
+ // Collect the batch's rescan paths, sorted so ancestors precede descendants.
+ let mut new_rescan_paths: Vec<std::path::PathBuf> = path_events
+ .iter()
+ .filter(|e| e.kind == Some(PathEventKind::Rescan))
+ .map(|e| e.path.clone())
+ .collect();
+ new_rescan_paths.sort_unstable();
+
+ // Drop rescans already covered by an ancestor rescan within the same batch;
+ // the sort above guarantees ancestors are pushed before their descendants.
+ let mut deduped_rescans: Vec<std::path::PathBuf> = Vec::with_capacity(new_rescan_paths.len());
+ for path in new_rescan_paths {
+ if deduped_rescans
+ .iter()
+ .any(|ancestor| path != *ancestor && path.starts_with(ancestor))
+ {
+ continue;
+ }
+ deduped_rescans.push(path);
+ }
+
+ // Drop new rescans that an already-pending ancestor rescan covers.
+ deduped_rescans.retain(|new_path| {
+ !pending_paths
+ .iter()
+ .any(|pending| is_covered_rescan(pending.kind, new_path, &pending.path))
+ });
+
+ // Conversely, each surviving rescan supersedes pending events for the same
+ // path as well as pending rescans of its descendants.
+ if !deduped_rescans.is_empty() {
+ pending_paths.retain(|pending| {
+ !deduped_rescans.iter().any(|rescan_path| {
+ pending.path == *rescan_path
+ || is_covered_rescan(pending.kind, &pending.path, rescan_path)
+ })
+ });
+ }
+
+ // Non-rescan events pass through untouched; only surviving rescans remain.
+ path_events.retain(|event| {
+ event.kind != Some(PathEventKind::Rescan) || deduped_rescans.contains(&event.path)
+ });
+}
+
+/// Returns true when `kind` is a rescan and `path` is a strict descendant of
+/// `ancestor` (component-wise prefix, never equal), i.e. a rescan rooted at
+/// `ancestor` already covers `path`.
+fn is_covered_rescan(kind: Option<PathEventKind>, path: &Path, ancestor: &Path) -> bool {
+ kind == Some(PathEventKind::Rescan) && path != ancestor && path.starts_with(ancestor)
+}
+
#[derive(Default, Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub struct WatcherRegistrationId(u32);
@@ -238,6 +308,97 @@ impl GlobalWatcher {
static FS_WATCHER_INSTANCE: OnceLock<anyhow::Result<GlobalWatcher, notify::Error>> =
OnceLock::new();
+#[cfg(test)]
+mod tests {
+ use super::*;
+ use std::path::PathBuf;
+
+ // Shorthand for a `Rescan` event at `path`.
+ fn rescan(path: &str) -> PathEvent {
+ PathEvent {
+ path: PathBuf::from(path),
+ kind: Some(PathEventKind::Rescan),
+ }
+ }
+
+ // Shorthand for a `Changed` event at `path`.
+ fn changed(path: &str) -> PathEvent {
+ PathEvent {
+ path: PathBuf::from(path),
+ kind: Some(PathEventKind::Changed),
+ }
+ }
+
+ // One table-driven scenario: the pending queue and incoming batch before
+ // coalescing, and the expected contents of both afterwards.
+ struct TestCase {
+ name: &'static str,
+ pending_paths: Vec<PathEvent>,
+ path_events: Vec<PathEvent>,
+ expected_pending_paths: Vec<PathEvent>,
+ expected_path_events: Vec<PathEvent>,
+ }
+
+ #[test]
+ fn test_coalesce_pending_rescans() {
+ let test_cases = [
+ TestCase {
+ name: "coalesces descendant rescans under pending ancestor",
+ pending_paths: vec![rescan("/root")],
+ path_events: vec![rescan("/root/child"), rescan("/root/child/grandchild")],
+ expected_pending_paths: vec![rescan("/root")],
+ expected_path_events: vec![],
+ },
+ TestCase {
+ name: "new ancestor rescan replaces pending descendant rescans",
+ pending_paths: vec![
+ changed("/other"),
+ rescan("/root/child"),
+ rescan("/root/child/grandchild"),
+ ],
+ path_events: vec![rescan("/root")],
+ expected_pending_paths: vec![changed("/other")],
+ expected_path_events: vec![rescan("/root")],
+ },
+ TestCase {
+ name: "same path rescan replaces pending non-rescan event",
+ pending_paths: vec![changed("/root")],
+ path_events: vec![rescan("/root")],
+ expected_pending_paths: vec![],
+ expected_path_events: vec![rescan("/root")],
+ },
+ TestCase {
+ name: "unrelated rescans are preserved",
+ pending_paths: vec![rescan("/root-a")],
+ path_events: vec![rescan("/root-b")],
+ expected_pending_paths: vec![rescan("/root-a")],
+ expected_path_events: vec![rescan("/root-b")],
+ },
+ TestCase {
+ name: "batch ancestor rescan replaces descendant rescan",
+ pending_paths: vec![],
+ path_events: vec![rescan("/root/child"), rescan("/root")],
+ expected_pending_paths: vec![],
+ expected_path_events: vec![rescan("/root")],
+ },
+ ];
+
+ for test_case in test_cases {
+ let mut pending_paths = test_case.pending_paths;
+ let mut path_events = test_case.path_events;
+
+ coalesce_pending_rescans(&mut pending_paths, &mut path_events);
+
+ assert_eq!(
+ pending_paths, test_case.expected_pending_paths,
+ "pending_paths mismatch for case: {}",
+ test_case.name
+ );
+ assert_eq!(
+ path_events, test_case.expected_path_events,
+ "path_events mismatch for case: {}",
+ test_case.name
+ );
+ }
+ }
+}
+
fn handle_event(event: Result<notify::Event, notify::Error>) {
log::trace!("global handle event: {event:?}");
// Filter out access events, which could lead to a weird bug on Linux after upgrading notify
@@ -1,10 +1,14 @@
use std::{
+ collections::BTreeSet,
io::Write,
path::{Path, PathBuf},
+ time::Duration,
};
+use futures::{FutureExt, StreamExt};
+
use fs::*;
-use gpui::BackgroundExecutor;
+use gpui::{BackgroundExecutor, TestAppContext};
use serde_json::json;
use tempfile::TempDir;
use util::path;
@@ -621,3 +625,90 @@ async fn test_realfs_symlink_loop_metadata(executor: BackgroundExecutor) {
assert!(!metadata.is_executable);
// don't care about len or mtime on symlinks?
}
+
+#[gpui::test]
+#[ignore = "stress test; run explicitly when needed"]
+async fn test_realfs_watch_stress_reports_missed_paths(
+ executor: BackgroundExecutor,
+ cx: &mut TestAppContext,
+) {
+ // Creates many watched directories, modifies every file, and asserts
+ // that if the watcher misses any paths, at least one Rescan event is
+ // reported so consumers can recover from dropped notifications.
+ const FILE_COUNT: usize = 32000;
+ cx.executor().allow_parking();
+
+ let fs = RealFs::new(None, executor.clone());
+ let temp_dir = TempDir::new().expect("create temp dir");
+ let root = temp_dir.path();
+
+ let mut file_paths = Vec::with_capacity(FILE_COUNT);
+ let mut expected_paths = BTreeSet::new();
+
+ for index in 0..FILE_COUNT {
+ let dir_path = root.join(format!("dir-{index:04}"));
+ let file_path = dir_path.join("file.txt");
+ fs.create_dir(&dir_path).await.expect("create watched dir");
+ fs.write(&file_path, b"before")
+ .await
+ .expect("create initial file");
+ expected_paths.insert(file_path.clone());
+ file_paths.push(file_path);
+ }
+
+ // Bind the watcher so it stays alive for the duration of the test.
+ let (mut events, watcher) = fs.watch(root, Duration::from_millis(10)).await;
+
+ for file_path in &expected_paths {
+ watcher
+ .add(file_path.parent().expect("file has parent"))
+ .expect("add explicit directory watch");
+ }
+
+ for (index, file_path) in file_paths.iter().enumerate() {
+ let content = format!("after-{index}");
+ fs.write(file_path, content.as_bytes())
+ .await
+ .expect("modify watched file");
+ }
+
+ let mut changed_paths = BTreeSet::new();
+ let mut rescan_count: u32 = 0;
+
+ // Poll until every expected path has been observed, or the stream has
+ // been idle for 1000 consecutive 10ms ticks (~10s). The idle counter
+ // resets whenever a batch arrives, so this bounds quiet time rather
+ // than total runtime.
+ let mut ticks = 0;
+ while ticks < 1000 {
+ if let Some(batch) = events.next().fuse().now_or_never().flatten() {
+ for event in batch {
+ if event.kind == Some(PathEventKind::Rescan) {
+ rescan_count += 1;
+ }
+ if expected_paths.contains(&event.path) {
+ changed_paths.insert(event.path);
+ }
+ }
+ if changed_paths.len() == expected_paths.len() {
+ break;
+ }
+ ticks = 0;
+ } else {
+ ticks += 1;
+ executor.timer(Duration::from_millis(10)).await;
+ }
+ }
+
+ let missed_paths: BTreeSet<_> = expected_paths.difference(&changed_paths).cloned().collect();
+
+ eprintln!(
+ "realfs watch stress: expected={}, observed={}, missed={}, rescan={}",
+ expected_paths.len(),
+ changed_paths.len(),
+ missed_paths.len(),
+ rescan_count
+ );
+
+ assert!(
+ missed_paths.is_empty() || rescan_count > 0,
+ "missed {} paths without rescan being reported",
+ missed_paths.len()
+ );
+}
@@ -9840,7 +9840,9 @@ impl LspStore {
let typ = match event.kind? {
PathEventKind::Created => lsp::FileChangeType::CREATED,
PathEventKind::Removed => lsp::FileChangeType::DELETED,
- PathEventKind::Changed => lsp::FileChangeType::CHANGED,
+ PathEventKind::Changed | PathEventKind::Rescan => {
+ lsp::FileChangeType::CHANGED
+ }
};
Some(lsp::FileEvent {
uri: file_path_to_lsp_url(&event.path).log_err()?,
@@ -26,7 +26,7 @@ use buffer_diff::{
};
use collections::{BTreeSet, HashMap, HashSet};
use encoding_rs;
-use fs::FakeFs;
+use fs::{FakeFs, PathEventKind};
use futures::{StreamExt, future};
use git::{
GitHostingProviderRegistry,
@@ -2072,6 +2072,97 @@ async fn test_language_server_tilde_path(cx: &mut gpui::TestAppContext) {
);
}
+// Verifies that a `Rescan` filesystem event on a file watched by a language
+// server is forwarded to the server as an ordinary `CHANGED` file change
+// (the LSP protocol has no rescan notion).
+#[gpui::test]
+async fn test_rescan_fs_change_is_reported_to_language_servers_as_changed(
+ cx: &mut gpui::TestAppContext,
+) {
+ init_test(cx);
+
+ let fs = FakeFs::new(cx.executor());
+ fs.insert_tree(
+ path!("/the-root"),
+ json!({
+ "Cargo.lock": "",
+ "src": {
+ "a.rs": "",
+ }
+ }),
+ )
+ .await;
+
+ let project = Project::test(fs.clone(), [path!("/the-root").as_ref()], cx).await;
+ let (language_registry, _lsp_store) = project.read_with(cx, |project, _| {
+ (project.languages().clone(), project.lsp_store())
+ });
+ language_registry.add(rust_lang());
+ let mut fake_servers = language_registry.register_fake_lsp(
+ "Rust",
+ FakeLspAdapter {
+ name: "the-language-server",
+ ..Default::default()
+ },
+ );
+
+ cx.executor().run_until_parked();
+
+ // Opening a Rust buffer starts the fake language server.
+ project
+ .update(cx, |project, cx| {
+ project.open_local_buffer_with_lsp(path!("/the-root/src/a.rs"), cx)
+ })
+ .await
+ .unwrap();
+
+ let fake_server = fake_servers.next().await.unwrap();
+ cx.executor().run_until_parked();
+
+ // Register a watcher for Cargo.lock and record every file change the
+ // server is notified about.
+ let file_changes = Arc::new(Mutex::new(Vec::new()));
+ fake_server
+ .request::<lsp::request::RegisterCapability>(
+ lsp::RegistrationParams {
+ registrations: vec![lsp::Registration {
+ id: Default::default(),
+ method: "workspace/didChangeWatchedFiles".to_string(),
+ register_options: serde_json::to_value(
+ lsp::DidChangeWatchedFilesRegistrationOptions {
+ watchers: vec![lsp::FileSystemWatcher {
+ glob_pattern: lsp::GlobPattern::String(
+ path!("/the-root/Cargo.lock").to_string(),
+ ),
+ kind: None,
+ }],
+ },
+ )
+ .ok(),
+ }],
+ },
+ DEFAULT_LSP_REQUEST_TIMEOUT,
+ )
+ .await
+ .into_response()
+ .unwrap();
+ fake_server.handle_notification::<lsp::notification::DidChangeWatchedFiles, _>({
+ let file_changes = file_changes.clone();
+ move |params, _| {
+ let mut file_changes = file_changes.lock();
+ file_changes.extend(params.changes);
+ }
+ });
+
+ // No changes have happened yet, so no notifications should be queued.
+ cx.executor().run_until_parked();
+ assert_eq!(mem::take(&mut *file_changes.lock()), &[]);
+
+ fs.emit_fs_event(path!("/the-root/Cargo.lock"), Some(PathEventKind::Rescan));
+ cx.executor().run_until_parked();
+
+ // The rescan must surface to the server as a plain CHANGED event.
+ assert_eq!(
+ &*file_changes.lock(),
+ &[lsp::FileEvent {
+ uri: lsp::Uri::from_file_path(path!("/the-root/Cargo.lock")).unwrap(),
+ typ: lsp::FileChangeType::CHANGED,
+ }]
+ );
+}
+
#[gpui::test]
async fn test_reporting_fs_changes_to_language_servers(cx: &mut gpui::TestAppContext) {
init_test(cx);
@@ -5,6 +5,61 @@ use futures::{StreamExt, channel::mpsc};
use gpui::{App, BackgroundExecutor, ReadGlobal};
use std::{path::PathBuf, sync::Arc, time::Duration};
+#[cfg(test)]
+mod tests {
+ use super::*;
+ use fs::FakeFs;
+
+ use gpui::TestAppContext;
+ use serde_json::json;
+ use std::path::Path;
+
+ // A `Rescan` event — whether targeted at the tracked file itself or at
+ // the watched config directory — must cause the tracked file's contents
+ // to be reloaded and re-sent, even when the underlying change events
+ // were lost (simulated here by clearing buffered events).
+ #[gpui::test]
+ async fn test_watch_config_dir_reloads_tracked_file_on_rescan(cx: &mut TestAppContext) {
+ cx.executor().allow_parking();
+
+ let fs = FakeFs::new(cx.background_executor.clone());
+ let config_dir = PathBuf::from("/root/config");
+ let settings_path = PathBuf::from("/root/config/settings.json");
+
+ fs.insert_tree(
+ Path::new("/root"),
+ json!({
+ "config": {
+ "settings.json": "A"
+ }
+ }),
+ )
+ .await;
+
+ let mut rx = watch_config_dir(
+ &cx.background_executor,
+ fs.clone(),
+ config_dir.clone(),
+ HashSet::from_iter([settings_path.clone()]),
+ );
+
+ // Initial contents are delivered on startup.
+ assert_eq!(rx.next().await.as_deref(), Some("A"));
+ cx.run_until_parked();
+
+ // Change the file while events are paused, then discard the buffered
+ // change events to simulate the watcher losing them.
+ fs.pause_events();
+ fs.insert_file(&settings_path, b"B".to_vec()).await;
+ fs.clear_buffered_events();
+
+ // A rescan on the file itself triggers a reload.
+ fs.emit_fs_event(&settings_path, Some(PathEventKind::Rescan));
+ fs.unpause_events_and_flush();
+ assert_eq!(rx.next().await.as_deref(), Some("B"));
+
+ fs.pause_events();
+ fs.insert_file(&settings_path, b"A".to_vec()).await;
+ fs.clear_buffered_events();
+
+ // A rescan on the containing directory also triggers a reload.
+ fs.emit_fs_event(&config_dir, Some(PathEventKind::Rescan));
+ fs.unpause_events_and_flush();
+ assert_eq!(rx.next().await.as_deref(), Some("A"));
+ }
+}
+
pub const EMPTY_THEME_NAME: &str = "empty-theme";
/// Settings for visual tests that use proper fonts instead of Courier.
@@ -139,8 +194,25 @@ pub fn watch_config_dir(
return;
}
}
+ Some(PathEventKind::Rescan) => {
+ for file_path in &config_paths {
+ let contents = fs.load(file_path).await.unwrap_or_default();
+ if tx.unbounded_send(contents).is_err() {
+ return;
+ }
+ }
+ }
_ => {}
}
+ } else if matches!(event.kind, Some(PathEventKind::Rescan))
+ && event.path == dir_path
+ {
+ for file_path in &config_paths {
+ let contents = fs.load(file_path).await.unwrap_or_default();
+ if tx.unbounded_send(contents).is_err() {
+ return;
+ }
+ }
}
}
}
@@ -267,6 +267,12 @@ struct BackgroundScannerState {
scanning_enabled: bool,
}
+#[derive(Clone, Debug, Eq, PartialEq)]
+struct EventRoot {
+ path: Arc<RelPath>,
+ was_rescanned: bool,
+}
+
#[derive(Debug, Clone)]
struct LocalRepositoryEntry {
work_directory_id: ProjectEntryId,
@@ -3880,7 +3886,7 @@ impl BackgroundScanner {
state.snapshot.completed_scan_id = state.snapshot.scan_id;
}
- self.send_status_update(false, SmallVec::new()).await;
+ self.send_status_update(false, SmallVec::new(), &[]).await;
// Process any any FS events that occurred while performing the initial scan.
// For these events, update events cannot be as precise, because we didn't
@@ -3893,14 +3899,17 @@ impl BackgroundScanner {
self.process_events(
paths
.into_iter()
- .filter(|e| e.kind.is_some())
- .map(Into::into)
+ .filter(|event| event.kind.is_some())
.collect(),
)
.await;
}
if let Some(abs_path) = containing_git_repository {
- self.process_events(vec![abs_path]).await;
+ self.process_events(vec![PathEvent {
+ path: abs_path,
+ kind: Some(fs::PathEventKind::Changed),
+ }])
+ .await;
}
// Continue processing events until the worktree is dropped.
@@ -3931,10 +3940,14 @@ impl BackgroundScanner {
};
if let Some(abs_path) = self.fs.canonicalize(&abs_path).await.log_err() {
- self.process_events(vec![abs_path]).await;
+ self.process_events(vec![PathEvent {
+ path: abs_path,
+ kind: Some(fs::PathEventKind::Changed),
+ }])
+ .await;
}
}
- self.send_status_update(false, request.done).await;
+ self.send_status_update(false, request.done, &[]).await;
}
paths = fs_events_rx.next().fuse() => {
@@ -3942,7 +3955,7 @@ impl BackgroundScanner {
while let Poll::Ready(Some(more_paths)) = futures::poll!(fs_events_rx.next()) {
paths.extend(more_paths);
}
- self.process_events(paths.into_iter().filter(|e| e.kind.is_some()).map(Into::into).collect()).await;
+ self.process_events(paths.into_iter().filter(|event| event.kind.is_some()).collect()).await;
}
_ = global_gitignore_events.next().fuse() => {
@@ -3999,11 +4012,10 @@ impl BackgroundScanner {
)
.await;
- self.send_status_update(scanning, request.done).await
+ self.send_status_update(scanning, request.done, &[]).await
}
- async fn process_events(&self, mut abs_paths: Vec<PathBuf>) {
- log::trace!("process events: {abs_paths:?}");
+ async fn process_events(&self, mut events: Vec<PathEvent>) {
let root_path = self.state.lock().await.snapshot.abs_path.clone();
let root_canonical_path = self.fs.canonicalize(root_path.as_path()).await;
let root_canonical_path = match &root_canonical_path {
@@ -4047,11 +4059,25 @@ impl BackgroundScanner {
let skipped_files_in_dot_git = [COMMIT_MESSAGE, INDEX_LOCK];
let skipped_dirs_in_dot_git = [FSMONITOR_DAEMON, LFS_DIR];
- let mut relative_paths = Vec::with_capacity(abs_paths.len());
+ let mut relative_paths = Vec::with_capacity(events.len());
let mut dot_git_abs_paths = Vec::new();
let mut work_dirs_needing_exclude_update = Vec::new();
- abs_paths.sort_unstable();
- abs_paths.dedup_by(|a, b| a.starts_with(b));
+ events.sort_unstable_by(|left, right| left.path.cmp(&right.path));
+ events.dedup_by(|left, right| {
+ if left.path == right.path {
+ if matches!(left.kind, Some(fs::PathEventKind::Rescan)) {
+ right.kind = left.kind;
+ }
+ true
+ } else if left.path.starts_with(&right.path) {
+ if matches!(left.kind, Some(fs::PathEventKind::Rescan)) {
+ right.kind = left.kind;
+ }
+ true
+ } else {
+ false
+ }
+ });
{
let snapshot = &self.state.lock().await.snapshot;
@@ -4067,8 +4093,8 @@ impl BackgroundScanner {
}
}
- for (ix, abs_path) in abs_paths.iter().enumerate() {
- let abs_path = &SanitizedPath::new(&abs_path);
+ for (ix, event) in events.iter().enumerate() {
+ let abs_path = &SanitizedPath::new(&event.path);
let mut is_git_related = false;
let mut dot_git_paths = None;
@@ -4168,11 +4194,14 @@ impl BackgroundScanner {
continue;
}
- relative_paths.push(relative_path.into_arc());
+ relative_paths.push(EventRoot {
+ path: relative_path.into_arc(),
+ was_rescanned: matches!(event.kind, Some(fs::PathEventKind::Rescan)),
+ });
}
for range_to_drop in ranges_to_drop.into_iter().rev() {
- abs_paths.drain(range_to_drop);
+ events.drain(range_to_drop);
}
}
@@ -4196,12 +4225,24 @@ impl BackgroundScanner {
self.state.lock().await.snapshot.scan_id += 1;
let (scan_job_tx, scan_job_rx) = channel::unbounded();
- log::debug!("received fs events {:?}", relative_paths);
+ log::debug!(
+ "received fs events {:?}",
+ relative_paths
+ .iter()
+ .map(|event_root| &event_root.path)
+ .collect::<Vec<_>>()
+ );
self.reload_entries_for_paths(
&root_path,
&root_canonical_path,
- &relative_paths,
- abs_paths,
+ &relative_paths
+ .iter()
+ .map(|event_root| event_root.path.clone())
+ .collect::<Vec<_>>(),
+ events
+ .into_iter()
+ .map(|event| event.path)
+ .collect::<Vec<_>>(),
Some(scan_job_tx.clone()),
)
.await;
@@ -4229,7 +4270,8 @@ impl BackgroundScanner {
state.scanned_dirs.remove(&entry.id);
}
}
- self.send_status_update(false, SmallVec::new()).await;
+ self.send_status_update(false, SmallVec::new(), &relative_paths)
+ .await;
}
async fn update_global_gitignore(&self, abs_path: &Path) {
@@ -4255,7 +4297,7 @@ impl BackgroundScanner {
)
.await;
self.scan_dirs(false, scan_job_rx).await;
- self.send_status_update(false, SmallVec::new()).await;
+ self.send_status_update(false, SmallVec::new(), &[]).await;
}
async fn forcibly_load_paths(&self, paths: &[Arc<RelPath>]) -> bool {
@@ -4336,7 +4378,8 @@ impl BackgroundScanner {
) {
Ok(_) => {
last_progress_update_count += 1;
- self.send_status_update(true, SmallVec::new()).await;
+ self.send_status_update(true, SmallVec::new(), &[])
+ .await;
}
Err(count) => {
last_progress_update_count = count;
@@ -4365,19 +4408,22 @@ impl BackgroundScanner {
&self,
scanning: bool,
barrier: SmallVec<[barrier::Sender; 1]>,
+ event_roots: &[EventRoot],
) -> bool {
let mut state = self.state.lock().await;
- if state.changed_paths.is_empty() && scanning {
+ if state.changed_paths.is_empty() && event_roots.is_empty() && scanning {
return true;
}
+ let merged_event_roots = merge_event_roots(&state.changed_paths, event_roots);
+
let new_snapshot = state.snapshot.clone();
let old_snapshot = mem::replace(&mut state.prev_snapshot, new_snapshot.snapshot.clone());
let changes = build_diff(
self.phase,
&old_snapshot,
&new_snapshot,
- &state.changed_paths,
+ &merged_event_roots,
);
state.changed_paths.clear();
@@ -5231,11 +5277,40 @@ async fn discover_ancestor_git_repo(
(ignores, exclude, None)
}
+/// Merges `changed_paths` (plain paths, treated as non-rescanned roots) with
+/// `event_roots` into a single `Vec<EventRoot>`, preferring the `EventRoot`
+/// entry (with its `was_rescanned` flag) when both sides contain the same
+/// path.
+///
+/// NOTE(review): the two-pointer merge assumes both inputs are sorted by
+/// path — TODO confirm callers uphold this invariant.
+fn merge_event_roots(changed_paths: &[Arc<RelPath>], event_roots: &[EventRoot]) -> Vec<EventRoot> {
+ let mut merged_event_roots = Vec::with_capacity(changed_paths.len() + event_roots.len());
+ let mut changed_paths = changed_paths.iter().peekable();
+ let mut event_roots = event_roots.iter().peekable();
+ while let (Some(path), Some(event_root)) = (changed_paths.peek(), event_roots.peek()) {
+ match path.cmp(&&event_root.path) {
+ Ordering::Less => {
+ // Path only present in `changed_paths`: not a rescan root.
+ merged_event_roots.push(EventRoot {
+ path: (*changed_paths.next().expect("peeked changed path")).clone(),
+ was_rescanned: false,
+ });
+ }
+ Ordering::Equal => {
+ // Present in both: keep the EventRoot's rescan flag, consume both.
+ merged_event_roots.push((*event_roots.next().expect("peeked event root")).clone());
+ changed_paths.next();
+ }
+ Ordering::Greater => {
+ merged_event_roots.push((*event_roots.next().expect("peeked event root")).clone());
+ }
+ }
+ }
+ // Drain whichever side still has elements left.
+ merged_event_roots.extend(changed_paths.map(|path| EventRoot {
+ path: path.clone(),
+ was_rescanned: false,
+ }));
+ merged_event_roots.extend(event_roots.cloned());
+ merged_event_roots
+}
+
fn build_diff(
phase: BackgroundScannerPhase,
old_snapshot: &Snapshot,
new_snapshot: &Snapshot,
- event_paths: &[Arc<RelPath>],
+ event_roots: &[EventRoot],
) -> UpdatedEntriesSet {
use BackgroundScannerPhase::*;
use PathChange::{Added, AddedOrUpdated, Loaded, Removed, Updated};
@@ -5243,13 +5318,14 @@ fn build_diff(
// Identify which paths have changed. Use the known set of changed
// parent paths to optimize the search.
let mut changes = Vec::new();
+
let mut old_paths = old_snapshot.entries_by_path.cursor::<PathKey>(());
let mut new_paths = new_snapshot.entries_by_path.cursor::<PathKey>(());
let mut last_newly_loaded_dir_path = None;
old_paths.next();
new_paths.next();
- for path in event_paths {
- let path = PathKey(path.clone());
+ for event_root in event_roots {
+ let path = PathKey(event_root.path.clone());
if old_paths.item().is_some_and(|e| e.path < path.0) {
old_paths.seek_forward(&path, Bias::Left);
}
@@ -5295,6 +5371,8 @@ fn build_diff(
} else {
changes.push((new_entry.path.clone(), new_entry.id, Updated));
}
+ } else if event_root.was_rescanned {
+ changes.push((new_entry.path.clone(), new_entry.id, Updated));
}
old_paths.next();
new_paths.next();
@@ -409,6 +409,164 @@ async fn test_renaming_case_only(cx: &mut TestAppContext) {
});
}
+// Verifies that a root-level `Rescan` event forces the worktree to re-read
+// the filesystem and reconcile state that went stale while the underlying
+// change events were lost.
+#[gpui::test]
+async fn test_root_rescan_reconciles_stale_state(cx: &mut TestAppContext) {
+ init_test(cx);
+ let fs = FakeFs::new(cx.background_executor.clone());
+ fs.insert_tree(
+ "/root",
+ json!({
+ "old.txt": "",
+ }),
+ )
+ .await;
+
+ let tree = Worktree::local(
+ Path::new("/root"),
+ true,
+ fs.clone(),
+ Default::default(),
+ true,
+ WorktreeId::from_proto(0),
+ &mut cx.to_async(),
+ )
+ .await
+ .unwrap();
+
+ cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
+ .await;
+
+ tree.read_with(cx, |tree, _| {
+ assert_eq!(
+ tree.entries(true, 0)
+ .map(|entry| entry.path.as_ref())
+ .collect::<Vec<_>>(),
+ vec![rel_path(""), rel_path("old.txt")]
+ );
+ });
+
+ // Mutate the filesystem with events paused, then drop the buffered
+ // events so the worktree never hears about the changes.
+ fs.pause_events();
+ fs.remove_file(Path::new("/root/old.txt"), RemoveOptions::default())
+ .await
+ .unwrap();
+ fs.insert_file(Path::new("/root/new.txt"), Vec::new()).await;
+ assert_eq!(fs.buffered_event_count(), 2);
+ fs.clear_buffered_events();
+
+ // The worktree is now stale: it still shows the old file.
+ tree.read_with(cx, |tree, _| {
+ assert!(tree.entry_for_path(rel_path("old.txt")).is_some());
+ assert!(tree.entry_for_path(rel_path("new.txt")).is_none());
+ });
+
+ // A single root rescan must reconcile both the removal and the addition.
+ fs.emit_fs_event("/root", Some(fs::PathEventKind::Rescan));
+ fs.unpause_events_and_flush();
+ tree.flush_fs_events(cx).await;
+
+ tree.read_with(cx, |tree, _| {
+ assert!(tree.entry_for_path(rel_path("old.txt")).is_none());
+ assert!(tree.entry_for_path(rel_path("new.txt")).is_some());
+ assert_eq!(
+ tree.entries(true, 0)
+ .map(|entry| entry.path.as_ref())
+ .collect::<Vec<_>>(),
+ vec![rel_path(""), rel_path("new.txt")]
+ );
+ });
+}
+
+// Verifies that a `Rescan` event on a subtree reports every surviving
+// descendant as `Updated` (even if its content did not change), alongside
+// the genuinely added/removed entries — while entries outside the rescanned
+// subtree are left alone.
+#[gpui::test]
+async fn test_subtree_rescan_reports_unchanged_descendants_as_updated(cx: &mut TestAppContext) {
+ init_test(cx);
+ let fs = FakeFs::new(cx.background_executor.clone());
+ fs.insert_tree(
+ "/root",
+ json!({
+ "dir": {
+ "child.txt": "",
+ "nested": {
+ "grandchild.txt": "",
+ },
+ "remove": {
+ "removed.txt": "",
+ }
+ },
+ "other.txt": "",
+ }),
+ )
+ .await;
+
+ let tree = Worktree::local(
+ Path::new("/root"),
+ true,
+ fs.clone(),
+ Default::default(),
+ true,
+ WorktreeId::from_proto(0),
+ &mut cx.to_async(),
+ )
+ .await
+ .unwrap();
+
+ cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
+ .await;
+
+ // Record every entry-update notification, filtering out the sentinel
+ // path that flush_fs_events uses internally.
+ let tree_updates = Arc::new(Mutex::new(Vec::new()));
+ tree.update(cx, |_, cx| {
+ let tree_updates = tree_updates.clone();
+ cx.subscribe(&tree, move |_, _, event, _| {
+ if let Event::UpdatedEntries(update) = event {
+ tree_updates.lock().extend(
+ update
+ .iter()
+ .filter(|(path, _, _)| path.as_ref() != rel_path("fs-event-sentinel"))
+ .map(|(path, _, change)| (path.clone(), *change)),
+ );
+ }
+ })
+ .detach();
+ });
+ // Mutate the subtree with events paused, then drop the buffered events
+ // so the only notification the worktree receives is the rescan below.
+ fs.pause_events();
+ fs.insert_file("/root/dir/new.txt", b"new content".to_vec())
+ .await;
+ fs.remove_dir(
+ "/root/dir/remove".as_ref(),
+ RemoveOptions {
+ recursive: true,
+ ignore_if_not_exists: false,
+ },
+ )
+ .await
+ .unwrap();
+ fs.clear_buffered_events();
+ // NOTE(review): unlike the root-rescan test, events are unpaused before
+ // emitting — presumably the event is delivered immediately once
+ // unpaused; confirm against FakeFs::emit_fs_event semantics.
+ fs.unpause_events_and_flush();
+
+ fs.emit_fs_event("/root/dir", Some(fs::PathEventKind::Rescan));
+ tree.flush_fs_events(cx).await;
+
+ assert_eq!(
+ mem::take(&mut *tree_updates.lock()),
+ &[
+ (rel_path("dir").into(), PathChange::Updated),
+ (rel_path("dir/child.txt").into(), PathChange::Updated),
+ (rel_path("dir/nested").into(), PathChange::Updated),
+ (
+ rel_path("dir/nested/grandchild.txt").into(),
+ PathChange::Updated
+ ),
+ (rel_path("dir/new.txt").into(), PathChange::Added),
+ (rel_path("dir/remove").into(), PathChange::Removed),
+ (
+ rel_path("dir/remove/removed.txt").into(),
+ PathChange::Removed
+ ),
+ ]
+ );
+
+ // Entries outside the rescanned subtree are untouched.
+ tree.read_with(cx, |tree, _| {
+ assert!(tree.entry_for_path(rel_path("other.txt")).is_some());
+ });
+}
+
#[gpui::test]
async fn test_open_gitignored_files(cx: &mut TestAppContext) {
init_test(cx);