1mod char_bag;
2mod fuzzy;
3mod ignore;
4
5use crate::{
6 editor::{History, Snapshot as BufferSnapshot},
7 sum_tree::{self, Cursor, Edit, SeekBias, SumTree},
8};
9use ::ignore::gitignore::Gitignore;
10use anyhow::{Context, Result};
11pub use fuzzy::{match_paths, PathMatch};
12use gpui::{scoped_pool, AppContext, Entity, ModelContext, ModelHandle, Task};
13use lazy_static::lazy_static;
14use parking_lot::Mutex;
15use postage::{
16 prelude::{Sink, Stream},
17 watch,
18};
19use smol::{channel::Sender, Timer};
20use std::{
21 cmp,
22 collections::{HashMap, HashSet},
23 ffi::{CStr, OsStr, OsString},
24 fmt, fs,
25 future::Future,
26 io::{self, Read, Write},
27 ops::Deref,
28 os::unix::{ffi::OsStrExt, fs::MetadataExt},
29 path::{Path, PathBuf},
30 sync::{Arc, Weak},
31 time::Duration,
32};
33
34use self::{char_bag::CharBag, ignore::IgnoreStack};
35
36lazy_static! {
37 static ref GITIGNORE: &'static OsStr = OsStr::new(".gitignore");
38}
39
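/// State reported by the `BackgroundScanner` and re-broadcast to observers of the worktree;
/// `Idle` means the most recent scan has finished.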
40#[derive(Clone, Debug)]
41enum ScanState {
42 Idle,
43 Scanning,
44 Err(Arc<io::Error>),
45}
46
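/// A directory tree on disk, mirrored into an in-memory `Snapshot` that is kept up to date
/// by a `BackgroundScanner` running on its own thread.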
47pub struct Worktree {
48 snapshot: Snapshot,
49 background_snapshot: Arc<Mutex<Snapshot>>,
50 handles: Arc<Mutex<HashMap<Arc<Path>, Weak<Mutex<FileHandleState>>>>>,
51 scan_state: (watch::Sender<ScanState>, watch::Receiver<ScanState>),
52 _event_stream_handle: fsevent::Handle,
53 poll_scheduled: bool,
54}
55
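/// A clonable handle to a file within a worktree. The shared `FileHandleState` is kept
/// current by the background scanner, which rewrites the path on renames and marks the
/// handle deleted when the file disappears from the snapshot.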
56#[derive(Clone, Debug)]
57pub struct FileHandle {
58 worktree: ModelHandle<Worktree>,
59 state: Arc<Mutex<FileHandleState>>,
60}
61
62#[derive(Clone, Debug, PartialEq, Eq)]
63struct FileHandleState {
64 path: Arc<Path>,
65 is_deleted: bool,
66}
67
68impl Worktree {
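    /// Creates a worktree rooted at `path` and spawns a background thread to scan it.
    ///
    /// A usage sketch mirroring the tests at the bottom of this file (assumes a gpui test
    /// app and an existing directory; not compiled as a doctest):
    ///
    /// ```ignore
    /// App::test_async((), |mut app| async move {
    ///     let tree = app.add_model(|ctx| Worktree::new(Path::new("/some/dir"), ctx));
    ///     app.read(|ctx| tree.read(ctx).scan_complete()).await;
    ///     app.read(|ctx| println!("{} files", tree.read(ctx).file_count()));
    /// });
    /// ```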
69 pub fn new(path: impl Into<Arc<Path>>, ctx: &mut ModelContext<Self>) -> Self {
70 let abs_path = path.into();
71 let (scan_state_tx, scan_state_rx) = smol::channel::unbounded();
72 let id = ctx.model_id();
73 let snapshot = Snapshot {
74 id,
75 scan_id: 0,
76 abs_path,
77 root_name: Default::default(),
78 ignores: Default::default(),
79 entries: Default::default(),
80 };
81 let (event_stream, event_stream_handle) =
82 fsevent::EventStream::new(&[snapshot.abs_path.as_ref()], Duration::from_millis(100));
83
84 let background_snapshot = Arc::new(Mutex::new(snapshot.clone()));
85 let handles = Arc::new(Mutex::new(Default::default()));
86
87 let tree = Self {
88 snapshot,
89 background_snapshot: background_snapshot.clone(),
90 handles: handles.clone(),
91 scan_state: watch::channel_with(ScanState::Scanning),
92 _event_stream_handle: event_stream_handle,
93 poll_scheduled: false,
94 };
95
96 std::thread::spawn(move || {
97 let scanner = BackgroundScanner::new(background_snapshot, handles, scan_state_tx, id);
98 scanner.run(event_stream)
99 });
100
101 ctx.spawn_stream(scan_state_rx, Self::observe_scan_state, |_, _| {})
102 .detach();
103
104 tree
105 }
106
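    /// Completes once the worktree is no longer scanning, i.e. the initial scan (or whichever
    /// scan is currently in progress) has finished.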
107 pub fn scan_complete(&self) -> impl Future<Output = ()> {
108 let mut scan_state_rx = self.scan_state.1.clone();
109 async move {
110 let mut scan_state = Some(scan_state_rx.borrow().clone());
111 while let Some(ScanState::Scanning) = scan_state {
112 scan_state = scan_state_rx.recv().await;
113 }
114 }
115 }
116
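    /// Completes after the next scan finishes: when the scanner reports `Idle` and the
    /// snapshot's scan id has advanced past its value at the time of this call.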
117 pub fn next_scan_complete(&self, ctx: &mut ModelContext<Self>) -> impl Future<Output = ()> {
118 let scan_id = self.snapshot.scan_id;
119 ctx.spawn_stream(
120 self.scan_state.1.clone(),
121 move |this, scan_state, ctx| {
122 if matches!(scan_state, ScanState::Idle) && this.snapshot.scan_id > scan_id {
123 ctx.halt_stream();
124 }
125 },
126 |_, _| {},
127 )
128 }
129
130 fn observe_scan_state(&mut self, scan_state: ScanState, ctx: &mut ModelContext<Self>) {
131 let _ = self.scan_state.0.blocking_send(scan_state);
132 self.poll_entries(ctx);
133 }
134
135 fn poll_entries(&mut self, ctx: &mut ModelContext<Self>) {
136 self.snapshot = self.background_snapshot.lock().clone();
137 ctx.notify();
138
139 if self.is_scanning() && !self.poll_scheduled {
140 ctx.spawn(Timer::after(Duration::from_millis(100)), |this, _, ctx| {
141 this.poll_scheduled = false;
142 this.poll_entries(ctx);
143 })
144 .detach();
145 self.poll_scheduled = true;
146 }
147 }
148
    fn is_scanning(&self) -> bool {
        matches!(*self.scan_state.1.borrow(), ScanState::Scanning)
    }
156
157 pub fn snapshot(&self) -> Snapshot {
158 self.snapshot.clone()
159 }
160
161 pub fn abs_path(&self) -> &Path {
162 self.snapshot.abs_path.as_ref()
163 }
164
165 pub fn contains_abs_path(&self, path: &Path) -> bool {
166 path.starts_with(&self.snapshot.abs_path)
167 }
168
169 fn absolutize(&self, path: &Path) -> PathBuf {
170 if path.file_name().is_some() {
171 self.snapshot.abs_path.join(path)
172 } else {
173 self.snapshot.abs_path.to_path_buf()
174 }
175 }
176
177 pub fn load_history(
178 &self,
179 path: &Path,
180 ctx: &AppContext,
181 ) -> impl Future<Output = Result<History>> {
182 let abs_path = self.absolutize(path);
183 ctx.background_executor().spawn(async move {
184 let mut file = std::fs::File::open(&abs_path)?;
185 let mut base_text = String::new();
186 file.read_to_string(&mut base_text)?;
187 Ok(History::new(Arc::from(base_text)))
188 })
189 }
190
191 pub fn save<'a>(
192 &self,
193 path: &Path,
194 content: BufferSnapshot,
195 ctx: &AppContext,
196 ) -> Task<Result<()>> {
197 let abs_path = self.absolutize(path);
198 ctx.background_executor().spawn(async move {
199 let buffer_size = content.text_summary().bytes.min(10 * 1024);
200 let file = std::fs::File::create(&abs_path)?;
201 let mut writer = std::io::BufWriter::with_capacity(buffer_size, file);
202 for chunk in content.fragments() {
                // Use write_all so partial writes don't silently drop data.
                writer.write_all(chunk.as_bytes())?;
204 }
205 writer.flush()?;
206 Ok(())
207 })
208 }
209}
210
211impl Entity for Worktree {
212 type Event = ();
213}
214
215impl Deref for Worktree {
216 type Target = Snapshot;
217
218 fn deref(&self) -> &Self::Target {
219 &self.snapshot
220 }
221}
222
223impl fmt::Debug for Worktree {
224 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
225 self.snapshot.fmt(f)
226 }
227}
228
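/// A point-in-time view of the worktree: its path, entries, and gitignore state. The
/// foreground `Worktree` and the `BackgroundScanner` each hold a copy, which the foreground
/// refreshes in `poll_entries`.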
229#[derive(Clone)]
230pub struct Snapshot {
231 id: usize,
232 scan_id: usize,
233 abs_path: Arc<Path>,
234 root_name: String,
235 ignores: HashMap<Arc<Path>, (Arc<Gitignore>, usize)>,
236 entries: SumTree<Entry>,
237}
238
239impl Snapshot {
240 pub fn file_count(&self) -> usize {
241 self.entries.summary().file_count
242 }
243
244 pub fn visible_file_count(&self) -> usize {
245 self.entries.summary().visible_file_count
246 }
247
248 pub fn files(&self, start: usize) -> FileIter {
249 FileIter::all(self, start)
250 }
251
252 #[cfg(test)]
253 pub fn paths(&self) -> impl Iterator<Item = &Arc<Path>> {
254 self.entries
255 .cursor::<(), ()>()
256 .skip(1)
257 .map(|entry| entry.path())
258 }
259
260 pub fn visible_files(&self, start: usize) -> FileIter {
261 FileIter::visible(self, start)
262 }
263
264 fn child_entries<'a>(&'a self, path: &'a Path) -> ChildEntriesIter<'a> {
265 ChildEntriesIter::new(path, self)
266 }
267
268 pub fn root_entry(&self) -> &Entry {
269 self.entry_for_path("").unwrap()
270 }
271
272 /// Returns the filename of the snapshot's root, plus a trailing slash if the snapshot's root is
273 /// a directory.
274 pub fn root_name(&self) -> &str {
275 &self.root_name
276 }
277
278 fn path_is_pending(&self, path: impl AsRef<Path>) -> bool {
279 if self.entries.is_empty() {
280 return true;
281 }
282 let path = path.as_ref();
283 let mut cursor = self.entries.cursor::<_, ()>();
284 if cursor.seek(&PathSearch::Exact(path), SeekBias::Left, &()) {
285 let entry = cursor.item().unwrap();
286 if entry.path.as_ref() == path {
287 return matches!(entry.kind, EntryKind::PendingDir);
288 }
289 }
290 if let Some(entry) = cursor.prev_item() {
291 matches!(entry.kind, EntryKind::PendingDir) && path.starts_with(entry.path.as_ref())
292 } else {
293 false
294 }
295 }
296
297 fn entry_for_path(&self, path: impl AsRef<Path>) -> Option<&Entry> {
298 let mut cursor = self.entries.cursor::<_, ()>();
299 if cursor.seek(&PathSearch::Exact(path.as_ref()), SeekBias::Left, &()) {
300 cursor.item()
301 } else {
302 None
303 }
304 }
305
306 pub fn inode_for_path(&self, path: impl AsRef<Path>) -> Option<u64> {
307 self.entry_for_path(path.as_ref()).map(|e| e.inode())
308 }
309
310 fn insert_entry(&mut self, entry: Entry) {
311 if !entry.is_dir() && entry.path().file_name() == Some(&GITIGNORE) {
312 let (ignore, err) = Gitignore::new(self.abs_path.join(entry.path()));
313 if let Some(err) = err {
314 log::error!("error in ignore file {:?} - {:?}", entry.path(), err);
315 }
316
317 let ignore_dir_path = entry.path().parent().unwrap();
318 self.ignores
319 .insert(ignore_dir_path.into(), (Arc::new(ignore), self.scan_id));
320 }
321 self.entries.insert(entry, &());
322 }
323
324 fn populate_dir(
325 &mut self,
326 parent_path: Arc<Path>,
327 entries: impl IntoIterator<Item = Entry>,
328 ignore: Option<Arc<Gitignore>>,
329 ) {
330 let mut edits = Vec::new();
331
332 let mut parent_entry = self
333 .entries
334 .get(&PathKey(parent_path.clone()), &())
335 .unwrap()
336 .clone();
337 if let Some(ignore) = ignore {
338 self.ignores.insert(parent_path, (ignore, self.scan_id));
339 }
340 if matches!(parent_entry.kind, EntryKind::PendingDir) {
341 parent_entry.kind = EntryKind::Dir;
342 } else {
343 unreachable!();
344 }
345 edits.push(Edit::Insert(parent_entry));
346
347 for entry in entries {
348 edits.push(Edit::Insert(entry));
349 }
350 self.entries.edit(edits, &());
351 }
352
353 fn remove_path(&mut self, path: &Path) {
354 let new_entries = {
355 let mut cursor = self.entries.cursor::<_, ()>();
356 let mut new_entries = cursor.slice(&PathSearch::Exact(path), SeekBias::Left, &());
357 cursor.seek_forward(&PathSearch::Successor(path), SeekBias::Left, &());
358 new_entries.push_tree(cursor.suffix(&()), &());
359 new_entries
360 };
361 self.entries = new_entries;
362
363 if path.file_name() == Some(&GITIGNORE) {
364 if let Some((_, scan_id)) = self.ignores.get_mut(path.parent().unwrap()) {
365 *scan_id = self.scan_id;
366 }
367 }
368 }
369
370 fn ignore_stack_for_path(&self, path: &Path, is_dir: bool) -> Arc<IgnoreStack> {
371 let mut new_ignores = Vec::new();
372 for ancestor in path.ancestors().skip(1) {
373 if let Some((ignore, _)) = self.ignores.get(ancestor) {
374 new_ignores.push((ancestor, Some(ignore.clone())));
375 } else {
376 new_ignores.push((ancestor, None));
377 }
378 }
379
380 let mut ignore_stack = IgnoreStack::none();
381 for (parent_path, ignore) in new_ignores.into_iter().rev() {
382 if ignore_stack.is_path_ignored(&parent_path, true) {
383 ignore_stack = IgnoreStack::all();
384 break;
385 } else if let Some(ignore) = ignore {
386 ignore_stack = ignore_stack.append(Arc::from(parent_path), ignore);
387 }
388 }
389
390 if ignore_stack.is_path_ignored(path, is_dir) {
391 ignore_stack = IgnoreStack::all();
392 }
393
394 ignore_stack
395 }
396}
397
398impl fmt::Debug for Snapshot {
399 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
400 for entry in self.entries.cursor::<(), ()>() {
401 for _ in entry.path().ancestors().skip(1) {
402 write!(f, " ")?;
403 }
404 writeln!(f, "{:?} (inode: {})", entry.path(), entry.inode())?;
405 }
406 Ok(())
407 }
408}
409
410impl FileHandle {
411 /// Returns this file's path relative to the root of its worktree.
412 pub fn path(&self) -> Arc<Path> {
413 self.state.lock().path.clone()
414 }
415
416 /// Returns the last component of this handle's absolute path. If this handle refers to the root
417 /// of its worktree, then this method will return the name of the worktree itself.
418 pub fn file_name<'a>(&'a self, ctx: &'a AppContext) -> Option<OsString> {
419 self.state
420 .lock()
421 .path
422 .file_name()
423 .or_else(|| self.worktree.read(ctx).abs_path().file_name())
424 .map(Into::into)
425 }
426
427 pub fn is_deleted(&self) -> bool {
428 self.state.lock().is_deleted
429 }
430
431 pub fn exists(&self) -> bool {
432 !self.is_deleted()
433 }
434
435 pub fn load_history(&self, ctx: &AppContext) -> impl Future<Output = Result<History>> {
436 self.worktree.read(ctx).load_history(&self.path(), ctx)
437 }
438
439 pub fn save<'a>(&self, content: BufferSnapshot, ctx: &AppContext) -> Task<Result<()>> {
440 let worktree = self.worktree.read(ctx);
441 worktree.save(&self.path(), content, ctx)
442 }
443
444 pub fn worktree_id(&self) -> usize {
445 self.worktree.id()
446 }
447
448 pub fn entry_id(&self) -> (usize, Arc<Path>) {
449 (self.worktree.id(), self.path())
450 }
451
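    /// Invokes `callback` on model `T` whenever this file's state changes, i.e. when the
    /// background scanner records a rename or deletion affecting this path.
    ///
    /// A usage sketch (not compiled); `MyModel` and the callback body are hypothetical:
    ///
    /// ```ignore
    /// file.observe_from_model(ctx, |_this: &mut MyModel, file, _ctx| {
    ///     if file.is_deleted() {
    ///         log::info!("{:?} was deleted on disk", file.path());
    ///     }
    /// });
    /// ```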
452 pub fn observe_from_model<T: Entity>(
453 &self,
454 ctx: &mut ModelContext<T>,
455 mut callback: impl FnMut(&mut T, FileHandle, &mut ModelContext<T>) + 'static,
456 ) {
457 let mut prev_state = self.state.lock().clone();
458 let cur_state = Arc::downgrade(&self.state);
459 ctx.observe(&self.worktree, move |observer, worktree, ctx| {
460 if let Some(cur_state) = cur_state.upgrade() {
461 let cur_state_unlocked = cur_state.lock();
462 if *cur_state_unlocked != prev_state {
463 prev_state = cur_state_unlocked.clone();
464 drop(cur_state_unlocked);
465 callback(
466 observer,
467 FileHandle {
468 worktree,
469 state: cur_state,
470 },
471 ctx,
472 );
473 }
474 }
475 });
476 }
477}
478
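/// A single file or directory in the worktree, keyed in the entry tree by its
/// worktree-relative path.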
479#[derive(Clone, Debug)]
480pub struct Entry {
481 kind: EntryKind,
482 path: Arc<Path>,
483 inode: u64,
484 is_symlink: bool,
485 is_ignored: bool,
486}
487
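/// A `PendingDir` is a directory whose contents have not been scanned yet; `populate_dir`
/// promotes it to `Dir` once its children are inserted.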
488#[derive(Clone, Debug)]
489pub enum EntryKind {
490 PendingDir,
491 Dir,
492 File(CharBag),
493}
494
495impl Entry {
496 pub fn path(&self) -> &Arc<Path> {
497 &self.path
498 }
499
500 pub fn inode(&self) -> u64 {
501 self.inode
502 }
503
504 pub fn is_ignored(&self) -> bool {
505 self.is_ignored
506 }
507
508 fn is_dir(&self) -> bool {
509 matches!(self.kind, EntryKind::Dir | EntryKind::PendingDir)
510 }
511
512 fn is_file(&self) -> bool {
513 matches!(self.kind, EntryKind::File(_))
514 }
515}
516
517impl sum_tree::Item for Entry {
518 type Summary = EntrySummary;
519
520 fn summary(&self) -> Self::Summary {
521 let file_count;
522 let visible_file_count;
523 if self.is_file() {
524 file_count = 1;
525 if self.is_ignored {
526 visible_file_count = 0;
527 } else {
528 visible_file_count = 1;
529 }
530 } else {
531 file_count = 0;
532 visible_file_count = 0;
533 }
534
535 EntrySummary {
536 max_path: self.path().clone(),
537 file_count,
538 visible_file_count,
539 }
540 }
541}
542
543impl sum_tree::KeyedItem for Entry {
544 type Key = PathKey;
545
546 fn key(&self) -> Self::Key {
547 PathKey(self.path().clone())
548 }
549}
550
551#[derive(Clone, Debug)]
552pub struct EntrySummary {
553 max_path: Arc<Path>,
554 file_count: usize,
555 visible_file_count: usize,
556}
557
558impl Default for EntrySummary {
559 fn default() -> Self {
560 Self {
561 max_path: Arc::from(Path::new("")),
562 file_count: 0,
563 visible_file_count: 0,
564 }
565 }
566}
567
568impl sum_tree::Summary for EntrySummary {
569 type Context = ();
570
571 fn add_summary(&mut self, rhs: &Self, _: &()) {
572 self.max_path = rhs.max_path.clone();
573 self.file_count += rhs.file_count;
574 self.visible_file_count += rhs.visible_file_count;
575 }
576}
577
578#[derive(Clone, Debug, Eq, PartialEq, Ord, PartialOrd)]
579pub struct PathKey(Arc<Path>);
580
581impl Default for PathKey {
582 fn default() -> Self {
583 Self(Path::new("").into())
584 }
585}
586
587impl<'a> sum_tree::Dimension<'a, EntrySummary> for PathKey {
588 fn add_summary(&mut self, summary: &'a EntrySummary) {
589 self.0 = summary.max_path.clone();
590 }
591}
592
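/// Seek target for the entry tree: `Exact` locates a path itself, while `Successor` locates
/// the first entry past a path and all of its descendants.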
593#[derive(Copy, Clone, Debug, PartialEq, Eq)]
594enum PathSearch<'a> {
595 Exact(&'a Path),
596 Successor(&'a Path),
597}
598
599impl<'a> Ord for PathSearch<'a> {
600 fn cmp(&self, other: &Self) -> cmp::Ordering {
601 match (self, other) {
602 (Self::Exact(a), Self::Exact(b)) => a.cmp(b),
603 (Self::Successor(a), Self::Exact(b)) => {
604 if b.starts_with(a) {
605 cmp::Ordering::Greater
606 } else {
607 a.cmp(b)
608 }
609 }
610 _ => todo!("not sure we need the other two cases"),
611 }
612 }
613}
614
615impl<'a> PartialOrd for PathSearch<'a> {
616 fn partial_cmp(&self, other: &Self) -> Option<cmp::Ordering> {
617 Some(self.cmp(other))
618 }
619}
620
621impl<'a> Default for PathSearch<'a> {
622 fn default() -> Self {
623 Self::Exact(Path::new("").into())
624 }
625}
626
627impl<'a: 'b, 'b> sum_tree::Dimension<'a, EntrySummary> for PathSearch<'b> {
628 fn add_summary(&mut self, summary: &'a EntrySummary) {
629 *self = Self::Exact(summary.max_path.as_ref());
630 }
631}
632
633#[derive(Copy, Clone, Default, Debug, Eq, PartialEq, Ord, PartialOrd)]
634pub struct FileCount(usize);
635
636impl<'a> sum_tree::Dimension<'a, EntrySummary> for FileCount {
637 fn add_summary(&mut self, summary: &'a EntrySummary) {
638 self.0 += summary.file_count;
639 }
640}
641
642#[derive(Copy, Clone, Default, Debug, Eq, PartialEq, Ord, PartialOrd)]
643pub struct VisibleFileCount(usize);
644
645impl<'a> sum_tree::Dimension<'a, EntrySummary> for VisibleFileCount {
646 fn add_summary(&mut self, summary: &'a EntrySummary) {
647 self.0 += summary.visible_file_count;
648 }
649}
650
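/// Runs on a dedicated thread (spawned in `Worktree::new`), performing the initial recursive
/// scan and then processing FS events, updating the shared snapshot and file handles as it
/// goes.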
651struct BackgroundScanner {
652 snapshot: Arc<Mutex<Snapshot>>,
653 notify: Sender<ScanState>,
654 handles: Arc<Mutex<HashMap<Arc<Path>, Weak<Mutex<FileHandleState>>>>>,
655 other_mount_paths: HashSet<PathBuf>,
656 thread_pool: scoped_pool::Pool,
657 root_char_bag: CharBag,
658}
659
660impl BackgroundScanner {
661 fn new(
662 snapshot: Arc<Mutex<Snapshot>>,
663 handles: Arc<Mutex<HashMap<Arc<Path>, Weak<Mutex<FileHandleState>>>>>,
664 notify: Sender<ScanState>,
665 worktree_id: usize,
666 ) -> Self {
667 let mut scanner = Self {
668 root_char_bag: Default::default(),
669 snapshot,
670 notify,
671 handles,
672 other_mount_paths: Default::default(),
673 thread_pool: scoped_pool::Pool::new(16, format!("worktree-{}-scanner", worktree_id)),
674 };
675 scanner.update_other_mount_paths();
676 scanner
677 }
678
679 fn update_other_mount_paths(&mut self) {
680 let path = self.snapshot.lock().abs_path.clone();
681 self.other_mount_paths.clear();
682 self.other_mount_paths.extend(
683 mounted_volume_paths()
684 .into_iter()
685 .filter(|mount_path| !path.starts_with(mount_path)),
686 );
687 }
688
689 fn abs_path(&self) -> Arc<Path> {
690 self.snapshot.lock().abs_path.clone()
691 }
692
693 fn snapshot(&self) -> Snapshot {
694 self.snapshot.lock().clone()
695 }
696
697 fn run(mut self, event_stream: fsevent::EventStream) {
698 if smol::block_on(self.notify.send(ScanState::Scanning)).is_err() {
699 return;
700 }
701
702 if let Err(err) = self.scan_dirs() {
703 if smol::block_on(self.notify.send(ScanState::Err(Arc::new(err)))).is_err() {
704 return;
705 }
706 }
707
708 if smol::block_on(self.notify.send(ScanState::Idle)).is_err() {
709 return;
710 }
711
712 event_stream.run(move |events| {
713 if smol::block_on(self.notify.send(ScanState::Scanning)).is_err() {
714 return false;
715 }
716
717 if !self.process_events(events) {
718 return false;
719 }
720
721 if smol::block_on(self.notify.send(ScanState::Idle)).is_err() {
722 return false;
723 }
724
725 true
726 });
727 }
728
729 fn scan_dirs(&mut self) -> io::Result<()> {
730 self.snapshot.lock().scan_id += 1;
731
732 let path: Arc<Path> = Arc::from(Path::new(""));
733 let abs_path = self.abs_path();
734 let metadata = fs::metadata(&abs_path)?;
735 let inode = metadata.ino();
736 let is_symlink = fs::symlink_metadata(&abs_path)?.file_type().is_symlink();
737 let is_dir = metadata.file_type().is_dir();
738
        // After determining whether the root entry is a file or a directory, populate the
        // snapshot's "root name", which is used when fuzzy-matching paths.
741 let mut root_name = abs_path
742 .file_name()
743 .map_or(String::new(), |f| f.to_string_lossy().to_string());
744 if is_dir {
745 root_name.push('/');
746 }
747 self.root_char_bag = root_name.chars().map(|c| c.to_ascii_lowercase()).collect();
748 self.snapshot.lock().root_name = root_name;
749
750 if is_dir {
751 self.snapshot.lock().insert_entry(Entry {
752 kind: EntryKind::PendingDir,
753 path: path.clone(),
754 inode,
755 is_symlink,
756 is_ignored: false,
757 });
758
759 let (tx, rx) = crossbeam_channel::unbounded();
760 tx.send(ScanJob {
761 abs_path: abs_path.to_path_buf(),
762 path,
763 ignore_stack: IgnoreStack::none(),
764 scan_queue: tx.clone(),
765 })
766 .unwrap();
767 drop(tx);
768
769 self.thread_pool.scoped(|pool| {
770 for _ in 0..self.thread_pool.thread_count() {
771 pool.execute(|| {
772 while let Ok(job) = rx.recv() {
773 if let Err(err) = self.scan_dir(&job) {
774 log::error!("error scanning {:?}: {}", job.abs_path, err);
775 }
776 }
777 });
778 }
779 });
780 } else {
781 self.snapshot.lock().insert_entry(Entry {
782 kind: EntryKind::File(self.char_bag(&path)),
783 path,
784 inode,
785 is_symlink,
786 is_ignored: false,
787 });
788 }
789
790 self.mark_deleted_file_handles();
791 Ok(())
792 }
793
794 fn scan_dir(&self, job: &ScanJob) -> io::Result<()> {
795 let mut new_entries: Vec<Entry> = Vec::new();
796 let mut new_jobs: Vec<ScanJob> = Vec::new();
797 let mut ignore_stack = job.ignore_stack.clone();
798 let mut new_ignore = None;
799
800 for child_entry in fs::read_dir(&job.abs_path)? {
801 let child_entry = child_entry?;
802 let child_name = child_entry.file_name();
803 let child_abs_path = job.abs_path.join(&child_name);
804 let child_path: Arc<Path> = job.path.join(&child_name).into();
805 let child_is_symlink = child_entry.metadata()?.file_type().is_symlink();
806 let child_metadata = if let Ok(metadata) = fs::metadata(&child_abs_path) {
807 metadata
808 } else {
809 log::error!("could not get metadata for path {:?}", child_abs_path);
810 continue;
811 };
812
813 let child_inode = child_metadata.ino();
814
815 // Disallow mount points outside the file system containing the root of this worktree
816 if self.other_mount_paths.contains(&child_abs_path) {
817 continue;
818 }
819
820 // If we find a .gitignore, add it to the stack of ignores used to determine which paths are ignored
821 if child_name == *GITIGNORE {
822 let (ignore, err) = Gitignore::new(&child_abs_path);
823 if let Some(err) = err {
824 log::error!("error in ignore file {:?} - {:?}", child_path, err);
825 }
826 let ignore = Arc::new(ignore);
827 ignore_stack = ignore_stack.append(job.path.clone(), ignore.clone());
828 new_ignore = Some(ignore);
829
                // Update the ignore status of any child entries we've already processed to
                // reflect the ignore file in the current directory. Because `.gitignore`
                // starts with a `.`, such entries should rarely be numerous. Also update the
                // ignore stack associated with any new jobs.
834 let mut new_jobs = new_jobs.iter_mut();
835 for entry in &mut new_entries {
836 entry.is_ignored = ignore_stack.is_path_ignored(&entry.path, entry.is_dir());
837 if entry.is_dir() {
838 new_jobs.next().unwrap().ignore_stack = if entry.is_ignored {
839 IgnoreStack::all()
840 } else {
841 ignore_stack.clone()
842 };
843 }
844 }
845 }
846
847 if child_metadata.is_dir() {
848 let is_ignored = ignore_stack.is_path_ignored(&child_path, true);
849 new_entries.push(Entry {
850 kind: EntryKind::PendingDir,
851 path: child_path.clone(),
852 inode: child_inode,
853 is_symlink: child_is_symlink,
854 is_ignored,
855 });
856 new_jobs.push(ScanJob {
857 abs_path: child_abs_path,
858 path: child_path,
859 ignore_stack: if is_ignored {
860 IgnoreStack::all()
861 } else {
862 ignore_stack.clone()
863 },
864 scan_queue: job.scan_queue.clone(),
865 });
866 } else {
867 let is_ignored = ignore_stack.is_path_ignored(&child_path, false);
868 new_entries.push(Entry {
869 kind: EntryKind::File(self.char_bag(&child_path)),
870 path: child_path,
871 inode: child_inode,
872 is_symlink: child_is_symlink,
873 is_ignored,
874 });
875 };
876 }
877
878 self.snapshot
879 .lock()
880 .populate_dir(job.path.clone(), new_entries, new_ignore);
881 for new_job in new_jobs {
882 job.scan_queue.send(new_job).unwrap();
883 }
884
885 Ok(())
886 }
887
888 fn process_events(&mut self, mut events: Vec<fsevent::Event>) -> bool {
889 self.update_other_mount_paths();
890
891 let mut snapshot = self.snapshot();
892 snapshot.scan_id += 1;
893
894 let root_abs_path = if let Ok(abs_path) = snapshot.abs_path.canonicalize() {
895 abs_path
896 } else {
897 return false;
898 };
899
900 let mut renamed_paths: HashMap<u64, PathBuf> = HashMap::new();
901 let mut updated_handles = HashMap::new();
902 for event in &events {
903 if event.flags.contains(fsevent::StreamFlags::ITEM_RENAMED) {
904 if let Ok(path) = event.path.strip_prefix(&root_abs_path) {
905 if let Some(inode) = snapshot.inode_for_path(path) {
906 renamed_paths.insert(inode, path.to_path_buf());
907 } else if let Ok(metadata) = fs::metadata(&event.path) {
908 let new_path = path;
909 let mut handles = self.handles.lock();
910 if let Some(old_path) = renamed_paths.get(&metadata.ino()) {
911 handles.retain(|handle_path, handle_state| {
912 if let Ok(path_suffix) = handle_path.strip_prefix(&old_path) {
913 let new_handle_path: Arc<Path> =
914 if path_suffix.file_name().is_some() {
915 new_path.join(path_suffix)
916 } else {
917 new_path.to_path_buf()
918 }
919 .into();
920 if let Some(handle_state) = Weak::upgrade(&handle_state) {
921 handle_state.lock().path = new_handle_path.clone();
922 updated_handles
923 .insert(new_handle_path, Arc::downgrade(&handle_state));
924 }
925 false
926 } else {
927 true
928 }
929 });
930 handles.extend(updated_handles.drain());
931 }
932 }
933 }
934 }
935 }
936
937 events.sort_unstable_by(|a, b| a.path.cmp(&b.path));
938 let mut abs_paths = events.into_iter().map(|e| e.path).peekable();
939 let (scan_queue_tx, scan_queue_rx) = crossbeam_channel::unbounded();
940
941 while let Some(abs_path) = abs_paths.next() {
942 let path = match abs_path.strip_prefix(&root_abs_path) {
943 Ok(path) => Arc::from(path.to_path_buf()),
944 Err(_) => {
945 log::error!(
946 "unexpected event {:?} for root path {:?}",
947 abs_path,
948 root_abs_path
949 );
950 continue;
951 }
952 };
953
954 while abs_paths.peek().map_or(false, |p| p.starts_with(&abs_path)) {
955 abs_paths.next();
956 }
957
958 snapshot.remove_path(&path);
959
960 match self.fs_entry_for_path(path.clone(), &abs_path) {
961 Ok(Some(mut fs_entry)) => {
962 let is_dir = fs_entry.is_dir();
963 let ignore_stack = snapshot.ignore_stack_for_path(&path, is_dir);
964 fs_entry.is_ignored = ignore_stack.is_all();
965 snapshot.insert_entry(fs_entry);
966 if is_dir {
967 scan_queue_tx
968 .send(ScanJob {
969 abs_path,
970 path,
971 ignore_stack,
972 scan_queue: scan_queue_tx.clone(),
973 })
974 .unwrap();
975 }
976 }
977 Ok(None) => {}
978 Err(err) => {
979 // TODO - create a special 'error' entry in the entries tree to mark this
980 log::error!("error reading file on event {:?}", err);
981 }
982 }
983 }
984
985 *self.snapshot.lock() = snapshot;
986
987 // Scan any directories that were created as part of this event batch.
988 drop(scan_queue_tx);
989 self.thread_pool.scoped(|pool| {
990 for _ in 0..self.thread_pool.thread_count() {
991 pool.execute(|| {
992 while let Ok(job) = scan_queue_rx.recv() {
993 if let Err(err) = self.scan_dir(&job) {
994 log::error!("error scanning {:?}: {}", job.abs_path, err);
995 }
996 }
997 });
998 }
999 });
1000
1001 self.update_ignore_statuses();
1002 self.mark_deleted_file_handles();
1003 true
1004 }
1005
1006 fn update_ignore_statuses(&self) {
1007 let mut snapshot = self.snapshot();
1008
1009 let mut ignores_to_update = Vec::new();
1010 let mut ignores_to_delete = Vec::new();
1011 for (parent_path, (_, scan_id)) in &snapshot.ignores {
1012 if *scan_id == snapshot.scan_id && snapshot.entry_for_path(parent_path).is_some() {
1013 ignores_to_update.push(parent_path.clone());
1014 }
1015
1016 let ignore_path = parent_path.join(&*GITIGNORE);
1017 if snapshot.entry_for_path(ignore_path).is_none() {
1018 ignores_to_delete.push(parent_path.clone());
1019 }
1020 }
1021
1022 for parent_path in ignores_to_delete {
1023 snapshot.ignores.remove(&parent_path);
1024 self.snapshot.lock().ignores.remove(&parent_path);
1025 }
1026
1027 let (ignore_queue_tx, ignore_queue_rx) = crossbeam_channel::unbounded();
1028 ignores_to_update.sort_unstable();
1029 let mut ignores_to_update = ignores_to_update.into_iter().peekable();
1030 while let Some(parent_path) = ignores_to_update.next() {
1031 while ignores_to_update
1032 .peek()
1033 .map_or(false, |p| p.starts_with(&parent_path))
1034 {
1035 ignores_to_update.next().unwrap();
1036 }
1037
1038 let ignore_stack = snapshot.ignore_stack_for_path(&parent_path, true);
1039 ignore_queue_tx
1040 .send(UpdateIgnoreStatusJob {
1041 path: parent_path,
1042 ignore_stack,
1043 ignore_queue: ignore_queue_tx.clone(),
1044 })
1045 .unwrap();
1046 }
1047 drop(ignore_queue_tx);
1048
1049 self.thread_pool.scoped(|scope| {
1050 for _ in 0..self.thread_pool.thread_count() {
1051 scope.execute(|| {
1052 while let Ok(job) = ignore_queue_rx.recv() {
1053 self.update_ignore_status(job, &snapshot);
1054 }
1055 });
1056 }
1057 });
1058 }
1059
1060 fn update_ignore_status(&self, job: UpdateIgnoreStatusJob, snapshot: &Snapshot) {
1061 let mut ignore_stack = job.ignore_stack;
1062 if let Some((ignore, _)) = snapshot.ignores.get(&job.path) {
1063 ignore_stack = ignore_stack.append(job.path.clone(), ignore.clone());
1064 }
1065
1066 let mut edits = Vec::new();
1067 for mut entry in snapshot.child_entries(&job.path).cloned() {
1068 let was_ignored = entry.is_ignored;
1069 entry.is_ignored = ignore_stack.is_path_ignored(entry.path(), entry.is_dir());
1070 if entry.is_dir() {
1071 let child_ignore_stack = if entry.is_ignored {
1072 IgnoreStack::all()
1073 } else {
1074 ignore_stack.clone()
1075 };
1076 job.ignore_queue
1077 .send(UpdateIgnoreStatusJob {
1078 path: entry.path().clone(),
1079 ignore_stack: child_ignore_stack,
1080 ignore_queue: job.ignore_queue.clone(),
1081 })
1082 .unwrap();
1083 }
1084
1085 if entry.is_ignored != was_ignored {
1086 edits.push(Edit::Insert(entry));
1087 }
1088 }
1089 self.snapshot.lock().entries.edit(edits, &());
1090 }
1091
1092 fn mark_deleted_file_handles(&self) {
1093 let mut handles = self.handles.lock();
1094 let snapshot = self.snapshot.lock();
1095 handles.retain(|path, handle_state| {
1096 if let Some(handle_state) = Weak::upgrade(&handle_state) {
1097 let mut handle_state = handle_state.lock();
1098 handle_state.is_deleted = snapshot.entry_for_path(&path).is_none();
1099 true
1100 } else {
1101 false
1102 }
1103 });
1104 }
1105
1106 fn fs_entry_for_path(&self, path: Arc<Path>, abs_path: &Path) -> Result<Option<Entry>> {
1107 let metadata = match fs::metadata(&abs_path) {
1108 Err(err) => {
1109 return match (err.kind(), err.raw_os_error()) {
1110 (io::ErrorKind::NotFound, _) => Ok(None),
1111 (io::ErrorKind::Other, Some(libc::ENOTDIR)) => Ok(None),
1112 _ => Err(anyhow::Error::new(err)),
1113 }
1114 }
1115 Ok(metadata) => metadata,
1116 };
1117 let inode = metadata.ino();
1118 let is_symlink = fs::symlink_metadata(&abs_path)
1119 .context("failed to read symlink metadata")?
1120 .file_type()
1121 .is_symlink();
1122
1123 let entry = Entry {
1124 kind: if metadata.file_type().is_dir() {
1125 EntryKind::PendingDir
1126 } else {
1127 EntryKind::File(self.char_bag(&path))
1128 },
1129 path,
1130 inode,
1131 is_symlink,
1132 is_ignored: false,
1133 };
1134
1135 Ok(Some(entry))
1136 }
1137
1138 fn char_bag(&self, path: &Path) -> CharBag {
1139 let mut result = self.root_char_bag;
1140 result.extend(
1141 path.to_string_lossy()
1142 .chars()
1143 .map(|c| c.to_ascii_lowercase()),
1144 );
1145 result
1146 }
1147}
1148
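/// A unit of work for the scanner's thread pool: scan one directory, enqueueing a new job
/// for each subdirectory encountered.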
1149struct ScanJob {
1150 abs_path: PathBuf,
1151 path: Arc<Path>,
1152 ignore_stack: Arc<IgnoreStack>,
1153 scan_queue: crossbeam_channel::Sender<ScanJob>,
1154}
1155
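/// A unit of work for recomputing ignore statuses beneath a directory after a `.gitignore`
/// change.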
1156struct UpdateIgnoreStatusJob {
1157 path: Arc<Path>,
1158 ignore_stack: Arc<IgnoreStack>,
1159 ignore_queue: crossbeam_channel::Sender<UpdateIgnoreStatusJob>,
1160}
1161
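/// Convenience methods on `ModelHandle<Worktree>`, primarily for obtaining `FileHandle`s.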
1162pub trait WorktreeHandle {
1163 fn file(&self, path: impl AsRef<Path>, app: &AppContext) -> FileHandle;
1164
1165 #[cfg(test)]
1166 fn flush_fs_events<'a>(
1167 &self,
1168 app: &'a gpui::TestAppContext,
1169 ) -> futures_core::future::LocalBoxFuture<'a, ()>;
1170}
1171
1172impl WorktreeHandle for ModelHandle<Worktree> {
1173 fn file(&self, path: impl AsRef<Path>, app: &AppContext) -> FileHandle {
1174 let path = path.as_ref();
1175 let tree = self.read(app);
1176 let mut handles = tree.handles.lock();
1177 let state = if let Some(state) = handles.get(path).and_then(Weak::upgrade) {
1178 state
1179 } else {
1180 let handle_state = if let Some(entry) = tree.entry_for_path(path) {
1181 FileHandleState {
1182 path: entry.path().clone(),
1183 is_deleted: false,
1184 }
1185 } else {
1186 FileHandleState {
1187 path: path.into(),
1188 is_deleted: !tree.path_is_pending(path),
1189 }
1190 };
1191
1192 let state = Arc::new(Mutex::new(handle_state.clone()));
1193 handles.insert(handle_state.path, Arc::downgrade(&state));
1194 state
1195 };
1196
1197 FileHandle {
1198 worktree: self.clone(),
1199 state,
1200 }
1201 }
1202
    // The worktree's FS event stream sometimes delivers "redundant" events for FS changes
    // that occurred before the worktree was constructed. These events can cause the worktree
    // to perform extra directory scans and emit extra scan-state notifications.
    //
    // This function mutates the worktree's directory and waits for those mutations to be
    // picked up, ensuring that all redundant FS events have already been processed.
1209 #[cfg(test)]
1210 fn flush_fs_events<'a>(
1211 &self,
1212 app: &'a gpui::TestAppContext,
1213 ) -> futures_core::future::LocalBoxFuture<'a, ()> {
1214 use smol::future::FutureExt;
1215
1216 let filename = "fs-event-sentinel";
1217 let root_path = app.read(|ctx| self.read(ctx).abs_path.clone());
1218 let tree = self.clone();
1219 async move {
1220 fs::write(root_path.join(filename), "").unwrap();
1221 tree.condition_with_duration(Duration::from_secs(5), &app, |tree, _| {
1222 tree.entry_for_path(filename).is_some()
1223 })
1224 .await;
1225
1226 fs::remove_file(root_path.join(filename)).unwrap();
1227 tree.condition_with_duration(Duration::from_secs(5), &app, |tree, _| {
1228 tree.entry_for_path(filename).is_none()
1229 })
1230 .await;
1231
1232 app.read(|ctx| tree.read(ctx).scan_complete()).await;
1233 }
1234 .boxed_local()
1235 }
1236}
1237
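/// Iterates over the worktree's file entries in path order: either every file, or only the
/// files that are not ignored ("visible").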
1238pub enum FileIter<'a> {
1239 All(Cursor<'a, Entry, FileCount, FileCount>),
1240 Visible(Cursor<'a, Entry, VisibleFileCount, VisibleFileCount>),
1241}
1242
1243impl<'a> FileIter<'a> {
1244 fn all(snapshot: &'a Snapshot, start: usize) -> Self {
1245 let mut cursor = snapshot.entries.cursor();
1246 cursor.seek(&FileCount(start), SeekBias::Right, &());
1247 Self::All(cursor)
1248 }
1249
1250 fn visible(snapshot: &'a Snapshot, start: usize) -> Self {
1251 let mut cursor = snapshot.entries.cursor();
1252 cursor.seek(&VisibleFileCount(start), SeekBias::Right, &());
1253 Self::Visible(cursor)
1254 }
1255
1256 fn next_internal(&mut self) {
1257 match self {
1258 Self::All(cursor) => {
1259 let ix = *cursor.start();
1260 cursor.seek_forward(&FileCount(ix.0 + 1), SeekBias::Right, &());
1261 }
1262 Self::Visible(cursor) => {
1263 let ix = *cursor.start();
1264 cursor.seek_forward(&VisibleFileCount(ix.0 + 1), SeekBias::Right, &());
1265 }
1266 }
1267 }
1268
1269 fn item(&self) -> Option<&'a Entry> {
1270 match self {
1271 Self::All(cursor) => cursor.item(),
1272 Self::Visible(cursor) => cursor.item(),
1273 }
1274 }
1275}
1276
1277impl<'a> Iterator for FileIter<'a> {
1278 type Item = &'a Entry;
1279
1280 fn next(&mut self) -> Option<Self::Item> {
1281 if let Some(entry) = self.item() {
1282 self.next_internal();
1283 Some(entry)
1284 } else {
1285 None
1286 }
1287 }
1288}
1289
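/// Yields the immediate children of a directory by skipping over each child's descendants as
/// it advances.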
1290struct ChildEntriesIter<'a> {
1291 parent_path: &'a Path,
1292 cursor: Cursor<'a, Entry, PathSearch<'a>, ()>,
1293}
1294
1295impl<'a> ChildEntriesIter<'a> {
1296 fn new(parent_path: &'a Path, snapshot: &'a Snapshot) -> Self {
1297 let mut cursor = snapshot.entries.cursor();
1298 cursor.seek(&PathSearch::Exact(parent_path), SeekBias::Right, &());
1299 Self {
1300 parent_path,
1301 cursor,
1302 }
1303 }
1304}
1305
1306impl<'a> Iterator for ChildEntriesIter<'a> {
1307 type Item = &'a Entry;
1308
1309 fn next(&mut self) -> Option<Self::Item> {
1310 if let Some(item) = self.cursor.item() {
1311 if item.path().starts_with(self.parent_path) {
1312 self.cursor
1313 .seek_forward(&PathSearch::Successor(item.path()), SeekBias::Left, &());
1314 Some(item)
1315 } else {
1316 None
1317 }
1318 } else {
1319 None
1320 }
1321 }
1322}
1323
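/// Returns the mount point of every mounted volume, via `getmntinfo` (macOS / BSD only).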
1324fn mounted_volume_paths() -> Vec<PathBuf> {
1325 unsafe {
1326 let mut stat_ptr: *mut libc::statfs = std::ptr::null_mut();
1327 let count = libc::getmntinfo(&mut stat_ptr as *mut _, libc::MNT_WAIT);
1328 if count >= 0 {
1329 std::slice::from_raw_parts(stat_ptr, count as usize)
1330 .iter()
1331 .map(|stat| {
1332 PathBuf::from(OsStr::from_bytes(
1333 CStr::from_ptr(&stat.f_mntonname[0]).to_bytes(),
1334 ))
1335 })
1336 .collect()
1337 } else {
1338 panic!("failed to run getmntinfo");
1339 }
1340 }
1341}
1342
1343#[cfg(test)]
1344mod tests {
1345 use super::*;
1346 use crate::editor::Buffer;
1347 use crate::test::*;
1348 use anyhow::Result;
1349 use gpui::App;
1350 use rand::prelude::*;
1351 use serde_json::json;
1352 use std::env;
1353 use std::fmt::Write;
1354 use std::os::unix;
1355 use std::time::{SystemTime, UNIX_EPOCH};
1356
1357 #[test]
1358 fn test_populate_and_search() {
1359 App::test_async((), |mut app| async move {
1360 let dir = temp_tree(json!({
1361 "root": {
1362 "apple": "",
1363 "banana": {
1364 "carrot": {
1365 "date": "",
1366 "endive": "",
1367 }
1368 },
1369 "fennel": {
1370 "grape": "",
1371 }
1372 }
1373 }));
1374
1375 let root_link_path = dir.path().join("root_link");
1376 unix::fs::symlink(&dir.path().join("root"), &root_link_path).unwrap();
1377 unix::fs::symlink(
1378 &dir.path().join("root/fennel"),
1379 &dir.path().join("root/finnochio"),
1380 )
1381 .unwrap();
1382
1383 let tree = app.add_model(|ctx| Worktree::new(root_link_path, ctx));
1384
1385 app.read(|ctx| tree.read(ctx).scan_complete()).await;
1386 app.read(|ctx| {
1387 let tree = tree.read(ctx);
1388 assert_eq!(tree.file_count(), 5);
1389
1390 assert_eq!(
1391 tree.inode_for_path("fennel/grape"),
1392 tree.inode_for_path("finnochio/grape")
1393 );
1394
1395 let results = match_paths(
1396 Some(tree.snapshot()).iter(),
1397 "bna",
1398 false,
1399 false,
1400 false,
1401 10,
1402 Default::default(),
1403 ctx.thread_pool().clone(),
1404 )
1405 .into_iter()
1406 .map(|result| result.path)
1407 .collect::<Vec<Arc<Path>>>();
1408 assert_eq!(
1409 results,
1410 vec![
1411 PathBuf::from("banana/carrot/date").into(),
1412 PathBuf::from("banana/carrot/endive").into(),
1413 ]
1414 );
1415 })
1416 });
1417 }
1418
1419 #[test]
1420 fn test_save_file() {
1421 App::test_async((), |mut app| async move {
1422 let dir = temp_tree(json!({
1423 "file1": "the old contents",
1424 }));
1425
1426 let tree = app.add_model(|ctx| Worktree::new(dir.path(), ctx));
1427 app.read(|ctx| tree.read(ctx).scan_complete()).await;
1428 app.read(|ctx| assert_eq!(tree.read(ctx).file_count(), 1));
1429
1430 let buffer =
1431 app.add_model(|ctx| Buffer::new(1, "a line of text.\n".repeat(10 * 1024), ctx));
1432
1433 let path = tree.update(&mut app, |tree, ctx| {
1434 let path = tree.files(0).next().unwrap().path().clone();
1435 assert_eq!(path.file_name().unwrap(), "file1");
1436 smol::block_on(tree.save(&path, buffer.read(ctx).snapshot(), ctx.as_ref()))
1437 .unwrap();
1438 path
1439 });
1440
1441 let history = app
1442 .read(|ctx| tree.read(ctx).load_history(&path, ctx))
1443 .await
1444 .unwrap();
1445 app.read(|ctx| {
1446 assert_eq!(history.base_text.as_ref(), buffer.read(ctx).text());
1447 });
1448 });
1449 }
1450
1451 #[test]
1452 fn test_save_in_single_file_worktree() {
1453 App::test_async((), |mut app| async move {
1454 let dir = temp_tree(json!({
1455 "file1": "the old contents",
1456 }));
1457
1458 let tree = app.add_model(|ctx| Worktree::new(dir.path().join("file1"), ctx));
1459 app.read(|ctx| tree.read(ctx).scan_complete()).await;
1460 app.read(|ctx| assert_eq!(tree.read(ctx).file_count(), 1));
1461
1462 let buffer =
1463 app.add_model(|ctx| Buffer::new(1, "a line of text.\n".repeat(10 * 1024), ctx));
1464
1465 let file = app.read(|ctx| tree.file("", ctx));
1466 app.update(|ctx| {
1467 assert_eq!(file.path().file_name(), None);
1468 smol::block_on(file.save(buffer.read(ctx).snapshot(), ctx.as_ref())).unwrap();
1469 });
1470
1471 let history = app.read(|ctx| file.load_history(ctx)).await.unwrap();
1472 app.read(|ctx| assert_eq!(history.base_text.as_ref(), buffer.read(ctx).text()));
1473 });
1474 }
1475
1476 #[test]
1477 fn test_rescan_simple() {
1478 App::test_async((), |mut app| async move {
1479 let dir = temp_tree(json!({
1480 "a": {
1481 "file1": "",
1482 "file2": "",
1483 "file3": "",
1484 },
1485 "b": {
1486 "c": {
1487 "file4": "",
1488 "file5": "",
1489 }
1490 }
1491 }));
1492
1493 let tree = app.add_model(|ctx| Worktree::new(dir.path(), ctx));
1494 let (file2, file3, file4, file5, non_existent_file) = app.read(|ctx| {
1495 (
1496 tree.file("a/file2", ctx),
1497 tree.file("a/file3", ctx),
1498 tree.file("b/c/file4", ctx),
1499 tree.file("b/c/file5", ctx),
1500 tree.file("a/filex", ctx),
1501 )
1502 });
1503
1504 // The worktree hasn't scanned the directories containing these paths,
1505 // so it can't determine that the paths are deleted.
1506 assert!(!file2.is_deleted());
1507 assert!(!file3.is_deleted());
1508 assert!(!file4.is_deleted());
1509 assert!(!file5.is_deleted());
1510 assert!(!non_existent_file.is_deleted());
1511
1512 // After scanning, the worktree knows which files exist and which don't.
1513 app.read(|ctx| tree.read(ctx).scan_complete()).await;
1514 assert!(!file2.is_deleted());
1515 assert!(!file3.is_deleted());
1516 assert!(!file4.is_deleted());
1517 assert!(!file5.is_deleted());
1518 assert!(non_existent_file.is_deleted());
1519
1520 tree.flush_fs_events(&app).await;
1521 std::fs::rename(dir.path().join("a/file3"), dir.path().join("b/c/file3")).unwrap();
1522 std::fs::remove_file(dir.path().join("b/c/file5")).unwrap();
1523 std::fs::rename(dir.path().join("b/c"), dir.path().join("d")).unwrap();
1524 std::fs::rename(dir.path().join("a/file2"), dir.path().join("a/file2.new")).unwrap();
1525 tree.update(&mut app, |tree, ctx| tree.next_scan_complete(ctx))
1526 .await;
1527
1528 app.read(|ctx| {
1529 assert_eq!(
1530 tree.read(ctx)
1531 .paths()
1532 .map(|p| p.to_str().unwrap())
1533 .collect::<Vec<_>>(),
1534 vec![
1535 "a",
1536 "a/file1",
1537 "a/file2.new",
1538 "b",
1539 "d",
1540 "d/file3",
1541 "d/file4"
1542 ]
1543 );
1544
1545 assert_eq!(file2.path().to_str().unwrap(), "a/file2.new");
1546 assert_eq!(file4.path().as_ref(), Path::new("d/file4"));
1547 assert_eq!(file5.path().as_ref(), Path::new("d/file5"));
1548 assert!(!file2.is_deleted());
1549 assert!(!file4.is_deleted());
1550 assert!(file5.is_deleted());
1551
1552 // Right now, this rename isn't detected because the target path
1553 // no longer exists on the file system by the time we process the
1554 // rename event.
1555 assert_eq!(file3.path().as_ref(), Path::new("a/file3"));
1556 assert!(file3.is_deleted());
1557 });
1558 });
1559 }
1560
1561 #[test]
1562 fn test_rescan_with_gitignore() {
1563 App::test_async((), |mut app| async move {
1564 let dir = temp_tree(json!({
1565 ".git": {},
1566 ".gitignore": "ignored-dir\n",
1567 "tracked-dir": {
1568 "tracked-file1": "tracked contents",
1569 },
1570 "ignored-dir": {
1571 "ignored-file1": "ignored contents",
1572 }
1573 }));
1574
1575 let tree = app.add_model(|ctx| Worktree::new(dir.path(), ctx));
1576 app.read(|ctx| tree.read(ctx).scan_complete()).await;
1577 tree.flush_fs_events(&app).await;
1578 app.read(|ctx| {
1579 let tree = tree.read(ctx);
1580 let tracked = tree.entry_for_path("tracked-dir/tracked-file1").unwrap();
1581 let ignored = tree.entry_for_path("ignored-dir/ignored-file1").unwrap();
1582 assert_eq!(tracked.is_ignored(), false);
1583 assert_eq!(ignored.is_ignored(), true);
1584 });
1585
1586 fs::write(dir.path().join("tracked-dir/tracked-file2"), "").unwrap();
1587 fs::write(dir.path().join("ignored-dir/ignored-file2"), "").unwrap();
1588 tree.update(&mut app, |tree, ctx| tree.next_scan_complete(ctx))
1589 .await;
1590 app.read(|ctx| {
1591 let tree = tree.read(ctx);
1592 let dot_git = tree.entry_for_path(".git").unwrap();
1593 let tracked = tree.entry_for_path("tracked-dir/tracked-file2").unwrap();
1594 let ignored = tree.entry_for_path("ignored-dir/ignored-file2").unwrap();
1595 assert_eq!(tracked.is_ignored(), false);
1596 assert_eq!(ignored.is_ignored(), true);
1597 assert_eq!(dot_git.is_ignored(), true);
1598 });
1599 });
1600 }
1601
1602 #[test]
1603 fn test_path_is_pending() {
1604 let mut snapshot = Snapshot {
1605 id: 0,
1606 scan_id: 0,
1607 abs_path: Path::new("").into(),
1608 entries: Default::default(),
1609 ignores: Default::default(),
1610 root_name: Default::default(),
1611 };
1612
1613 snapshot.entries.edit(
1614 vec![
1615 Edit::Insert(Entry {
1616 path: Path::new("b").into(),
1617 kind: EntryKind::Dir,
1618 inode: 0,
1619 is_ignored: false,
1620 is_symlink: false,
1621 }),
1622 Edit::Insert(Entry {
1623 path: Path::new("b/a").into(),
1624 kind: EntryKind::Dir,
1625 inode: 0,
1626 is_ignored: false,
1627 is_symlink: false,
1628 }),
1629 Edit::Insert(Entry {
1630 path: Path::new("b/c").into(),
1631 kind: EntryKind::PendingDir,
1632 inode: 0,
1633 is_ignored: false,
1634 is_symlink: false,
1635 }),
1636 Edit::Insert(Entry {
1637 path: Path::new("b/e").into(),
1638 kind: EntryKind::Dir,
1639 inode: 0,
1640 is_ignored: false,
1641 is_symlink: false,
1642 }),
1643 ],
1644 &(),
1645 );
1646
1647 assert!(!snapshot.path_is_pending("b/a"));
1648 assert!(!snapshot.path_is_pending("b/b"));
1649 assert!(snapshot.path_is_pending("b/c"));
1650 assert!(snapshot.path_is_pending("b/c/x"));
1651 assert!(!snapshot.path_is_pending("b/d"));
1652 assert!(!snapshot.path_is_pending("b/e"));
1653 }
1654
1655 #[test]
1656 fn test_mounted_volume_paths() {
1657 let paths = mounted_volume_paths();
1658 assert!(paths.contains(&"/".into()));
1659 }
1660
1661 #[test]
1662 fn test_random() {
1663 let iterations = env::var("ITERATIONS")
1664 .map(|i| i.parse().unwrap())
1665 .unwrap_or(100);
1666 let operations = env::var("OPERATIONS")
1667 .map(|o| o.parse().unwrap())
1668 .unwrap_or(40);
1669 let initial_entries = env::var("INITIAL_ENTRIES")
1670 .map(|o| o.parse().unwrap())
1671 .unwrap_or(20);
1672 let seeds = if let Ok(seed) = env::var("SEED").map(|s| s.parse().unwrap()) {
1673 seed..seed + 1
1674 } else {
1675 0..iterations
1676 };
1677
1678 for seed in seeds {
1679 dbg!(seed);
1680 let mut rng = StdRng::seed_from_u64(seed);
1681
1682 let root_dir = tempdir::TempDir::new(&format!("test-{}", seed)).unwrap();
1683 for _ in 0..initial_entries {
1684 randomly_mutate_tree(root_dir.path(), 1.0, &mut rng).unwrap();
1685 }
1686 log::info!("Generated initial tree");
1687
1688 let (notify_tx, _notify_rx) = smol::channel::unbounded();
1689 let mut scanner = BackgroundScanner::new(
1690 Arc::new(Mutex::new(Snapshot {
1691 id: 0,
1692 scan_id: 0,
1693 abs_path: root_dir.path().into(),
1694 entries: Default::default(),
1695 ignores: Default::default(),
1696 root_name: Default::default(),
1697 })),
1698 Arc::new(Mutex::new(Default::default())),
1699 notify_tx,
1700 0,
1701 );
1702 scanner.scan_dirs().unwrap();
1703 scanner.snapshot().check_invariants();
1704
1705 let mut events = Vec::new();
1706 let mut mutations_len = operations;
1707 while mutations_len > 1 {
1708 if !events.is_empty() && rng.gen_bool(0.4) {
1709 let len = rng.gen_range(0..=events.len());
1710 let to_deliver = events.drain(0..len).collect::<Vec<_>>();
1711 log::info!("Delivering events: {:#?}", to_deliver);
1712 scanner.process_events(to_deliver);
1713 scanner.snapshot().check_invariants();
1714 } else {
1715 events.extend(randomly_mutate_tree(root_dir.path(), 0.6, &mut rng).unwrap());
1716 mutations_len -= 1;
1717 }
1718 }
1719 log::info!("Quiescing: {:#?}", events);
1720 scanner.process_events(events);
1721 scanner.snapshot().check_invariants();
1722
1723 let (notify_tx, _notify_rx) = smol::channel::unbounded();
1724 let mut new_scanner = BackgroundScanner::new(
1725 Arc::new(Mutex::new(Snapshot {
1726 id: 0,
1727 scan_id: 0,
1728 abs_path: root_dir.path().into(),
1729 entries: Default::default(),
1730 ignores: Default::default(),
1731 root_name: Default::default(),
1732 })),
1733 Arc::new(Mutex::new(Default::default())),
1734 notify_tx,
1735 1,
1736 );
1737 new_scanner.scan_dirs().unwrap();
1738 assert_eq!(scanner.snapshot().to_vec(), new_scanner.snapshot().to_vec());
1739 }
1740 }
1741
1742 fn randomly_mutate_tree(
1743 root_path: &Path,
1744 insertion_probability: f64,
1745 rng: &mut impl Rng,
1746 ) -> Result<Vec<fsevent::Event>> {
1747 let root_path = root_path.canonicalize().unwrap();
1748 let (dirs, files) = read_dir_recursive(root_path.clone());
1749
1750 let mut events = Vec::new();
1751 let mut record_event = |path: PathBuf| {
1752 events.push(fsevent::Event {
1753 event_id: SystemTime::now()
1754 .duration_since(UNIX_EPOCH)
1755 .unwrap()
1756 .as_secs(),
1757 flags: fsevent::StreamFlags::empty(),
1758 path,
1759 });
1760 };
1761
1762 if (files.is_empty() && dirs.len() == 1) || rng.gen_bool(insertion_probability) {
1763 let path = dirs.choose(rng).unwrap();
1764 let new_path = path.join(gen_name(rng));
1765
1766 if rng.gen() {
1767 log::info!("Creating dir {:?}", new_path.strip_prefix(root_path)?);
1768 fs::create_dir(&new_path)?;
1769 } else {
1770 log::info!("Creating file {:?}", new_path.strip_prefix(root_path)?);
1771 fs::write(&new_path, "")?;
1772 }
1773 record_event(new_path);
1774 } else if rng.gen_bool(0.05) {
1775 let ignore_dir_path = dirs.choose(rng).unwrap();
1776 let ignore_path = ignore_dir_path.join(&*GITIGNORE);
1777
1778 let (subdirs, subfiles) = read_dir_recursive(ignore_dir_path.clone());
1779 let files_to_ignore = {
1780 let len = rng.gen_range(0..=subfiles.len());
1781 subfiles.choose_multiple(rng, len)
1782 };
1783 let dirs_to_ignore = {
1784 let len = rng.gen_range(0..subdirs.len());
1785 subdirs.choose_multiple(rng, len)
1786 };
1787
1788 let mut ignore_contents = String::new();
1789 for path_to_ignore in files_to_ignore.chain(dirs_to_ignore) {
1790 write!(
1791 ignore_contents,
1792 "{}\n",
1793 path_to_ignore
1794 .strip_prefix(&ignore_dir_path)?
1795 .to_str()
1796 .unwrap()
1797 )
1798 .unwrap();
1799 }
1800 log::info!(
1801 "Creating {:?} with contents:\n{}",
1802 ignore_path.strip_prefix(&root_path)?,
1803 ignore_contents
1804 );
1805 fs::write(&ignore_path, ignore_contents).unwrap();
1806 record_event(ignore_path);
1807 } else {
1808 let old_path = {
1809 let file_path = files.choose(rng);
1810 let dir_path = dirs[1..].choose(rng);
1811 file_path.into_iter().chain(dir_path).choose(rng).unwrap()
1812 };
1813
1814 let is_rename = rng.gen();
1815 if is_rename {
1816 let new_path_parent = dirs
1817 .iter()
1818 .filter(|d| !d.starts_with(old_path))
1819 .choose(rng)
1820 .unwrap();
1821
1822 let overwrite_existing_dir =
1823 !old_path.starts_with(&new_path_parent) && rng.gen_bool(0.3);
1824 let new_path = if overwrite_existing_dir {
1825 fs::remove_dir_all(&new_path_parent).ok();
1826 new_path_parent.to_path_buf()
1827 } else {
1828 new_path_parent.join(gen_name(rng))
1829 };
1830
1831 log::info!(
1832 "Renaming {:?} to {}{:?}",
1833 old_path.strip_prefix(&root_path)?,
1834 if overwrite_existing_dir {
1835 "overwrite "
1836 } else {
1837 ""
1838 },
1839 new_path.strip_prefix(&root_path)?
1840 );
1841 fs::rename(&old_path, &new_path)?;
1842 record_event(old_path.clone());
1843 record_event(new_path);
1844 } else if old_path.is_dir() {
1845 let (dirs, files) = read_dir_recursive(old_path.clone());
1846
1847 log::info!("Deleting dir {:?}", old_path.strip_prefix(&root_path)?);
1848 fs::remove_dir_all(&old_path).unwrap();
1849 for file in files {
1850 record_event(file);
1851 }
1852 for dir in dirs {
1853 record_event(dir);
1854 }
1855 } else {
1856 log::info!("Deleting file {:?}", old_path.strip_prefix(&root_path)?);
1857 fs::remove_file(old_path).unwrap();
1858 record_event(old_path.clone());
1859 }
1860 }
1861
1862 Ok(events)
1863 }
1864
1865 fn read_dir_recursive(path: PathBuf) -> (Vec<PathBuf>, Vec<PathBuf>) {
1866 let child_entries = fs::read_dir(&path).unwrap();
1867 let mut dirs = vec![path];
1868 let mut files = Vec::new();
1869 for child_entry in child_entries {
1870 let child_path = child_entry.unwrap().path();
1871 if child_path.is_dir() {
1872 let (child_dirs, child_files) = read_dir_recursive(child_path);
1873 dirs.extend(child_dirs);
1874 files.extend(child_files);
1875 } else {
1876 files.push(child_path);
1877 }
1878 }
1879 (dirs, files)
1880 }
1881
1882 fn gen_name(rng: &mut impl Rng) -> String {
1883 (0..6)
1884 .map(|_| rng.sample(rand::distributions::Alphanumeric))
1885 .map(char::from)
1886 .collect()
1887 }
1888
1889 impl Snapshot {
1890 fn check_invariants(&self) {
1891 let mut files = self.files(0);
1892 let mut visible_files = self.visible_files(0);
1893 for entry in self.entries.cursor::<(), ()>() {
1894 if entry.is_file() {
1895 assert_eq!(files.next().unwrap().inode(), entry.inode);
1896 if !entry.is_ignored {
1897 assert_eq!(visible_files.next().unwrap().inode(), entry.inode);
1898 }
1899 }
1900 }
1901 assert!(files.next().is_none());
1902 assert!(visible_files.next().is_none());
1903
1904 let mut bfs_paths = Vec::new();
1905 let mut stack = vec![Path::new("")];
1906 while let Some(path) = stack.pop() {
1907 bfs_paths.push(path);
1908 let ix = stack.len();
1909 for child_entry in self.child_entries(path) {
1910 stack.insert(ix, child_entry.path());
1911 }
1912 }
1913
1914 let dfs_paths = self
1915 .entries
1916 .cursor::<(), ()>()
1917 .map(|e| e.path().as_ref())
1918 .collect::<Vec<_>>();
1919 assert_eq!(bfs_paths, dfs_paths);
1920
1921 for (ignore_parent_path, _) in &self.ignores {
1922 assert!(self.entry_for_path(ignore_parent_path).is_some());
1923 assert!(self
1924 .entry_for_path(ignore_parent_path.join(&*GITIGNORE))
1925 .is_some());
1926 }
1927 }
1928
1929 fn to_vec(&self) -> Vec<(&Path, u64, bool)> {
1930 let mut paths = Vec::new();
1931 for entry in self.entries.cursor::<(), ()>() {
1932 paths.push((entry.path().as_ref(), entry.inode(), entry.is_ignored()));
1933 }
1934 paths.sort_by(|a, b| a.0.cmp(&b.0));
1935 paths
1936 }
1937 }
1938}