1mod char_bag;
2mod fuzzy;
3mod ignore;
4
5use crate::{
6 editor::{History, Rope},
7 sum_tree::{self, Cursor, Edit, SeekBias, SumTree},
8};
9use ::ignore::gitignore::Gitignore;
10use anyhow::{Context, Result};
11pub use fuzzy::{match_paths, PathMatch};
12use gpui::{scoped_pool, AppContext, Entity, ModelContext, ModelHandle, MutableAppContext, Task};
13use lazy_static::lazy_static;
14use parking_lot::Mutex;
15use postage::{
16 prelude::{Sink, Stream},
17 watch,
18};
19use smol::channel::Sender;
20use std::{
21 cmp,
22 collections::{HashMap, HashSet},
23 ffi::{CStr, OsStr, OsString},
24 fmt, fs,
25 future::Future,
26 io::{self, Read, Write},
27 ops::Deref,
28 os::unix::{ffi::OsStrExt, fs::MetadataExt},
29 path::{Path, PathBuf},
30 sync::{Arc, Weak},
31 time::{Duration, SystemTime, UNIX_EPOCH},
32};
33
34use self::{char_bag::CharBag, ignore::IgnoreStack};
35
36lazy_static! {
37 static ref GITIGNORE: &'static OsStr = OsStr::new(".gitignore");
38}
39
40#[derive(Clone, Debug)]
41enum ScanState {
42 Idle,
43 Scanning,
44 Err(Arc<io::Error>),
45}
46
47pub struct Worktree {
48 snapshot: Snapshot,
49 background_snapshot: Arc<Mutex<Snapshot>>,
50 handles: Arc<Mutex<HashMap<Arc<Path>, Weak<Mutex<FileHandleState>>>>>,
51 scan_state: (watch::Sender<ScanState>, watch::Receiver<ScanState>),
52 _event_stream_handle: fsevent::Handle,
53 poll_scheduled: bool,
54}
55
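/// A live reference to a file within a worktree.
///
/// The handle's shared state is kept up to date by the background scanner, so its path, mtime,
/// and deletion status continue to track the file as it is modified, renamed, or removed on
/// disk.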
56#[derive(Clone, Debug)]
57pub struct FileHandle {
58 worktree: ModelHandle<Worktree>,
59 state: Arc<Mutex<FileHandleState>>,
60}
61
62#[derive(Clone, Debug, PartialEq, Eq)]
63struct FileHandleState {
64 path: Arc<Path>,
65 is_deleted: bool,
66 mtime: SystemTime,
67}
68
69impl Worktree {
70 pub fn new(path: impl Into<Arc<Path>>, ctx: &mut ModelContext<Self>) -> Self {
71 let abs_path = path.into();
72 let (scan_state_tx, scan_state_rx) = smol::channel::unbounded();
73 let id = ctx.model_id();
74 let snapshot = Snapshot {
75 id,
76 scan_id: 0,
77 abs_path,
78 root_name: Default::default(),
79 ignores: Default::default(),
80 entries: Default::default(),
81 };
82 let (event_stream, event_stream_handle) =
83 fsevent::EventStream::new(&[snapshot.abs_path.as_ref()], Duration::from_millis(100));
84
85 let background_snapshot = Arc::new(Mutex::new(snapshot.clone()));
86 let handles = Arc::new(Mutex::new(Default::default()));
87
88 let tree = Self {
89 snapshot,
90 background_snapshot: background_snapshot.clone(),
91 handles: handles.clone(),
92 scan_state: watch::channel_with(ScanState::Scanning),
93 _event_stream_handle: event_stream_handle,
94 poll_scheduled: false,
95 };
96
97 std::thread::spawn(move || {
98 let scanner = BackgroundScanner::new(background_snapshot, handles, scan_state_tx, id);
99 scanner.run(event_stream)
100 });
101
102 ctx.spawn(|this, mut ctx| {
103 let this = this.downgrade();
104 async move {
105 while let Ok(scan_state) = scan_state_rx.recv().await {
106 let alive = ctx.update(|ctx| {
107 if let Some(handle) = this.upgrade(&ctx) {
108 handle
109 .update(ctx, |this, ctx| this.observe_scan_state(scan_state, ctx));
110 true
111 } else {
112 false
113 }
114 });
115
116 if !alive {
117 break;
118 }
119 }
120 }
121 })
122 .detach();
123
124 tree
125 }
126
127 pub fn scan_complete(&self) -> impl Future<Output = ()> {
128 let mut scan_state_rx = self.scan_state.1.clone();
129 async move {
130 let mut scan_state = Some(scan_state_rx.borrow().clone());
131 while let Some(ScanState::Scanning) = scan_state {
132 scan_state = scan_state_rx.recv().await;
133 }
134 }
135 }
136
137 fn observe_scan_state(&mut self, scan_state: ScanState, ctx: &mut ModelContext<Self>) {
138 let _ = self.scan_state.0.blocking_send(scan_state);
139 self.poll_entries(ctx);
140 }
141
142 fn poll_entries(&mut self, ctx: &mut ModelContext<Self>) {
143 self.snapshot = self.background_snapshot.lock().clone();
144 ctx.notify();
145
146 if self.is_scanning() && !self.poll_scheduled {
147 ctx.spawn(|this, mut ctx| async move {
148 smol::Timer::after(Duration::from_millis(100)).await;
149 this.update(&mut ctx, |this, ctx| {
150 this.poll_scheduled = false;
151 this.poll_entries(ctx);
152 })
153 })
154 .detach();
155 self.poll_scheduled = true;
156 }
157 }
158
159 fn is_scanning(&self) -> bool {
160 if let ScanState::Scanning = *self.scan_state.1.borrow() {
161 true
162 } else {
163 false
164 }
165 }
166
167 pub fn snapshot(&self) -> Snapshot {
168 self.snapshot.clone()
169 }
170
171 pub fn abs_path(&self) -> &Path {
172 self.snapshot.abs_path.as_ref()
173 }
174
175 pub fn contains_abs_path(&self, path: &Path) -> bool {
176 path.starts_with(&self.snapshot.abs_path)
177 }
178
179 fn absolutize(&self, path: &Path) -> PathBuf {
180 if path.file_name().is_some() {
181 self.snapshot.abs_path.join(path)
182 } else {
183 self.snapshot.abs_path.to_path_buf()
184 }
185 }
186
187 pub fn load_history(
188 &self,
189 path: &Path,
190 ctx: &AppContext,
191 ) -> impl Future<Output = Result<History>> {
192 let path = path.to_path_buf();
193 let abs_path = self.absolutize(&path);
194 ctx.background_executor().spawn(async move {
195 let mut file = fs::File::open(&abs_path)?;
196 let mut base_text = String::new();
197 file.read_to_string(&mut base_text)?;
198 Ok(History::new(Arc::from(base_text)))
199 })
200 }
201
202 pub fn save<'a>(&self, path: &Path, content: Rope, ctx: &AppContext) -> Task<Result<()>> {
203 let handles = self.handles.clone();
204 let path = path.to_path_buf();
205 let abs_path = self.absolutize(&path);
206 ctx.background_executor().spawn(async move {
207 let buffer_size = content.summary().bytes.min(10 * 1024);
208 let file = fs::File::create(&abs_path)?;
209 let mut writer = io::BufWriter::with_capacity(buffer_size, &file);
210 for chunk in content.chunks() {
                writer.write_all(chunk.as_bytes())?;
212 }
213 writer.flush()?;
214
215 if let Some(handle) = handles.lock().get(&*path).and_then(Weak::upgrade) {
216 let mut handle = handle.lock();
217 handle.mtime = file.metadata()?.modified()?;
218 handle.is_deleted = false;
219 }
220
221 Ok(())
222 })
223 }
224}
225
226impl Entity for Worktree {
227 type Event = ();
228}
229
230impl Deref for Worktree {
231 type Target = Snapshot;
232
233 fn deref(&self) -> &Self::Target {
234 &self.snapshot
235 }
236}
237
238impl fmt::Debug for Worktree {
239 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
240 self.snapshot.fmt(f)
241 }
242}
243
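/// The state of a worktree's file tree as of a particular scan: its entries plus the
/// `.gitignore` files discovered so far.
///
/// The foreground `Worktree` model and the background scanner each hold their own copy, and the
/// foreground copy is refreshed from the background one as scanning progresses.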
244#[derive(Clone)]
245pub struct Snapshot {
246 id: usize,
247 scan_id: usize,
248 abs_path: Arc<Path>,
249 root_name: String,
250 ignores: HashMap<Arc<Path>, (Arc<Gitignore>, usize)>,
251 entries: SumTree<Entry>,
252}
253
254impl Snapshot {
255 pub fn file_count(&self) -> usize {
256 self.entries.summary().file_count
257 }
258
259 pub fn visible_file_count(&self) -> usize {
260 self.entries.summary().visible_file_count
261 }
262
263 pub fn files(&self, start: usize) -> FileIter {
264 FileIter::all(self, start)
265 }
266
267 #[cfg(test)]
268 pub fn paths(&self) -> impl Iterator<Item = &Arc<Path>> {
269 self.entries
270 .cursor::<(), ()>()
271 .skip(1)
272 .map(|entry| entry.path())
273 }
274
275 pub fn visible_files(&self, start: usize) -> FileIter {
276 FileIter::visible(self, start)
277 }
278
279 fn child_entries<'a>(&'a self, path: &'a Path) -> ChildEntriesIter<'a> {
280 ChildEntriesIter::new(path, self)
281 }
282
283 pub fn root_entry(&self) -> &Entry {
284 self.entry_for_path("").unwrap()
285 }
286
    /// Returns the filename of the snapshot's root, plus a trailing slash if the snapshot's root
    /// is a directory.
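    ///
    /// A rough illustration (the paths below are hypothetical):
    ///
    /// ```ignore
    /// // Worktree rooted at the directory "/Users/someone/project":
    /// assert_eq!(snapshot.root_name(), "project/");
    /// // Worktree rooted at the single file "/Users/someone/notes.txt":
    /// assert_eq!(snapshot.root_name(), "notes.txt");
    /// ```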
289 pub fn root_name(&self) -> &str {
290 &self.root_name
291 }
292
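    /// Returns true if the given path lies within a directory that has been discovered but whose
    /// contents have not yet been scanned (`EntryKind::PendingDir`), or if the initial scan has
    /// not yet populated any entries.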
293 fn path_is_pending(&self, path: impl AsRef<Path>) -> bool {
294 if self.entries.is_empty() {
295 return true;
296 }
297 let path = path.as_ref();
298 let mut cursor = self.entries.cursor::<_, ()>();
299 if cursor.seek(&PathSearch::Exact(path), SeekBias::Left, &()) {
300 let entry = cursor.item().unwrap();
301 if entry.path.as_ref() == path {
302 return matches!(entry.kind, EntryKind::PendingDir);
303 }
304 }
305 if let Some(entry) = cursor.prev_item() {
306 matches!(entry.kind, EntryKind::PendingDir) && path.starts_with(entry.path.as_ref())
307 } else {
308 false
309 }
310 }
311
312 fn entry_for_path(&self, path: impl AsRef<Path>) -> Option<&Entry> {
313 let mut cursor = self.entries.cursor::<_, ()>();
314 if cursor.seek(&PathSearch::Exact(path.as_ref()), SeekBias::Left, &()) {
315 cursor.item()
316 } else {
317 None
318 }
319 }
320
321 pub fn inode_for_path(&self, path: impl AsRef<Path>) -> Option<u64> {
322 self.entry_for_path(path.as_ref()).map(|e| e.inode())
323 }
324
325 fn insert_entry(&mut self, entry: Entry) {
326 if !entry.is_dir() && entry.path().file_name() == Some(&GITIGNORE) {
327 let (ignore, err) = Gitignore::new(self.abs_path.join(entry.path()));
328 if let Some(err) = err {
329 log::error!("error in ignore file {:?} - {:?}", entry.path(), err);
330 }
331
332 let ignore_dir_path = entry.path().parent().unwrap();
333 self.ignores
334 .insert(ignore_dir_path.into(), (Arc::new(ignore), self.scan_id));
335 }
336 self.entries.insert(entry, &());
337 }
338
339 fn populate_dir(
340 &mut self,
341 parent_path: Arc<Path>,
342 entries: impl IntoIterator<Item = Entry>,
343 ignore: Option<Arc<Gitignore>>,
344 ) {
345 let mut edits = Vec::new();
346
347 let mut parent_entry = self
348 .entries
349 .get(&PathKey(parent_path.clone()), &())
350 .unwrap()
351 .clone();
352 if let Some(ignore) = ignore {
353 self.ignores.insert(parent_path, (ignore, self.scan_id));
354 }
355 if matches!(parent_entry.kind, EntryKind::PendingDir) {
356 parent_entry.kind = EntryKind::Dir;
357 } else {
358 unreachable!();
359 }
360 edits.push(Edit::Insert(parent_entry));
361
362 for entry in entries {
363 edits.push(Edit::Insert(entry));
364 }
365 self.entries.edit(edits, &());
366 }
367
368 fn remove_path(&mut self, path: &Path) {
369 let new_entries = {
370 let mut cursor = self.entries.cursor::<_, ()>();
371 let mut new_entries = cursor.slice(&PathSearch::Exact(path), SeekBias::Left, &());
372 cursor.seek_forward(&PathSearch::Successor(path), SeekBias::Left, &());
373 new_entries.push_tree(cursor.suffix(&()), &());
374 new_entries
375 };
376 self.entries = new_entries;
377
378 if path.file_name() == Some(&GITIGNORE) {
379 if let Some((_, scan_id)) = self.ignores.get_mut(path.parent().unwrap()) {
380 *scan_id = self.scan_id;
381 }
382 }
383 }
384
385 fn ignore_stack_for_path(&self, path: &Path, is_dir: bool) -> Arc<IgnoreStack> {
386 let mut new_ignores = Vec::new();
387 for ancestor in path.ancestors().skip(1) {
388 if let Some((ignore, _)) = self.ignores.get(ancestor) {
389 new_ignores.push((ancestor, Some(ignore.clone())));
390 } else {
391 new_ignores.push((ancestor, None));
392 }
393 }
394
395 let mut ignore_stack = IgnoreStack::none();
396 for (parent_path, ignore) in new_ignores.into_iter().rev() {
397 if ignore_stack.is_path_ignored(&parent_path, true) {
398 ignore_stack = IgnoreStack::all();
399 break;
400 } else if let Some(ignore) = ignore {
401 ignore_stack = ignore_stack.append(Arc::from(parent_path), ignore);
402 }
403 }
404
405 if ignore_stack.is_path_ignored(path, is_dir) {
406 ignore_stack = IgnoreStack::all();
407 }
408
409 ignore_stack
410 }
411}
412
413impl fmt::Debug for Snapshot {
414 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
415 for entry in self.entries.cursor::<(), ()>() {
416 for _ in entry.path().ancestors().skip(1) {
417 write!(f, " ")?;
418 }
419 writeln!(f, "{:?} (inode: {})", entry.path(), entry.inode())?;
420 }
421 Ok(())
422 }
423}
424
425impl FileHandle {
426 /// Returns this file's path relative to the root of its worktree.
427 pub fn path(&self) -> Arc<Path> {
428 self.state.lock().path.clone()
429 }
430
    /// Returns the last component of this handle's absolute path. If this handle refers to the
    /// root of its worktree, then this method will return the name of the worktree itself.
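    ///
    /// A rough illustration (the paths below are hypothetical):
    ///
    /// ```ignore
    /// // Handle for "src/main.rs" within a worktree:
    /// assert_eq!(file.file_name(ctx), Some("main.rs".into()));
    /// // Handle for the root of a worktree opened on the single file "/tmp/notes.txt":
    /// assert_eq!(file.file_name(ctx), Some("notes.txt".into()));
    /// ```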
433 pub fn file_name<'a>(&'a self, ctx: &'a AppContext) -> Option<OsString> {
434 self.state
435 .lock()
436 .path
437 .file_name()
438 .or_else(|| self.worktree.read(ctx).abs_path().file_name())
439 .map(Into::into)
440 }
441
442 pub fn is_deleted(&self) -> bool {
443 self.state.lock().is_deleted
444 }
445
446 pub fn mtime(&self) -> SystemTime {
447 self.state.lock().mtime
448 }
449
450 pub fn exists(&self) -> bool {
451 !self.is_deleted()
452 }
453
454 pub fn load_history(&self, ctx: &AppContext) -> impl Future<Output = Result<History>> {
455 self.worktree.read(ctx).load_history(&self.path(), ctx)
456 }
457
458 pub fn save<'a>(&self, content: Rope, ctx: &AppContext) -> Task<Result<()>> {
459 let worktree = self.worktree.read(ctx);
460 worktree.save(&self.path(), content, ctx)
461 }
462
463 pub fn worktree_id(&self) -> usize {
464 self.worktree.id()
465 }
466
467 pub fn entry_id(&self) -> (usize, Arc<Path>) {
468 (self.worktree.id(), self.path())
469 }
470
471 pub fn observe_from_model<T: Entity>(
472 &self,
473 ctx: &mut ModelContext<T>,
474 mut callback: impl FnMut(&mut T, FileHandle, &mut ModelContext<T>) + 'static,
475 ) {
476 let mut prev_state = self.state.lock().clone();
477 let cur_state = Arc::downgrade(&self.state);
478 ctx.observe(&self.worktree, move |observer, worktree, ctx| {
479 if let Some(cur_state) = cur_state.upgrade() {
480 let cur_state_unlocked = cur_state.lock();
481 if *cur_state_unlocked != prev_state {
482 prev_state = cur_state_unlocked.clone();
483 drop(cur_state_unlocked);
484 callback(
485 observer,
486 FileHandle {
487 worktree,
488 state: cur_state,
489 },
490 ctx,
491 );
492 }
493 }
494 });
495 }
496}
497
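/// A single file or directory within a worktree, stored in the snapshot's entry tree.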
498#[derive(Clone, Debug)]
499pub struct Entry {
500 kind: EntryKind,
501 path: Arc<Path>,
502 inode: u64,
503 is_symlink: bool,
504 is_ignored: bool,
505}
506
507#[derive(Clone, Debug)]
508pub enum EntryKind {
509 PendingDir,
510 Dir,
511 File(CharBag),
512}
513
514impl Entry {
515 pub fn path(&self) -> &Arc<Path> {
516 &self.path
517 }
518
519 pub fn inode(&self) -> u64 {
520 self.inode
521 }
522
523 pub fn is_ignored(&self) -> bool {
524 self.is_ignored
525 }
526
527 fn is_dir(&self) -> bool {
528 matches!(self.kind, EntryKind::Dir | EntryKind::PendingDir)
529 }
530
531 fn is_file(&self) -> bool {
532 matches!(self.kind, EntryKind::File(_))
533 }
534}
535
536impl sum_tree::Item for Entry {
537 type Summary = EntrySummary;
538
539 fn summary(&self) -> Self::Summary {
540 let file_count;
541 let visible_file_count;
542 if self.is_file() {
543 file_count = 1;
544 if self.is_ignored {
545 visible_file_count = 0;
546 } else {
547 visible_file_count = 1;
548 }
549 } else {
550 file_count = 0;
551 visible_file_count = 0;
552 }
553
554 EntrySummary {
555 max_path: self.path().clone(),
556 file_count,
557 visible_file_count,
558 }
559 }
560}
561
562impl sum_tree::KeyedItem for Entry {
563 type Key = PathKey;
564
565 fn key(&self) -> Self::Key {
566 PathKey(self.path().clone())
567 }
568}
569
570#[derive(Clone, Debug)]
571pub struct EntrySummary {
572 max_path: Arc<Path>,
573 file_count: usize,
574 visible_file_count: usize,
575}
576
577impl Default for EntrySummary {
578 fn default() -> Self {
579 Self {
580 max_path: Arc::from(Path::new("")),
581 file_count: 0,
582 visible_file_count: 0,
583 }
584 }
585}
586
587impl sum_tree::Summary for EntrySummary {
588 type Context = ();
589
590 fn add_summary(&mut self, rhs: &Self, _: &()) {
591 self.max_path = rhs.max_path.clone();
592 self.file_count += rhs.file_count;
593 self.visible_file_count += rhs.visible_file_count;
594 }
595}
596
597#[derive(Clone, Debug, Eq, PartialEq, Ord, PartialOrd)]
598pub struct PathKey(Arc<Path>);
599
600impl Default for PathKey {
601 fn default() -> Self {
602 Self(Path::new("").into())
603 }
604}
605
606impl<'a> sum_tree::Dimension<'a, EntrySummary> for PathKey {
607 fn add_summary(&mut self, summary: &'a EntrySummary) {
608 self.0 = summary.max_path.clone();
609 }
610}
611
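/// A seek target for navigating the entry tree by path.
///
/// `Exact(path)` seeks to the entry at `path` itself, while `Successor(path)` seeks just past
/// `path` and all of its descendants, allowing a cursor to skip over an entire subtree.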
612#[derive(Copy, Clone, Debug, PartialEq, Eq)]
613enum PathSearch<'a> {
614 Exact(&'a Path),
615 Successor(&'a Path),
616}
617
618impl<'a> Ord for PathSearch<'a> {
619 fn cmp(&self, other: &Self) -> cmp::Ordering {
620 match (self, other) {
621 (Self::Exact(a), Self::Exact(b)) => a.cmp(b),
622 (Self::Successor(a), Self::Exact(b)) => {
623 if b.starts_with(a) {
624 cmp::Ordering::Greater
625 } else {
626 a.cmp(b)
627 }
628 }
629 _ => todo!("not sure we need the other two cases"),
630 }
631 }
632}
633
634impl<'a> PartialOrd for PathSearch<'a> {
635 fn partial_cmp(&self, other: &Self) -> Option<cmp::Ordering> {
636 Some(self.cmp(other))
637 }
638}
639
640impl<'a> Default for PathSearch<'a> {
641 fn default() -> Self {
642 Self::Exact(Path::new("").into())
643 }
644}
645
646impl<'a: 'b, 'b> sum_tree::Dimension<'a, EntrySummary> for PathSearch<'b> {
647 fn add_summary(&mut self, summary: &'a EntrySummary) {
648 *self = Self::Exact(summary.max_path.as_ref());
649 }
650}
651
652#[derive(Copy, Clone, Default, Debug, Eq, PartialEq, Ord, PartialOrd)]
653pub struct FileCount(usize);
654
655impl<'a> sum_tree::Dimension<'a, EntrySummary> for FileCount {
656 fn add_summary(&mut self, summary: &'a EntrySummary) {
657 self.0 += summary.file_count;
658 }
659}
660
661#[derive(Copy, Clone, Default, Debug, Eq, PartialEq, Ord, PartialOrd)]
662pub struct VisibleFileCount(usize);
663
664impl<'a> sum_tree::Dimension<'a, EntrySummary> for VisibleFileCount {
665 fn add_summary(&mut self, summary: &'a EntrySummary) {
666 self.0 += summary.visible_file_count;
667 }
668}
669
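/// Scans a worktree on a background thread.
///
/// The scanner populates the shared snapshot during the initial scan, then keeps the snapshot
/// and any outstanding file handles up to date as FS events arrive, reporting progress on the
/// `notify` channel as `ScanState` values.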
670struct BackgroundScanner {
671 snapshot: Arc<Mutex<Snapshot>>,
672 notify: Sender<ScanState>,
673 handles: Arc<Mutex<HashMap<Arc<Path>, Weak<Mutex<FileHandleState>>>>>,
674 other_mount_paths: HashSet<PathBuf>,
675 thread_pool: scoped_pool::Pool,
676 root_char_bag: CharBag,
677}
678
679impl BackgroundScanner {
680 fn new(
681 snapshot: Arc<Mutex<Snapshot>>,
682 handles: Arc<Mutex<HashMap<Arc<Path>, Weak<Mutex<FileHandleState>>>>>,
683 notify: Sender<ScanState>,
684 worktree_id: usize,
685 ) -> Self {
686 let mut scanner = Self {
687 root_char_bag: Default::default(),
688 snapshot,
689 notify,
690 handles,
691 other_mount_paths: Default::default(),
692 thread_pool: scoped_pool::Pool::new(16, format!("worktree-{}-scanner", worktree_id)),
693 };
694 scanner.update_other_mount_paths();
695 scanner
696 }
697
698 fn update_other_mount_paths(&mut self) {
699 let path = self.snapshot.lock().abs_path.clone();
700 self.other_mount_paths.clear();
701 self.other_mount_paths.extend(
702 mounted_volume_paths()
703 .into_iter()
704 .filter(|mount_path| !path.starts_with(mount_path)),
705 );
706 }
707
708 fn abs_path(&self) -> Arc<Path> {
709 self.snapshot.lock().abs_path.clone()
710 }
711
712 fn snapshot(&self) -> Snapshot {
713 self.snapshot.lock().clone()
714 }
715
716 fn run(mut self, event_stream: fsevent::EventStream) {
717 if smol::block_on(self.notify.send(ScanState::Scanning)).is_err() {
718 return;
719 }
720
721 if let Err(err) = self.scan_dirs() {
722 if smol::block_on(self.notify.send(ScanState::Err(Arc::new(err)))).is_err() {
723 return;
724 }
725 }
726
727 if smol::block_on(self.notify.send(ScanState::Idle)).is_err() {
728 return;
729 }
730
731 event_stream.run(move |events| {
732 if smol::block_on(self.notify.send(ScanState::Scanning)).is_err() {
733 return false;
734 }
735
736 if !self.process_events(events) {
737 return false;
738 }
739
740 if smol::block_on(self.notify.send(ScanState::Idle)).is_err() {
741 return false;
742 }
743
744 true
745 });
746 }
747
748 fn scan_dirs(&mut self) -> io::Result<()> {
749 self.snapshot.lock().scan_id += 1;
750
751 let path: Arc<Path> = Arc::from(Path::new(""));
752 let abs_path = self.abs_path();
753 let metadata = fs::metadata(&abs_path)?;
754 let inode = metadata.ino();
755 let is_symlink = fs::symlink_metadata(&abs_path)?.file_type().is_symlink();
756 let is_dir = metadata.file_type().is_dir();
757
758 // After determining whether the root entry is a file or a directory, populate the
759 // snapshot's "root name", which will be used for the purpose of fuzzy matching.
760 let mut root_name = abs_path
761 .file_name()
762 .map_or(String::new(), |f| f.to_string_lossy().to_string());
763 if is_dir {
764 root_name.push('/');
765 }
766 self.root_char_bag = root_name.chars().map(|c| c.to_ascii_lowercase()).collect();
767 self.snapshot.lock().root_name = root_name;
768
769 if is_dir {
770 self.snapshot.lock().insert_entry(Entry {
771 kind: EntryKind::PendingDir,
772 path: path.clone(),
773 inode,
774 is_symlink,
775 is_ignored: false,
776 });
777
778 let (tx, rx) = crossbeam_channel::unbounded();
779 tx.send(ScanJob {
780 abs_path: abs_path.to_path_buf(),
781 path,
782 ignore_stack: IgnoreStack::none(),
783 scan_queue: tx.clone(),
784 })
785 .unwrap();
786 drop(tx);
787
788 self.thread_pool.scoped(|pool| {
789 for _ in 0..self.thread_pool.thread_count() {
790 pool.execute(|| {
791 while let Ok(job) = rx.recv() {
792 if let Err(err) = self.scan_dir(&job) {
793 log::error!("error scanning {:?}: {}", job.abs_path, err);
794 }
795 }
796 });
797 }
798 });
799 } else {
800 self.snapshot.lock().insert_entry(Entry {
801 kind: EntryKind::File(self.char_bag(&path)),
802 path,
803 inode,
804 is_symlink,
805 is_ignored: false,
806 });
807 }
808
809 self.mark_deleted_file_handles();
810 Ok(())
811 }
812
813 fn scan_dir(&self, job: &ScanJob) -> io::Result<()> {
814 let mut new_entries: Vec<Entry> = Vec::new();
815 let mut new_jobs: Vec<ScanJob> = Vec::new();
816 let mut ignore_stack = job.ignore_stack.clone();
817 let mut new_ignore = None;
818
819 for child_entry in fs::read_dir(&job.abs_path)? {
820 let child_entry = child_entry?;
821 let child_name = child_entry.file_name();
822 let child_abs_path = job.abs_path.join(&child_name);
823 let child_path: Arc<Path> = job.path.join(&child_name).into();
824 let child_is_symlink = child_entry.metadata()?.file_type().is_symlink();
825 let child_metadata = if let Ok(metadata) = fs::metadata(&child_abs_path) {
826 metadata
827 } else {
828 log::error!("could not get metadata for path {:?}", child_abs_path);
829 continue;
830 };
831
832 let child_inode = child_metadata.ino();
833
834 // Disallow mount points outside the file system containing the root of this worktree
835 if self.other_mount_paths.contains(&child_abs_path) {
836 continue;
837 }
838
839 // If we find a .gitignore, add it to the stack of ignores used to determine which paths are ignored
840 if child_name == *GITIGNORE {
841 let (ignore, err) = Gitignore::new(&child_abs_path);
842 if let Some(err) = err {
843 log::error!("error in ignore file {:?} - {:?}", child_path, err);
844 }
845 let ignore = Arc::new(ignore);
846 ignore_stack = ignore_stack.append(job.path.clone(), ignore.clone());
847 new_ignore = Some(ignore);
848
                // Update the ignore status of any child entries we've already processed to
                // reflect the ignore file in the current directory. Because `.gitignore` starts
                // with a `.`, it is usually one of the first entries we process, so there should
                // rarely be many entries to update. Also update the ignore stack associated with
                // any new jobs.
853 let mut new_jobs = new_jobs.iter_mut();
854 for entry in &mut new_entries {
855 entry.is_ignored = ignore_stack.is_path_ignored(&entry.path, entry.is_dir());
856 if entry.is_dir() {
857 new_jobs.next().unwrap().ignore_stack = if entry.is_ignored {
858 IgnoreStack::all()
859 } else {
860 ignore_stack.clone()
861 };
862 }
863 }
864 }
865
866 if child_metadata.is_dir() {
867 let is_ignored = ignore_stack.is_path_ignored(&child_path, true);
868 new_entries.push(Entry {
869 kind: EntryKind::PendingDir,
870 path: child_path.clone(),
871 inode: child_inode,
872 is_symlink: child_is_symlink,
873 is_ignored,
874 });
875 new_jobs.push(ScanJob {
876 abs_path: child_abs_path,
877 path: child_path,
878 ignore_stack: if is_ignored {
879 IgnoreStack::all()
880 } else {
881 ignore_stack.clone()
882 },
883 scan_queue: job.scan_queue.clone(),
884 });
885 } else {
886 let is_ignored = ignore_stack.is_path_ignored(&child_path, false);
887 new_entries.push(Entry {
888 kind: EntryKind::File(self.char_bag(&child_path)),
889 path: child_path,
890 inode: child_inode,
891 is_symlink: child_is_symlink,
892 is_ignored,
893 });
894 };
895 }
896
897 self.snapshot
898 .lock()
899 .populate_dir(job.path.clone(), new_entries, new_ignore);
900 for new_job in new_jobs {
901 job.scan_queue.send(new_job).unwrap();
902 }
903
904 Ok(())
905 }
906
907 fn process_events(&mut self, mut events: Vec<fsevent::Event>) -> bool {
908 self.update_other_mount_paths();
909
910 let mut snapshot = self.snapshot();
911 snapshot.scan_id += 1;
912
913 let root_abs_path = if let Ok(abs_path) = snapshot.abs_path.canonicalize() {
914 abs_path
915 } else {
916 return false;
917 };
918
919 let mut renamed_paths: HashMap<u64, PathBuf> = HashMap::new();
920 let mut handles = self.handles.lock();
921 let mut updated_handles = HashMap::new();
922 for event in &events {
923 let path = if let Ok(path) = event.path.strip_prefix(&root_abs_path) {
924 path
925 } else {
926 continue;
927 };
928
929 let metadata = fs::metadata(&event.path);
930 if event.flags.contains(fsevent::StreamFlags::ITEM_RENAMED) {
931 if let Some(inode) = snapshot.inode_for_path(path) {
932 renamed_paths.insert(inode, path.to_path_buf());
933 } else if let Ok(metadata) = &metadata {
934 let new_path = path;
935 if let Some(old_path) = renamed_paths.get(&metadata.ino()) {
936 handles.retain(|handle_path, handle_state| {
937 if let Ok(path_suffix) = handle_path.strip_prefix(&old_path) {
938 let new_handle_path: Arc<Path> =
939 if path_suffix.file_name().is_some() {
940 new_path.join(path_suffix)
941 } else {
942 new_path.to_path_buf()
943 }
944 .into();
945 if let Some(handle_state) = Weak::upgrade(&handle_state) {
946 let mut state = handle_state.lock();
947 state.path = new_handle_path.clone();
948 updated_handles
949 .insert(new_handle_path, Arc::downgrade(&handle_state));
950 }
951 false
952 } else {
953 true
954 }
955 });
956 handles.extend(updated_handles.drain());
957 }
958 }
959 }
960
961 for state in handles.values_mut() {
962 if let Some(state) = Weak::upgrade(&state) {
963 let mut state = state.lock();
964 if state.path.as_ref() == path {
965 if let Ok(metadata) = &metadata {
966 state.mtime = metadata.modified().unwrap();
967 }
968 } else if state.path.starts_with(path) {
969 if let Ok(metadata) = fs::metadata(state.path.as_ref()) {
970 state.mtime = metadata.modified().unwrap();
971 }
972 }
973 }
974 }
975 }
976 drop(handles);
977
978 events.sort_unstable_by(|a, b| a.path.cmp(&b.path));
979 let mut abs_paths = events.into_iter().map(|e| e.path).peekable();
980 let (scan_queue_tx, scan_queue_rx) = crossbeam_channel::unbounded();
981
982 while let Some(abs_path) = abs_paths.next() {
983 let path = match abs_path.strip_prefix(&root_abs_path) {
984 Ok(path) => Arc::from(path.to_path_buf()),
985 Err(_) => {
986 log::error!(
987 "unexpected event {:?} for root path {:?}",
988 abs_path,
989 root_abs_path
990 );
991 continue;
992 }
993 };
994
995 while abs_paths.peek().map_or(false, |p| p.starts_with(&abs_path)) {
996 abs_paths.next();
997 }
998
999 snapshot.remove_path(&path);
1000
1001 match self.fs_entry_for_path(path.clone(), &abs_path) {
1002 Ok(Some(mut fs_entry)) => {
1003 let is_dir = fs_entry.is_dir();
1004 let ignore_stack = snapshot.ignore_stack_for_path(&path, is_dir);
1005 fs_entry.is_ignored = ignore_stack.is_all();
1006 snapshot.insert_entry(fs_entry);
1007 if is_dir {
1008 scan_queue_tx
1009 .send(ScanJob {
1010 abs_path,
1011 path,
1012 ignore_stack,
1013 scan_queue: scan_queue_tx.clone(),
1014 })
1015 .unwrap();
1016 }
1017 }
1018 Ok(None) => {}
1019 Err(err) => {
1020 // TODO - create a special 'error' entry in the entries tree to mark this
1021 log::error!("error reading file on event {:?}", err);
1022 }
1023 }
1024 }
1025
1026 *self.snapshot.lock() = snapshot;
1027
1028 // Scan any directories that were created as part of this event batch.
1029 drop(scan_queue_tx);
1030 self.thread_pool.scoped(|pool| {
1031 for _ in 0..self.thread_pool.thread_count() {
1032 pool.execute(|| {
1033 while let Ok(job) = scan_queue_rx.recv() {
1034 if let Err(err) = self.scan_dir(&job) {
1035 log::error!("error scanning {:?}: {}", job.abs_path, err);
1036 }
1037 }
1038 });
1039 }
1040 });
1041
1042 self.update_ignore_statuses();
1043 self.mark_deleted_file_handles();
1044 true
1045 }
1046
1047 fn update_ignore_statuses(&self) {
1048 let mut snapshot = self.snapshot();
1049
1050 let mut ignores_to_update = Vec::new();
1051 let mut ignores_to_delete = Vec::new();
1052 for (parent_path, (_, scan_id)) in &snapshot.ignores {
1053 if *scan_id == snapshot.scan_id && snapshot.entry_for_path(parent_path).is_some() {
1054 ignores_to_update.push(parent_path.clone());
1055 }
1056
1057 let ignore_path = parent_path.join(&*GITIGNORE);
1058 if snapshot.entry_for_path(ignore_path).is_none() {
1059 ignores_to_delete.push(parent_path.clone());
1060 }
1061 }
1062
1063 for parent_path in ignores_to_delete {
1064 snapshot.ignores.remove(&parent_path);
1065 self.snapshot.lock().ignores.remove(&parent_path);
1066 }
1067
1068 let (ignore_queue_tx, ignore_queue_rx) = crossbeam_channel::unbounded();
1069 ignores_to_update.sort_unstable();
1070 let mut ignores_to_update = ignores_to_update.into_iter().peekable();
1071 while let Some(parent_path) = ignores_to_update.next() {
1072 while ignores_to_update
1073 .peek()
1074 .map_or(false, |p| p.starts_with(&parent_path))
1075 {
1076 ignores_to_update.next().unwrap();
1077 }
1078
1079 let ignore_stack = snapshot.ignore_stack_for_path(&parent_path, true);
1080 ignore_queue_tx
1081 .send(UpdateIgnoreStatusJob {
1082 path: parent_path,
1083 ignore_stack,
1084 ignore_queue: ignore_queue_tx.clone(),
1085 })
1086 .unwrap();
1087 }
1088 drop(ignore_queue_tx);
1089
1090 self.thread_pool.scoped(|scope| {
1091 for _ in 0..self.thread_pool.thread_count() {
1092 scope.execute(|| {
1093 while let Ok(job) = ignore_queue_rx.recv() {
1094 self.update_ignore_status(job, &snapshot);
1095 }
1096 });
1097 }
1098 });
1099 }
1100
1101 fn update_ignore_status(&self, job: UpdateIgnoreStatusJob, snapshot: &Snapshot) {
1102 let mut ignore_stack = job.ignore_stack;
1103 if let Some((ignore, _)) = snapshot.ignores.get(&job.path) {
1104 ignore_stack = ignore_stack.append(job.path.clone(), ignore.clone());
1105 }
1106
1107 let mut edits = Vec::new();
1108 for mut entry in snapshot.child_entries(&job.path).cloned() {
1109 let was_ignored = entry.is_ignored;
1110 entry.is_ignored = ignore_stack.is_path_ignored(entry.path(), entry.is_dir());
1111 if entry.is_dir() {
1112 let child_ignore_stack = if entry.is_ignored {
1113 IgnoreStack::all()
1114 } else {
1115 ignore_stack.clone()
1116 };
1117 job.ignore_queue
1118 .send(UpdateIgnoreStatusJob {
1119 path: entry.path().clone(),
1120 ignore_stack: child_ignore_stack,
1121 ignore_queue: job.ignore_queue.clone(),
1122 })
1123 .unwrap();
1124 }
1125
1126 if entry.is_ignored != was_ignored {
1127 edits.push(Edit::Insert(entry));
1128 }
1129 }
1130 self.snapshot.lock().entries.edit(edits, &());
1131 }
1132
1133 fn mark_deleted_file_handles(&self) {
1134 let mut handles = self.handles.lock();
1135 let snapshot = self.snapshot.lock();
1136 handles.retain(|path, handle_state| {
1137 if let Some(handle_state) = Weak::upgrade(&handle_state) {
1138 let mut handle_state = handle_state.lock();
1139 handle_state.is_deleted = snapshot.entry_for_path(&path).is_none();
1140 true
1141 } else {
1142 false
1143 }
1144 });
1145 }
1146
1147 fn fs_entry_for_path(&self, path: Arc<Path>, abs_path: &Path) -> Result<Option<Entry>> {
1148 let metadata = match fs::metadata(&abs_path) {
1149 Err(err) => {
1150 return match (err.kind(), err.raw_os_error()) {
1151 (io::ErrorKind::NotFound, _) => Ok(None),
1152 (io::ErrorKind::Other, Some(libc::ENOTDIR)) => Ok(None),
1153 _ => Err(anyhow::Error::new(err)),
1154 }
1155 }
1156 Ok(metadata) => metadata,
1157 };
1158 let inode = metadata.ino();
1159 let is_symlink = fs::symlink_metadata(&abs_path)
1160 .context("failed to read symlink metadata")?
1161 .file_type()
1162 .is_symlink();
1163
1164 let entry = Entry {
1165 kind: if metadata.file_type().is_dir() {
1166 EntryKind::PendingDir
1167 } else {
1168 EntryKind::File(self.char_bag(&path))
1169 },
1170 path,
1171 inode,
1172 is_symlink,
1173 is_ignored: false,
1174 };
1175
1176 Ok(Some(entry))
1177 }
1178
1179 fn char_bag(&self, path: &Path) -> CharBag {
1180 let mut result = self.root_char_bag;
1181 result.extend(
1182 path.to_string_lossy()
1183 .chars()
1184 .map(|c| c.to_ascii_lowercase()),
1185 );
1186 result
1187 }
1188}
1189
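/// A unit of work for the background scanner: scan the directory at `abs_path`, enqueueing any
/// child directories onto `scan_queue` as further jobs.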
1190struct ScanJob {
1191 abs_path: PathBuf,
1192 path: Arc<Path>,
1193 ignore_stack: Arc<IgnoreStack>,
1194 scan_queue: crossbeam_channel::Sender<ScanJob>,
1195}
1196
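/// A unit of work for recomputing ignore status: re-evaluate the entries under `path` against
/// `ignore_stack`, enqueueing child directories onto `ignore_queue`.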
1197struct UpdateIgnoreStatusJob {
1198 path: Arc<Path>,
1199 ignore_stack: Arc<IgnoreStack>,
1200 ignore_queue: crossbeam_channel::Sender<UpdateIgnoreStatusJob>,
1201}
1202
1203pub trait WorktreeHandle {
1204 fn file(&self, path: impl AsRef<Path>, app: &mut MutableAppContext) -> Task<FileHandle>;
1205
1206 #[cfg(test)]
1207 fn flush_fs_events<'a>(
1208 &self,
1209 app: &'a gpui::TestAppContext,
1210 ) -> futures_core::future::LocalBoxFuture<'a, ()>;
1211}
1212
1213impl WorktreeHandle for ModelHandle<Worktree> {
1214 fn file(&self, path: impl AsRef<Path>, app: &mut MutableAppContext) -> Task<FileHandle> {
1215 let path = Arc::from(path.as_ref());
1216 let handle = self.clone();
1217 let tree = self.read(app);
1218 let abs_path = tree.absolutize(&path);
1219 app.spawn(|ctx| async move {
1220 let mtime = ctx
1221 .background_executor()
1222 .spawn(async move {
1223 if let Ok(metadata) = fs::metadata(&abs_path) {
1224 metadata.modified().unwrap()
1225 } else {
1226 UNIX_EPOCH
1227 }
1228 })
1229 .await;
1230 let state = handle.read_with(&ctx, |tree, _| {
1231 let mut handles = tree.handles.lock();
1232 if let Some(state) = handles.get(&path).and_then(Weak::upgrade) {
1233 state
1234 } else {
1235 let handle_state = if let Some(entry) = tree.entry_for_path(&path) {
1236 FileHandleState {
1237 path: entry.path().clone(),
1238 is_deleted: false,
1239 mtime,
1240 }
1241 } else {
1242 FileHandleState {
1243 path: path.clone(),
1244 is_deleted: !tree.path_is_pending(path),
1245 mtime,
1246 }
1247 };
1248
1249 let state = Arc::new(Mutex::new(handle_state.clone()));
1250 handles.insert(handle_state.path, Arc::downgrade(&state));
1251 state
1252 }
1253 });
1254 FileHandle {
1255 worktree: handle.clone(),
1256 state,
1257 }
1258 })
1259 }
1260
    // The worktree's FS event stream sometimes delivers "redundant" events for FS changes that
    // occurred before the worktree was constructed. These events can cause the worktree to
    // perform extra directory scans and emit extra scan-state notifications.
    //
    // This function mutates the worktree's directory and waits for those mutations to be picked
    // up, to ensure that all redundant FS events have already been processed.
1267 #[cfg(test)]
1268 fn flush_fs_events<'a>(
1269 &self,
1270 app: &'a gpui::TestAppContext,
1271 ) -> futures_core::future::LocalBoxFuture<'a, ()> {
1272 use smol::future::FutureExt;
1273
1274 let filename = "fs-event-sentinel";
1275 let root_path = app.read(|ctx| self.read(ctx).abs_path.clone());
1276 let tree = self.clone();
1277 async move {
1278 fs::write(root_path.join(filename), "").unwrap();
1279 tree.condition(&app, |tree, _| tree.entry_for_path(filename).is_some())
1280 .await;
1281
1282 fs::remove_file(root_path.join(filename)).unwrap();
1283 tree.condition(&app, |tree, _| tree.entry_for_path(filename).is_none())
1284 .await;
1285
1286 app.read(|ctx| tree.read(ctx).scan_complete()).await;
1287 }
1288 .boxed_local()
1289 }
1290}
1291
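/// Iterates over a snapshot's file entries, either all of them or only those that are not
/// ignored.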
1292pub enum FileIter<'a> {
1293 All(Cursor<'a, Entry, FileCount, FileCount>),
1294 Visible(Cursor<'a, Entry, VisibleFileCount, VisibleFileCount>),
1295}
1296
1297impl<'a> FileIter<'a> {
1298 fn all(snapshot: &'a Snapshot, start: usize) -> Self {
1299 let mut cursor = snapshot.entries.cursor();
1300 cursor.seek(&FileCount(start), SeekBias::Right, &());
1301 Self::All(cursor)
1302 }
1303
1304 fn visible(snapshot: &'a Snapshot, start: usize) -> Self {
1305 let mut cursor = snapshot.entries.cursor();
1306 cursor.seek(&VisibleFileCount(start), SeekBias::Right, &());
1307 Self::Visible(cursor)
1308 }
1309
1310 fn next_internal(&mut self) {
1311 match self {
1312 Self::All(cursor) => {
1313 let ix = *cursor.start();
1314 cursor.seek_forward(&FileCount(ix.0 + 1), SeekBias::Right, &());
1315 }
1316 Self::Visible(cursor) => {
1317 let ix = *cursor.start();
1318 cursor.seek_forward(&VisibleFileCount(ix.0 + 1), SeekBias::Right, &());
1319 }
1320 }
1321 }
1322
1323 fn item(&self) -> Option<&'a Entry> {
1324 match self {
1325 Self::All(cursor) => cursor.item(),
1326 Self::Visible(cursor) => cursor.item(),
1327 }
1328 }
1329}
1330
1331impl<'a> Iterator for FileIter<'a> {
1332 type Item = &'a Entry;
1333
1334 fn next(&mut self) -> Option<Self::Item> {
1335 if let Some(entry) = self.item() {
1336 self.next_internal();
1337 Some(entry)
1338 } else {
1339 None
1340 }
1341 }
1342}
1343
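/// Iterates over the entries that are immediate children of a parent path, seeking past each
/// child's subtree after yielding it.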
1344struct ChildEntriesIter<'a> {
1345 parent_path: &'a Path,
1346 cursor: Cursor<'a, Entry, PathSearch<'a>, ()>,
1347}
1348
1349impl<'a> ChildEntriesIter<'a> {
1350 fn new(parent_path: &'a Path, snapshot: &'a Snapshot) -> Self {
1351 let mut cursor = snapshot.entries.cursor();
1352 cursor.seek(&PathSearch::Exact(parent_path), SeekBias::Right, &());
1353 Self {
1354 parent_path,
1355 cursor,
1356 }
1357 }
1358}
1359
1360impl<'a> Iterator for ChildEntriesIter<'a> {
1361 type Item = &'a Entry;
1362
1363 fn next(&mut self) -> Option<Self::Item> {
1364 if let Some(item) = self.cursor.item() {
1365 if item.path().starts_with(self.parent_path) {
1366 self.cursor
1367 .seek_forward(&PathSearch::Successor(item.path()), SeekBias::Left, &());
1368 Some(item)
1369 } else {
1370 None
1371 }
1372 } else {
1373 None
1374 }
1375 }
1376}
1377
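/// Returns the mount points of all currently mounted volumes, via the BSD/macOS
/// `libc::getmntinfo` call.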
1378fn mounted_volume_paths() -> Vec<PathBuf> {
1379 unsafe {
1380 let mut stat_ptr: *mut libc::statfs = std::ptr::null_mut();
1381 let count = libc::getmntinfo(&mut stat_ptr as *mut _, libc::MNT_WAIT);
1382 if count >= 0 {
1383 std::slice::from_raw_parts(stat_ptr, count as usize)
1384 .iter()
1385 .map(|stat| {
1386 PathBuf::from(OsStr::from_bytes(
1387 CStr::from_ptr(&stat.f_mntonname[0]).to_bytes(),
1388 ))
1389 })
1390 .collect()
1391 } else {
1392 panic!("failed to run getmntinfo");
1393 }
1394 }
1395}
1396
1397#[cfg(test)]
1398mod tests {
1399 use super::*;
1400 use crate::editor::Buffer;
1401 use crate::test::*;
1402 use anyhow::Result;
1403 use rand::prelude::*;
1404 use serde_json::json;
1405 use std::env;
1406 use std::fmt::Write;
1407 use std::os::unix;
1408 use std::time::{SystemTime, UNIX_EPOCH};
1409
1410 #[gpui::test]
1411 async fn test_populate_and_search(mut app: gpui::TestAppContext) {
1412 let dir = temp_tree(json!({
1413 "root": {
1414 "apple": "",
1415 "banana": {
1416 "carrot": {
1417 "date": "",
1418 "endive": "",
1419 }
1420 },
1421 "fennel": {
1422 "grape": "",
1423 }
1424 }
1425 }));
1426
1427 let root_link_path = dir.path().join("root_link");
1428 unix::fs::symlink(&dir.path().join("root"), &root_link_path).unwrap();
1429 unix::fs::symlink(
1430 &dir.path().join("root/fennel"),
1431 &dir.path().join("root/finnochio"),
1432 )
1433 .unwrap();
1434
1435 let tree = app.add_model(|ctx| Worktree::new(root_link_path, ctx));
1436
1437 app.read(|ctx| tree.read(ctx).scan_complete()).await;
1438 app.read(|ctx| {
1439 let tree = tree.read(ctx);
1440 assert_eq!(tree.file_count(), 5);
1441
1442 assert_eq!(
1443 tree.inode_for_path("fennel/grape"),
1444 tree.inode_for_path("finnochio/grape")
1445 );
1446
1447 let results = match_paths(
1448 Some(tree.snapshot()).iter(),
1449 "bna",
1450 false,
1451 false,
1452 false,
1453 10,
1454 Default::default(),
1455 ctx.thread_pool().clone(),
1456 )
1457 .into_iter()
1458 .map(|result| result.path)
1459 .collect::<Vec<Arc<Path>>>();
1460 assert_eq!(
1461 results,
1462 vec![
1463 PathBuf::from("banana/carrot/date").into(),
1464 PathBuf::from("banana/carrot/endive").into(),
1465 ]
1466 );
1467 })
1468 }
1469
1470 #[gpui::test]
1471 async fn test_save_file(mut app: gpui::TestAppContext) {
1472 let dir = temp_tree(json!({
1473 "file1": "the old contents",
1474 }));
1475
1476 let tree = app.add_model(|ctx| Worktree::new(dir.path(), ctx));
1477 app.read(|ctx| tree.read(ctx).scan_complete()).await;
1478 app.read(|ctx| assert_eq!(tree.read(ctx).file_count(), 1));
1479
1480 let buffer =
1481 app.add_model(|ctx| Buffer::new(1, "a line of text.\n".repeat(10 * 1024), ctx));
1482
1483 let path = tree.update(&mut app, |tree, ctx| {
1484 let path = tree.files(0).next().unwrap().path().clone();
1485 assert_eq!(path.file_name().unwrap(), "file1");
1486 smol::block_on(tree.save(&path, buffer.read(ctx).snapshot(), ctx.as_ref())).unwrap();
1487 path
1488 });
1489
1490 let history = app
1491 .read(|ctx| tree.read(ctx).load_history(&path, ctx))
1492 .await
1493 .unwrap();
1494 app.read(|ctx| {
1495 assert_eq!(history.base_text.as_ref(), buffer.read(ctx).text());
1496 });
1497 }
1498
1499 #[gpui::test]
1500 async fn test_save_in_single_file_worktree(mut app: gpui::TestAppContext) {
1501 let dir = temp_tree(json!({
1502 "file1": "the old contents",
1503 }));
1504
1505 let tree = app.add_model(|ctx| Worktree::new(dir.path().join("file1"), ctx));
1506 app.read(|ctx| tree.read(ctx).scan_complete()).await;
1507 app.read(|ctx| assert_eq!(tree.read(ctx).file_count(), 1));
1508
1509 let buffer =
1510 app.add_model(|ctx| Buffer::new(1, "a line of text.\n".repeat(10 * 1024), ctx));
1511
1512 let file = app.update(|ctx| tree.file("", ctx)).await;
1513 app.update(|ctx| {
1514 assert_eq!(file.path().file_name(), None);
1515 smol::block_on(file.save(buffer.read(ctx).snapshot(), ctx.as_ref())).unwrap();
1516 });
1517
1518 let history = app.read(|ctx| file.load_history(ctx)).await.unwrap();
1519 app.read(|ctx| assert_eq!(history.base_text.as_ref(), buffer.read(ctx).text()));
1520 }
1521
1522 #[gpui::test]
1523 async fn test_rescan_simple(mut app: gpui::TestAppContext) {
1524 let dir = temp_tree(json!({
1525 "a": {
1526 "file1": "",
1527 "file2": "",
1528 "file3": "",
1529 },
1530 "b": {
1531 "c": {
1532 "file4": "",
1533 "file5": "",
1534 }
1535 }
1536 }));
1537
1538 let tree = app.add_model(|ctx| Worktree::new(dir.path(), ctx));
1539 let file2 = app.update(|ctx| tree.file("a/file2", ctx)).await;
1540 let file3 = app.update(|ctx| tree.file("a/file3", ctx)).await;
1541 let file4 = app.update(|ctx| tree.file("b/c/file4", ctx)).await;
1542 let file5 = app.update(|ctx| tree.file("b/c/file5", ctx)).await;
1543 let non_existent_file = app.update(|ctx| tree.file("a/file_x", ctx)).await;
1544
1545 // After scanning, the worktree knows which files exist and which don't.
1546 app.read(|ctx| tree.read(ctx).scan_complete()).await;
1547 assert!(!file2.is_deleted());
1548 assert!(!file3.is_deleted());
1549 assert!(!file4.is_deleted());
1550 assert!(!file5.is_deleted());
1551 assert!(non_existent_file.is_deleted());
1552
1553 tree.flush_fs_events(&app).await;
1554 std::fs::rename(dir.path().join("a/file3"), dir.path().join("b/c/file3")).unwrap();
1555 std::fs::remove_file(dir.path().join("b/c/file5")).unwrap();
1556 std::fs::rename(dir.path().join("b/c"), dir.path().join("d")).unwrap();
1557 std::fs::rename(dir.path().join("a/file2"), dir.path().join("a/file2.new")).unwrap();
1558 tree.flush_fs_events(&app).await;
1559
1560 app.read(|ctx| {
1561 assert_eq!(
1562 tree.read(ctx)
1563 .paths()
1564 .map(|p| p.to_str().unwrap())
1565 .collect::<Vec<_>>(),
1566 vec![
1567 "a",
1568 "a/file1",
1569 "a/file2.new",
1570 "b",
1571 "d",
1572 "d/file3",
1573 "d/file4"
1574 ]
1575 );
1576
1577 assert_eq!(file2.path().to_str().unwrap(), "a/file2.new");
1578 assert_eq!(file4.path().as_ref(), Path::new("d/file4"));
1579 assert_eq!(file5.path().as_ref(), Path::new("d/file5"));
1580 assert!(!file2.is_deleted());
1581 assert!(!file4.is_deleted());
1582 assert!(file5.is_deleted());
1583
1584 // Right now, this rename isn't detected because the target path
1585 // no longer exists on the file system by the time we process the
1586 // rename event.
1587 assert_eq!(file3.path().as_ref(), Path::new("a/file3"));
1588 assert!(file3.is_deleted());
1589 });
1590 }
1591
1592 #[gpui::test]
1593 async fn test_rescan_with_gitignore(mut app: gpui::TestAppContext) {
1594 let dir = temp_tree(json!({
1595 ".git": {},
1596 ".gitignore": "ignored-dir\n",
1597 "tracked-dir": {
1598 "tracked-file1": "tracked contents",
1599 },
1600 "ignored-dir": {
1601 "ignored-file1": "ignored contents",
1602 }
1603 }));
1604
1605 let tree = app.add_model(|ctx| Worktree::new(dir.path(), ctx));
1606 app.read(|ctx| tree.read(ctx).scan_complete()).await;
1607 tree.flush_fs_events(&app).await;
1608 app.read(|ctx| {
1609 let tree = tree.read(ctx);
1610 let tracked = tree.entry_for_path("tracked-dir/tracked-file1").unwrap();
1611 let ignored = tree.entry_for_path("ignored-dir/ignored-file1").unwrap();
1612 assert_eq!(tracked.is_ignored(), false);
1613 assert_eq!(ignored.is_ignored(), true);
1614 });
1615
1616 fs::write(dir.path().join("tracked-dir/tracked-file2"), "").unwrap();
1617 fs::write(dir.path().join("ignored-dir/ignored-file2"), "").unwrap();
1618 tree.flush_fs_events(&app).await;
1619 app.read(|ctx| {
1620 let tree = tree.read(ctx);
1621 let dot_git = tree.entry_for_path(".git").unwrap();
1622 let tracked = tree.entry_for_path("tracked-dir/tracked-file2").unwrap();
1623 let ignored = tree.entry_for_path("ignored-dir/ignored-file2").unwrap();
1624 assert_eq!(tracked.is_ignored(), false);
1625 assert_eq!(ignored.is_ignored(), true);
1626 assert_eq!(dot_git.is_ignored(), true);
1627 });
1628 }
1629
1630 #[test]
1631 fn test_path_is_pending() {
1632 let mut snapshot = Snapshot {
1633 id: 0,
1634 scan_id: 0,
1635 abs_path: Path::new("").into(),
1636 entries: Default::default(),
1637 ignores: Default::default(),
1638 root_name: Default::default(),
1639 };
1640
1641 snapshot.entries.edit(
1642 vec![
1643 Edit::Insert(Entry {
1644 path: Path::new("b").into(),
1645 kind: EntryKind::Dir,
1646 inode: 0,
1647 is_ignored: false,
1648 is_symlink: false,
1649 }),
1650 Edit::Insert(Entry {
1651 path: Path::new("b/a").into(),
1652 kind: EntryKind::Dir,
1653 inode: 0,
1654 is_ignored: false,
1655 is_symlink: false,
1656 }),
1657 Edit::Insert(Entry {
1658 path: Path::new("b/c").into(),
1659 kind: EntryKind::PendingDir,
1660 inode: 0,
1661 is_ignored: false,
1662 is_symlink: false,
1663 }),
1664 Edit::Insert(Entry {
1665 path: Path::new("b/e").into(),
1666 kind: EntryKind::Dir,
1667 inode: 0,
1668 is_ignored: false,
1669 is_symlink: false,
1670 }),
1671 ],
1672 &(),
1673 );
1674
1675 assert!(!snapshot.path_is_pending("b/a"));
1676 assert!(!snapshot.path_is_pending("b/b"));
1677 assert!(snapshot.path_is_pending("b/c"));
1678 assert!(snapshot.path_is_pending("b/c/x"));
1679 assert!(!snapshot.path_is_pending("b/d"));
1680 assert!(!snapshot.path_is_pending("b/e"));
1681 }
1682
1683 #[test]
1684 fn test_mounted_volume_paths() {
1685 let paths = mounted_volume_paths();
1686 assert!(paths.contains(&"/".into()));
1687 }
1688
1689 #[test]
1690 fn test_random() {
1691 let iterations = env::var("ITERATIONS")
1692 .map(|i| i.parse().unwrap())
1693 .unwrap_or(100);
1694 let operations = env::var("OPERATIONS")
1695 .map(|o| o.parse().unwrap())
1696 .unwrap_or(40);
1697 let initial_entries = env::var("INITIAL_ENTRIES")
1698 .map(|o| o.parse().unwrap())
1699 .unwrap_or(20);
1700 let seeds = if let Ok(seed) = env::var("SEED").map(|s| s.parse().unwrap()) {
1701 seed..seed + 1
1702 } else {
1703 0..iterations
1704 };
1705
1706 for seed in seeds {
1707 dbg!(seed);
1708 let mut rng = StdRng::seed_from_u64(seed);
1709
1710 let root_dir = tempdir::TempDir::new(&format!("test-{}", seed)).unwrap();
1711 for _ in 0..initial_entries {
1712 randomly_mutate_tree(root_dir.path(), 1.0, &mut rng).unwrap();
1713 }
1714 log::info!("Generated initial tree");
1715
1716 let (notify_tx, _notify_rx) = smol::channel::unbounded();
1717 let mut scanner = BackgroundScanner::new(
1718 Arc::new(Mutex::new(Snapshot {
1719 id: 0,
1720 scan_id: 0,
1721 abs_path: root_dir.path().into(),
1722 entries: Default::default(),
1723 ignores: Default::default(),
1724 root_name: Default::default(),
1725 })),
1726 Arc::new(Mutex::new(Default::default())),
1727 notify_tx,
1728 0,
1729 );
1730 scanner.scan_dirs().unwrap();
1731 scanner.snapshot().check_invariants();
1732
1733 let mut events = Vec::new();
1734 let mut mutations_len = operations;
1735 while mutations_len > 1 {
1736 if !events.is_empty() && rng.gen_bool(0.4) {
1737 let len = rng.gen_range(0..=events.len());
1738 let to_deliver = events.drain(0..len).collect::<Vec<_>>();
1739 log::info!("Delivering events: {:#?}", to_deliver);
1740 scanner.process_events(to_deliver);
1741 scanner.snapshot().check_invariants();
1742 } else {
1743 events.extend(randomly_mutate_tree(root_dir.path(), 0.6, &mut rng).unwrap());
1744 mutations_len -= 1;
1745 }
1746 }
1747 log::info!("Quiescing: {:#?}", events);
1748 scanner.process_events(events);
1749 scanner.snapshot().check_invariants();
1750
1751 let (notify_tx, _notify_rx) = smol::channel::unbounded();
1752 let mut new_scanner = BackgroundScanner::new(
1753 Arc::new(Mutex::new(Snapshot {
1754 id: 0,
1755 scan_id: 0,
1756 abs_path: root_dir.path().into(),
1757 entries: Default::default(),
1758 ignores: Default::default(),
1759 root_name: Default::default(),
1760 })),
1761 Arc::new(Mutex::new(Default::default())),
1762 notify_tx,
1763 1,
1764 );
1765 new_scanner.scan_dirs().unwrap();
1766 assert_eq!(scanner.snapshot().to_vec(), new_scanner.snapshot().to_vec());
1767 }
1768 }
1769
1770 fn randomly_mutate_tree(
1771 root_path: &Path,
1772 insertion_probability: f64,
1773 rng: &mut impl Rng,
1774 ) -> Result<Vec<fsevent::Event>> {
1775 let root_path = root_path.canonicalize().unwrap();
1776 let (dirs, files) = read_dir_recursive(root_path.clone());
1777
1778 let mut events = Vec::new();
1779 let mut record_event = |path: PathBuf| {
1780 events.push(fsevent::Event {
1781 event_id: SystemTime::now()
1782 .duration_since(UNIX_EPOCH)
1783 .unwrap()
1784 .as_secs(),
1785 flags: fsevent::StreamFlags::empty(),
1786 path,
1787 });
1788 };
1789
1790 if (files.is_empty() && dirs.len() == 1) || rng.gen_bool(insertion_probability) {
1791 let path = dirs.choose(rng).unwrap();
1792 let new_path = path.join(gen_name(rng));
1793
1794 if rng.gen() {
1795 log::info!("Creating dir {:?}", new_path.strip_prefix(root_path)?);
1796 fs::create_dir(&new_path)?;
1797 } else {
1798 log::info!("Creating file {:?}", new_path.strip_prefix(root_path)?);
1799 fs::write(&new_path, "")?;
1800 }
1801 record_event(new_path);
1802 } else if rng.gen_bool(0.05) {
1803 let ignore_dir_path = dirs.choose(rng).unwrap();
1804 let ignore_path = ignore_dir_path.join(&*GITIGNORE);
1805
1806 let (subdirs, subfiles) = read_dir_recursive(ignore_dir_path.clone());
1807 let files_to_ignore = {
1808 let len = rng.gen_range(0..=subfiles.len());
1809 subfiles.choose_multiple(rng, len)
1810 };
1811 let dirs_to_ignore = {
1812 let len = rng.gen_range(0..subdirs.len());
1813 subdirs.choose_multiple(rng, len)
1814 };
1815
1816 let mut ignore_contents = String::new();
1817 for path_to_ignore in files_to_ignore.chain(dirs_to_ignore) {
1818 write!(
1819 ignore_contents,
1820 "{}\n",
1821 path_to_ignore
1822 .strip_prefix(&ignore_dir_path)?
1823 .to_str()
1824 .unwrap()
1825 )
1826 .unwrap();
1827 }
1828 log::info!(
1829 "Creating {:?} with contents:\n{}",
1830 ignore_path.strip_prefix(&root_path)?,
1831 ignore_contents
1832 );
1833 fs::write(&ignore_path, ignore_contents).unwrap();
1834 record_event(ignore_path);
1835 } else {
1836 let old_path = {
1837 let file_path = files.choose(rng);
1838 let dir_path = dirs[1..].choose(rng);
1839 file_path.into_iter().chain(dir_path).choose(rng).unwrap()
1840 };
1841
1842 let is_rename = rng.gen();
1843 if is_rename {
1844 let new_path_parent = dirs
1845 .iter()
1846 .filter(|d| !d.starts_with(old_path))
1847 .choose(rng)
1848 .unwrap();
1849
1850 let overwrite_existing_dir =
1851 !old_path.starts_with(&new_path_parent) && rng.gen_bool(0.3);
1852 let new_path = if overwrite_existing_dir {
1853 fs::remove_dir_all(&new_path_parent).ok();
1854 new_path_parent.to_path_buf()
1855 } else {
1856 new_path_parent.join(gen_name(rng))
1857 };
1858
1859 log::info!(
1860 "Renaming {:?} to {}{:?}",
1861 old_path.strip_prefix(&root_path)?,
1862 if overwrite_existing_dir {
1863 "overwrite "
1864 } else {
1865 ""
1866 },
1867 new_path.strip_prefix(&root_path)?
1868 );
1869 fs::rename(&old_path, &new_path)?;
1870 record_event(old_path.clone());
1871 record_event(new_path);
1872 } else if old_path.is_dir() {
1873 let (dirs, files) = read_dir_recursive(old_path.clone());
1874
1875 log::info!("Deleting dir {:?}", old_path.strip_prefix(&root_path)?);
1876 fs::remove_dir_all(&old_path).unwrap();
1877 for file in files {
1878 record_event(file);
1879 }
1880 for dir in dirs {
1881 record_event(dir);
1882 }
1883 } else {
1884 log::info!("Deleting file {:?}", old_path.strip_prefix(&root_path)?);
1885 fs::remove_file(old_path).unwrap();
1886 record_event(old_path.clone());
1887 }
1888 }
1889
1890 Ok(events)
1891 }
1892
1893 fn read_dir_recursive(path: PathBuf) -> (Vec<PathBuf>, Vec<PathBuf>) {
1894 let child_entries = fs::read_dir(&path).unwrap();
1895 let mut dirs = vec![path];
1896 let mut files = Vec::new();
1897 for child_entry in child_entries {
1898 let child_path = child_entry.unwrap().path();
1899 if child_path.is_dir() {
1900 let (child_dirs, child_files) = read_dir_recursive(child_path);
1901 dirs.extend(child_dirs);
1902 files.extend(child_files);
1903 } else {
1904 files.push(child_path);
1905 }
1906 }
1907 (dirs, files)
1908 }
1909
1910 fn gen_name(rng: &mut impl Rng) -> String {
1911 (0..6)
1912 .map(|_| rng.sample(rand::distributions::Alphanumeric))
1913 .map(char::from)
1914 .collect()
1915 }
1916
1917 impl Snapshot {
1918 fn check_invariants(&self) {
1919 let mut files = self.files(0);
1920 let mut visible_files = self.visible_files(0);
1921 for entry in self.entries.cursor::<(), ()>() {
1922 if entry.is_file() {
1923 assert_eq!(files.next().unwrap().inode(), entry.inode);
1924 if !entry.is_ignored {
1925 assert_eq!(visible_files.next().unwrap().inode(), entry.inode);
1926 }
1927 }
1928 }
1929 assert!(files.next().is_none());
1930 assert!(visible_files.next().is_none());
1931
1932 let mut bfs_paths = Vec::new();
1933 let mut stack = vec![Path::new("")];
1934 while let Some(path) = stack.pop() {
1935 bfs_paths.push(path);
1936 let ix = stack.len();
1937 for child_entry in self.child_entries(path) {
1938 stack.insert(ix, child_entry.path());
1939 }
1940 }
1941
1942 let dfs_paths = self
1943 .entries
1944 .cursor::<(), ()>()
1945 .map(|e| e.path().as_ref())
1946 .collect::<Vec<_>>();
1947 assert_eq!(bfs_paths, dfs_paths);
1948
1949 for (ignore_parent_path, _) in &self.ignores {
1950 assert!(self.entry_for_path(ignore_parent_path).is_some());
1951 assert!(self
1952 .entry_for_path(ignore_parent_path.join(&*GITIGNORE))
1953 .is_some());
1954 }
1955 }
1956
1957 fn to_vec(&self) -> Vec<(&Path, u64, bool)> {
1958 let mut paths = Vec::new();
1959 for entry in self.entries.cursor::<(), ()>() {
1960 paths.push((entry.path().as_ref(), entry.inode(), entry.is_ignored()));
1961 }
1962 paths.sort_by(|a, b| a.0.cmp(&b.0));
1963 paths
1964 }
1965 }
1966}