1mod char_bag;
2mod fuzzy;
3mod ignore;
4
5use crate::{
6 editor::{History, Rope},
7 sum_tree::{self, Cursor, Edit, SeekBias, SumTree},
8};
9use ::ignore::gitignore::Gitignore;
10use anyhow::{Context, Result};
11pub use fuzzy::{match_paths, PathMatch};
12use gpui::{scoped_pool, AppContext, Entity, ModelContext, ModelHandle, MutableAppContext, Task};
13use lazy_static::lazy_static;
14use parking_lot::Mutex;
15use postage::{
16 prelude::{Sink, Stream},
17 watch,
18};
19use smol::channel::Sender;
20use std::{
21 cmp,
22 collections::{HashMap, HashSet},
23 ffi::{CStr, OsStr, OsString},
24 fmt, fs,
25 future::Future,
26 io::{self, Read, Write},
27 ops::Deref,
28 os::unix::{ffi::OsStrExt, fs::MetadataExt},
29 path::{Path, PathBuf},
30 sync::{Arc, Weak},
31 time::{Duration, SystemTime, UNIX_EPOCH},
32};
33
34use self::{char_bag::CharBag, ignore::IgnoreStack};
35
// Interned name of git's per-directory ignore file; compared against
// `Path::file_name` results throughout the scanner without re-allocating.
lazy_static! {
    static ref GITIGNORE: &'static OsStr = OsStr::new(".gitignore");
}
39
/// Lifecycle of the background scanner, broadcast to the foreground `Worktree`.
#[derive(Clone, Debug)]
enum ScanState {
    /// No scan in progress; the snapshot reflects the last completed pass.
    Idle,
    /// The initial scan or an event-driven rescan is running.
    Scanning,
    /// The scanner hit an I/O error; `Arc` keeps the state cheaply clonable.
    Err(Arc<io::Error>),
}
46
pub struct Worktree {
    /// Foreground copy of the tree, refreshed from `background_snapshot`.
    snapshot: Snapshot,
    /// Snapshot mutated by the background scanner thread.
    background_snapshot: Arc<Mutex<Snapshot>>,
    /// Live `FileHandle` state keyed by worktree-relative path; weak so
    /// dropped handles fall out of the map.
    handles: Arc<Mutex<HashMap<Arc<Path>, Weak<Mutex<FileHandleState>>>>>,
    /// Broadcast channel holding the scanner's current state.
    scan_state: (watch::Sender<ScanState>, watch::Receiver<ScanState>),
    /// Keeps the fs event stream alive for the lifetime of the worktree.
    _event_stream_handle: fsevent::Handle,
    /// True while a delayed `poll_entries` call is pending.
    poll_scheduled: bool,
}
55
/// A cheaply clonable reference to a file within a worktree. The shared
/// state is kept current by the background scanner as the file changes.
#[derive(Clone, Debug)]
pub struct FileHandle {
    worktree: ModelHandle<Worktree>,
    state: Arc<Mutex<FileHandleState>>,
}
61
/// Mutable state shared by all clones of a `FileHandle`.
#[derive(Clone, Debug, PartialEq, Eq)]
struct FileHandleState {
    /// Path relative to the worktree root.
    path: Arc<Path>,
    /// Whether the file was absent from the latest snapshot.
    is_deleted: bool,
    /// Last observed modification time.
    mtime: SystemTime,
}
68
69impl Worktree {
70 pub fn new(path: impl Into<Arc<Path>>, cx: &mut ModelContext<Self>) -> Self {
71 let abs_path = path.into();
72 let (scan_state_tx, scan_state_rx) = smol::channel::unbounded();
73 let id = cx.model_id();
74 let snapshot = Snapshot {
75 id,
76 scan_id: 0,
77 abs_path,
78 root_name: Default::default(),
79 ignores: Default::default(),
80 entries: Default::default(),
81 };
82 let (event_stream, event_stream_handle) =
83 fsevent::EventStream::new(&[snapshot.abs_path.as_ref()], Duration::from_millis(100));
84
85 let background_snapshot = Arc::new(Mutex::new(snapshot.clone()));
86 let handles = Arc::new(Mutex::new(Default::default()));
87
88 let tree = Self {
89 snapshot,
90 background_snapshot: background_snapshot.clone(),
91 handles: handles.clone(),
92 scan_state: watch::channel_with(ScanState::Scanning),
93 _event_stream_handle: event_stream_handle,
94 poll_scheduled: false,
95 };
96
97 std::thread::spawn(move || {
98 let scanner = BackgroundScanner::new(background_snapshot, handles, scan_state_tx, id);
99 scanner.run(event_stream)
100 });
101
102 cx.spawn(|this, mut cx| {
103 let this = this.downgrade();
104 async move {
105 while let Ok(scan_state) = scan_state_rx.recv().await {
106 let alive = cx.update(|cx| {
107 if let Some(handle) = this.upgrade(&cx) {
108 handle.update(cx, |this, cx| this.observe_scan_state(scan_state, cx));
109 true
110 } else {
111 false
112 }
113 });
114
115 if !alive {
116 break;
117 }
118 }
119 }
120 })
121 .detach();
122
123 tree
124 }
125
126 pub fn scan_complete(&self) -> impl Future<Output = ()> {
127 let mut scan_state_rx = self.scan_state.1.clone();
128 async move {
129 let mut scan_state = Some(scan_state_rx.borrow().clone());
130 while let Some(ScanState::Scanning) = scan_state {
131 scan_state = scan_state_rx.recv().await;
132 }
133 }
134 }
135
136 fn observe_scan_state(&mut self, scan_state: ScanState, cx: &mut ModelContext<Self>) {
137 let _ = self.scan_state.0.blocking_send(scan_state);
138 self.poll_entries(cx);
139 }
140
141 fn poll_entries(&mut self, cx: &mut ModelContext<Self>) {
142 self.snapshot = self.background_snapshot.lock().clone();
143 cx.notify();
144
145 if self.is_scanning() && !self.poll_scheduled {
146 cx.spawn(|this, mut cx| async move {
147 smol::Timer::after(Duration::from_millis(100)).await;
148 this.update(&mut cx, |this, cx| {
149 this.poll_scheduled = false;
150 this.poll_entries(cx);
151 })
152 })
153 .detach();
154 self.poll_scheduled = true;
155 }
156 }
157
158 fn is_scanning(&self) -> bool {
159 if let ScanState::Scanning = *self.scan_state.1.borrow() {
160 true
161 } else {
162 false
163 }
164 }
165
166 pub fn snapshot(&self) -> Snapshot {
167 self.snapshot.clone()
168 }
169
170 pub fn abs_path(&self) -> &Path {
171 self.snapshot.abs_path.as_ref()
172 }
173
174 pub fn contains_abs_path(&self, path: &Path) -> bool {
175 path.starts_with(&self.snapshot.abs_path)
176 }
177
178 fn absolutize(&self, path: &Path) -> PathBuf {
179 if path.file_name().is_some() {
180 self.snapshot.abs_path.join(path)
181 } else {
182 self.snapshot.abs_path.to_path_buf()
183 }
184 }
185
186 pub fn load_history(
187 &self,
188 path: &Path,
189 cx: &AppContext,
190 ) -> impl Future<Output = Result<History>> {
191 let path = path.to_path_buf();
192 let abs_path = self.absolutize(&path);
193 cx.background_executor().spawn(async move {
194 let mut file = fs::File::open(&abs_path)?;
195 let mut base_text = String::new();
196 file.read_to_string(&mut base_text)?;
197 Ok(History::new(Arc::from(base_text)))
198 })
199 }
200
201 pub fn save<'a>(&self, path: &Path, content: Rope, cx: &AppContext) -> Task<Result<()>> {
202 let handles = self.handles.clone();
203 let path = path.to_path_buf();
204 let abs_path = self.absolutize(&path);
205 cx.background_executor().spawn(async move {
206 let buffer_size = content.summary().bytes.min(10 * 1024);
207 let file = fs::File::create(&abs_path)?;
208 let mut writer = io::BufWriter::with_capacity(buffer_size, &file);
209 for chunk in content.chunks() {
210 writer.write(chunk.as_bytes())?;
211 }
212 writer.flush()?;
213
214 if let Some(handle) = handles.lock().get(&*path).and_then(Weak::upgrade) {
215 let mut handle = handle.lock();
216 handle.mtime = file.metadata()?.modified()?;
217 handle.is_deleted = false;
218 }
219
220 Ok(())
221 })
222 }
223}
224
impl Entity for Worktree {
    // The worktree emits no typed events; observers rely on `cx.notify`.
    type Event = ();
}
228
/// A worktree dereferences to its foreground snapshot, exposing the
/// read-only query API (`file_count`, `entry_for_path`, ...) directly.
impl Deref for Worktree {
    type Target = Snapshot;

    fn deref(&self) -> &Self::Target {
        &self.snapshot
    }
}
236
impl fmt::Debug for Worktree {
    // Delegates to the snapshot's indented entry-tree dump.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        self.snapshot.fmt(f)
    }
}
242
/// A point-in-time view of a worktree's contents.
#[derive(Clone)]
pub struct Snapshot {
    /// Id of the owning worktree model.
    id: usize,
    /// Incremented on each scan pass; used to detect freshly-updated ignores.
    scan_id: usize,
    /// Absolute path of the worktree root.
    abs_path: Arc<Path>,
    /// Root file name, with a trailing slash when the root is a directory.
    root_name: String,
    /// Parsed `.gitignore` files keyed by their containing directory, paired
    /// with the scan id in which each was last updated.
    ignores: HashMap<Arc<Path>, (Arc<Gitignore>, usize)>,
    /// All entries, ordered by path.
    entries: SumTree<Entry>,
}
252
impl Snapshot {
    /// Total number of file entries, including ignored ones.
    pub fn file_count(&self) -> usize {
        self.entries.summary().file_count
    }

    /// Number of file entries not excluded by a gitignore.
    pub fn visible_file_count(&self) -> usize {
        self.entries.summary().visible_file_count
    }

    /// Iterates all files, starting from the `start`-th file in path order.
    pub fn files(&self, start: usize) -> FileIter {
        FileIter::all(self, start)
    }

    #[cfg(test)]
    pub fn paths(&self) -> impl Iterator<Item = &Arc<Path>> {
        // Skip the root entry, whose path is empty.
        self.entries
            .cursor::<(), ()>()
            .skip(1)
            .map(|entry| entry.path())
    }

    /// Iterates non-ignored files, starting from the `start`-th visible file.
    pub fn visible_files(&self, start: usize) -> FileIter {
        FileIter::visible(self, start)
    }

    /// Iterates the direct children of the directory at `path`.
    fn child_entries<'a>(&'a self, path: &'a Path) -> ChildEntriesIter<'a> {
        ChildEntriesIter::new(path, self)
    }

    /// The entry for the worktree root.
    ///
    /// Panics if the scan has not yet inserted the root entry.
    pub fn root_entry(&self) -> &Entry {
        self.entry_for_path("").unwrap()
    }

    /// Returns the filename of the snapshot's root, plus a trailing slash if the snapshot's root is
    /// a directory.
    pub fn root_name(&self) -> &str {
        &self.root_name
    }

    /// Whether `path` falls within a directory the scanner has discovered but
    /// not yet populated (an empty tree counts as everything pending).
    fn path_is_pending(&self, path: impl AsRef<Path>) -> bool {
        if self.entries.is_empty() {
            return true;
        }
        let path = path.as_ref();
        let mut cursor = self.entries.cursor::<_, ()>();
        if cursor.seek(&PathSearch::Exact(path), SeekBias::Left, &()) {
            let entry = cursor.item().unwrap();
            if entry.path.as_ref() == path {
                return matches!(entry.kind, EntryKind::PendingDir);
            }
        }
        // Not present: pending if the nearest preceding entry is a pending
        // directory that would contain `path`.
        if let Some(entry) = cursor.prev_item() {
            matches!(entry.kind, EntryKind::PendingDir) && path.starts_with(entry.path.as_ref())
        } else {
            false
        }
    }

    /// The entry at exactly `path`, if one exists.
    fn entry_for_path(&self, path: impl AsRef<Path>) -> Option<&Entry> {
        let mut cursor = self.entries.cursor::<_, ()>();
        if cursor.seek(&PathSearch::Exact(path.as_ref()), SeekBias::Left, &()) {
            cursor.item()
        } else {
            None
        }
    }

    /// The inode of the entry at `path`, if one exists.
    pub fn inode_for_path(&self, path: impl AsRef<Path>) -> Option<u64> {
        self.entry_for_path(path.as_ref()).map(|e| e.inode())
    }

    /// Inserts (or replaces) a single entry. If the entry is a `.gitignore`
    /// file, its contents are parsed and recorded for the containing
    /// directory, stamped with the current scan id.
    fn insert_entry(&mut self, entry: Entry) {
        if !entry.is_dir() && entry.path().file_name() == Some(&GITIGNORE) {
            let (ignore, err) = Gitignore::new(self.abs_path.join(entry.path()));
            if let Some(err) = err {
                log::error!("error in ignore file {:?} - {:?}", entry.path(), err);
            }

            let ignore_dir_path = entry.path().parent().unwrap();
            self.ignores
                .insert(ignore_dir_path.into(), (Arc::new(ignore), self.scan_id));
        }
        self.entries.insert(entry, &());
    }

    /// Populates a previously-pending directory with its scanned children
    /// and, if a `.gitignore` was found inside it, records the parsed ignore.
    ///
    /// Panics (via `unreachable!`) if the parent is not a pending directory.
    fn populate_dir(
        &mut self,
        parent_path: Arc<Path>,
        entries: impl IntoIterator<Item = Entry>,
        ignore: Option<Arc<Gitignore>>,
    ) {
        let mut edits = Vec::new();

        let mut parent_entry = self
            .entries
            .get(&PathKey(parent_path.clone()), &())
            .unwrap()
            .clone();
        if let Some(ignore) = ignore {
            self.ignores.insert(parent_path, (ignore, self.scan_id));
        }
        // Only pending directories may be populated; anything else indicates
        // a scanner bug.
        if matches!(parent_entry.kind, EntryKind::PendingDir) {
            parent_entry.kind = EntryKind::Dir;
        } else {
            unreachable!();
        }
        edits.push(Edit::Insert(parent_entry));

        for entry in entries {
            edits.push(Edit::Insert(entry));
        }
        self.entries.edit(edits, &());
    }

    /// Removes the entry at `path` along with its entire subtree. If the
    /// removed file was a `.gitignore`, the parent's ignore record is stamped
    /// with the current scan id so `update_ignore_statuses` reconsiders it.
    fn remove_path(&mut self, path: &Path) {
        let new_entries = {
            let mut cursor = self.entries.cursor::<_, ()>();
            // Keep everything before `path`, skip ahead to the first path
            // outside `path`'s subtree, then keep the remainder.
            let mut new_entries = cursor.slice(&PathSearch::Exact(path), SeekBias::Left, &());
            cursor.seek_forward(&PathSearch::Successor(path), SeekBias::Left, &());
            new_entries.push_tree(cursor.suffix(&()), &());
            new_entries
        };
        self.entries = new_entries;

        if path.file_name() == Some(&GITIGNORE) {
            if let Some((_, scan_id)) = self.ignores.get_mut(path.parent().unwrap()) {
                *scan_id = self.scan_id;
            }
        }
    }

    /// Builds the stack of gitignores applying to `path` by walking its
    /// ancestors from the root downward. Collapses to `IgnoreStack::all` as
    /// soon as any ancestor (or the path itself) is ignored.
    fn ignore_stack_for_path(&self, path: &Path, is_dir: bool) -> Arc<IgnoreStack> {
        let mut new_ignores = Vec::new();
        for ancestor in path.ancestors().skip(1) {
            if let Some((ignore, _)) = self.ignores.get(ancestor) {
                new_ignores.push((ancestor, Some(ignore.clone())));
            } else {
                new_ignores.push((ancestor, None));
            }
        }

        let mut ignore_stack = IgnoreStack::none();
        for (parent_path, ignore) in new_ignores.into_iter().rev() {
            if ignore_stack.is_path_ignored(&parent_path, true) {
                ignore_stack = IgnoreStack::all();
                break;
            } else if let Some(ignore) = ignore {
                ignore_stack = ignore_stack.append(Arc::from(parent_path), ignore);
            }
        }

        if ignore_stack.is_path_ignored(path, is_dir) {
            ignore_stack = IgnoreStack::all();
        }

        ignore_stack
    }
}
411
impl fmt::Debug for Snapshot {
    /// Renders the entry tree as an indented listing, one entry per line.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        for entry in self.entries.cursor::<(), ()>() {
            // Indent proportionally to the entry's depth in the tree.
            for _ in entry.path().ancestors().skip(1) {
                write!(f, " ")?;
            }
            writeln!(f, "{:?} (inode: {})", entry.path(), entry.inode())?;
        }
        Ok(())
    }
}
423
impl FileHandle {
    /// Returns this file's path relative to the root of its worktree.
    pub fn path(&self) -> Arc<Path> {
        self.state.lock().path.clone()
    }

    /// Returns the last component of this handle's absolute path. If this handle refers to the root
    /// of its worktree, then this method will return the name of the worktree itself.
    pub fn file_name<'a>(&'a self, cx: &'a AppContext) -> Option<OsString> {
        self.state
            .lock()
            .path
            .file_name()
            .or_else(|| self.worktree.read(cx).abs_path().file_name())
            .map(Into::into)
    }

    /// Whether the file was absent from the latest worktree snapshot.
    pub fn is_deleted(&self) -> bool {
        self.state.lock().is_deleted
    }

    /// The file's last observed modification time.
    pub fn mtime(&self) -> SystemTime {
        self.state.lock().mtime
    }

    /// Inverse of `is_deleted`.
    pub fn exists(&self) -> bool {
        !self.is_deleted()
    }

    /// Loads the file's contents as an edit history via the worktree.
    pub fn load_history(&self, cx: &AppContext) -> impl Future<Output = Result<History>> {
        self.worktree.read(cx).load_history(&self.path(), cx)
    }

    /// Writes `content` to the file via the worktree.
    pub fn save<'a>(&self, content: Rope, cx: &AppContext) -> Task<Result<()>> {
        let worktree = self.worktree.read(cx);
        worktree.save(&self.path(), content, cx)
    }

    /// Id of the worktree this handle belongs to.
    pub fn worktree_id(&self) -> usize {
        self.worktree.id()
    }

    /// A (worktree id, path) pair identifying the underlying file.
    pub fn entry_id(&self) -> (usize, Arc<Path>) {
        (self.worktree.id(), self.path())
    }

    /// Invokes `callback` whenever the observed worktree changes this
    /// handle's state (path, mtime, or deletion status).
    pub fn observe_from_model<T: Entity>(
        &self,
        cx: &mut ModelContext<T>,
        mut callback: impl FnMut(&mut T, FileHandle, &mut ModelContext<T>) + 'static,
    ) {
        let mut prev_state = self.state.lock().clone();
        // Hold only a weak reference so observation doesn't keep the handle
        // state alive after every FileHandle clone is dropped.
        let cur_state = Arc::downgrade(&self.state);
        cx.observe(&self.worktree, move |observer, worktree, cx| {
            if let Some(cur_state) = cur_state.upgrade() {
                let cur_state_unlocked = cur_state.lock();
                if *cur_state_unlocked != prev_state {
                    prev_state = cur_state_unlocked.clone();
                    // Release the lock before invoking the callback, which may
                    // itself read the handle state.
                    drop(cur_state_unlocked);
                    callback(
                        observer,
                        FileHandle {
                            worktree,
                            state: cur_state,
                        },
                        cx,
                    );
                }
            }
        });
    }
}
496
/// A single file or directory within a worktree.
#[derive(Clone, Debug)]
pub struct Entry {
    kind: EntryKind,
    /// Path relative to the worktree root (empty for the root itself).
    path: Arc<Path>,
    /// File system inode number.
    inode: u64,
    is_symlink: bool,
    /// Whether a gitignore excludes this entry.
    is_ignored: bool,
}
505
#[derive(Clone, Debug)]
pub enum EntryKind {
    /// A directory discovered but whose children are not yet scanned.
    PendingDir,
    /// A fully scanned directory.
    Dir,
    /// A file, carrying the character bag used for fuzzy path matching.
    File(CharBag),
}
512
513impl Entry {
514 pub fn path(&self) -> &Arc<Path> {
515 &self.path
516 }
517
518 pub fn inode(&self) -> u64 {
519 self.inode
520 }
521
522 pub fn is_ignored(&self) -> bool {
523 self.is_ignored
524 }
525
526 fn is_dir(&self) -> bool {
527 matches!(self.kind, EntryKind::Dir | EntryKind::PendingDir)
528 }
529
530 fn is_file(&self) -> bool {
531 matches!(self.kind, EntryKind::File(_))
532 }
533}
534
535impl sum_tree::Item for Entry {
536 type Summary = EntrySummary;
537
538 fn summary(&self) -> Self::Summary {
539 let file_count;
540 let visible_file_count;
541 if self.is_file() {
542 file_count = 1;
543 if self.is_ignored {
544 visible_file_count = 0;
545 } else {
546 visible_file_count = 1;
547 }
548 } else {
549 file_count = 0;
550 visible_file_count = 0;
551 }
552
553 EntrySummary {
554 max_path: self.path().clone(),
555 file_count,
556 visible_file_count,
557 }
558 }
559}
560
561impl sum_tree::KeyedItem for Entry {
562 type Key = PathKey;
563
564 fn key(&self) -> Self::Key {
565 PathKey(self.path().clone())
566 }
567}
568
/// Aggregated statistics for a range of entries in the sum tree.
#[derive(Clone, Debug)]
pub struct EntrySummary {
    /// The greatest (last) path in the summarized range.
    max_path: Arc<Path>,
    /// Number of files in the range.
    file_count: usize,
    /// Number of non-ignored files in the range.
    visible_file_count: usize,
}
575
576impl Default for EntrySummary {
577 fn default() -> Self {
578 Self {
579 max_path: Arc::from(Path::new("")),
580 file_count: 0,
581 visible_file_count: 0,
582 }
583 }
584}
585
impl sum_tree::Summary for EntrySummary {
    type Context = ();

    /// Combines two summaries: the right-hand side's maximum path wins (it
    /// summarizes entries further right in path order) and counts add up.
    fn add_summary(&mut self, rhs: &Self, _: &()) {
        self.max_path = rhs.max_path.clone();
        self.file_count += rhs.file_count;
        self.visible_file_count += rhs.visible_file_count;
    }
}
595
/// Ordering key for entries in the sum tree: the entry's path.
#[derive(Clone, Debug, Eq, PartialEq, Ord, PartialOrd)]
pub struct PathKey(Arc<Path>);
598
599impl Default for PathKey {
600 fn default() -> Self {
601 Self(Path::new("").into())
602 }
603}
604
impl<'a> sum_tree::Dimension<'a, EntrySummary> for PathKey {
    fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
        // A key dimension accumulates to the maximum path seen so far.
        self.0 = summary.max_path.clone();
    }
}
610
/// Seek target for the entry tree: either a path itself, or the position just
/// past a path's entire subtree.
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
enum PathSearch<'a> {
    Exact(&'a Path),
    Successor(&'a Path),
}
616
impl<'a> Ord for PathSearch<'a> {
    fn cmp(&self, other: &Self) -> cmp::Ordering {
        match (self, other) {
            (Self::Exact(a), Self::Exact(b)) => a.cmp(b),
            (Self::Successor(a), Self::Exact(b)) => {
                // The successor of `a` sorts after every path inside `a`'s
                // subtree; outside that subtree it compares like `a` itself.
                if b.starts_with(a) {
                    cmp::Ordering::Greater
                } else {
                    a.cmp(b)
                }
            }
            // NOTE(review): seeks apparently only compare a search key on the
            // left against tree dimensions (`Exact`) on the right, so the
            // remaining combinations are never reached — confirm before using
            // this `Ord` elsewhere.
            _ => todo!("not sure we need the other two cases"),
        }
    }
}
632
impl<'a> PartialOrd for PathSearch<'a> {
    // Defers to the total order above.
    fn partial_cmp(&self, other: &Self) -> Option<cmp::Ordering> {
        Some(self.cmp(other))
    }
}
638
639impl<'a> Default for PathSearch<'a> {
640 fn default() -> Self {
641 Self::Exact(Path::new("").into())
642 }
643}
644
impl<'a: 'b, 'b> sum_tree::Dimension<'a, EntrySummary> for PathSearch<'b> {
    fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
        // The dimension accumulates to the maximum path of the summarized
        // range, always expressed as an `Exact` search key.
        *self = Self::Exact(summary.max_path.as_ref());
    }
}
650
/// Sum-tree dimension counting all files, including ignored ones.
#[derive(Copy, Clone, Default, Debug, Eq, PartialEq, Ord, PartialOrd)]
pub struct FileCount(usize);
653
impl<'a> sum_tree::Dimension<'a, EntrySummary> for FileCount {
    fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
        // Accumulates the file count of each summarized subtree.
        self.0 += summary.file_count;
    }
}
659
/// Sum-tree dimension counting only non-ignored files.
#[derive(Copy, Clone, Default, Debug, Eq, PartialEq, Ord, PartialOrd)]
pub struct VisibleFileCount(usize);
662
impl<'a> sum_tree::Dimension<'a, EntrySummary> for VisibleFileCount {
    fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
        // Accumulates the visible-file count of each summarized subtree.
        self.0 += summary.visible_file_count;
    }
}
668
/// Scans the worktree on a dedicated thread, keeping the shared snapshot and
/// file-handle states in sync with the file system.
struct BackgroundScanner {
    /// Snapshot shared with the foreground `Worktree`.
    snapshot: Arc<Mutex<Snapshot>>,
    /// Channel used to broadcast scan-state transitions.
    notify: Sender<ScanState>,
    /// Live file handles to update when files move, change, or disappear.
    handles: Arc<Mutex<HashMap<Arc<Path>, Weak<Mutex<FileHandleState>>>>>,
    /// Mount points outside the worktree's own file system; never descended into.
    other_mount_paths: HashSet<PathBuf>,
    /// Pool used to parallelize directory scans.
    thread_pool: scoped_pool::Pool,
    /// Characters of the root name, mixed into every entry's fuzzy-match bag.
    root_char_bag: CharBag,
}
677
678impl BackgroundScanner {
679 fn new(
680 snapshot: Arc<Mutex<Snapshot>>,
681 handles: Arc<Mutex<HashMap<Arc<Path>, Weak<Mutex<FileHandleState>>>>>,
682 notify: Sender<ScanState>,
683 worktree_id: usize,
684 ) -> Self {
685 let mut scanner = Self {
686 root_char_bag: Default::default(),
687 snapshot,
688 notify,
689 handles,
690 other_mount_paths: Default::default(),
691 thread_pool: scoped_pool::Pool::new(16, format!("worktree-{}-scanner", worktree_id)),
692 };
693 scanner.update_other_mount_paths();
694 scanner
695 }
696
697 fn update_other_mount_paths(&mut self) {
698 let path = self.snapshot.lock().abs_path.clone();
699 self.other_mount_paths.clear();
700 self.other_mount_paths.extend(
701 mounted_volume_paths()
702 .into_iter()
703 .filter(|mount_path| !path.starts_with(mount_path)),
704 );
705 }
706
707 fn abs_path(&self) -> Arc<Path> {
708 self.snapshot.lock().abs_path.clone()
709 }
710
711 fn snapshot(&self) -> Snapshot {
712 self.snapshot.lock().clone()
713 }
714
715 fn run(mut self, event_stream: fsevent::EventStream) {
716 if smol::block_on(self.notify.send(ScanState::Scanning)).is_err() {
717 return;
718 }
719
720 if let Err(err) = self.scan_dirs() {
721 if smol::block_on(self.notify.send(ScanState::Err(Arc::new(err)))).is_err() {
722 return;
723 }
724 }
725
726 if smol::block_on(self.notify.send(ScanState::Idle)).is_err() {
727 return;
728 }
729
730 event_stream.run(move |events| {
731 if smol::block_on(self.notify.send(ScanState::Scanning)).is_err() {
732 return false;
733 }
734
735 if !self.process_events(events) {
736 return false;
737 }
738
739 if smol::block_on(self.notify.send(ScanState::Idle)).is_err() {
740 return false;
741 }
742
743 true
744 });
745 }
746
747 fn scan_dirs(&mut self) -> io::Result<()> {
748 self.snapshot.lock().scan_id += 1;
749
750 let path: Arc<Path> = Arc::from(Path::new(""));
751 let abs_path = self.abs_path();
752 let metadata = fs::metadata(&abs_path)?;
753 let inode = metadata.ino();
754 let is_symlink = fs::symlink_metadata(&abs_path)?.file_type().is_symlink();
755 let is_dir = metadata.file_type().is_dir();
756
757 // After determining whether the root entry is a file or a directory, populate the
758 // snapshot's "root name", which will be used for the purpose of fuzzy matching.
759 let mut root_name = abs_path
760 .file_name()
761 .map_or(String::new(), |f| f.to_string_lossy().to_string());
762 if is_dir {
763 root_name.push('/');
764 }
765 self.root_char_bag = root_name.chars().map(|c| c.to_ascii_lowercase()).collect();
766 self.snapshot.lock().root_name = root_name;
767
768 if is_dir {
769 self.snapshot.lock().insert_entry(Entry {
770 kind: EntryKind::PendingDir,
771 path: path.clone(),
772 inode,
773 is_symlink,
774 is_ignored: false,
775 });
776
777 let (tx, rx) = crossbeam_channel::unbounded();
778 tx.send(ScanJob {
779 abs_path: abs_path.to_path_buf(),
780 path,
781 ignore_stack: IgnoreStack::none(),
782 scan_queue: tx.clone(),
783 })
784 .unwrap();
785 drop(tx);
786
787 self.thread_pool.scoped(|pool| {
788 for _ in 0..self.thread_pool.thread_count() {
789 pool.execute(|| {
790 while let Ok(job) = rx.recv() {
791 if let Err(err) = self.scan_dir(&job) {
792 log::error!("error scanning {:?}: {}", job.abs_path, err);
793 }
794 }
795 });
796 }
797 });
798 } else {
799 self.snapshot.lock().insert_entry(Entry {
800 kind: EntryKind::File(self.char_bag(&path)),
801 path,
802 inode,
803 is_symlink,
804 is_ignored: false,
805 });
806 }
807
808 self.mark_deleted_file_handles();
809 Ok(())
810 }
811
812 fn scan_dir(&self, job: &ScanJob) -> io::Result<()> {
813 let mut new_entries: Vec<Entry> = Vec::new();
814 let mut new_jobs: Vec<ScanJob> = Vec::new();
815 let mut ignore_stack = job.ignore_stack.clone();
816 let mut new_ignore = None;
817
818 for child_entry in fs::read_dir(&job.abs_path)? {
819 let child_entry = child_entry?;
820 let child_name = child_entry.file_name();
821 let child_abs_path = job.abs_path.join(&child_name);
822 let child_path: Arc<Path> = job.path.join(&child_name).into();
823 let child_is_symlink = child_entry.metadata()?.file_type().is_symlink();
824 let child_metadata = if let Ok(metadata) = fs::metadata(&child_abs_path) {
825 metadata
826 } else {
827 log::error!("could not get metadata for path {:?}", child_abs_path);
828 continue;
829 };
830
831 let child_inode = child_metadata.ino();
832
833 // Disallow mount points outside the file system containing the root of this worktree
834 if self.other_mount_paths.contains(&child_abs_path) {
835 continue;
836 }
837
838 // If we find a .gitignore, add it to the stack of ignores used to determine which paths are ignored
839 if child_name == *GITIGNORE {
840 let (ignore, err) = Gitignore::new(&child_abs_path);
841 if let Some(err) = err {
842 log::error!("error in ignore file {:?} - {:?}", child_path, err);
843 }
844 let ignore = Arc::new(ignore);
845 ignore_stack = ignore_stack.append(job.path.clone(), ignore.clone());
846 new_ignore = Some(ignore);
847
848 // Update ignore status of any child entries we've already processed to reflect the
849 // ignore file in the current directory. Because `.gitignore` starts with a `.`,
850 // there should rarely be too numerous. Update the ignore stack associated with any
851 // new jobs as well.
852 let mut new_jobs = new_jobs.iter_mut();
853 for entry in &mut new_entries {
854 entry.is_ignored = ignore_stack.is_path_ignored(&entry.path, entry.is_dir());
855 if entry.is_dir() {
856 new_jobs.next().unwrap().ignore_stack = if entry.is_ignored {
857 IgnoreStack::all()
858 } else {
859 ignore_stack.clone()
860 };
861 }
862 }
863 }
864
865 if child_metadata.is_dir() {
866 let is_ignored = ignore_stack.is_path_ignored(&child_path, true);
867 new_entries.push(Entry {
868 kind: EntryKind::PendingDir,
869 path: child_path.clone(),
870 inode: child_inode,
871 is_symlink: child_is_symlink,
872 is_ignored,
873 });
874 new_jobs.push(ScanJob {
875 abs_path: child_abs_path,
876 path: child_path,
877 ignore_stack: if is_ignored {
878 IgnoreStack::all()
879 } else {
880 ignore_stack.clone()
881 },
882 scan_queue: job.scan_queue.clone(),
883 });
884 } else {
885 let is_ignored = ignore_stack.is_path_ignored(&child_path, false);
886 new_entries.push(Entry {
887 kind: EntryKind::File(self.char_bag(&child_path)),
888 path: child_path,
889 inode: child_inode,
890 is_symlink: child_is_symlink,
891 is_ignored,
892 });
893 };
894 }
895
896 self.snapshot
897 .lock()
898 .populate_dir(job.path.clone(), new_entries, new_ignore);
899 for new_job in new_jobs {
900 job.scan_queue.send(new_job).unwrap();
901 }
902
903 Ok(())
904 }
905
906 fn process_events(&mut self, mut events: Vec<fsevent::Event>) -> bool {
907 self.update_other_mount_paths();
908
909 let mut snapshot = self.snapshot();
910 snapshot.scan_id += 1;
911
912 let root_abs_path = if let Ok(abs_path) = snapshot.abs_path.canonicalize() {
913 abs_path
914 } else {
915 return false;
916 };
917
918 let mut renamed_paths: HashMap<u64, PathBuf> = HashMap::new();
919 let mut handles = self.handles.lock();
920 let mut updated_handles = HashMap::new();
921 for event in &events {
922 let path = if let Ok(path) = event.path.strip_prefix(&root_abs_path) {
923 path
924 } else {
925 continue;
926 };
927
928 let metadata = fs::metadata(&event.path);
929 if event.flags.contains(fsevent::StreamFlags::ITEM_RENAMED) {
930 if let Some(inode) = snapshot.inode_for_path(path) {
931 renamed_paths.insert(inode, path.to_path_buf());
932 } else if let Ok(metadata) = &metadata {
933 let new_path = path;
934 if let Some(old_path) = renamed_paths.get(&metadata.ino()) {
935 handles.retain(|handle_path, handle_state| {
936 if let Ok(path_suffix) = handle_path.strip_prefix(&old_path) {
937 let new_handle_path: Arc<Path> =
938 if path_suffix.file_name().is_some() {
939 new_path.join(path_suffix)
940 } else {
941 new_path.to_path_buf()
942 }
943 .into();
944 if let Some(handle_state) = Weak::upgrade(&handle_state) {
945 let mut state = handle_state.lock();
946 state.path = new_handle_path.clone();
947 updated_handles
948 .insert(new_handle_path, Arc::downgrade(&handle_state));
949 }
950 false
951 } else {
952 true
953 }
954 });
955 handles.extend(updated_handles.drain());
956 }
957 }
958 }
959
960 for state in handles.values_mut() {
961 if let Some(state) = Weak::upgrade(&state) {
962 let mut state = state.lock();
963 if state.path.as_ref() == path {
964 if let Ok(metadata) = &metadata {
965 state.mtime = metadata.modified().unwrap();
966 }
967 } else if state.path.starts_with(path) {
968 if let Ok(metadata) = fs::metadata(state.path.as_ref()) {
969 state.mtime = metadata.modified().unwrap();
970 }
971 }
972 }
973 }
974 }
975 drop(handles);
976
977 events.sort_unstable_by(|a, b| a.path.cmp(&b.path));
978 let mut abs_paths = events.into_iter().map(|e| e.path).peekable();
979 let (scan_queue_tx, scan_queue_rx) = crossbeam_channel::unbounded();
980
981 while let Some(abs_path) = abs_paths.next() {
982 let path = match abs_path.strip_prefix(&root_abs_path) {
983 Ok(path) => Arc::from(path.to_path_buf()),
984 Err(_) => {
985 log::error!(
986 "unexpected event {:?} for root path {:?}",
987 abs_path,
988 root_abs_path
989 );
990 continue;
991 }
992 };
993
994 while abs_paths.peek().map_or(false, |p| p.starts_with(&abs_path)) {
995 abs_paths.next();
996 }
997
998 snapshot.remove_path(&path);
999
1000 match self.fs_entry_for_path(path.clone(), &abs_path) {
1001 Ok(Some(mut fs_entry)) => {
1002 let is_dir = fs_entry.is_dir();
1003 let ignore_stack = snapshot.ignore_stack_for_path(&path, is_dir);
1004 fs_entry.is_ignored = ignore_stack.is_all();
1005 snapshot.insert_entry(fs_entry);
1006 if is_dir {
1007 scan_queue_tx
1008 .send(ScanJob {
1009 abs_path,
1010 path,
1011 ignore_stack,
1012 scan_queue: scan_queue_tx.clone(),
1013 })
1014 .unwrap();
1015 }
1016 }
1017 Ok(None) => {}
1018 Err(err) => {
1019 // TODO - create a special 'error' entry in the entries tree to mark this
1020 log::error!("error reading file on event {:?}", err);
1021 }
1022 }
1023 }
1024
1025 *self.snapshot.lock() = snapshot;
1026
1027 // Scan any directories that were created as part of this event batch.
1028 drop(scan_queue_tx);
1029 self.thread_pool.scoped(|pool| {
1030 for _ in 0..self.thread_pool.thread_count() {
1031 pool.execute(|| {
1032 while let Ok(job) = scan_queue_rx.recv() {
1033 if let Err(err) = self.scan_dir(&job) {
1034 log::error!("error scanning {:?}: {}", job.abs_path, err);
1035 }
1036 }
1037 });
1038 }
1039 });
1040
1041 self.update_ignore_statuses();
1042 self.mark_deleted_file_handles();
1043 true
1044 }
1045
1046 fn update_ignore_statuses(&self) {
1047 let mut snapshot = self.snapshot();
1048
1049 let mut ignores_to_update = Vec::new();
1050 let mut ignores_to_delete = Vec::new();
1051 for (parent_path, (_, scan_id)) in &snapshot.ignores {
1052 if *scan_id == snapshot.scan_id && snapshot.entry_for_path(parent_path).is_some() {
1053 ignores_to_update.push(parent_path.clone());
1054 }
1055
1056 let ignore_path = parent_path.join(&*GITIGNORE);
1057 if snapshot.entry_for_path(ignore_path).is_none() {
1058 ignores_to_delete.push(parent_path.clone());
1059 }
1060 }
1061
1062 for parent_path in ignores_to_delete {
1063 snapshot.ignores.remove(&parent_path);
1064 self.snapshot.lock().ignores.remove(&parent_path);
1065 }
1066
1067 let (ignore_queue_tx, ignore_queue_rx) = crossbeam_channel::unbounded();
1068 ignores_to_update.sort_unstable();
1069 let mut ignores_to_update = ignores_to_update.into_iter().peekable();
1070 while let Some(parent_path) = ignores_to_update.next() {
1071 while ignores_to_update
1072 .peek()
1073 .map_or(false, |p| p.starts_with(&parent_path))
1074 {
1075 ignores_to_update.next().unwrap();
1076 }
1077
1078 let ignore_stack = snapshot.ignore_stack_for_path(&parent_path, true);
1079 ignore_queue_tx
1080 .send(UpdateIgnoreStatusJob {
1081 path: parent_path,
1082 ignore_stack,
1083 ignore_queue: ignore_queue_tx.clone(),
1084 })
1085 .unwrap();
1086 }
1087 drop(ignore_queue_tx);
1088
1089 self.thread_pool.scoped(|scope| {
1090 for _ in 0..self.thread_pool.thread_count() {
1091 scope.execute(|| {
1092 while let Ok(job) = ignore_queue_rx.recv() {
1093 self.update_ignore_status(job, &snapshot);
1094 }
1095 });
1096 }
1097 });
1098 }
1099
1100 fn update_ignore_status(&self, job: UpdateIgnoreStatusJob, snapshot: &Snapshot) {
1101 let mut ignore_stack = job.ignore_stack;
1102 if let Some((ignore, _)) = snapshot.ignores.get(&job.path) {
1103 ignore_stack = ignore_stack.append(job.path.clone(), ignore.clone());
1104 }
1105
1106 let mut edits = Vec::new();
1107 for mut entry in snapshot.child_entries(&job.path).cloned() {
1108 let was_ignored = entry.is_ignored;
1109 entry.is_ignored = ignore_stack.is_path_ignored(entry.path(), entry.is_dir());
1110 if entry.is_dir() {
1111 let child_ignore_stack = if entry.is_ignored {
1112 IgnoreStack::all()
1113 } else {
1114 ignore_stack.clone()
1115 };
1116 job.ignore_queue
1117 .send(UpdateIgnoreStatusJob {
1118 path: entry.path().clone(),
1119 ignore_stack: child_ignore_stack,
1120 ignore_queue: job.ignore_queue.clone(),
1121 })
1122 .unwrap();
1123 }
1124
1125 if entry.is_ignored != was_ignored {
1126 edits.push(Edit::Insert(entry));
1127 }
1128 }
1129 self.snapshot.lock().entries.edit(edits, &());
1130 }
1131
1132 fn mark_deleted_file_handles(&self) {
1133 let mut handles = self.handles.lock();
1134 let snapshot = self.snapshot.lock();
1135 handles.retain(|path, handle_state| {
1136 if let Some(handle_state) = Weak::upgrade(&handle_state) {
1137 let mut handle_state = handle_state.lock();
1138 handle_state.is_deleted = snapshot.entry_for_path(&path).is_none();
1139 true
1140 } else {
1141 false
1142 }
1143 });
1144 }
1145
1146 fn fs_entry_for_path(&self, path: Arc<Path>, abs_path: &Path) -> Result<Option<Entry>> {
1147 let metadata = match fs::metadata(&abs_path) {
1148 Err(err) => {
1149 return match (err.kind(), err.raw_os_error()) {
1150 (io::ErrorKind::NotFound, _) => Ok(None),
1151 (io::ErrorKind::Other, Some(libc::ENOTDIR)) => Ok(None),
1152 _ => Err(anyhow::Error::new(err)),
1153 }
1154 }
1155 Ok(metadata) => metadata,
1156 };
1157 let inode = metadata.ino();
1158 let is_symlink = fs::symlink_metadata(&abs_path)
1159 .context("failed to read symlink metadata")?
1160 .file_type()
1161 .is_symlink();
1162
1163 let entry = Entry {
1164 kind: if metadata.file_type().is_dir() {
1165 EntryKind::PendingDir
1166 } else {
1167 EntryKind::File(self.char_bag(&path))
1168 },
1169 path,
1170 inode,
1171 is_symlink,
1172 is_ignored: false,
1173 };
1174
1175 Ok(Some(entry))
1176 }
1177
1178 fn char_bag(&self, path: &Path) -> CharBag {
1179 let mut result = self.root_char_bag;
1180 result.extend(
1181 path.to_string_lossy()
1182 .chars()
1183 .map(|c| c.to_ascii_lowercase()),
1184 );
1185 result
1186 }
1187}
1188
/// Work item for the background directory scanner.
struct ScanJob {
    /// Absolute filesystem path of the location to scan.
    abs_path: PathBuf,
    /// The same location, relative to the worktree root.
    path: Arc<Path>,
    /// Ignore state inherited from ancestor directories.
    ignore_stack: Arc<IgnoreStack>,
    /// Sender used to enqueue follow-up jobs; each job carries a clone so the
    /// channel stays open while any job is still outstanding.
    scan_queue: crossbeam_channel::Sender<ScanJob>,
}
1195
/// Work item for recomputing `is_ignored` on the entries under `path` after a
/// `.gitignore` change; processing a directory enqueues one job per child
/// directory on `ignore_queue`.
struct UpdateIgnoreStatusJob {
    /// Worktree-relative path of the directory whose children are updated.
    path: Arc<Path>,
    /// Ignore state accumulated from this directory's ancestors.
    ignore_stack: Arc<IgnoreStack>,
    /// Sender used to enqueue jobs for child directories.
    ignore_queue: crossbeam_channel::Sender<UpdateIgnoreStatusJob>,
}
1201
/// Extension methods on `ModelHandle<Worktree>`.
pub trait WorktreeHandle {
    /// Resolve a `FileHandle` for the worktree-relative `path`, creating and
    /// registering shared handle state if no live handle exists for it yet.
    fn file(&self, path: impl AsRef<Path>, cx: &mut MutableAppContext) -> Task<FileHandle>;

    /// Test-only helper that waits until pending FS events have been
    /// processed by the worktree; see the implementation for details.
    #[cfg(test)]
    fn flush_fs_events<'a>(
        &self,
        cx: &'a gpui::TestAppContext,
    ) -> futures_core::future::LocalBoxFuture<'a, ()>;
}
1211
impl WorktreeHandle for ModelHandle<Worktree> {
    fn file(&self, path: impl AsRef<Path>, cx: &mut MutableAppContext) -> Task<FileHandle> {
        let path = Arc::from(path.as_ref());
        let handle = self.clone();
        let tree = self.read(cx);
        let abs_path = tree.absolutize(&path);
        cx.spawn(|cx| async move {
            // Stat the file on the background pool so the foreground thread
            // isn't blocked; a missing file gets a placeholder mtime.
            let mtime = cx
                .background_executor()
                .spawn(async move {
                    if let Ok(metadata) = fs::metadata(&abs_path) {
                        metadata.modified().unwrap()
                    } else {
                        UNIX_EPOCH
                    }
                })
                .await;
            let state = handle.read_with(&cx, |tree, _| {
                let mut handles = tree.handles.lock();
                // Reuse the shared state if another live handle already
                // exists for this path...
                if let Some(state) = handles.get(&path).and_then(Weak::upgrade) {
                    state
                } else {
                    // ...otherwise create fresh state. A path that is neither
                    // in the snapshot nor still pending a scan is considered
                    // deleted from the start.
                    let handle_state = if let Some(entry) = tree.entry_for_path(&path) {
                        FileHandleState {
                            path: entry.path().clone(),
                            is_deleted: false,
                            mtime,
                        }
                    } else {
                        FileHandleState {
                            path: path.clone(),
                            is_deleted: !tree.path_is_pending(path),
                            mtime,
                        }
                    };

                    let state = Arc::new(Mutex::new(handle_state.clone()));
                    handles.insert(handle_state.path, Arc::downgrade(&state));
                    state
                }
            });
            FileHandle {
                worktree: handle.clone(),
                state,
            }
        })
    }

    // The worktree's FS event stream sometimes delivers "redundant" events for FS changes that
    // occurred before the worktree was constructed. These events can cause the worktree to perform
    // extra directory scans, and emit extra scan-state notifications.
    //
    // This function mutates the worktree's directory and waits for those mutations to be picked up,
    // to ensure that all redundant FS events have already been processed.
    #[cfg(test)]
    fn flush_fs_events<'a>(
        &self,
        cx: &'a gpui::TestAppContext,
    ) -> futures_core::future::LocalBoxFuture<'a, ()> {
        use smol::future::FutureExt;

        let filename = "fs-event-sentinel";
        let root_path = cx.read(|cx| self.read(cx).abs_path.clone());
        let tree = self.clone();
        async move {
            // Create and then delete a sentinel file, waiting for the scanner
            // to observe each mutation; once both round-trips complete, any
            // events that were queued before them must have been drained.
            fs::write(root_path.join(filename), "").unwrap();
            tree.condition(&cx, |tree, _| tree.entry_for_path(filename).is_some())
                .await;

            fs::remove_file(root_path.join(filename)).unwrap();
            tree.condition(&cx, |tree, _| tree.entry_for_path(filename).is_none())
                .await;

            cx.read(|cx| tree.read(cx).scan_complete()).await;
        }
        .boxed_local()
    }
}
1290
/// Iterator over a snapshot's file entries, in entry-tree order, counted
/// either over all files or only non-ignored ("visible") files.
pub enum FileIter<'a> {
    All(Cursor<'a, Entry, FileCount, FileCount>),
    Visible(Cursor<'a, Entry, VisibleFileCount, VisibleFileCount>),
}
1295
1296impl<'a> FileIter<'a> {
1297 fn all(snapshot: &'a Snapshot, start: usize) -> Self {
1298 let mut cursor = snapshot.entries.cursor();
1299 cursor.seek(&FileCount(start), SeekBias::Right, &());
1300 Self::All(cursor)
1301 }
1302
1303 fn visible(snapshot: &'a Snapshot, start: usize) -> Self {
1304 let mut cursor = snapshot.entries.cursor();
1305 cursor.seek(&VisibleFileCount(start), SeekBias::Right, &());
1306 Self::Visible(cursor)
1307 }
1308
1309 fn next_internal(&mut self) {
1310 match self {
1311 Self::All(cursor) => {
1312 let ix = *cursor.start();
1313 cursor.seek_forward(&FileCount(ix.0 + 1), SeekBias::Right, &());
1314 }
1315 Self::Visible(cursor) => {
1316 let ix = *cursor.start();
1317 cursor.seek_forward(&VisibleFileCount(ix.0 + 1), SeekBias::Right, &());
1318 }
1319 }
1320 }
1321
1322 fn item(&self) -> Option<&'a Entry> {
1323 match self {
1324 Self::All(cursor) => cursor.item(),
1325 Self::Visible(cursor) => cursor.item(),
1326 }
1327 }
1328}
1329
1330impl<'a> Iterator for FileIter<'a> {
1331 type Item = &'a Entry;
1332
1333 fn next(&mut self) -> Option<Self::Item> {
1334 if let Some(entry) = self.item() {
1335 self.next_internal();
1336 Some(entry)
1337 } else {
1338 None
1339 }
1340 }
1341}
1342
/// Iterator over the immediate children of `parent_path` within a snapshot's
/// entry tree (each child's own subtree is skipped, not yielded).
struct ChildEntriesIter<'a> {
    parent_path: &'a Path,
    cursor: Cursor<'a, Entry, PathSearch<'a>, ()>,
}
1347
1348impl<'a> ChildEntriesIter<'a> {
1349 fn new(parent_path: &'a Path, snapshot: &'a Snapshot) -> Self {
1350 let mut cursor = snapshot.entries.cursor();
1351 cursor.seek(&PathSearch::Exact(parent_path), SeekBias::Right, &());
1352 Self {
1353 parent_path,
1354 cursor,
1355 }
1356 }
1357}
1358
1359impl<'a> Iterator for ChildEntriesIter<'a> {
1360 type Item = &'a Entry;
1361
1362 fn next(&mut self) -> Option<Self::Item> {
1363 if let Some(item) = self.cursor.item() {
1364 if item.path().starts_with(self.parent_path) {
1365 self.cursor
1366 .seek_forward(&PathSearch::Successor(item.path()), SeekBias::Left, &());
1367 Some(item)
1368 } else {
1369 None
1370 }
1371 } else {
1372 None
1373 }
1374 }
1375}
1376
1377fn mounted_volume_paths() -> Vec<PathBuf> {
1378 unsafe {
1379 let mut stat_ptr: *mut libc::statfs = std::ptr::null_mut();
1380 let count = libc::getmntinfo(&mut stat_ptr as *mut _, libc::MNT_WAIT);
1381 if count >= 0 {
1382 std::slice::from_raw_parts(stat_ptr, count as usize)
1383 .iter()
1384 .map(|stat| {
1385 PathBuf::from(OsStr::from_bytes(
1386 CStr::from_ptr(&stat.f_mntonname[0]).to_bytes(),
1387 ))
1388 })
1389 .collect()
1390 } else {
1391 panic!("failed to run getmntinfo");
1392 }
1393 }
1394}
1395
1396#[cfg(test)]
1397mod tests {
1398 use super::*;
1399 use crate::editor::Buffer;
1400 use crate::test::*;
1401 use anyhow::Result;
1402 use rand::prelude::*;
1403 use serde_json::json;
1404 use std::env;
1405 use std::fmt::Write;
1406 use std::os::unix;
1407 use std::time::{SystemTime, UNIX_EPOCH};
1408
    #[gpui::test]
    async fn test_populate_and_search(mut cx: gpui::TestAppContext) {
        // On-disk fixture: string values are file contents, nested objects
        // are directories.
        let dir = temp_tree(json!({
            "root": {
                "apple": "",
                "banana": {
                    "carrot": {
                        "date": "",
                        "endive": "",
                    }
                },
                "fennel": {
                    "grape": "",
                }
            }
        }));

        // Root the worktree at a symlink to `root`, and add a symlinked
        // directory inside it (`finnochio` -> `fennel`).
        let root_link_path = dir.path().join("root_link");
        unix::fs::symlink(&dir.path().join("root"), &root_link_path).unwrap();
        unix::fs::symlink(
            &dir.path().join("root/fennel"),
            &dir.path().join("root/finnochio"),
        )
        .unwrap();

        let tree = cx.add_model(|cx| Worktree::new(root_link_path, cx));

        cx.read(|cx| tree.read(cx).scan_complete()).await;
        cx.read(|cx| {
            let tree = tree.read(cx);
            // apple, date, endive, grape — plus grape again via `finnochio`.
            assert_eq!(tree.file_count(), 5);

            // The symlinked directory resolves to the same underlying file.
            assert_eq!(
                tree.inode_for_path("fennel/grape"),
                tree.inode_for_path("finnochio/grape")
            );

            // Fuzzy-match "bna" against all paths in the snapshot.
            let results = match_paths(
                Some(tree.snapshot()).iter(),
                "bna",
                false,
                false,
                false,
                10,
                Default::default(),
                cx.thread_pool().clone(),
            )
            .into_iter()
            .map(|result| result.path)
            .collect::<Vec<Arc<Path>>>();
            assert_eq!(
                results,
                vec![
                    PathBuf::from("banana/carrot/date").into(),
                    PathBuf::from("banana/carrot/endive").into(),
                ]
            );
        })
    }
1468
    #[gpui::test]
    async fn test_save_file(mut cx: gpui::TestAppContext) {
        let dir = temp_tree(json!({
            "file1": "the old contents",
        }));

        let tree = cx.add_model(|cx| Worktree::new(dir.path(), cx));
        cx.read(|cx| tree.read(cx).scan_complete()).await;
        cx.read(|cx| assert_eq!(tree.read(cx).file_count(), 1));

        // A sizeable buffer (16 bytes x 10240 lines = 160 KB) to overwrite
        // the file with.
        let buffer = cx.add_model(|cx| Buffer::new(1, "a line of text.\n".repeat(10 * 1024), cx));

        // Save the buffer's contents over file1 through the worktree.
        let path = tree.update(&mut cx, |tree, cx| {
            let path = tree.files(0).next().unwrap().path().clone();
            assert_eq!(path.file_name().unwrap(), "file1");
            smol::block_on(tree.save(&path, buffer.read(cx).snapshot().text(), cx.as_ref()))
                .unwrap();
            path
        });

        // Loading the file back must reproduce exactly what was saved.
        let history = cx
            .read(|cx| tree.read(cx).load_history(&path, cx))
            .await
            .unwrap();
        cx.read(|cx| {
            assert_eq!(history.base_text.as_ref(), buffer.read(cx).text());
        });
    }
1497
    #[gpui::test]
    async fn test_save_in_single_file_worktree(mut cx: gpui::TestAppContext) {
        let dir = temp_tree(json!({
            "file1": "the old contents",
        }));

        // Root the worktree directly at the file, not at a directory.
        let tree = cx.add_model(|cx| Worktree::new(dir.path().join("file1"), cx));
        cx.read(|cx| tree.read(cx).scan_complete()).await;
        cx.read(|cx| assert_eq!(tree.read(cx).file_count(), 1));

        let buffer = cx.add_model(|cx| Buffer::new(1, "a line of text.\n".repeat(10 * 1024), cx));

        // In a single-file worktree the file is addressed by the empty path.
        let file = cx.update(|cx| tree.file("", cx)).await;
        cx.update(|cx| {
            assert_eq!(file.path().file_name(), None);
            smol::block_on(file.save(buffer.read(cx).snapshot().text(), cx.as_ref())).unwrap();
        });

        // Round-trip: loading history must reproduce the saved text.
        let history = cx.read(|cx| file.load_history(cx)).await.unwrap();
        cx.read(|cx| assert_eq!(history.base_text.as_ref(), buffer.read(cx).text()));
    }
1519
    #[gpui::test]
    async fn test_rescan_simple(mut cx: gpui::TestAppContext) {
        let dir = temp_tree(json!({
            "a": {
                "file1": "",
                "file2": "",
                "file3": "",
            },
            "b": {
                "c": {
                    "file4": "",
                    "file5": "",
                }
            }
        }));

        // Acquire file handles before the scan completes, including one for a
        // path that doesn't exist.
        let tree = cx.add_model(|cx| Worktree::new(dir.path(), cx));
        let file2 = cx.update(|cx| tree.file("a/file2", cx)).await;
        let file3 = cx.update(|cx| tree.file("a/file3", cx)).await;
        let file4 = cx.update(|cx| tree.file("b/c/file4", cx)).await;
        let file5 = cx.update(|cx| tree.file("b/c/file5", cx)).await;
        let non_existent_file = cx.update(|cx| tree.file("a/file_x", cx)).await;

        // After scanning, the worktree knows which files exist and which don't.
        cx.read(|cx| tree.read(cx).scan_complete()).await;
        assert!(!file2.is_deleted());
        assert!(!file3.is_deleted());
        assert!(!file4.is_deleted());
        assert!(!file5.is_deleted());
        assert!(non_existent_file.is_deleted());

        // Mutate the tree out from under the worktree: a rename into a dir
        // that then gets renamed itself, a deletion, and a simple rename.
        tree.flush_fs_events(&cx).await;
        std::fs::rename(dir.path().join("a/file3"), dir.path().join("b/c/file3")).unwrap();
        std::fs::remove_file(dir.path().join("b/c/file5")).unwrap();
        std::fs::rename(dir.path().join("b/c"), dir.path().join("d")).unwrap();
        std::fs::rename(dir.path().join("a/file2"), dir.path().join("a/file2.new")).unwrap();
        tree.flush_fs_events(&cx).await;

        cx.read(|cx| {
            // The snapshot reflects all the mutations...
            assert_eq!(
                tree.read(cx)
                    .paths()
                    .map(|p| p.to_str().unwrap())
                    .collect::<Vec<_>>(),
                vec![
                    "a",
                    "a/file1",
                    "a/file2.new",
                    "b",
                    "d",
                    "d/file3",
                    "d/file4"
                ]
            );

            // ...and the file handles track their files across renames and
            // deletions.
            assert_eq!(file2.path().to_str().unwrap(), "a/file2.new");
            assert_eq!(file4.path().as_ref(), Path::new("d/file4"));
            assert_eq!(file5.path().as_ref(), Path::new("d/file5"));
            assert!(!file2.is_deleted());
            assert!(!file4.is_deleted());
            assert!(file5.is_deleted());

            // Right now, this rename isn't detected because the target path
            // no longer exists on the file system by the time we process the
            // rename event.
            assert_eq!(file3.path().as_ref(), Path::new("a/file3"));
            assert!(file3.is_deleted());
        });
    }
1589
    #[gpui::test]
    async fn test_rescan_with_gitignore(mut cx: gpui::TestAppContext) {
        // Repo-like tree: the root `.gitignore` ignores `ignored-dir`.
        let dir = temp_tree(json!({
            ".git": {},
            ".gitignore": "ignored-dir\n",
            "tracked-dir": {
                "tracked-file1": "tracked contents",
            },
            "ignored-dir": {
                "ignored-file1": "ignored contents",
            }
        }));

        let tree = cx.add_model(|cx| Worktree::new(dir.path(), cx));
        cx.read(|cx| tree.read(cx).scan_complete()).await;
        tree.flush_fs_events(&cx).await;
        // The initial scan applies the gitignore to existing entries.
        cx.read(|cx| {
            let tree = tree.read(cx);
            let tracked = tree.entry_for_path("tracked-dir/tracked-file1").unwrap();
            let ignored = tree.entry_for_path("ignored-dir/ignored-file1").unwrap();
            assert_eq!(tracked.is_ignored(), false);
            assert_eq!(ignored.is_ignored(), true);
        });

        // Files created after the initial scan must inherit the ignore status
        // of their parent directories; `.git` itself is expected ignored.
        fs::write(dir.path().join("tracked-dir/tracked-file2"), "").unwrap();
        fs::write(dir.path().join("ignored-dir/ignored-file2"), "").unwrap();
        tree.flush_fs_events(&cx).await;
        cx.read(|cx| {
            let tree = tree.read(cx);
            let dot_git = tree.entry_for_path(".git").unwrap();
            let tracked = tree.entry_for_path("tracked-dir/tracked-file2").unwrap();
            let ignored = tree.entry_for_path("ignored-dir/ignored-file2").unwrap();
            assert_eq!(tracked.is_ignored(), false);
            assert_eq!(ignored.is_ignored(), true);
            assert_eq!(dot_git.is_ignored(), true);
        });
    }
1627
1628 #[test]
1629 fn test_path_is_pending() {
1630 let mut snapshot = Snapshot {
1631 id: 0,
1632 scan_id: 0,
1633 abs_path: Path::new("").into(),
1634 entries: Default::default(),
1635 ignores: Default::default(),
1636 root_name: Default::default(),
1637 };
1638
1639 snapshot.entries.edit(
1640 vec![
1641 Edit::Insert(Entry {
1642 path: Path::new("b").into(),
1643 kind: EntryKind::Dir,
1644 inode: 0,
1645 is_ignored: false,
1646 is_symlink: false,
1647 }),
1648 Edit::Insert(Entry {
1649 path: Path::new("b/a").into(),
1650 kind: EntryKind::Dir,
1651 inode: 0,
1652 is_ignored: false,
1653 is_symlink: false,
1654 }),
1655 Edit::Insert(Entry {
1656 path: Path::new("b/c").into(),
1657 kind: EntryKind::PendingDir,
1658 inode: 0,
1659 is_ignored: false,
1660 is_symlink: false,
1661 }),
1662 Edit::Insert(Entry {
1663 path: Path::new("b/e").into(),
1664 kind: EntryKind::Dir,
1665 inode: 0,
1666 is_ignored: false,
1667 is_symlink: false,
1668 }),
1669 ],
1670 &(),
1671 );
1672
1673 assert!(!snapshot.path_is_pending("b/a"));
1674 assert!(!snapshot.path_is_pending("b/b"));
1675 assert!(snapshot.path_is_pending("b/c"));
1676 assert!(snapshot.path_is_pending("b/c/x"));
1677 assert!(!snapshot.path_is_pending("b/d"));
1678 assert!(!snapshot.path_is_pending("b/e"));
1679 }
1680
1681 #[test]
1682 fn test_mounted_volume_paths() {
1683 let paths = mounted_volume_paths();
1684 assert!(paths.contains(&"/".into()));
1685 }
1686
    #[test]
    fn test_random() {
        // Iteration counts and the seed are overridable via env vars so a
        // failing run can be reproduced exactly (set SEED to re-run one).
        let iterations = env::var("ITERATIONS")
            .map(|i| i.parse().unwrap())
            .unwrap_or(100);
        let operations = env::var("OPERATIONS")
            .map(|o| o.parse().unwrap())
            .unwrap_or(40);
        let initial_entries = env::var("INITIAL_ENTRIES")
            .map(|o| o.parse().unwrap())
            .unwrap_or(20);
        let seeds = if let Ok(seed) = env::var("SEED").map(|s| s.parse().unwrap()) {
            seed..seed + 1
        } else {
            0..iterations
        };

        for seed in seeds {
            dbg!(seed);
            let mut rng = StdRng::seed_from_u64(seed);

            // Generate a random initial tree on disk and scan it.
            let root_dir = tempdir::TempDir::new(&format!("test-{}", seed)).unwrap();
            for _ in 0..initial_entries {
                randomly_mutate_tree(root_dir.path(), 1.0, &mut rng).unwrap();
            }
            log::info!("Generated initial tree");

            let (notify_tx, _notify_rx) = smol::channel::unbounded();
            let mut scanner = BackgroundScanner::new(
                Arc::new(Mutex::new(Snapshot {
                    id: 0,
                    scan_id: 0,
                    abs_path: root_dir.path().into(),
                    entries: Default::default(),
                    ignores: Default::default(),
                    root_name: Default::default(),
                })),
                Arc::new(Mutex::new(Default::default())),
                notify_tx,
                0,
            );
            scanner.scan_dirs().unwrap();
            scanner.snapshot().check_invariants();

            // Interleave random FS mutations with delivery of random-sized
            // batches of the resulting events, checking invariants each time.
            let mut events = Vec::new();
            let mut mutations_len = operations;
            while mutations_len > 1 {
                if !events.is_empty() && rng.gen_bool(0.4) {
                    let len = rng.gen_range(0..=events.len());
                    let to_deliver = events.drain(0..len).collect::<Vec<_>>();
                    log::info!("Delivering events: {:#?}", to_deliver);
                    scanner.process_events(to_deliver);
                    scanner.snapshot().check_invariants();
                } else {
                    events.extend(randomly_mutate_tree(root_dir.path(), 0.6, &mut rng).unwrap());
                    mutations_len -= 1;
                }
            }
            // Deliver whatever events remain so the scanner catches up.
            log::info!("Quiescing: {:#?}", events);
            scanner.process_events(events);
            scanner.snapshot().check_invariants();

            // After all events are processed, the incrementally-updated
            // snapshot must agree with a from-scratch scan of the same dir.
            let (notify_tx, _notify_rx) = smol::channel::unbounded();
            let mut new_scanner = BackgroundScanner::new(
                Arc::new(Mutex::new(Snapshot {
                    id: 0,
                    scan_id: 0,
                    abs_path: root_dir.path().into(),
                    entries: Default::default(),
                    ignores: Default::default(),
                    root_name: Default::default(),
                })),
                Arc::new(Mutex::new(Default::default())),
                notify_tx,
                1,
            );
            new_scanner.scan_dirs().unwrap();
            assert_eq!(scanner.snapshot().to_vec(), new_scanner.snapshot().to_vec());
        }
    }
1767
    /// Apply one random mutation to the tree rooted at `root_path` — create a
    /// file/dir, write a `.gitignore`, rename, or delete — and return the
    /// synthetic FS events a watcher would have emitted for it.
    fn randomly_mutate_tree(
        root_path: &Path,
        insertion_probability: f64,
        rng: &mut impl Rng,
    ) -> Result<Vec<fsevent::Event>> {
        let root_path = root_path.canonicalize().unwrap();
        let (dirs, files) = read_dir_recursive(root_path.clone());

        let mut events = Vec::new();
        let mut record_event = |path: PathBuf| {
            events.push(fsevent::Event {
                event_id: SystemTime::now()
                    .duration_since(UNIX_EPOCH)
                    .unwrap()
                    .as_secs(),
                flags: fsevent::StreamFlags::empty(),
                path,
            });
        };

        // Insertion branch; also forced when the tree is empty (only the root
        // dir, no files), since no other mutation would apply.
        if (files.is_empty() && dirs.len() == 1) || rng.gen_bool(insertion_probability) {
            let path = dirs.choose(rng).unwrap();
            let new_path = path.join(gen_name(rng));

            if rng.gen() {
                log::info!("Creating dir {:?}", new_path.strip_prefix(root_path)?);
                fs::create_dir(&new_path)?;
            } else {
                log::info!("Creating file {:?}", new_path.strip_prefix(root_path)?);
                fs::write(&new_path, "")?;
            }
            record_event(new_path);
        } else if rng.gen_bool(0.05) {
            // Occasionally write a `.gitignore` listing a random subset of
            // the files and directories beneath a random directory.
            let ignore_dir_path = dirs.choose(rng).unwrap();
            let ignore_path = ignore_dir_path.join(&*GITIGNORE);

            let (subdirs, subfiles) = read_dir_recursive(ignore_dir_path.clone());
            let files_to_ignore = {
                let len = rng.gen_range(0..=subfiles.len());
                subfiles.choose_multiple(rng, len)
            };
            let dirs_to_ignore = {
                let len = rng.gen_range(0..subdirs.len());
                subdirs.choose_multiple(rng, len)
            };

            let mut ignore_contents = String::new();
            for path_to_ignore in files_to_ignore.chain(dirs_to_ignore) {
                write!(
                    ignore_contents,
                    "{}\n",
                    path_to_ignore
                        .strip_prefix(&ignore_dir_path)?
                        .to_str()
                        .unwrap()
                )
                .unwrap();
            }
            log::info!(
                "Creating {:?} with contents:\n{}",
                ignore_path.strip_prefix(&root_path)?,
                ignore_contents
            );
            fs::write(&ignore_path, ignore_contents).unwrap();
            record_event(ignore_path);
        } else {
            // Pick an existing file, or a dir other than the root, to either
            // rename or delete.
            let old_path = {
                let file_path = files.choose(rng);
                let dir_path = dirs[1..].choose(rng);
                file_path.into_iter().chain(dir_path).choose(rng).unwrap()
            };

            let is_rename = rng.gen();
            if is_rename {
                // Move into a directory that isn't inside the moved path.
                let new_path_parent = dirs
                    .iter()
                    .filter(|d| !d.starts_with(old_path))
                    .choose(rng)
                    .unwrap();

                // Sometimes replace an existing directory wholesale instead
                // of creating a new name under the target parent.
                let overwrite_existing_dir =
                    !old_path.starts_with(&new_path_parent) && rng.gen_bool(0.3);
                let new_path = if overwrite_existing_dir {
                    fs::remove_dir_all(&new_path_parent).ok();
                    new_path_parent.to_path_buf()
                } else {
                    new_path_parent.join(gen_name(rng))
                };

                log::info!(
                    "Renaming {:?} to {}{:?}",
                    old_path.strip_prefix(&root_path)?,
                    if overwrite_existing_dir {
                        "overwrite "
                    } else {
                        ""
                    },
                    new_path.strip_prefix(&root_path)?
                );
                fs::rename(&old_path, &new_path)?;
                record_event(old_path.clone());
                record_event(new_path);
            } else if old_path.is_dir() {
                let (dirs, files) = read_dir_recursive(old_path.clone());

                log::info!("Deleting dir {:?}", old_path.strip_prefix(&root_path)?);
                fs::remove_dir_all(&old_path).unwrap();
                // A recursive delete emits one event per removed entry.
                for file in files {
                    record_event(file);
                }
                for dir in dirs {
                    record_event(dir);
                }
            } else {
                log::info!("Deleting file {:?}", old_path.strip_prefix(&root_path)?);
                fs::remove_file(old_path).unwrap();
                record_event(old_path.clone());
            }
        }

        Ok(events)
    }
1890
1891 fn read_dir_recursive(path: PathBuf) -> (Vec<PathBuf>, Vec<PathBuf>) {
1892 let child_entries = fs::read_dir(&path).unwrap();
1893 let mut dirs = vec![path];
1894 let mut files = Vec::new();
1895 for child_entry in child_entries {
1896 let child_path = child_entry.unwrap().path();
1897 if child_path.is_dir() {
1898 let (child_dirs, child_files) = read_dir_recursive(child_path);
1899 dirs.extend(child_dirs);
1900 files.extend(child_files);
1901 } else {
1902 files.push(child_path);
1903 }
1904 }
1905 (dirs, files)
1906 }
1907
1908 fn gen_name(rng: &mut impl Rng) -> String {
1909 (0..6)
1910 .map(|_| rng.sample(rand::distributions::Alphanumeric))
1911 .map(char::from)
1912 .collect()
1913 }
1914
    impl Snapshot {
        /// Test-only consistency checks over a snapshot's entry tree.
        fn check_invariants(&self) {
            // The file iterators must visit exactly the file entries, in tree
            // order, with the "visible" iterator skipping ignored files.
            let mut files = self.files(0);
            let mut visible_files = self.visible_files(0);
            for entry in self.entries.cursor::<(), ()>() {
                if entry.is_file() {
                    assert_eq!(files.next().unwrap().inode(), entry.inode);
                    if !entry.is_ignored {
                        assert_eq!(visible_files.next().unwrap().inode(), entry.inode);
                    }
                }
            }
            assert!(files.next().is_none());
            assert!(visible_files.next().is_none());

            // Walk the tree through `child_entries`. NOTE: despite the
            // variable name, the stack discipline here (children inserted at
            // the pre-insertion stack top, popped from the end) produces a
            // depth-first preorder, which is why it can be compared to the
            // flat entry order below.
            let mut bfs_paths = Vec::new();
            let mut stack = vec![Path::new("")];
            while let Some(path) = stack.pop() {
                bfs_paths.push(path);
                let ix = stack.len();
                for child_entry in self.child_entries(path) {
                    stack.insert(ix, child_entry.path());
                }
            }

            let dfs_paths = self
                .entries
                .cursor::<(), ()>()
                .map(|e| e.path().as_ref())
                .collect::<Vec<_>>();
            assert_eq!(bfs_paths, dfs_paths);

            // Every registered ignore must still have both its directory and
            // that directory's `.gitignore` file present in the tree.
            for (ignore_parent_path, _) in &self.ignores {
                assert!(self.entry_for_path(ignore_parent_path).is_some());
                assert!(self
                    .entry_for_path(ignore_parent_path.join(&*GITIGNORE))
                    .is_some());
            }
        }

        /// All entries as `(path, inode, is_ignored)` triples sorted by path,
        /// for order-insensitive comparison of two snapshots.
        fn to_vec(&self) -> Vec<(&Path, u64, bool)> {
            let mut paths = Vec::new();
            for entry in self.entries.cursor::<(), ()>() {
                paths.push((entry.path().as_ref(), entry.inode(), entry.is_ignored()));
            }
            paths.sort_by(|a, b| a.0.cmp(&b.0));
            paths
        }
    }
1964}