1mod char_bag;
2mod fuzzy;
3mod ignore;
4
5use self::{char_bag::CharBag, ignore::IgnoreStack};
6use crate::{
7 editor::{History, Rope},
8 rpc::{self, proto, ConnectionId},
9 sum_tree::{self, Cursor, Edit, SumTree},
10 util::Bias,
11};
12use ::ignore::gitignore::Gitignore;
13use anyhow::{Context, Result};
14pub use fuzzy::{match_paths, PathMatch};
15use gpui::{scoped_pool, AppContext, Entity, ModelContext, ModelHandle, MutableAppContext, Task};
16use lazy_static::lazy_static;
17use parking_lot::Mutex;
18use postage::{
19 prelude::{Sink, Stream},
20 watch,
21};
22use smol::{channel::Sender, lock::Mutex as AsyncMutex};
23use std::{
24 cmp,
25 collections::{HashMap, HashSet},
26 ffi::{CStr, OsStr, OsString},
27 fmt, fs,
28 future::Future,
29 hash::Hash,
30 io::{self, Read, Write},
31 ops::Deref,
32 os::unix::{ffi::OsStrExt, fs::MetadataExt},
33 path::{Path, PathBuf},
34 sync::{
35 atomic::{AtomicU64, Ordering::SeqCst},
36 Arc, Weak,
37 },
38 time::{Duration, UNIX_EPOCH},
39};
40
lazy_static! {
    // Cached `.gitignore` file name, compared against directory-entry names
    // when deciding which paths carry ignore rules.
    static ref GITIGNORE: &'static OsStr = OsStr::new(".gitignore");
}
44
/// Progress of the background filesystem scan, broadcast from the scanner
/// thread to the worktree.
#[derive(Clone, Debug)]
enum ScanState {
    // No scan in progress; the snapshot is up to date.
    Idle,
    // The initial scan or a rescan triggered by fs events is running.
    Scanning,
    // Scanning failed with an I/O error (shared so the state stays `Clone`).
    Err(Arc<io::Error>),
}
51
/// A tree of files either rooted on the local filesystem (`Local`) or
/// replicated from a collaborator over RPC (`Remote`).
pub enum Worktree {
    Local(LocalWorktree),
    Remote(RemoteWorktree),
}
56
impl Entity for Worktree {
    // Worktree events carry no payload; observers re-read the snapshot.
    type Event = ();
}
60
61impl Worktree {
62 pub fn local(path: impl Into<Arc<Path>>, cx: &mut ModelContext<Worktree>) -> Self {
63 Worktree::Local(LocalWorktree::new(path, cx))
64 }
65
66 pub fn remote(
67 id: usize,
68 worktree: proto::Worktree,
69 rpc: rpc::Client,
70 connection_id: ConnectionId,
71 cx: &mut ModelContext<Worktree>,
72 ) -> Self {
73 Worktree::Remote(RemoteWorktree::new(id, worktree, rpc, connection_id, cx))
74 }
75
76 pub fn as_local(&self) -> Option<&LocalWorktree> {
77 if let Worktree::Local(worktree) = self {
78 Some(worktree)
79 } else {
80 None
81 }
82 }
83
84 pub fn as_local_mut(&mut self) -> Option<&mut LocalWorktree> {
85 if let Worktree::Local(worktree) = self {
86 Some(worktree)
87 } else {
88 None
89 }
90 }
91
92 pub fn snapshot(&self) -> Snapshot {
93 match self {
94 Worktree::Local(worktree) => worktree.snapshot(),
95 Worktree::Remote(worktree) => worktree.snapshot.clone(),
96 }
97 }
98
99 pub fn load_history(
100 &self,
101 path: &Path,
102 cx: &AppContext,
103 ) -> impl Future<Output = Result<History>> {
104 match self {
105 Worktree::Local(worktree) => worktree.load_history(path, cx),
106 Worktree::Remote(worktree) => todo!(),
107 }
108 }
109
110 pub fn save(
111 &self,
112 path: &Path,
113 content: Rope,
114 cx: &AppContext,
115 ) -> impl Future<Output = Result<()>> {
116 match self {
117 Worktree::Local(worktree) => worktree.save(path, content, cx),
118 Worktree::Remote(worktree) => todo!(),
119 }
120 }
121}
122
123impl Deref for Worktree {
124 type Target = Snapshot;
125
126 fn deref(&self) -> &Self::Target {
127 match self {
128 Worktree::Local(worktree) => &worktree.snapshot,
129 Worktree::Remote(worktree) => &worktree.snapshot,
130 }
131 }
132}
133
/// A worktree rooted on the local filesystem, kept up to date by a
/// `BackgroundScanner` running on a dedicated thread.
pub struct LocalWorktree {
    snapshot: Snapshot, // foreground copy, refreshed from `background_snapshot`
    background_snapshot: Arc<Mutex<Snapshot>>, // written by the scanner thread
    handles: Arc<Mutex<HashMap<Arc<Path>, Weak<Mutex<FileHandleState>>>>>, // live file handles by path
    next_handle_id: AtomicU64,
    scan_state: (watch::Sender<ScanState>, watch::Receiver<ScanState>),
    _event_stream_handle: fsevent::Handle, // keeps the fs-event stream alive
    poll_scheduled: bool, // whether a deferred snapshot poll is already queued
    rpc: Option<rpc::Client>, // set once the worktree is shared
}
144
/// A reference to an open file within a worktree. Clones share the same
/// mutable state, which the worktree updates as the file moves or changes.
#[derive(Clone)]
pub struct FileHandle {
    worktree: ModelHandle<Worktree>,
    state: Arc<Mutex<FileHandleState>>,
}
150
/// Shared mutable state behind a `FileHandle`.
#[derive(Clone)]
struct FileHandleState {
    path: Arc<Path>,    // path relative to the worktree root
    is_deleted: bool,   // file no longer exists on disk
    mtime: Duration,    // last-known modification time since the Unix epoch
    id: u64,            // handle id assigned by the worktree
    rpc: Option<(ConnectionId, rpc::Client)>, // present when mirroring a remote file
}
159
impl Drop for FileHandleState {
    fn drop(&mut self) {
        // If this handle mirrors a remote file, tell the server to release its
        // side. Drop can't await, so the close message is sent on a detached
        // background task; failures are only logged.
        if let Some((connection_id, rpc)) = self.rpc.take() {
            let id = self.id;
            smol::spawn(async move {
                if let Err(error) = rpc.send(connection_id, proto::CloseFile { id }).await {
                    log::warn!("error closing file {}: {}", id, error);
                }
            })
            .detach();
        }
    }
}
173
174impl LocalWorktree {
175 fn new(path: impl Into<Arc<Path>>, cx: &mut ModelContext<Worktree>) -> Self {
176 let abs_path = path.into();
177 let (scan_state_tx, scan_state_rx) = smol::channel::unbounded();
178 let id = cx.model_id();
179 let snapshot = Snapshot {
180 id,
181 scan_id: 0,
182 abs_path,
183 root_name: Default::default(),
184 ignores: Default::default(),
185 entries: Default::default(),
186 };
187 let (event_stream, event_stream_handle) =
188 fsevent::EventStream::new(&[snapshot.abs_path.as_ref()], Duration::from_millis(100));
189
190 let background_snapshot = Arc::new(Mutex::new(snapshot.clone()));
191 let handles = Arc::new(Mutex::new(Default::default()));
192
193 let tree = Self {
194 snapshot,
195 background_snapshot: background_snapshot.clone(),
196 handles: handles.clone(),
197 next_handle_id: Default::default(),
198 scan_state: watch::channel_with(ScanState::Scanning),
199 _event_stream_handle: event_stream_handle,
200 poll_scheduled: false,
201 rpc: None,
202 };
203
204 std::thread::spawn(move || {
205 let scanner = BackgroundScanner::new(background_snapshot, handles, scan_state_tx, id);
206 scanner.run(event_stream)
207 });
208
209 cx.spawn(|this, mut cx| {
210 let this = this.downgrade();
211 async move {
212 while let Ok(scan_state) = scan_state_rx.recv().await {
213 let alive = cx.update(|cx| {
214 if let Some(handle) = this.upgrade(&cx) {
215 handle.update(cx, |this, cx| {
216 if let Worktree::Local(worktree) = this {
217 worktree.observe_scan_state(scan_state, cx)
218 } else {
219 unreachable!()
220 }
221 });
222 true
223 } else {
224 false
225 }
226 });
227
228 if !alive {
229 break;
230 }
231 }
232 }
233 })
234 .detach();
235
236 tree
237 }
238
239 pub fn scan_complete(&self) -> impl Future<Output = ()> {
240 let mut scan_state_rx = self.scan_state.1.clone();
241 async move {
242 let mut scan_state = Some(scan_state_rx.borrow().clone());
243 while let Some(ScanState::Scanning) = scan_state {
244 scan_state = scan_state_rx.recv().await;
245 }
246 }
247 }
248
249 fn observe_scan_state(&mut self, scan_state: ScanState, cx: &mut ModelContext<Worktree>) {
250 let _ = self.scan_state.0.blocking_send(scan_state);
251 self.poll_entries(cx);
252 }
253
254 fn poll_entries(&mut self, cx: &mut ModelContext<Worktree>) {
255 self.snapshot = self.background_snapshot.lock().clone();
256 cx.notify();
257
258 if self.is_scanning() && !self.poll_scheduled {
259 cx.spawn(|this, mut cx| async move {
260 smol::Timer::after(Duration::from_millis(100)).await;
261 this.update(&mut cx, |this, cx| {
262 if let Worktree::Local(worktree) = this {
263 worktree.poll_scheduled = false;
264 worktree.poll_entries(cx);
265 } else {
266 unreachable!()
267 }
268 })
269 })
270 .detach();
271 self.poll_scheduled = true;
272 }
273 }
274
275 fn is_scanning(&self) -> bool {
276 if let ScanState::Scanning = *self.scan_state.1.borrow() {
277 true
278 } else {
279 false
280 }
281 }
282
283 pub fn snapshot(&self) -> Snapshot {
284 self.snapshot.clone()
285 }
286
287 pub fn abs_path(&self) -> &Path {
288 self.snapshot.abs_path.as_ref()
289 }
290
291 pub fn contains_abs_path(&self, path: &Path) -> bool {
292 path.starts_with(&self.snapshot.abs_path)
293 }
294
295 fn absolutize(&self, path: &Path) -> PathBuf {
296 if path.file_name().is_some() {
297 self.snapshot.abs_path.join(path)
298 } else {
299 self.snapshot.abs_path.to_path_buf()
300 }
301 }
302
303 pub fn load_history(
304 &self,
305 path: &Path,
306 cx: &AppContext,
307 ) -> impl Future<Output = Result<History>> {
308 let path = path.to_path_buf();
309 let abs_path = self.absolutize(&path);
310 cx.background_executor().spawn(async move {
311 let mut file = fs::File::open(&abs_path)?;
312 let mut base_text = String::new();
313 file.read_to_string(&mut base_text)?;
314 Ok(History::new(Arc::from(base_text)))
315 })
316 }
317
318 pub fn save(&self, path: &Path, content: Rope, cx: &AppContext) -> Task<Result<()>> {
319 let handles = self.handles.clone();
320 let path = path.to_path_buf();
321 let abs_path = self.absolutize(&path);
322 cx.background_executor().spawn(async move {
323 let buffer_size = content.summary().bytes.min(10 * 1024);
324 let file = fs::File::create(&abs_path)?;
325 let mut writer = io::BufWriter::with_capacity(buffer_size, &file);
326 for chunk in content.chunks() {
327 writer.write(chunk.as_bytes())?;
328 }
329 writer.flush()?;
330
331 if let Some(handle) = handles.lock().get(&*path).and_then(Weak::upgrade) {
332 let mut handle = handle.lock();
333 handle.mtime = file.metadata()?.modified()?.duration_since(UNIX_EPOCH)?;
334 handle.is_deleted = false;
335 }
336
337 Ok(())
338 })
339 }
340
341 pub fn share(
342 &mut self,
343 client: rpc::Client,
344 connection_id: ConnectionId,
345 cx: &mut ModelContext<Worktree>,
346 ) -> Task<anyhow::Result<(u64, String)>> {
347 self.rpc = Some(client.clone());
348 let root_name = self.root_name.clone();
349 let snapshot = self.snapshot();
350 let handle = cx.handle();
351 cx.spawn(|_this, cx| async move {
352 let entries = cx
353 .background_executor()
354 .spawn(async move {
355 snapshot
356 .entries
357 .cursor::<(), ()>()
358 .map(|entry| proto::Entry {
359 is_dir: entry.is_dir(),
360 path: entry.path.to_string_lossy().to_string(),
361 inode: entry.inode,
362 is_symlink: entry.is_symlink,
363 is_ignored: entry.is_ignored,
364 })
365 .collect()
366 })
367 .await;
368
369 let share_response = client
370 .request(
371 connection_id,
372 proto::ShareWorktree {
373 worktree: Some(proto::Worktree { root_name, entries }),
374 },
375 )
376 .await?;
377
378 client.state.lock().await.shared_worktrees.insert(handle);
379
380 log::info!("sharing worktree {:?}", share_response);
381 Ok((share_response.worktree_id, share_response.access_token))
382 })
383 }
384}
385
386impl Deref for LocalWorktree {
387 type Target = Snapshot;
388
389 fn deref(&self) -> &Self::Target {
390 &self.snapshot
391 }
392}
393
394impl fmt::Debug for LocalWorktree {
395 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
396 self.snapshot.fmt(f)
397 }
398}
399
/// A read-only replica of another peer's worktree, populated from entries
/// received over RPC.
pub struct RemoteWorktree {
    id: usize,
    snapshot: Snapshot,
    // Remote file handles by path; the async mutex guards handle creation.
    handles: Arc<Mutex<HashMap<Arc<Path>, Arc<AsyncMutex<Weak<Mutex<FileHandleState>>>>>>>,
    rpc: rpc::Client,
    connection_id: ConnectionId,
}
407
impl RemoteWorktree {
    /// Builds a local replica from the entry list received over RPC.
    fn new(
        id: usize,
        worktree: proto::Worktree,
        rpc: rpc::Client,
        connection_id: ConnectionId,
        cx: &mut ModelContext<Worktree>,
    ) -> Self {
        // Character bag of the lowercased root name; each file's bag extends
        // it with the characters of its own path, for fuzzy matching.
        let root_char_bag: CharBag = worktree
            .root_name
            .chars()
            .map(|c| c.to_ascii_lowercase())
            .collect();
        let mut entries = SumTree::new();
        // NOTE(review): `extend` appears to assume entries arrive in path
        // order from the sharing side — confirm against `LocalWorktree::share`.
        entries.extend(
            worktree.entries.into_iter().map(|entry| {
                let kind = if entry.is_dir {
                    EntryKind::Dir
                } else {
                    let mut char_bag = root_char_bag.clone();
                    char_bag.extend(entry.path.chars().map(|c| c.to_ascii_lowercase()));
                    EntryKind::File(char_bag)
                };
                Entry {
                    kind,
                    path: Path::new(&entry.path).into(),
                    inode: entry.inode,
                    is_symlink: entry.is_symlink,
                    is_ignored: entry.is_ignored,
                }
            }),
            &(),
        );
        // Remote worktrees have no local filesystem root, hence the empty path.
        let snapshot = Snapshot {
            id,
            scan_id: 0,
            abs_path: Path::new("").into(),
            root_name: worktree.root_name,
            ignores: Default::default(),
            entries,
        };
        Self {
            id,
            snapshot,
            handles: Default::default(),
            rpc,
            connection_id,
        }
    }
}
458
/// A point-in-time view of a worktree's entries. Snapshots are cloned freely
/// throughout this file (foreground/background copies, sharing).
#[derive(Clone)]
pub struct Snapshot {
    id: usize,       // owning worktree's model id
    scan_id: usize,  // incremented on each (re)scan; stamps the ignore cache
    abs_path: Arc<Path>, // absolute root path (empty for remote worktrees)
    root_name: String,   // root file name, with trailing '/' when a directory
    ignores: HashMap<Arc<Path>, (Arc<Gitignore>, usize)>, // dir -> (rules, scan_id)
    entries: SumTree<Entry>, // all entries, keyed by path
}
468
impl Snapshot {
    /// Total number of file entries, including ignored ones.
    pub fn file_count(&self) -> usize {
        self.entries.summary().file_count
    }

    /// Number of file entries that are not gitignored.
    pub fn visible_file_count(&self) -> usize {
        self.entries.summary().visible_file_count
    }

    /// Iterates over all files, starting at the `start`-th file.
    pub fn files(&self, start: usize) -> FileIter {
        FileIter::all(self, start)
    }

    /// Iterates over the paths of all entries, skipping the root entry.
    pub fn paths(&self) -> impl Iterator<Item = &Arc<Path>> {
        self.entries
            .cursor::<(), ()>()
            .skip(1)
            .map(|entry| entry.path())
    }

    /// Iterates over non-ignored files, starting at the `start`-th visible file.
    pub fn visible_files(&self, start: usize) -> FileIter {
        FileIter::visible(self, start)
    }

    /// Iterates over the direct children of the directory at `path`.
    fn child_entries<'a>(&'a self, path: &'a Path) -> ChildEntriesIter<'a> {
        ChildEntriesIter::new(path, self)
    }

    /// The root entry. Panics if the snapshot has no entries yet.
    pub fn root_entry(&self) -> &Entry {
        self.entry_for_path("").unwrap()
    }

    /// Returns the filename of the snapshot's root, plus a trailing slash if the snapshot's root is
    /// a directory.
    pub fn root_name(&self) -> &str {
        &self.root_name
    }

    /// Whether `path` lies in a directory that has been discovered but whose
    /// contents have not been scanned yet (or the tree is still empty).
    fn path_is_pending(&self, path: impl AsRef<Path>) -> bool {
        if self.entries.is_empty() {
            return true;
        }
        let path = path.as_ref();
        let mut cursor = self.entries.cursor::<_, ()>();
        if cursor.seek(&PathSearch::Exact(path), Bias::Left, &()) {
            let entry = cursor.item().unwrap();
            if entry.path.as_ref() == path {
                return matches!(entry.kind, EntryKind::PendingDir);
            }
        }
        // No exact entry: pending if the nearest preceding entry is a pending
        // directory that is an ancestor of `path`.
        if let Some(entry) = cursor.prev_item() {
            matches!(entry.kind, EntryKind::PendingDir) && path.starts_with(entry.path.as_ref())
        } else {
            false
        }
    }

    /// Looks up the entry at exactly `path`, if any.
    fn entry_for_path(&self, path: impl AsRef<Path>) -> Option<&Entry> {
        let mut cursor = self.entries.cursor::<_, ()>();
        if cursor.seek(&PathSearch::Exact(path.as_ref()), Bias::Left, &()) {
            cursor.item()
        } else {
            None
        }
    }

    /// The inode of the entry at `path`, if one exists.
    pub fn inode_for_path(&self, path: impl AsRef<Path>) -> Option<u64> {
        self.entry_for_path(path.as_ref()).map(|e| e.inode())
    }

    /// Inserts (or replaces) a single entry. Inserting a `.gitignore` file
    /// also parses it and caches the rules for its containing directory.
    fn insert_entry(&mut self, entry: Entry) {
        if !entry.is_dir() && entry.path().file_name() == Some(&GITIGNORE) {
            let (ignore, err) = Gitignore::new(self.abs_path.join(entry.path()));
            if let Some(err) = err {
                log::error!("error in ignore file {:?} - {:?}", entry.path(), err);
            }

            let ignore_dir_path = entry.path().parent().unwrap();
            self.ignores
                .insert(ignore_dir_path.into(), (Arc::new(ignore), self.scan_id));
        }
        self.entries.insert(entry, &());
    }

    /// Records the scanned children of `parent_path`, transitioning it from
    /// `PendingDir` to `Dir` and caching its `.gitignore`, if one was found.
    fn populate_dir(
        &mut self,
        parent_path: Arc<Path>,
        entries: impl IntoIterator<Item = Entry>,
        ignore: Option<Arc<Gitignore>>,
    ) {
        let mut edits = Vec::new();

        let mut parent_entry = self
            .entries
            .get(&PathKey(parent_path.clone()), &())
            .unwrap()
            .clone();
        if let Some(ignore) = ignore {
            self.ignores.insert(parent_path, (ignore, self.scan_id));
        }
        // Only a pending directory may be populated; anything else indicates
        // a scanner bookkeeping bug.
        if matches!(parent_entry.kind, EntryKind::PendingDir) {
            parent_entry.kind = EntryKind::Dir;
        } else {
            unreachable!();
        }
        edits.push(Edit::Insert(parent_entry));

        for entry in entries {
            edits.push(Edit::Insert(entry));
        }
        self.entries.edit(edits, &());
    }

    /// Removes the entry at `path` together with its entire subtree.
    fn remove_path(&mut self, path: &Path) {
        let new_entries = {
            let mut cursor = self.entries.cursor::<_, ()>();
            // Keep everything before `path`, skip the subtree rooted at
            // `path`, then append everything after it.
            let mut new_entries = cursor.slice(&PathSearch::Exact(path), Bias::Left, &());
            cursor.seek_forward(&PathSearch::Successor(path), Bias::Left, &());
            new_entries.push_tree(cursor.suffix(&()), &());
            new_entries
        };
        self.entries = new_entries;

        // Removing a `.gitignore` invalidates the cached rules for its parent
        // directory; stamp the cache entry with the current scan id.
        if path.file_name() == Some(&GITIGNORE) {
            if let Some((_, scan_id)) = self.ignores.get_mut(path.parent().unwrap()) {
                *scan_id = self.scan_id;
            }
        }
    }

    /// Builds the stack of gitignore rules applying to `path` by walking its
    /// ancestors from the root downward.
    fn ignore_stack_for_path(&self, path: &Path, is_dir: bool) -> Arc<IgnoreStack> {
        let mut new_ignores = Vec::new();
        for ancestor in path.ancestors().skip(1) {
            if let Some((ignore, _)) = self.ignores.get(ancestor) {
                new_ignores.push((ancestor, Some(ignore.clone())));
            } else {
                new_ignores.push((ancestor, None));
            }
        }

        let mut ignore_stack = IgnoreStack::none();
        for (parent_path, ignore) in new_ignores.into_iter().rev() {
            // If an ancestor directory is itself ignored, everything beneath
            // it is ignored; no further rules need to be consulted.
            if ignore_stack.is_path_ignored(&parent_path, true) {
                ignore_stack = IgnoreStack::all();
                break;
            } else if let Some(ignore) = ignore {
                ignore_stack = ignore_stack.append(Arc::from(parent_path), ignore);
            }
        }

        if ignore_stack.is_path_ignored(path, is_dir) {
            ignore_stack = IgnoreStack::all();
        }

        ignore_stack
    }
}
626
627impl fmt::Debug for Snapshot {
628 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
629 for entry in self.entries.cursor::<(), ()>() {
630 for _ in entry.path().ancestors().skip(1) {
631 write!(f, " ")?;
632 }
633 writeln!(f, "{:?} (inode: {})", entry.path(), entry.inode())?;
634 }
635 Ok(())
636 }
637}
638
impl FileHandle {
    /// The id assigned to this handle by its worktree.
    pub fn id(&self) -> u64 {
        self.state.lock().id
    }

    /// Returns this file's path relative to the root of its worktree.
    pub fn path(&self) -> Arc<Path> {
        self.state.lock().path.clone()
    }

    /// Returns the last component of this handle's absolute path. If this handle refers to the root
    /// of its worktree, then this method will return the name of the worktree itself.
    pub fn file_name<'a>(&'a self, cx: &'a AppContext) -> Option<OsString> {
        self.state
            .lock()
            .path
            .file_name()
            .or_else(|| Some(OsStr::new(self.worktree.read(cx).root_name())))
            .map(Into::into)
    }

    /// Whether the underlying file has been deleted from disk.
    pub fn is_deleted(&self) -> bool {
        self.state.lock().is_deleted
    }

    /// The file's last-known modification time since the Unix epoch.
    pub fn mtime(&self) -> Duration {
        self.state.lock().mtime
    }

    /// Whether the underlying file still exists on disk.
    pub fn exists(&self) -> bool {
        !self.is_deleted()
    }

    /// Loads this file's contents as an edit history via its worktree.
    pub fn load_history(&self, cx: &AppContext) -> impl Future<Output = Result<History>> {
        self.worktree.read(cx).load_history(&self.path(), cx)
    }

    /// Saves `content` to this file's path via its worktree.
    pub fn save(&self, content: Rope, cx: &AppContext) -> impl Future<Output = Result<()>> {
        let worktree = self.worktree.read(cx);
        worktree.save(&self.path(), content, cx)
    }

    /// The model id of the worktree this file belongs to.
    pub fn worktree_id(&self) -> usize {
        self.worktree.id()
    }

    /// A key that identifies this file: (worktree id, relative path).
    pub fn entry_id(&self) -> (usize, Arc<Path>) {
        (self.worktree.id(), self.path())
    }

    /// Invokes `callback` whenever this file's path or mtime changes, as
    /// observed through updates of the owning worktree model.
    pub fn observe_from_model<T: Entity>(
        &self,
        cx: &mut ModelContext<T>,
        mut callback: impl FnMut(&mut T, FileHandle, &mut ModelContext<T>) + 'static,
    ) {
        let mut prev_state = self.state.lock().clone();
        // Hold only a weak reference so observation doesn't keep the handle
        // (and its Drop-side RPC close) alive.
        let cur_state = Arc::downgrade(&self.state);
        cx.observe(&self.worktree, move |observer, worktree, cx| {
            if let Some(cur_state) = cur_state.upgrade() {
                let cur_state_unlocked = cur_state.lock();
                if cur_state_unlocked.mtime != prev_state.mtime
                    || cur_state_unlocked.path != prev_state.path
                {
                    prev_state = cur_state_unlocked.clone();
                    // Release the lock before invoking the callback, which may
                    // re-enter and lock the state itself.
                    drop(cur_state_unlocked);
                    callback(
                        observer,
                        FileHandle {
                            worktree,
                            state: cur_state,
                        },
                        cx,
                    );
                }
            }
        });
    }
}
717
718impl PartialEq for FileHandle {
719 fn eq(&self, other: &Self) -> bool {
720 self.worktree == other.worktree && self.state.lock().id == other.state.lock().id
721 }
722}
723
// `PartialEq` compares worktree handle and id, both total, so equality is a
// full equivalence relation.
impl Eq for FileHandle {}
725
726impl Hash for FileHandle {
727 fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
728 self.state.lock().id.hash(state);
729 self.worktree.hash(state);
730 }
731}
732
/// A single file or directory in a worktree, stored in the snapshot's sum
/// tree keyed by `path`.
#[derive(Clone, Debug)]
pub struct Entry {
    kind: EntryKind,   // file vs. (pending) directory
    path: Arc<Path>,   // path relative to the worktree root
    inode: u64,        // filesystem inode number
    is_symlink: bool,
    is_ignored: bool,  // matched by a `.gitignore` rule
}
741
/// Kind of a worktree entry. `PendingDir` marks a directory that has been
/// discovered but whose children have not been scanned yet; files carry a
/// `CharBag` used for fuzzy path matching.
#[derive(Clone, Debug)]
pub enum EntryKind {
    PendingDir,
    Dir,
    File(CharBag),
}
748
749impl Entry {
750 pub fn path(&self) -> &Arc<Path> {
751 &self.path
752 }
753
754 pub fn inode(&self) -> u64 {
755 self.inode
756 }
757
758 pub fn is_ignored(&self) -> bool {
759 self.is_ignored
760 }
761
762 fn is_dir(&self) -> bool {
763 matches!(self.kind, EntryKind::Dir | EntryKind::PendingDir)
764 }
765
766 fn is_file(&self) -> bool {
767 matches!(self.kind, EntryKind::File(_))
768 }
769}
770
771impl sum_tree::Item for Entry {
772 type Summary = EntrySummary;
773
774 fn summary(&self) -> Self::Summary {
775 let file_count;
776 let visible_file_count;
777 if self.is_file() {
778 file_count = 1;
779 if self.is_ignored {
780 visible_file_count = 0;
781 } else {
782 visible_file_count = 1;
783 }
784 } else {
785 file_count = 0;
786 visible_file_count = 0;
787 }
788
789 EntrySummary {
790 max_path: self.path().clone(),
791 file_count,
792 visible_file_count,
793 }
794 }
795}
796
797impl sum_tree::KeyedItem for Entry {
798 type Key = PathKey;
799
800 fn key(&self) -> Self::Key {
801 PathKey(self.path().clone())
802 }
803}
804
/// Aggregated statistics over a subtree of entries.
#[derive(Clone, Debug)]
pub struct EntrySummary {
    max_path: Arc<Path>,       // greatest path in the summarized range
    file_count: usize,         // number of file entries
    visible_file_count: usize, // number of non-ignored file entries
}
811
812impl Default for EntrySummary {
813 fn default() -> Self {
814 Self {
815 max_path: Arc::from(Path::new("")),
816 file_count: 0,
817 visible_file_count: 0,
818 }
819 }
820}
821
822impl sum_tree::Summary for EntrySummary {
823 type Context = ();
824
825 fn add_summary(&mut self, rhs: &Self, _: &()) {
826 self.max_path = rhs.max_path.clone();
827 self.file_count += rhs.file_count;
828 self.visible_file_count += rhs.visible_file_count;
829 }
830}
831
/// Ordering key for entries in the sum tree: the entry's path.
#[derive(Clone, Debug, Eq, PartialEq, Ord, PartialOrd)]
pub struct PathKey(Arc<Path>);
834
835impl Default for PathKey {
836 fn default() -> Self {
837 Self(Path::new("").into())
838 }
839}
840
841impl<'a> sum_tree::Dimension<'a, EntrySummary> for PathKey {
842 fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
843 self.0 = summary.max_path.clone();
844 }
845}
846
/// Seek target for cursor operations over the entry tree: either the entry
/// at exactly a path, or the first entry past that path's entire subtree.
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
enum PathSearch<'a> {
    Exact(&'a Path),
    Successor(&'a Path),
}
852
impl<'a> Ord for PathSearch<'a> {
    fn cmp(&self, other: &Self) -> cmp::Ordering {
        match (self, other) {
            (Self::Exact(a), Self::Exact(b)) => a.cmp(b),
            (Self::Successor(a), Self::Exact(b)) => {
                // `Successor(a)` sorts after every path within `a`'s subtree,
                // and otherwise compares like `a` itself.
                if b.starts_with(a) {
                    cmp::Ordering::Greater
                } else {
                    a.cmp(b)
                }
            }
            // Deliberately unimplemented: the cursor seeks in this file only
            // exercise the two combinations above.
            _ => todo!("not sure we need the other two cases"),
        }
    }
}
868
869impl<'a> PartialOrd for PathSearch<'a> {
870 fn partial_cmp(&self, other: &Self) -> Option<cmp::Ordering> {
871 Some(self.cmp(other))
872 }
873}
874
875impl<'a> Default for PathSearch<'a> {
876 fn default() -> Self {
877 Self::Exact(Path::new("").into())
878 }
879}
880
881impl<'a: 'b, 'b> sum_tree::Dimension<'a, EntrySummary> for PathSearch<'b> {
882 fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
883 *self = Self::Exact(summary.max_path.as_ref());
884 }
885}
886
/// Cursor dimension counting all file entries traversed so far.
#[derive(Copy, Clone, Default, Debug, Eq, PartialEq, Ord, PartialOrd)]
pub struct FileCount(usize);
889
890impl<'a> sum_tree::Dimension<'a, EntrySummary> for FileCount {
891 fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
892 self.0 += summary.file_count;
893 }
894}
895
/// Cursor dimension counting non-ignored file entries traversed so far.
#[derive(Copy, Clone, Default, Debug, Eq, PartialEq, Ord, PartialOrd)]
pub struct VisibleFileCount(usize);
898
899impl<'a> sum_tree::Dimension<'a, EntrySummary> for VisibleFileCount {
900 fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
901 self.0 += summary.visible_file_count;
902 }
903}
904
/// Mirrors the filesystem into a shared snapshot from a dedicated thread,
/// reporting progress through `notify`.
struct BackgroundScanner {
    snapshot: Arc<Mutex<Snapshot>>, // shared with the foreground worktree
    notify: Sender<ScanState>,      // scan progress channel
    handles: Arc<Mutex<HashMap<Arc<Path>, Weak<Mutex<FileHandleState>>>>>, // open file handles to keep in sync
    other_mount_paths: HashSet<PathBuf>, // mount points outside this worktree's filesystem
    thread_pool: scoped_pool::Pool,      // workers for parallel directory scans
    root_char_bag: CharBag,              // lowercased root name, for fuzzy matching
}
913
914impl BackgroundScanner {
915 fn new(
916 snapshot: Arc<Mutex<Snapshot>>,
917 handles: Arc<Mutex<HashMap<Arc<Path>, Weak<Mutex<FileHandleState>>>>>,
918 notify: Sender<ScanState>,
919 worktree_id: usize,
920 ) -> Self {
921 let mut scanner = Self {
922 root_char_bag: Default::default(),
923 snapshot,
924 notify,
925 handles,
926 other_mount_paths: Default::default(),
927 thread_pool: scoped_pool::Pool::new(16, format!("worktree-{}-scanner", worktree_id)),
928 };
929 scanner.update_other_mount_paths();
930 scanner
931 }
932
933 fn update_other_mount_paths(&mut self) {
934 let path = self.snapshot.lock().abs_path.clone();
935 self.other_mount_paths.clear();
936 self.other_mount_paths.extend(
937 mounted_volume_paths()
938 .into_iter()
939 .filter(|mount_path| !path.starts_with(mount_path)),
940 );
941 }
942
943 fn abs_path(&self) -> Arc<Path> {
944 self.snapshot.lock().abs_path.clone()
945 }
946
947 fn snapshot(&self) -> Snapshot {
948 self.snapshot.lock().clone()
949 }
950
    /// Performs the initial scan, then processes filesystem events until the
    /// worktree is dropped. Every `notify.send` fails only when the receiving
    /// side is gone, which is the signal to stop.
    fn run(mut self, event_stream: fsevent::EventStream) {
        if smol::block_on(self.notify.send(ScanState::Scanning)).is_err() {
            return;
        }

        if let Err(err) = self.scan_dirs() {
            if smol::block_on(self.notify.send(ScanState::Err(Arc::new(err)))).is_err() {
                return;
            }
        }

        if smol::block_on(self.notify.send(ScanState::Idle)).is_err() {
            return;
        }

        // Returning `false` from the callback stops the event stream.
        event_stream.run(move |events| {
            if smol::block_on(self.notify.send(ScanState::Scanning)).is_err() {
                return false;
            }

            if !self.process_events(events) {
                return false;
            }

            if smol::block_on(self.notify.send(ScanState::Idle)).is_err() {
                return false;
            }

            true
        });
    }
982
    /// Scans the worktree from its root: records the root entry, then (for a
    /// directory root) walks the tree in parallel using a work queue.
    fn scan_dirs(&mut self) -> io::Result<()> {
        // New scan generation; stamps the gitignore cache entries.
        self.snapshot.lock().scan_id += 1;

        let path: Arc<Path> = Arc::from(Path::new(""));
        let abs_path = self.abs_path();
        let metadata = fs::metadata(&abs_path)?;
        let inode = metadata.ino();
        let is_symlink = fs::symlink_metadata(&abs_path)?.file_type().is_symlink();
        let is_dir = metadata.file_type().is_dir();

        // After determining whether the root entry is a file or a directory, populate the
        // snapshot's "root name", which will be used for the purpose of fuzzy matching.
        let mut root_name = abs_path
            .file_name()
            .map_or(String::new(), |f| f.to_string_lossy().to_string());
        if is_dir {
            root_name.push('/');
        }
        self.root_char_bag = root_name.chars().map(|c| c.to_ascii_lowercase()).collect();
        self.snapshot.lock().root_name = root_name;

        if is_dir {
            // Insert the root as pending; `scan_dir` will populate it.
            self.snapshot.lock().insert_entry(Entry {
                kind: EntryKind::PendingDir,
                path: path.clone(),
                inode,
                is_symlink,
                is_ignored: false,
            });

            // Work queue of directories to scan. Each job carries a clone of
            // the sender so the channel stays open while jobs remain; dropping
            // our copy here lets workers terminate once the queue drains.
            let (tx, rx) = crossbeam_channel::unbounded();
            tx.send(ScanJob {
                abs_path: abs_path.to_path_buf(),
                path,
                ignore_stack: IgnoreStack::none(),
                scan_queue: tx.clone(),
            })
            .unwrap();
            drop(tx);

            self.thread_pool.scoped(|pool| {
                for _ in 0..self.thread_pool.thread_count() {
                    pool.execute(|| {
                        while let Ok(job) = rx.recv() {
                            if let Err(err) = self.scan_dir(&job) {
                                log::error!("error scanning {:?}: {}", job.abs_path, err);
                            }
                        }
                    });
                }
            });
        } else {
            // The worktree root is a single file.
            self.snapshot.lock().insert_entry(Entry {
                kind: EntryKind::File(self.char_bag(&path)),
                path,
                inode,
                is_symlink,
                is_ignored: false,
            });
        }

        self.mark_deleted_file_handles();
        Ok(())
    }
1047
    /// Scans one directory: collects its children as entries, queues a new
    /// job for each subdirectory, then records everything in the snapshot.
    ///
    /// Invariant: `new_jobs` holds exactly one job per directory entry in
    /// `new_entries`, in the same order — the `.gitignore` fix-up below
    /// relies on this correspondence.
    fn scan_dir(&self, job: &ScanJob) -> io::Result<()> {
        let mut new_entries: Vec<Entry> = Vec::new();
        let mut new_jobs: Vec<ScanJob> = Vec::new();
        let mut ignore_stack = job.ignore_stack.clone();
        let mut new_ignore = None;

        for child_entry in fs::read_dir(&job.abs_path)? {
            let child_entry = child_entry?;
            let child_name = child_entry.file_name();
            let child_abs_path = job.abs_path.join(&child_name);
            let child_path: Arc<Path> = job.path.join(&child_name).into();
            let child_is_symlink = child_entry.metadata()?.file_type().is_symlink();
            // `fs::metadata` follows symlinks; a broken link is skipped.
            let child_metadata = if let Ok(metadata) = fs::metadata(&child_abs_path) {
                metadata
            } else {
                log::error!("could not get metadata for path {:?}", child_abs_path);
                continue;
            };

            let child_inode = child_metadata.ino();

            // Disallow mount points outside the file system containing the root of this worktree
            if self.other_mount_paths.contains(&child_abs_path) {
                continue;
            }

            // If we find a .gitignore, add it to the stack of ignores used to determine which paths are ignored
            if child_name == *GITIGNORE {
                let (ignore, err) = Gitignore::new(&child_abs_path);
                if let Some(err) = err {
                    log::error!("error in ignore file {:?} - {:?}", child_path, err);
                }
                let ignore = Arc::new(ignore);
                ignore_stack = ignore_stack.append(job.path.clone(), ignore.clone());
                new_ignore = Some(ignore);

                // Update the ignore status of any child entries we've already
                // processed to reflect this directory's ignore file. Because
                // `.gitignore` begins with a `.` and sorts early in the
                // listing, there are rarely many of them. Update the ignore
                // stack of any already-queued jobs as well; the iterator stays
                // in lock-step with the directory entries (see invariant above).
                let mut new_jobs = new_jobs.iter_mut();
                for entry in &mut new_entries {
                    entry.is_ignored = ignore_stack.is_path_ignored(&entry.path, entry.is_dir());
                    if entry.is_dir() {
                        new_jobs.next().unwrap().ignore_stack = if entry.is_ignored {
                            IgnoreStack::all()
                        } else {
                            ignore_stack.clone()
                        };
                    }
                }
            }

            if child_metadata.is_dir() {
                let is_ignored = ignore_stack.is_path_ignored(&child_path, true);
                new_entries.push(Entry {
                    kind: EntryKind::PendingDir,
                    path: child_path.clone(),
                    inode: child_inode,
                    is_symlink: child_is_symlink,
                    is_ignored,
                });
                new_jobs.push(ScanJob {
                    abs_path: child_abs_path,
                    path: child_path,
                    // Everything under an ignored directory is ignored.
                    ignore_stack: if is_ignored {
                        IgnoreStack::all()
                    } else {
                        ignore_stack.clone()
                    },
                    scan_queue: job.scan_queue.clone(),
                });
            } else {
                let is_ignored = ignore_stack.is_path_ignored(&child_path, false);
                new_entries.push(Entry {
                    kind: EntryKind::File(self.char_bag(&child_path)),
                    path: child_path,
                    inode: child_inode,
                    is_symlink: child_is_symlink,
                    is_ignored,
                });
            };
        }

        self.snapshot
            .lock()
            .populate_dir(job.path.clone(), new_entries, new_ignore);
        // Queue subdirectory jobs only after the parent has been populated.
        for new_job in new_jobs {
            job.scan_queue.send(new_job).unwrap();
        }

        Ok(())
    }
1141
    /// Incorporate a batch of FS events into the worktree snapshot.
    ///
    /// Re-scans the affected subtrees, fixes up open file handles whose paths
    /// were renamed, refreshes handle mtimes, and recomputes ignore statuses.
    /// Returns `false` if the worktree root can no longer be canonicalized
    /// (e.g. it was deleted), in which case the events are abandoned.
    fn process_events(&mut self, mut events: Vec<fsevent::Event>) -> bool {
        self.update_other_mount_paths();

        // Work on a copy of the snapshot; it is published back at the end so
        // readers never observe a half-applied event batch.
        let mut snapshot = self.snapshot();
        snapshot.scan_id += 1;

        let root_abs_path = if let Ok(abs_path) = snapshot.abs_path.canonicalize() {
            abs_path
        } else {
            return false;
        };

        // Maps inode -> old path for the first half of a rename pair, so the
        // second event (the new path) can be matched back to it.
        let mut renamed_paths: HashMap<u64, PathBuf> = HashMap::new();
        let mut handles = self.handles.lock();
        let mut updated_handles = HashMap::new();
        for event in &events {
            let path = if let Ok(path) = event.path.strip_prefix(&root_abs_path) {
                path
            } else {
                continue;
            };

            let metadata = fs::metadata(&event.path);
            if event.flags.contains(fsevent::StreamFlags::ITEM_RENAMED) {
                // If the path is still in the snapshot, this event describes the
                // rename *source*; otherwise it may be the rename *destination*.
                if let Some(inode) = snapshot.inode_for_path(path) {
                    renamed_paths.insert(inode, path.to_path_buf());
                } else if let Ok(metadata) = &metadata {
                    let new_path = path;
                    if let Some(old_path) = renamed_paths.get(&metadata.ino()) {
                        // Re-key every handle under the old path to its new
                        // location, updating the handle state in place.
                        handles.retain(|handle_path, handle_state| {
                            if let Ok(path_suffix) = handle_path.strip_prefix(&old_path) {
                                // An empty suffix means the handle is for the renamed
                                // path itself, not a descendant.
                                let new_handle_path: Arc<Path> =
                                    if path_suffix.file_name().is_some() {
                                        new_path.join(path_suffix)
                                    } else {
                                        new_path.to_path_buf()
                                    }
                                    .into();
                                if let Some(handle_state) = Weak::upgrade(&handle_state) {
                                    let mut state = handle_state.lock();
                                    state.path = new_handle_path.clone();
                                    updated_handles
                                        .insert(new_handle_path, Arc::downgrade(&handle_state));
                                }
                                // Drop the old key; re-inserted under the new key below.
                                false
                            } else {
                                true
                            }
                        });
                        handles.extend(updated_handles.drain());
                    }
                }
            }

            // Refresh the mtime of any live handle at or below the event path.
            for state in handles.values_mut() {
                if let Some(state) = Weak::upgrade(&state) {
                    let mut state = state.lock();
                    if state.path.as_ref() == path {
                        if let Ok(metadata) = &metadata {
                            state.mtime = metadata
                                .modified()
                                .unwrap()
                                .duration_since(UNIX_EPOCH)
                                .unwrap();
                        }
                    } else if state.path.starts_with(path) {
                        // Descendant of the event path: stat it individually.
                        if let Ok(metadata) = fs::metadata(state.path.as_ref()) {
                            state.mtime = metadata
                                .modified()
                                .unwrap()
                                .duration_since(UNIX_EPOCH)
                                .unwrap();
                        }
                    }
                }
            }
        }
        // Release the handle-map lock before the (potentially long) rescan below.
        drop(handles);

        events.sort_unstable_by(|a, b| a.path.cmp(&b.path));
        let mut abs_paths = events.into_iter().map(|e| e.path).peekable();
        let (scan_queue_tx, scan_queue_rx) = crossbeam_channel::unbounded();

        while let Some(abs_path) = abs_paths.next() {
            let path = match abs_path.strip_prefix(&root_abs_path) {
                Ok(path) => Arc::from(path.to_path_buf()),
                Err(_) => {
                    log::error!(
                        "unexpected event {:?} for root path {:?}",
                        abs_path,
                        root_abs_path
                    );
                    continue;
                }
            };

            // Events are sorted, so any events under `abs_path` are redundant
            // with the subtree rescan scheduled below; skip them.
            while abs_paths.peek().map_or(false, |p| p.starts_with(&abs_path)) {
                abs_paths.next();
            }

            snapshot.remove_path(&path);

            match self.fs_entry_for_path(path.clone(), &abs_path) {
                Ok(Some(mut fs_entry)) => {
                    let is_dir = fs_entry.is_dir();
                    let ignore_stack = snapshot.ignore_stack_for_path(&path, is_dir);
                    fs_entry.is_ignored = ignore_stack.is_all();
                    snapshot.insert_entry(fs_entry);
                    if is_dir {
                        scan_queue_tx
                            .send(ScanJob {
                                abs_path,
                                path,
                                ignore_stack,
                                scan_queue: scan_queue_tx.clone(),
                            })
                            .unwrap();
                    }
                }
                // Path vanished between the event and the stat; nothing to insert.
                Ok(None) => {}
                Err(err) => {
                    // TODO - create a special 'error' entry in the entries tree to mark this
                    log::error!("error reading file on event {:?}", err);
                }
            }
        }

        *self.snapshot.lock() = snapshot;

        // Scan any directories that were created as part of this event batch.
        drop(scan_queue_tx);
        self.thread_pool.scoped(|pool| {
            for _ in 0..self.thread_pool.thread_count() {
                pool.execute(|| {
                    while let Ok(job) = scan_queue_rx.recv() {
                        if let Err(err) = self.scan_dir(&job) {
                            log::error!("error scanning {:?}: {}", job.abs_path, err);
                        }
                    }
                });
            }
        });

        self.update_ignore_statuses();
        self.mark_deleted_file_handles();
        true
    }
1289
    /// Re-evaluate ignore statuses after a scan.
    ///
    /// Drops ignore state for directories whose `.gitignore` disappeared, then
    /// re-applies ignore rules (in parallel) to the subtrees of every
    /// `.gitignore` that was (re)loaded during the current scan.
    fn update_ignore_statuses(&self) {
        let mut snapshot = self.snapshot();

        let mut ignores_to_update = Vec::new();
        let mut ignores_to_delete = Vec::new();
        for (parent_path, (_, scan_id)) in &snapshot.ignores {
            // A matching scan_id means this gitignore was (re)loaded in the
            // current scan, so ignore statuses in its subtree may be stale.
            if *scan_id == snapshot.scan_id && snapshot.entry_for_path(parent_path).is_some() {
                ignores_to_update.push(parent_path.clone());
            }

            let ignore_path = parent_path.join(&*GITIGNORE);
            if snapshot.entry_for_path(ignore_path).is_none() {
                ignores_to_delete.push(parent_path.clone());
            }
        }

        for parent_path in ignores_to_delete {
            snapshot.ignores.remove(&parent_path);
            self.snapshot.lock().ignores.remove(&parent_path);
        }

        let (ignore_queue_tx, ignore_queue_rx) = crossbeam_channel::unbounded();
        ignores_to_update.sort_unstable();
        let mut ignores_to_update = ignores_to_update.into_iter().peekable();
        while let Some(parent_path) = ignores_to_update.next() {
            // Paths are sorted, so descendants follow their ancestor; skip
            // them because the ancestor's job re-evaluates its whole subtree.
            while ignores_to_update
                .peek()
                .map_or(false, |p| p.starts_with(&parent_path))
            {
                ignores_to_update.next().unwrap();
            }

            let ignore_stack = snapshot.ignore_stack_for_path(&parent_path, true);
            ignore_queue_tx
                .send(UpdateIgnoreStatusJob {
                    path: parent_path,
                    ignore_stack,
                    ignore_queue: ignore_queue_tx.clone(),
                })
                .unwrap();
        }
        // Drop the last local sender so the worker loops below terminate when
        // all jobs (including those enqueued by workers) have been processed.
        drop(ignore_queue_tx);

        self.thread_pool.scoped(|scope| {
            for _ in 0..self.thread_pool.thread_count() {
                scope.execute(|| {
                    while let Ok(job) = ignore_queue_rx.recv() {
                        self.update_ignore_status(job, &snapshot);
                    }
                });
            }
        });
    }
1343
    /// Apply the given ignore stack to the immediate children of `job.path`,
    /// enqueueing a follow-up job for each child directory. Entries whose
    /// ignored flag changed are written back to the shared snapshot.
    fn update_ignore_status(&self, job: UpdateIgnoreStatusJob, snapshot: &Snapshot) {
        let mut ignore_stack = job.ignore_stack;
        // If this directory has its own gitignore, push it onto the stack so
        // it applies to the children below.
        if let Some((ignore, _)) = snapshot.ignores.get(&job.path) {
            ignore_stack = ignore_stack.append(job.path.clone(), ignore.clone());
        }

        let mut edits = Vec::new();
        for mut entry in snapshot.child_entries(&job.path).cloned() {
            let was_ignored = entry.is_ignored;
            entry.is_ignored = ignore_stack.is_path_ignored(entry.path(), entry.is_dir());
            if entry.is_dir() {
                // Once a directory is ignored, everything beneath it is too.
                let child_ignore_stack = if entry.is_ignored {
                    IgnoreStack::all()
                } else {
                    ignore_stack.clone()
                };
                job.ignore_queue
                    .send(UpdateIgnoreStatusJob {
                        path: entry.path().clone(),
                        ignore_stack: child_ignore_stack,
                        ignore_queue: job.ignore_queue.clone(),
                    })
                    .unwrap();
            }

            // Only write back entries whose status actually changed.
            if entry.is_ignored != was_ignored {
                edits.push(Edit::Insert(entry));
            }
        }
        self.snapshot.lock().entries.edit(edits, &());
    }
1375
1376 fn mark_deleted_file_handles(&self) {
1377 let mut handles = self.handles.lock();
1378 let snapshot = self.snapshot.lock();
1379 handles.retain(|path, handle_state| {
1380 if let Some(handle_state) = Weak::upgrade(&handle_state) {
1381 let mut handle_state = handle_state.lock();
1382 handle_state.is_deleted = snapshot.entry_for_path(&path).is_none();
1383 true
1384 } else {
1385 false
1386 }
1387 });
1388 }
1389
1390 fn fs_entry_for_path(&self, path: Arc<Path>, abs_path: &Path) -> Result<Option<Entry>> {
1391 let metadata = match fs::metadata(&abs_path) {
1392 Err(err) => {
1393 return match (err.kind(), err.raw_os_error()) {
1394 (io::ErrorKind::NotFound, _) => Ok(None),
1395 (io::ErrorKind::Other, Some(libc::ENOTDIR)) => Ok(None),
1396 _ => Err(anyhow::Error::new(err)),
1397 }
1398 }
1399 Ok(metadata) => metadata,
1400 };
1401 let inode = metadata.ino();
1402 let is_symlink = fs::symlink_metadata(&abs_path)
1403 .context("failed to read symlink metadata")?
1404 .file_type()
1405 .is_symlink();
1406
1407 let entry = Entry {
1408 kind: if metadata.file_type().is_dir() {
1409 EntryKind::PendingDir
1410 } else {
1411 EntryKind::File(self.char_bag(&path))
1412 },
1413 path,
1414 inode,
1415 is_symlink,
1416 is_ignored: false,
1417 };
1418
1419 Ok(Some(entry))
1420 }
1421
1422 fn char_bag(&self, path: &Path) -> CharBag {
1423 let mut result = self.root_char_bag;
1424 result.extend(
1425 path.to_string_lossy()
1426 .chars()
1427 .map(|c| c.to_ascii_lowercase()),
1428 );
1429 result
1430 }
1431}
1432
/// A unit of work for the background scanner: scan one directory and enqueue
/// its subdirectories.
struct ScanJob {
    /// Absolute path of the directory to scan.
    abs_path: PathBuf,
    /// Same directory, relative to the worktree root.
    path: Arc<Path>,
    /// Gitignore rules inherited from the directory's ancestors.
    ignore_stack: Arc<IgnoreStack>,
    /// Sender for enqueueing child directories; when every clone is dropped,
    /// the scan workers drain and exit.
    scan_queue: crossbeam_channel::Sender<ScanJob>,
}
1439
/// A unit of work for re-evaluating ignore statuses: process the children of
/// one directory and enqueue a job per child directory.
struct UpdateIgnoreStatusJob {
    /// Directory whose children should be re-evaluated, relative to the root.
    path: Arc<Path>,
    /// Gitignore rules in effect for this directory (ancestors only).
    ignore_stack: Arc<IgnoreStack>,
    /// Sender for enqueueing child-directory jobs.
    ignore_queue: crossbeam_channel::Sender<UpdateIgnoreStatusJob>,
}
1445
/// Extension methods on `ModelHandle<Worktree>` for opening files and, in
/// tests, synchronizing with the FS event stream.
pub trait WorktreeHandle {
    /// Asynchronously open a handle to the file at `path` in this worktree.
    fn file(&self, path: impl AsRef<Path>, cx: &mut MutableAppContext) -> Task<Result<FileHandle>>;

    /// Ensure all FS events already emitted for this worktree's directory have
    /// been processed before returning (test-only synchronization aid).
    #[cfg(test)]
    fn flush_fs_events<'a>(
        &self,
        cx: &'a gpui::TestAppContext,
    ) -> futures::future::LocalBoxFuture<'a, ()>;
}
1455
1456impl WorktreeHandle for ModelHandle<Worktree> {
1457 fn file(&self, path: impl AsRef<Path>, cx: &mut MutableAppContext) -> Task<Result<FileHandle>> {
1458 let path = Arc::from(path.as_ref());
1459 let handle = self.clone();
1460 let tree = self.read(cx);
1461 match tree {
1462 Worktree::Local(tree) => {
1463 let abs_path = tree.absolutize(&path);
1464 cx.spawn(|cx| async move {
1465 let mtime = cx
1466 .background_executor()
1467 .spawn(async move { fs::metadata(&abs_path) })
1468 .await?
1469 .modified()?
1470 .duration_since(UNIX_EPOCH)?;
1471 let state = handle.read_with(&cx, |tree, _| {
1472 let mut handles = tree.as_local().unwrap().handles.lock();
1473 handles
1474 .get(&path)
1475 .and_then(Weak::upgrade)
1476 .unwrap_or_else(|| {
1477 let id =
1478 tree.as_local().unwrap().next_handle_id.fetch_add(1, SeqCst);
1479 let handle_state = if let Some(entry) = tree.entry_for_path(&path) {
1480 FileHandleState {
1481 path: entry.path().clone(),
1482 is_deleted: false,
1483 mtime,
1484 id,
1485 rpc: None,
1486 }
1487 } else {
1488 FileHandleState {
1489 path: path.clone(),
1490 is_deleted: !tree.path_is_pending(&path),
1491 mtime,
1492 id,
1493 rpc: None,
1494 }
1495 };
1496
1497 let state = Arc::new(Mutex::new(handle_state.clone()));
1498 handles.insert(path, Arc::downgrade(&state));
1499 state
1500 })
1501 });
1502 Ok(FileHandle {
1503 worktree: handle.clone(),
1504 state,
1505 })
1506 })
1507 }
1508 Worktree::Remote(tree) => {
1509 let worktree_id = tree.id;
1510 let connection_id = tree.connection_id;
1511 let rpc = tree.rpc.clone();
1512 let handles = tree.handles.clone();
1513 cx.spawn(|cx| async move {
1514 let state = handles
1515 .lock()
1516 .entry(path.clone())
1517 .or_insert_with(|| Arc::new(AsyncMutex::new(Weak::new())))
1518 .clone();
1519
1520 let mut state = state.lock().await;
1521 if let Some(state) = Weak::upgrade(&state) {
1522 Ok(FileHandle {
1523 worktree: handle,
1524 state,
1525 })
1526 } else {
1527 let response = rpc
1528 .request(
1529 connection_id,
1530 proto::OpenFile {
1531 worktree_id: worktree_id as u64,
1532 path: path.to_string_lossy().to_string(),
1533 },
1534 )
1535 .await?;
1536 let is_deleted = handle.read_with(&cx, |tree, _| {
1537 tree.entry_for_path(&path).is_some() || !tree.path_is_pending(&path)
1538 });
1539 let new_state = Arc::new(Mutex::new(FileHandleState {
1540 path,
1541 is_deleted,
1542 mtime: Duration::from_secs(response.mtime),
1543 id: response.id,
1544 rpc: Some((connection_id, rpc)),
1545 }));
1546 *state = Arc::downgrade(&new_state);
1547 Ok(FileHandle {
1548 worktree: handle,
1549 state: new_state,
1550 })
1551 }
1552 })
1553 }
1554 }
1555 }
1556
1557 // When the worktree's FS event stream sometimes delivers "redundant" events for FS changes that
1558 // occurred before the worktree was constructed. These events can cause the worktree to perfrom
1559 // extra directory scans, and emit extra scan-state notifications.
1560 //
1561 // This function mutates the worktree's directory and waits for those mutations to be picked up,
1562 // to ensure that all redundant FS events have already been processed.
1563 #[cfg(test)]
1564 fn flush_fs_events<'a>(
1565 &self,
1566 cx: &'a gpui::TestAppContext,
1567 ) -> futures::future::LocalBoxFuture<'a, ()> {
1568 use smol::future::FutureExt;
1569
1570 let filename = "fs-event-sentinel";
1571 let root_path = cx.read(|cx| self.read(cx).abs_path.clone());
1572 let tree = self.clone();
1573 async move {
1574 fs::write(root_path.join(filename), "").unwrap();
1575 tree.condition(&cx, |tree, _| tree.entry_for_path(filename).is_some())
1576 .await;
1577
1578 fs::remove_file(root_path.join(filename)).unwrap();
1579 tree.condition(&cx, |tree, _| tree.entry_for_path(filename).is_none())
1580 .await;
1581
1582 cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
1583 .await;
1584 }
1585 .boxed_local()
1586 }
1587}
1588
/// Iterator over the file entries of a snapshot, in tree order.
pub enum FileIter<'a> {
    /// Visits every file, including ignored ones.
    All(Cursor<'a, Entry, FileCount, FileCount>),
    /// Visits only files that are not gitignored.
    Visible(Cursor<'a, Entry, VisibleFileCount, VisibleFileCount>),
}
1593
1594impl<'a> FileIter<'a> {
1595 fn all(snapshot: &'a Snapshot, start: usize) -> Self {
1596 let mut cursor = snapshot.entries.cursor();
1597 cursor.seek(&FileCount(start), Bias::Right, &());
1598 Self::All(cursor)
1599 }
1600
1601 fn visible(snapshot: &'a Snapshot, start: usize) -> Self {
1602 let mut cursor = snapshot.entries.cursor();
1603 cursor.seek(&VisibleFileCount(start), Bias::Right, &());
1604 Self::Visible(cursor)
1605 }
1606
1607 fn next_internal(&mut self) {
1608 match self {
1609 Self::All(cursor) => {
1610 let ix = *cursor.start();
1611 cursor.seek_forward(&FileCount(ix.0 + 1), Bias::Right, &());
1612 }
1613 Self::Visible(cursor) => {
1614 let ix = *cursor.start();
1615 cursor.seek_forward(&VisibleFileCount(ix.0 + 1), Bias::Right, &());
1616 }
1617 }
1618 }
1619
1620 fn item(&self) -> Option<&'a Entry> {
1621 match self {
1622 Self::All(cursor) => cursor.item(),
1623 Self::Visible(cursor) => cursor.item(),
1624 }
1625 }
1626}
1627
1628impl<'a> Iterator for FileIter<'a> {
1629 type Item = &'a Entry;
1630
1631 fn next(&mut self) -> Option<Self::Item> {
1632 if let Some(entry) = self.item() {
1633 self.next_internal();
1634 Some(entry)
1635 } else {
1636 None
1637 }
1638 }
1639}
1640
/// Iterator over the immediate children of a directory entry in a snapshot.
struct ChildEntriesIter<'a> {
    /// The directory whose children are being enumerated.
    parent_path: &'a Path,
    /// Cursor positioned at the next candidate child entry.
    cursor: Cursor<'a, Entry, PathSearch<'a>, ()>,
}
1645
impl<'a> ChildEntriesIter<'a> {
    /// Position a cursor just past `parent_path` itself, so iteration begins
    /// at its first child.
    fn new(parent_path: &'a Path, snapshot: &'a Snapshot) -> Self {
        let mut cursor = snapshot.entries.cursor();
        cursor.seek(&PathSearch::Exact(parent_path), Bias::Right, &());
        Self {
            parent_path,
            cursor,
        }
    }
}
1656
1657impl<'a> Iterator for ChildEntriesIter<'a> {
1658 type Item = &'a Entry;
1659
1660 fn next(&mut self) -> Option<Self::Item> {
1661 if let Some(item) = self.cursor.item() {
1662 if item.path().starts_with(self.parent_path) {
1663 self.cursor
1664 .seek_forward(&PathSearch::Successor(item.path()), Bias::Left, &());
1665 Some(item)
1666 } else {
1667 None
1668 }
1669 } else {
1670 None
1671 }
1672 }
1673}
1674
1675fn mounted_volume_paths() -> Vec<PathBuf> {
1676 unsafe {
1677 let mut stat_ptr: *mut libc::statfs = std::ptr::null_mut();
1678 let count = libc::getmntinfo(&mut stat_ptr as *mut _, libc::MNT_WAIT);
1679 if count >= 0 {
1680 std::slice::from_raw_parts(stat_ptr, count as usize)
1681 .iter()
1682 .map(|stat| {
1683 PathBuf::from(OsStr::from_bytes(
1684 CStr::from_ptr(&stat.f_mntonname[0]).to_bytes(),
1685 ))
1686 })
1687 .collect()
1688 } else {
1689 panic!("failed to run getmntinfo");
1690 }
1691 }
1692}
1693
1694#[cfg(test)]
1695mod tests {
1696 use super::*;
1697 use crate::editor::Buffer;
1698 use crate::test::*;
1699 use anyhow::Result;
1700 use rand::prelude::*;
1701 use serde_json::json;
1702 use std::env;
1703 use std::fmt::Write;
1704 use std::os::unix;
1705 use std::time::SystemTime;
1706
    // Scans a worktree whose root is reached through a symlink and which
    // contains an internal symlinked directory, then verifies entry counts,
    // inode deduplication across symlinks, and fuzzy path matching.
    #[gpui::test]
    async fn test_populate_and_search(mut cx: gpui::TestAppContext) {
        let dir = temp_tree(json!({
            "root": {
                "apple": "",
                "banana": {
                    "carrot": {
                        "date": "",
                        "endive": "",
                    }
                },
                "fennel": {
                    "grape": "",
                }
            }
        }));

        // The worktree is opened through a symlink to "root", and "finnochio"
        // is a symlink to "fennel" inside the tree.
        let root_link_path = dir.path().join("root_link");
        unix::fs::symlink(&dir.path().join("root"), &root_link_path).unwrap();
        unix::fs::symlink(
            &dir.path().join("root/fennel"),
            &dir.path().join("root/finnochio"),
        )
        .unwrap();

        let tree = cx.add_model(|cx| Worktree::local(root_link_path, cx));

        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;
        cx.read(|cx| {
            let tree = tree.read(cx);
            assert_eq!(tree.file_count(), 5);

            // The symlinked path resolves to the same file, hence same inode.
            assert_eq!(
                tree.inode_for_path("fennel/grape"),
                tree.inode_for_path("finnochio/grape")
            );

            // Fuzzy query "bna" should match only the paths under "banana".
            let results = match_paths(
                Some(tree.snapshot()).iter(),
                "bna",
                false,
                false,
                false,
                10,
                Default::default(),
                cx.thread_pool().clone(),
            )
            .into_iter()
            .map(|result| result.path)
            .collect::<Vec<Arc<Path>>>();
            assert_eq!(
                results,
                vec![
                    PathBuf::from("banana/carrot/date").into(),
                    PathBuf::from("banana/carrot/endive").into(),
                ]
            );
        })
    }
1767
    // Saves a buffer over an existing worktree file, then reloads the file's
    // history and checks the saved contents round-trip.
    #[gpui::test]
    async fn test_save_file(mut cx: gpui::TestAppContext) {
        let dir = temp_tree(json!({
            "file1": "the old contents",
        }));

        let tree = cx.add_model(|cx| Worktree::local(dir.path(), cx));
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;
        cx.read(|cx| assert_eq!(tree.read(cx).file_count(), 1));

        // Large buffer (~160 KB) to exercise a multi-chunk save.
        let buffer = cx.add_model(|cx| Buffer::new(1, "a line of text.\n".repeat(10 * 1024), cx));

        let path = tree.update(&mut cx, |tree, cx| {
            let path = tree.files(0).next().unwrap().path().clone();
            assert_eq!(path.file_name().unwrap(), "file1");
            smol::block_on(tree.save(&path, buffer.read(cx).snapshot().text(), cx.as_ref()))
                .unwrap();
            path
        });

        let history = cx
            .read(|cx| tree.read(cx).load_history(&path, cx))
            .await
            .unwrap();
        cx.read(|cx| {
            assert_eq!(history.base_text.as_ref(), buffer.read(cx).text());
        });
    }
1797
    // Same round-trip as test_save_file, but with a worktree rooted at a
    // single file rather than a directory; the file's relative path is "".
    #[gpui::test]
    async fn test_save_in_single_file_worktree(mut cx: gpui::TestAppContext) {
        let dir = temp_tree(json!({
            "file1": "the old contents",
        }));

        let tree = cx.add_model(|cx| Worktree::local(dir.path().join("file1"), cx));
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;
        cx.read(|cx| assert_eq!(tree.read(cx).file_count(), 1));

        let buffer = cx.add_model(|cx| Buffer::new(1, "a line of text.\n".repeat(10 * 1024), cx));

        // In a single-file worktree, the file is addressed by the empty path.
        let file = cx.update(|cx| tree.file("", cx)).await.unwrap();
        cx.update(|cx| {
            assert_eq!(file.path().file_name(), None);
            smol::block_on(file.save(buffer.read(cx).snapshot().text(), cx.as_ref())).unwrap();
        });

        let history = cx.read(|cx| file.load_history(cx)).await.unwrap();
        cx.read(|cx| assert_eq!(history.base_text.as_ref(), buffer.read(cx).text()));
    }
1820
    // Exercises rescanning after renames and deletions, verifying both the
    // resulting snapshot paths and how open file handles track the changes.
    #[gpui::test]
    async fn test_rescan_simple(mut cx: gpui::TestAppContext) {
        let dir = temp_tree(json!({
            "a": {
                "file1": "",
                "file2": "",
                "file3": "",
            },
            "b": {
                "c": {
                    "file4": "",
                    "file5": "",
                }
            }
        }));

        let tree = cx.add_model(|cx| Worktree::local(dir.path(), cx));
        let file2 = cx.update(|cx| tree.file("a/file2", cx)).await.unwrap();
        let file3 = cx.update(|cx| tree.file("a/file3", cx)).await.unwrap();
        let file4 = cx.update(|cx| tree.file("b/c/file4", cx)).await.unwrap();
        let file5 = cx.update(|cx| tree.file("b/c/file5", cx)).await.unwrap();
        let non_existent_file = cx.update(|cx| tree.file("a/file_x", cx)).await.unwrap();

        // After scanning, the worktree knows which files exist and which don't.
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;
        assert!(!file2.is_deleted());
        assert!(!file3.is_deleted());
        assert!(!file4.is_deleted());
        assert!(!file5.is_deleted());
        assert!(non_existent_file.is_deleted());

        // Mutate the tree: a cross-directory move, a delete, a directory
        // rename, and an in-place rename.
        tree.flush_fs_events(&cx).await;
        std::fs::rename(dir.path().join("a/file3"), dir.path().join("b/c/file3")).unwrap();
        std::fs::remove_file(dir.path().join("b/c/file5")).unwrap();
        std::fs::rename(dir.path().join("b/c"), dir.path().join("d")).unwrap();
        std::fs::rename(dir.path().join("a/file2"), dir.path().join("a/file2.new")).unwrap();
        tree.flush_fs_events(&cx).await;

        cx.read(|cx| {
            assert_eq!(
                tree.read(cx)
                    .paths()
                    .map(|p| p.to_str().unwrap())
                    .collect::<Vec<_>>(),
                vec![
                    "a",
                    "a/file1",
                    "a/file2.new",
                    "b",
                    "d",
                    "d/file3",
                    "d/file4"
                ]
            );

            // Handles follow their files across renames, including renames of
            // ancestor directories (file4 moved because "b/c" became "d").
            assert_eq!(file2.path().to_str().unwrap(), "a/file2.new");
            assert_eq!(file4.path().as_ref(), Path::new("d/file4"));
            assert_eq!(file5.path().as_ref(), Path::new("d/file5"));
            assert!(!file2.is_deleted());
            assert!(!file4.is_deleted());
            assert!(file5.is_deleted());

            // Right now, this rename isn't detected because the target path
            // no longer exists on the file system by the time we process the
            // rename event.
            assert_eq!(file3.path().as_ref(), Path::new("a/file3"));
            assert!(file3.is_deleted());
        });
    }
1891
    // Verifies that gitignore rules are applied on the initial scan and kept
    // up to date as files are created in tracked and ignored directories.
    #[gpui::test]
    async fn test_rescan_with_gitignore(mut cx: gpui::TestAppContext) {
        let dir = temp_tree(json!({
            ".git": {},
            ".gitignore": "ignored-dir\n",
            "tracked-dir": {
                "tracked-file1": "tracked contents",
            },
            "ignored-dir": {
                "ignored-file1": "ignored contents",
            }
        }));

        let tree = cx.add_model(|cx| Worktree::local(dir.path(), cx));
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;
        tree.flush_fs_events(&cx).await;
        cx.read(|cx| {
            let tree = tree.read(cx);
            let tracked = tree.entry_for_path("tracked-dir/tracked-file1").unwrap();
            let ignored = tree.entry_for_path("ignored-dir/ignored-file1").unwrap();
            assert_eq!(tracked.is_ignored(), false);
            assert_eq!(ignored.is_ignored(), true);
        });

        // New files inherit the ignore status of their parent directory.
        fs::write(dir.path().join("tracked-dir/tracked-file2"), "").unwrap();
        fs::write(dir.path().join("ignored-dir/ignored-file2"), "").unwrap();
        tree.flush_fs_events(&cx).await;
        cx.read(|cx| {
            let tree = tree.read(cx);
            // ".git" is always treated as ignored.
            let dot_git = tree.entry_for_path(".git").unwrap();
            let tracked = tree.entry_for_path("tracked-dir/tracked-file2").unwrap();
            let ignored = tree.entry_for_path("ignored-dir/ignored-file2").unwrap();
            assert_eq!(tracked.is_ignored(), false);
            assert_eq!(ignored.is_ignored(), true);
            assert_eq!(dot_git.is_ignored(), true);
        });
    }
1930
    // A path is "pending" when it lies at or beneath a PendingDir entry —
    // i.e. the scanner hasn't visited that subtree yet.
    #[test]
    fn test_path_is_pending() {
        let mut snapshot = Snapshot {
            id: 0,
            scan_id: 0,
            abs_path: Path::new("").into(),
            entries: Default::default(),
            ignores: Default::default(),
            root_name: Default::default(),
        };

        snapshot.entries.edit(
            vec![
                Edit::Insert(Entry {
                    path: Path::new("b").into(),
                    kind: EntryKind::Dir,
                    inode: 0,
                    is_ignored: false,
                    is_symlink: false,
                }),
                Edit::Insert(Entry {
                    path: Path::new("b/a").into(),
                    kind: EntryKind::Dir,
                    inode: 0,
                    is_ignored: false,
                    is_symlink: false,
                }),
                // "b/c" is the only pending directory in this snapshot.
                Edit::Insert(Entry {
                    path: Path::new("b/c").into(),
                    kind: EntryKind::PendingDir,
                    inode: 0,
                    is_ignored: false,
                    is_symlink: false,
                }),
                Edit::Insert(Entry {
                    path: Path::new("b/e").into(),
                    kind: EntryKind::Dir,
                    inode: 0,
                    is_ignored: false,
                    is_symlink: false,
                }),
            ],
            &(),
        );

        // Fully-scanned dirs and unknown paths outside "b/c" are not pending;
        // "b/c" itself and anything beneath it are.
        assert!(!snapshot.path_is_pending("b/a"));
        assert!(!snapshot.path_is_pending("b/b"));
        assert!(snapshot.path_is_pending("b/c"));
        assert!(snapshot.path_is_pending("b/c/x"));
        assert!(!snapshot.path_is_pending("b/d"));
        assert!(!snapshot.path_is_pending("b/e"));
    }
1983
1984 #[test]
1985 fn test_mounted_volume_paths() {
1986 let paths = mounted_volume_paths();
1987 assert!(paths.contains(&"/".into()));
1988 }
1989
    // Randomized scanner test: mutate a temp tree while delivering the
    // resulting FS events in random batches, checking snapshot invariants
    // after each batch, and finally comparing against a fresh full scan.
    // Tunable via the ITERATIONS / OPERATIONS / INITIAL_ENTRIES / SEED env vars.
    #[test]
    fn test_random() {
        let iterations = env::var("ITERATIONS")
            .map(|i| i.parse().unwrap())
            .unwrap_or(100);
        let operations = env::var("OPERATIONS")
            .map(|o| o.parse().unwrap())
            .unwrap_or(40);
        let initial_entries = env::var("INITIAL_ENTRIES")
            .map(|o| o.parse().unwrap())
            .unwrap_or(20);
        // SEED pins a single failing case for reproduction.
        let seeds = if let Ok(seed) = env::var("SEED").map(|s| s.parse().unwrap()) {
            seed..seed + 1
        } else {
            0..iterations
        };

        for seed in seeds {
            dbg!(seed);
            let mut rng = StdRng::seed_from_u64(seed);

            let root_dir = tempdir::TempDir::new(&format!("test-{}", seed)).unwrap();
            for _ in 0..initial_entries {
                randomly_mutate_tree(root_dir.path(), 1.0, &mut rng).unwrap();
            }
            log::info!("Generated initial tree");

            let (notify_tx, _notify_rx) = smol::channel::unbounded();
            let mut scanner = BackgroundScanner::new(
                Arc::new(Mutex::new(Snapshot {
                    id: 0,
                    scan_id: 0,
                    abs_path: root_dir.path().into(),
                    entries: Default::default(),
                    ignores: Default::default(),
                    root_name: Default::default(),
                })),
                Arc::new(Mutex::new(Default::default())),
                notify_tx,
                0,
            );
            scanner.scan_dirs().unwrap();
            scanner.snapshot().check_invariants();

            // Interleave mutations with randomly-sized event deliveries.
            let mut events = Vec::new();
            let mut mutations_len = operations;
            while mutations_len > 1 {
                if !events.is_empty() && rng.gen_bool(0.4) {
                    let len = rng.gen_range(0..=events.len());
                    let to_deliver = events.drain(0..len).collect::<Vec<_>>();
                    log::info!("Delivering events: {:#?}", to_deliver);
                    scanner.process_events(to_deliver);
                    scanner.snapshot().check_invariants();
                } else {
                    events.extend(randomly_mutate_tree(root_dir.path(), 0.6, &mut rng).unwrap());
                    mutations_len -= 1;
                }
            }
            // Deliver any remaining events so the snapshot settles.
            log::info!("Quiescing: {:#?}", events);
            scanner.process_events(events);
            scanner.snapshot().check_invariants();

            // A fresh scan of the final directory must match the incrementally
            // maintained snapshot exactly.
            let (notify_tx, _notify_rx) = smol::channel::unbounded();
            let mut new_scanner = BackgroundScanner::new(
                Arc::new(Mutex::new(Snapshot {
                    id: 0,
                    scan_id: 0,
                    abs_path: root_dir.path().into(),
                    entries: Default::default(),
                    ignores: Default::default(),
                    root_name: Default::default(),
                })),
                Arc::new(Mutex::new(Default::default())),
                notify_tx,
                1,
            );
            new_scanner.scan_dirs().unwrap();
            assert_eq!(scanner.snapshot().to_vec(), new_scanner.snapshot().to_vec());
        }
    }
2070
    /// Apply one random mutation to the tree rooted at `root_path` — create a
    /// file/dir, write a `.gitignore`, rename, or delete — and return the FS
    /// events a watcher would have observed for it.
    /// `insertion_probability` biases the choice toward creation.
    fn randomly_mutate_tree(
        root_path: &Path,
        insertion_probability: f64,
        rng: &mut impl Rng,
    ) -> Result<Vec<fsevent::Event>> {
        let root_path = root_path.canonicalize().unwrap();
        let (dirs, files) = read_dir_recursive(root_path.clone());

        let mut events = Vec::new();
        let mut record_event = |path: PathBuf| {
            events.push(fsevent::Event {
                event_id: SystemTime::now()
                    .duration_since(UNIX_EPOCH)
                    .unwrap()
                    .as_secs(),
                flags: fsevent::StreamFlags::empty(),
                path,
            });
        };

        // Always insert while the tree is nearly empty (only the root dir).
        if (files.is_empty() && dirs.len() == 1) || rng.gen_bool(insertion_probability) {
            let path = dirs.choose(rng).unwrap();
            let new_path = path.join(gen_name(rng));

            if rng.gen() {
                log::info!("Creating dir {:?}", new_path.strip_prefix(root_path)?);
                fs::create_dir(&new_path)?;
            } else {
                log::info!("Creating file {:?}", new_path.strip_prefix(root_path)?);
                fs::write(&new_path, "")?;
            }
            record_event(new_path);
        } else if rng.gen_bool(0.05) {
            // Occasionally write a .gitignore listing a random subset of a
            // directory's descendants.
            let ignore_dir_path = dirs.choose(rng).unwrap();
            let ignore_path = ignore_dir_path.join(&*GITIGNORE);

            let (subdirs, subfiles) = read_dir_recursive(ignore_dir_path.clone());
            let files_to_ignore = {
                let len = rng.gen_range(0..=subfiles.len());
                subfiles.choose_multiple(rng, len)
            };
            let dirs_to_ignore = {
                let len = rng.gen_range(0..subdirs.len());
                subdirs.choose_multiple(rng, len)
            };

            let mut ignore_contents = String::new();
            for path_to_ignore in files_to_ignore.chain(dirs_to_ignore) {
                write!(
                    ignore_contents,
                    "{}\n",
                    path_to_ignore
                        .strip_prefix(&ignore_dir_path)?
                        .to_str()
                        .unwrap()
                )
                .unwrap();
            }
            log::info!(
                "Creating {:?} with contents:\n{}",
                ignore_path.strip_prefix(&root_path)?,
                ignore_contents
            );
            fs::write(&ignore_path, ignore_contents).unwrap();
            record_event(ignore_path);
        } else {
            // Pick an existing file or non-root directory to rename or delete.
            let old_path = {
                let file_path = files.choose(rng);
                let dir_path = dirs[1..].choose(rng);
                file_path.into_iter().chain(dir_path).choose(rng).unwrap()
            };

            let is_rename = rng.gen();
            if is_rename {
                // Never move a directory into its own subtree.
                let new_path_parent = dirs
                    .iter()
                    .filter(|d| !d.starts_with(old_path))
                    .choose(rng)
                    .unwrap();

                // Sometimes clobber an existing directory with the rename.
                let overwrite_existing_dir =
                    !old_path.starts_with(&new_path_parent) && rng.gen_bool(0.3);
                let new_path = if overwrite_existing_dir {
                    fs::remove_dir_all(&new_path_parent).ok();
                    new_path_parent.to_path_buf()
                } else {
                    new_path_parent.join(gen_name(rng))
                };

                log::info!(
                    "Renaming {:?} to {}{:?}",
                    old_path.strip_prefix(&root_path)?,
                    if overwrite_existing_dir {
                        "overwrite "
                    } else {
                        ""
                    },
                    new_path.strip_prefix(&root_path)?
                );
                fs::rename(&old_path, &new_path)?;
                record_event(old_path.clone());
                record_event(new_path);
            } else if old_path.is_dir() {
                let (dirs, files) = read_dir_recursive(old_path.clone());

                log::info!("Deleting dir {:?}", old_path.strip_prefix(&root_path)?);
                fs::remove_dir_all(&old_path).unwrap();
                // Emit an event for every removed descendant, files first.
                for file in files {
                    record_event(file);
                }
                for dir in dirs {
                    record_event(dir);
                }
            } else {
                log::info!("Deleting file {:?}", old_path.strip_prefix(&root_path)?);
                fs::remove_file(old_path).unwrap();
                record_event(old_path.clone());
            }
        }

        Ok(events)
    }
2193
2194 fn read_dir_recursive(path: PathBuf) -> (Vec<PathBuf>, Vec<PathBuf>) {
2195 let child_entries = fs::read_dir(&path).unwrap();
2196 let mut dirs = vec![path];
2197 let mut files = Vec::new();
2198 for child_entry in child_entries {
2199 let child_path = child_entry.unwrap().path();
2200 if child_path.is_dir() {
2201 let (child_dirs, child_files) = read_dir_recursive(child_path);
2202 dirs.extend(child_dirs);
2203 files.extend(child_files);
2204 } else {
2205 files.push(child_path);
2206 }
2207 }
2208 (dirs, files)
2209 }
2210
2211 fn gen_name(rng: &mut impl Rng) -> String {
2212 (0..6)
2213 .map(|_| rng.sample(rand::distributions::Alphanumeric))
2214 .map(char::from)
2215 .collect()
2216 }
2217
    impl Snapshot {
        /// Test-only consistency checks: the file iterators agree with the
        /// entry tree, the tree's order matches a breadth-first traversal,
        /// and every ignore entry corresponds to an existing `.gitignore`.
        fn check_invariants(&self) {
            // Every file entry must be produced by `files`, and every
            // non-ignored file entry by `visible_files`, in the same order.
            let mut files = self.files(0);
            let mut visible_files = self.visible_files(0);
            for entry in self.entries.cursor::<(), ()>() {
                if entry.is_file() {
                    assert_eq!(files.next().unwrap().inode(), entry.inode);
                    if !entry.is_ignored {
                        assert_eq!(visible_files.next().unwrap().inode(), entry.inode);
                    }
                }
            }
            assert!(files.next().is_none());
            assert!(visible_files.next().is_none());

            // Collect paths breadth-first via child_entries; inserting at the
            // remembered stack index keeps siblings in tree order.
            let mut bfs_paths = Vec::new();
            let mut stack = vec![Path::new("")];
            while let Some(path) = stack.pop() {
                bfs_paths.push(path);
                let ix = stack.len();
                for child_entry in self.child_entries(path) {
                    stack.insert(ix, child_entry.path());
                }
            }

            let dfs_paths = self
                .entries
                .cursor::<(), ()>()
                .map(|e| e.path().as_ref())
                .collect::<Vec<_>>();
            assert_eq!(bfs_paths, dfs_paths);

            // Ignore state must only exist for directories that are present
            // and still contain a .gitignore file.
            for (ignore_parent_path, _) in &self.ignores {
                assert!(self.entry_for_path(ignore_parent_path).is_some());
                assert!(self
                    .entry_for_path(ignore_parent_path.join(&*GITIGNORE))
                    .is_some());
            }
        }

        /// Flatten the snapshot into sorted (path, inode, is_ignored) tuples
        /// for order-insensitive comparisons between snapshots.
        fn to_vec(&self) -> Vec<(&Path, u64, bool)> {
            let mut paths = Vec::new();
            for entry in self.entries.cursor::<(), ()>() {
                paths.push((entry.path().as_ref(), entry.inode(), entry.is_ignored()));
            }
            paths.sort_by(|a, b| a.0.cmp(&b.0));
            paths
        }
    }
2267}