1mod char_bag;
2mod fuzzy;
3mod ignore;
4
5use self::{char_bag::CharBag, ignore::IgnoreStack};
6use crate::{
7 editor::{History, Rope},
8 rpc::{self, proto, ConnectionId},
9 sum_tree::{self, Cursor, Edit, SumTree},
10 util::Bias,
11};
12use ::ignore::gitignore::Gitignore;
13use anyhow::{anyhow, Context, Result};
14pub use fuzzy::{match_paths, PathMatch};
15use gpui::{scoped_pool, AppContext, Entity, ModelContext, ModelHandle, MutableAppContext, Task};
16use lazy_static::lazy_static;
17use parking_lot::Mutex;
18use postage::{
19 prelude::{Sink, Stream},
20 watch,
21};
22use smol::{channel::Sender, lock::Mutex as AsyncMutex};
23use std::{
24 cmp,
25 collections::{HashMap, HashSet},
26 ffi::{CStr, OsStr, OsString},
27 fmt, fs,
28 future::Future,
29 hash::Hash,
30 io::{self, Read, Write},
31 ops::Deref,
32 os::unix::{ffi::OsStrExt, fs::MetadataExt},
33 path::{Path, PathBuf},
34 sync::{
35 atomic::{AtomicU64, Ordering::SeqCst},
36 Arc, Weak,
37 },
38 time::{Duration, UNIX_EPOCH},
39};
40
lazy_static! {
    // Cached `OsStr` for ".gitignore", used when comparing path file names
    // during scanning so the literal isn't re-wrapped on every check.
    static ref GITIGNORE: &'static OsStr = OsStr::new(".gitignore");
}
44
/// State of the background filesystem scan for a local worktree.
#[derive(Clone, Debug)]
enum ScanState {
    /// No scan is currently running.
    Idle,
    /// A scan is in progress; the snapshot may still be changing.
    Scanning,
    /// The last scan attempt failed with an I/O error.
    Err(Arc<io::Error>),
}
51
/// A worktree rooted at a directory (or single file): either backed by the
/// local filesystem, or mirrored from a remote collaborator over RPC.
pub enum Worktree {
    Local(LocalWorktree),
    Remote(RemoteWorktree),
}
56
impl Entity for Worktree {
    // Worktrees emit no typed events; observers are driven by `cx.notify()`
    // calls (see `poll_entries`).
    type Event = ();
}
60
61impl Worktree {
62 pub fn local(path: impl Into<Arc<Path>>, cx: &mut ModelContext<Worktree>) -> Self {
63 Worktree::Local(LocalWorktree::new(path, cx))
64 }
65
66 pub fn remote(
67 id: usize,
68 worktree: proto::Worktree,
69 rpc: rpc::Client,
70 connection_id: ConnectionId,
71 cx: &mut ModelContext<Worktree>,
72 ) -> Self {
73 Worktree::Remote(RemoteWorktree::new(id, worktree, rpc, connection_id, cx))
74 }
75
76 pub fn as_local(&self) -> Option<&LocalWorktree> {
77 if let Worktree::Local(worktree) = self {
78 Some(worktree)
79 } else {
80 None
81 }
82 }
83
84 pub fn as_local_mut(&mut self) -> Option<&mut LocalWorktree> {
85 if let Worktree::Local(worktree) = self {
86 Some(worktree)
87 } else {
88 None
89 }
90 }
91
92 pub fn snapshot(&self) -> Snapshot {
93 match self {
94 Worktree::Local(worktree) => worktree.snapshot(),
95 Worktree::Remote(worktree) => worktree.snapshot.clone(),
96 }
97 }
98
99 pub fn save(
100 &self,
101 path: &Path,
102 content: Rope,
103 cx: &AppContext,
104 ) -> impl Future<Output = Result<()>> {
105 match self {
106 Worktree::Local(worktree) => worktree.save(path, content, cx),
107 Worktree::Remote(worktree) => todo!(),
108 }
109 }
110}
111
impl Deref for Worktree {
    type Target = Snapshot;

    // Both variants expose their current `Snapshot` directly, so read-only
    // snapshot methods are available on `Worktree` itself.
    fn deref(&self) -> &Self::Target {
        match self {
            Worktree::Local(worktree) => &worktree.snapshot,
            Worktree::Remote(worktree) => &worktree.snapshot,
        }
    }
}
122
/// A worktree backed by the local filesystem, kept up to date by a
/// background scanner thread that watches filesystem events.
pub struct LocalWorktree {
    // Foreground copy of the tree state, refreshed from `background_snapshot`.
    snapshot: Snapshot,
    // Snapshot mutated by the `BackgroundScanner` thread.
    background_snapshot: Arc<Mutex<Snapshot>>,
    // Live file handles keyed by worktree-relative path; weak so handles are
    // released once all clones are dropped.
    handles: Arc<Mutex<HashMap<Arc<Path>, Weak<Mutex<FileHandleState>>>>>,
    // Monotonic counter, presumably for allocating file-handle ids — not
    // exercised within this excerpt.
    next_handle_id: AtomicU64,
    // Watch channel broadcasting the scanner's current `ScanState`.
    scan_state: (watch::Sender<ScanState>, watch::Receiver<ScanState>),
    // Keeps the fsevent stream alive for the lifetime of the worktree.
    _event_stream_handle: fsevent::Handle,
    // True while a deferred `poll_entries` call is pending (see `poll_entries`).
    poll_scheduled: bool,
    // Present once this worktree has been shared via `share`.
    rpc: Option<rpc::Client>,
}
133
/// A cheaply-clonable handle to a file within a worktree. All clones share
/// one `FileHandleState`, which is updated as the worktree observes changes.
#[derive(Clone)]
pub struct FileHandle {
    worktree: ModelHandle<Worktree>,
    state: Arc<Mutex<FileHandleState>>,
}
139
/// Mutable state shared by all clones of a `FileHandle`.
#[derive(Clone)]
struct FileHandleState {
    // Path of the file, relative to the worktree root.
    path: Arc<Path>,
    // Set once the underlying file has been removed from disk.
    is_deleted: bool,
    // Last-known modification time, as a duration since `UNIX_EPOCH`.
    mtime: Duration,
    worktree_id: usize,
    id: u64,
    // For remote files: the connection used to notify the host on close.
    rpc: Option<(ConnectionId, rpc::Client)>,
}
149
impl Drop for FileHandleState {
    fn drop(&mut self) {
        // For remote files, notify the host that this file is no longer open.
        // `drop` cannot await, so the close message is sent on a detached task;
        // a send failure is only logged.
        if let Some((connection_id, rpc)) = self.rpc.take() {
            let id = self.id;
            let worktree_id = self.worktree_id as u64;
            smol::spawn(async move {
                if let Err(error) = rpc
                    .send(connection_id, proto::CloseFile { worktree_id, id })
                    .await
                {
                    log::warn!("error closing file {}: {}", id, error);
                }
            })
            .detach();
        }
    }
}
167
168impl LocalWorktree {
169 fn new(path: impl Into<Arc<Path>>, cx: &mut ModelContext<Worktree>) -> Self {
170 let abs_path = path.into();
171 let (scan_state_tx, scan_state_rx) = smol::channel::unbounded();
172 let id = cx.model_id();
173 let snapshot = Snapshot {
174 id,
175 scan_id: 0,
176 abs_path,
177 root_name: Default::default(),
178 ignores: Default::default(),
179 entries: Default::default(),
180 };
181 let (event_stream, event_stream_handle) =
182 fsevent::EventStream::new(&[snapshot.abs_path.as_ref()], Duration::from_millis(100));
183
184 let background_snapshot = Arc::new(Mutex::new(snapshot.clone()));
185 let handles = Arc::new(Mutex::new(Default::default()));
186
187 let tree = Self {
188 snapshot,
189 background_snapshot: background_snapshot.clone(),
190 handles: handles.clone(),
191 next_handle_id: Default::default(),
192 scan_state: watch::channel_with(ScanState::Scanning),
193 _event_stream_handle: event_stream_handle,
194 poll_scheduled: false,
195 rpc: None,
196 };
197
198 std::thread::spawn(move || {
199 let scanner = BackgroundScanner::new(background_snapshot, handles, scan_state_tx, id);
200 scanner.run(event_stream)
201 });
202
203 cx.spawn(|this, mut cx| {
204 let this = this.downgrade();
205 async move {
206 while let Ok(scan_state) = scan_state_rx.recv().await {
207 let alive = cx.update(|cx| {
208 if let Some(handle) = this.upgrade(&cx) {
209 handle.update(cx, |this, cx| {
210 if let Worktree::Local(worktree) = this {
211 worktree.observe_scan_state(scan_state, cx)
212 } else {
213 unreachable!()
214 }
215 });
216 true
217 } else {
218 false
219 }
220 });
221
222 if !alive {
223 break;
224 }
225 }
226 }
227 })
228 .detach();
229
230 tree
231 }
232
233 pub fn scan_complete(&self) -> impl Future<Output = ()> {
234 let mut scan_state_rx = self.scan_state.1.clone();
235 async move {
236 let mut scan_state = Some(scan_state_rx.borrow().clone());
237 while let Some(ScanState::Scanning) = scan_state {
238 scan_state = scan_state_rx.recv().await;
239 }
240 }
241 }
242
243 fn observe_scan_state(&mut self, scan_state: ScanState, cx: &mut ModelContext<Worktree>) {
244 let _ = self.scan_state.0.blocking_send(scan_state);
245 self.poll_entries(cx);
246 }
247
248 fn poll_entries(&mut self, cx: &mut ModelContext<Worktree>) {
249 self.snapshot = self.background_snapshot.lock().clone();
250 cx.notify();
251
252 if self.is_scanning() && !self.poll_scheduled {
253 cx.spawn(|this, mut cx| async move {
254 smol::Timer::after(Duration::from_millis(100)).await;
255 this.update(&mut cx, |this, cx| {
256 if let Worktree::Local(worktree) = this {
257 worktree.poll_scheduled = false;
258 worktree.poll_entries(cx);
259 } else {
260 unreachable!()
261 }
262 })
263 })
264 .detach();
265 self.poll_scheduled = true;
266 }
267 }
268
269 fn is_scanning(&self) -> bool {
270 if let ScanState::Scanning = *self.scan_state.1.borrow() {
271 true
272 } else {
273 false
274 }
275 }
276
277 pub fn snapshot(&self) -> Snapshot {
278 self.snapshot.clone()
279 }
280
281 pub fn abs_path(&self) -> &Path {
282 self.snapshot.abs_path.as_ref()
283 }
284
285 pub fn contains_abs_path(&self, path: &Path) -> bool {
286 path.starts_with(&self.snapshot.abs_path)
287 }
288
289 fn absolutize(&self, path: &Path) -> PathBuf {
290 if path.file_name().is_some() {
291 self.snapshot.abs_path.join(path)
292 } else {
293 self.snapshot.abs_path.to_path_buf()
294 }
295 }
296
297 pub fn save(&self, path: &Path, content: Rope, cx: &AppContext) -> Task<Result<()>> {
298 let handles = self.handles.clone();
299 let path = path.to_path_buf();
300 let abs_path = self.absolutize(&path);
301 cx.background_executor().spawn(async move {
302 let buffer_size = content.summary().bytes.min(10 * 1024);
303 let file = fs::File::create(&abs_path)?;
304 let mut writer = io::BufWriter::with_capacity(buffer_size, &file);
305 for chunk in content.chunks() {
306 writer.write(chunk.as_bytes())?;
307 }
308 writer.flush()?;
309
310 if let Some(handle) = handles.lock().get(&*path).and_then(Weak::upgrade) {
311 let mut handle = handle.lock();
312 handle.mtime = file.metadata()?.modified()?.duration_since(UNIX_EPOCH)?;
313 handle.is_deleted = false;
314 }
315
316 Ok(())
317 })
318 }
319
320 pub fn share(
321 &mut self,
322 client: rpc::Client,
323 connection_id: ConnectionId,
324 cx: &mut ModelContext<Worktree>,
325 ) -> Task<anyhow::Result<(u64, String)>> {
326 self.rpc = Some(client.clone());
327 let root_name = self.root_name.clone();
328 let snapshot = self.snapshot();
329 let handle = cx.handle();
330 cx.spawn(|_this, cx| async move {
331 let entries = cx
332 .background_executor()
333 .spawn(async move {
334 snapshot
335 .entries
336 .cursor::<(), ()>()
337 .map(|entry| proto::Entry {
338 is_dir: entry.is_dir(),
339 path: entry.path.to_string_lossy().to_string(),
340 inode: entry.inode,
341 is_symlink: entry.is_symlink,
342 is_ignored: entry.is_ignored,
343 })
344 .collect()
345 })
346 .await;
347
348 let share_response = client
349 .request(
350 connection_id,
351 proto::ShareWorktree {
352 worktree: Some(proto::Worktree { root_name, entries }),
353 },
354 )
355 .await?;
356
357 client
358 .state
359 .lock()
360 .await
361 .shared_worktrees
362 .insert(share_response.worktree_id, handle);
363
364 log::info!("sharing worktree {:?}", share_response);
365 Ok((share_response.worktree_id, share_response.access_token))
366 })
367 }
368}
369
impl Deref for LocalWorktree {
    type Target = Snapshot;

    // Expose the foreground snapshot's read-only API directly.
    fn deref(&self) -> &Self::Target {
        &self.snapshot
    }
}
377
impl fmt::Debug for LocalWorktree {
    // Delegates to the snapshot's indented entry-tree listing.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        self.snapshot.fmt(f)
    }
}
383
/// A mirror of a worktree hosted by a remote collaborator, populated from a
/// `proto::Worktree` and accessed over RPC.
pub struct RemoteWorktree {
    // Id of this worktree on the remote host.
    remote_id: usize,
    snapshot: Snapshot,
    // File handles keyed by path. NOTE(review): the inner `AsyncMutex` layer
    // presumably serializes concurrent handle creation over RPC — not
    // exercised within this excerpt; confirm against the open-file code path.
    handles: Arc<Mutex<HashMap<Arc<Path>, Arc<AsyncMutex<Weak<Mutex<FileHandleState>>>>>>>,
    rpc: rpc::Client,
    connection_id: ConnectionId,
}
391
impl RemoteWorktree {
    /// Builds a remote worktree from the serialized entries received over RPC.
    fn new(
        remote_id: usize,
        worktree: proto::Worktree,
        rpc: rpc::Client,
        connection_id: ConnectionId,
        cx: &mut ModelContext<Worktree>,
    ) -> Self {
        // Lowercased characters of the root name, used as the base of each
        // file's fuzzy-matching character bag.
        let root_char_bag: CharBag = worktree
            .root_name
            .chars()
            .map(|c| c.to_ascii_lowercase())
            .collect();
        let mut entries = SumTree::new();
        entries.extend(
            worktree.entries.into_iter().map(|entry| {
                let kind = if entry.is_dir {
                    EntryKind::Dir
                } else {
                    // Files extend the root's char bag with their own path.
                    let mut char_bag = root_char_bag.clone();
                    char_bag.extend(entry.path.chars().map(|c| c.to_ascii_lowercase()));
                    EntryKind::File(char_bag)
                };
                Entry {
                    kind,
                    path: Path::new(&entry.path).into(),
                    inode: entry.inode,
                    is_symlink: entry.is_symlink,
                    is_ignored: entry.is_ignored,
                }
            }),
            &(),
        );
        // Remote worktrees have no local filesystem root, hence the empty
        // `abs_path`.
        let snapshot = Snapshot {
            id: cx.model_id(),
            scan_id: 0,
            abs_path: Path::new("").into(),
            root_name: worktree.root_name,
            ignores: Default::default(),
            entries,
        };
        Self {
            remote_id,
            snapshot,
            handles: Default::default(),
            rpc,
            connection_id,
        }
    }
}
442
/// A point-in-time view of a worktree's entries. Cloning is cheap enough to
/// hand copies to background tasks (see `LocalWorktree::share`).
#[derive(Clone)]
pub struct Snapshot {
    // Id of the owning worktree model.
    id: usize,
    // Incremented for each (re)scan; used to stamp `ignores` cache entries.
    scan_id: usize,
    abs_path: Arc<Path>,
    root_name: String,
    // Parsed `.gitignore` files keyed by their containing directory, paired
    // with the scan id at which each was last updated.
    ignores: HashMap<Arc<Path>, (Arc<Gitignore>, usize)>,
    // All entries, ordered by path (see `sum_tree::KeyedItem` for `Entry`).
    entries: SumTree<Entry>,
}
452
impl Snapshot {
    /// Total number of file entries, including ignored files.
    pub fn file_count(&self) -> usize {
        self.entries.summary().file_count
    }

    /// Number of file entries not matched by any gitignore rule.
    pub fn visible_file_count(&self) -> usize {
        self.entries.summary().visible_file_count
    }

    /// Iterates all files, beginning at the `start`-th file.
    pub fn files(&self, start: usize) -> FileIter {
        FileIter::all(self, start)
    }

    /// Iterates the paths of all entries, skipping the root entry.
    pub fn paths(&self) -> impl Iterator<Item = &Arc<Path>> {
        self.entries
            .cursor::<(), ()>()
            .skip(1)
            .map(|entry| entry.path())
    }

    /// Iterates non-ignored files, beginning at the `start`-th visible file.
    pub fn visible_files(&self, start: usize) -> FileIter {
        FileIter::visible(self, start)
    }

    fn child_entries<'a>(&'a self, path: &'a Path) -> ChildEntriesIter<'a> {
        ChildEntriesIter::new(path, self)
    }

    // Panics if the snapshot is empty (the root entry has not been inserted).
    pub fn root_entry(&self) -> &Entry {
        self.entry_for_path("").unwrap()
    }

    /// Returns the filename of the snapshot's root, plus a trailing slash if the snapshot's root is
    /// a directory.
    pub fn root_name(&self) -> &str {
        &self.root_name
    }

    /// Whether `path` refers to, or lies inside, a directory that has been
    /// discovered but whose contents have not yet been scanned.
    fn path_is_pending(&self, path: impl AsRef<Path>) -> bool {
        // Before the root entry is inserted, everything is pending.
        if self.entries.is_empty() {
            return true;
        }
        let path = path.as_ref();
        let mut cursor = self.entries.cursor::<_, ()>();
        if cursor.seek(&PathSearch::Exact(path), Bias::Left, &()) {
            let entry = cursor.item().unwrap();
            if entry.path.as_ref() == path {
                return matches!(entry.kind, EntryKind::PendingDir);
            }
        }
        // Otherwise the path is pending if its nearest preceding entry is a
        // pending directory that contains it.
        if let Some(entry) = cursor.prev_item() {
            matches!(entry.kind, EntryKind::PendingDir) && path.starts_with(entry.path.as_ref())
        } else {
            false
        }
    }

    fn entry_for_path(&self, path: impl AsRef<Path>) -> Option<&Entry> {
        let mut cursor = self.entries.cursor::<_, ()>();
        if cursor.seek(&PathSearch::Exact(path.as_ref()), Bias::Left, &()) {
            cursor.item()
        } else {
            None
        }
    }

    pub fn inode_for_path(&self, path: impl AsRef<Path>) -> Option<u64> {
        self.entry_for_path(path.as_ref()).map(|e| e.inode())
    }

    /// Inserts (or replaces) an entry. When the entry is a `.gitignore` file,
    /// its parsed rules are also cached under the parent directory.
    fn insert_entry(&mut self, entry: Entry) {
        if !entry.is_dir() && entry.path().file_name() == Some(&GITIGNORE) {
            let (ignore, err) = Gitignore::new(self.abs_path.join(entry.path()));
            if let Some(err) = err {
                // A malformed ignore file is logged, but whatever rules did
                // parse are still cached.
                log::error!("error in ignore file {:?} - {:?}", entry.path(), err);
            }

            let ignore_dir_path = entry.path().parent().unwrap();
            self.ignores
                .insert(ignore_dir_path.into(), (Arc::new(ignore), self.scan_id));
        }
        self.entries.insert(entry, &());
    }

    /// Records the scanned children of `parent_path`, transitioning the parent
    /// from `PendingDir` to `Dir`. Panics if the parent is missing or is not
    /// currently pending.
    fn populate_dir(
        &mut self,
        parent_path: Arc<Path>,
        entries: impl IntoIterator<Item = Entry>,
        ignore: Option<Arc<Gitignore>>,
    ) {
        let mut edits = Vec::new();

        let mut parent_entry = self
            .entries
            .get(&PathKey(parent_path.clone()), &())
            .unwrap()
            .clone();
        if let Some(ignore) = ignore {
            self.ignores.insert(parent_path, (ignore, self.scan_id));
        }
        if matches!(parent_entry.kind, EntryKind::PendingDir) {
            parent_entry.kind = EntryKind::Dir;
        } else {
            unreachable!();
        }
        edits.push(Edit::Insert(parent_entry));

        for entry in entries {
            edits.push(Edit::Insert(entry));
        }
        self.entries.edit(edits, &());
    }

    /// Removes `path` and all entries beneath it from the entry tree.
    fn remove_path(&mut self, path: &Path) {
        let new_entries = {
            let mut cursor = self.entries.cursor::<_, ()>();
            // Keep everything before `path`, skip the range covered by `path`
            // and its descendants, then append everything after it.
            let mut new_entries = cursor.slice(&PathSearch::Exact(path), Bias::Left, &());
            cursor.seek_forward(&PathSearch::Successor(path), Bias::Left, &());
            new_entries.push_tree(cursor.suffix(&()), &());
            new_entries
        };
        self.entries = new_entries;

        // Removing a `.gitignore` invalidates the cached rules for its parent
        // directory. NOTE(review): stamping with the current scan id
        // presumably lets later passes detect the stale cache entry — confirm
        // against the code that prunes `ignores`.
        if path.file_name() == Some(&GITIGNORE) {
            if let Some((_, scan_id)) = self.ignores.get_mut(path.parent().unwrap()) {
                *scan_id = self.scan_id;
            }
        }
    }

    /// Builds the stack of gitignore rules applying to `path` by walking its
    /// ancestors from the root downward.
    fn ignore_stack_for_path(&self, path: &Path, is_dir: bool) -> Arc<IgnoreStack> {
        let mut new_ignores = Vec::new();
        for ancestor in path.ancestors().skip(1) {
            if let Some((ignore, _)) = self.ignores.get(ancestor) {
                new_ignores.push((ancestor, Some(ignore.clone())));
            } else {
                new_ignores.push((ancestor, None));
            }
        }

        let mut ignore_stack = IgnoreStack::none();
        for (parent_path, ignore) in new_ignores.into_iter().rev() {
            // Once any ancestor directory is itself ignored, everything below
            // it is ignored too — no need to consult deeper ignore files.
            if ignore_stack.is_path_ignored(&parent_path, true) {
                ignore_stack = IgnoreStack::all();
                break;
            } else if let Some(ignore) = ignore {
                ignore_stack = ignore_stack.append(Arc::from(parent_path), ignore);
            }
        }

        if ignore_stack.is_path_ignored(path, is_dir) {
            ignore_stack = IgnoreStack::all();
        }

        ignore_stack
    }
}
610
impl fmt::Debug for Snapshot {
    // Renders entries as an indented tree: one space per path ancestor.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        for entry in self.entries.cursor::<(), ()>() {
            for _ in entry.path().ancestors().skip(1) {
                write!(f, " ")?;
            }
            writeln!(f, "{:?} (inode: {})", entry.path(), entry.inode())?;
        }
        Ok(())
    }
}
622
623impl FileHandle {
624 pub fn id(&self) -> u64 {
625 self.state.lock().id
626 }
627
628 /// Returns this file's path relative to the root of its worktree.
629 pub fn path(&self) -> Arc<Path> {
630 self.state.lock().path.clone()
631 }
632
633 /// Returns the last component of this handle's absolute path. If this handle refers to the root
634 /// of its worktree, then this method will return the name of the worktree itself.
635 pub fn file_name<'a>(&'a self, cx: &'a AppContext) -> Option<OsString> {
636 self.state
637 .lock()
638 .path
639 .file_name()
640 .or_else(|| Some(OsStr::new(self.worktree.read(cx).root_name())))
641 .map(Into::into)
642 }
643
644 pub fn is_deleted(&self) -> bool {
645 self.state.lock().is_deleted
646 }
647
648 pub fn mtime(&self) -> Duration {
649 self.state.lock().mtime
650 }
651
652 pub fn exists(&self) -> bool {
653 !self.is_deleted()
654 }
655
656 pub fn load_history(&self, cx: &AppContext) -> Task<Result<History>> {
657 match self.worktree.read(cx) {
658 Worktree::Local(worktree) => {
659 let path = self.state.lock().path.to_path_buf();
660 let abs_path = worktree.absolutize(&path);
661 cx.background_executor().spawn(async move {
662 let mut file = fs::File::open(&abs_path)?;
663 let mut base_text = String::new();
664 file.read_to_string(&mut base_text)?;
665 Ok(History::new(Arc::from(base_text)))
666 })
667 }
668 Worktree::Remote(worktree) => {
669 let state = self.state.lock();
670 let id = state.id;
671 let worktree_id = worktree.remote_id as u64;
672 let (connection_id, rpc) = state.rpc.clone().unwrap();
673 cx.background_executor().spawn(async move {
674 let response = rpc
675 .request(connection_id, proto::OpenBuffer { worktree_id, id })
676 .await?;
677 let buffer = response
678 .buffer
679 .ok_or_else(|| anyhow!("buffer must be present"))?;
680 let mut history = History::new(buffer.content.into());
681 Ok(history)
682 })
683 }
684 }
685 }
686
687 pub fn save(&self, content: Rope, cx: &AppContext) -> impl Future<Output = Result<()>> {
688 let worktree = self.worktree.read(cx);
689 worktree.save(&self.path(), content, cx)
690 }
691
692 pub fn worktree_id(&self) -> usize {
693 self.worktree.id()
694 }
695
696 pub fn entry_id(&self) -> (usize, Arc<Path>) {
697 (self.worktree.id(), self.path())
698 }
699
700 pub fn observe_from_model<T: Entity>(
701 &self,
702 cx: &mut ModelContext<T>,
703 mut callback: impl FnMut(&mut T, FileHandle, &mut ModelContext<T>) + 'static,
704 ) {
705 let mut prev_state = self.state.lock().clone();
706 let cur_state = Arc::downgrade(&self.state);
707 cx.observe(&self.worktree, move |observer, worktree, cx| {
708 if let Some(cur_state) = cur_state.upgrade() {
709 let cur_state_unlocked = cur_state.lock();
710 if cur_state_unlocked.mtime != prev_state.mtime
711 || cur_state_unlocked.path != prev_state.path
712 {
713 prev_state = cur_state_unlocked.clone();
714 drop(cur_state_unlocked);
715 callback(
716 observer,
717 FileHandle {
718 worktree,
719 state: cur_state,
720 },
721 cx,
722 );
723 }
724 }
725 });
726 }
727}
728
729impl PartialEq for FileHandle {
730 fn eq(&self, other: &Self) -> bool {
731 if Arc::ptr_eq(&self.state, &other.state) {
732 true
733 } else {
734 let self_state = self.state.lock();
735 let other_state = other.state.lock();
736 self_state.worktree_id == other_state.worktree_id && self_state.id == other_state.id
737 }
738 }
739}
740
impl Eq for FileHandle {}

impl Hash for FileHandle {
    fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
        // Hashes the file id and the worktree model handle. NOTE(review):
        // `eq` compares the `worktree_id` stored in the state, while this
        // hashes the model handle — confirm these always agree for handles
        // that compare equal, or the `Hash`/`Eq` contract is violated.
        self.state.lock().id.hash(state);
        self.worktree.hash(state);
    }
}
749
/// A single file or directory in a worktree, stored in the snapshot's
/// `SumTree` ordered by path.
#[derive(Clone, Debug)]
pub struct Entry {
    kind: EntryKind,
    // Path relative to the worktree root.
    path: Arc<Path>,
    inode: u64,
    is_symlink: bool,
    // True when matched by a gitignore rule in scope.
    is_ignored: bool,
}
758
#[derive(Clone, Debug)]
pub enum EntryKind {
    /// A directory that has been discovered but whose children have not yet
    /// been scanned.
    PendingDir,
    /// A fully scanned directory.
    Dir,
    /// A file, carrying its pre-computed fuzzy-matching character bag.
    File(CharBag),
}
765
impl Entry {
    /// Path relative to the worktree root.
    pub fn path(&self) -> &Arc<Path> {
        &self.path
    }

    pub fn inode(&self) -> u64 {
        self.inode
    }

    pub fn is_ignored(&self) -> bool {
        self.is_ignored
    }

    /// True for both scanned and pending directories.
    fn is_dir(&self) -> bool {
        matches!(self.kind, EntryKind::Dir | EntryKind::PendingDir)
    }

    fn is_file(&self) -> bool {
        matches!(self.kind, EntryKind::File(_))
    }
}
787
788impl sum_tree::Item for Entry {
789 type Summary = EntrySummary;
790
791 fn summary(&self) -> Self::Summary {
792 let file_count;
793 let visible_file_count;
794 if self.is_file() {
795 file_count = 1;
796 if self.is_ignored {
797 visible_file_count = 0;
798 } else {
799 visible_file_count = 1;
800 }
801 } else {
802 file_count = 0;
803 visible_file_count = 0;
804 }
805
806 EntrySummary {
807 max_path: self.path().clone(),
808 file_count,
809 visible_file_count,
810 }
811 }
812}
813
impl sum_tree::KeyedItem for Entry {
    type Key = PathKey;

    // Entries are keyed — and therefore ordered in the tree — by their path.
    fn key(&self) -> Self::Key {
        PathKey(self.path().clone())
    }
}
821
/// Aggregate statistics for a subtree of entries.
#[derive(Clone, Debug)]
pub struct EntrySummary {
    // Greatest (last) path within the summarized subtree.
    max_path: Arc<Path>,
    file_count: usize,
    // Count of files not matched by any gitignore rule.
    visible_file_count: usize,
}
828
829impl Default for EntrySummary {
830 fn default() -> Self {
831 Self {
832 max_path: Arc::from(Path::new("")),
833 file_count: 0,
834 visible_file_count: 0,
835 }
836 }
837}
838
impl sum_tree::Summary for EntrySummary {
    type Context = ();

    fn add_summary(&mut self, rhs: &Self, _: &()) {
        // `rhs` summarizes entries to the right, so its max path supersedes
        // ours, while the counts accumulate.
        self.max_path = rhs.max_path.clone();
        self.file_count += rhs.file_count;
        self.visible_file_count += rhs.visible_file_count;
    }
}
848
/// Sort key for entries in the worktree's `SumTree`: the entry's path.
#[derive(Clone, Debug, Eq, PartialEq, Ord, PartialOrd)]
pub struct PathKey(Arc<Path>);

impl Default for PathKey {
    // The empty path, which sorts before every other path.
    fn default() -> Self {
        Self(Path::new("").into())
    }
}

impl<'a> sum_tree::Dimension<'a, EntrySummary> for PathKey {
    // The key covering a subtree is the greatest path it contains.
    fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
        self.0 = summary.max_path.clone();
    }
}
863
/// Seek target within the entry tree: either an exact path, or the position
/// immediately past a path and all of its descendants.
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
enum PathSearch<'a> {
    Exact(&'a Path),
    Successor(&'a Path),
}
869
impl<'a> Ord for PathSearch<'a> {
    fn cmp(&self, other: &Self) -> cmp::Ordering {
        match (self, other) {
            (Self::Exact(a), Self::Exact(b)) => a.cmp(b),
            (Self::Successor(a), Self::Exact(b)) => {
                // `Successor(a)` sorts after `a` itself and after every path
                // beneath `a`; otherwise it compares just like `a`.
                if b.starts_with(a) {
                    cmp::Ordering::Greater
                } else {
                    a.cmp(b)
                }
            }
            // Exact-vs-Successor and Successor-vs-Successor are not produced
            // by the current seek/slice call sites, so they remain `todo!`.
            _ => todo!("not sure we need the other two cases"),
        }
    }
}
885
impl<'a> PartialOrd for PathSearch<'a> {
    // Delegates to the total order defined in `Ord`.
    fn partial_cmp(&self, other: &Self) -> Option<cmp::Ordering> {
        Some(self.cmp(other))
    }
}
891
892impl<'a> Default for PathSearch<'a> {
893 fn default() -> Self {
894 Self::Exact(Path::new("").into())
895 }
896}
897
impl<'a: 'b, 'b> sum_tree::Dimension<'a, EntrySummary> for PathSearch<'b> {
    // A subtree's position is represented by its greatest contained path.
    fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
        *self = Self::Exact(summary.max_path.as_ref());
    }
}
903
/// Tree dimension counting all files, enabling seeks by file index.
#[derive(Copy, Clone, Default, Debug, Eq, PartialEq, Ord, PartialOrd)]
pub struct FileCount(usize);

impl<'a> sum_tree::Dimension<'a, EntrySummary> for FileCount {
    fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
        self.0 += summary.file_count;
    }
}
912
/// Tree dimension counting non-ignored files, enabling seeks by visible
/// file index.
#[derive(Copy, Clone, Default, Debug, Eq, PartialEq, Ord, PartialOrd)]
pub struct VisibleFileCount(usize);

impl<'a> sum_tree::Dimension<'a, EntrySummary> for VisibleFileCount {
    fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
        self.0 += summary.visible_file_count;
    }
}
921
/// Scans a worktree on a dedicated thread, keeping the shared snapshot and
/// file-handle states up to date as filesystem events arrive.
struct BackgroundScanner {
    // Snapshot shared with the foreground `LocalWorktree`.
    snapshot: Arc<Mutex<Snapshot>>,
    // Channel for reporting scan progress back to the worktree.
    notify: Sender<ScanState>,
    // File handles shared with the worktree; updated on renames/changes.
    handles: Arc<Mutex<HashMap<Arc<Path>, Weak<Mutex<FileHandleState>>>>>,
    // Mount points outside the worktree's filesystem; never scanned into.
    other_mount_paths: HashSet<PathBuf>,
    // Pool used to scan directories in parallel.
    thread_pool: scoped_pool::Pool,
    // Lowercased characters of the root name (populated in `scan_dirs`).
    root_char_bag: CharBag,
}
930
931impl BackgroundScanner {
    fn new(
        snapshot: Arc<Mutex<Snapshot>>,
        handles: Arc<Mutex<HashMap<Arc<Path>, Weak<Mutex<FileHandleState>>>>>,
        notify: Sender<ScanState>,
        worktree_id: usize,
    ) -> Self {
        let mut scanner = Self {
            root_char_bag: Default::default(),
            snapshot,
            notify,
            handles,
            other_mount_paths: Default::default(),
            // 16 worker threads for parallel directory scanning.
            thread_pool: scoped_pool::Pool::new(16, format!("worktree-{}-scanner", worktree_id)),
        };
        scanner.update_other_mount_paths();
        scanner
    }
949
950 fn update_other_mount_paths(&mut self) {
951 let path = self.snapshot.lock().abs_path.clone();
952 self.other_mount_paths.clear();
953 self.other_mount_paths.extend(
954 mounted_volume_paths()
955 .into_iter()
956 .filter(|mount_path| !path.starts_with(mount_path)),
957 );
958 }
959
    /// Absolute path of the worktree root, read from the shared snapshot.
    fn abs_path(&self) -> Arc<Path> {
        self.snapshot.lock().abs_path.clone()
    }

    /// Clones the current shared snapshot.
    fn snapshot(&self) -> Snapshot {
        self.snapshot.lock().clone()
    }
967
    /// Drives the scanner: performs the initial scan, then processes
    /// filesystem events. Exits when the notify channel closes (i.e. the
    /// worktree has been dropped) or event processing fails.
    fn run(mut self, event_stream: fsevent::EventStream) {
        if smol::block_on(self.notify.send(ScanState::Scanning)).is_err() {
            return;
        }

        if let Err(err) = self.scan_dirs() {
            if smol::block_on(self.notify.send(ScanState::Err(Arc::new(err)))).is_err() {
                return;
            }
        }

        if smol::block_on(self.notify.send(ScanState::Idle)).is_err() {
            return;
        }

        // Returning `false` from this callback terminates the event stream.
        event_stream.run(move |events| {
            if smol::block_on(self.notify.send(ScanState::Scanning)).is_err() {
                return false;
            }

            if !self.process_events(events) {
                return false;
            }

            if smol::block_on(self.notify.send(ScanState::Idle)).is_err() {
                return false;
            }

            true
        });
    }
999
    /// Performs the initial scan of the worktree root, populating the shared
    /// snapshot. Directories are scanned in parallel on the thread pool.
    fn scan_dirs(&mut self) -> io::Result<()> {
        self.snapshot.lock().scan_id += 1;

        let path: Arc<Path> = Arc::from(Path::new(""));
        let abs_path = self.abs_path();
        let metadata = fs::metadata(&abs_path)?;
        let inode = metadata.ino();
        // `fs::metadata` follows symlinks, so the root's symlink status must
        // come from `symlink_metadata`.
        let is_symlink = fs::symlink_metadata(&abs_path)?.file_type().is_symlink();
        let is_dir = metadata.file_type().is_dir();

        // After determining whether the root entry is a file or a directory, populate the
        // snapshot's "root name", which will be used for the purpose of fuzzy matching.
        let mut root_name = abs_path
            .file_name()
            .map_or(String::new(), |f| f.to_string_lossy().to_string());
        if is_dir {
            root_name.push('/');
        }
        self.root_char_bag = root_name.chars().map(|c| c.to_ascii_lowercase()).collect();
        self.snapshot.lock().root_name = root_name;

        if is_dir {
            self.snapshot.lock().insert_entry(Entry {
                kind: EntryKind::PendingDir,
                path: path.clone(),
                inode,
                is_symlink,
                is_ignored: false,
            });

            // Seed the work queue with the root job. Each job carries a clone
            // of the sender so workers can enqueue subdirectories; the channel
            // closes (and the workers exit) once all job senders are dropped.
            let (tx, rx) = crossbeam_channel::unbounded();
            tx.send(ScanJob {
                abs_path: abs_path.to_path_buf(),
                path,
                ignore_stack: IgnoreStack::none(),
                scan_queue: tx.clone(),
            })
            .unwrap();
            drop(tx);

            self.thread_pool.scoped(|pool| {
                for _ in 0..self.thread_pool.thread_count() {
                    pool.execute(|| {
                        while let Ok(job) = rx.recv() {
                            // A failed directory is logged and skipped rather
                            // than aborting the whole scan.
                            if let Err(err) = self.scan_dir(&job) {
                                log::error!("error scanning {:?}: {}", job.abs_path, err);
                            }
                        }
                    });
                }
            });
        } else {
            // The worktree root is a single file.
            self.snapshot.lock().insert_entry(Entry {
                kind: EntryKind::File(self.char_bag(&path)),
                path,
                inode,
                is_symlink,
                is_ignored: false,
            });
        }

        self.mark_deleted_file_handles();
        Ok(())
    }
1064
    /// Scans one directory: records its children in the snapshot and enqueues
    /// a `ScanJob` for each subdirectory found.
    fn scan_dir(&self, job: &ScanJob) -> io::Result<()> {
        let mut new_entries: Vec<Entry> = Vec::new();
        let mut new_jobs: Vec<ScanJob> = Vec::new();
        let mut ignore_stack = job.ignore_stack.clone();
        let mut new_ignore = None;

        for child_entry in fs::read_dir(&job.abs_path)? {
            let child_entry = child_entry?;
            let child_name = child_entry.file_name();
            let child_abs_path = job.abs_path.join(&child_name);
            let child_path: Arc<Path> = job.path.join(&child_name).into();
            let child_is_symlink = child_entry.metadata()?.file_type().is_symlink();
            let child_metadata = if let Ok(metadata) = fs::metadata(&child_abs_path) {
                metadata
            } else {
                // E.g. a broken symlink — skip it rather than failing the scan.
                log::error!("could not get metadata for path {:?}", child_abs_path);
                continue;
            };

            let child_inode = child_metadata.ino();

            // Disallow mount points outside the file system containing the root of this worktree
            if self.other_mount_paths.contains(&child_abs_path) {
                continue;
            }

            // If we find a .gitignore, add it to the stack of ignores used to determine which paths are ignored
            if child_name == *GITIGNORE {
                let (ignore, err) = Gitignore::new(&child_abs_path);
                if let Some(err) = err {
                    log::error!("error in ignore file {:?} - {:?}", child_path, err);
                }
                let ignore = Arc::new(ignore);
                ignore_stack = ignore_stack.append(job.path.clone(), ignore.clone());
                new_ignore = Some(ignore);

                // Update ignore status of any child entries we've already processed to reflect the
                // ignore file in the current directory. Because `.gitignore` starts with a `.`,
                // there should rarely be too numerous. Update the ignore stack associated with any
                // new jobs as well.
                //
                // `new_jobs` holds one job per directory entry already in
                // `new_entries`, in the same order, so advancing the two in
                // lockstep keeps each job paired with its entry.
                let mut new_jobs = new_jobs.iter_mut();
                for entry in &mut new_entries {
                    entry.is_ignored = ignore_stack.is_path_ignored(&entry.path, entry.is_dir());
                    if entry.is_dir() {
                        new_jobs.next().unwrap().ignore_stack = if entry.is_ignored {
                            IgnoreStack::all()
                        } else {
                            ignore_stack.clone()
                        };
                    }
                }
            }

            if child_metadata.is_dir() {
                let is_ignored = ignore_stack.is_path_ignored(&child_path, true);
                new_entries.push(Entry {
                    kind: EntryKind::PendingDir,
                    path: child_path.clone(),
                    inode: child_inode,
                    is_symlink: child_is_symlink,
                    is_ignored,
                });
                new_jobs.push(ScanJob {
                    abs_path: child_abs_path,
                    path: child_path,
                    // Children of an ignored directory are ignored wholesale.
                    ignore_stack: if is_ignored {
                        IgnoreStack::all()
                    } else {
                        ignore_stack.clone()
                    },
                    scan_queue: job.scan_queue.clone(),
                });
            } else {
                let is_ignored = ignore_stack.is_path_ignored(&child_path, false);
                new_entries.push(Entry {
                    kind: EntryKind::File(self.char_bag(&child_path)),
                    path: child_path,
                    inode: child_inode,
                    is_symlink: child_is_symlink,
                    is_ignored,
                });
            };
        }

        // Record this directory's children before enqueuing subdirectory jobs,
        // so workers never see a child whose parent is still pending.
        self.snapshot
            .lock()
            .populate_dir(job.path.clone(), new_entries, new_ignore);
        for new_job in new_jobs {
            job.scan_queue.send(new_job).unwrap();
        }

        Ok(())
    }
1158
    /// Applies a batch of file-system events to the worktree, returning
    /// `false` if the root path can no longer be canonicalized (e.g. it was
    /// deleted or unmounted), in which case the batch is abandoned.
    ///
    /// Reconciliation happens in passes: (1) update open file handles
    /// affected by renames and modifications, (2) re-stat each changed path
    /// and rebuild that portion of the snapshot (rescanning new directories
    /// on the thread pool), (3) refresh ignore statuses and deleted flags.
    fn process_events(&mut self, mut events: Vec<fsevent::Event>) -> bool {
        self.update_other_mount_paths();

        // Mutate a local copy of the snapshot and publish it at the end, so
        // readers never observe a partially-applied batch.
        let mut snapshot = self.snapshot();
        snapshot.scan_id += 1;

        let root_abs_path = if let Ok(abs_path) = snapshot.abs_path.canonicalize() {
            abs_path
        } else {
            return false;
        };

        // First pass: keep open file handles up to date. `renamed_paths`
        // maps inode -> pre-rename path so the "new path" half of a rename
        // can be paired with the "old path" half seen earlier in the batch.
        let mut renamed_paths: HashMap<u64, PathBuf> = HashMap::new();
        let mut handles = self.handles.lock();
        let mut updated_handles = HashMap::new();
        for event in &events {
            let path = if let Ok(path) = event.path.strip_prefix(&root_abs_path) {
                path
            } else {
                continue;
            };

            let metadata = fs::metadata(&event.path);
            if event.flags.contains(fsevent::StreamFlags::ITEM_RENAMED) {
                if let Some(inode) = snapshot.inode_for_path(path) {
                    // The snapshot still contains this path, so this event is
                    // the old side of a rename; remember it by inode.
                    renamed_paths.insert(inode, path.to_path_buf());
                } else if let Ok(metadata) = &metadata {
                    // Unknown path that exists on disk: the new side of a
                    // rename. Re-key every handle under the old path.
                    let new_path = path;
                    if let Some(old_path) = renamed_paths.get(&metadata.ino()) {
                        handles.retain(|handle_path, handle_state| {
                            if let Ok(path_suffix) = handle_path.strip_prefix(&old_path) {
                                // An empty suffix means the handle refers to
                                // the renamed path itself, not a descendant.
                                let new_handle_path: Arc<Path> =
                                    if path_suffix.file_name().is_some() {
                                        new_path.join(path_suffix)
                                    } else {
                                        new_path.to_path_buf()
                                    }
                                    .into();
                                if let Some(handle_state) = Weak::upgrade(&handle_state) {
                                    let mut state = handle_state.lock();
                                    state.path = new_handle_path.clone();
                                    updated_handles
                                        .insert(new_handle_path, Arc::downgrade(&handle_state));
                                }
                                // Drop the stale key; the updated entry is
                                // re-inserted under its new path just below.
                                false
                            } else {
                                true
                            }
                        });
                        handles.extend(updated_handles.drain());
                    }
                }
            }

            // Refresh the mtime of any live handle at, or beneath, the
            // event's path.
            for state in handles.values_mut() {
                if let Some(state) = Weak::upgrade(&state) {
                    let mut state = state.lock();
                    if state.path.as_ref() == path {
                        if let Ok(metadata) = &metadata {
                            state.mtime = metadata
                                .modified()
                                .unwrap()
                                .duration_since(UNIX_EPOCH)
                                .unwrap();
                        }
                    } else if state.path.starts_with(path) {
                        // The event is for an ancestor directory; stat the
                        // handle's own path to pick up its current mtime.
                        if let Ok(metadata) = fs::metadata(state.path.as_ref()) {
                            state.mtime = metadata
                                .modified()
                                .unwrap()
                                .duration_since(UNIX_EPOCH)
                                .unwrap();
                        }
                    }
                }
            }
        }
        drop(handles);

        // Second pass: re-stat each changed path and rebuild that part of
        // the snapshot. Sorting lets events for descendants of an earlier
        // path be skipped below, since rescanning the ancestor covers them.
        events.sort_unstable_by(|a, b| a.path.cmp(&b.path));
        let mut abs_paths = events.into_iter().map(|e| e.path).peekable();
        let (scan_queue_tx, scan_queue_rx) = crossbeam_channel::unbounded();

        while let Some(abs_path) = abs_paths.next() {
            let path = match abs_path.strip_prefix(&root_abs_path) {
                Ok(path) => Arc::from(path.to_path_buf()),
                Err(_) => {
                    log::error!(
                        "unexpected event {:?} for root path {:?}",
                        abs_path,
                        root_abs_path
                    );
                    continue;
                }
            };

            // Skip events for paths inside `abs_path`; they are covered by
            // the rescan of `abs_path` itself.
            while abs_paths.peek().map_or(false, |p| p.starts_with(&abs_path)) {
                abs_paths.next();
            }

            snapshot.remove_path(&path);

            match self.fs_entry_for_path(path.clone(), &abs_path) {
                Ok(Some(mut fs_entry)) => {
                    let is_dir = fs_entry.is_dir();
                    let ignore_stack = snapshot.ignore_stack_for_path(&path, is_dir);
                    fs_entry.is_ignored = ignore_stack.is_all();
                    snapshot.insert_entry(fs_entry);
                    if is_dir {
                        scan_queue_tx
                            .send(ScanJob {
                                abs_path,
                                path,
                                ignore_stack,
                                scan_queue: scan_queue_tx.clone(),
                            })
                            .unwrap();
                    }
                }
                Ok(None) => {}
                Err(err) => {
                    // TODO - create a special 'error' entry in the entries tree to mark this
                    log::error!("error reading file on event {:?}", err);
                }
            }
        }

        // Publish the updated snapshot before scanning subdirectories; the
        // scan workers below merge their results into `self.snapshot`.
        *self.snapshot.lock() = snapshot;

        // Scan any directories that were created as part of this event batch.
        // Dropping our sender lets the workers exit once the queue (including
        // jobs they enqueue themselves) drains.
        drop(scan_queue_tx);
        self.thread_pool.scoped(|pool| {
            for _ in 0..self.thread_pool.thread_count() {
                pool.execute(|| {
                    while let Ok(job) = scan_queue_rx.recv() {
                        if let Err(err) = self.scan_dir(&job) {
                            log::error!("error scanning {:?}: {}", job.abs_path, err);
                        }
                    }
                });
            }
        });

        self.update_ignore_statuses();
        self.mark_deleted_file_handles();
        true
    }
1306
    /// Recomputes ignore statuses after a scan: drops `.gitignore` files that
    /// no longer exist on disk and re-applies any that changed in the current
    /// scan, fanning the per-directory work out across the thread pool.
    fn update_ignore_statuses(&self) {
        let mut snapshot = self.snapshot();

        let mut ignores_to_update = Vec::new();
        let mut ignores_to_delete = Vec::new();
        for (parent_path, (_, scan_id)) in &snapshot.ignores {
            // A matching scan_id means this .gitignore was (re)loaded during
            // the current scan, so its subtree needs re-evaluation.
            if *scan_id == snapshot.scan_id && snapshot.entry_for_path(parent_path).is_some() {
                ignores_to_update.push(parent_path.clone());
            }

            // NOTE(review): a directory can appear in both lists when its
            // .gitignore was touched and then removed in the same scan —
            // verify that an update job for a just-deleted ignore is benign.
            let ignore_path = parent_path.join(&*GITIGNORE);
            if snapshot.entry_for_path(ignore_path).is_none() {
                ignores_to_delete.push(parent_path.clone());
            }
        }

        for parent_path in ignores_to_delete {
            snapshot.ignores.remove(&parent_path);
            self.snapshot.lock().ignores.remove(&parent_path);
        }

        // Sort so a directory precedes its descendants, then skip any
        // descendant of an already-queued directory — the ancestor's job
        // re-evaluates the whole subtree anyway.
        let (ignore_queue_tx, ignore_queue_rx) = crossbeam_channel::unbounded();
        ignores_to_update.sort_unstable();
        let mut ignores_to_update = ignores_to_update.into_iter().peekable();
        while let Some(parent_path) = ignores_to_update.next() {
            while ignores_to_update
                .peek()
                .map_or(false, |p| p.starts_with(&parent_path))
            {
                ignores_to_update.next().unwrap();
            }

            let ignore_stack = snapshot.ignore_stack_for_path(&parent_path, true);
            ignore_queue_tx
                .send(UpdateIgnoreStatusJob {
                    path: parent_path,
                    ignore_stack,
                    ignore_queue: ignore_queue_tx.clone(),
                })
                .unwrap();
        }
        // Close the channel so the workers terminate once the queue drains.
        drop(ignore_queue_tx);

        self.thread_pool.scoped(|scope| {
            for _ in 0..self.thread_pool.thread_count() {
                scope.execute(|| {
                    while let Ok(job) = ignore_queue_rx.recv() {
                        self.update_ignore_status(job, &snapshot);
                    }
                });
            }
        });
    }
1360
1361 fn update_ignore_status(&self, job: UpdateIgnoreStatusJob, snapshot: &Snapshot) {
1362 let mut ignore_stack = job.ignore_stack;
1363 if let Some((ignore, _)) = snapshot.ignores.get(&job.path) {
1364 ignore_stack = ignore_stack.append(job.path.clone(), ignore.clone());
1365 }
1366
1367 let mut edits = Vec::new();
1368 for mut entry in snapshot.child_entries(&job.path).cloned() {
1369 let was_ignored = entry.is_ignored;
1370 entry.is_ignored = ignore_stack.is_path_ignored(entry.path(), entry.is_dir());
1371 if entry.is_dir() {
1372 let child_ignore_stack = if entry.is_ignored {
1373 IgnoreStack::all()
1374 } else {
1375 ignore_stack.clone()
1376 };
1377 job.ignore_queue
1378 .send(UpdateIgnoreStatusJob {
1379 path: entry.path().clone(),
1380 ignore_stack: child_ignore_stack,
1381 ignore_queue: job.ignore_queue.clone(),
1382 })
1383 .unwrap();
1384 }
1385
1386 if entry.is_ignored != was_ignored {
1387 edits.push(Edit::Insert(entry));
1388 }
1389 }
1390 self.snapshot.lock().entries.edit(edits, &());
1391 }
1392
1393 fn mark_deleted_file_handles(&self) {
1394 let mut handles = self.handles.lock();
1395 let snapshot = self.snapshot.lock();
1396 handles.retain(|path, handle_state| {
1397 if let Some(handle_state) = Weak::upgrade(&handle_state) {
1398 let mut handle_state = handle_state.lock();
1399 handle_state.is_deleted = snapshot.entry_for_path(&path).is_none();
1400 true
1401 } else {
1402 false
1403 }
1404 });
1405 }
1406
1407 fn fs_entry_for_path(&self, path: Arc<Path>, abs_path: &Path) -> Result<Option<Entry>> {
1408 let metadata = match fs::metadata(&abs_path) {
1409 Err(err) => {
1410 return match (err.kind(), err.raw_os_error()) {
1411 (io::ErrorKind::NotFound, _) => Ok(None),
1412 (io::ErrorKind::Other, Some(libc::ENOTDIR)) => Ok(None),
1413 _ => Err(anyhow::Error::new(err)),
1414 }
1415 }
1416 Ok(metadata) => metadata,
1417 };
1418 let inode = metadata.ino();
1419 let is_symlink = fs::symlink_metadata(&abs_path)
1420 .context("failed to read symlink metadata")?
1421 .file_type()
1422 .is_symlink();
1423
1424 let entry = Entry {
1425 kind: if metadata.file_type().is_dir() {
1426 EntryKind::PendingDir
1427 } else {
1428 EntryKind::File(self.char_bag(&path))
1429 },
1430 path,
1431 inode,
1432 is_symlink,
1433 is_ignored: false,
1434 };
1435
1436 Ok(Some(entry))
1437 }
1438
1439 fn char_bag(&self, path: &Path) -> CharBag {
1440 let mut result = self.root_char_bag;
1441 result.extend(
1442 path.to_string_lossy()
1443 .chars()
1444 .map(|c| c.to_ascii_lowercase()),
1445 );
1446 result
1447 }
1448}
1449
/// A unit of work for the background scan: read the directory at `abs_path`
/// and populate its entries into the snapshot.
struct ScanJob {
    // Absolute path of the directory to scan.
    abs_path: PathBuf,
    // The same directory, relative to the worktree root.
    path: Arc<Path>,
    // Gitignore rules inherited from ancestor directories.
    ignore_stack: Arc<IgnoreStack>,
    // Sender half of the scan queue, used to enqueue subdirectories.
    scan_queue: crossbeam_channel::Sender<ScanJob>,
}
1456
/// A unit of work for ignore-status recomputation: re-evaluate the ignore
/// status of the direct children of `path`.
struct UpdateIgnoreStatusJob {
    // Directory whose children should be re-evaluated, worktree-relative.
    path: Arc<Path>,
    // Gitignore rules inherited from this directory's ancestors.
    ignore_stack: Arc<IgnoreStack>,
    // Sender half of the queue, used to enqueue jobs for subdirectories.
    ignore_queue: crossbeam_channel::Sender<UpdateIgnoreStatusJob>,
}
1462
/// Extension methods on `ModelHandle<Worktree>` for obtaining file handles
/// and (in tests) synchronizing with the FS event stream.
pub trait WorktreeHandle {
    /// Returns a task that resolves to a `FileHandle` for `path`, creating
    /// or reusing the shared per-path handle state.
    fn file(&self, path: impl AsRef<Path>, cx: &mut MutableAppContext) -> Task<Result<FileHandle>>;

    #[cfg(test)]
    fn flush_fs_events<'a>(
        &self,
        cx: &'a gpui::TestAppContext,
    ) -> futures::future::LocalBoxFuture<'a, ()>;
}
1472
impl WorktreeHandle for ModelHandle<Worktree> {
    /// Returns a `FileHandle` for `path`. For local worktrees the file is
    /// stat'ed on a background thread and a shared handle-state entry is
    /// created or reused; for remote worktrees the host is asked to open the
    /// file over RPC.
    fn file(&self, path: impl AsRef<Path>, cx: &mut MutableAppContext) -> Task<Result<FileHandle>> {
        let path = Arc::from(path.as_ref());
        let handle = self.clone();
        let tree = self.read(cx);
        match tree {
            Worktree::Local(tree) => {
                let worktree_id = handle.id();
                let abs_path = tree.absolutize(&path);
                cx.spawn(|cx| async move {
                    // Stat the file off the main thread to get its mtime.
                    let mtime = cx
                        .background_executor()
                        .spawn(async move { fs::metadata(&abs_path) })
                        .await?
                        .modified()?
                        .duration_since(UNIX_EPOCH)?;
                    let state = handle.read_with(&cx, |tree, _| {
                        // Reuse the live handle state for this path if one
                        // exists; otherwise create and register a new one.
                        let mut handles = tree.as_local().unwrap().handles.lock();
                        handles
                            .get(&path)
                            .and_then(Weak::upgrade)
                            .unwrap_or_else(|| {
                                let id =
                                    tree.as_local().unwrap().next_handle_id.fetch_add(1, SeqCst);
                                let handle_state = if let Some(entry) = tree.entry_for_path(&path) {
                                    FileHandleState {
                                        path: entry.path().clone(),
                                        is_deleted: false,
                                        mtime,
                                        worktree_id,
                                        id,
                                        rpc: None,
                                    }
                                } else {
                                    // Not in the snapshot yet: only treat it
                                    // as deleted if the scanner isn't still
                                    // on its way to this path.
                                    FileHandleState {
                                        path: path.clone(),
                                        is_deleted: !tree.path_is_pending(&path),
                                        mtime,
                                        worktree_id,
                                        id,
                                        rpc: None,
                                    }
                                };

                                let state = Arc::new(Mutex::new(handle_state.clone()));
                                handles.insert(path, Arc::downgrade(&state));
                                state
                            })
                    });
                    Ok(FileHandle {
                        worktree: handle.clone(),
                        state,
                    })
                })
            }
            Worktree::Remote(tree) => {
                let remote_worktree_id = tree.remote_id;
                let connection_id = tree.connection_id;
                let rpc = tree.rpc.clone();
                let handles = tree.handles.clone();
                cx.spawn(|cx| async move {
                    // Each path gets one async-mutex-guarded slot so that
                    // concurrent callers don't race to open the same file.
                    let state = handles
                        .lock()
                        .entry(path.clone())
                        .or_insert_with(|| Arc::new(AsyncMutex::new(Weak::new())))
                        .clone();

                    let mut state = state.lock().await;
                    if let Some(state) = Weak::upgrade(&state) {
                        Ok(FileHandle {
                            worktree: handle,
                            state,
                        })
                    } else {
                        // No live handle: ask the host to open the file.
                        let response = rpc
                            .request(
                                connection_id,
                                proto::OpenFile {
                                    worktree_id: remote_worktree_id as u64,
                                    path: path.to_string_lossy().to_string(),
                                },
                            )
                            .await?;
                        let is_deleted = handle.read_with(&cx, |tree, _| {
                            tree.entry_for_path(&path).is_none() && !tree.path_is_pending(&path)
                        });
                        let new_state = Arc::new(Mutex::new(FileHandleState {
                            path,
                            is_deleted,
                            mtime: Duration::from_secs(response.mtime),
                            worktree_id: remote_worktree_id,
                            id: response.id,
                            rpc: Some((connection_id, rpc)),
                        }));
                        *state = Arc::downgrade(&new_state);
                        Ok(FileHandle {
                            worktree: handle,
                            state: new_state,
                        })
                    }
                })
            }
        }
    }

    // The worktree's FS event stream sometimes delivers "redundant" events for FS changes that
    // occurred before the worktree was constructed. These events can cause the worktree to perform
    // extra directory scans, and emit extra scan-state notifications.
    //
    // This function mutates the worktree's directory and waits for those mutations to be picked up,
    // to ensure that all redundant FS events have already been processed.
    #[cfg(test)]
    fn flush_fs_events<'a>(
        &self,
        cx: &'a gpui::TestAppContext,
    ) -> futures::future::LocalBoxFuture<'a, ()> {
        use smol::future::FutureExt;

        let filename = "fs-event-sentinel";
        let root_path = cx.read(|cx| self.read(cx).abs_path.clone());
        let tree = self.clone();
        async move {
            // Create a sentinel file and wait for it to be observed...
            fs::write(root_path.join(filename), "").unwrap();
            tree.condition(&cx, |tree, _| tree.entry_for_path(filename).is_some())
                .await;

            // ...then delete it and wait for the deletion to be observed.
            fs::remove_file(root_path.join(filename)).unwrap();
            tree.condition(&cx, |tree, _| tree.entry_for_path(filename).is_none())
                .await;

            cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
                .await;
        }
        .boxed_local()
    }
}
1609
/// Iterator over a snapshot's file entries, seekable by file index — either
/// over all files or only the non-ignored ("visible") ones.
pub enum FileIter<'a> {
    All(Cursor<'a, Entry, FileCount, FileCount>),
    Visible(Cursor<'a, Entry, VisibleFileCount, VisibleFileCount>),
}
1614
impl<'a> FileIter<'a> {
    /// Iterates every file entry, starting at the `start`-th file.
    fn all(snapshot: &'a Snapshot, start: usize) -> Self {
        let mut cursor = snapshot.entries.cursor();
        cursor.seek(&FileCount(start), Bias::Right, &());
        Self::All(cursor)
    }

    /// Iterates only non-ignored file entries, starting at the `start`-th
    /// visible file.
    fn visible(snapshot: &'a Snapshot, start: usize) -> Self {
        let mut cursor = snapshot.entries.cursor();
        cursor.seek(&VisibleFileCount(start), Bias::Right, &());
        Self::Visible(cursor)
    }

    /// Advances the cursor to the next file, seeking by file count so that
    /// directory entries are skipped automatically.
    fn next_internal(&mut self) {
        match self {
            Self::All(cursor) => {
                let ix = *cursor.start();
                cursor.seek_forward(&FileCount(ix.0 + 1), Bias::Right, &());
            }
            Self::Visible(cursor) => {
                let ix = *cursor.start();
                cursor.seek_forward(&VisibleFileCount(ix.0 + 1), Bias::Right, &());
            }
        }
    }

    /// The entry currently under the cursor, if any.
    fn item(&self) -> Option<&'a Entry> {
        match self {
            Self::All(cursor) => cursor.item(),
            Self::Visible(cursor) => cursor.item(),
        }
    }
}
1648
1649impl<'a> Iterator for FileIter<'a> {
1650 type Item = &'a Entry;
1651
1652 fn next(&mut self) -> Option<Self::Item> {
1653 if let Some(entry) = self.item() {
1654 self.next_internal();
1655 Some(entry)
1656 } else {
1657 None
1658 }
1659 }
1660}
1661
/// Iterator over the direct children of `parent_path`, skipping the
/// contents of any child directories.
struct ChildEntriesIter<'a> {
    parent_path: &'a Path,
    cursor: Cursor<'a, Entry, PathSearch<'a>, ()>,
}
1666
1667impl<'a> ChildEntriesIter<'a> {
1668 fn new(parent_path: &'a Path, snapshot: &'a Snapshot) -> Self {
1669 let mut cursor = snapshot.entries.cursor();
1670 cursor.seek(&PathSearch::Exact(parent_path), Bias::Right, &());
1671 Self {
1672 parent_path,
1673 cursor,
1674 }
1675 }
1676}
1677
1678impl<'a> Iterator for ChildEntriesIter<'a> {
1679 type Item = &'a Entry;
1680
1681 fn next(&mut self) -> Option<Self::Item> {
1682 if let Some(item) = self.cursor.item() {
1683 if item.path().starts_with(self.parent_path) {
1684 self.cursor
1685 .seek_forward(&PathSearch::Successor(item.path()), Bias::Left, &());
1686 Some(item)
1687 } else {
1688 None
1689 }
1690 } else {
1691 None
1692 }
1693 }
1694}
1695
1696fn mounted_volume_paths() -> Vec<PathBuf> {
1697 unsafe {
1698 let mut stat_ptr: *mut libc::statfs = std::ptr::null_mut();
1699 let count = libc::getmntinfo(&mut stat_ptr as *mut _, libc::MNT_WAIT);
1700 if count >= 0 {
1701 std::slice::from_raw_parts(stat_ptr, count as usize)
1702 .iter()
1703 .map(|stat| {
1704 PathBuf::from(OsStr::from_bytes(
1705 CStr::from_ptr(&stat.f_mntonname[0]).to_bytes(),
1706 ))
1707 })
1708 .collect()
1709 } else {
1710 panic!("failed to run getmntinfo");
1711 }
1712 }
1713}
1714
1715#[cfg(test)]
1716mod tests {
1717 use super::*;
1718 use crate::editor::Buffer;
1719 use crate::test::*;
1720 use anyhow::Result;
1721 use rand::prelude::*;
1722 use serde_json::json;
1723 use std::env;
1724 use std::fmt::Write;
1725 use std::os::unix;
1726 use std::time::SystemTime;
1727
    // Verifies that the initial scan resolves symlinks (both as the worktree
    // root and inside the tree) and that fuzzy path matching works against
    // the resulting snapshot.
    #[gpui::test]
    async fn test_populate_and_search(mut cx: gpui::TestAppContext) {
        let dir = temp_tree(json!({
            "root": {
                "apple": "",
                "banana": {
                    "carrot": {
                        "date": "",
                        "endive": "",
                    }
                },
                "fennel": {
                    "grape": "",
                }
            }
        }));

        // Open the worktree through a symlink to the root, and add a
        // symlinked directory inside the tree, to exercise symlink handling.
        let root_link_path = dir.path().join("root_link");
        unix::fs::symlink(&dir.path().join("root"), &root_link_path).unwrap();
        unix::fs::symlink(
            &dir.path().join("root/fennel"),
            &dir.path().join("root/finnochio"),
        )
        .unwrap();

        let tree = cx.add_model(|cx| Worktree::local(root_link_path, cx));

        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;
        cx.read(|cx| {
            let tree = tree.read(cx);
            assert_eq!(tree.file_count(), 5);

            // The symlinked directory resolves to the same inodes as its
            // target.
            assert_eq!(
                tree.inode_for_path("fennel/grape"),
                tree.inode_for_path("finnochio/grape")
            );

            // Fuzzy-match "bna" against all paths in the snapshot.
            let results = match_paths(
                Some(tree.snapshot()).iter(),
                "bna",
                false,
                false,
                false,
                10,
                Default::default(),
                cx.thread_pool().clone(),
            )
            .into_iter()
            .map(|result| result.path)
            .collect::<Vec<Arc<Path>>>();
            assert_eq!(
                results,
                vec![
                    PathBuf::from("banana/carrot/date").into(),
                    PathBuf::from("banana/carrot/endive").into(),
                ]
            );
        })
    }
1788
    // Saving a buffer through the worktree and loading the file's history
    // back should round-trip the buffer's text exactly.
    #[gpui::test]
    async fn test_save_file(mut cx: gpui::TestAppContext) {
        let dir = temp_tree(json!({
            "file1": "the old contents",
        }));

        let tree = cx.add_model(|cx| Worktree::local(dir.path(), cx));
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;
        cx.read(|cx| assert_eq!(tree.read(cx).file_count(), 1));

        // Use a large buffer so the save is more than a trivial write.
        let buffer = cx.add_model(|cx| Buffer::new(1, "a line of text.\n".repeat(10 * 1024), cx));

        let path = tree.update(&mut cx, |tree, cx| {
            let path = tree.files(0).next().unwrap().path().clone();
            assert_eq!(path.file_name().unwrap(), "file1");
            smol::block_on(tree.save(&path, buffer.read(cx).snapshot().text(), cx.as_ref()))
                .unwrap();
            path
        });

        // Loading the file back should produce exactly what was saved.
        let file = cx.update(|cx| tree.file(&path, cx)).await.unwrap();
        let history = cx.read(|cx| file.load_history(cx)).await.unwrap();
        cx.read(|cx| {
            assert_eq!(history.base_text.as_ref(), buffer.read(cx).text());
        });
    }
1816
    // A worktree rooted at a single file (not a directory) should still
    // support saving and loading that file, addressed by the empty path.
    #[gpui::test]
    async fn test_save_in_single_file_worktree(mut cx: gpui::TestAppContext) {
        let dir = temp_tree(json!({
            "file1": "the old contents",
        }));

        // The worktree root is the file itself, not its parent directory.
        let tree = cx.add_model(|cx| Worktree::local(dir.path().join("file1"), cx));
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;
        cx.read(|cx| assert_eq!(tree.read(cx).file_count(), 1));

        let buffer = cx.add_model(|cx| Buffer::new(1, "a line of text.\n".repeat(10 * 1024), cx));

        // In a single-file worktree the file is addressed by the empty path.
        let file = cx.update(|cx| tree.file("", cx)).await.unwrap();
        cx.update(|cx| {
            assert_eq!(file.path().file_name(), None);
            smol::block_on(file.save(buffer.read(cx).snapshot().text(), cx.as_ref())).unwrap();
        });

        let history = cx.read(|cx| file.load_history(cx)).await.unwrap();
        cx.read(|cx| assert_eq!(history.base_text.as_ref(), buffer.read(cx).text()));
    }
1839
    // Exercises the incremental rescan path: renames, deletions, and a
    // directory move should be reflected in the snapshot, and open file
    // handles should follow their files across renames.
    #[gpui::test]
    async fn test_rescan_simple(mut cx: gpui::TestAppContext) {
        let dir = temp_tree(json!({
            "a": {
                "file1": "",
                "file2": "",
                "file3": "",
            },
            "b": {
                "c": {
                    "file4": "",
                    "file5": "",
                }
            }
        }));

        let tree = cx.add_model(|cx| Worktree::local(dir.path(), cx));
        let file2 = cx.update(|cx| tree.file("a/file2", cx)).await.unwrap();
        let file3 = cx.update(|cx| tree.file("a/file3", cx)).await.unwrap();
        let file4 = cx.update(|cx| tree.file("b/c/file4", cx)).await.unwrap();
        let file5 = cx.update(|cx| tree.file("b/c/file5", cx)).await.unwrap();
        let non_existent_file = cx.update(|cx| tree.file("a/file_x", cx)).await.unwrap();

        // After scanning, the worktree knows which files exist and which don't.
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;
        assert!(!file2.is_deleted());
        assert!(!file3.is_deleted());
        assert!(!file4.is_deleted());
        assert!(!file5.is_deleted());
        assert!(non_existent_file.is_deleted());

        // Perform a batch of renames and deletions, then wait for the
        // scanner to observe them.
        tree.flush_fs_events(&cx).await;
        std::fs::rename(dir.path().join("a/file3"), dir.path().join("b/c/file3")).unwrap();
        std::fs::remove_file(dir.path().join("b/c/file5")).unwrap();
        std::fs::rename(dir.path().join("b/c"), dir.path().join("d")).unwrap();
        std::fs::rename(dir.path().join("a/file2"), dir.path().join("a/file2.new")).unwrap();
        tree.flush_fs_events(&cx).await;

        cx.read(|cx| {
            assert_eq!(
                tree.read(cx)
                    .paths()
                    .map(|p| p.to_str().unwrap())
                    .collect::<Vec<_>>(),
                vec![
                    "a",
                    "a/file1",
                    "a/file2.new",
                    "b",
                    "d",
                    "d/file3",
                    "d/file4"
                ]
            );

            // Open handles follow their files across renames, including the
            // rename of an ancestor directory (b/c -> d).
            assert_eq!(file2.path().to_str().unwrap(), "a/file2.new");
            assert_eq!(file4.path().as_ref(), Path::new("d/file4"));
            assert_eq!(file5.path().as_ref(), Path::new("d/file5"));
            assert!(!file2.is_deleted());
            assert!(!file4.is_deleted());
            assert!(file5.is_deleted());

            // Right now, this rename isn't detected because the target path
            // no longer exists on the file system by the time we process the
            // rename event.
            assert_eq!(file3.path().as_ref(), Path::new("a/file3"));
            assert!(file3.is_deleted());
        });
    }
1910
    // Verifies that .gitignore rules are applied during the initial scan and
    // re-applied to files created afterward.
    #[gpui::test]
    async fn test_rescan_with_gitignore(mut cx: gpui::TestAppContext) {
        let dir = temp_tree(json!({
            ".git": {},
            ".gitignore": "ignored-dir\n",
            "tracked-dir": {
                "tracked-file1": "tracked contents",
            },
            "ignored-dir": {
                "ignored-file1": "ignored contents",
            }
        }));

        let tree = cx.add_model(|cx| Worktree::local(dir.path(), cx));
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;
        tree.flush_fs_events(&cx).await;
        // The initial scan applies the root .gitignore rules.
        cx.read(|cx| {
            let tree = tree.read(cx);
            let tracked = tree.entry_for_path("tracked-dir/tracked-file1").unwrap();
            let ignored = tree.entry_for_path("ignored-dir/ignored-file1").unwrap();
            assert_eq!(tracked.is_ignored(), false);
            assert_eq!(ignored.is_ignored(), true);
        });

        // Files created after the scan inherit their directory's status, and
        // .git itself is reported as ignored.
        fs::write(dir.path().join("tracked-dir/tracked-file2"), "").unwrap();
        fs::write(dir.path().join("ignored-dir/ignored-file2"), "").unwrap();
        tree.flush_fs_events(&cx).await;
        cx.read(|cx| {
            let tree = tree.read(cx);
            let dot_git = tree.entry_for_path(".git").unwrap();
            let tracked = tree.entry_for_path("tracked-dir/tracked-file2").unwrap();
            let ignored = tree.entry_for_path("ignored-dir/ignored-file2").unwrap();
            assert_eq!(tracked.is_ignored(), false);
            assert_eq!(ignored.is_ignored(), true);
            assert_eq!(dot_git.is_ignored(), true);
        });
    }
1949
    // A path is "pending" when it is, or lies beneath, a PendingDir entry —
    // a directory whose contents haven't been scanned yet.
    #[test]
    fn test_path_is_pending() {
        let mut snapshot = Snapshot {
            id: 0,
            scan_id: 0,
            abs_path: Path::new("").into(),
            entries: Default::default(),
            ignores: Default::default(),
            root_name: Default::default(),
        };

        // "b/c" is a PendingDir; its siblings are fully-scanned Dirs.
        snapshot.entries.edit(
            vec![
                Edit::Insert(Entry {
                    path: Path::new("b").into(),
                    kind: EntryKind::Dir,
                    inode: 0,
                    is_ignored: false,
                    is_symlink: false,
                }),
                Edit::Insert(Entry {
                    path: Path::new("b/a").into(),
                    kind: EntryKind::Dir,
                    inode: 0,
                    is_ignored: false,
                    is_symlink: false,
                }),
                Edit::Insert(Entry {
                    path: Path::new("b/c").into(),
                    kind: EntryKind::PendingDir,
                    inode: 0,
                    is_ignored: false,
                    is_symlink: false,
                }),
                Edit::Insert(Entry {
                    path: Path::new("b/e").into(),
                    kind: EntryKind::Dir,
                    inode: 0,
                    is_ignored: false,
                    is_symlink: false,
                }),
            ],
            &(),
        );

        // Only the pending dir itself and paths beneath it are pending.
        assert!(!snapshot.path_is_pending("b/a"));
        assert!(!snapshot.path_is_pending("b/b"));
        assert!(snapshot.path_is_pending("b/c"));
        assert!(snapshot.path_is_pending("b/c/x"));
        assert!(!snapshot.path_is_pending("b/d"));
        assert!(!snapshot.path_is_pending("b/e"));
    }
2002
    // The root filesystem is always mounted, so it must appear in the list.
    #[test]
    fn test_mounted_volume_paths() {
        let paths = mounted_volume_paths();
        assert!(paths.contains(&"/".into()));
    }
2008
    // Randomized test: applies random FS mutations, delivers the resulting
    // events to the scanner in random-sized batches, and checks that the
    // incrementally-updated snapshot converges to the same state a fresh
    // scan of the final directory would produce.
    #[test]
    fn test_random() {
        // Iteration counts and the RNG seed can be overridden via env vars
        // to reproduce failures deterministically.
        let iterations = env::var("ITERATIONS")
            .map(|i| i.parse().unwrap())
            .unwrap_or(100);
        let operations = env::var("OPERATIONS")
            .map(|o| o.parse().unwrap())
            .unwrap_or(40);
        let initial_entries = env::var("INITIAL_ENTRIES")
            .map(|o| o.parse().unwrap())
            .unwrap_or(20);
        let seeds = if let Ok(seed) = env::var("SEED").map(|s| s.parse().unwrap()) {
            seed..seed + 1
        } else {
            0..iterations
        };

        for seed in seeds {
            dbg!(seed);
            let mut rng = StdRng::seed_from_u64(seed);

            // Build a random initial tree on disk.
            let root_dir = tempdir::TempDir::new(&format!("test-{}", seed)).unwrap();
            for _ in 0..initial_entries {
                randomly_mutate_tree(root_dir.path(), 1.0, &mut rng).unwrap();
            }
            log::info!("Generated initial tree");

            let (notify_tx, _notify_rx) = smol::channel::unbounded();
            let mut scanner = BackgroundScanner::new(
                Arc::new(Mutex::new(Snapshot {
                    id: 0,
                    scan_id: 0,
                    abs_path: root_dir.path().into(),
                    entries: Default::default(),
                    ignores: Default::default(),
                    root_name: Default::default(),
                })),
                Arc::new(Mutex::new(Default::default())),
                notify_tx,
                0,
            );
            scanner.scan_dirs().unwrap();
            scanner.snapshot().check_invariants();

            // Interleave random mutations with event delivery in randomly
            // sized batches, checking invariants after each batch.
            let mut events = Vec::new();
            let mut mutations_len = operations;
            while mutations_len > 1 {
                if !events.is_empty() && rng.gen_bool(0.4) {
                    let len = rng.gen_range(0..=events.len());
                    let to_deliver = events.drain(0..len).collect::<Vec<_>>();
                    log::info!("Delivering events: {:#?}", to_deliver);
                    scanner.process_events(to_deliver);
                    scanner.snapshot().check_invariants();
                } else {
                    events.extend(randomly_mutate_tree(root_dir.path(), 0.6, &mut rng).unwrap());
                    mutations_len -= 1;
                }
            }
            log::info!("Quiescing: {:#?}", events);
            scanner.process_events(events);
            scanner.snapshot().check_invariants();

            // The incrementally-maintained snapshot must match a fresh scan
            // of the final directory contents.
            let (notify_tx, _notify_rx) = smol::channel::unbounded();
            let mut new_scanner = BackgroundScanner::new(
                Arc::new(Mutex::new(Snapshot {
                    id: 0,
                    scan_id: 0,
                    abs_path: root_dir.path().into(),
                    entries: Default::default(),
                    ignores: Default::default(),
                    root_name: Default::default(),
                })),
                Arc::new(Mutex::new(Default::default())),
                notify_tx,
                1,
            );
            new_scanner.scan_dirs().unwrap();
            assert_eq!(scanner.snapshot().to_vec(), new_scanner.snapshot().to_vec());
        }
    }
2089
    /// Performs one random mutation (create a file/dir, write a .gitignore,
    /// rename, or delete) under `root_path`, returning the FS events a
    /// watcher would have produced for it.
    fn randomly_mutate_tree(
        root_path: &Path,
        insertion_probability: f64,
        rng: &mut impl Rng,
    ) -> Result<Vec<fsevent::Event>> {
        let root_path = root_path.canonicalize().unwrap();
        let (dirs, files) = read_dir_recursive(root_path.clone());

        let mut events = Vec::new();
        let mut record_event = |path: PathBuf| {
            events.push(fsevent::Event {
                event_id: SystemTime::now()
                    .duration_since(UNIX_EPOCH)
                    .unwrap()
                    .as_secs(),
                flags: fsevent::StreamFlags::empty(),
                path,
            });
        };

        if (files.is_empty() && dirs.len() == 1) || rng.gen_bool(insertion_probability) {
            // Create a new file or directory in a randomly chosen location.
            let path = dirs.choose(rng).unwrap();
            let new_path = path.join(gen_name(rng));

            if rng.gen() {
                log::info!("Creating dir {:?}", new_path.strip_prefix(root_path)?);
                fs::create_dir(&new_path)?;
            } else {
                log::info!("Creating file {:?}", new_path.strip_prefix(root_path)?);
                fs::write(&new_path, "")?;
            }
            record_event(new_path);
        } else if rng.gen_bool(0.05) {
            // Write a .gitignore that ignores a random subset of the chosen
            // directory's descendants.
            let ignore_dir_path = dirs.choose(rng).unwrap();
            let ignore_path = ignore_dir_path.join(&*GITIGNORE);

            let (subdirs, subfiles) = read_dir_recursive(ignore_dir_path.clone());
            let files_to_ignore = {
                let len = rng.gen_range(0..=subfiles.len());
                subfiles.choose_multiple(rng, len)
            };
            let dirs_to_ignore = {
                let len = rng.gen_range(0..subdirs.len());
                subdirs.choose_multiple(rng, len)
            };

            let mut ignore_contents = String::new();
            for path_to_ignore in files_to_ignore.chain(dirs_to_ignore) {
                write!(
                    ignore_contents,
                    "{}\n",
                    path_to_ignore
                        .strip_prefix(&ignore_dir_path)?
                        .to_str()
                        .unwrap()
                )
                .unwrap();
            }
            log::info!(
                "Creating {:?} with contents:\n{}",
                ignore_path.strip_prefix(&root_path)?,
                ignore_contents
            );
            fs::write(&ignore_path, ignore_contents).unwrap();
            record_event(ignore_path);
        } else {
            // Rename or delete a random existing file or directory; dirs[0]
            // is the root itself and is never chosen.
            let old_path = {
                let file_path = files.choose(rng);
                let dir_path = dirs[1..].choose(rng);
                file_path.into_iter().chain(dir_path).choose(rng).unwrap()
            };

            let is_rename = rng.gen();
            if is_rename {
                // Pick a destination that isn't inside the thing being moved.
                let new_path_parent = dirs
                    .iter()
                    .filter(|d| !d.starts_with(old_path))
                    .choose(rng)
                    .unwrap();

                // Occasionally replace an existing directory wholesale.
                let overwrite_existing_dir =
                    !old_path.starts_with(&new_path_parent) && rng.gen_bool(0.3);
                let new_path = if overwrite_existing_dir {
                    fs::remove_dir_all(&new_path_parent).ok();
                    new_path_parent.to_path_buf()
                } else {
                    new_path_parent.join(gen_name(rng))
                };

                log::info!(
                    "Renaming {:?} to {}{:?}",
                    old_path.strip_prefix(&root_path)?,
                    if overwrite_existing_dir {
                        "overwrite "
                    } else {
                        ""
                    },
                    new_path.strip_prefix(&root_path)?
                );
                fs::rename(&old_path, &new_path)?;
                record_event(old_path.clone());
                record_event(new_path);
            } else if old_path.is_dir() {
                let (dirs, files) = read_dir_recursive(old_path.clone());

                log::info!("Deleting dir {:?}", old_path.strip_prefix(&root_path)?);
                fs::remove_dir_all(&old_path).unwrap();
                // Emit an event for every deleted path, files first.
                for file in files {
                    record_event(file);
                }
                for dir in dirs {
                    record_event(dir);
                }
            } else {
                log::info!("Deleting file {:?}", old_path.strip_prefix(&root_path)?);
                fs::remove_file(old_path).unwrap();
                record_event(old_path.clone());
            }
        }

        Ok(events)
    }
2212
2213 fn read_dir_recursive(path: PathBuf) -> (Vec<PathBuf>, Vec<PathBuf>) {
2214 let child_entries = fs::read_dir(&path).unwrap();
2215 let mut dirs = vec![path];
2216 let mut files = Vec::new();
2217 for child_entry in child_entries {
2218 let child_path = child_entry.unwrap().path();
2219 if child_path.is_dir() {
2220 let (child_dirs, child_files) = read_dir_recursive(child_path);
2221 dirs.extend(child_dirs);
2222 files.extend(child_files);
2223 } else {
2224 files.push(child_path);
2225 }
2226 }
2227 (dirs, files)
2228 }
2229
2230 fn gen_name(rng: &mut impl Rng) -> String {
2231 (0..6)
2232 .map(|_| rng.sample(rand::distributions::Alphanumeric))
2233 .map(char::from)
2234 .collect()
2235 }
2236
    impl Snapshot {
        /// Asserts internal consistency of a snapshot: the file-count
        /// cursors agree with the entries, re-deriving every path via
        /// `child_entries` reproduces the tree's own traversal order, and
        /// every registered .gitignore still has a corresponding entry.
        fn check_invariants(&self) {
            let mut files = self.files(0);
            let mut visible_files = self.visible_files(0);
            for entry in self.entries.cursor::<(), ()>() {
                if entry.is_file() {
                    assert_eq!(files.next().unwrap().inode(), entry.inode);
                    if !entry.is_ignored {
                        assert_eq!(visible_files.next().unwrap().inode(), entry.inode);
                    }
                }
            }
            assert!(files.next().is_none());
            assert!(visible_files.next().is_none());

            // Re-derive every path by walking `child_entries` from the root...
            let mut bfs_paths = Vec::new();
            let mut stack = vec![Path::new("")];
            while let Some(path) = stack.pop() {
                bfs_paths.push(path);
                let ix = stack.len();
                for child_entry in self.child_entries(path) {
                    stack.insert(ix, child_entry.path());
                }
            }

            // ...and check that it matches the entry tree's own order.
            let dfs_paths = self
                .entries
                .cursor::<(), ()>()
                .map(|e| e.path().as_ref())
                .collect::<Vec<_>>();
            assert_eq!(bfs_paths, dfs_paths);

            for (ignore_parent_path, _) in &self.ignores {
                assert!(self.entry_for_path(ignore_parent_path).is_some());
                assert!(self
                    .entry_for_path(ignore_parent_path.join(&*GITIGNORE))
                    .is_some());
            }
        }

        /// Flattens the snapshot to a sorted (path, inode, is_ignored) list,
        /// used to compare the results of two scanners.
        fn to_vec(&self) -> Vec<(&Path, u64, bool)> {
            let mut paths = Vec::new();
            for entry in self.entries.cursor::<(), ()>() {
                paths.push((entry.path().as_ref(), entry.inode(), entry.is_ignored()));
            }
            paths.sort_by(|a, b| a.0.cmp(&b.0));
            paths
        }
    }
2286}