1use crate::{ProjectEntryId, RemoveOptions};
2
3use super::{
4 fs::{self, Fs},
5 ignore::IgnoreStack,
6 DiagnosticSummary,
7};
8use ::ignore::gitignore::{Gitignore, GitignoreBuilder};
9use anyhow::{anyhow, Context, Result};
10use client::{proto, Client};
11use clock::ReplicaId;
12use collections::{HashMap, VecDeque};
13use futures::{
14 channel::{
15 mpsc::{self, UnboundedSender},
16 oneshot,
17 },
18 Stream, StreamExt,
19};
20use fuzzy::CharBag;
21use gpui::{
22 executor, AppContext, AsyncAppContext, Entity, ModelContext, ModelHandle, MutableAppContext,
23 Task,
24};
25use language::{
26 proto::{deserialize_version, serialize_version},
27 Buffer, DiagnosticEntry, PointUtf16, Rope,
28};
29use lazy_static::lazy_static;
30use parking_lot::Mutex;
31use postage::{
32 prelude::{Sink as _, Stream as _},
33 watch,
34};
35use smol::channel::{self, Sender};
36use std::{
37 any::Any,
38 cmp::{self, Ordering},
39 convert::TryFrom,
40 ffi::{OsStr, OsString},
41 fmt,
42 future::Future,
43 ops::{Deref, DerefMut},
44 os::unix::prelude::{OsStrExt, OsStringExt},
45 path::{Path, PathBuf},
46 sync::{atomic::AtomicUsize, Arc},
47 task::Poll,
48 time::{Duration, SystemTime},
49};
50use sum_tree::{Bias, Edit, SeekTarget, SumTree, TreeMap};
51use util::{ResultExt, TryFutureExt};
52
53lazy_static! {
54 static ref GITIGNORE: &'static OsStr = OsStr::new(".gitignore");
55}
56
57#[derive(Copy, Clone, PartialEq, Eq, Debug, Hash, PartialOrd, Ord)]
58pub struct WorktreeId(usize);
59
60pub enum Worktree {
61 Local(LocalWorktree),
62 Remote(RemoteWorktree),
63}
64
65pub struct LocalWorktree {
66 snapshot: LocalSnapshot,
67 background_snapshot: Arc<Mutex<LocalSnapshot>>,
68 last_scan_state_rx: watch::Receiver<ScanState>,
69 _background_scanner_task: Option<Task<()>>,
70 poll_task: Option<Task<()>>,
71 share: Option<ShareState>,
72 diagnostics: HashMap<Arc<Path>, Vec<DiagnosticEntry<PointUtf16>>>,
73 diagnostic_summaries: TreeMap<PathKey, DiagnosticSummary>,
74 client: Arc<Client>,
75 fs: Arc<dyn Fs>,
76 visible: bool,
77}
78
79pub struct RemoteWorktree {
80 pub snapshot: Snapshot,
81 pub(crate) background_snapshot: Arc<Mutex<Snapshot>>,
82 project_id: u64,
83 client: Arc<Client>,
84 updates_tx: Option<UnboundedSender<proto::UpdateWorktree>>,
85 snapshot_subscriptions: VecDeque<(usize, oneshot::Sender<()>)>,
86 replica_id: ReplicaId,
87 diagnostic_summaries: TreeMap<PathKey, DiagnosticSummary>,
88 visible: bool,
89}
90
91#[derive(Clone)]
92pub struct Snapshot {
93 id: WorktreeId,
94 root_name: String,
95 root_char_bag: CharBag,
96 entries_by_path: SumTree<Entry>,
97 entries_by_id: SumTree<PathEntry>,
98 scan_id: usize,
99 is_complete: bool,
100}
101
102#[derive(Clone)]
103pub struct LocalSnapshot {
104 abs_path: Arc<Path>,
105 ignores: HashMap<Arc<Path>, (Arc<Gitignore>, usize)>,
106 removed_entry_ids: HashMap<u64, ProjectEntryId>,
107 next_entry_id: Arc<AtomicUsize>,
108 snapshot: Snapshot,
109}
110
111impl Deref for LocalSnapshot {
112 type Target = Snapshot;
113
114 fn deref(&self) -> &Self::Target {
115 &self.snapshot
116 }
117}
118
119impl DerefMut for LocalSnapshot {
120 fn deref_mut(&mut self) -> &mut Self::Target {
121 &mut self.snapshot
122 }
123}
124
125#[derive(Clone, Debug)]
126enum ScanState {
127 Idle,
128 Scanning,
129 Err(Arc<anyhow::Error>),
130}
131
132struct ShareState {
133 project_id: u64,
134 snapshots_tx: Sender<LocalSnapshot>,
135 _maintain_remote_snapshot: Option<Task<Option<()>>>,
136}
137
138pub enum Event {
139 UpdatedEntries,
140}
141
142impl Entity for Worktree {
143 type Event = Event;
144}
145
146impl Worktree {
147 pub async fn local(
148 client: Arc<Client>,
149 path: impl Into<Arc<Path>>,
150 visible: bool,
151 fs: Arc<dyn Fs>,
152 next_entry_id: Arc<AtomicUsize>,
153 cx: &mut AsyncAppContext,
154 ) -> Result<ModelHandle<Self>> {
155 let (tree, scan_states_tx) =
156 LocalWorktree::new(client, path, visible, fs.clone(), next_entry_id, cx).await?;
157 tree.update(cx, |tree, cx| {
158 let tree = tree.as_local_mut().unwrap();
159 let abs_path = tree.abs_path().clone();
160 let background_snapshot = tree.background_snapshot.clone();
161 let background = cx.background().clone();
162 tree._background_scanner_task = Some(cx.background().spawn(async move {
163 let events = fs.watch(&abs_path, Duration::from_millis(100)).await;
164 let scanner =
165 BackgroundScanner::new(background_snapshot, scan_states_tx, fs, background);
166 scanner.run(events).await;
167 }));
168 });
169 Ok(tree)
170 }
171
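    // A remote worktree is driven by two detached tasks: a background task applies each
    // incoming `proto::UpdateWorktree` to `background_snapshot` and signals
    // `snapshot_updated_tx`, while a foreground task copies that snapshot into the model via
    // `poll_snapshot` and completes any `snapshot_subscriptions` whose scan id has been
    // observed (see `observed_snapshot`).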
172 pub fn remote(
173 project_remote_id: u64,
174 replica_id: ReplicaId,
175 worktree: proto::WorktreeMetadata,
176 client: Arc<Client>,
177 cx: &mut MutableAppContext,
178 ) -> ModelHandle<Self> {
179 let remote_id = worktree.id;
180 let root_char_bag: CharBag = worktree
181 .root_name
182 .chars()
183 .map(|c| c.to_ascii_lowercase())
184 .collect();
185 let root_name = worktree.root_name.clone();
186 let visible = worktree.visible;
187 let snapshot = Snapshot {
188 id: WorktreeId(remote_id as usize),
189 root_name,
190 root_char_bag,
191 entries_by_path: Default::default(),
192 entries_by_id: Default::default(),
193 scan_id: 0,
194 is_complete: false,
195 };
196
197 let (updates_tx, mut updates_rx) = mpsc::unbounded();
198 let background_snapshot = Arc::new(Mutex::new(snapshot.clone()));
199 let (mut snapshot_updated_tx, mut snapshot_updated_rx) = watch::channel();
200 let worktree_handle = cx.add_model(|_: &mut ModelContext<Worktree>| {
201 Worktree::Remote(RemoteWorktree {
202 project_id: project_remote_id,
203 replica_id,
204 snapshot: snapshot.clone(),
205 background_snapshot: background_snapshot.clone(),
206 updates_tx: Some(updates_tx),
207 snapshot_subscriptions: Default::default(),
208 client: client.clone(),
209 diagnostic_summaries: Default::default(),
210 visible,
211 })
212 });
213
214 cx.background()
215 .spawn(async move {
216 while let Some(update) = updates_rx.next().await {
217 if let Err(error) = background_snapshot.lock().apply_remote_update(update) {
218 log::error!("error applying worktree update: {}", error);
219 }
220 snapshot_updated_tx.send(()).await.ok();
221 }
222 })
223 .detach();
224
225 cx.spawn(|mut cx| {
226 let this = worktree_handle.downgrade();
227 async move {
228 while let Some(_) = snapshot_updated_rx.recv().await {
229 if let Some(this) = this.upgrade(&cx) {
230 this.update(&mut cx, |this, cx| {
231 this.poll_snapshot(cx);
232 let this = this.as_remote_mut().unwrap();
233 while let Some((scan_id, _)) = this.snapshot_subscriptions.front() {
234 if this.observed_snapshot(*scan_id) {
235 let (_, tx) = this.snapshot_subscriptions.pop_front().unwrap();
236 let _ = tx.send(());
237 } else {
238 break;
239 }
240 }
241 });
242 } else {
243 break;
244 }
245 }
246 }
247 })
248 .detach();
249
250 worktree_handle
251 }
252
253 pub fn as_local(&self) -> Option<&LocalWorktree> {
254 if let Worktree::Local(worktree) = self {
255 Some(worktree)
256 } else {
257 None
258 }
259 }
260
261 pub fn as_remote(&self) -> Option<&RemoteWorktree> {
262 if let Worktree::Remote(worktree) = self {
263 Some(worktree)
264 } else {
265 None
266 }
267 }
268
269 pub fn as_local_mut(&mut self) -> Option<&mut LocalWorktree> {
270 if let Worktree::Local(worktree) = self {
271 Some(worktree)
272 } else {
273 None
274 }
275 }
276
277 pub fn as_remote_mut(&mut self) -> Option<&mut RemoteWorktree> {
278 if let Worktree::Remote(worktree) = self {
279 Some(worktree)
280 } else {
281 None
282 }
283 }
284
285 pub fn is_local(&self) -> bool {
286 matches!(self, Worktree::Local(_))
287 }
288
289 pub fn is_remote(&self) -> bool {
290 !self.is_local()
291 }
292
293 pub fn snapshot(&self) -> Snapshot {
294 match self {
295 Worktree::Local(worktree) => worktree.snapshot().snapshot,
296 Worktree::Remote(worktree) => worktree.snapshot(),
297 }
298 }
299
300 pub fn scan_id(&self) -> usize {
301 match self {
302 Worktree::Local(worktree) => worktree.snapshot.scan_id,
303 Worktree::Remote(worktree) => worktree.snapshot.scan_id,
304 }
305 }
306
307 pub fn is_visible(&self) -> bool {
308 match self {
309 Worktree::Local(worktree) => worktree.visible,
310 Worktree::Remote(worktree) => worktree.visible,
311 }
312 }
313
314 pub fn replica_id(&self) -> ReplicaId {
315 match self {
316 Worktree::Local(_) => 0,
317 Worktree::Remote(worktree) => worktree.replica_id,
318 }
319 }
320
321 pub fn diagnostic_summaries<'a>(
322 &'a self,
323 ) -> impl Iterator<Item = (Arc<Path>, DiagnosticSummary)> + 'a {
324 match self {
325 Worktree::Local(worktree) => &worktree.diagnostic_summaries,
326 Worktree::Remote(worktree) => &worktree.diagnostic_summaries,
327 }
328 .iter()
329 .map(|(path, summary)| (path.0.clone(), summary.clone()))
330 }
331
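    // The background scanner mutates `background_snapshot` behind a mutex; `poll_snapshot`
    // copies it into the foreground `snapshot`. While a scan is still running, the local
    // variant re-schedules itself (after ~100ms on a real fs, or after a simulated random
    // delay on the fake fs used in tests) so entries appear incrementally; once scanning is
    // no longer in progress it emits `Event::UpdatedEntries`.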
332 fn poll_snapshot(&mut self, cx: &mut ModelContext<Self>) {
333 match self {
334 Self::Local(worktree) => {
335 let is_fake_fs = worktree.fs.is_fake();
336 worktree.snapshot = worktree.background_snapshot.lock().clone();
337 if worktree.is_scanning() {
338 if worktree.poll_task.is_none() {
339 worktree.poll_task = Some(cx.spawn_weak(|this, mut cx| async move {
340 if is_fake_fs {
341 #[cfg(any(test, feature = "test-support"))]
342 cx.background().simulate_random_delay().await;
343 } else {
344 smol::Timer::after(Duration::from_millis(100)).await;
345 }
346 if let Some(this) = this.upgrade(&cx) {
347 this.update(&mut cx, |this, cx| {
348 this.as_local_mut().unwrap().poll_task = None;
349 this.poll_snapshot(cx);
350 });
351 }
352 }));
353 }
354 } else {
355 worktree.poll_task.take();
356 cx.emit(Event::UpdatedEntries);
357 }
358 }
359 Self::Remote(worktree) => {
360 worktree.snapshot = worktree.background_snapshot.lock().clone();
361 cx.emit(Event::UpdatedEntries);
362 }
363 };
364
365 cx.notify();
366 }
367}
368
369impl LocalWorktree {
370 async fn new(
371 client: Arc<Client>,
372 path: impl Into<Arc<Path>>,
373 visible: bool,
374 fs: Arc<dyn Fs>,
375 next_entry_id: Arc<AtomicUsize>,
376 cx: &mut AsyncAppContext,
377 ) -> Result<(ModelHandle<Worktree>, UnboundedSender<ScanState>)> {
378 let abs_path = path.into();
379 let path: Arc<Path> = Arc::from(Path::new(""));
380
381 // Populate the snapshot's "root name", which is used for fuzzy matching. Whether the root
382 // entry is a file or a directory is determined from the metadata fetched below.
383 let root_name = abs_path
384 .file_name()
385 .map_or(String::new(), |f| f.to_string_lossy().to_string());
386 let root_char_bag = root_name.chars().map(|c| c.to_ascii_lowercase()).collect();
387 let metadata = fs
388 .metadata(&abs_path)
389 .await
390 .context("failed to stat worktree path")?;
391
392 let (scan_states_tx, mut scan_states_rx) = mpsc::unbounded();
393 let (mut last_scan_state_tx, last_scan_state_rx) = watch::channel_with(ScanState::Scanning);
394 let tree = cx.add_model(move |cx: &mut ModelContext<Worktree>| {
395 let mut snapshot = LocalSnapshot {
396 abs_path,
397 ignores: Default::default(),
398 removed_entry_ids: Default::default(),
399 next_entry_id,
400 snapshot: Snapshot {
401 id: WorktreeId::from_usize(cx.model_id()),
402 root_name: root_name.clone(),
403 root_char_bag,
404 entries_by_path: Default::default(),
405 entries_by_id: Default::default(),
406 scan_id: 0,
407 is_complete: true,
408 },
409 };
410 if let Some(metadata) = metadata {
411 let entry = Entry::new(
412 path.into(),
413 &metadata,
414 &snapshot.next_entry_id,
415 snapshot.root_char_bag,
416 );
417 snapshot.insert_entry(entry, fs.as_ref());
418 }
419
420 let tree = Self {
421 snapshot: snapshot.clone(),
422 background_snapshot: Arc::new(Mutex::new(snapshot)),
423 last_scan_state_rx,
424 _background_scanner_task: None,
425 share: None,
426 poll_task: None,
427 diagnostics: Default::default(),
428 diagnostic_summaries: Default::default(),
429 client,
430 fs,
431 visible,
432 };
433
434 cx.spawn_weak(|this, mut cx| async move {
435 while let Some(scan_state) = scan_states_rx.next().await {
436 if let Some(this) = this.upgrade(&cx) {
437 last_scan_state_tx.blocking_send(scan_state).ok();
438 this.update(&mut cx, |this, cx| {
439 this.poll_snapshot(cx);
440 this.as_local().unwrap().broadcast_snapshot()
441 })
442 .await;
443 } else {
444 break;
445 }
446 }
447 })
448 .detach();
449
450 Worktree::Local(tree)
451 });
452
453 Ok((tree, scan_states_tx))
454 }
455
456 pub fn contains_abs_path(&self, path: &Path) -> bool {
457 path.starts_with(&self.abs_path)
458 }
459
460 fn absolutize(&self, path: &Path) -> PathBuf {
461 if path.file_name().is_some() {
462 self.abs_path.join(path)
463 } else {
464 self.abs_path.to_path_buf()
465 }
466 }
467
468 pub(crate) fn load_buffer(
469 &mut self,
470 path: &Path,
471 cx: &mut ModelContext<Worktree>,
472 ) -> Task<Result<ModelHandle<Buffer>>> {
473 let path = Arc::from(path);
474 cx.spawn(move |this, mut cx| async move {
475 let (file, contents) = this
476 .update(&mut cx, |t, cx| t.as_local().unwrap().load(&path, cx))
477 .await?;
478 Ok(cx.add_model(|cx| Buffer::from_file(0, contents, Arc::new(file), cx)))
479 })
480 }
481
482 pub fn diagnostics_for_path(&self, path: &Path) -> Option<Vec<DiagnosticEntry<PointUtf16>>> {
483 self.diagnostics.get(path).cloned()
484 }
485
486 pub fn update_diagnostics(
487 &mut self,
488 language_server_id: usize,
489 worktree_path: Arc<Path>,
490 diagnostics: Vec<DiagnosticEntry<PointUtf16>>,
491 _: &mut ModelContext<Worktree>,
492 ) -> Result<bool> {
493 self.diagnostics.remove(&worktree_path);
494 let old_summary = self
495 .diagnostic_summaries
496 .remove(&PathKey(worktree_path.clone()))
497 .unwrap_or_default();
498 let new_summary = DiagnosticSummary::new(language_server_id, &diagnostics);
499 if !new_summary.is_empty() {
500 self.diagnostic_summaries
501 .insert(PathKey(worktree_path.clone()), new_summary);
502 self.diagnostics.insert(worktree_path.clone(), diagnostics);
503 }
504
505 let updated = !old_summary.is_empty() || !new_summary.is_empty();
506 if updated {
507 if let Some(share) = self.share.as_ref() {
508 self.client
509 .send(proto::UpdateDiagnosticSummary {
510 project_id: share.project_id,
511 worktree_id: self.id().to_proto(),
512 summary: Some(proto::DiagnosticSummary {
513 path: worktree_path.to_string_lossy().to_string(),
514 language_server_id: language_server_id as u64,
515 error_count: new_summary.error_count as u32,
516 warning_count: new_summary.warning_count as u32,
517 }),
518 })
519 .log_err();
520 }
521 }
522
523 Ok(updated)
524 }
525
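    /// Returns a future that resolves once the background scan is no longer in the
    /// `Scanning` state.
    ///
    /// A minimal usage sketch (hypothetical caller code, not taken from this file), assuming
    /// a `ModelHandle<Worktree>` named `worktree` and an `AsyncAppContext` named `cx`:
    ///
    /// ```ignore
    /// let scan_complete = worktree.read_with(&cx, |tree, _| {
    ///     // `as_local` is `Some` because this worktree was opened with `Worktree::local`.
    ///     tree.as_local().unwrap().scan_complete()
    /// });
    /// scan_complete.await; // the initial scan has finished (or failed with `ScanState::Err`)
    /// ```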
526 pub fn scan_complete(&self) -> impl Future<Output = ()> {
527 let mut scan_state_rx = self.last_scan_state_rx.clone();
528 async move {
529 let mut scan_state = Some(scan_state_rx.borrow().clone());
530 while let Some(ScanState::Scanning) = scan_state {
531 scan_state = scan_state_rx.recv().await;
532 }
533 }
534 }
535
536 fn is_scanning(&self) -> bool {
537 matches!(*self.last_scan_state_rx.borrow(), ScanState::Scanning)
538 }
543
544 pub fn snapshot(&self) -> LocalSnapshot {
545 self.snapshot.clone()
546 }
547
548 pub fn metadata_proto(&self) -> proto::WorktreeMetadata {
549 proto::WorktreeMetadata {
550 id: self.id().to_proto(),
551 root_name: self.root_name().to_string(),
552 visible: self.visible,
553 }
554 }
555
556 fn load(&self, path: &Path, cx: &mut ModelContext<Worktree>) -> Task<Result<(File, String)>> {
557 let handle = cx.handle();
558 let path = Arc::from(path);
559 let abs_path = self.absolutize(&path);
560 let fs = self.fs.clone();
561 cx.spawn(|this, mut cx| async move {
562 let text = fs.load(&abs_path).await?;
563 // Eagerly populate the snapshot with an updated entry for the loaded file
564 let entry = this
565 .update(&mut cx, |this, cx| {
566 this.as_local()
567 .unwrap()
568 .refresh_entry(path, abs_path, None, cx)
569 })
570 .await?;
571 this.update(&mut cx, |this, cx| this.poll_snapshot(cx));
572 Ok((
573 File {
574 entry_id: Some(entry.id),
575 worktree: handle,
576 path: entry.path,
577 mtime: entry.mtime,
578 is_local: true,
579 },
580 text,
581 ))
582 })
583 }
584
585 pub fn save_buffer_as(
586 &self,
587 buffer_handle: ModelHandle<Buffer>,
588 path: impl Into<Arc<Path>>,
589 cx: &mut ModelContext<Worktree>,
590 ) -> Task<Result<()>> {
591 let buffer = buffer_handle.read(cx);
592 let text = buffer.as_rope().clone();
593 let fingerprint = text.fingerprint();
594 let version = buffer.version();
595 let save = self.write_file(path, text, cx);
596 let handle = cx.handle();
597 cx.as_mut().spawn(|mut cx| async move {
598 let entry = save.await?;
599 let file = File {
600 entry_id: Some(entry.id),
601 worktree: handle,
602 path: entry.path,
603 mtime: entry.mtime,
604 is_local: true,
605 };
606
607 buffer_handle.update(&mut cx, |buffer, cx| {
608 buffer.did_save(version, fingerprint, file.mtime, Some(Arc::new(file)), cx);
609 });
610
611 Ok(())
612 })
613 }
614
615 pub fn create_entry(
616 &self,
617 path: impl Into<Arc<Path>>,
618 is_dir: bool,
619 cx: &mut ModelContext<Worktree>,
620 ) -> Task<Result<Entry>> {
621 self.write_entry_internal(
622 path,
623 if is_dir {
624 None
625 } else {
626 Some(Default::default())
627 },
628 cx,
629 )
630 }
631
632 pub fn write_file(
633 &self,
634 path: impl Into<Arc<Path>>,
635 text: Rope,
636 cx: &mut ModelContext<Worktree>,
637 ) -> Task<Result<Entry>> {
638 self.write_entry_internal(path, Some(text), cx)
639 }
640
641 pub fn delete_entry(
642 &self,
643 entry_id: ProjectEntryId,
644 cx: &mut ModelContext<Worktree>,
645 ) -> Option<Task<Result<()>>> {
646 let entry = self.entry_for_id(entry_id)?.clone();
647 let abs_path = self.absolutize(&entry.path);
648 let delete = cx.background().spawn({
649 let fs = self.fs.clone();
650 let abs_path = abs_path.clone();
651 async move {
652 if entry.is_file() {
653 fs.remove_file(&abs_path, Default::default()).await
654 } else {
655 fs.remove_dir(
656 &abs_path,
657 RemoveOptions {
658 recursive: true,
659 ignore_if_not_exists: false,
660 },
661 )
662 .await
663 }
664 }
665 });
666
667 Some(cx.spawn(|this, mut cx| async move {
668 delete.await?;
669 this.update(&mut cx, |this, _| {
670 let this = this.as_local_mut().unwrap();
671 let mut snapshot = this.background_snapshot.lock();
672 snapshot.delete_entry(entry_id);
673 });
674 this.update(&mut cx, |this, cx| {
675 this.poll_snapshot(cx);
676 this.as_local().unwrap().broadcast_snapshot()
677 })
678 .await;
679 Ok(())
680 }))
681 }
682
683 pub fn rename_entry(
684 &self,
685 entry_id: ProjectEntryId,
686 new_path: impl Into<Arc<Path>>,
687 cx: &mut ModelContext<Worktree>,
688 ) -> Option<Task<Result<Entry>>> {
689 let old_path = self.entry_for_id(entry_id)?.path.clone();
690 let new_path = new_path.into();
691 let abs_old_path = self.absolutize(&old_path);
692 let abs_new_path = self.absolutize(&new_path);
693 let rename = cx.background().spawn({
694 let fs = self.fs.clone();
695 let abs_new_path = abs_new_path.clone();
696 async move {
697 fs.rename(&abs_old_path, &abs_new_path, Default::default())
698 .await
699 }
700 });
701
702 Some(cx.spawn(|this, mut cx| async move {
703 rename.await?;
704 let entry = this
705 .update(&mut cx, |this, cx| {
706 this.as_local_mut().unwrap().refresh_entry(
707 new_path.clone(),
708 abs_new_path,
709 Some(old_path),
710 cx,
711 )
712 })
713 .await?;
714 this.update(&mut cx, |this, cx| {
715 this.poll_snapshot(cx);
716 this.as_local().unwrap().broadcast_snapshot()
717 })
718 .await;
719 Ok(entry)
720 }))
721 }
722
723 pub fn copy_entry(
724 &self,
725 entry_id: ProjectEntryId,
726 new_path: impl Into<Arc<Path>>,
727 cx: &mut ModelContext<Worktree>,
728 ) -> Option<Task<Result<Entry>>> {
729 let old_path = self.entry_for_id(entry_id)?.path.clone();
730 let new_path = new_path.into();
731 let abs_old_path = self.absolutize(&old_path);
732 let abs_new_path = self.absolutize(&new_path);
733 let copy = cx.background().spawn({
734 let fs = self.fs.clone();
735 let abs_new_path = abs_new_path.clone();
736 async move {
737 fs.copy(&abs_old_path, &abs_new_path, Default::default())
738 .await
739 }
740 });
741
742 Some(cx.spawn(|this, mut cx| async move {
743 copy.await?;
744 let entry = this
745 .update(&mut cx, |this, cx| {
746 this.as_local_mut().unwrap().refresh_entry(
747 new_path.clone(),
748 abs_new_path,
749 None,
750 cx,
751 )
752 })
753 .await?;
754 this.update(&mut cx, |this, cx| {
755 this.poll_snapshot(cx);
756 this.as_local().unwrap().broadcast_snapshot()
757 })
758 .await;
759 Ok(entry)
760 }))
761 }
762
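    // `create_entry`, `write_file`, `rename_entry`, `copy_entry`, and this helper all follow
    // the same pattern: perform the fs operation on the background executor, eagerly insert a
    // refreshed `Entry` into `background_snapshot` via `refresh_entry` (rather than waiting
    // for the fs watcher to notice the change), then `poll_snapshot` and `broadcast_snapshot`
    // so the foreground model and any collaborators observe the new state promptly.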
763 fn write_entry_internal(
764 &self,
765 path: impl Into<Arc<Path>>,
766 text_if_file: Option<Rope>,
767 cx: &mut ModelContext<Worktree>,
768 ) -> Task<Result<Entry>> {
769 let path = path.into();
770 let abs_path = self.absolutize(&path);
771 let write = cx.background().spawn({
772 let fs = self.fs.clone();
773 let abs_path = abs_path.clone();
774 async move {
775 if let Some(text) = text_if_file {
776 fs.save(&abs_path, &text).await
777 } else {
778 fs.create_dir(&abs_path).await
779 }
780 }
781 });
782
783 cx.spawn(|this, mut cx| async move {
784 write.await?;
785 let entry = this
786 .update(&mut cx, |this, cx| {
787 this.as_local_mut()
788 .unwrap()
789 .refresh_entry(path, abs_path, None, cx)
790 })
791 .await?;
792 this.update(&mut cx, |this, cx| {
793 this.poll_snapshot(cx);
794 this.as_local().unwrap().broadcast_snapshot()
795 })
796 .await;
797 Ok(entry)
798 })
799 }
800
801 fn refresh_entry(
802 &self,
803 path: Arc<Path>,
804 abs_path: PathBuf,
805 old_path: Option<Arc<Path>>,
806 cx: &mut ModelContext<Worktree>,
807 ) -> Task<Result<Entry>> {
808 let fs = self.fs.clone();
809 let root_char_bag;
810 let next_entry_id;
811 {
812 let snapshot = self.background_snapshot.lock();
813 root_char_bag = snapshot.root_char_bag;
814 next_entry_id = snapshot.next_entry_id.clone();
815 }
816 cx.spawn_weak(|this, mut cx| async move {
817 let mut entry = Entry::new(
818 path.clone(),
819 &fs.metadata(&abs_path)
820 .await?
821 .ok_or_else(|| anyhow!("could not read saved file metadata"))?,
822 &next_entry_id,
823 root_char_bag,
824 );
825
826 let this = this
827 .upgrade(&cx)
828 .ok_or_else(|| anyhow!("worktree was dropped"))?;
829 let (entry, snapshot, snapshots_tx) = this.read_with(&cx, |this, _| {
830 let this = this.as_local().unwrap();
831 let mut snapshot = this.background_snapshot.lock();
832 entry.is_ignored = snapshot
833 .ignore_stack_for_path(&path, entry.is_dir())
834 .is_path_ignored(&path, entry.is_dir());
835 if let Some(old_path) = old_path {
836 snapshot.remove_path(&old_path);
837 }
838 let entry = snapshot.insert_entry(entry, fs.as_ref());
839 snapshot.scan_id += 1;
840 let snapshots_tx = this.share.as_ref().map(|s| s.snapshots_tx.clone());
841 (entry, snapshot.clone(), snapshots_tx)
842 });
843 this.update(&mut cx, |this, cx| this.poll_snapshot(cx));
844
845 if let Some(snapshots_tx) = snapshots_tx {
846 snapshots_tx.send(snapshot).await.ok();
847 }
848
849 Ok(entry)
850 })
851 }
852
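    // Sharing publishes this worktree to collaborators: the first snapshot received on
    // `snapshots_to_send_rx` is sent as a full `proto::UpdateWorktree`, the current diagnostic
    // summaries are forwarded, and every later snapshot is diffed against the previous one via
    // `build_update`. The inner `try_recv` loop coalesces bursts of snapshots so only the most
    // recent one is diffed and sent.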
853 pub fn share(&mut self, project_id: u64, cx: &mut ModelContext<Worktree>) -> Task<Result<()>> {
854 let (share_tx, share_rx) = oneshot::channel();
855 let (snapshots_to_send_tx, snapshots_to_send_rx) =
856 smol::channel::unbounded::<LocalSnapshot>();
857 if self.share.is_some() {
858 let _ = share_tx.send(Ok(()));
859 } else {
860 let rpc = self.client.clone();
861 let worktree_id = cx.model_id() as u64;
862 let maintain_remote_snapshot = cx.background().spawn({
863 let rpc = rpc.clone();
864 let diagnostic_summaries = self.diagnostic_summaries.clone();
865 async move {
866 let mut prev_snapshot = match snapshots_to_send_rx.recv().await {
867 Ok(snapshot) => {
868 let update = proto::UpdateWorktree {
869 project_id,
870 worktree_id,
871 root_name: snapshot.root_name().to_string(),
872 updated_entries: snapshot
873 .entries_by_path
874 .iter()
875 .map(Into::into)
876 .collect(),
877 removed_entries: Default::default(),
878 scan_id: snapshot.scan_id as u64,
879 is_last_update: true,
880 };
881 if let Err(error) = send_worktree_update(&rpc, update).await {
882 let _ = share_tx.send(Err(error));
883 return Err(anyhow!("failed to send initial worktree update"));
884 } else {
885 let _ = share_tx.send(Ok(()));
886 snapshot
887 }
888 }
889 Err(error) => {
890 let _ = share_tx.send(Err(error.into()));
891 return Err(anyhow!("failed to send initial worktree update"));
892 }
893 };
894
895 for (path, summary) in diagnostic_summaries.iter() {
896 rpc.send(proto::UpdateDiagnosticSummary {
897 project_id,
898 worktree_id,
899 summary: Some(summary.to_proto(&path.0)),
900 })?;
901 }
902
903 while let Ok(mut snapshot) = snapshots_to_send_rx.recv().await {
904 while let Ok(newer_snapshot) = snapshots_to_send_rx.try_recv() {
905 snapshot = newer_snapshot;
906 }
907
908 send_worktree_update(
909 &rpc,
910 snapshot.build_update(&prev_snapshot, project_id, worktree_id, true),
911 )
912 .await?;
913 prev_snapshot = snapshot;
914 }
915
916 Ok::<_, anyhow::Error>(())
917 }
918 .log_err()
919 });
920 self.share = Some(ShareState {
921 project_id,
922 snapshots_tx: snapshots_to_send_tx.clone(),
923 _maintain_remote_snapshot: Some(maintain_remote_snapshot),
924 });
925 }
926
927 cx.spawn_weak(|this, cx| async move {
928 if let Some(this) = this.upgrade(&cx) {
929 this.read_with(&cx, |this, _| {
930 let this = this.as_local().unwrap();
931 let _ = snapshots_to_send_tx.try_send(this.snapshot());
932 });
933 }
934 share_rx
935 .await
936 .unwrap_or_else(|_| Err(anyhow!("share ended")))
937 })
938 }
939
940 pub fn unshare(&mut self) {
941 self.share.take();
942 }
943
944 pub fn is_shared(&self) -> bool {
945 self.share.is_some()
946 }
947
948 fn broadcast_snapshot(&self) -> impl Future<Output = ()> {
949 let mut to_send = None;
950 if !self.is_scanning() {
951 if let Some(share) = self.share.as_ref() {
952 to_send = Some((self.snapshot(), share.snapshots_tx.clone()));
953 }
954 }
955
956 async move {
957 if let Some((snapshot, snapshots_to_send_tx)) = to_send {
958 if let Err(err) = snapshots_to_send_tx.send(snapshot).await {
959 log::error!("error submitting snapshot to send: {}", err);
960 }
961 }
962 }
963 }
964}
965
966impl RemoteWorktree {
967 fn snapshot(&self) -> Snapshot {
968 self.snapshot.clone()
969 }
970
971 pub fn disconnected_from_host(&mut self) {
972 self.updates_tx.take();
973 }
974
975 pub fn update_from_remote(&mut self, update: proto::UpdateWorktree) {
976 if let Some(updates_tx) = &self.updates_tx {
977 updates_tx
978 .unbounded_send(update)
979 .expect("consumer runs to completion");
980 }
981 }
982
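    // A snapshot with a given scan id has been "observed" once this worktree's scan id has
    // moved past it, or is equal to it and the snapshot is complete. `wait_for_snapshot`
    // keeps its oneshot senders ordered by scan id (inserting at the position reported by
    // `binary_search_by_key`), and the update loop in `Worktree::remote` completes them from
    // the front as new snapshots arrive.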
983 fn observed_snapshot(&self, scan_id: usize) -> bool {
984 self.scan_id > scan_id || (self.scan_id == scan_id && self.is_complete)
985 }
986
987 fn wait_for_snapshot(&mut self, scan_id: usize) -> impl Future<Output = ()> {
988 let (tx, rx) = oneshot::channel();
989 if self.observed_snapshot(scan_id) {
990 let _ = tx.send(());
991 } else {
992 match self
993 .snapshot_subscriptions
994 .binary_search_by_key(&scan_id, |probe| probe.0)
995 {
996 Ok(ix) | Err(ix) => self.snapshot_subscriptions.insert(ix, (scan_id, tx)),
997 }
998 }
999
1000 async move {
1001 let _ = rx.await;
1002 }
1003 }
1004
1005 pub fn update_diagnostic_summary(
1006 &mut self,
1007 path: Arc<Path>,
1008 summary: &proto::DiagnosticSummary,
1009 ) {
1010 let summary = DiagnosticSummary {
1011 language_server_id: summary.language_server_id as usize,
1012 error_count: summary.error_count as usize,
1013 warning_count: summary.warning_count as usize,
1014 };
1015 if summary.is_empty() {
1016 self.diagnostic_summaries.remove(&PathKey(path.clone()));
1017 } else {
1018 self.diagnostic_summaries
1019 .insert(PathKey(path.clone()), summary);
1020 }
1021 }
1022
1023 pub fn insert_entry(
1024 &mut self,
1025 entry: proto::Entry,
1026 scan_id: usize,
1027 cx: &mut ModelContext<Worktree>,
1028 ) -> Task<Result<Entry>> {
1029 let wait_for_snapshot = self.wait_for_snapshot(scan_id);
1030 cx.spawn(|this, mut cx| async move {
1031 wait_for_snapshot.await;
1032 this.update(&mut cx, |worktree, _| {
1033 let worktree = worktree.as_remote_mut().unwrap();
1034 let mut snapshot = worktree.background_snapshot.lock();
1035 let entry = snapshot.insert_entry(entry);
1036 worktree.snapshot = snapshot.clone();
1037 entry
1038 })
1039 })
1040 }
1041
1042 pub(crate) fn delete_entry(
1043 &mut self,
1044 id: ProjectEntryId,
1045 scan_id: usize,
1046 cx: &mut ModelContext<Worktree>,
1047 ) -> Task<Result<()>> {
1048 let wait_for_snapshot = self.wait_for_snapshot(scan_id);
1049 cx.spawn(|this, mut cx| async move {
1050 wait_for_snapshot.await;
1051 this.update(&mut cx, |worktree, _| {
1052 let worktree = worktree.as_remote_mut().unwrap();
1053 let mut snapshot = worktree.background_snapshot.lock();
1054 snapshot.delete_entry(id);
1055 worktree.snapshot = snapshot.clone();
1056 });
1057 Ok(())
1058 })
1059 }
1060}
1061
1062impl Snapshot {
1063 pub fn id(&self) -> WorktreeId {
1064 self.id
1065 }
1066
1067 pub fn contains_entry(&self, entry_id: ProjectEntryId) -> bool {
1068 self.entries_by_id.get(&entry_id, &()).is_some()
1069 }
1070
1071 pub(crate) fn insert_entry(&mut self, entry: proto::Entry) -> Result<Entry> {
1072 let entry = Entry::try_from((&self.root_char_bag, entry))?;
1073 let old_entry = self.entries_by_id.insert_or_replace(
1074 PathEntry {
1075 id: entry.id,
1076 path: entry.path.clone(),
1077 is_ignored: entry.is_ignored,
1078 scan_id: 0,
1079 },
1080 &(),
1081 );
1082 if let Some(old_entry) = old_entry {
1083 self.entries_by_path.remove(&PathKey(old_entry.path), &());
1084 }
1085 self.entries_by_path.insert_or_replace(entry.clone(), &());
1086 Ok(entry)
1087 }
1088
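    // Removing an entry also removes everything beneath it: slice the path-ordered tree up to
    // the removed path, skip over every entry whose path starts with it (dropping each from
    // `entries_by_id` as well), then append the remaining suffix.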
1089 fn delete_entry(&mut self, entry_id: ProjectEntryId) -> bool {
1090 if let Some(removed_entry) = self.entries_by_id.remove(&entry_id, &()) {
1091 self.entries_by_path = {
1092 let mut cursor = self.entries_by_path.cursor();
1093 let mut new_entries_by_path =
1094 cursor.slice(&TraversalTarget::Path(&removed_entry.path), Bias::Left, &());
1095 while let Some(entry) = cursor.item() {
1096 if entry.path.starts_with(&removed_entry.path) {
1097 self.entries_by_id.remove(&entry.id, &());
1098 cursor.next(&());
1099 } else {
1100 break;
1101 }
1102 }
1103 new_entries_by_path.push_tree(cursor.suffix(&()), &());
1104 new_entries_by_path
1105 };
1106
1107 true
1108 } else {
1109 false
1110 }
1111 }
1112
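    // Remote updates are applied as batched edits against both trees: removed entries are
    // resolved to their path keys through `entries_by_id`, updated entries replace any
    // previous mapping for the same id, and the scan id / completeness flags are taken from
    // the update itself.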
1113 pub(crate) fn apply_remote_update(&mut self, update: proto::UpdateWorktree) -> Result<()> {
1114 let mut entries_by_path_edits = Vec::new();
1115 let mut entries_by_id_edits = Vec::new();
1116 for entry_id in update.removed_entries {
1117 let entry = self
1118 .entry_for_id(ProjectEntryId::from_proto(entry_id))
1119 .ok_or_else(|| anyhow!("unknown entry {}", entry_id))?;
1120 entries_by_path_edits.push(Edit::Remove(PathKey(entry.path.clone())));
1121 entries_by_id_edits.push(Edit::Remove(entry.id));
1122 }
1123
1124 for entry in update.updated_entries {
1125 let entry = Entry::try_from((&self.root_char_bag, entry))?;
1126 if let Some(PathEntry { path, .. }) = self.entries_by_id.get(&entry.id, &()) {
1127 entries_by_path_edits.push(Edit::Remove(PathKey(path.clone())));
1128 }
1129 entries_by_id_edits.push(Edit::Insert(PathEntry {
1130 id: entry.id,
1131 path: entry.path.clone(),
1132 is_ignored: entry.is_ignored,
1133 scan_id: 0,
1134 }));
1135 entries_by_path_edits.push(Edit::Insert(entry));
1136 }
1137
1138 self.entries_by_path.edit(entries_by_path_edits, &());
1139 self.entries_by_id.edit(entries_by_id_edits, &());
1140 self.scan_id = update.scan_id as usize;
1141 self.is_complete = update.is_last_update;
1142
1143 Ok(())
1144 }
1145
1146 pub fn file_count(&self) -> usize {
1147 self.entries_by_path.summary().file_count
1148 }
1149
1150 pub fn visible_file_count(&self) -> usize {
1151 self.entries_by_path.summary().visible_file_count
1152 }
1153
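    // Traversals lean on the summary dimensions maintained by `EntrySummary` (`count`,
    // `visible_count`, `file_count`, `visible_file_count`): seeking to a
    // `TraversalTarget::Count` locates the n-th matching entry via summary lookups instead of
    // walking entries one by one.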
1154 fn traverse_from_offset(
1155 &self,
1156 include_dirs: bool,
1157 include_ignored: bool,
1158 start_offset: usize,
1159 ) -> Traversal {
1160 let mut cursor = self.entries_by_path.cursor();
1161 cursor.seek(
1162 &TraversalTarget::Count {
1163 count: start_offset,
1164 include_dirs,
1165 include_ignored,
1166 },
1167 Bias::Right,
1168 &(),
1169 );
1170 Traversal {
1171 cursor,
1172 include_dirs,
1173 include_ignored,
1174 }
1175 }
1176
1177 fn traverse_from_path(
1178 &self,
1179 include_dirs: bool,
1180 include_ignored: bool,
1181 path: &Path,
1182 ) -> Traversal {
1183 let mut cursor = self.entries_by_path.cursor();
1184 cursor.seek(&TraversalTarget::Path(path), Bias::Left, &());
1185 Traversal {
1186 cursor,
1187 include_dirs,
1188 include_ignored,
1189 }
1190 }
1191
1192 pub fn files(&self, include_ignored: bool, start: usize) -> Traversal {
1193 self.traverse_from_offset(false, include_ignored, start)
1194 }
1195
1196 pub fn entries(&self, include_ignored: bool) -> Traversal {
1197 self.traverse_from_offset(true, include_ignored, 0)
1198 }
1199
1200 pub fn paths(&self) -> impl Iterator<Item = &Arc<Path>> {
1201 let empty_path = Path::new("");
1202 self.entries_by_path
1203 .cursor::<()>()
1204 .filter(move |entry| entry.path.as_ref() != empty_path)
1205 .map(|entry| &entry.path)
1206 }
1207
1208 fn child_entries<'a>(&'a self, parent_path: &'a Path) -> ChildEntriesIter<'a> {
1209 let mut cursor = self.entries_by_path.cursor();
1210 cursor.seek(&TraversalTarget::Path(parent_path), Bias::Right, &());
1211 let traversal = Traversal {
1212 cursor,
1213 include_dirs: true,
1214 include_ignored: true,
1215 };
1216 ChildEntriesIter {
1217 traversal,
1218 parent_path,
1219 }
1220 }
1221
1222 pub fn root_entry(&self) -> Option<&Entry> {
1223 self.entry_for_path("")
1224 }
1225
1226 pub fn root_name(&self) -> &str {
1227 &self.root_name
1228 }
1229
1230 pub fn scan_id(&self) -> usize {
1231 self.scan_id
1232 }
1233
1234 pub fn entry_for_path(&self, path: impl AsRef<Path>) -> Option<&Entry> {
1235 let path = path.as_ref();
1236 self.traverse_from_path(true, true, path)
1237 .entry()
1238 .and_then(|entry| {
1239 if entry.path.as_ref() == path {
1240 Some(entry)
1241 } else {
1242 None
1243 }
1244 })
1245 }
1246
1247 pub fn entry_for_id(&self, id: ProjectEntryId) -> Option<&Entry> {
1248 let entry = self.entries_by_id.get(&id, &())?;
1249 self.entry_for_path(&entry.path)
1250 }
1251
1252 pub fn inode_for_path(&self, path: impl AsRef<Path>) -> Option<u64> {
1253 self.entry_for_path(path.as_ref()).map(|e| e.inode)
1254 }
1255}
1256
1257impl LocalSnapshot {
1258 pub fn abs_path(&self) -> &Arc<Path> {
1259 &self.abs_path
1260 }
1261
1262 #[cfg(test)]
1263 pub(crate) fn build_initial_update(&self, project_id: u64) -> proto::UpdateWorktree {
1264 let root_name = self.root_name.clone();
1265 proto::UpdateWorktree {
1266 project_id,
1267 worktree_id: self.id().to_proto(),
1268 root_name,
1269 updated_entries: self.entries_by_path.iter().map(Into::into).collect(),
1270 removed_entries: Default::default(),
1271 scan_id: self.scan_id as u64,
1272 is_last_update: true,
1273 }
1274 }
1275
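    // Builds a delta between two snapshots by merge-joining their id-ordered `entries_by_id`
    // cursors: ids present only in `self` become updates, ids present only in `other` become
    // removals, and ids present in both are re-sent only when their scan ids differ.
    //
    // A small worked example (hypothetical ids and scan ids): with self = {1@3, 2@2, 4@3} and
    // other = {1@3, 2@1, 3@2}, the update contains updated entries [2, 4] and removed entry [3].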
1276 pub(crate) fn build_update(
1277 &self,
1278 other: &Self,
1279 project_id: u64,
1280 worktree_id: u64,
1281 include_ignored: bool,
1282 ) -> proto::UpdateWorktree {
1283 let mut updated_entries = Vec::new();
1284 let mut removed_entries = Vec::new();
1285 let mut self_entries = self
1286 .entries_by_id
1287 .cursor::<()>()
1288 .filter(|e| include_ignored || !e.is_ignored)
1289 .peekable();
1290 let mut other_entries = other
1291 .entries_by_id
1292 .cursor::<()>()
1293 .filter(|e| include_ignored || !e.is_ignored)
1294 .peekable();
1295 loop {
1296 match (self_entries.peek(), other_entries.peek()) {
1297 (Some(self_entry), Some(other_entry)) => {
1298 match Ord::cmp(&self_entry.id, &other_entry.id) {
1299 Ordering::Less => {
1300 let entry = self.entry_for_id(self_entry.id).unwrap().into();
1301 updated_entries.push(entry);
1302 self_entries.next();
1303 }
1304 Ordering::Equal => {
1305 if self_entry.scan_id != other_entry.scan_id {
1306 let entry = self.entry_for_id(self_entry.id).unwrap().into();
1307 updated_entries.push(entry);
1308 }
1309
1310 self_entries.next();
1311 other_entries.next();
1312 }
1313 Ordering::Greater => {
1314 removed_entries.push(other_entry.id.to_proto());
1315 other_entries.next();
1316 }
1317 }
1318 }
1319 (Some(self_entry), None) => {
1320 let entry = self.entry_for_id(self_entry.id).unwrap().into();
1321 updated_entries.push(entry);
1322 self_entries.next();
1323 }
1324 (None, Some(other_entry)) => {
1325 removed_entries.push(other_entry.id.to_proto());
1326 other_entries.next();
1327 }
1328 (None, None) => break,
1329 }
1330 }
1331
1332 proto::UpdateWorktree {
1333 project_id,
1334 worktree_id,
1335 root_name: self.root_name().to_string(),
1336 updated_entries,
1337 removed_entries,
1338 scan_id: self.scan_id as u64,
1339 is_last_update: true,
1340 }
1341 }
1342
1343 fn insert_entry(&mut self, mut entry: Entry, fs: &dyn Fs) -> Entry {
1344 if !entry.is_dir() && entry.path.file_name() == Some(&GITIGNORE) {
1345 let abs_path = self.abs_path.join(&entry.path);
1346 match build_gitignore(&abs_path, fs) {
1347 Ok(ignore) => {
1348 let ignore_dir_path = entry.path.parent().unwrap();
1349 self.ignores
1350 .insert(ignore_dir_path.into(), (Arc::new(ignore), self.scan_id));
1351 }
1352 Err(error) => {
1353 log::error!(
1354 "error loading .gitignore file {:?} - {:?}",
1355 &entry.path,
1356 error
1357 );
1358 }
1359 }
1360 }
1361
1362 self.reuse_entry_id(&mut entry);
1363 self.entries_by_path.insert_or_replace(entry.clone(), &());
1364 let scan_id = self.scan_id;
1365 self.entries_by_id.insert_or_replace(
1366 PathEntry {
1367 id: entry.id,
1368 path: entry.path.clone(),
1369 is_ignored: entry.is_ignored,
1370 scan_id,
1371 },
1372 &(),
1373 );
1374 entry
1375 }
1376
1377 fn populate_dir(
1378 &mut self,
1379 parent_path: Arc<Path>,
1380 entries: impl IntoIterator<Item = Entry>,
1381 ignore: Option<Arc<Gitignore>>,
1382 ) {
1383 let mut parent_entry = if let Some(parent_entry) =
1384 self.entries_by_path.get(&PathKey(parent_path.clone()), &())
1385 {
1386 parent_entry.clone()
1387 } else {
1388 log::warn!(
1389 "populating a directory {:?} that has been removed",
1390 parent_path
1391 );
1392 return;
1393 };
1394
1395 if let Some(ignore) = ignore {
1396 self.ignores.insert(parent_path, (ignore, self.scan_id));
1397 }
1398 if matches!(parent_entry.kind, EntryKind::PendingDir) {
1399 parent_entry.kind = EntryKind::Dir;
1400 } else {
1401 unreachable!();
1402 }
1403
1404 let mut entries_by_path_edits = vec![Edit::Insert(parent_entry)];
1405 let mut entries_by_id_edits = Vec::new();
1406
1407 for mut entry in entries {
1408 self.reuse_entry_id(&mut entry);
1409 entries_by_id_edits.push(Edit::Insert(PathEntry {
1410 id: entry.id,
1411 path: entry.path.clone(),
1412 is_ignored: entry.is_ignored,
1413 scan_id: self.scan_id,
1414 }));
1415 entries_by_path_edits.push(Edit::Insert(entry));
1416 }
1417
1418 self.entries_by_path.edit(entries_by_path_edits, &());
1419 self.entries_by_id.edit(entries_by_id_edits, &());
1420 }
1421
1422 fn reuse_entry_id(&mut self, entry: &mut Entry) {
1423 if let Some(removed_entry_id) = self.removed_entry_ids.remove(&entry.inode) {
1424 entry.id = removed_entry_id;
1425 } else if let Some(existing_entry) = self.entry_for_path(&entry.path) {
1426 entry.id = existing_entry.id;
1427 }
1428 }
1429
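    // Like `Snapshot::delete_entry`, but it also records each removed inode in
    // `removed_entry_ids` so that `reuse_entry_id` can hand the same `ProjectEntryId` back to
    // a re-created file, and it bumps the stored scan id for a parent directory whose
    // `.gitignore` was removed.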
1430 fn remove_path(&mut self, path: &Path) {
1431 let mut new_entries;
1432 let removed_entries;
1433 {
1434 let mut cursor = self.entries_by_path.cursor::<TraversalProgress>();
1435 new_entries = cursor.slice(&TraversalTarget::Path(path), Bias::Left, &());
1436 removed_entries = cursor.slice(&TraversalTarget::PathSuccessor(path), Bias::Left, &());
1437 new_entries.push_tree(cursor.suffix(&()), &());
1438 }
1439 self.entries_by_path = new_entries;
1440
1441 let mut entries_by_id_edits = Vec::new();
1442 for entry in removed_entries.cursor::<()>() {
1443 let removed_entry_id = self
1444 .removed_entry_ids
1445 .entry(entry.inode)
1446 .or_insert(entry.id);
1447 *removed_entry_id = cmp::max(*removed_entry_id, entry.id);
1448 entries_by_id_edits.push(Edit::Remove(entry.id));
1449 }
1450 self.entries_by_id.edit(entries_by_id_edits, &());
1451
1452 if path.file_name() == Some(&GITIGNORE) {
1453 if let Some((_, scan_id)) = self.ignores.get_mut(path.parent().unwrap()) {
1454 *scan_id = self.snapshot.scan_id;
1455 }
1456 }
1457 }
1458
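    // Builds the ignore stack for a path by walking its ancestors (nearest first), collecting
    // any `.gitignore` registered for each ancestor directory, and then replaying them from
    // the root downward. If an ancestor directory is itself ignored, or the path is, the
    // stack collapses to `IgnoreStack::all()`.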
1459 fn ignore_stack_for_path(&self, path: &Path, is_dir: bool) -> Arc<IgnoreStack> {
1460 let mut new_ignores = Vec::new();
1461 for ancestor in path.ancestors().skip(1) {
1462 if let Some((ignore, _)) = self.ignores.get(ancestor) {
1463 new_ignores.push((ancestor, Some(ignore.clone())));
1464 } else {
1465 new_ignores.push((ancestor, None));
1466 }
1467 }
1468
1469 let mut ignore_stack = IgnoreStack::none();
1470 for (parent_path, ignore) in new_ignores.into_iter().rev() {
1471 if ignore_stack.is_path_ignored(&parent_path, true) {
1472 ignore_stack = IgnoreStack::all();
1473 break;
1474 } else if let Some(ignore) = ignore {
1475 ignore_stack = ignore_stack.append(Arc::from(parent_path), ignore);
1476 }
1477 }
1478
1479 if ignore_stack.is_path_ignored(path, is_dir) {
1480 ignore_stack = IgnoreStack::all();
1481 }
1482
1483 ignore_stack
1484 }
1485}
1486
1487fn build_gitignore(abs_path: &Path, fs: &dyn Fs) -> Result<Gitignore> {
1488 let contents = smol::block_on(fs.load(&abs_path))?;
1489 let parent = abs_path.parent().unwrap_or(Path::new("/"));
1490 let mut builder = GitignoreBuilder::new(parent);
1491 for line in contents.lines() {
1492 builder.add_line(Some(abs_path.into()), line)?;
1493 }
1494 Ok(builder.build()?)
1495}
1496
1497impl WorktreeId {
1498 pub fn from_usize(handle_id: usize) -> Self {
1499 Self(handle_id)
1500 }
1501
1502 pub(crate) fn from_proto(id: u64) -> Self {
1503 Self(id as usize)
1504 }
1505
1506 pub fn to_proto(&self) -> u64 {
1507 self.0 as u64
1508 }
1509
1510 pub fn to_usize(&self) -> usize {
1511 self.0
1512 }
1513}
1514
1515impl fmt::Display for WorktreeId {
1516 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1517 self.0.fmt(f)
1518 }
1519}
1520
1521impl Deref for Worktree {
1522 type Target = Snapshot;
1523
1524 fn deref(&self) -> &Self::Target {
1525 match self {
1526 Worktree::Local(worktree) => &worktree.snapshot,
1527 Worktree::Remote(worktree) => &worktree.snapshot,
1528 }
1529 }
1530}
1531
1532impl Deref for LocalWorktree {
1533 type Target = LocalSnapshot;
1534
1535 fn deref(&self) -> &Self::Target {
1536 &self.snapshot
1537 }
1538}
1539
1540impl Deref for RemoteWorktree {
1541 type Target = Snapshot;
1542
1543 fn deref(&self) -> &Self::Target {
1544 &self.snapshot
1545 }
1546}
1547
1548impl fmt::Debug for LocalWorktree {
1549 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1550 self.snapshot.fmt(f)
1551 }
1552}
1553
1554impl fmt::Debug for Snapshot {
1555 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1556 struct EntriesById<'a>(&'a SumTree<PathEntry>);
1557 struct EntriesByPath<'a>(&'a SumTree<Entry>);
1558
1559 impl<'a> fmt::Debug for EntriesByPath<'a> {
1560 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1561 f.debug_map()
1562 .entries(self.0.iter().map(|entry| (&entry.path, entry.id)))
1563 .finish()
1564 }
1565 }
1566
1567 impl<'a> fmt::Debug for EntriesById<'a> {
1568 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1569 f.debug_list().entries(self.0.iter()).finish()
1570 }
1571 }
1572
1573 f.debug_struct("Snapshot")
1574 .field("id", &self.id)
1575 .field("root_name", &self.root_name)
1576 .field("entries_by_path", &EntriesByPath(&self.entries_by_path))
1577 .field("entries_by_id", &EntriesById(&self.entries_by_id))
1578 .finish()
1579 }
1580}
1581
1582#[derive(Clone, PartialEq)]
1583pub struct File {
1584 pub worktree: ModelHandle<Worktree>,
1585 pub path: Arc<Path>,
1586 pub mtime: SystemTime,
1587 pub(crate) entry_id: Option<ProjectEntryId>,
1588 pub(crate) is_local: bool,
1589}
1590
1591impl language::File for File {
1592 fn as_local(&self) -> Option<&dyn language::LocalFile> {
1593 if self.is_local {
1594 Some(self)
1595 } else {
1596 None
1597 }
1598 }
1599
1600 fn mtime(&self) -> SystemTime {
1601 self.mtime
1602 }
1603
1604 fn path(&self) -> &Arc<Path> {
1605 &self.path
1606 }
1607
1608 fn full_path(&self, cx: &AppContext) -> PathBuf {
1609 let mut full_path = PathBuf::new();
1610 full_path.push(self.worktree.read(cx).root_name());
1611 if self.path.components().next().is_some() {
1612 full_path.push(&self.path);
1613 }
1614 full_path
1615 }
1616
1617 /// Returns the last component of this handle's absolute path. If this handle refers to the root
1618 /// of its worktree, then this method will return the name of the worktree itself.
1619 fn file_name(&self, cx: &AppContext) -> OsString {
1620 self.path
1621 .file_name()
1622 .map(|name| name.into())
1623 .unwrap_or_else(|| OsString::from(&self.worktree.read(cx).root_name))
1624 }
1625
1626 fn is_deleted(&self) -> bool {
1627 self.entry_id.is_none()
1628 }
1629
1630 fn save(
1631 &self,
1632 buffer_id: u64,
1633 text: Rope,
1634 version: clock::Global,
1635 cx: &mut MutableAppContext,
1636 ) -> Task<Result<(clock::Global, String, SystemTime)>> {
1637 self.worktree.update(cx, |worktree, cx| match worktree {
1638 Worktree::Local(worktree) => {
1639 let rpc = worktree.client.clone();
1640 let project_id = worktree.share.as_ref().map(|share| share.project_id);
1641 let fingerprint = text.fingerprint();
1642 let save = worktree.write_file(self.path.clone(), text, cx);
1643 cx.background().spawn(async move {
1644 let entry = save.await?;
1645 if let Some(project_id) = project_id {
1646 rpc.send(proto::BufferSaved {
1647 project_id,
1648 buffer_id,
1649 version: serialize_version(&version),
1650 mtime: Some(entry.mtime.into()),
1651 fingerprint: fingerprint.clone(),
1652 })?;
1653 }
1654 Ok((version, fingerprint, entry.mtime))
1655 })
1656 }
1657 Worktree::Remote(worktree) => {
1658 let rpc = worktree.client.clone();
1659 let project_id = worktree.project_id;
1660 cx.foreground().spawn(async move {
1661 let response = rpc
1662 .request(proto::SaveBuffer {
1663 project_id,
1664 buffer_id,
1665 version: serialize_version(&version),
1666 })
1667 .await?;
1668 let version = deserialize_version(response.version);
1669 let mtime = response
1670 .mtime
1671 .ok_or_else(|| anyhow!("missing mtime"))?
1672 .into();
1673 Ok((version, response.fingerprint, mtime))
1674 })
1675 }
1676 })
1677 }
1678
1679 fn as_any(&self) -> &dyn Any {
1680 self
1681 }
1682
1683 fn to_proto(&self) -> rpc::proto::File {
1684 rpc::proto::File {
1685 worktree_id: self.worktree.id() as u64,
1686 entry_id: self.entry_id.map(|entry_id| entry_id.to_proto()),
1687 path: self.path.to_string_lossy().into(),
1688 mtime: Some(self.mtime.into()),
1689 }
1690 }
1691}
1692
1693impl language::LocalFile for File {
1694 fn abs_path(&self, cx: &AppContext) -> PathBuf {
1695 self.worktree
1696 .read(cx)
1697 .as_local()
1698 .unwrap()
1699 .abs_path
1700 .join(&self.path)
1701 }
1702
1703 fn load(&self, cx: &AppContext) -> Task<Result<String>> {
1704 let worktree = self.worktree.read(cx).as_local().unwrap();
1705 let abs_path = worktree.absolutize(&self.path);
1706 let fs = worktree.fs.clone();
1707 cx.background()
1708 .spawn(async move { fs.load(&abs_path).await })
1709 }
1710
1711 fn buffer_reloaded(
1712 &self,
1713 buffer_id: u64,
1714 version: &clock::Global,
1715 fingerprint: String,
1716 mtime: SystemTime,
1717 cx: &mut MutableAppContext,
1718 ) {
1719 let worktree = self.worktree.read(cx).as_local().unwrap();
1720 if let Some(project_id) = worktree.share.as_ref().map(|share| share.project_id) {
1721 worktree
1722 .client
1723 .send(proto::BufferReloaded {
1724 project_id,
1725 buffer_id,
1726 version: serialize_version(&version),
1727 mtime: Some(mtime.into()),
1728 fingerprint,
1729 })
1730 .log_err();
1731 }
1732 }
1733}
1734
1735impl File {
1736 pub fn from_proto(
1737 proto: rpc::proto::File,
1738 worktree: ModelHandle<Worktree>,
1739 cx: &AppContext,
1740 ) -> Result<Self> {
1741 let worktree_id = worktree
1742 .read(cx)
1743 .as_remote()
1744 .ok_or_else(|| anyhow!("not remote"))?
1745 .id();
1746
1747 if worktree_id.to_proto() != proto.worktree_id {
1748 return Err(anyhow!("worktree id does not match file"));
1749 }
1750
1751 Ok(Self {
1752 worktree,
1753 path: Path::new(&proto.path).into(),
1754 mtime: proto.mtime.ok_or_else(|| anyhow!("no timestamp"))?.into(),
1755 entry_id: proto.entry_id.map(ProjectEntryId::from_proto),
1756 is_local: false,
1757 })
1758 }
1759
1760 pub fn from_dyn(file: Option<&dyn language::File>) -> Option<&Self> {
1761 file.and_then(|f| f.as_any().downcast_ref())
1762 }
1763
1764 pub fn worktree_id(&self, cx: &AppContext) -> WorktreeId {
1765 self.worktree.read(cx).id()
1766 }
1767
1768 pub fn project_entry_id(&self, _: &AppContext) -> Option<ProjectEntryId> {
1769 self.entry_id
1770 }
1771}
1772
1773#[derive(Clone, Debug, PartialEq, Eq)]
1774pub struct Entry {
1775 pub id: ProjectEntryId,
1776 pub kind: EntryKind,
1777 pub path: Arc<Path>,
1778 pub inode: u64,
1779 pub mtime: SystemTime,
1780 pub is_symlink: bool,
1781 pub is_ignored: bool,
1782}
1783
1784#[derive(Clone, Copy, Debug, PartialEq, Eq)]
1785pub enum EntryKind {
1786 PendingDir,
1787 Dir,
1788 File(CharBag),
1789}
1790
1791impl Entry {
1792 fn new(
1793 path: Arc<Path>,
1794 metadata: &fs::Metadata,
1795 next_entry_id: &AtomicUsize,
1796 root_char_bag: CharBag,
1797 ) -> Self {
1798 Self {
1799 id: ProjectEntryId::new(next_entry_id),
1800 kind: if metadata.is_dir {
1801 EntryKind::PendingDir
1802 } else {
1803 EntryKind::File(char_bag_for_path(root_char_bag, &path))
1804 },
1805 path,
1806 inode: metadata.inode,
1807 mtime: metadata.mtime,
1808 is_symlink: metadata.is_symlink,
1809 is_ignored: false,
1810 }
1811 }
1812
1813 pub fn is_dir(&self) -> bool {
1814 matches!(self.kind, EntryKind::Dir | EntryKind::PendingDir)
1815 }
1816
1817 pub fn is_file(&self) -> bool {
1818 matches!(self.kind, EntryKind::File(_))
1819 }
1820}
1821
1822impl sum_tree::Item for Entry {
1823 type Summary = EntrySummary;
1824
1825 fn summary(&self) -> Self::Summary {
1826 let visible_count = if self.is_ignored { 0 } else { 1 };
1827 let file_count;
1828 let visible_file_count;
1829 if self.is_file() {
1830 file_count = 1;
1831 visible_file_count = visible_count;
1832 } else {
1833 file_count = 0;
1834 visible_file_count = 0;
1835 }
1836
1837 EntrySummary {
1838 max_path: self.path.clone(),
1839 count: 1,
1840 visible_count,
1841 file_count,
1842 visible_file_count,
1843 }
1844 }
1845}
1846
1847impl sum_tree::KeyedItem for Entry {
1848 type Key = PathKey;
1849
1850 fn key(&self) -> Self::Key {
1851 PathKey(self.path.clone())
1852 }
1853}
1854
1855#[derive(Clone, Debug)]
1856pub struct EntrySummary {
1857 max_path: Arc<Path>,
1858 count: usize,
1859 visible_count: usize,
1860 file_count: usize,
1861 visible_file_count: usize,
1862}
1863
1864impl Default for EntrySummary {
1865 fn default() -> Self {
1866 Self {
1867 max_path: Arc::from(Path::new("")),
1868 count: 0,
1869 visible_count: 0,
1870 file_count: 0,
1871 visible_file_count: 0,
1872 }
1873 }
1874}
1875
1876impl sum_tree::Summary for EntrySummary {
1877 type Context = ();
1878
1879 fn add_summary(&mut self, rhs: &Self, _: &()) {
1880 self.max_path = rhs.max_path.clone();
1881 self.count += rhs.count;
1882 self.visible_count += rhs.visible_count;
1883 self.file_count += rhs.file_count;
1884 self.visible_file_count += rhs.visible_file_count;
1885 }
1886}
1887
1888#[derive(Clone, Debug)]
1889struct PathEntry {
1890 id: ProjectEntryId,
1891 path: Arc<Path>,
1892 is_ignored: bool,
1893 scan_id: usize,
1894}
1895
1896impl sum_tree::Item for PathEntry {
1897 type Summary = PathEntrySummary;
1898
1899 fn summary(&self) -> Self::Summary {
1900 PathEntrySummary { max_id: self.id }
1901 }
1902}
1903
1904impl sum_tree::KeyedItem for PathEntry {
1905 type Key = ProjectEntryId;
1906
1907 fn key(&self) -> Self::Key {
1908 self.id
1909 }
1910}
1911
1912#[derive(Clone, Debug, Default)]
1913struct PathEntrySummary {
1914 max_id: ProjectEntryId,
1915}
1916
1917impl sum_tree::Summary for PathEntrySummary {
1918 type Context = ();
1919
1920 fn add_summary(&mut self, summary: &Self, _: &Self::Context) {
1921 self.max_id = summary.max_id;
1922 }
1923}
1924
1925impl<'a> sum_tree::Dimension<'a, PathEntrySummary> for ProjectEntryId {
1926 fn add_summary(&mut self, summary: &'a PathEntrySummary, _: &()) {
1927 *self = summary.max_id;
1928 }
1929}
1930
1931#[derive(Clone, Debug, Eq, PartialEq, Ord, PartialOrd)]
1932pub struct PathKey(Arc<Path>);
1933
1934impl Default for PathKey {
1935 fn default() -> Self {
1936 Self(Path::new("").into())
1937 }
1938}
1939
1940impl<'a> sum_tree::Dimension<'a, EntrySummary> for PathKey {
1941 fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
1942 self.0 = summary.max_path.clone();
1943 }
1944}
1945
1946struct BackgroundScanner {
1947 fs: Arc<dyn Fs>,
1948 snapshot: Arc<Mutex<LocalSnapshot>>,
1949 notify: UnboundedSender<ScanState>,
1950 executor: Arc<executor::Background>,
1951}
1952
1953impl BackgroundScanner {
1954 fn new(
1955 snapshot: Arc<Mutex<LocalSnapshot>>,
1956 notify: UnboundedSender<ScanState>,
1957 fs: Arc<dyn Fs>,
1958 executor: Arc<executor::Background>,
1959 ) -> Self {
1960 Self {
1961 fs,
1962 snapshot,
1963 notify,
1964 executor,
1965 }
1966 }
1967
1968 fn abs_path(&self) -> Arc<Path> {
1969 self.snapshot.lock().abs_path.clone()
1970 }
1971
1972 fn snapshot(&self) -> LocalSnapshot {
1973 self.snapshot.lock().clone()
1974 }
1975
1976 async fn run(mut self, events_rx: impl Stream<Item = Vec<fsevent::Event>>) {
1977 if self.notify.unbounded_send(ScanState::Scanning).is_err() {
1978 return;
1979 }
1980
1981 if let Err(err) = self.scan_dirs().await {
1982 if self
1983 .notify
1984 .unbounded_send(ScanState::Err(Arc::new(err)))
1985 .is_err()
1986 {
1987 return;
1988 }
1989 }
1990
1991 if self.notify.unbounded_send(ScanState::Idle).is_err() {
1992 return;
1993 }
1994
1995 futures::pin_mut!(events_rx);
1996
1997 while let Some(mut events) = events_rx.next().await {
1998 while let Poll::Ready(Some(additional_events)) = futures::poll!(events_rx.next()) {
1999 events.extend(additional_events);
2000 }
2001
2002 if self.notify.unbounded_send(ScanState::Scanning).is_err() {
2003 break;
2004 }
2005
2006 if !self.process_events(events).await {
2007 break;
2008 }
2009
2010 if self.notify.unbounded_send(ScanState::Idle).is_err() {
2011 break;
2012 }
2013 }
2014 }
2015
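    // The initial scan is a shared work queue: a single `ScanJob` for the root is seeded onto
    // an unbounded channel, one worker per CPU pulls jobs from it, and `scan_dir` pushes a new
    // job for every child directory onto the same queue (via `job.scan_queue`). Once the seed
    // sender is dropped and all in-flight jobs finish, the channel closes and the workers exit.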
2016 async fn scan_dirs(&mut self) -> Result<()> {
2017 let root_char_bag;
2018 let next_entry_id;
2019 let is_dir;
2020 {
2021 let snapshot = self.snapshot.lock();
2022 root_char_bag = snapshot.root_char_bag;
2023 next_entry_id = snapshot.next_entry_id.clone();
2024 is_dir = snapshot.root_entry().map_or(false, |e| e.is_dir())
2025 };
2026
2027 if is_dir {
2028 let path: Arc<Path> = Arc::from(Path::new(""));
2029 let abs_path = self.abs_path();
2030 let (tx, rx) = channel::unbounded();
2031 self.executor
2032 .block(tx.send(ScanJob {
2033 abs_path: abs_path.to_path_buf(),
2034 path,
2035 ignore_stack: IgnoreStack::none(),
2036 scan_queue: tx.clone(),
2037 }))
2038 .unwrap();
2039 drop(tx);
2040
2041 self.executor
2042 .scoped(|scope| {
2043 for _ in 0..self.executor.num_cpus() {
2044 scope.spawn(async {
2045 while let Ok(job) = rx.recv().await {
2046 if let Err(err) = self
2047 .scan_dir(root_char_bag, next_entry_id.clone(), &job)
2048 .await
2049 {
2050 log::error!("error scanning {:?}: {}", job.abs_path, err);
2051 }
2052 }
2053 });
2054 }
2055 })
2056 .await;
2057 }
2058
2059 Ok(())
2060 }
2061
2062 async fn scan_dir(
2063 &self,
2064 root_char_bag: CharBag,
2065 next_entry_id: Arc<AtomicUsize>,
2066 job: &ScanJob,
2067 ) -> Result<()> {
2068 let mut new_entries: Vec<Entry> = Vec::new();
2069 let mut new_jobs: Vec<ScanJob> = Vec::new();
2070 let mut ignore_stack = job.ignore_stack.clone();
2071 let mut new_ignore = None;
2072
2073 let mut child_paths = self.fs.read_dir(&job.abs_path).await?;
2074 while let Some(child_abs_path) = child_paths.next().await {
2075 let child_abs_path = match child_abs_path {
2076 Ok(child_abs_path) => child_abs_path,
2077 Err(error) => {
2078 log::error!("error processing directory entry: {:?}", error);
2079 continue;
2080 }
2081 };
2082 let child_name = child_abs_path.file_name().unwrap();
2083 let child_path: Arc<Path> = job.path.join(child_name).into();
2084 let child_metadata = match self.fs.metadata(&child_abs_path).await {
2085 Ok(Some(metadata)) => metadata,
2086 Ok(None) => continue,
2087 Err(err) => {
2088 log::error!("error processing {:?}: {:?}", child_abs_path, err);
2089 continue;
2090 }
2091 };
2092
2093 // If we find a .gitignore, add it to the stack of ignores used to determine which paths are ignored
2094 if child_name == *GITIGNORE {
2095 match build_gitignore(&child_abs_path, self.fs.as_ref()) {
2096 Ok(ignore) => {
2097 let ignore = Arc::new(ignore);
2098 ignore_stack = ignore_stack.append(job.path.clone(), ignore.clone());
2099 new_ignore = Some(ignore);
2100 }
2101 Err(error) => {
2102 log::error!(
2103 "error loading .gitignore file {:?} - {:?}",
2104 child_name,
2105 error
2106 );
2107 }
2108 }
2109
2110 // Update ignore status of any child entries we've already processed to reflect the
2111 // ignore file in the current directory. Because `.gitignore` starts with a `.`, it tends
2112 // to appear early in the directory listing, so there should rarely be many entries to
2113 // update here. Update the ignore stack associated with any new jobs as well.
2114 let mut new_jobs = new_jobs.iter_mut();
2115 for entry in &mut new_entries {
2116 entry.is_ignored = ignore_stack.is_path_ignored(&entry.path, entry.is_dir());
2117 if entry.is_dir() {
2118 new_jobs.next().unwrap().ignore_stack = if entry.is_ignored {
2119 IgnoreStack::all()
2120 } else {
2121 ignore_stack.clone()
2122 };
2123 }
2124 }
2125 }
2126
2127 let mut child_entry = Entry::new(
2128 child_path.clone(),
2129 &child_metadata,
2130 &next_entry_id,
2131 root_char_bag,
2132 );
2133
2134 if child_metadata.is_dir {
2135 let is_ignored = ignore_stack.is_path_ignored(&child_path, true);
2136 child_entry.is_ignored = is_ignored;
2137 new_entries.push(child_entry);
2138 new_jobs.push(ScanJob {
2139 abs_path: child_abs_path,
2140 path: child_path,
2141 ignore_stack: if is_ignored {
2142 IgnoreStack::all()
2143 } else {
2144 ignore_stack.clone()
2145 },
2146 scan_queue: job.scan_queue.clone(),
2147 });
2148 } else {
2149 child_entry.is_ignored = ignore_stack.is_path_ignored(&child_path, false);
2150 new_entries.push(child_entry);
2151 };
2152 }
2153
2154 self.snapshot
2155 .lock()
2156 .populate_dir(job.path.clone(), new_entries, new_ignore);
2157 for new_job in new_jobs {
2158 job.scan_queue.send(new_job).await.unwrap();
2159 }
2160
2161 Ok(())
2162 }
2163
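    /// Applies a batch of file-system events to the snapshot: removes the affected
    /// paths, re-inserts entries for paths that still exist, rescans any newly
    /// discovered directories in parallel, and finally refreshes ignore statuses.
    /// Returns `false` if the root path can no longer be canonicalized, which tells
    /// the caller to stop processing events.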
2164 async fn process_events(&mut self, mut events: Vec<fsevent::Event>) -> bool {
2165 events.sort_unstable_by(|a, b| a.path.cmp(&b.path));
2166 events.dedup_by(|a, b| a.path.starts_with(&b.path));
2167
2168 let root_char_bag;
2169 let root_abs_path;
2170 let next_entry_id;
2171 {
2172 let snapshot = self.snapshot.lock();
2173 root_char_bag = snapshot.root_char_bag;
2174 root_abs_path = snapshot.abs_path.clone();
2175 next_entry_id = snapshot.next_entry_id.clone();
2176 }
2177
2178 let root_abs_path = if let Ok(abs_path) = self.fs.canonicalize(&root_abs_path).await {
2179 abs_path
2180 } else {
2181 return false;
2182 };
2183 let metadata = futures::future::join_all(
2184 events
2185 .iter()
2186 .map(|event| self.fs.metadata(&event.path))
2187 .collect::<Vec<_>>(),
2188 )
2189 .await;
2190
2191        // Hold the snapshot lock while removing and re-inserting the entries affected
2192        // by each event. This way, the foreground thread never observes the snapshot in
2193        // a partially updated state while this operation is in progress.
2194 let (scan_queue_tx, scan_queue_rx) = channel::unbounded();
2195 {
2196 let mut snapshot = self.snapshot.lock();
2197 snapshot.scan_id += 1;
2198 for event in &events {
2199 if let Ok(path) = event.path.strip_prefix(&root_abs_path) {
2200 snapshot.remove_path(&path);
2201 }
2202 }
2203
2204 for (event, metadata) in events.into_iter().zip(metadata.into_iter()) {
2205 let path: Arc<Path> = match event.path.strip_prefix(&root_abs_path) {
2206 Ok(path) => Arc::from(path.to_path_buf()),
2207 Err(_) => {
2208 log::error!(
2209 "unexpected event {:?} for root path {:?}",
2210 event.path,
2211 root_abs_path
2212 );
2213 continue;
2214 }
2215 };
2216
2217 match metadata {
2218 Ok(Some(metadata)) => {
2219 let ignore_stack = snapshot.ignore_stack_for_path(&path, metadata.is_dir);
2220 let mut fs_entry = Entry::new(
2221 path.clone(),
2222 &metadata,
2223 snapshot.next_entry_id.as_ref(),
2224 snapshot.root_char_bag,
2225 );
2226 fs_entry.is_ignored = ignore_stack.is_all();
2227 snapshot.insert_entry(fs_entry, self.fs.as_ref());
2228 if metadata.is_dir {
2229 self.executor
2230 .block(scan_queue_tx.send(ScanJob {
2231 abs_path: event.path,
2232 path,
2233 ignore_stack,
2234 scan_queue: scan_queue_tx.clone(),
2235 }))
2236 .unwrap();
2237 }
2238 }
2239 Ok(None) => {}
2240 Err(err) => {
2241 // TODO - create a special 'error' entry in the entries tree to mark this
2242 log::error!("error reading file on event {:?}", err);
2243 }
2244 }
2245 }
2246 drop(scan_queue_tx);
2247 }
2248
2249 // Scan any directories that were created as part of this event batch.
2250 self.executor
2251 .scoped(|scope| {
2252 for _ in 0..self.executor.num_cpus() {
2253 scope.spawn(async {
2254 while let Ok(job) = scan_queue_rx.recv().await {
2255 if let Err(err) = self
2256 .scan_dir(root_char_bag, next_entry_id.clone(), &job)
2257 .await
2258 {
2259 log::error!("error scanning {:?}: {}", job.abs_path, err);
2260 }
2261 }
2262 });
2263 }
2264 })
2265 .await;
2266
2267 // Attempt to detect renames only over a single batch of file-system events.
2268 self.snapshot.lock().removed_entry_ids.clear();
2269
2270 self.update_ignore_statuses().await;
2271 true
2272 }
2273
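    /// Recomputes ignore statuses after a scan: cached ignores whose `.gitignore`
    /// file no longer exists are dropped, and for each `.gitignore` touched during
    /// the current scan, jobs are queued to re-evaluate the ignore status of the
    /// entries beneath its directory in parallel.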
2274 async fn update_ignore_statuses(&self) {
2275 let mut snapshot = self.snapshot();
2276
2277 let mut ignores_to_update = Vec::new();
2278 let mut ignores_to_delete = Vec::new();
2279 for (parent_path, (_, scan_id)) in &snapshot.ignores {
2280 if *scan_id == snapshot.scan_id && snapshot.entry_for_path(parent_path).is_some() {
2281 ignores_to_update.push(parent_path.clone());
2282 }
2283
2284 let ignore_path = parent_path.join(&*GITIGNORE);
2285 if snapshot.entry_for_path(ignore_path).is_none() {
2286 ignores_to_delete.push(parent_path.clone());
2287 }
2288 }
2289
2290 for parent_path in ignores_to_delete {
2291 snapshot.ignores.remove(&parent_path);
2292 self.snapshot.lock().ignores.remove(&parent_path);
2293 }
2294
2295 let (ignore_queue_tx, ignore_queue_rx) = channel::unbounded();
2296 ignores_to_update.sort_unstable();
2297 let mut ignores_to_update = ignores_to_update.into_iter().peekable();
2298 while let Some(parent_path) = ignores_to_update.next() {
2299 while ignores_to_update
2300 .peek()
2301 .map_or(false, |p| p.starts_with(&parent_path))
2302 {
2303 ignores_to_update.next().unwrap();
2304 }
2305
2306 let ignore_stack = snapshot.ignore_stack_for_path(&parent_path, true);
2307 ignore_queue_tx
2308 .send(UpdateIgnoreStatusJob {
2309 path: parent_path,
2310 ignore_stack,
2311 ignore_queue: ignore_queue_tx.clone(),
2312 })
2313 .await
2314 .unwrap();
2315 }
2316 drop(ignore_queue_tx);
2317
2318 self.executor
2319 .scoped(|scope| {
2320 for _ in 0..self.executor.num_cpus() {
2321 scope.spawn(async {
2322 while let Ok(job) = ignore_queue_rx.recv().await {
2323 self.update_ignore_status(job, &snapshot).await;
2324 }
2325 });
2326 }
2327 })
2328 .await;
2329 }
2330
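    /// Re-evaluates the ignore status of the direct children of `job.path`,
    /// queuing a follow-up job for each child directory and recording any changed
    /// entries as edits to both the by-path and by-id trees.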
2331 async fn update_ignore_status(&self, job: UpdateIgnoreStatusJob, snapshot: &LocalSnapshot) {
2332 let mut ignore_stack = job.ignore_stack;
2333 if let Some((ignore, _)) = snapshot.ignores.get(&job.path) {
2334 ignore_stack = ignore_stack.append(job.path.clone(), ignore.clone());
2335 }
2336
2337 let mut entries_by_id_edits = Vec::new();
2338 let mut entries_by_path_edits = Vec::new();
2339 for mut entry in snapshot.child_entries(&job.path).cloned() {
2340 let was_ignored = entry.is_ignored;
2341 entry.is_ignored = ignore_stack.is_path_ignored(&entry.path, entry.is_dir());
2342 if entry.is_dir() {
2343 let child_ignore_stack = if entry.is_ignored {
2344 IgnoreStack::all()
2345 } else {
2346 ignore_stack.clone()
2347 };
2348 job.ignore_queue
2349 .send(UpdateIgnoreStatusJob {
2350 path: entry.path.clone(),
2351 ignore_stack: child_ignore_stack,
2352 ignore_queue: job.ignore_queue.clone(),
2353 })
2354 .await
2355 .unwrap();
2356 }
2357
2358 if entry.is_ignored != was_ignored {
2359 let mut path_entry = snapshot.entries_by_id.get(&entry.id, &()).unwrap().clone();
2360 path_entry.scan_id = snapshot.scan_id;
2361 path_entry.is_ignored = entry.is_ignored;
2362 entries_by_id_edits.push(Edit::Insert(path_entry));
2363 entries_by_path_edits.push(Edit::Insert(entry));
2364 }
2365 }
2366
2367 let mut snapshot = self.snapshot.lock();
2368 snapshot.entries_by_path.edit(entries_by_path_edits, &());
2369 snapshot.entries_by_id.edit(entries_by_id_edits, &());
2370 }
2371}
2372
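/// Extends the root's character bag with the lowercased characters of `path`,
/// producing the bag associated with a file entry (presumably used for fuzzy
/// path matching).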
2373fn char_bag_for_path(root_char_bag: CharBag, path: &Path) -> CharBag {
2374 let mut result = root_char_bag;
2375 result.extend(
2376 path.to_string_lossy()
2377 .chars()
2378 .map(|c| c.to_ascii_lowercase()),
2379 );
2380 result
2381}
2382
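/// A unit of work for the parallel directory scan: the directory to read, the
/// ignore stack in effect at that directory, and a handle back to the scan queue
/// for enqueueing child directories.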
2383struct ScanJob {
2384 abs_path: PathBuf,
2385 path: Arc<Path>,
2386 ignore_stack: Arc<IgnoreStack>,
2387 scan_queue: Sender<ScanJob>,
2388}
2389
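/// A unit of work for recomputing ignore statuses: a directory whose child
/// entries should be re-evaluated against the given ignore stack.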
2390struct UpdateIgnoreStatusJob {
2391 path: Arc<Path>,
2392 ignore_stack: Arc<IgnoreStack>,
2393 ignore_queue: Sender<UpdateIgnoreStatusJob>,
2394}
2395
2396pub trait WorktreeHandle {
2397 #[cfg(any(test, feature = "test-support"))]
2398 fn flush_fs_events<'a>(
2399 &self,
2400 cx: &'a gpui::TestAppContext,
2401 ) -> futures::future::LocalBoxFuture<'a, ()>;
2402}
2403
2404impl WorktreeHandle for ModelHandle<Worktree> {
2405    // The worktree's FS event stream sometimes delivers "redundant" events for FS changes that
2406    // occurred before the worktree was constructed. These events can cause the worktree to perform
2407    // extra directory scans, and to emit extra scan-state notifications.
2408 //
2409 // This function mutates the worktree's directory and waits for those mutations to be picked up,
2410 // to ensure that all redundant FS events have already been processed.
2411 #[cfg(any(test, feature = "test-support"))]
2412 fn flush_fs_events<'a>(
2413 &self,
2414 cx: &'a gpui::TestAppContext,
2415 ) -> futures::future::LocalBoxFuture<'a, ()> {
2416 use smol::future::FutureExt;
2417
2418 let filename = "fs-event-sentinel";
2419 let tree = self.clone();
2420 let (fs, root_path) = self.read_with(cx, |tree, _| {
2421 let tree = tree.as_local().unwrap();
2422 (tree.fs.clone(), tree.abs_path().clone())
2423 });
2424
2425 async move {
2426 fs.create_file(&root_path.join(filename), Default::default())
2427 .await
2428 .unwrap();
2429 tree.condition(&cx, |tree, _| tree.entry_for_path(filename).is_some())
2430 .await;
2431
2432 fs.remove_file(&root_path.join(filename), Default::default())
2433 .await
2434 .unwrap();
2435 tree.condition(&cx, |tree, _| tree.entry_for_path(filename).is_none())
2436 .await;
2437
2438 cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
2439 .await;
2440 }
2441 .boxed_local()
2442 }
2443}
2444
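/// A `sum_tree` dimension accumulated while traversing entries, tracking the
/// greatest path seen so far and how many entries (total, visible, files, and
/// visible files) precede the current position.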
2445#[derive(Clone, Debug)]
2446struct TraversalProgress<'a> {
2447 max_path: &'a Path,
2448 count: usize,
2449 visible_count: usize,
2450 file_count: usize,
2451 visible_file_count: usize,
2452}
2453
2454impl<'a> TraversalProgress<'a> {
2455 fn count(&self, include_dirs: bool, include_ignored: bool) -> usize {
2456 match (include_ignored, include_dirs) {
2457 (true, true) => self.count,
2458 (true, false) => self.file_count,
2459 (false, true) => self.visible_count,
2460 (false, false) => self.visible_file_count,
2461 }
2462 }
2463}
2464
2465impl<'a> sum_tree::Dimension<'a, EntrySummary> for TraversalProgress<'a> {
2466 fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
2467 self.max_path = summary.max_path.as_ref();
2468 self.count += summary.count;
2469 self.visible_count += summary.visible_count;
2470 self.file_count += summary.file_count;
2471 self.visible_file_count += summary.visible_file_count;
2472 }
2473}
2474
2475impl<'a> Default for TraversalProgress<'a> {
2476 fn default() -> Self {
2477 Self {
2478 max_path: Path::new(""),
2479 count: 0,
2480 visible_count: 0,
2481 file_count: 0,
2482 visible_file_count: 0,
2483 }
2484 }
2485}
2486
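/// A cursor over the worktree's entries in path order, optionally skipping
/// directories and/or ignored entries.
///
/// A minimal usage sketch, assuming `Snapshot::entries` constructs a `Traversal`
/// (as the tests below suggest):
///
/// ```ignore
/// for entry in snapshot.entries(false) {
///     println!("{:?}", entry.path);
/// }
/// ```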
2487pub struct Traversal<'a> {
2488 cursor: sum_tree::Cursor<'a, Entry, TraversalProgress<'a>>,
2489 include_ignored: bool,
2490 include_dirs: bool,
2491}
2492
2493impl<'a> Traversal<'a> {
2494 pub fn advance(&mut self) -> bool {
2495 self.advance_to_offset(self.offset() + 1)
2496 }
2497
2498 pub fn advance_to_offset(&mut self, offset: usize) -> bool {
2499 self.cursor.seek_forward(
2500 &TraversalTarget::Count {
2501 count: offset,
2502 include_dirs: self.include_dirs,
2503 include_ignored: self.include_ignored,
2504 },
2505 Bias::Right,
2506 &(),
2507 )
2508 }
2509
2510 pub fn advance_to_sibling(&mut self) -> bool {
2511 while let Some(entry) = self.cursor.item() {
2512 self.cursor.seek_forward(
2513 &TraversalTarget::PathSuccessor(&entry.path),
2514 Bias::Left,
2515 &(),
2516 );
2517 if let Some(entry) = self.cursor.item() {
2518 if (self.include_dirs || !entry.is_dir())
2519 && (self.include_ignored || !entry.is_ignored)
2520 {
2521 return true;
2522 }
2523 }
2524 }
2525 false
2526 }
2527
2528 pub fn entry(&self) -> Option<&'a Entry> {
2529 self.cursor.item()
2530 }
2531
2532 pub fn offset(&self) -> usize {
2533 self.cursor
2534 .start()
2535 .count(self.include_dirs, self.include_ignored)
2536 }
2537}
2538
2539impl<'a> Iterator for Traversal<'a> {
2540 type Item = &'a Entry;
2541
2542 fn next(&mut self) -> Option<Self::Item> {
2543 if let Some(item) = self.entry() {
2544 self.advance();
2545 Some(item)
2546 } else {
2547 None
2548 }
2549 }
2550}
2551
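/// A seek target within the entry tree: an exact path, the first path that is not
/// a descendant of the given path (`PathSuccessor`), or a target entry count.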
2552#[derive(Debug)]
2553enum TraversalTarget<'a> {
2554 Path(&'a Path),
2555 PathSuccessor(&'a Path),
2556 Count {
2557 count: usize,
2558 include_ignored: bool,
2559 include_dirs: bool,
2560 },
2561}
2562
2563impl<'a, 'b> SeekTarget<'a, EntrySummary, TraversalProgress<'a>> for TraversalTarget<'b> {
2564 fn cmp(&self, cursor_location: &TraversalProgress<'a>, _: &()) -> Ordering {
2565 match self {
2566 TraversalTarget::Path(path) => path.cmp(&cursor_location.max_path),
2567 TraversalTarget::PathSuccessor(path) => {
2568 if !cursor_location.max_path.starts_with(path) {
2569 Ordering::Equal
2570 } else {
2571 Ordering::Greater
2572 }
2573 }
2574 TraversalTarget::Count {
2575 count,
2576 include_dirs,
2577 include_ignored,
2578 } => Ord::cmp(
2579 count,
2580 &cursor_location.count(*include_dirs, *include_ignored),
2581 ),
2582 }
2583 }
2584}
2585
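/// Yields the immediate children of `parent_path` by advancing the underlying
/// traversal to the next sibling after each item, stopping at the first entry
/// that is no longer beneath `parent_path`.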
2586struct ChildEntriesIter<'a> {
2587 parent_path: &'a Path,
2588 traversal: Traversal<'a>,
2589}
2590
2591impl<'a> Iterator for ChildEntriesIter<'a> {
2592 type Item = &'a Entry;
2593
2594 fn next(&mut self) -> Option<Self::Item> {
2595 if let Some(item) = self.traversal.entry() {
2596 if item.path.starts_with(&self.parent_path) {
2597 self.traversal.advance_to_sibling();
2598 return Some(item);
2599 }
2600 }
2601 None
2602 }
2603}
2604
2605impl<'a> From<&'a Entry> for proto::Entry {
2606 fn from(entry: &'a Entry) -> Self {
2607 Self {
2608 id: entry.id.to_proto(),
2609 is_dir: entry.is_dir(),
2610 path: entry.path.as_os_str().as_bytes().to_vec(),
2611 inode: entry.inode,
2612 mtime: Some(entry.mtime.into()),
2613 is_symlink: entry.is_symlink,
2614 is_ignored: entry.is_ignored,
2615 }
2616 }
2617}
2618
2619impl<'a> TryFrom<(&'a CharBag, proto::Entry)> for Entry {
2620 type Error = anyhow::Error;
2621
2622 fn try_from((root_char_bag, entry): (&'a CharBag, proto::Entry)) -> Result<Self> {
2623 if let Some(mtime) = entry.mtime {
2624 let kind = if entry.is_dir {
2625 EntryKind::Dir
2626 } else {
2627 let mut char_bag = root_char_bag.clone();
2628 char_bag.extend(
2629 String::from_utf8_lossy(&entry.path)
2630 .chars()
2631 .map(|c| c.to_ascii_lowercase()),
2632 );
2633 EntryKind::File(char_bag)
2634 };
2635 let path: Arc<Path> = PathBuf::from(OsString::from_vec(entry.path)).into();
2636 Ok(Entry {
2637 id: ProjectEntryId::from_proto(entry.id),
2638 kind,
2639 path: path.clone(),
2640 inode: entry.inode,
2641 mtime: mtime.into(),
2642 is_symlink: entry.is_symlink,
2643 is_ignored: entry.is_ignored,
2644 })
2645 } else {
2646 Err(anyhow!(
2647 "missing mtime in remote worktree entry {:?}",
2648 entry.path
2649 ))
2650 }
2651 }
2652}
2653
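/// Sends a worktree update to the server, splitting it into chunks of at most
/// `MAX_CHUNK_SIZE` (2 in tests, 256 otherwise), presumably to keep individual
/// requests small.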
2654async fn send_worktree_update(client: &Arc<Client>, update: proto::UpdateWorktree) -> Result<()> {
2655 #[cfg(any(test, feature = "test-support"))]
2656 const MAX_CHUNK_SIZE: usize = 2;
2657 #[cfg(not(any(test, feature = "test-support")))]
2658 const MAX_CHUNK_SIZE: usize = 256;
2659
2660 for update in proto::split_worktree_update(update, MAX_CHUNK_SIZE) {
2661 client.request(update).await?;
2662 }
2663
2664 Ok(())
2665}
2666
2667#[cfg(test)]
2668mod tests {
2669 use super::*;
2670 use crate::fs::FakeFs;
2671 use anyhow::Result;
2672 use client::test::FakeHttpClient;
2673 use fs::RealFs;
2674 use gpui::TestAppContext;
2675 use rand::prelude::*;
2676 use serde_json::json;
2677 use std::{
2678 env,
2679 fmt::Write,
2680 time::{SystemTime, UNIX_EPOCH},
2681 };
2682 use util::test::temp_tree;
2683
2684 #[gpui::test]
2685 async fn test_traversal(cx: &mut TestAppContext) {
2686 let fs = FakeFs::new(cx.background());
2687 fs.insert_tree(
2688 "/root",
2689 json!({
2690 ".gitignore": "a/b\n",
2691 "a": {
2692 "b": "",
2693 "c": "",
2694 }
2695 }),
2696 )
2697 .await;
2698
2699 let http_client = FakeHttpClient::with_404_response();
2700 let client = Client::new(http_client);
2701
2702 let tree = Worktree::local(
2703 client,
2704 Arc::from(Path::new("/root")),
2705 true,
2706 fs,
2707 Default::default(),
2708 &mut cx.to_async(),
2709 )
2710 .await
2711 .unwrap();
2712 cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
2713 .await;
2714
2715 tree.read_with(cx, |tree, _| {
2716 assert_eq!(
2717 tree.entries(false)
2718 .map(|entry| entry.path.as_ref())
2719 .collect::<Vec<_>>(),
2720 vec![
2721 Path::new(""),
2722 Path::new(".gitignore"),
2723 Path::new("a"),
2724 Path::new("a/c"),
2725 ]
2726 );
2727 assert_eq!(
2728 tree.entries(true)
2729 .map(|entry| entry.path.as_ref())
2730 .collect::<Vec<_>>(),
2731 vec![
2732 Path::new(""),
2733 Path::new(".gitignore"),
2734 Path::new("a"),
2735 Path::new("a/b"),
2736 Path::new("a/c"),
2737 ]
2738 );
2739 })
2740 }
2741
2742 #[gpui::test]
2743 async fn test_rescan_with_gitignore(cx: &mut TestAppContext) {
2744 let dir = temp_tree(json!({
2745 ".git": {},
2746 ".gitignore": "ignored-dir\n",
2747 "tracked-dir": {
2748 "tracked-file1": "tracked contents",
2749 },
2750 "ignored-dir": {
2751 "ignored-file1": "ignored contents",
2752 }
2753 }));
2754
2755 let http_client = FakeHttpClient::with_404_response();
2756 let client = Client::new(http_client.clone());
2757
2758 let tree = Worktree::local(
2759 client,
2760 dir.path(),
2761 true,
2762 Arc::new(RealFs),
2763 Default::default(),
2764 &mut cx.to_async(),
2765 )
2766 .await
2767 .unwrap();
2768 cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
2769 .await;
2770 tree.flush_fs_events(&cx).await;
2771 cx.read(|cx| {
2772 let tree = tree.read(cx);
2773 let tracked = tree.entry_for_path("tracked-dir/tracked-file1").unwrap();
2774 let ignored = tree.entry_for_path("ignored-dir/ignored-file1").unwrap();
2775 assert_eq!(tracked.is_ignored, false);
2776 assert_eq!(ignored.is_ignored, true);
2777 });
2778
2779 std::fs::write(dir.path().join("tracked-dir/tracked-file2"), "").unwrap();
2780 std::fs::write(dir.path().join("ignored-dir/ignored-file2"), "").unwrap();
2781 tree.flush_fs_events(&cx).await;
2782 cx.read(|cx| {
2783 let tree = tree.read(cx);
2784 let dot_git = tree.entry_for_path(".git").unwrap();
2785 let tracked = tree.entry_for_path("tracked-dir/tracked-file2").unwrap();
2786 let ignored = tree.entry_for_path("ignored-dir/ignored-file2").unwrap();
2787 assert_eq!(tracked.is_ignored, false);
2788 assert_eq!(ignored.is_ignored, true);
2789 assert_eq!(dot_git.is_ignored, true);
2790 });
2791 }
2792
2793 #[gpui::test]
2794 async fn test_write_file(cx: &mut TestAppContext) {
2795 let dir = temp_tree(json!({
2796 ".git": {},
2797 ".gitignore": "ignored-dir\n",
2798 "tracked-dir": {},
2799 "ignored-dir": {}
2800 }));
2801
2802 let http_client = FakeHttpClient::with_404_response();
2803 let client = Client::new(http_client.clone());
2804
2805 let tree = Worktree::local(
2806 client,
2807 dir.path(),
2808 true,
2809 Arc::new(RealFs),
2810 Default::default(),
2811 &mut cx.to_async(),
2812 )
2813 .await
2814 .unwrap();
2815 cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
2816 .await;
2817 tree.flush_fs_events(&cx).await;
2818
2819 tree.update(cx, |tree, cx| {
2820 tree.as_local().unwrap().write_file(
2821 Path::new("tracked-dir/file.txt"),
2822 "hello".into(),
2823 cx,
2824 )
2825 })
2826 .await
2827 .unwrap();
2828 tree.update(cx, |tree, cx| {
2829 tree.as_local().unwrap().write_file(
2830 Path::new("ignored-dir/file.txt"),
2831 "world".into(),
2832 cx,
2833 )
2834 })
2835 .await
2836 .unwrap();
2837
2838 tree.read_with(cx, |tree, _| {
2839 let tracked = tree.entry_for_path("tracked-dir/file.txt").unwrap();
2840 let ignored = tree.entry_for_path("ignored-dir/file.txt").unwrap();
2841 assert_eq!(tracked.is_ignored, false);
2842 assert_eq!(ignored.is_ignored, true);
2843 });
2844 }
2845
2846 #[gpui::test(iterations = 100)]
2847 fn test_random(mut rng: StdRng) {
2848 let operations = env::var("OPERATIONS")
2849 .map(|o| o.parse().unwrap())
2850 .unwrap_or(40);
2851 let initial_entries = env::var("INITIAL_ENTRIES")
2852 .map(|o| o.parse().unwrap())
2853 .unwrap_or(20);
2854
2855 let root_dir = tempdir::TempDir::new("worktree-test").unwrap();
2856 for _ in 0..initial_entries {
2857 randomly_mutate_tree(root_dir.path(), 1.0, &mut rng).unwrap();
2858 }
2859 log::info!("Generated initial tree");
2860
2861 let (notify_tx, _notify_rx) = mpsc::unbounded();
2862 let fs = Arc::new(RealFs);
2863 let next_entry_id = Arc::new(AtomicUsize::new(0));
2864 let mut initial_snapshot = LocalSnapshot {
2865 abs_path: root_dir.path().into(),
2866 removed_entry_ids: Default::default(),
2867 ignores: Default::default(),
2868 next_entry_id: next_entry_id.clone(),
2869 snapshot: Snapshot {
2870 id: WorktreeId::from_usize(0),
2871 entries_by_path: Default::default(),
2872 entries_by_id: Default::default(),
2873 root_name: Default::default(),
2874 root_char_bag: Default::default(),
2875 scan_id: 0,
2876 is_complete: true,
2877 },
2878 };
2879 initial_snapshot.insert_entry(
2880 Entry::new(
2881 Path::new("").into(),
2882 &smol::block_on(fs.metadata(root_dir.path()))
2883 .unwrap()
2884 .unwrap(),
2885 &next_entry_id,
2886 Default::default(),
2887 ),
2888 fs.as_ref(),
2889 );
2890 let mut scanner = BackgroundScanner::new(
2891 Arc::new(Mutex::new(initial_snapshot.clone())),
2892 notify_tx,
2893 fs.clone(),
2894 Arc::new(gpui::executor::Background::new()),
2895 );
2896 smol::block_on(scanner.scan_dirs()).unwrap();
2897 scanner.snapshot().check_invariants();
2898
2899 let mut events = Vec::new();
2900 let mut snapshots = Vec::new();
2901 let mut mutations_len = operations;
2902 while mutations_len > 1 {
2903 if !events.is_empty() && rng.gen_bool(0.4) {
2904 let len = rng.gen_range(0..=events.len());
2905 let to_deliver = events.drain(0..len).collect::<Vec<_>>();
2906 log::info!("Delivering events: {:#?}", to_deliver);
2907 smol::block_on(scanner.process_events(to_deliver));
2908 scanner.snapshot().check_invariants();
2909 } else {
2910 events.extend(randomly_mutate_tree(root_dir.path(), 0.6, &mut rng).unwrap());
2911 mutations_len -= 1;
2912 }
2913
2914 if rng.gen_bool(0.2) {
2915 snapshots.push(scanner.snapshot());
2916 }
2917 }
2918 log::info!("Quiescing: {:#?}", events);
2919 smol::block_on(scanner.process_events(events));
2920 scanner.snapshot().check_invariants();
2921
2922 let (notify_tx, _notify_rx) = mpsc::unbounded();
2923 let mut new_scanner = BackgroundScanner::new(
2924 Arc::new(Mutex::new(initial_snapshot)),
2925 notify_tx,
2926 scanner.fs.clone(),
2927 scanner.executor.clone(),
2928 );
2929 smol::block_on(new_scanner.scan_dirs()).unwrap();
2930 assert_eq!(
2931 scanner.snapshot().to_vec(true),
2932 new_scanner.snapshot().to_vec(true)
2933 );
2934
2935 for mut prev_snapshot in snapshots {
2936 let include_ignored = rng.gen::<bool>();
2937 if !include_ignored {
2938 let mut entries_by_path_edits = Vec::new();
2939 let mut entries_by_id_edits = Vec::new();
2940 for entry in prev_snapshot
2941 .entries_by_id
2942 .cursor::<()>()
2943 .filter(|e| e.is_ignored)
2944 {
2945 entries_by_path_edits.push(Edit::Remove(PathKey(entry.path.clone())));
2946 entries_by_id_edits.push(Edit::Remove(entry.id));
2947 }
2948
2949 prev_snapshot
2950 .entries_by_path
2951 .edit(entries_by_path_edits, &());
2952 prev_snapshot.entries_by_id.edit(entries_by_id_edits, &());
2953 }
2954
2955 let update = scanner
2956 .snapshot()
2957 .build_update(&prev_snapshot, 0, 0, include_ignored);
2958 prev_snapshot.apply_remote_update(update).unwrap();
2959 assert_eq!(
2960 prev_snapshot.to_vec(true),
2961 scanner.snapshot().to_vec(include_ignored)
2962 );
2963 }
2964 }
2965
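    /// Applies one random mutation to the tree on disk (creating a file or
    /// directory, writing a `.gitignore`, renaming, or deleting), returning
    /// synthetic `fsevent::Event`s for the paths it touched.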
2966 fn randomly_mutate_tree(
2967 root_path: &Path,
2968 insertion_probability: f64,
2969 rng: &mut impl Rng,
2970 ) -> Result<Vec<fsevent::Event>> {
2971 let root_path = root_path.canonicalize().unwrap();
2972 let (dirs, files) = read_dir_recursive(root_path.clone());
2973
2974 let mut events = Vec::new();
2975 let mut record_event = |path: PathBuf| {
2976 events.push(fsevent::Event {
2977 event_id: SystemTime::now()
2978 .duration_since(UNIX_EPOCH)
2979 .unwrap()
2980 .as_secs(),
2981 flags: fsevent::StreamFlags::empty(),
2982 path,
2983 });
2984 };
2985
2986 if (files.is_empty() && dirs.len() == 1) || rng.gen_bool(insertion_probability) {
2987 let path = dirs.choose(rng).unwrap();
2988 let new_path = path.join(gen_name(rng));
2989
2990 if rng.gen() {
2991 log::info!("Creating dir {:?}", new_path.strip_prefix(root_path)?);
2992 std::fs::create_dir(&new_path)?;
2993 } else {
2994 log::info!("Creating file {:?}", new_path.strip_prefix(root_path)?);
2995 std::fs::write(&new_path, "")?;
2996 }
2997 record_event(new_path);
2998 } else if rng.gen_bool(0.05) {
2999 let ignore_dir_path = dirs.choose(rng).unwrap();
3000 let ignore_path = ignore_dir_path.join(&*GITIGNORE);
3001
3002 let (subdirs, subfiles) = read_dir_recursive(ignore_dir_path.clone());
3003 let files_to_ignore = {
3004 let len = rng.gen_range(0..=subfiles.len());
3005 subfiles.choose_multiple(rng, len)
3006 };
3007 let dirs_to_ignore = {
3008 let len = rng.gen_range(0..subdirs.len());
3009 subdirs.choose_multiple(rng, len)
3010 };
3011
3012 let mut ignore_contents = String::new();
3013 for path_to_ignore in files_to_ignore.chain(dirs_to_ignore) {
3014 write!(
3015 ignore_contents,
3016 "{}\n",
3017 path_to_ignore
3018 .strip_prefix(&ignore_dir_path)?
3019 .to_str()
3020 .unwrap()
3021 )
3022 .unwrap();
3023 }
3024 log::info!(
3025 "Creating {:?} with contents:\n{}",
3026 ignore_path.strip_prefix(&root_path)?,
3027 ignore_contents
3028 );
3029 std::fs::write(&ignore_path, ignore_contents).unwrap();
3030 record_event(ignore_path);
3031 } else {
3032 let old_path = {
3033 let file_path = files.choose(rng);
3034 let dir_path = dirs[1..].choose(rng);
3035 file_path.into_iter().chain(dir_path).choose(rng).unwrap()
3036 };
3037
3038 let is_rename = rng.gen();
3039 if is_rename {
3040 let new_path_parent = dirs
3041 .iter()
3042 .filter(|d| !d.starts_with(old_path))
3043 .choose(rng)
3044 .unwrap();
3045
3046 let overwrite_existing_dir =
3047 !old_path.starts_with(&new_path_parent) && rng.gen_bool(0.3);
3048 let new_path = if overwrite_existing_dir {
3049 std::fs::remove_dir_all(&new_path_parent).ok();
3050 new_path_parent.to_path_buf()
3051 } else {
3052 new_path_parent.join(gen_name(rng))
3053 };
3054
3055 log::info!(
3056 "Renaming {:?} to {}{:?}",
3057 old_path.strip_prefix(&root_path)?,
3058 if overwrite_existing_dir {
3059 "overwrite "
3060 } else {
3061 ""
3062 },
3063 new_path.strip_prefix(&root_path)?
3064 );
3065 std::fs::rename(&old_path, &new_path)?;
3066 record_event(old_path.clone());
3067 record_event(new_path);
3068 } else if old_path.is_dir() {
3069 let (dirs, files) = read_dir_recursive(old_path.clone());
3070
3071 log::info!("Deleting dir {:?}", old_path.strip_prefix(&root_path)?);
3072 std::fs::remove_dir_all(&old_path).unwrap();
3073 for file in files {
3074 record_event(file);
3075 }
3076 for dir in dirs {
3077 record_event(dir);
3078 }
3079 } else {
3080 log::info!("Deleting file {:?}", old_path.strip_prefix(&root_path)?);
3081 std::fs::remove_file(old_path).unwrap();
3082 record_event(old_path.clone());
3083 }
3084 }
3085
3086 Ok(events)
3087 }
3088
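    /// Recursively lists `path`, returning all directories (including `path`
    /// itself) and all files beneath it.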
3089 fn read_dir_recursive(path: PathBuf) -> (Vec<PathBuf>, Vec<PathBuf>) {
3090 let child_entries = std::fs::read_dir(&path).unwrap();
3091 let mut dirs = vec![path];
3092 let mut files = Vec::new();
3093 for child_entry in child_entries {
3094 let child_path = child_entry.unwrap().path();
3095 if child_path.is_dir() {
3096 let (child_dirs, child_files) = read_dir_recursive(child_path);
3097 dirs.extend(child_dirs);
3098 files.extend(child_files);
3099 } else {
3100 files.push(child_path);
3101 }
3102 }
3103 (dirs, files)
3104 }
3105
3106 fn gen_name(rng: &mut impl Rng) -> String {
3107 (0..6)
3108 .map(|_| rng.sample(rand::distributions::Alphanumeric))
3109 .map(char::from)
3110 .collect()
3111 }
3112
3113 impl LocalSnapshot {
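        /// Checks internal consistency of the snapshot: the file iterators agree
        /// with the entry tree, walking child entries recursively yields the same
        /// path order as the tree and as `entries(true)`, and every cached ignore
        /// corresponds to existing entries for its directory and `.gitignore` file.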
3114 fn check_invariants(&self) {
3115 let mut files = self.files(true, 0);
3116 let mut visible_files = self.files(false, 0);
3117 for entry in self.entries_by_path.cursor::<()>() {
3118 if entry.is_file() {
3119 assert_eq!(files.next().unwrap().inode, entry.inode);
3120 if !entry.is_ignored {
3121 assert_eq!(visible_files.next().unwrap().inode, entry.inode);
3122 }
3123 }
3124 }
3125 assert!(files.next().is_none());
3126 assert!(visible_files.next().is_none());
3127
3128 let mut bfs_paths = Vec::new();
3129 let mut stack = vec![Path::new("")];
3130 while let Some(path) = stack.pop() {
3131 bfs_paths.push(path);
3132 let ix = stack.len();
3133 for child_entry in self.child_entries(path) {
3134 stack.insert(ix, &child_entry.path);
3135 }
3136 }
3137
3138 let dfs_paths_via_iter = self
3139 .entries_by_path
3140 .cursor::<()>()
3141 .map(|e| e.path.as_ref())
3142 .collect::<Vec<_>>();
3143 assert_eq!(bfs_paths, dfs_paths_via_iter);
3144
3145 let dfs_paths_via_traversal = self
3146 .entries(true)
3147 .map(|e| e.path.as_ref())
3148 .collect::<Vec<_>>();
3149 assert_eq!(dfs_paths_via_traversal, dfs_paths_via_iter);
3150
3151 for (ignore_parent_path, _) in &self.ignores {
3152 assert!(self.entry_for_path(ignore_parent_path).is_some());
3153 assert!(self
3154 .entry_for_path(ignore_parent_path.join(&*GITIGNORE))
3155 .is_some());
3156 }
3157 }
3158
3159 fn to_vec(&self, include_ignored: bool) -> Vec<(&Path, u64, bool)> {
3160 let mut paths = Vec::new();
3161 for entry in self.entries_by_path.cursor::<()>() {
3162 if include_ignored || !entry.is_ignored {
3163 paths.push((entry.path.as_ref(), entry.inode, entry.is_ignored));
3164 }
3165 }
3166 paths.sort_by(|a, b| a.0.cmp(&b.0));
3167 paths
3168 }
3169 }
3170}