1use super::{
2 fs::{self, Fs},
3 ignore::IgnoreStack,
4 DiagnosticSummary,
5};
6use crate::{copy_recursive, ProjectEntryId, RemoveOptions};
7use ::ignore::gitignore::{Gitignore, GitignoreBuilder};
8use anyhow::{anyhow, Context, Result};
9use client::{proto, Client};
10use clock::ReplicaId;
11use collections::{HashMap, VecDeque};
12use futures::{
13 channel::{
14 mpsc::{self, UnboundedSender},
15 oneshot,
16 },
17 Stream, StreamExt,
18};
19use fuzzy::CharBag;
20use git::repository::GitRepository;
21use git::{DOT_GIT, GITIGNORE};
22use gpui::{
23 executor, AppContext, AsyncAppContext, Entity, ModelContext, ModelHandle, MutableAppContext,
24 Task,
25};
26use language::{
27 proto::{deserialize_version, serialize_line_ending, serialize_version},
28 Buffer, DiagnosticEntry, LineEnding, PointUtf16, Rope,
29};
30use parking_lot::Mutex;
31use postage::{
32 prelude::{Sink as _, Stream as _},
33 watch,
34};
35use settings::Settings;
36use smol::channel::{self, Sender};
37use std::{
38 any::Any,
39 cmp::{self, Ordering},
40 convert::TryFrom,
41 ffi::{OsStr, OsString},
42 fmt,
43 future::Future,
44 ops::{Deref, DerefMut},
45 os::unix::prelude::{OsStrExt, OsStringExt},
46 path::{Path, PathBuf},
47 sync::{atomic::AtomicUsize, Arc},
48 task::Poll,
49 time::{Duration, SystemTime},
50};
51use sum_tree::{Bias, Edit, SeekTarget, SumTree, TreeMap, TreeSet};
52use util::{ResultExt, TryFutureExt};
53
54#[derive(Copy, Clone, PartialEq, Eq, Debug, Hash, PartialOrd, Ord)]
55pub struct WorktreeId(usize);
56
57#[allow(clippy::large_enum_variant)]
58pub enum Worktree {
59 Local(LocalWorktree),
60 Remote(RemoteWorktree),
61}
62
63pub struct LocalWorktree {
64 snapshot: LocalSnapshot,
65 background_snapshot: Arc<Mutex<LocalSnapshot>>,
66 last_scan_state_rx: watch::Receiver<ScanState>,
67 _background_scanner_task: Option<Task<()>>,
68 poll_task: Option<Task<()>>,
69 share: Option<ShareState>,
70 diagnostics: HashMap<Arc<Path>, Vec<DiagnosticEntry<PointUtf16>>>,
71 diagnostic_summaries: TreeMap<PathKey, DiagnosticSummary>,
72 client: Arc<Client>,
73 fs: Arc<dyn Fs>,
74 visible: bool,
75}
76
77pub struct RemoteWorktree {
78 pub snapshot: Snapshot,
79 pub(crate) background_snapshot: Arc<Mutex<Snapshot>>,
80 project_id: u64,
81 client: Arc<Client>,
82 updates_tx: Option<UnboundedSender<proto::UpdateWorktree>>,
83 snapshot_subscriptions: VecDeque<(usize, oneshot::Sender<()>)>,
84 replica_id: ReplicaId,
85 diagnostic_summaries: TreeMap<PathKey, DiagnosticSummary>,
86 visible: bool,
87}
88
89#[derive(Clone)]
90pub struct Snapshot {
91 id: WorktreeId,
92 root_name: String,
93 root_char_bag: CharBag,
94 entries_by_path: SumTree<Entry>,
95 entries_by_id: SumTree<PathEntry>,
96 scan_id: usize,
97 is_complete: bool,
98}
99
100#[derive(Clone)]
101pub struct GitRepositoryEntry {
102 pub(crate) repo: Arc<Mutex<dyn GitRepository>>,
103
104 pub(crate) scan_id: usize,
105 // Path to folder containing the .git file or directory
106 pub(crate) content_path: Arc<Path>,
107 // Path to the actual .git folder.
108 // Note: if .git is a file, this points to the folder indicated by the .git file
109 pub(crate) git_dir_path: Arc<Path>,
110}
111
112impl std::fmt::Debug for GitRepositoryEntry {
113 fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
114 f.debug_struct("GitRepositoryEntry")
115 .field("content_path", &self.content_path)
116 .field("git_dir_path", &self.git_dir_path)
117 .field("libgit_repository", &"LibGitRepository")
118 .finish()
119 }
120}
121
128pub struct LocalSnapshot {
129 abs_path: Arc<Path>,
130 ignores_by_parent_abs_path: HashMap<Arc<Path>, (Arc<Gitignore>, usize)>,
131 git_repositories: Vec<GitRepositoryEntry>,
132 removed_entry_ids: HashMap<u64, ProjectEntryId>,
133 next_entry_id: Arc<AtomicUsize>,
134 snapshot: Snapshot,
135 extension_counts: HashMap<OsString, usize>,
136}
137
138impl Clone for LocalSnapshot {
139 fn clone(&self) -> Self {
140 Self {
141 abs_path: self.abs_path.clone(),
142 ignores_by_parent_abs_path: self.ignores_by_parent_abs_path.clone(),
143 git_repositories: self.git_repositories.iter().cloned().collect(),
144 removed_entry_ids: self.removed_entry_ids.clone(),
145 next_entry_id: self.next_entry_id.clone(),
146 snapshot: self.snapshot.clone(),
147 extension_counts: self.extension_counts.clone(),
148 }
149 }
150}
151
152impl Deref for LocalSnapshot {
153 type Target = Snapshot;
154
155 fn deref(&self) -> &Self::Target {
156 &self.snapshot
157 }
158}
159
160impl DerefMut for LocalSnapshot {
161 fn deref_mut(&mut self) -> &mut Self::Target {
162 &mut self.snapshot
163 }
164}
165
#[derive(Clone, Debug)]
enum ScanState {
    /// The most recent scan has completed and no scan is currently in progress.
    Idle,
    /// The worktree is performing its initial scan of the filesystem.
    Initializing,
    /// The worktree is updating in response to filesystem events.
    Updating,
    /// Scanning failed with the given error.
    Err(Arc<anyhow::Error>),
}
175
176struct ShareState {
177 project_id: u64,
178 snapshots_tx: watch::Sender<LocalSnapshot>,
179 _maintain_remote_snapshot: Option<Task<Option<()>>>,
180}
181
182pub enum Event {
183 UpdatedEntries,
184 UpdatedGitRepositories(Vec<GitRepositoryEntry>),
185}
186
187impl Entity for Worktree {
188 type Event = Event;
189}
190
191impl Worktree {
192 pub async fn local(
193 client: Arc<Client>,
194 path: impl Into<Arc<Path>>,
195 visible: bool,
196 fs: Arc<dyn Fs>,
197 next_entry_id: Arc<AtomicUsize>,
198 cx: &mut AsyncAppContext,
199 ) -> Result<ModelHandle<Self>> {
200 let (tree, scan_states_tx) =
201 LocalWorktree::create(client, path, visible, fs.clone(), next_entry_id, cx).await?;
202 tree.update(cx, |tree, cx| {
203 let tree = tree.as_local_mut().unwrap();
204 let abs_path = tree.abs_path().clone();
205 let background_snapshot = tree.background_snapshot.clone();
206 let background = cx.background().clone();
207 tree._background_scanner_task = Some(cx.background().spawn(async move {
208 let events = fs.watch(&abs_path, Duration::from_millis(100)).await;
209 let scanner =
210 BackgroundScanner::new(background_snapshot, scan_states_tx, fs, background);
211 scanner.run(events).await;
212 }));
213 });
214 Ok(tree)
215 }
216
217 pub fn remote(
218 project_remote_id: u64,
219 replica_id: ReplicaId,
220 worktree: proto::WorktreeMetadata,
221 client: Arc<Client>,
222 cx: &mut MutableAppContext,
223 ) -> ModelHandle<Self> {
224 let remote_id = worktree.id;
225 let root_char_bag: CharBag = worktree
226 .root_name
227 .chars()
228 .map(|c| c.to_ascii_lowercase())
229 .collect();
230 let root_name = worktree.root_name.clone();
231 let visible = worktree.visible;
232 let snapshot = Snapshot {
233 id: WorktreeId(remote_id as usize),
234 root_name,
235 root_char_bag,
236 entries_by_path: Default::default(),
237 entries_by_id: Default::default(),
238 scan_id: 0,
239 is_complete: false,
240 };
241
242 let (updates_tx, mut updates_rx) = mpsc::unbounded();
243 let background_snapshot = Arc::new(Mutex::new(snapshot.clone()));
244 let (mut snapshot_updated_tx, mut snapshot_updated_rx) = watch::channel();
245 let worktree_handle = cx.add_model(|_: &mut ModelContext<Worktree>| {
246 Worktree::Remote(RemoteWorktree {
247 project_id: project_remote_id,
248 replica_id,
249 snapshot: snapshot.clone(),
250 background_snapshot: background_snapshot.clone(),
251 updates_tx: Some(updates_tx),
252 snapshot_subscriptions: Default::default(),
253 client: client.clone(),
254 diagnostic_summaries: Default::default(),
255 visible,
256 })
257 });
258
259 cx.background()
260 .spawn(async move {
261 while let Some(update) = updates_rx.next().await {
262 if let Err(error) = background_snapshot.lock().apply_remote_update(update) {
263 log::error!("error applying worktree update: {}", error);
264 }
265 snapshot_updated_tx.send(()).await.ok();
266 }
267 })
268 .detach();
269
270 cx.spawn(|mut cx| {
271 let this = worktree_handle.downgrade();
272 async move {
273 while (snapshot_updated_rx.recv().await).is_some() {
274 if let Some(this) = this.upgrade(&cx) {
275 this.update(&mut cx, |this, cx| {
276 this.poll_snapshot(cx);
277 let this = this.as_remote_mut().unwrap();
278 while let Some((scan_id, _)) = this.snapshot_subscriptions.front() {
279 if this.observed_snapshot(*scan_id) {
280 let (_, tx) = this.snapshot_subscriptions.pop_front().unwrap();
281 let _ = tx.send(());
282 } else {
283 break;
284 }
285 }
286 });
287 } else {
288 break;
289 }
290 }
291 }
292 })
293 .detach();
294
295 worktree_handle
296 }
297
298 pub fn as_local(&self) -> Option<&LocalWorktree> {
299 if let Worktree::Local(worktree) = self {
300 Some(worktree)
301 } else {
302 None
303 }
304 }
305
306 pub fn as_remote(&self) -> Option<&RemoteWorktree> {
307 if let Worktree::Remote(worktree) = self {
308 Some(worktree)
309 } else {
310 None
311 }
312 }
313
314 pub fn as_local_mut(&mut self) -> Option<&mut LocalWorktree> {
315 if let Worktree::Local(worktree) = self {
316 Some(worktree)
317 } else {
318 None
319 }
320 }
321
322 pub fn as_remote_mut(&mut self) -> Option<&mut RemoteWorktree> {
323 if let Worktree::Remote(worktree) = self {
324 Some(worktree)
325 } else {
326 None
327 }
328 }
329
330 pub fn is_local(&self) -> bool {
331 matches!(self, Worktree::Local(_))
332 }
333
334 pub fn is_remote(&self) -> bool {
335 !self.is_local()
336 }
337
338 pub fn snapshot(&self) -> Snapshot {
339 match self {
340 Worktree::Local(worktree) => worktree.snapshot().snapshot,
341 Worktree::Remote(worktree) => worktree.snapshot(),
342 }
343 }
344
345 pub fn scan_id(&self) -> usize {
346 match self {
347 Worktree::Local(worktree) => worktree.snapshot.scan_id,
348 Worktree::Remote(worktree) => worktree.snapshot.scan_id,
349 }
350 }
351
352 pub fn is_visible(&self) -> bool {
353 match self {
354 Worktree::Local(worktree) => worktree.visible,
355 Worktree::Remote(worktree) => worktree.visible,
356 }
357 }
358
359 pub fn replica_id(&self) -> ReplicaId {
360 match self {
361 Worktree::Local(_) => 0,
362 Worktree::Remote(worktree) => worktree.replica_id,
363 }
364 }
365
366 pub fn diagnostic_summaries(
367 &self,
368 ) -> impl Iterator<Item = (Arc<Path>, DiagnosticSummary)> + '_ {
369 match self {
370 Worktree::Local(worktree) => &worktree.diagnostic_summaries,
371 Worktree::Remote(worktree) => &worktree.diagnostic_summaries,
372 }
373 .iter()
374 .map(|(path, summary)| (path.0.clone(), *summary))
375 }
376
377 fn poll_snapshot(&mut self, cx: &mut ModelContext<Self>) {
378 match self {
379 Self::Local(worktree) => worktree.poll_snapshot(false, cx),
380 Self::Remote(worktree) => worktree.poll_snapshot(cx),
381 };
382 }
383}
384
385impl LocalWorktree {
386 async fn create(
387 client: Arc<Client>,
388 path: impl Into<Arc<Path>>,
389 visible: bool,
390 fs: Arc<dyn Fs>,
391 next_entry_id: Arc<AtomicUsize>,
392 cx: &mut AsyncAppContext,
393 ) -> Result<(ModelHandle<Worktree>, UnboundedSender<ScanState>)> {
394 let abs_path = path.into();
395 let path: Arc<Path> = Arc::from(Path::new(""));
396
        // Populate the snapshot's "root name", which is used for fuzzy matching.
        let root_name = abs_path
            .file_name()
            .map_or(String::new(), |f| f.to_string_lossy().to_string());
402 let root_char_bag = root_name.chars().map(|c| c.to_ascii_lowercase()).collect();
403 let metadata = fs
404 .metadata(&abs_path)
405 .await
406 .context("failed to stat worktree path")?;
407
408 let (scan_states_tx, mut scan_states_rx) = mpsc::unbounded();
409 let (mut last_scan_state_tx, last_scan_state_rx) =
410 watch::channel_with(ScanState::Initializing);
411 let tree = cx.add_model(move |cx: &mut ModelContext<Worktree>| {
412 let mut snapshot = LocalSnapshot {
413 abs_path,
414 ignores_by_parent_abs_path: Default::default(),
415 git_repositories: Default::default(),
416 removed_entry_ids: Default::default(),
417 next_entry_id,
418 snapshot: Snapshot {
419 id: WorktreeId::from_usize(cx.model_id()),
420 root_name: root_name.clone(),
421 root_char_bag,
422 entries_by_path: Default::default(),
423 entries_by_id: Default::default(),
424 scan_id: 0,
425 is_complete: true,
426 },
427 extension_counts: Default::default(),
428 };
429 if let Some(metadata) = metadata {
430 let entry = Entry::new(
431 path,
432 &metadata,
433 &snapshot.next_entry_id,
434 snapshot.root_char_bag,
435 );
436 snapshot.insert_entry(entry, fs.as_ref());
437 }
438
439 let tree = Self {
440 snapshot: snapshot.clone(),
441 background_snapshot: Arc::new(Mutex::new(snapshot)),
442 last_scan_state_rx,
443 _background_scanner_task: None,
444 share: None,
445 poll_task: None,
446 diagnostics: Default::default(),
447 diagnostic_summaries: Default::default(),
448 client,
449 fs,
450 visible,
451 };
452
453 cx.spawn_weak(|this, mut cx| async move {
454 while let Some(scan_state) = scan_states_rx.next().await {
455 if let Some(this) = this.upgrade(&cx) {
456 last_scan_state_tx.blocking_send(scan_state).ok();
457 this.update(&mut cx, |this, cx| this.poll_snapshot(cx));
458 } else {
459 break;
460 }
461 }
462 })
463 .detach();
464
465 Worktree::Local(tree)
466 });
467
468 Ok((tree, scan_states_tx))
469 }
470
471 pub fn contains_abs_path(&self, path: &Path) -> bool {
472 path.starts_with(&self.abs_path)
473 }
474
475 fn absolutize(&self, path: &Path) -> PathBuf {
476 if path.file_name().is_some() {
477 self.abs_path.join(path)
478 } else {
479 self.abs_path.to_path_buf()
480 }
481 }
482
483 pub(crate) fn load_buffer(
484 &mut self,
485 path: &Path,
486 cx: &mut ModelContext<Worktree>,
487 ) -> Task<Result<ModelHandle<Buffer>>> {
488 let path = Arc::from(path);
489 cx.spawn(move |this, mut cx| async move {
490 let (file, contents, head_text) = this
491 .update(&mut cx, |t, cx| t.as_local().unwrap().load(&path, cx))
492 .await?;
493 Ok(cx.add_model(|cx| {
494 let mut buffer = Buffer::from_file(0, contents, head_text, Arc::new(file), cx);
495 buffer.git_diff_recalc(cx);
496 buffer
497 }))
498 })
499 }
500
501 pub fn diagnostics_for_path(&self, path: &Path) -> Option<Vec<DiagnosticEntry<PointUtf16>>> {
502 self.diagnostics.get(path).cloned()
503 }
504
505 pub fn update_diagnostics(
506 &mut self,
507 language_server_id: usize,
508 worktree_path: Arc<Path>,
509 diagnostics: Vec<DiagnosticEntry<PointUtf16>>,
510 _: &mut ModelContext<Worktree>,
511 ) -> Result<bool> {
512 self.diagnostics.remove(&worktree_path);
513 let old_summary = self
514 .diagnostic_summaries
515 .remove(&PathKey(worktree_path.clone()))
516 .unwrap_or_default();
517 let new_summary = DiagnosticSummary::new(language_server_id, &diagnostics);
518 if !new_summary.is_empty() {
519 self.diagnostic_summaries
520 .insert(PathKey(worktree_path.clone()), new_summary);
521 self.diagnostics.insert(worktree_path.clone(), diagnostics);
522 }
523
524 let updated = !old_summary.is_empty() || !new_summary.is_empty();
525 if updated {
526 if let Some(share) = self.share.as_ref() {
527 self.client
528 .send(proto::UpdateDiagnosticSummary {
529 project_id: share.project_id,
530 worktree_id: self.id().to_proto(),
531 summary: Some(proto::DiagnosticSummary {
532 path: worktree_path.to_string_lossy().to_string(),
533 language_server_id: language_server_id as u64,
534 error_count: new_summary.error_count as u32,
535 warning_count: new_summary.warning_count as u32,
536 }),
537 })
538 .log_err();
539 }
540 }
541
542 Ok(updated)
543 }
544
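    // Publishes the background scanner's latest snapshot to the foreground, emitting
    // events for entry and git repository changes. While the initial scan is still
    // running, polling is rescheduled on a short timer so the UI can reflect partial
    // progress.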
545 fn poll_snapshot(&mut self, force: bool, cx: &mut ModelContext<Worktree>) {
546 self.poll_task.take();
547
548 match self.scan_state() {
549 ScanState::Idle => {
550 let new_snapshot = self.background_snapshot.lock().clone();
551 let updated_repos = Self::changed_repos(
552 &self.snapshot.git_repositories,
553 &new_snapshot.git_repositories,
554 );
555 self.snapshot = new_snapshot;
556
557 if let Some(share) = self.share.as_mut() {
558 *share.snapshots_tx.borrow_mut() = self.snapshot.clone();
559 }
560
561 cx.emit(Event::UpdatedEntries);
562
563 if !updated_repos.is_empty() {
564 cx.emit(Event::UpdatedGitRepositories(updated_repos));
565 }
566 }
567
568 ScanState::Initializing => {
569 let is_fake_fs = self.fs.is_fake();
570
571 let new_snapshot = self.background_snapshot.lock().clone();
572 let updated_repos = Self::changed_repos(
573 &self.snapshot.git_repositories,
574 &new_snapshot.git_repositories,
575 );
576 self.snapshot = new_snapshot;
577
578 self.poll_task = Some(cx.spawn_weak(|this, mut cx| async move {
579 if is_fake_fs {
580 #[cfg(any(test, feature = "test-support"))]
581 cx.background().simulate_random_delay().await;
582 } else {
583 smol::Timer::after(Duration::from_millis(100)).await;
584 }
585 if let Some(this) = this.upgrade(&cx) {
586 this.update(&mut cx, |this, cx| this.poll_snapshot(cx));
587 }
588 }));
589
590 cx.emit(Event::UpdatedEntries);
591
592 if !updated_repos.is_empty() {
593 cx.emit(Event::UpdatedGitRepositories(updated_repos));
594 }
595 }
596
597 _ => {
598 if force {
599 self.snapshot = self.background_snapshot.lock().clone();
600 }
601 }
602 }
603
604 cx.notify();
605 }
606
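    // Computes which repositories differ between the two snapshots, keyed by
    // `git_dir_path` and `scan_id`: repositories that were added, removed, or
    // rescanned since the previous snapshot are all reported as changed.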
607 fn changed_repos(
608 old_repos: &[GitRepositoryEntry],
609 new_repos: &[GitRepositoryEntry],
610 ) -> Vec<GitRepositoryEntry> {
611 fn diff<'a>(
612 a: &'a [GitRepositoryEntry],
613 b: &'a [GitRepositoryEntry],
614 updated: &mut HashMap<&'a Path, GitRepositoryEntry>,
615 ) {
616 for a_repo in a {
617 let matched = b.iter().find(|b_repo| {
618 a_repo.git_dir_path == b_repo.git_dir_path && a_repo.scan_id == b_repo.scan_id
619 });
620
621 if matched.is_none() {
622 updated.insert(a_repo.git_dir_path.as_ref(), a_repo.clone());
623 }
624 }
625 }
626
627 let mut updated = HashMap::<&Path, GitRepositoryEntry>::default();
628
629 diff(old_repos, new_repos, &mut updated);
630 diff(new_repos, old_repos, &mut updated);
631
632 updated.into_values().collect()
633 }
634
635 pub fn scan_complete(&self) -> impl Future<Output = ()> {
636 let mut scan_state_rx = self.last_scan_state_rx.clone();
637 async move {
638 let mut scan_state = Some(scan_state_rx.borrow().clone());
639 while let Some(ScanState::Initializing | ScanState::Updating) = scan_state {
640 scan_state = scan_state_rx.recv().await;
641 }
642 }
643 }
644
645 fn scan_state(&self) -> ScanState {
646 self.last_scan_state_rx.borrow().clone()
647 }
648
649 pub fn snapshot(&self) -> LocalSnapshot {
650 self.snapshot.clone()
651 }
652
653 pub fn metadata_proto(&self) -> proto::WorktreeMetadata {
654 proto::WorktreeMetadata {
655 id: self.id().to_proto(),
656 root_name: self.root_name().to_string(),
657 visible: self.visible,
658 }
659 }
660
661 fn load(
662 &self,
663 path: &Path,
664 cx: &mut ModelContext<Worktree>,
665 ) -> Task<Result<(File, String, Option<String>)>> {
666 let handle = cx.handle();
667 let path = Arc::from(path);
668 let abs_path = self.absolutize(&path);
669 let fs = self.fs.clone();
670 let snapshot = self.snapshot();
671
672 let files_included = cx
673 .global::<Settings>()
674 .editor_overrides
675 .git_gutter
676 .unwrap_or_default()
677 .files_included;
678
679 cx.spawn(|this, mut cx| async move {
680 let text = fs.load(&abs_path).await?;
681
682 let head_text = if matches!(
683 files_included,
684 settings::GitFilesIncluded::All | settings::GitFilesIncluded::OnlyTracked
685 ) {
686 let results = if let Some(repo) = snapshot.repo_for(&abs_path) {
687 cx.background()
688 .spawn({
689 let path = path.clone();
690 async move { repo.repo.lock().load_head_text(&path) }
691 })
692 .await
693 } else {
694 None
695 };
696
697 if files_included == settings::GitFilesIncluded::All {
698 results.or_else(|| Some(text.clone()))
699 } else {
700 results
701 }
702 } else {
703 None
704 };
705
706 // Eagerly populate the snapshot with an updated entry for the loaded file
707 let entry = this
708 .update(&mut cx, |this, cx| {
709 this.as_local()
710 .unwrap()
711 .refresh_entry(path, abs_path, None, cx)
712 })
713 .await?;
714
715 Ok((
716 File {
717 entry_id: Some(entry.id),
718 worktree: handle,
719 path: entry.path,
720 mtime: entry.mtime,
721 is_local: true,
722 },
723 text,
724 head_text,
725 ))
726 })
727 }
728
729 pub fn save_buffer_as(
730 &self,
731 buffer_handle: ModelHandle<Buffer>,
732 path: impl Into<Arc<Path>>,
733 cx: &mut ModelContext<Worktree>,
734 ) -> Task<Result<()>> {
735 let buffer = buffer_handle.read(cx);
736 let text = buffer.as_rope().clone();
737 let fingerprint = text.fingerprint();
738 let version = buffer.version();
739 let save = self.write_file(path, text, buffer.line_ending(), cx);
740 let handle = cx.handle();
741 cx.as_mut().spawn(|mut cx| async move {
742 let entry = save.await?;
743 let file = File {
744 entry_id: Some(entry.id),
745 worktree: handle,
746 path: entry.path,
747 mtime: entry.mtime,
748 is_local: true,
749 };
750
751 buffer_handle.update(&mut cx, |buffer, cx| {
752 buffer.did_save(version, fingerprint, file.mtime, Some(Arc::new(file)), cx);
753 });
754
755 Ok(())
756 })
757 }
758
759 pub fn create_entry(
760 &self,
761 path: impl Into<Arc<Path>>,
762 is_dir: bool,
763 cx: &mut ModelContext<Worktree>,
764 ) -> Task<Result<Entry>> {
765 self.write_entry_internal(
766 path,
767 if is_dir {
768 None
769 } else {
770 Some(Default::default())
771 },
772 cx,
773 )
774 }
775
776 pub fn write_file(
777 &self,
778 path: impl Into<Arc<Path>>,
779 text: Rope,
780 line_ending: LineEnding,
781 cx: &mut ModelContext<Worktree>,
782 ) -> Task<Result<Entry>> {
783 self.write_entry_internal(path, Some((text, line_ending)), cx)
784 }
785
786 pub fn delete_entry(
787 &self,
788 entry_id: ProjectEntryId,
789 cx: &mut ModelContext<Worktree>,
790 ) -> Option<Task<Result<()>>> {
791 let entry = self.entry_for_id(entry_id)?.clone();
792 let abs_path = self.absolutize(&entry.path);
793 let delete = cx.background().spawn({
794 let fs = self.fs.clone();
795 let abs_path = abs_path;
796 async move {
797 if entry.is_file() {
798 fs.remove_file(&abs_path, Default::default()).await
799 } else {
800 fs.remove_dir(
801 &abs_path,
802 RemoveOptions {
803 recursive: true,
804 ignore_if_not_exists: false,
805 },
806 )
807 .await
808 }
809 }
810 });
811
812 Some(cx.spawn(|this, mut cx| async move {
813 delete.await?;
814 this.update(&mut cx, |this, cx| {
815 let this = this.as_local_mut().unwrap();
816 {
817 let mut snapshot = this.background_snapshot.lock();
818 snapshot.delete_entry(entry_id);
819 }
820 this.poll_snapshot(true, cx);
821 });
822 Ok(())
823 }))
824 }
825
826 pub fn rename_entry(
827 &self,
828 entry_id: ProjectEntryId,
829 new_path: impl Into<Arc<Path>>,
830 cx: &mut ModelContext<Worktree>,
831 ) -> Option<Task<Result<Entry>>> {
832 let old_path = self.entry_for_id(entry_id)?.path.clone();
833 let new_path = new_path.into();
834 let abs_old_path = self.absolutize(&old_path);
835 let abs_new_path = self.absolutize(&new_path);
836 let rename = cx.background().spawn({
837 let fs = self.fs.clone();
838 let abs_new_path = abs_new_path.clone();
839 async move {
840 fs.rename(&abs_old_path, &abs_new_path, Default::default())
841 .await
842 }
843 });
844
845 Some(cx.spawn(|this, mut cx| async move {
846 rename.await?;
847 let entry = this
848 .update(&mut cx, |this, cx| {
849 this.as_local_mut().unwrap().refresh_entry(
850 new_path.clone(),
851 abs_new_path,
852 Some(old_path),
853 cx,
854 )
855 })
856 .await?;
857 Ok(entry)
858 }))
859 }
860
861 pub fn copy_entry(
862 &self,
863 entry_id: ProjectEntryId,
864 new_path: impl Into<Arc<Path>>,
865 cx: &mut ModelContext<Worktree>,
866 ) -> Option<Task<Result<Entry>>> {
867 let old_path = self.entry_for_id(entry_id)?.path.clone();
868 let new_path = new_path.into();
869 let abs_old_path = self.absolutize(&old_path);
870 let abs_new_path = self.absolutize(&new_path);
871 let copy = cx.background().spawn({
872 let fs = self.fs.clone();
873 let abs_new_path = abs_new_path.clone();
874 async move {
875 copy_recursive(
876 fs.as_ref(),
877 &abs_old_path,
878 &abs_new_path,
879 Default::default(),
880 )
881 .await
882 }
883 });
884
885 Some(cx.spawn(|this, mut cx| async move {
886 copy.await?;
887 let entry = this
888 .update(&mut cx, |this, cx| {
889 this.as_local_mut().unwrap().refresh_entry(
890 new_path.clone(),
891 abs_new_path,
892 None,
893 cx,
894 )
895 })
896 .await?;
897 Ok(entry)
898 }))
899 }
900
901 fn write_entry_internal(
902 &self,
903 path: impl Into<Arc<Path>>,
904 text_if_file: Option<(Rope, LineEnding)>,
905 cx: &mut ModelContext<Worktree>,
906 ) -> Task<Result<Entry>> {
907 let path = path.into();
908 let abs_path = self.absolutize(&path);
909 let write = cx.background().spawn({
910 let fs = self.fs.clone();
911 let abs_path = abs_path.clone();
912 async move {
913 if let Some((text, line_ending)) = text_if_file {
914 fs.save(&abs_path, &text, line_ending).await
915 } else {
916 fs.create_dir(&abs_path).await
917 }
918 }
919 });
920
921 cx.spawn(|this, mut cx| async move {
922 write.await?;
923 let entry = this
924 .update(&mut cx, |this, cx| {
925 this.as_local_mut()
926 .unwrap()
927 .refresh_entry(path, abs_path, None, cx)
928 })
929 .await?;
930 Ok(entry)
931 })
932 }
933
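    // Re-stats `abs_path` and inserts a fresh entry for it into the background
    // snapshot, computing its ignore status and removing the entry at `old_path`
    // when the file was renamed.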
934 fn refresh_entry(
935 &self,
936 path: Arc<Path>,
937 abs_path: PathBuf,
938 old_path: Option<Arc<Path>>,
939 cx: &mut ModelContext<Worktree>,
940 ) -> Task<Result<Entry>> {
941 let fs = self.fs.clone();
942 let root_char_bag;
943 let next_entry_id;
944 {
945 let snapshot = self.background_snapshot.lock();
946 root_char_bag = snapshot.root_char_bag;
947 next_entry_id = snapshot.next_entry_id.clone();
948 }
949 cx.spawn_weak(|this, mut cx| async move {
950 let metadata = fs
951 .metadata(&abs_path)
952 .await?
953 .ok_or_else(|| anyhow!("could not read saved file metadata"))?;
954 let this = this
955 .upgrade(&cx)
956 .ok_or_else(|| anyhow!("worktree was dropped"))?;
957 this.update(&mut cx, |this, cx| {
958 let this = this.as_local_mut().unwrap();
959 let inserted_entry;
960 {
961 let mut snapshot = this.background_snapshot.lock();
962 let mut entry = Entry::new(path, &metadata, &next_entry_id, root_char_bag);
963 entry.is_ignored = snapshot
964 .ignore_stack_for_abs_path(&abs_path, entry.is_dir())
965 .is_abs_path_ignored(&abs_path, entry.is_dir());
966 if let Some(old_path) = old_path {
967 snapshot.remove_path(&old_path);
968 }
969 inserted_entry = snapshot.insert_entry(entry, fs.as_ref());
970 snapshot.scan_id += 1;
971 }
972 this.poll_snapshot(true, cx);
973 Ok(inserted_entry)
974 })
975 })
976 }
977
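    // Begins replicating this worktree for the given remote project: the full
    // snapshot is sent once, followed by the stored diagnostic summaries and then
    // a stream of incremental snapshot updates.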
978 pub fn share(&mut self, project_id: u64, cx: &mut ModelContext<Worktree>) -> Task<Result<()>> {
979 let (share_tx, share_rx) = oneshot::channel();
980
981 if self.share.is_some() {
982 let _ = share_tx.send(Ok(()));
983 } else {
984 let (snapshots_tx, mut snapshots_rx) = watch::channel_with(self.snapshot());
985 let rpc = self.client.clone();
986 let worktree_id = cx.model_id() as u64;
987 let maintain_remote_snapshot = cx.background().spawn({
988 let rpc = rpc;
989 let diagnostic_summaries = self.diagnostic_summaries.clone();
990 async move {
991 let mut prev_snapshot = match snapshots_rx.recv().await {
992 Some(snapshot) => {
993 let update = proto::UpdateWorktree {
994 project_id,
995 worktree_id,
996 root_name: snapshot.root_name().to_string(),
997 updated_entries: snapshot
998 .entries_by_path
999 .iter()
1000 .map(Into::into)
1001 .collect(),
1002 removed_entries: Default::default(),
1003 scan_id: snapshot.scan_id as u64,
1004 is_last_update: true,
1005 };
1006 if let Err(error) = send_worktree_update(&rpc, update).await {
1007 let _ = share_tx.send(Err(error));
1008 return Err(anyhow!("failed to send initial update worktree"));
1009 } else {
1010 let _ = share_tx.send(Ok(()));
1011 snapshot
1012 }
1013 }
1014 None => {
1015 share_tx
1016 .send(Err(anyhow!("worktree dropped before share completed")))
1017 .ok();
1018 return Err(anyhow!("failed to send initial update worktree"));
1019 }
1020 };
1021
1022 for (path, summary) in diagnostic_summaries.iter() {
1023 rpc.send(proto::UpdateDiagnosticSummary {
1024 project_id,
1025 worktree_id,
1026 summary: Some(summary.to_proto(&path.0)),
1027 })?;
1028 }
1029
1030 while let Some(snapshot) = snapshots_rx.recv().await {
1031 send_worktree_update(
1032 &rpc,
1033 snapshot.build_update(&prev_snapshot, project_id, worktree_id, true),
1034 )
1035 .await?;
1036 prev_snapshot = snapshot;
1037 }
1038
1039 Ok::<_, anyhow::Error>(())
1040 }
1041 .log_err()
1042 });
1043 self.share = Some(ShareState {
1044 project_id,
1045 snapshots_tx,
1046 _maintain_remote_snapshot: Some(maintain_remote_snapshot),
1047 });
1048 }
1049
1050 cx.foreground().spawn(async move {
1051 share_rx
1052 .await
1053 .unwrap_or_else(|_| Err(anyhow!("share ended")))
1054 })
1055 }
1056
1057 pub fn unshare(&mut self) {
1058 self.share.take();
1059 }
1060
1061 pub fn is_shared(&self) -> bool {
1062 self.share.is_some()
1063 }
1064
1065 pub fn send_extension_counts(&self, project_id: u64) {
1066 let mut extensions = Vec::new();
1067 let mut counts = Vec::new();
1068
1069 for (extension, count) in self.extension_counts() {
1070 extensions.push(extension.to_string_lossy().to_string());
1071 counts.push(*count as u32);
1072 }
1073
1074 self.client
1075 .send(proto::UpdateWorktreeExtensions {
1076 project_id,
1077 worktree_id: self.id().to_proto(),
1078 extensions,
1079 counts,
1080 })
1081 .log_err();
1082 }
1083}
1084
1085impl RemoteWorktree {
1086 fn snapshot(&self) -> Snapshot {
1087 self.snapshot.clone()
1088 }
1089
1090 fn poll_snapshot(&mut self, cx: &mut ModelContext<Worktree>) {
1091 self.snapshot = self.background_snapshot.lock().clone();
1092 cx.emit(Event::UpdatedEntries);
1093 cx.notify();
1094 }
1095
1096 pub fn disconnected_from_host(&mut self) {
1097 self.updates_tx.take();
1098 self.snapshot_subscriptions.clear();
1099 }
1100
1101 pub fn update_from_remote(&mut self, update: proto::UpdateWorktree) {
1102 if let Some(updates_tx) = &self.updates_tx {
1103 updates_tx
1104 .unbounded_send(update)
1105 .expect("consumer runs to completion");
1106 }
1107 }
1108
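    // A snapshot with the given `scan_id` has been observed if a later scan has
    // already been seen, or if this scan has been seen and is complete.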
1109 fn observed_snapshot(&self, scan_id: usize) -> bool {
1110 self.scan_id > scan_id || (self.scan_id == scan_id && self.is_complete)
1111 }
1112
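    // Returns a future that resolves once a snapshot with at least the given
    // `scan_id` has been observed. Pending subscriptions are kept sorted by scan
    // id so they can be drained in order as remote updates arrive.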
1113 fn wait_for_snapshot(&mut self, scan_id: usize) -> impl Future<Output = ()> {
1114 let (tx, rx) = oneshot::channel();
1115 if self.observed_snapshot(scan_id) {
1116 let _ = tx.send(());
1117 } else {
1118 match self
1119 .snapshot_subscriptions
1120 .binary_search_by_key(&scan_id, |probe| probe.0)
1121 {
1122 Ok(ix) | Err(ix) => self.snapshot_subscriptions.insert(ix, (scan_id, tx)),
1123 }
1124 }
1125
1126 async move {
1127 let _ = rx.await;
1128 }
1129 }
1130
1131 pub fn update_diagnostic_summary(
1132 &mut self,
1133 path: Arc<Path>,
1134 summary: &proto::DiagnosticSummary,
1135 ) {
1136 let summary = DiagnosticSummary {
1137 language_server_id: summary.language_server_id as usize,
1138 error_count: summary.error_count as usize,
1139 warning_count: summary.warning_count as usize,
1140 };
1141 if summary.is_empty() {
1142 self.diagnostic_summaries.remove(&PathKey(path));
1143 } else {
1144 self.diagnostic_summaries.insert(PathKey(path), summary);
1145 }
1146 }
1147
1148 pub fn insert_entry(
1149 &mut self,
1150 entry: proto::Entry,
1151 scan_id: usize,
1152 cx: &mut ModelContext<Worktree>,
1153 ) -> Task<Result<Entry>> {
1154 let wait_for_snapshot = self.wait_for_snapshot(scan_id);
1155 cx.spawn(|this, mut cx| async move {
1156 wait_for_snapshot.await;
1157 this.update(&mut cx, |worktree, _| {
1158 let worktree = worktree.as_remote_mut().unwrap();
1159 let mut snapshot = worktree.background_snapshot.lock();
1160 let entry = snapshot.insert_entry(entry);
1161 worktree.snapshot = snapshot.clone();
1162 entry
1163 })
1164 })
1165 }
1166
1167 pub(crate) fn delete_entry(
1168 &mut self,
1169 id: ProjectEntryId,
1170 scan_id: usize,
1171 cx: &mut ModelContext<Worktree>,
1172 ) -> Task<Result<()>> {
1173 let wait_for_snapshot = self.wait_for_snapshot(scan_id);
1174 cx.spawn(|this, mut cx| async move {
1175 wait_for_snapshot.await;
1176 this.update(&mut cx, |worktree, _| {
1177 let worktree = worktree.as_remote_mut().unwrap();
1178 let mut snapshot = worktree.background_snapshot.lock();
1179 snapshot.delete_entry(id);
1180 worktree.snapshot = snapshot.clone();
1181 });
1182 Ok(())
1183 })
1184 }
1185}
1186
1187impl Snapshot {
1188 pub fn id(&self) -> WorktreeId {
1189 self.id
1190 }
1191
1192 pub fn contains_entry(&self, entry_id: ProjectEntryId) -> bool {
1193 self.entries_by_id.get(&entry_id, &()).is_some()
1194 }
1195
1196 pub(crate) fn insert_entry(&mut self, entry: proto::Entry) -> Result<Entry> {
1197 let entry = Entry::try_from((&self.root_char_bag, entry))?;
1198 let old_entry = self.entries_by_id.insert_or_replace(
1199 PathEntry {
1200 id: entry.id,
1201 path: entry.path.clone(),
1202 is_ignored: entry.is_ignored,
1203 scan_id: 0,
1204 },
1205 &(),
1206 );
1207 if let Some(old_entry) = old_entry {
1208 self.entries_by_path.remove(&PathKey(old_entry.path), &());
1209 }
1210 self.entries_by_path.insert_or_replace(entry.clone(), &());
1211 Ok(entry)
1212 }
1213
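    // Deletes the entry with the given id along with all of its descendants,
    // removing them from both the by-path and by-id trees.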
1214 fn delete_entry(&mut self, entry_id: ProjectEntryId) -> bool {
1215 if let Some(removed_entry) = self.entries_by_id.remove(&entry_id, &()) {
1216 self.entries_by_path = {
1217 let mut cursor = self.entries_by_path.cursor();
1218 let mut new_entries_by_path =
1219 cursor.slice(&TraversalTarget::Path(&removed_entry.path), Bias::Left, &());
1220 while let Some(entry) = cursor.item() {
1221 if entry.path.starts_with(&removed_entry.path) {
1222 self.entries_by_id.remove(&entry.id, &());
1223 cursor.next(&());
1224 } else {
1225 break;
1226 }
1227 }
1228 new_entries_by_path.push_tree(cursor.suffix(&()), &());
1229 new_entries_by_path
1230 };
1231
1232 true
1233 } else {
1234 false
1235 }
1236 }
1237
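    // Applies an incremental update received from the host. Removed entries are
    // dropped from both trees, and each updated entry replaces any existing entry
    // with the same id, even if its path has changed.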
1238 pub(crate) fn apply_remote_update(&mut self, update: proto::UpdateWorktree) -> Result<()> {
1239 let mut entries_by_path_edits = Vec::new();
1240 let mut entries_by_id_edits = Vec::new();
1241 for entry_id in update.removed_entries {
1242 let entry = self
1243 .entry_for_id(ProjectEntryId::from_proto(entry_id))
1244 .ok_or_else(|| anyhow!("unknown entry {}", entry_id))?;
1245 entries_by_path_edits.push(Edit::Remove(PathKey(entry.path.clone())));
1246 entries_by_id_edits.push(Edit::Remove(entry.id));
1247 }
1248
1249 for entry in update.updated_entries {
1250 let entry = Entry::try_from((&self.root_char_bag, entry))?;
1251 if let Some(PathEntry { path, .. }) = self.entries_by_id.get(&entry.id, &()) {
1252 entries_by_path_edits.push(Edit::Remove(PathKey(path.clone())));
1253 }
1254 entries_by_id_edits.push(Edit::Insert(PathEntry {
1255 id: entry.id,
1256 path: entry.path.clone(),
1257 is_ignored: entry.is_ignored,
1258 scan_id: 0,
1259 }));
1260 entries_by_path_edits.push(Edit::Insert(entry));
1261 }
1262
1263 self.entries_by_path.edit(entries_by_path_edits, &());
1264 self.entries_by_id.edit(entries_by_id_edits, &());
1265 self.scan_id = update.scan_id as usize;
1266 self.is_complete = update.is_last_update;
1267
1268 Ok(())
1269 }
1270
1271 pub fn file_count(&self) -> usize {
1272 self.entries_by_path.summary().file_count
1273 }
1274
1275 pub fn visible_file_count(&self) -> usize {
1276 self.entries_by_path.summary().visible_file_count
1277 }
1278
1279 fn traverse_from_offset(
1280 &self,
1281 include_dirs: bool,
1282 include_ignored: bool,
1283 start_offset: usize,
1284 ) -> Traversal {
1285 let mut cursor = self.entries_by_path.cursor();
1286 cursor.seek(
1287 &TraversalTarget::Count {
1288 count: start_offset,
1289 include_dirs,
1290 include_ignored,
1291 },
1292 Bias::Right,
1293 &(),
1294 );
1295 Traversal {
1296 cursor,
1297 include_dirs,
1298 include_ignored,
1299 }
1300 }
1301
1302 fn traverse_from_path(
1303 &self,
1304 include_dirs: bool,
1305 include_ignored: bool,
1306 path: &Path,
1307 ) -> Traversal {
1308 let mut cursor = self.entries_by_path.cursor();
1309 cursor.seek(&TraversalTarget::Path(path), Bias::Left, &());
1310 Traversal {
1311 cursor,
1312 include_dirs,
1313 include_ignored,
1314 }
1315 }
1316
1317 pub fn files(&self, include_ignored: bool, start: usize) -> Traversal {
1318 self.traverse_from_offset(false, include_ignored, start)
1319 }
1320
1321 pub fn entries(&self, include_ignored: bool) -> Traversal {
1322 self.traverse_from_offset(true, include_ignored, 0)
1323 }
1324
1325 pub fn paths(&self) -> impl Iterator<Item = &Arc<Path>> {
1326 let empty_path = Path::new("");
1327 self.entries_by_path
1328 .cursor::<()>()
1329 .filter(move |entry| entry.path.as_ref() != empty_path)
1330 .map(|entry| &entry.path)
1331 }
1332
1333 fn child_entries<'a>(&'a self, parent_path: &'a Path) -> ChildEntriesIter<'a> {
1334 let mut cursor = self.entries_by_path.cursor();
1335 cursor.seek(&TraversalTarget::Path(parent_path), Bias::Right, &());
1336 let traversal = Traversal {
1337 cursor,
1338 include_dirs: true,
1339 include_ignored: true,
1340 };
1341 ChildEntriesIter {
1342 traversal,
1343 parent_path,
1344 }
1345 }
1346
1347 pub fn root_entry(&self) -> Option<&Entry> {
1348 self.entry_for_path("")
1349 }
1350
1351 pub fn root_name(&self) -> &str {
1352 &self.root_name
1353 }
1354
1355 pub fn scan_id(&self) -> usize {
1356 self.scan_id
1357 }
1358
1359 pub fn entry_for_path(&self, path: impl AsRef<Path>) -> Option<&Entry> {
1360 let path = path.as_ref();
1361 self.traverse_from_path(true, true, path)
1362 .entry()
1363 .and_then(|entry| {
1364 if entry.path.as_ref() == path {
1365 Some(entry)
1366 } else {
1367 None
1368 }
1369 })
1370 }
1371
1372 pub fn entry_for_id(&self, id: ProjectEntryId) -> Option<&Entry> {
1373 let entry = self.entries_by_id.get(&id, &())?;
1374 self.entry_for_path(&entry.path)
1375 }
1376
1377 pub fn inode_for_path(&self, path: impl AsRef<Path>) -> Option<u64> {
1378 self.entry_for_path(path.as_ref()).map(|e| e.inode)
1379 }
1380}
1381
1382impl LocalSnapshot {
1383 pub fn abs_path(&self) -> &Arc<Path> {
1384 &self.abs_path
1385 }
1386
1387 pub fn extension_counts(&self) -> &HashMap<OsString, usize> {
1388 &self.extension_counts
1389 }
1390
    // Gives the most specific git repository for a given path.
    pub(crate) fn repo_for(&self, path: &Path) -> Option<GitRepositoryEntry> {
        self.git_repositories
            .iter()
            .rev() // `git_repositories` is sorted lexicographically by content path
            .find(|repo| repo.manages(path))
            .cloned()
    }
1399
1400 pub(crate) fn in_dot_git(&mut self, path: &Path) -> Option<&mut GitRepositoryEntry> {
1401 // Git repositories cannot be nested, so we don't need to reverse the order
1402 self.git_repositories
1403 .iter_mut()
1404 .find(|repo| repo.in_dot_git(path))
1405 }
1406
1407 #[cfg(test)]
1408 pub(crate) fn build_initial_update(&self, project_id: u64) -> proto::UpdateWorktree {
1409 let root_name = self.root_name.clone();
1410 proto::UpdateWorktree {
1411 project_id,
1412 worktree_id: self.id().to_proto(),
1413 root_name,
1414 updated_entries: self.entries_by_path.iter().map(Into::into).collect(),
1415 removed_entries: Default::default(),
1416 scan_id: self.scan_id as u64,
1417 is_last_update: true,
1418 }
1419 }
1420
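    // Builds an `UpdateWorktree` message by diffing this snapshot against `other`.
    // Entries are walked in id order: an id present only here becomes an update, an
    // id present only in `other` becomes a removal, and a shared id is re-sent only
    // if its scan id changed.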
1421 pub(crate) fn build_update(
1422 &self,
1423 other: &Self,
1424 project_id: u64,
1425 worktree_id: u64,
1426 include_ignored: bool,
1427 ) -> proto::UpdateWorktree {
1428 let mut updated_entries = Vec::new();
1429 let mut removed_entries = Vec::new();
1430 let mut self_entries = self
1431 .entries_by_id
1432 .cursor::<()>()
1433 .filter(|e| include_ignored || !e.is_ignored)
1434 .peekable();
1435 let mut other_entries = other
1436 .entries_by_id
1437 .cursor::<()>()
1438 .filter(|e| include_ignored || !e.is_ignored)
1439 .peekable();
1440 loop {
1441 match (self_entries.peek(), other_entries.peek()) {
1442 (Some(self_entry), Some(other_entry)) => {
1443 match Ord::cmp(&self_entry.id, &other_entry.id) {
1444 Ordering::Less => {
1445 let entry = self.entry_for_id(self_entry.id).unwrap().into();
1446 updated_entries.push(entry);
1447 self_entries.next();
1448 }
1449 Ordering::Equal => {
1450 if self_entry.scan_id != other_entry.scan_id {
1451 let entry = self.entry_for_id(self_entry.id).unwrap().into();
1452 updated_entries.push(entry);
1453 }
1454
1455 self_entries.next();
1456 other_entries.next();
1457 }
1458 Ordering::Greater => {
1459 removed_entries.push(other_entry.id.to_proto());
1460 other_entries.next();
1461 }
1462 }
1463 }
1464 (Some(self_entry), None) => {
1465 let entry = self.entry_for_id(self_entry.id).unwrap().into();
1466 updated_entries.push(entry);
1467 self_entries.next();
1468 }
1469 (None, Some(other_entry)) => {
1470 removed_entries.push(other_entry.id.to_proto());
1471 other_entries.next();
1472 }
1473 (None, None) => break,
1474 }
1475 }
1476
1477 proto::UpdateWorktree {
1478 project_id,
1479 worktree_id,
1480 root_name: self.root_name().to_string(),
1481 updated_entries,
1482 removed_entries,
1483 scan_id: self.scan_id as u64,
1484 is_last_update: true,
1485 }
1486 }
1487
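    // Inserts a single entry into the snapshot. If the entry is a `.gitignore`
    // file, its rules are loaded eagerly; entry ids are reused where possible and
    // the per-extension counts are kept in sync.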
1488 fn insert_entry(&mut self, mut entry: Entry, fs: &dyn Fs) -> Entry {
1489 if entry.is_file() && entry.path.file_name() == Some(&GITIGNORE) {
1490 let abs_path = self.abs_path.join(&entry.path);
1491 match smol::block_on(build_gitignore(&abs_path, fs)) {
1492 Ok(ignore) => {
1493 self.ignores_by_parent_abs_path.insert(
1494 abs_path.parent().unwrap().into(),
1495 (Arc::new(ignore), self.scan_id),
1496 );
1497 }
1498 Err(error) => {
1499 log::error!(
1500 "error loading .gitignore file {:?} - {:?}",
1501 &entry.path,
1502 error
1503 );
1504 }
1505 }
1506 }
1507
1508 self.reuse_entry_id(&mut entry);
1509
1510 if entry.kind == EntryKind::PendingDir {
1511 if let Some(existing_entry) =
1512 self.entries_by_path.get(&PathKey(entry.path.clone()), &())
1513 {
1514 entry.kind = existing_entry.kind;
1515 }
1516 }
1517
1518 self.entries_by_path.insert_or_replace(entry.clone(), &());
1519 let scan_id = self.scan_id;
1520 let removed_entry = self.entries_by_id.insert_or_replace(
1521 PathEntry {
1522 id: entry.id,
1523 path: entry.path.clone(),
1524 is_ignored: entry.is_ignored,
1525 scan_id,
1526 },
1527 &(),
1528 );
1529
1530 if let Some(removed_entry) = removed_entry {
1531 self.dec_extension_count(&removed_entry.path, removed_entry.is_ignored);
1532 }
1533 self.inc_extension_count(&entry.path, entry.is_ignored);
1534
1535 entry
1536 }
1537
1538 fn populate_dir(
1539 &mut self,
1540 parent_path: Arc<Path>,
1541 entries: impl IntoIterator<Item = Entry>,
1542 ignore: Option<Arc<Gitignore>>,
1543 fs: &dyn Fs,
1544 ) {
1545 let mut parent_entry = if let Some(parent_entry) =
1546 self.entries_by_path.get(&PathKey(parent_path.clone()), &())
1547 {
1548 parent_entry.clone()
1549 } else {
1550 log::warn!(
1551 "populating a directory {:?} that has been removed",
1552 parent_path
1553 );
1554 return;
1555 };
1556
1557 if let Some(ignore) = ignore {
1558 self.ignores_by_parent_abs_path.insert(
1559 self.abs_path.join(&parent_path).into(),
1560 (ignore, self.scan_id),
1561 );
1562 }
1563 if matches!(parent_entry.kind, EntryKind::PendingDir) {
1564 parent_entry.kind = EntryKind::Dir;
1565 } else {
1566 unreachable!();
1567 }
1568
1569 if parent_path.file_name() == Some(&DOT_GIT) {
1570 let abs_path = self.abs_path.join(&parent_path);
1571 let content_path: Arc<Path> = parent_path.parent().unwrap().into();
1572 if let Err(ix) = self
1573 .git_repositories
1574 .binary_search_by_key(&&content_path, |repo| &repo.content_path)
1575 {
1576 if let Some(repo) = fs.open_repo(abs_path.as_path()) {
1577 self.git_repositories.insert(
1578 ix,
1579 GitRepositoryEntry {
1580 repo,
1581 scan_id: 0,
1582 content_path,
1583 git_dir_path: parent_path,
1584 },
1585 );
1586 }
1587 }
1588 }
1589
1590 let mut entries_by_path_edits = vec![Edit::Insert(parent_entry)];
1591 let mut entries_by_id_edits = Vec::new();
1592
1593 for mut entry in entries {
1594 self.reuse_entry_id(&mut entry);
1595 self.inc_extension_count(&entry.path, entry.is_ignored);
1596 entries_by_id_edits.push(Edit::Insert(PathEntry {
1597 id: entry.id,
1598 path: entry.path.clone(),
1599 is_ignored: entry.is_ignored,
1600 scan_id: self.scan_id,
1601 }));
1602 entries_by_path_edits.push(Edit::Insert(entry));
1603 }
1604
1605 self.entries_by_path.edit(entries_by_path_edits, &());
1606 let removed_entries = self.entries_by_id.edit(entries_by_id_edits, &());
1607
1608 for removed_entry in removed_entries {
1609 self.dec_extension_count(&removed_entry.path, removed_entry.is_ignored);
1610 }
1611 }
1612
1613 fn inc_extension_count(&mut self, path: &Path, ignored: bool) {
1614 if !ignored {
1615 if let Some(extension) = path.extension() {
1616 if let Some(count) = self.extension_counts.get_mut(extension) {
1617 *count += 1;
1618 } else {
1619 self.extension_counts.insert(extension.into(), 1);
1620 }
1621 }
1622 }
1623 }
1624
1625 fn dec_extension_count(&mut self, path: &Path, ignored: bool) {
1626 if !ignored {
1627 if let Some(extension) = path.extension() {
1628 if let Some(count) = self.extension_counts.get_mut(extension) {
1629 *count -= 1;
1630 }
1631 }
1632 }
1633 }
1634
1635 fn reuse_entry_id(&mut self, entry: &mut Entry) {
1636 if let Some(removed_entry_id) = self.removed_entry_ids.remove(&entry.inode) {
1637 entry.id = removed_entry_id;
1638 } else if let Some(existing_entry) = self.entry_for_path(&entry.path) {
1639 entry.id = existing_entry.id;
1640 }
1641 }
1642
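    // Removes `path` and everything beneath it from the snapshot, remembering the
    // removed entry ids by inode so they can be reused if the same files reappear,
    // and recording the current scan id on any affected `.gitignore` or `.git` entry.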
1643 fn remove_path(&mut self, path: &Path) {
1644 let mut new_entries;
1645 let removed_entries;
1646 {
1647 let mut cursor = self.entries_by_path.cursor::<TraversalProgress>();
1648 new_entries = cursor.slice(&TraversalTarget::Path(path), Bias::Left, &());
1649 removed_entries = cursor.slice(&TraversalTarget::PathSuccessor(path), Bias::Left, &());
1650 new_entries.push_tree(cursor.suffix(&()), &());
1651 }
1652 self.entries_by_path = new_entries;
1653
1654 let mut entries_by_id_edits = Vec::new();
1655 for entry in removed_entries.cursor::<()>() {
1656 let removed_entry_id = self
1657 .removed_entry_ids
1658 .entry(entry.inode)
1659 .or_insert(entry.id);
1660 *removed_entry_id = cmp::max(*removed_entry_id, entry.id);
1661 entries_by_id_edits.push(Edit::Remove(entry.id));
1662 self.dec_extension_count(&entry.path, entry.is_ignored);
1663 }
1664 self.entries_by_id.edit(entries_by_id_edits, &());
1665
1666 if path.file_name() == Some(&GITIGNORE) {
1667 let abs_parent_path = self.abs_path.join(path.parent().unwrap());
1668 if let Some((_, scan_id)) = self
1669 .ignores_by_parent_abs_path
1670 .get_mut(abs_parent_path.as_path())
1671 {
1672 *scan_id = self.snapshot.scan_id;
1673 }
1674 } else if path.file_name() == Some(&DOT_GIT) {
1675 let parent_path = path.parent().unwrap();
1676 if let Ok(ix) = self
1677 .git_repositories
1678 .binary_search_by_key(&parent_path, |repo| repo.git_dir_path.as_ref())
1679 {
1680 self.git_repositories[ix].scan_id = self.snapshot.scan_id;
1681 }
1682 }
1683 }
1684
1685 fn ancestor_inodes_for_path(&self, path: &Path) -> TreeSet<u64> {
1686 let mut inodes = TreeSet::default();
1687 for ancestor in path.ancestors().skip(1) {
1688 if let Some(entry) = self.entry_for_path(ancestor) {
1689 inodes.insert(entry.inode);
1690 }
1691 }
1692 inodes
1693 }
1694
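    // Builds the stack of `.gitignore` files that apply to `abs_path`, walking its
    // ancestors from the outside in. If any ancestor directory is itself ignored,
    // the entire subtree is ignored and the stack collapses to `IgnoreStack::all()`.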
1695 fn ignore_stack_for_abs_path(&self, abs_path: &Path, is_dir: bool) -> Arc<IgnoreStack> {
1696 let mut new_ignores = Vec::new();
1697 for ancestor in abs_path.ancestors().skip(1) {
1698 if let Some((ignore, _)) = self.ignores_by_parent_abs_path.get(ancestor) {
1699 new_ignores.push((ancestor, Some(ignore.clone())));
1700 } else {
1701 new_ignores.push((ancestor, None));
1702 }
1703 }
1704
1705 let mut ignore_stack = IgnoreStack::none();
1706 for (parent_abs_path, ignore) in new_ignores.into_iter().rev() {
1707 if ignore_stack.is_abs_path_ignored(parent_abs_path, true) {
1708 ignore_stack = IgnoreStack::all();
1709 break;
1710 } else if let Some(ignore) = ignore {
1711 ignore_stack = ignore_stack.append(parent_abs_path.into(), ignore);
1712 }
1713 }
1714
1715 if ignore_stack.is_abs_path_ignored(abs_path, is_dir) {
1716 ignore_stack = IgnoreStack::all();
1717 }
1718
1719 ignore_stack
1720 }
1721
1722 pub fn git_repo_entries(&self) -> &[GitRepositoryEntry] {
1723 &self.git_repositories
1724 }
1725}
// Example, with paths relative to the worktree root:
//
//   git_dir_path:       c/d/.git
//   `in_dot_git` query: c/d/.git/HEAD
//   `manages` query:    c/d/e/f/a.txt
1732impl GitRepositoryEntry {
1733 pub(crate) fn manages(&self, path: &Path) -> bool {
1734 path.starts_with(self.content_path.as_ref())
1735 }
1736
1737 pub(crate) fn in_dot_git(&self, path: &Path) -> bool {
1738 path.starts_with(self.git_dir_path.as_ref())
1739 }
1740}
1741
1742async fn build_gitignore(abs_path: &Path, fs: &dyn Fs) -> Result<Gitignore> {
1743 let contents = fs.load(abs_path).await?;
1744 let parent = abs_path.parent().unwrap_or_else(|| Path::new("/"));
1745 let mut builder = GitignoreBuilder::new(parent);
1746 for line in contents.lines() {
1747 builder.add_line(Some(abs_path.into()), line)?;
1748 }
1749 Ok(builder.build()?)
1750}
1751
1752impl WorktreeId {
1753 pub fn from_usize(handle_id: usize) -> Self {
1754 Self(handle_id)
1755 }
1756
1757 pub(crate) fn from_proto(id: u64) -> Self {
1758 Self(id as usize)
1759 }
1760
1761 pub fn to_proto(&self) -> u64 {
1762 self.0 as u64
1763 }
1764
1765 pub fn to_usize(&self) -> usize {
1766 self.0
1767 }
1768}
1769
1770impl fmt::Display for WorktreeId {
1771 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1772 self.0.fmt(f)
1773 }
1774}
1775
1776impl Deref for Worktree {
1777 type Target = Snapshot;
1778
1779 fn deref(&self) -> &Self::Target {
1780 match self {
1781 Worktree::Local(worktree) => &worktree.snapshot,
1782 Worktree::Remote(worktree) => &worktree.snapshot,
1783 }
1784 }
1785}
1786
1787impl Deref for LocalWorktree {
1788 type Target = LocalSnapshot;
1789
1790 fn deref(&self) -> &Self::Target {
1791 &self.snapshot
1792 }
1793}
1794
1795impl Deref for RemoteWorktree {
1796 type Target = Snapshot;
1797
1798 fn deref(&self) -> &Self::Target {
1799 &self.snapshot
1800 }
1801}
1802
1803impl fmt::Debug for LocalWorktree {
1804 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1805 self.snapshot.fmt(f)
1806 }
1807}
1808
1809impl fmt::Debug for Snapshot {
1810 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1811 struct EntriesById<'a>(&'a SumTree<PathEntry>);
1812 struct EntriesByPath<'a>(&'a SumTree<Entry>);
1813
1814 impl<'a> fmt::Debug for EntriesByPath<'a> {
1815 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1816 f.debug_map()
1817 .entries(self.0.iter().map(|entry| (&entry.path, entry.id)))
1818 .finish()
1819 }
1820 }
1821
1822 impl<'a> fmt::Debug for EntriesById<'a> {
1823 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1824 f.debug_list().entries(self.0.iter()).finish()
1825 }
1826 }
1827
1828 f.debug_struct("Snapshot")
1829 .field("id", &self.id)
1830 .field("root_name", &self.root_name)
1831 .field("entries_by_path", &EntriesByPath(&self.entries_by_path))
1832 .field("entries_by_id", &EntriesById(&self.entries_by_id))
1833 .finish()
1834 }
1835}
1836
1837#[derive(Clone, PartialEq)]
1838pub struct File {
1839 pub worktree: ModelHandle<Worktree>,
1840 pub path: Arc<Path>,
1841 pub mtime: SystemTime,
1842 pub(crate) entry_id: Option<ProjectEntryId>,
1843 pub(crate) is_local: bool,
1844}
1845
1846impl language::File for File {
1847 fn as_local(&self) -> Option<&dyn language::LocalFile> {
1848 if self.is_local {
1849 Some(self)
1850 } else {
1851 None
1852 }
1853 }
1854
1855 fn mtime(&self) -> SystemTime {
1856 self.mtime
1857 }
1858
1859 fn path(&self) -> &Arc<Path> {
1860 &self.path
1861 }
1862
1863 fn full_path(&self, cx: &AppContext) -> PathBuf {
1864 let mut full_path = PathBuf::new();
1865 full_path.push(self.worktree.read(cx).root_name());
1866 if self.path.components().next().is_some() {
1867 full_path.push(&self.path);
1868 }
1869 full_path
1870 }
1871
1872 /// Returns the last component of this handle's absolute path. If this handle refers to the root
1873 /// of its worktree, then this method will return the name of the worktree itself.
1874 fn file_name<'a>(&'a self, cx: &'a AppContext) -> &'a OsStr {
1875 self.path
1876 .file_name()
1877 .unwrap_or_else(|| OsStr::new(&self.worktree.read(cx).root_name))
1878 }
1879
1880 fn is_deleted(&self) -> bool {
1881 self.entry_id.is_none()
1882 }
1883
1884 fn save(
1885 &self,
1886 buffer_id: u64,
1887 text: Rope,
1888 version: clock::Global,
1889 line_ending: LineEnding,
1890 cx: &mut MutableAppContext,
1891 ) -> Task<Result<(clock::Global, String, SystemTime)>> {
1892 self.worktree.update(cx, |worktree, cx| match worktree {
1893 Worktree::Local(worktree) => {
1894 let rpc = worktree.client.clone();
1895 let project_id = worktree.share.as_ref().map(|share| share.project_id);
1896 let fingerprint = text.fingerprint();
1897 let save = worktree.write_file(self.path.clone(), text, line_ending, cx);
1898 cx.background().spawn(async move {
1899 let entry = save.await?;
1900 if let Some(project_id) = project_id {
1901 rpc.send(proto::BufferSaved {
1902 project_id,
1903 buffer_id,
1904 version: serialize_version(&version),
1905 mtime: Some(entry.mtime.into()),
1906 fingerprint: fingerprint.clone(),
1907 })?;
1908 }
1909 Ok((version, fingerprint, entry.mtime))
1910 })
1911 }
1912 Worktree::Remote(worktree) => {
1913 let rpc = worktree.client.clone();
1914 let project_id = worktree.project_id;
1915 cx.foreground().spawn(async move {
1916 let response = rpc
1917 .request(proto::SaveBuffer {
1918 project_id,
1919 buffer_id,
1920 version: serialize_version(&version),
1921 })
1922 .await?;
1923 let version = deserialize_version(response.version);
1924 let mtime = response
1925 .mtime
1926 .ok_or_else(|| anyhow!("missing mtime"))?
1927 .into();
1928 Ok((version, response.fingerprint, mtime))
1929 })
1930 }
1931 })
1932 }
1933
1934 fn as_any(&self) -> &dyn Any {
1935 self
1936 }
1937
1938 fn to_proto(&self) -> rpc::proto::File {
1939 rpc::proto::File {
1940 worktree_id: self.worktree.id() as u64,
1941 entry_id: self.entry_id.map(|entry_id| entry_id.to_proto()),
1942 path: self.path.to_string_lossy().into(),
1943 mtime: Some(self.mtime.into()),
1944 }
1945 }
1946}
1947
1948impl language::LocalFile for File {
1949 fn abs_path(&self, cx: &AppContext) -> PathBuf {
1950 self.worktree
1951 .read(cx)
1952 .as_local()
1953 .unwrap()
1954 .abs_path
1955 .join(&self.path)
1956 }
1957
1958 fn load(&self, cx: &AppContext) -> Task<Result<String>> {
1959 let worktree = self.worktree.read(cx).as_local().unwrap();
1960 let abs_path = worktree.absolutize(&self.path);
1961 let fs = worktree.fs.clone();
1962 cx.background()
1963 .spawn(async move { fs.load(&abs_path).await })
1964 }
1965
1966 fn buffer_reloaded(
1967 &self,
1968 buffer_id: u64,
1969 version: &clock::Global,
1970 fingerprint: String,
1971 line_ending: LineEnding,
1972 mtime: SystemTime,
1973 cx: &mut MutableAppContext,
1974 ) {
1975 let worktree = self.worktree.read(cx).as_local().unwrap();
1976 if let Some(project_id) = worktree.share.as_ref().map(|share| share.project_id) {
1977 worktree
1978 .client
1979 .send(proto::BufferReloaded {
1980 project_id,
1981 buffer_id,
1982 version: serialize_version(version),
1983 mtime: Some(mtime.into()),
1984 fingerprint,
1985 line_ending: serialize_line_ending(line_ending) as i32,
1986 })
1987 .log_err();
1988 }
1989 }
1990}
1991
1992impl File {
1993 pub fn from_proto(
1994 proto: rpc::proto::File,
1995 worktree: ModelHandle<Worktree>,
1996 cx: &AppContext,
1997 ) -> Result<Self> {
1998 let worktree_id = worktree
1999 .read(cx)
2000 .as_remote()
2001 .ok_or_else(|| anyhow!("not remote"))?
2002 .id();
2003
2004 if worktree_id.to_proto() != proto.worktree_id {
2005 return Err(anyhow!("worktree id does not match file"));
2006 }
2007
2008 Ok(Self {
2009 worktree,
2010 path: Path::new(&proto.path).into(),
2011 mtime: proto.mtime.ok_or_else(|| anyhow!("no timestamp"))?.into(),
2012 entry_id: proto.entry_id.map(ProjectEntryId::from_proto),
2013 is_local: false,
2014 })
2015 }
2016
2017 pub fn from_dyn(file: Option<&dyn language::File>) -> Option<&Self> {
2018 file.and_then(|f| f.as_any().downcast_ref())
2019 }
2020
2021 pub fn worktree_id(&self, cx: &AppContext) -> WorktreeId {
2022 self.worktree.read(cx).id()
2023 }
2024
2025 pub fn project_entry_id(&self, _: &AppContext) -> Option<ProjectEntryId> {
2026 self.entry_id
2027 }
2028}
2029
2030#[derive(Clone, Debug, PartialEq, Eq)]
2031pub struct Entry {
2032 pub id: ProjectEntryId,
2033 pub kind: EntryKind,
2034 pub path: Arc<Path>,
2035 pub inode: u64,
2036 pub mtime: SystemTime,
2037 pub is_symlink: bool,
2038 pub is_ignored: bool,
2039}
2040
2041#[derive(Clone, Copy, Debug, PartialEq, Eq)]
2042pub enum EntryKind {
2043 PendingDir,
2044 Dir,
2045 File(CharBag),
2046}
2047
2048impl Entry {
2049 fn new(
2050 path: Arc<Path>,
2051 metadata: &fs::Metadata,
2052 next_entry_id: &AtomicUsize,
2053 root_char_bag: CharBag,
2054 ) -> Self {
2055 Self {
2056 id: ProjectEntryId::new(next_entry_id),
2057 kind: if metadata.is_dir {
2058 EntryKind::PendingDir
2059 } else {
2060 EntryKind::File(char_bag_for_path(root_char_bag, &path))
2061 },
2062 path,
2063 inode: metadata.inode,
2064 mtime: metadata.mtime,
2065 is_symlink: metadata.is_symlink,
2066 is_ignored: false,
2067 }
2068 }
2069
2070 pub fn is_dir(&self) -> bool {
2071 matches!(self.kind, EntryKind::Dir | EntryKind::PendingDir)
2072 }
2073
2074 pub fn is_file(&self) -> bool {
2075 matches!(self.kind, EntryKind::File(_))
2076 }
2077}
2078
2079impl sum_tree::Item for Entry {
2080 type Summary = EntrySummary;
2081
2082 fn summary(&self) -> Self::Summary {
2083 let visible_count = if self.is_ignored { 0 } else { 1 };
2084 let file_count;
2085 let visible_file_count;
2086 if self.is_file() {
2087 file_count = 1;
2088 visible_file_count = visible_count;
2089 } else {
2090 file_count = 0;
2091 visible_file_count = 0;
2092 }
2093
2094 EntrySummary {
2095 max_path: self.path.clone(),
2096 count: 1,
2097 visible_count,
2098 file_count,
2099 visible_file_count,
2100 }
2101 }
2102}
2103
2104impl sum_tree::KeyedItem for Entry {
2105 type Key = PathKey;
2106
2107 fn key(&self) -> Self::Key {
2108 PathKey(self.path.clone())
2109 }
2110}
2111
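/// Aggregate statistics for a range of entries: the greatest path in the range plus total
/// and visible (non-ignored) counts of entries and of files.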
2112#[derive(Clone, Debug)]
2113pub struct EntrySummary {
2114 max_path: Arc<Path>,
2115 count: usize,
2116 visible_count: usize,
2117 file_count: usize,
2118 visible_file_count: usize,
2119}
2120
2121impl Default for EntrySummary {
2122 fn default() -> Self {
2123 Self {
2124 max_path: Arc::from(Path::new("")),
2125 count: 0,
2126 visible_count: 0,
2127 file_count: 0,
2128 visible_file_count: 0,
2129 }
2130 }
2131}
2132
2133impl sum_tree::Summary for EntrySummary {
2134 type Context = ();
2135
2136 fn add_summary(&mut self, rhs: &Self, _: &()) {
2137 self.max_path = rhs.max_path.clone();
2138 self.count += rhs.count;
2139 self.visible_count += rhs.visible_count;
2140 self.file_count += rhs.file_count;
2141 self.visible_file_count += rhs.visible_file_count;
2142 }
2143}
2144
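/// An entry in the id-keyed tree, mapping a `ProjectEntryId` to its current path, its ignore
/// status, and the scan in which it was last touched.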
2145#[derive(Clone, Debug)]
2146struct PathEntry {
2147 id: ProjectEntryId,
2148 path: Arc<Path>,
2149 is_ignored: bool,
2150 scan_id: usize,
2151}
2152
2153impl sum_tree::Item for PathEntry {
2154 type Summary = PathEntrySummary;
2155
2156 fn summary(&self) -> Self::Summary {
2157 PathEntrySummary { max_id: self.id }
2158 }
2159}
2160
2161impl sum_tree::KeyedItem for PathEntry {
2162 type Key = ProjectEntryId;
2163
2164 fn key(&self) -> Self::Key {
2165 self.id
2166 }
2167}
2168
2169#[derive(Clone, Debug, Default)]
2170struct PathEntrySummary {
2171 max_id: ProjectEntryId,
2172}
2173
2174impl sum_tree::Summary for PathEntrySummary {
2175 type Context = ();
2176
2177 fn add_summary(&mut self, summary: &Self, _: &Self::Context) {
2178 self.max_id = summary.max_id;
2179 }
2180}
2181
2182impl<'a> sum_tree::Dimension<'a, PathEntrySummary> for ProjectEntryId {
2183 fn add_summary(&mut self, summary: &'a PathEntrySummary, _: &()) {
2184 *self = summary.max_id;
2185 }
2186}
2187
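/// A path wrapper used as the ordering key for entries in the path-keyed tree.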
2188#[derive(Clone, Debug, Eq, PartialEq, Ord, PartialOrd)]
2189pub struct PathKey(Arc<Path>);
2190
2191impl Default for PathKey {
2192 fn default() -> Self {
2193 Self(Path::new("").into())
2194 }
2195}
2196
2197impl<'a> sum_tree::Dimension<'a, EntrySummary> for PathKey {
2198 fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
2199 self.0 = summary.max_path.clone();
2200 }
2201}
2202
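/// Scans a worktree on a background executor, keeping a shared `LocalSnapshot` up to date
/// and reporting progress to the foreground as `ScanState` messages over `notify`.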
2203struct BackgroundScanner {
2204 fs: Arc<dyn Fs>,
2205 snapshot: Arc<Mutex<LocalSnapshot>>,
2206 notify: UnboundedSender<ScanState>,
2207 executor: Arc<executor::Background>,
2208}
2209
2210impl BackgroundScanner {
2211 fn new(
2212 snapshot: Arc<Mutex<LocalSnapshot>>,
2213 notify: UnboundedSender<ScanState>,
2214 fs: Arc<dyn Fs>,
2215 executor: Arc<executor::Background>,
2216 ) -> Self {
2217 Self {
2218 fs,
2219 snapshot,
2220 notify,
2221 executor,
2222 }
2223 }
2224
2225 fn abs_path(&self) -> Arc<Path> {
2226 self.snapshot.lock().abs_path.clone()
2227 }
2228
2229 fn snapshot(&self) -> LocalSnapshot {
2230 self.snapshot.lock().clone()
2231 }
2232
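    /// Performs the initial scan and then applies batches of file-system events as they
    /// arrive, reporting scan state over `notify`. Returns when the event stream ends or
    /// the receiving side of `notify` is dropped.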
2233 async fn run(mut self, events_rx: impl Stream<Item = Vec<fsevent::Event>>) {
2234 if self.notify.unbounded_send(ScanState::Initializing).is_err() {
2235 return;
2236 }
2237
2238 if let Err(err) = self.scan_dirs().await {
2239 if self
2240 .notify
2241 .unbounded_send(ScanState::Err(Arc::new(err)))
2242 .is_err()
2243 {
2244 return;
2245 }
2246 }
2247
2248 if self.notify.unbounded_send(ScanState::Idle).is_err() {
2249 return;
2250 }
2251
2252 futures::pin_mut!(events_rx);
2253
2254 while let Some(mut events) = events_rx.next().await {
2255 while let Poll::Ready(Some(additional_events)) = futures::poll!(events_rx.next()) {
2256 events.extend(additional_events);
2257 }
2258
2259 if self.notify.unbounded_send(ScanState::Updating).is_err() {
2260 break;
2261 }
2262
2263 if !self.process_events(events).await {
2264 break;
2265 }
2266
2267 if self.notify.unbounded_send(ScanState::Idle).is_err() {
2268 break;
2269 }
2270 }
2271 }
2272
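    /// Performs the initial recursive scan of the worktree root: loads `.gitignore` files
    /// from ancestor directories, then scans directories in parallel using one task per CPU.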
2273 async fn scan_dirs(&mut self) -> Result<()> {
2274 let root_char_bag;
2275 let root_abs_path;
2276 let root_inode;
2277 let is_dir;
2278 let next_entry_id;
2279 {
2280 let snapshot = self.snapshot.lock();
2281 root_char_bag = snapshot.root_char_bag;
2282 root_abs_path = snapshot.abs_path.clone();
2283 root_inode = snapshot.root_entry().map(|e| e.inode);
2284 is_dir = snapshot.root_entry().map_or(false, |e| e.is_dir());
2285 next_entry_id = snapshot.next_entry_id.clone();
2286 };
2287
2288 // Populate ignores above the root.
2289 for ancestor in root_abs_path.ancestors().skip(1) {
2290 if let Ok(ignore) = build_gitignore(&ancestor.join(&*GITIGNORE), self.fs.as_ref()).await
2291 {
2292 self.snapshot
2293 .lock()
2294 .ignores_by_parent_abs_path
2295 .insert(ancestor.into(), (ignore.into(), 0));
2296 }
2297 }
2298
2299 let ignore_stack = {
2300 let mut snapshot = self.snapshot.lock();
2301 let ignore_stack = snapshot.ignore_stack_for_abs_path(&root_abs_path, true);
2302 if ignore_stack.is_all() {
2303 if let Some(mut root_entry) = snapshot.root_entry().cloned() {
2304 root_entry.is_ignored = true;
2305 snapshot.insert_entry(root_entry, self.fs.as_ref());
2306 }
2307 }
2308 ignore_stack
2309 };
2310
2311 if is_dir {
2312 let path: Arc<Path> = Arc::from(Path::new(""));
2313 let mut ancestor_inodes = TreeSet::default();
2314 if let Some(root_inode) = root_inode {
2315 ancestor_inodes.insert(root_inode);
2316 }
2317
2318 let (tx, rx) = channel::unbounded();
2319 self.executor
2320 .block(tx.send(ScanJob {
2321 abs_path: root_abs_path.to_path_buf(),
2322 path,
2323 ignore_stack,
2324 ancestor_inodes,
2325 scan_queue: tx.clone(),
2326 }))
2327 .unwrap();
2328 drop(tx);
2329
2330 self.executor
2331 .scoped(|scope| {
2332 for _ in 0..self.executor.num_cpus() {
2333 scope.spawn(async {
2334 while let Ok(job) = rx.recv().await {
2335 if let Err(err) = self
2336 .scan_dir(root_char_bag, next_entry_id.clone(), &job)
2337 .await
2338 {
2339 log::error!("error scanning {:?}: {}", job.abs_path, err);
2340 }
2341 }
2342 });
2343 }
2344 })
2345 .await;
2346 }
2347
2348 Ok(())
2349 }
2350
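    /// Scans a single directory: records an entry for each child, applies any `.gitignore`
    /// found there, and enqueues scan jobs for newly discovered subdirectories. Directories
    /// whose inode already appears among the job's ancestors are skipped, which guards
    /// against cycles introduced by symlinks.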
2351 async fn scan_dir(
2352 &self,
2353 root_char_bag: CharBag,
2354 next_entry_id: Arc<AtomicUsize>,
2355 job: &ScanJob,
2356 ) -> Result<()> {
2357 let mut new_entries: Vec<Entry> = Vec::new();
2358 let mut new_jobs: Vec<ScanJob> = Vec::new();
2359 let mut ignore_stack = job.ignore_stack.clone();
2360 let mut new_ignore = None;
2361
2362 let mut child_paths = self.fs.read_dir(&job.abs_path).await?;
2363 while let Some(child_abs_path) = child_paths.next().await {
2364 let child_abs_path = match child_abs_path {
2365 Ok(child_abs_path) => child_abs_path,
2366 Err(error) => {
2367 log::error!("error processing entry {:?}", error);
2368 continue;
2369 }
2370 };
2371 let child_name = child_abs_path.file_name().unwrap();
2372 let child_path: Arc<Path> = job.path.join(child_name).into();
2373 let child_metadata = match self.fs.metadata(&child_abs_path).await {
2374 Ok(Some(metadata)) => metadata,
2375 Ok(None) => continue,
2376 Err(err) => {
2377 log::error!("error processing {:?}: {:?}", child_abs_path, err);
2378 continue;
2379 }
2380 };
2381
2382 // If we find a .gitignore, add it to the stack of ignores used to determine which paths are ignored
2383 if child_name == *GITIGNORE {
2384 match build_gitignore(&child_abs_path, self.fs.as_ref()).await {
2385 Ok(ignore) => {
2386 let ignore = Arc::new(ignore);
2387 ignore_stack =
2388 ignore_stack.append(job.abs_path.as_path().into(), ignore.clone());
2389 new_ignore = Some(ignore);
2390 }
2391 Err(error) => {
2392 log::error!(
2393 "error loading .gitignore file {:?} - {:?}",
2394 child_name,
2395 error
2396 );
2397 }
2398 }
2399
                // Update the ignore status of any child entries we've already processed to reflect
                // the ignore file in the current directory. Because `.gitignore` starts with a `.`,
                // it tends to appear early in the directory listing, so there should rarely be many
                // such entries. Update the ignore stack associated with any new jobs as well.
2404 let mut new_jobs = new_jobs.iter_mut();
2405 for entry in &mut new_entries {
2406 let entry_abs_path = self.abs_path().join(&entry.path);
2407 entry.is_ignored =
2408 ignore_stack.is_abs_path_ignored(&entry_abs_path, entry.is_dir());
2409 if entry.is_dir() {
2410 new_jobs.next().unwrap().ignore_stack = if entry.is_ignored {
2411 IgnoreStack::all()
2412 } else {
2413 ignore_stack.clone()
2414 };
2415 }
2416 }
2417 }
2418
2419 let mut child_entry = Entry::new(
2420 child_path.clone(),
2421 &child_metadata,
2422 &next_entry_id,
2423 root_char_bag,
2424 );
2425
2426 if child_entry.is_dir() {
2427 let is_ignored = ignore_stack.is_abs_path_ignored(&child_abs_path, true);
2428 child_entry.is_ignored = is_ignored;
2429
2430 if !job.ancestor_inodes.contains(&child_entry.inode) {
2431 let mut ancestor_inodes = job.ancestor_inodes.clone();
2432 ancestor_inodes.insert(child_entry.inode);
2433 new_jobs.push(ScanJob {
2434 abs_path: child_abs_path,
2435 path: child_path,
2436 ignore_stack: if is_ignored {
2437 IgnoreStack::all()
2438 } else {
2439 ignore_stack.clone()
2440 },
2441 ancestor_inodes,
2442 scan_queue: job.scan_queue.clone(),
2443 });
2444 }
2445 } else {
2446 child_entry.is_ignored = ignore_stack.is_abs_path_ignored(&child_abs_path, false);
2447 }
2448
2449 new_entries.push(child_entry);
2450 }
2451
2452 self.snapshot.lock().populate_dir(
2453 job.path.clone(),
2454 new_entries,
2455 new_ignore,
2456 self.fs.as_ref(),
2457 );
2458 for new_job in new_jobs {
2459 job.scan_queue.send(new_job).await.unwrap();
2460 }
2461
2462 Ok(())
2463 }
2464
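    /// Applies a batch of file-system events to the snapshot: removes stale entries, re-inserts
    /// changed ones, rescans any newly created directories, and then refreshes ignore statuses
    /// and git repositories. Returns `false` if the worktree root can no longer be canonicalized.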
2465 async fn process_events(&mut self, mut events: Vec<fsevent::Event>) -> bool {
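        // Sort events by path so that each event is followed by events for its descendants,
        // then drop those descendant events, since rescanning the ancestor covers them.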
2466 events.sort_unstable_by(|a, b| a.path.cmp(&b.path));
2467 events.dedup_by(|a, b| a.path.starts_with(&b.path));
2468
2469 let root_char_bag;
2470 let root_abs_path;
2471 let next_entry_id;
2472 {
2473 let snapshot = self.snapshot.lock();
2474 root_char_bag = snapshot.root_char_bag;
2475 root_abs_path = snapshot.abs_path.clone();
2476 next_entry_id = snapshot.next_entry_id.clone();
2477 }
2478
2479 let root_canonical_path = if let Ok(path) = self.fs.canonicalize(&root_abs_path).await {
2480 path
2481 } else {
2482 return false;
2483 };
2484 let metadata = futures::future::join_all(
2485 events
2486 .iter()
2487 .map(|event| self.fs.metadata(&event.path))
2488 .collect::<Vec<_>>(),
2489 )
2490 .await;
2491
        // Hold the snapshot lock while clearing and re-inserting the root entries
        // for each event. This way, the snapshot is not observable to the foreground
        // thread while this operation is in progress.
2495 let (scan_queue_tx, scan_queue_rx) = channel::unbounded();
2496 {
2497 let mut snapshot = self.snapshot.lock();
2498 snapshot.scan_id += 1;
2499 for event in &events {
2500 if let Ok(path) = event.path.strip_prefix(&root_canonical_path) {
2501 snapshot.remove_path(path);
2502 }
2503 }
2504
2505 for (event, metadata) in events.into_iter().zip(metadata.into_iter()) {
2506 let path: Arc<Path> = match event.path.strip_prefix(&root_canonical_path) {
2507 Ok(path) => Arc::from(path.to_path_buf()),
2508 Err(_) => {
2509 log::error!(
2510 "unexpected event {:?} for root path {:?}",
2511 event.path,
2512 root_canonical_path
2513 );
2514 continue;
2515 }
2516 };
2517 let abs_path = root_abs_path.join(&path);
2518
2519 match metadata {
2520 Ok(Some(metadata)) => {
2521 let ignore_stack =
2522 snapshot.ignore_stack_for_abs_path(&abs_path, metadata.is_dir);
2523 let mut fs_entry = Entry::new(
2524 path.clone(),
2525 &metadata,
2526 snapshot.next_entry_id.as_ref(),
2527 snapshot.root_char_bag,
2528 );
2529 fs_entry.is_ignored = ignore_stack.is_all();
2530 snapshot.insert_entry(fs_entry, self.fs.as_ref());
2531
2532 let scan_id = snapshot.scan_id;
2533 if let Some(repo) = snapshot.in_dot_git(&path) {
2534 repo.scan_id = scan_id;
2535 }
2536
2537 let mut ancestor_inodes = snapshot.ancestor_inodes_for_path(&path);
2538 if metadata.is_dir && !ancestor_inodes.contains(&metadata.inode) {
2539 ancestor_inodes.insert(metadata.inode);
2540 self.executor
2541 .block(scan_queue_tx.send(ScanJob {
2542 abs_path,
2543 path,
2544 ignore_stack,
2545 ancestor_inodes,
2546 scan_queue: scan_queue_tx.clone(),
2547 }))
2548 .unwrap();
2549 }
2550 }
2551 Ok(None) => {}
2552 Err(err) => {
2553 // TODO - create a special 'error' entry in the entries tree to mark this
2554 log::error!("error reading file on event {:?}", err);
2555 }
2556 }
2557 }
2558 drop(scan_queue_tx);
2559 }
2560
2561 // Scan any directories that were created as part of this event batch.
2562 self.executor
2563 .scoped(|scope| {
2564 for _ in 0..self.executor.num_cpus() {
2565 scope.spawn(async {
2566 while let Ok(job) = scan_queue_rx.recv().await {
2567 if let Err(err) = self
2568 .scan_dir(root_char_bag, next_entry_id.clone(), &job)
2569 .await
2570 {
2571 log::error!("error scanning {:?}: {}", job.abs_path, err);
2572 }
2573 }
2574 });
2575 }
2576 })
2577 .await;
2578
2579 // Attempt to detect renames only over a single batch of file-system events.
2580 self.snapshot.lock().removed_entry_ids.clear();
2581
2582 self.update_ignore_statuses().await;
2583 self.update_git_repositories().await;
2584 true
2585 }
2586
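    /// Recomputes ignore statuses after a batch of events: drops ignore entries whose
    /// `.gitignore` files no longer exist, then re-walks the subtree beneath each `.gitignore`
    /// that changed during this scan, in parallel.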
2587 async fn update_ignore_statuses(&self) {
2588 let mut snapshot = self.snapshot();
2589
2590 let mut ignores_to_update = Vec::new();
2591 let mut ignores_to_delete = Vec::new();
2592 for (parent_abs_path, (_, scan_id)) in &snapshot.ignores_by_parent_abs_path {
2593 if let Ok(parent_path) = parent_abs_path.strip_prefix(&snapshot.abs_path) {
2594 if *scan_id == snapshot.scan_id && snapshot.entry_for_path(parent_path).is_some() {
2595 ignores_to_update.push(parent_abs_path.clone());
2596 }
2597
2598 let ignore_path = parent_path.join(&*GITIGNORE);
2599 if snapshot.entry_for_path(ignore_path).is_none() {
2600 ignores_to_delete.push(parent_abs_path.clone());
2601 }
2602 }
2603 }
2604
2605 for parent_abs_path in ignores_to_delete {
2606 snapshot.ignores_by_parent_abs_path.remove(&parent_abs_path);
2607 self.snapshot
2608 .lock()
2609 .ignores_by_parent_abs_path
2610 .remove(&parent_abs_path);
2611 }
2612
2613 let (ignore_queue_tx, ignore_queue_rx) = channel::unbounded();
2614 ignores_to_update.sort_unstable();
2615 let mut ignores_to_update = ignores_to_update.into_iter().peekable();
2616 while let Some(parent_abs_path) = ignores_to_update.next() {
2617 while ignores_to_update
2618 .peek()
2619 .map_or(false, |p| p.starts_with(&parent_abs_path))
2620 {
2621 ignores_to_update.next().unwrap();
2622 }
2623
2624 let ignore_stack = snapshot.ignore_stack_for_abs_path(&parent_abs_path, true);
2625 ignore_queue_tx
2626 .send(UpdateIgnoreStatusJob {
2627 abs_path: parent_abs_path,
2628 ignore_stack,
2629 ignore_queue: ignore_queue_tx.clone(),
2630 })
2631 .await
2632 .unwrap();
2633 }
2634 drop(ignore_queue_tx);
2635
2636 self.executor
2637 .scoped(|scope| {
2638 for _ in 0..self.executor.num_cpus() {
2639 scope.spawn(async {
2640 while let Ok(job) = ignore_queue_rx.recv().await {
2641 self.update_ignore_status(job, &snapshot).await;
2642 }
2643 });
2644 }
2645 })
2646 .await;
2647 }
2648
2649 // TODO: Clarify what is going on here because re-loading every git repository
2650 // on every file system event seems wrong
2651 async fn update_git_repositories(&self) {
2652 let mut snapshot = self.snapshot.lock();
2653
2654 let new_repos = snapshot
2655 .git_repositories
2656 .iter()
2657 .cloned()
2658 .filter_map(|mut repo_entry| {
2659 let repo = self
2660 .fs
2661 .open_repo(&snapshot.abs_path.join(&repo_entry.git_dir_path))?;
2662 repo_entry.repo = repo;
2663 Some(repo_entry)
2664 })
2665 .collect();
2666
2667 snapshot.git_repositories = new_repos;
2668 }
2669
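    /// Re-applies the given ignore stack to the children of `job.abs_path`, queueing a
    /// follow-up job for each child directory and recording edits for any entries whose
    /// ignore status changed.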
2670 async fn update_ignore_status(&self, job: UpdateIgnoreStatusJob, snapshot: &LocalSnapshot) {
2671 let mut ignore_stack = job.ignore_stack;
2672 if let Some((ignore, _)) = snapshot.ignores_by_parent_abs_path.get(&job.abs_path) {
2673 ignore_stack = ignore_stack.append(job.abs_path.clone(), ignore.clone());
2674 }
2675
2676 let mut entries_by_id_edits = Vec::new();
2677 let mut entries_by_path_edits = Vec::new();
2678 let path = job.abs_path.strip_prefix(&snapshot.abs_path).unwrap();
2679 for mut entry in snapshot.child_entries(path).cloned() {
2680 let was_ignored = entry.is_ignored;
2681 let abs_path = self.abs_path().join(&entry.path);
2682 entry.is_ignored = ignore_stack.is_abs_path_ignored(&abs_path, entry.is_dir());
2683 if entry.is_dir() {
2684 let child_ignore_stack = if entry.is_ignored {
2685 IgnoreStack::all()
2686 } else {
2687 ignore_stack.clone()
2688 };
2689 job.ignore_queue
2690 .send(UpdateIgnoreStatusJob {
2691 abs_path: abs_path.into(),
2692 ignore_stack: child_ignore_stack,
2693 ignore_queue: job.ignore_queue.clone(),
2694 })
2695 .await
2696 .unwrap();
2697 }
2698
2699 if entry.is_ignored != was_ignored {
2700 let mut path_entry = snapshot.entries_by_id.get(&entry.id, &()).unwrap().clone();
2701 path_entry.scan_id = snapshot.scan_id;
2702 path_entry.is_ignored = entry.is_ignored;
2703 entries_by_id_edits.push(Edit::Insert(path_entry));
2704 entries_by_path_edits.push(Edit::Insert(entry));
2705 }
2706 }
2707
2708 let mut snapshot = self.snapshot.lock();
2709 snapshot.entries_by_path.edit(entries_by_path_edits, &());
2710 snapshot.entries_by_id.edit(entries_by_id_edits, &());
2711 }
2712}
2713
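/// Extends the root's character bag with the lowercased characters of `path`, producing the
/// bag stored on file entries for fuzzy matching.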
2714fn char_bag_for_path(root_char_bag: CharBag, path: &Path) -> CharBag {
2715 let mut result = root_char_bag;
2716 result.extend(
2717 path.to_string_lossy()
2718 .chars()
2719 .map(|c| c.to_ascii_lowercase()),
2720 );
2721 result
2722}
2723
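/// A unit of work for the directory scan: one directory to read, together with the ignore
/// stack and ancestor inodes that apply at that point in the tree.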
2724struct ScanJob {
2725 abs_path: PathBuf,
2726 path: Arc<Path>,
2727 ignore_stack: Arc<IgnoreStack>,
2728 scan_queue: Sender<ScanJob>,
2729 ancestor_inodes: TreeSet<u64>,
2730}
2731
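/// A unit of work for recomputing ignore statuses beneath a directory whose applicable
/// `.gitignore` files changed.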
2732struct UpdateIgnoreStatusJob {
2733 abs_path: Arc<Path>,
2734 ignore_stack: Arc<IgnoreStack>,
2735 ignore_queue: Sender<UpdateIgnoreStatusJob>,
2736}
2737
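/// Test-support extension methods on `ModelHandle<Worktree>`.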
2738pub trait WorktreeHandle {
2739 #[cfg(any(test, feature = "test-support"))]
2740 fn flush_fs_events<'a>(
2741 &self,
2742 cx: &'a gpui::TestAppContext,
2743 ) -> futures::future::LocalBoxFuture<'a, ()>;
2744}
2745
2746impl WorktreeHandle for ModelHandle<Worktree> {
    // The worktree's FS event stream sometimes delivers "redundant" events for FS changes that
    // occurred before the worktree was constructed. These events can cause the worktree to perform
    // extra directory scans, and emit extra scan-state notifications.
2750 //
2751 // This function mutates the worktree's directory and waits for those mutations to be picked up,
2752 // to ensure that all redundant FS events have already been processed.
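    //
    // Typical test usage, as in the tests below: `tree.flush_fs_events(cx).await;`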
2753 #[cfg(any(test, feature = "test-support"))]
2754 fn flush_fs_events<'a>(
2755 &self,
2756 cx: &'a gpui::TestAppContext,
2757 ) -> futures::future::LocalBoxFuture<'a, ()> {
2758 use smol::future::FutureExt;
2759
2760 let filename = "fs-event-sentinel";
2761 let tree = self.clone();
2762 let (fs, root_path) = self.read_with(cx, |tree, _| {
2763 let tree = tree.as_local().unwrap();
2764 (tree.fs.clone(), tree.abs_path().clone())
2765 });
2766
2767 async move {
2768 fs.create_file(&root_path.join(filename), Default::default())
2769 .await
2770 .unwrap();
2771 tree.condition(cx, |tree, _| tree.entry_for_path(filename).is_some())
2772 .await;
2773
2774 fs.remove_file(&root_path.join(filename), Default::default())
2775 .await
2776 .unwrap();
2777 tree.condition(cx, |tree, _| tree.entry_for_path(filename).is_none())
2778 .await;
2779
2780 cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
2781 .await;
2782 }
2783 .boxed_local()
2784 }
2785}
2786
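/// A running tally accumulated while seeking through the entry tree: the greatest path seen
/// so far plus entry and file counts, used to convert between paths and traversal offsets.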
2787#[derive(Clone, Debug)]
2788struct TraversalProgress<'a> {
2789 max_path: &'a Path,
2790 count: usize,
2791 visible_count: usize,
2792 file_count: usize,
2793 visible_file_count: usize,
2794}
2795
2796impl<'a> TraversalProgress<'a> {
2797 fn count(&self, include_dirs: bool, include_ignored: bool) -> usize {
2798 match (include_ignored, include_dirs) {
2799 (true, true) => self.count,
2800 (true, false) => self.file_count,
2801 (false, true) => self.visible_count,
2802 (false, false) => self.visible_file_count,
2803 }
2804 }
2805}
2806
2807impl<'a> sum_tree::Dimension<'a, EntrySummary> for TraversalProgress<'a> {
2808 fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
2809 self.max_path = summary.max_path.as_ref();
2810 self.count += summary.count;
2811 self.visible_count += summary.visible_count;
2812 self.file_count += summary.file_count;
2813 self.visible_file_count += summary.visible_file_count;
2814 }
2815}
2816
2817impl<'a> Default for TraversalProgress<'a> {
2818 fn default() -> Self {
2819 Self {
2820 max_path: Path::new(""),
2821 count: 0,
2822 visible_count: 0,
2823 file_count: 0,
2824 visible_file_count: 0,
2825 }
2826 }
2827}
2828
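/// A cursor over a snapshot's entries in path order, optionally skipping directories and/or
/// ignored entries. A sketch of typical use, mirroring the tests at the bottom of this file
/// (`snapshot` here stands for any value exposing `entries`, such as a worktree snapshot):
///
/// ```ignore
/// // Collect the paths of all visible (non-ignored) entries, in traversal order.
/// let visible_paths = snapshot
///     .entries(false)
///     .map(|entry| entry.path.as_ref())
///     .collect::<Vec<_>>();
/// ```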
2829pub struct Traversal<'a> {
2830 cursor: sum_tree::Cursor<'a, Entry, TraversalProgress<'a>>,
2831 include_ignored: bool,
2832 include_dirs: bool,
2833}
2834
2835impl<'a> Traversal<'a> {
2836 pub fn advance(&mut self) -> bool {
2837 self.advance_to_offset(self.offset() + 1)
2838 }
2839
2840 pub fn advance_to_offset(&mut self, offset: usize) -> bool {
2841 self.cursor.seek_forward(
2842 &TraversalTarget::Count {
2843 count: offset,
2844 include_dirs: self.include_dirs,
2845 include_ignored: self.include_ignored,
2846 },
2847 Bias::Right,
2848 &(),
2849 )
2850 }
2851
2852 pub fn advance_to_sibling(&mut self) -> bool {
2853 while let Some(entry) = self.cursor.item() {
2854 self.cursor.seek_forward(
2855 &TraversalTarget::PathSuccessor(&entry.path),
2856 Bias::Left,
2857 &(),
2858 );
2859 if let Some(entry) = self.cursor.item() {
2860 if (self.include_dirs || !entry.is_dir())
2861 && (self.include_ignored || !entry.is_ignored)
2862 {
2863 return true;
2864 }
2865 }
2866 }
2867 false
2868 }
2869
2870 pub fn entry(&self) -> Option<&'a Entry> {
2871 self.cursor.item()
2872 }
2873
2874 pub fn offset(&self) -> usize {
2875 self.cursor
2876 .start()
2877 .count(self.include_dirs, self.include_ignored)
2878 }
2879}
2880
2881impl<'a> Iterator for Traversal<'a> {
2882 type Item = &'a Entry;
2883
2884 fn next(&mut self) -> Option<Self::Item> {
2885 if let Some(item) = self.entry() {
2886 self.advance();
2887 Some(item)
2888 } else {
2889 None
2890 }
2891 }
2892}
2893
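/// Seek targets for a [`Traversal`]: an exact path, the first path that is not a descendant
/// of a given path, or an entry count interpreted under the traversal's include flags.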
2894#[derive(Debug)]
2895enum TraversalTarget<'a> {
2896 Path(&'a Path),
2897 PathSuccessor(&'a Path),
2898 Count {
2899 count: usize,
2900 include_ignored: bool,
2901 include_dirs: bool,
2902 },
2903}
2904
2905impl<'a, 'b> SeekTarget<'a, EntrySummary, TraversalProgress<'a>> for TraversalTarget<'b> {
2906 fn cmp(&self, cursor_location: &TraversalProgress<'a>, _: &()) -> Ordering {
2907 match self {
2908 TraversalTarget::Path(path) => path.cmp(&cursor_location.max_path),
2909 TraversalTarget::PathSuccessor(path) => {
2910 if !cursor_location.max_path.starts_with(path) {
2911 Ordering::Equal
2912 } else {
2913 Ordering::Greater
2914 }
2915 }
2916 TraversalTarget::Count {
2917 count,
2918 include_dirs,
2919 include_ignored,
2920 } => Ord::cmp(
2921 count,
2922 &cursor_location.count(*include_dirs, *include_ignored),
2923 ),
2924 }
2925 }
2926}
2927
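/// Iterates over the direct children of `parent_path` by repeatedly advancing a traversal
/// to the next sibling.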
2928struct ChildEntriesIter<'a> {
2929 parent_path: &'a Path,
2930 traversal: Traversal<'a>,
2931}
2932
2933impl<'a> Iterator for ChildEntriesIter<'a> {
2934 type Item = &'a Entry;
2935
2936 fn next(&mut self) -> Option<Self::Item> {
2937 if let Some(item) = self.traversal.entry() {
2938 if item.path.starts_with(&self.parent_path) {
2939 self.traversal.advance_to_sibling();
2940 return Some(item);
2941 }
2942 }
2943 None
2944 }
2945}
2946
2947impl<'a> From<&'a Entry> for proto::Entry {
2948 fn from(entry: &'a Entry) -> Self {
2949 Self {
2950 id: entry.id.to_proto(),
2951 is_dir: entry.is_dir(),
2952 path: entry.path.as_os_str().as_bytes().to_vec(),
2953 inode: entry.inode,
2954 mtime: Some(entry.mtime.into()),
2955 is_symlink: entry.is_symlink,
2956 is_ignored: entry.is_ignored,
2957 }
2958 }
2959}
2960
2961impl<'a> TryFrom<(&'a CharBag, proto::Entry)> for Entry {
2962 type Error = anyhow::Error;
2963
2964 fn try_from((root_char_bag, entry): (&'a CharBag, proto::Entry)) -> Result<Self> {
2965 if let Some(mtime) = entry.mtime {
2966 let kind = if entry.is_dir {
2967 EntryKind::Dir
2968 } else {
2969 let mut char_bag = *root_char_bag;
2970 char_bag.extend(
2971 String::from_utf8_lossy(&entry.path)
2972 .chars()
2973 .map(|c| c.to_ascii_lowercase()),
2974 );
2975 EntryKind::File(char_bag)
2976 };
2977 let path: Arc<Path> = PathBuf::from(OsString::from_vec(entry.path)).into();
2978 Ok(Entry {
2979 id: ProjectEntryId::from_proto(entry.id),
2980 kind,
2981 path,
2982 inode: entry.inode,
2983 mtime: mtime.into(),
2984 is_symlink: entry.is_symlink,
2985 is_ignored: entry.is_ignored,
2986 })
2987 } else {
2988 Err(anyhow!(
2989 "missing mtime in remote worktree entry {:?}",
2990 entry.path
2991 ))
2992 }
2993 }
2994}
2995
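/// Sends a worktree update to the server in chunks; tests use a tiny chunk size so that the
/// chunking logic is exercised even for small worktrees.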
2996async fn send_worktree_update(client: &Arc<Client>, update: proto::UpdateWorktree) -> Result<()> {
2997 #[cfg(any(test, feature = "test-support"))]
2998 const MAX_CHUNK_SIZE: usize = 2;
2999 #[cfg(not(any(test, feature = "test-support")))]
3000 const MAX_CHUNK_SIZE: usize = 256;
3001
3002 for update in proto::split_worktree_update(update, MAX_CHUNK_SIZE) {
3003 client.request(update).await?;
3004 }
3005
3006 Ok(())
3007}
3008
3009#[cfg(test)]
3010mod tests {
3011 use super::*;
3012 use crate::fs::FakeFs;
3013 use anyhow::Result;
3014 use client::test::FakeHttpClient;
3015 use fs::RealFs;
3016 use git::repository::FakeGitRepository;
3017 use gpui::{executor::Deterministic, TestAppContext};
3018 use rand::prelude::*;
3019 use serde_json::json;
3020 use std::{
3021 env,
3022 fmt::Write,
3023 time::{SystemTime, UNIX_EPOCH},
3024 };
3025
3026 use util::test::temp_tree;
3027
3028 #[gpui::test]
3029 async fn test_traversal(cx: &mut TestAppContext) {
3030 let fs = FakeFs::new(cx.background());
3031 fs.insert_tree(
3032 "/root",
3033 json!({
3034 ".gitignore": "a/b\n",
3035 "a": {
3036 "b": "",
3037 "c": "",
3038 }
3039 }),
3040 )
3041 .await;
3042
3043 let http_client = FakeHttpClient::with_404_response();
3044 let client = cx.read(|cx| Client::new(http_client, cx));
3045
3046 let tree = Worktree::local(
3047 client,
3048 Arc::from(Path::new("/root")),
3049 true,
3050 fs,
3051 Default::default(),
3052 &mut cx.to_async(),
3053 )
3054 .await
3055 .unwrap();
3056 cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
3057 .await;
3058
3059 tree.read_with(cx, |tree, _| {
3060 assert_eq!(
3061 tree.entries(false)
3062 .map(|entry| entry.path.as_ref())
3063 .collect::<Vec<_>>(),
3064 vec![
3065 Path::new(""),
3066 Path::new(".gitignore"),
3067 Path::new("a"),
3068 Path::new("a/c"),
3069 ]
3070 );
3071 assert_eq!(
3072 tree.entries(true)
3073 .map(|entry| entry.path.as_ref())
3074 .collect::<Vec<_>>(),
3075 vec![
3076 Path::new(""),
3077 Path::new(".gitignore"),
3078 Path::new("a"),
3079 Path::new("a/b"),
3080 Path::new("a/c"),
3081 ]
3082 );
3083 })
3084 }
3085
3086 #[gpui::test(iterations = 10)]
3087 async fn test_circular_symlinks(executor: Arc<Deterministic>, cx: &mut TestAppContext) {
3088 let fs = FakeFs::new(cx.background());
3089 fs.insert_tree(
3090 "/root",
3091 json!({
3092 "lib": {
3093 "a": {
3094 "a.txt": ""
3095 },
3096 "b": {
3097 "b.txt": ""
3098 }
3099 }
3100 }),
3101 )
3102 .await;
3103 fs.insert_symlink("/root/lib/a/lib", "..".into()).await;
3104 fs.insert_symlink("/root/lib/b/lib", "..".into()).await;
3105
3106 let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
3107 let tree = Worktree::local(
3108 client,
3109 Arc::from(Path::new("/root")),
3110 true,
3111 fs.clone(),
3112 Default::default(),
3113 &mut cx.to_async(),
3114 )
3115 .await
3116 .unwrap();
3117
3118 cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
3119 .await;
3120
3121 tree.read_with(cx, |tree, _| {
3122 assert_eq!(
3123 tree.entries(false)
3124 .map(|entry| entry.path.as_ref())
3125 .collect::<Vec<_>>(),
3126 vec![
3127 Path::new(""),
3128 Path::new("lib"),
3129 Path::new("lib/a"),
3130 Path::new("lib/a/a.txt"),
3131 Path::new("lib/a/lib"),
3132 Path::new("lib/b"),
3133 Path::new("lib/b/b.txt"),
3134 Path::new("lib/b/lib"),
3135 ]
3136 );
3137 });
3138
3139 fs.rename(
3140 Path::new("/root/lib/a/lib"),
3141 Path::new("/root/lib/a/lib-2"),
3142 Default::default(),
3143 )
3144 .await
3145 .unwrap();
3146 executor.run_until_parked();
3147 tree.read_with(cx, |tree, _| {
3148 assert_eq!(
3149 tree.entries(false)
3150 .map(|entry| entry.path.as_ref())
3151 .collect::<Vec<_>>(),
3152 vec![
3153 Path::new(""),
3154 Path::new("lib"),
3155 Path::new("lib/a"),
3156 Path::new("lib/a/a.txt"),
3157 Path::new("lib/a/lib-2"),
3158 Path::new("lib/b"),
3159 Path::new("lib/b/b.txt"),
3160 Path::new("lib/b/lib"),
3161 ]
3162 );
3163 });
3164 }
3165
3166 #[gpui::test]
3167 async fn test_rescan_with_gitignore(cx: &mut TestAppContext) {
3168 let parent_dir = temp_tree(json!({
3169 ".gitignore": "ancestor-ignored-file1\nancestor-ignored-file2\n",
3170 "tree": {
3171 ".git": {},
3172 ".gitignore": "ignored-dir\n",
3173 "tracked-dir": {
3174 "tracked-file1": "",
3175 "ancestor-ignored-file1": "",
3176 },
3177 "ignored-dir": {
3178 "ignored-file1": ""
3179 }
3180 }
3181 }));
3182 let dir = parent_dir.path().join("tree");
3183
3184 let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
3185
3186 let tree = Worktree::local(
3187 client,
3188 dir.as_path(),
3189 true,
3190 Arc::new(RealFs),
3191 Default::default(),
3192 &mut cx.to_async(),
3193 )
3194 .await
3195 .unwrap();
3196 cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
3197 .await;
3198 tree.flush_fs_events(cx).await;
3199 cx.read(|cx| {
3200 let tree = tree.read(cx);
3201 assert!(
3202 !tree
3203 .entry_for_path("tracked-dir/tracked-file1")
3204 .unwrap()
3205 .is_ignored
3206 );
3207 assert!(
3208 tree.entry_for_path("tracked-dir/ancestor-ignored-file1")
3209 .unwrap()
3210 .is_ignored
3211 );
3212 assert!(
3213 tree.entry_for_path("ignored-dir/ignored-file1")
3214 .unwrap()
3215 .is_ignored
3216 );
3217 });
3218
3219 std::fs::write(dir.join("tracked-dir/tracked-file2"), "").unwrap();
3220 std::fs::write(dir.join("tracked-dir/ancestor-ignored-file2"), "").unwrap();
3221 std::fs::write(dir.join("ignored-dir/ignored-file2"), "").unwrap();
3222 tree.flush_fs_events(cx).await;
3223 cx.read(|cx| {
3224 let tree = tree.read(cx);
3225 assert!(
3226 !tree
3227 .entry_for_path("tracked-dir/tracked-file2")
3228 .unwrap()
3229 .is_ignored
3230 );
3231 assert!(
3232 tree.entry_for_path("tracked-dir/ancestor-ignored-file2")
3233 .unwrap()
3234 .is_ignored
3235 );
3236 assert!(
3237 tree.entry_for_path("ignored-dir/ignored-file2")
3238 .unwrap()
3239 .is_ignored
3240 );
3241 assert!(tree.entry_for_path(".git").unwrap().is_ignored);
3242 });
3243 }
3244
3245 #[gpui::test]
3246 async fn test_git_repository_for_path(cx: &mut TestAppContext) {
3247 let root = temp_tree(json!({
3248 "dir1": {
3249 ".git": {},
3250 "deps": {
3251 "dep1": {
3252 ".git": {},
3253 "src": {
3254 "a.txt": ""
3255 }
3256 }
3257 },
3258 "src": {
3259 "b.txt": ""
3260 }
3261 },
3262 "c.txt": ""
3263 }));
3264
3265 let http_client = FakeHttpClient::with_404_response();
3266 let client = cx.read(|cx| Client::new(http_client, cx));
3267 let tree = Worktree::local(
3268 client,
3269 root.path(),
3270 true,
3271 Arc::new(RealFs),
3272 Default::default(),
3273 &mut cx.to_async(),
3274 )
3275 .await
3276 .unwrap();
3277
3278 cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
3279 .await;
3280 tree.flush_fs_events(cx).await;
3281
3282 tree.read_with(cx, |tree, _cx| {
3283 let tree = tree.as_local().unwrap();
3284
3285 assert!(tree.repo_for("c.txt".as_ref()).is_none());
3286
3287 let repo = tree.repo_for("dir1/src/b.txt".as_ref()).unwrap();
3288 assert_eq!(repo.content_path.as_ref(), Path::new("dir1"));
3289 assert_eq!(repo.git_dir_path.as_ref(), Path::new("dir1/.git"));
3290
3291 let repo = tree.repo_for("dir1/deps/dep1/src/a.txt".as_ref()).unwrap();
3292 assert_eq!(repo.content_path.as_ref(), Path::new("dir1/deps/dep1"));
3293 assert_eq!(repo.git_dir_path.as_ref(), Path::new("dir1/deps/dep1/.git"),);
3294 });
3295
3296 let original_scan_id = tree.read_with(cx, |tree, _cx| {
3297 let tree = tree.as_local().unwrap();
3298 tree.repo_for("dir1/src/b.txt".as_ref()).unwrap().scan_id
3299 });
3300
3301 std::fs::write(root.path().join("dir1/.git/random_new_file"), "hello").unwrap();
3302 tree.flush_fs_events(cx).await;
3303
3304 tree.read_with(cx, |tree, _cx| {
3305 let tree = tree.as_local().unwrap();
3306 let new_scan_id = tree.repo_for("dir1/src/b.txt".as_ref()).unwrap().scan_id;
3307 assert_ne!(
3308 original_scan_id, new_scan_id,
3309 "original {original_scan_id}, new {new_scan_id}"
3310 );
3311 });
3312
3313 std::fs::remove_dir_all(root.path().join("dir1/.git")).unwrap();
3314 tree.flush_fs_events(cx).await;
3315
3316 tree.read_with(cx, |tree, _cx| {
3317 let tree = tree.as_local().unwrap();
3318
3319 assert!(tree.repo_for("dir1/src/b.txt".as_ref()).is_none());
3320 });
3321 }
3322
3323 #[test]
3324 fn test_changed_repos() {
3325 fn fake_entry(git_dir_path: impl AsRef<Path>, scan_id: usize) -> GitRepositoryEntry {
3326 GitRepositoryEntry {
3327 repo: Arc::new(Mutex::new(FakeGitRepository::default())),
3328 scan_id,
3329 content_path: git_dir_path.as_ref().parent().unwrap().into(),
3330 git_dir_path: git_dir_path.as_ref().into(),
3331 }
3332 }
3333
3334 let prev_repos: Vec<GitRepositoryEntry> = vec![
3335 fake_entry("/.git", 0),
3336 fake_entry("/a/.git", 0),
3337 fake_entry("/a/b/.git", 0),
3338 ];
3339
3340 let new_repos: Vec<GitRepositoryEntry> = vec![
3341 fake_entry("/a/.git", 1),
3342 fake_entry("/a/b/.git", 0),
3343 fake_entry("/a/c/.git", 0),
3344 ];
3345
3346 let res = LocalWorktree::changed_repos(&prev_repos, &new_repos);
3347
3348 // Deletion retained
3349 assert!(res
3350 .iter()
3351 .find(|repo| repo.git_dir_path.as_ref() == Path::new("/.git") && repo.scan_id == 0)
3352 .is_some());
3353
3354 // Update retained
3355 assert!(res
3356 .iter()
3357 .find(|repo| repo.git_dir_path.as_ref() == Path::new("/a/.git") && repo.scan_id == 1)
3358 .is_some());
3359
3360 // Addition retained
3361 assert!(res
3362 .iter()
3363 .find(|repo| repo.git_dir_path.as_ref() == Path::new("/a/c/.git") && repo.scan_id == 0)
3364 .is_some());
3365
        // No change, not retained
3367 assert!(res
3368 .iter()
3369 .find(|repo| repo.git_dir_path.as_ref() == Path::new("/a/b/.git") && repo.scan_id == 0)
3370 .is_none());
3371 }
3372
3373 #[gpui::test]
3374 async fn test_write_file(cx: &mut TestAppContext) {
3375 let dir = temp_tree(json!({
3376 ".git": {},
3377 ".gitignore": "ignored-dir\n",
3378 "tracked-dir": {},
3379 "ignored-dir": {}
3380 }));
3381
3382 let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
3383
3384 let tree = Worktree::local(
3385 client,
3386 dir.path(),
3387 true,
3388 Arc::new(RealFs),
3389 Default::default(),
3390 &mut cx.to_async(),
3391 )
3392 .await
3393 .unwrap();
3394 cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
3395 .await;
3396 tree.flush_fs_events(cx).await;
3397
3398 tree.update(cx, |tree, cx| {
3399 tree.as_local().unwrap().write_file(
3400 Path::new("tracked-dir/file.txt"),
3401 "hello".into(),
3402 Default::default(),
3403 cx,
3404 )
3405 })
3406 .await
3407 .unwrap();
3408 tree.update(cx, |tree, cx| {
3409 tree.as_local().unwrap().write_file(
3410 Path::new("ignored-dir/file.txt"),
3411 "world".into(),
3412 Default::default(),
3413 cx,
3414 )
3415 })
3416 .await
3417 .unwrap();
3418
3419 tree.read_with(cx, |tree, _| {
3420 let tracked = tree.entry_for_path("tracked-dir/file.txt").unwrap();
3421 let ignored = tree.entry_for_path("ignored-dir/file.txt").unwrap();
3422 assert!(!tracked.is_ignored);
3423 assert!(ignored.is_ignored);
3424 });
3425 }
3426
3427 #[gpui::test(iterations = 30)]
3428 async fn test_create_directory(cx: &mut TestAppContext) {
3429 let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
3430
3431 let fs = FakeFs::new(cx.background());
3432 fs.insert_tree(
3433 "/a",
3434 json!({
3435 "b": {},
3436 "c": {},
3437 "d": {},
3438 }),
3439 )
3440 .await;
3441
3442 let tree = Worktree::local(
3443 client,
3444 "/a".as_ref(),
3445 true,
3446 fs,
3447 Default::default(),
3448 &mut cx.to_async(),
3449 )
3450 .await
3451 .unwrap();
3452
3453 let entry = tree
3454 .update(cx, |tree, cx| {
3455 tree.as_local_mut()
3456 .unwrap()
3457 .create_entry("a/e".as_ref(), true, cx)
3458 })
3459 .await
3460 .unwrap();
3461 assert!(entry.is_dir());
3462
3463 cx.foreground().run_until_parked();
3464 tree.read_with(cx, |tree, _| {
3465 assert_eq!(tree.entry_for_path("a/e").unwrap().kind, EntryKind::Dir);
3466 });
3467 }
3468
3469 #[gpui::test(iterations = 100)]
3470 fn test_random(mut rng: StdRng) {
3471 let operations = env::var("OPERATIONS")
3472 .map(|o| o.parse().unwrap())
3473 .unwrap_or(40);
3474 let initial_entries = env::var("INITIAL_ENTRIES")
3475 .map(|o| o.parse().unwrap())
3476 .unwrap_or(20);
3477
3478 let root_dir = tempdir::TempDir::new("worktree-test").unwrap();
3479 for _ in 0..initial_entries {
3480 randomly_mutate_tree(root_dir.path(), 1.0, &mut rng).unwrap();
3481 }
3482 log::info!("Generated initial tree");
3483
3484 let (notify_tx, _notify_rx) = mpsc::unbounded();
3485 let fs = Arc::new(RealFs);
3486 let next_entry_id = Arc::new(AtomicUsize::new(0));
3487 let mut initial_snapshot = LocalSnapshot {
3488 abs_path: root_dir.path().into(),
3489 removed_entry_ids: Default::default(),
3490 ignores_by_parent_abs_path: Default::default(),
3491 git_repositories: Default::default(),
3492 next_entry_id: next_entry_id.clone(),
3493 snapshot: Snapshot {
3494 id: WorktreeId::from_usize(0),
3495 entries_by_path: Default::default(),
3496 entries_by_id: Default::default(),
3497 root_name: Default::default(),
3498 root_char_bag: Default::default(),
3499 scan_id: 0,
3500 is_complete: true,
3501 },
3502 extension_counts: Default::default(),
3503 };
3504 initial_snapshot.insert_entry(
3505 Entry::new(
3506 Path::new("").into(),
3507 &smol::block_on(fs.metadata(root_dir.path()))
3508 .unwrap()
3509 .unwrap(),
3510 &next_entry_id,
3511 Default::default(),
3512 ),
3513 fs.as_ref(),
3514 );
3515 let mut scanner = BackgroundScanner::new(
3516 Arc::new(Mutex::new(initial_snapshot.clone())),
3517 notify_tx,
3518 fs.clone(),
3519 Arc::new(gpui::executor::Background::new()),
3520 );
3521 smol::block_on(scanner.scan_dirs()).unwrap();
3522 scanner.snapshot().check_invariants();
3523
3524 let mut events = Vec::new();
3525 let mut snapshots = Vec::new();
3526 let mut mutations_len = operations;
3527 while mutations_len > 1 {
3528 if !events.is_empty() && rng.gen_bool(0.4) {
3529 let len = rng.gen_range(0..=events.len());
3530 let to_deliver = events.drain(0..len).collect::<Vec<_>>();
3531 log::info!("Delivering events: {:#?}", to_deliver);
3532 smol::block_on(scanner.process_events(to_deliver));
3533 scanner.snapshot().check_invariants();
3534 } else {
3535 events.extend(randomly_mutate_tree(root_dir.path(), 0.6, &mut rng).unwrap());
3536 mutations_len -= 1;
3537 }
3538
3539 if rng.gen_bool(0.2) {
3540 snapshots.push(scanner.snapshot());
3541 }
3542 }
3543 log::info!("Quiescing: {:#?}", events);
3544 smol::block_on(scanner.process_events(events));
3545 scanner.snapshot().check_invariants();
3546
3547 let (notify_tx, _notify_rx) = mpsc::unbounded();
3548 let mut new_scanner = BackgroundScanner::new(
3549 Arc::new(Mutex::new(initial_snapshot)),
3550 notify_tx,
3551 scanner.fs.clone(),
3552 scanner.executor.clone(),
3553 );
3554 smol::block_on(new_scanner.scan_dirs()).unwrap();
3555 assert_eq!(
3556 scanner.snapshot().to_vec(true),
3557 new_scanner.snapshot().to_vec(true)
3558 );
3559
3560 for mut prev_snapshot in snapshots {
3561 let include_ignored = rng.gen::<bool>();
3562 if !include_ignored {
3563 let mut entries_by_path_edits = Vec::new();
3564 let mut entries_by_id_edits = Vec::new();
3565 for entry in prev_snapshot
3566 .entries_by_id
3567 .cursor::<()>()
3568 .filter(|e| e.is_ignored)
3569 {
3570 entries_by_path_edits.push(Edit::Remove(PathKey(entry.path.clone())));
3571 entries_by_id_edits.push(Edit::Remove(entry.id));
3572 }
3573
3574 prev_snapshot
3575 .entries_by_path
3576 .edit(entries_by_path_edits, &());
3577 prev_snapshot.entries_by_id.edit(entries_by_id_edits, &());
3578 }
3579
3580 let update = scanner
3581 .snapshot()
3582 .build_update(&prev_snapshot, 0, 0, include_ignored);
3583 prev_snapshot.apply_remote_update(update).unwrap();
3584 assert_eq!(
3585 prev_snapshot.to_vec(true),
3586 scanner.snapshot().to_vec(include_ignored)
3587 );
3588 }
3589 }
3590
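    /// Randomly mutates the tree rooted at `root_path`: creates a file or directory, writes a
    /// `.gitignore`, renames, or deletes something, and returns synthetic `fsevent::Event`s
    /// describing the paths that changed.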
3591 fn randomly_mutate_tree(
3592 root_path: &Path,
3593 insertion_probability: f64,
3594 rng: &mut impl Rng,
3595 ) -> Result<Vec<fsevent::Event>> {
3596 let root_path = root_path.canonicalize().unwrap();
3597 let (dirs, files) = read_dir_recursive(root_path.clone());
3598
3599 let mut events = Vec::new();
3600 let mut record_event = |path: PathBuf| {
3601 events.push(fsevent::Event {
3602 event_id: SystemTime::now()
3603 .duration_since(UNIX_EPOCH)
3604 .unwrap()
3605 .as_secs(),
3606 flags: fsevent::StreamFlags::empty(),
3607 path,
3608 });
3609 };
3610
3611 if (files.is_empty() && dirs.len() == 1) || rng.gen_bool(insertion_probability) {
3612 let path = dirs.choose(rng).unwrap();
3613 let new_path = path.join(gen_name(rng));
3614
3615 if rng.gen() {
3616 log::info!("Creating dir {:?}", new_path.strip_prefix(root_path)?);
3617 std::fs::create_dir(&new_path)?;
3618 } else {
3619 log::info!("Creating file {:?}", new_path.strip_prefix(root_path)?);
3620 std::fs::write(&new_path, "")?;
3621 }
3622 record_event(new_path);
3623 } else if rng.gen_bool(0.05) {
3624 let ignore_dir_path = dirs.choose(rng).unwrap();
3625 let ignore_path = ignore_dir_path.join(&*GITIGNORE);
3626
3627 let (subdirs, subfiles) = read_dir_recursive(ignore_dir_path.clone());
3628 let files_to_ignore = {
3629 let len = rng.gen_range(0..=subfiles.len());
3630 subfiles.choose_multiple(rng, len)
3631 };
3632 let dirs_to_ignore = {
3633 let len = rng.gen_range(0..subdirs.len());
3634 subdirs.choose_multiple(rng, len)
3635 };
3636
3637 let mut ignore_contents = String::new();
3638 for path_to_ignore in files_to_ignore.chain(dirs_to_ignore) {
3639 writeln!(
3640 ignore_contents,
3641 "{}",
3642 path_to_ignore
3643 .strip_prefix(&ignore_dir_path)?
3644 .to_str()
3645 .unwrap()
3646 )
3647 .unwrap();
3648 }
3649 log::info!(
3650 "Creating {:?} with contents:\n{}",
3651 ignore_path.strip_prefix(&root_path)?,
3652 ignore_contents
3653 );
3654 std::fs::write(&ignore_path, ignore_contents).unwrap();
3655 record_event(ignore_path);
3656 } else {
3657 let old_path = {
3658 let file_path = files.choose(rng);
3659 let dir_path = dirs[1..].choose(rng);
3660 file_path.into_iter().chain(dir_path).choose(rng).unwrap()
3661 };
3662
3663 let is_rename = rng.gen();
3664 if is_rename {
3665 let new_path_parent = dirs
3666 .iter()
3667 .filter(|d| !d.starts_with(old_path))
3668 .choose(rng)
3669 .unwrap();
3670
3671 let overwrite_existing_dir =
3672 !old_path.starts_with(&new_path_parent) && rng.gen_bool(0.3);
3673 let new_path = if overwrite_existing_dir {
3674 std::fs::remove_dir_all(&new_path_parent).ok();
3675 new_path_parent.to_path_buf()
3676 } else {
3677 new_path_parent.join(gen_name(rng))
3678 };
3679
3680 log::info!(
3681 "Renaming {:?} to {}{:?}",
3682 old_path.strip_prefix(&root_path)?,
3683 if overwrite_existing_dir {
3684 "overwrite "
3685 } else {
3686 ""
3687 },
3688 new_path.strip_prefix(&root_path)?
3689 );
3690 std::fs::rename(&old_path, &new_path)?;
3691 record_event(old_path.clone());
3692 record_event(new_path);
3693 } else if old_path.is_dir() {
3694 let (dirs, files) = read_dir_recursive(old_path.clone());
3695
3696 log::info!("Deleting dir {:?}", old_path.strip_prefix(&root_path)?);
3697 std::fs::remove_dir_all(&old_path).unwrap();
3698 for file in files {
3699 record_event(file);
3700 }
3701 for dir in dirs {
3702 record_event(dir);
3703 }
3704 } else {
3705 log::info!("Deleting file {:?}", old_path.strip_prefix(&root_path)?);
3706 std::fs::remove_file(old_path).unwrap();
3707 record_event(old_path.clone());
3708 }
3709 }
3710
3711 Ok(events)
3712 }
3713
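    /// Returns all directories (including `path` itself) and all files beneath `path`.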
3714 fn read_dir_recursive(path: PathBuf) -> (Vec<PathBuf>, Vec<PathBuf>) {
3715 let child_entries = std::fs::read_dir(&path).unwrap();
3716 let mut dirs = vec![path];
3717 let mut files = Vec::new();
3718 for child_entry in child_entries {
3719 let child_path = child_entry.unwrap().path();
3720 if child_path.is_dir() {
3721 let (child_dirs, child_files) = read_dir_recursive(child_path);
3722 dirs.extend(child_dirs);
3723 files.extend(child_files);
3724 } else {
3725 files.push(child_path);
3726 }
3727 }
3728 (dirs, files)
3729 }
3730
3731 fn gen_name(rng: &mut impl Rng) -> String {
3732 (0..6)
3733 .map(|_| rng.sample(rand::distributions::Alphanumeric))
3734 .map(char::from)
3735 .collect()
3736 }
3737
3738 impl LocalSnapshot {
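        /// Verifies that the path-keyed and id-keyed trees, the traversal APIs, the ignore
        /// map, and the extension counts are all mutually consistent.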
3739 fn check_invariants(&self) {
3740 let mut files = self.files(true, 0);
3741 let mut visible_files = self.files(false, 0);
3742 for entry in self.entries_by_path.cursor::<()>() {
3743 if entry.is_file() {
3744 assert_eq!(files.next().unwrap().inode, entry.inode);
3745 if !entry.is_ignored {
3746 assert_eq!(visible_files.next().unwrap().inode, entry.inode);
3747 }
3748 }
3749 }
3750 assert!(files.next().is_none());
3751 assert!(visible_files.next().is_none());
3752
3753 let mut bfs_paths = Vec::new();
3754 let mut stack = vec![Path::new("")];
3755 while let Some(path) = stack.pop() {
3756 bfs_paths.push(path);
3757 let ix = stack.len();
3758 for child_entry in self.child_entries(path) {
3759 stack.insert(ix, &child_entry.path);
3760 }
3761 }
3762
3763 let dfs_paths_via_iter = self
3764 .entries_by_path
3765 .cursor::<()>()
3766 .map(|e| e.path.as_ref())
3767 .collect::<Vec<_>>();
3768 assert_eq!(bfs_paths, dfs_paths_via_iter);
3769
3770 let dfs_paths_via_traversal = self
3771 .entries(true)
3772 .map(|e| e.path.as_ref())
3773 .collect::<Vec<_>>();
3774 assert_eq!(dfs_paths_via_traversal, dfs_paths_via_iter);
3775
3776 for ignore_parent_abs_path in self.ignores_by_parent_abs_path.keys() {
3777 let ignore_parent_path =
3778 ignore_parent_abs_path.strip_prefix(&self.abs_path).unwrap();
3779 assert!(self.entry_for_path(&ignore_parent_path).is_some());
3780 assert!(self
3781 .entry_for_path(ignore_parent_path.join(&*GITIGNORE))
3782 .is_some());
3783 }
3784
3785 // Ensure extension counts are correct.
3786 let mut expected_extension_counts = HashMap::default();
3787 for extension in self.entries(false).filter_map(|e| e.path.extension()) {
3788 *expected_extension_counts
3789 .entry(extension.into())
3790 .or_insert(0) += 1;
3791 }
3792 assert_eq!(self.extension_counts, expected_extension_counts);
3793 }
3794
3795 fn to_vec(&self, include_ignored: bool) -> Vec<(&Path, u64, bool)> {
3796 let mut paths = Vec::new();
3797 for entry in self.entries_by_path.cursor::<()>() {
3798 if include_ignored || !entry.is_ignored {
3799 paths.push((entry.path.as_ref(), entry.inode, entry.is_ignored));
3800 }
3801 }
3802 paths.sort_by(|a, b| a.0.cmp(b.0));
3803 paths
3804 }
3805 }
3806}