1use crate::{
2 copy_recursive, ignore::IgnoreStack, DiagnosticSummary, ProjectEntryId, RemoveOptions,
3};
4use ::ignore::gitignore::{Gitignore, GitignoreBuilder};
5use anyhow::{anyhow, Context, Result};
6use client::{proto, Client};
7use clock::ReplicaId;
8use collections::{HashMap, VecDeque};
9use fs::{repository::GitRepository, Fs, LineEnding};
10use futures::{
11 channel::{
12 mpsc::{self, UnboundedSender},
13 oneshot,
14 },
15 select_biased,
16 task::Poll,
17 Stream, StreamExt,
18};
19use fuzzy::CharBag;
20use git::{DOT_GIT, GITIGNORE};
21use gpui::{executor, AppContext, AsyncAppContext, Entity, ModelContext, ModelHandle, Task};
22use language::{
23 proto::{
24 deserialize_fingerprint, deserialize_version, serialize_fingerprint, serialize_line_ending,
25 serialize_version,
26 },
27 Buffer, DiagnosticEntry, File as _, PointUtf16, Rope, RopeFingerprint, Unclipped,
28};
29use lsp::LanguageServerId;
30use parking_lot::Mutex;
31use postage::{
32 barrier,
33 prelude::{Sink as _, Stream as _},
34 watch,
35};
36use smol::channel::{self, Sender};
37use std::{
38 any::Any,
39 cmp::{self, Ordering},
40 convert::TryFrom,
41 ffi::OsStr,
42 fmt,
43 future::Future,
44 mem,
45 ops::{Deref, DerefMut},
46 path::{Path, PathBuf},
47 pin::Pin,
48 sync::{
49 atomic::{AtomicUsize, Ordering::SeqCst},
50 Arc,
51 },
52 time::{Duration, SystemTime},
53};
54use sum_tree::{Bias, Edit, SeekTarget, SumTree, TreeMap, TreeSet};
55use util::{paths::HOME, ResultExt, TryFutureExt};
56
/// Identifies a worktree within a project. Wraps the `usize` model id of the
/// worktree's handle (see `WorktreeId::from_usize` used in `Worktree::local`).
#[derive(Copy, Clone, PartialEq, Eq, Debug, Hash, PartialOrd, Ord)]
pub struct WorktreeId(usize);
59
/// A directory tree tracked by a project: either backed by the local
/// filesystem (`Local`) or mirrored from a collaborator over RPC (`Remote`).
pub enum Worktree {
    Local(LocalWorktree),
    Remote(RemoteWorktree),
}
64
/// A worktree backed by the local filesystem, kept up to date by a
/// background scanner task.
pub struct LocalWorktree {
    snapshot: LocalSnapshot,
    // Requests asking the background scanner to rescan specific absolute
    // paths; the barrier sender is dropped once the rescan is processed.
    path_changes_tx: channel::Sender<(Vec<PathBuf>, barrier::Sender)>,
    // Flag toggled by `ScanState` messages; see `scan_complete`.
    is_scanning: (watch::Sender<bool>, watch::Receiver<bool>),
    _background_scanner_task: Task<()>,
    // Present while the worktree is shared with collaborators.
    share: Option<ShareState>,
    // Diagnostics per worktree-relative path; the inner Vec is kept sorted
    // by language server id (see `update_diagnostics`).
    diagnostics: HashMap<
        Arc<Path>,
        Vec<(
            LanguageServerId,
            Vec<DiagnosticEntry<Unclipped<PointUtf16>>>,
        )>,
    >,
    diagnostic_summaries: HashMap<Arc<Path>, HashMap<LanguageServerId, DiagnosticSummary>>,
    client: Arc<Client>,
    fs: Arc<dyn Fs>,
    visible: bool,
}
83
/// A worktree mirrored from a collaborator's machine over RPC.
pub struct RemoteWorktree {
    // Foreground copy of the snapshot, refreshed after each applied update.
    snapshot: Snapshot,
    // Snapshot mutated on the background thread as updates arrive.
    background_snapshot: Arc<Mutex<Snapshot>>,
    project_id: u64,
    client: Arc<Client>,
    // Sender for incoming updates; `None` after disconnecting from the host.
    updates_tx: Option<UnboundedSender<proto::UpdateWorktree>>,
    // Waiters to be notified once a given scan id has been observed.
    snapshot_subscriptions: VecDeque<(usize, oneshot::Sender<()>)>,
    replica_id: ReplicaId,
    diagnostic_summaries: HashMap<Arc<Path>, HashMap<LanguageServerId, DiagnosticSummary>>,
    visible: bool,
    // Set by `disconnected_from_host`; no further updates will arrive.
    disconnected: bool,
}
96
/// A point-in-time view of a worktree's file entries, shared by local and
/// remote worktrees.
#[derive(Clone)]
pub struct Snapshot {
    id: WorktreeId,
    // Absolute path of the worktree root on the host machine.
    abs_path: Arc<Path>,
    // File name of the root; used for display and fuzzy matching.
    root_name: String,
    // Lowercased characters of `root_name`, for fuzzy matching.
    root_char_bag: CharBag,
    entries_by_path: SumTree<Entry>,
    entries_by_id: SumTree<PathEntry>,
    // Git repositories discovered in the tree, keyed by work directory.
    repository_entries: TreeMap<RepositoryWorkDirectory, RepositoryEntry>,

    /// A number that increases every time the worktree begins scanning
    /// a set of paths from the filesystem. This scanning could be caused
    /// by some operation performed on the worktree, such as reading or
    /// writing a file, or by an event reported by the filesystem.
    scan_id: usize,

    /// The latest scan id that has completed, and whose preceding scans
    /// have all completed. The current `scan_id` could be more than one
    /// greater than the `completed_scan_id` if operations are performed
    /// on the worktree while it is processing a file-system event.
    completed_scan_id: usize,
}
119
/// A git repository tracked within a worktree snapshot.
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct RepositoryEntry {
    // Scan id recorded when this entry was last updated; compared in
    // `LocalWorktree::changed_repos` to detect changes.
    pub(crate) scan_id: usize,
    // Entry id of the repository's working directory.
    pub(crate) work_directory: WorkDirectoryEntry,
    // Current branch name, if one has been detected.
    pub(crate) branch: Option<Arc<str>>,
}
126
127impl RepositoryEntry {
128 pub fn branch(&self) -> Option<Arc<str>> {
129 self.branch.clone()
130 }
131
132 pub fn work_directory_id(&self) -> ProjectEntryId {
133 *self.work_directory
134 }
135
136 pub fn work_directory(&self, snapshot: &Snapshot) -> Option<RepositoryWorkDirectory> {
137 snapshot
138 .entry_for_id(self.work_directory_id())
139 .map(|entry| RepositoryWorkDirectory(entry.path.clone()))
140 }
141
142 pub(crate) fn contains(&self, snapshot: &Snapshot, path: &Path) -> bool {
143 self.work_directory.contains(snapshot, path)
144 }
145}
146
147impl From<&RepositoryEntry> for proto::RepositoryEntry {
148 fn from(value: &RepositoryEntry) -> Self {
149 proto::RepositoryEntry {
150 scan_id: value.scan_id as u64,
151 work_directory_id: value.work_directory.to_proto(),
152 branch: value.branch.as_ref().map(|str| str.to_string()),
153 }
154 }
155}
156
/// This path corresponds to the 'content path' (the folder that contains the .git),
/// expressed relative to the worktree root; the `Default` impl below uses the
/// empty path, i.e. the worktree root itself.
#[derive(Clone, Debug, Ord, PartialOrd, Eq, PartialEq)]
pub struct RepositoryWorkDirectory(Arc<Path>);
160
161impl Default for RepositoryWorkDirectory {
162 fn default() -> Self {
163 RepositoryWorkDirectory(Arc::from(Path::new("")))
164 }
165}
166
/// Newtype around the [`ProjectEntryId`] of a repository's working directory
/// (see the `work_directory_id` note on `LocalSnapshot::git_repositories`).
#[derive(Clone, Debug, Ord, PartialOrd, Eq, PartialEq)]
pub struct WorkDirectoryEntry(ProjectEntryId);
169
170impl WorkDirectoryEntry {
171 // Note that these paths should be relative to the worktree root.
172 pub(crate) fn contains(&self, snapshot: &Snapshot, path: &Path) -> bool {
173 snapshot
174 .entry_for_id(self.0)
175 .map(|entry| path.starts_with(&entry.path))
176 .unwrap_or(false)
177 }
178
179 pub(crate) fn relativize(&self, worktree: &Snapshot, path: &Path) -> Option<RepoPath> {
180 worktree.entry_for_id(self.0).and_then(|entry| {
181 path.strip_prefix(&entry.path)
182 .ok()
183 .map(move |path| RepoPath(path.to_owned()))
184 })
185 }
186}
187
/// Lets a `WorkDirectoryEntry` be used wherever a [`ProjectEntryId`] is expected.
impl Deref for WorkDirectoryEntry {
    type Target = ProjectEntryId;

    fn deref(&self) -> &Self::Target {
        &self.0
    }
}
195
196impl<'a> From<ProjectEntryId> for WorkDirectoryEntry {
197 fn from(value: ProjectEntryId) -> Self {
198 WorkDirectoryEntry(value)
199 }
200}
201
/// A path expressed relative to a git repository's working directory.
#[derive(Clone, Debug, Ord, PartialOrd, Eq, PartialEq)]
pub struct RepoPath(PathBuf);
204
/// Allows a `RepoPath` to be passed to APIs taking `impl AsRef<Path>`.
impl AsRef<Path> for RepoPath {
    fn as_ref(&self) -> &Path {
        self.0.as_ref()
    }
}
210
/// Exposes `PathBuf`'s methods directly on `RepoPath`.
impl Deref for RepoPath {
    type Target = PathBuf;

    fn deref(&self) -> &Self::Target {
        &self.0
    }
}
218
/// Allows a `RepositoryWorkDirectory` to be passed to APIs taking `impl AsRef<Path>`.
impl AsRef<Path> for RepositoryWorkDirectory {
    fn as_ref(&self) -> &Path {
        self.0.as_ref()
    }
}
224
/// Extends [`Snapshot`] with state that only exists for worktrees backed by
/// the local filesystem.
#[derive(Debug, Clone)]
pub struct LocalSnapshot {
    // Parsed gitignore files keyed by the absolute path of their parent
    // directory, paired with a scan id.
    ignores_by_parent_abs_path: HashMap<Arc<Path>, (Arc<Gitignore>, usize)>,
    // The ProjectEntryId corresponds to the entry for the .git dir
    // work_directory_id
    git_repositories: TreeMap<ProjectEntryId, LocalRepositoryEntry>,
    removed_entry_ids: HashMap<u64, ProjectEntryId>,
    // Source of fresh entry ids, shared with the background scanner.
    next_entry_id: Arc<AtomicUsize>,
    snapshot: Snapshot,
}
235
/// Handle to an on-disk git repository discovered during scanning.
#[derive(Debug, Clone)]
pub struct LocalRepositoryEntry {
    // Shared, lock-protected handle to the underlying repository.
    pub(crate) repo_ptr: Arc<Mutex<dyn GitRepository>>,
    /// Path to the actual .git folder.
    /// Note: if .git is a file, this points to the folder indicated by the .git file
    pub(crate) git_dir_path: Arc<Path>,
}
243
244impl LocalRepositoryEntry {
245 // Note that this path should be relative to the worktree root.
246 pub(crate) fn in_dot_git(&self, path: &Path) -> bool {
247 path.starts_with(self.git_dir_path.as_ref())
248 }
249}
250
/// Exposes the plain [`Snapshot`] API directly on `LocalSnapshot`.
impl Deref for LocalSnapshot {
    type Target = Snapshot;

    fn deref(&self) -> &Self::Target {
        &self.snapshot
    }
}
258
/// Mutable counterpart of the `Deref` impl above.
impl DerefMut for LocalSnapshot {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.snapshot
    }
}
264
/// Progress messages sent from the background scanner to the worktree model.
enum ScanState {
    /// A scan pass has begun.
    Started,
    /// The scanner produced a new snapshot.
    Updated {
        snapshot: LocalSnapshot,
        /// The paths that changed, with the kind of change for each.
        changes: HashMap<Arc<Path>, PathChange>,
        /// Dropped once the update has been processed, releasing any waiter.
        barrier: Option<barrier::Sender>,
        /// Whether the scanner is still working after this update.
        scanning: bool,
    },
}
274
/// State kept while a local worktree is shared with collaborators.
struct ShareState {
    project_id: u64,
    // Publishes each new snapshot to the update-sending background task.
    snapshots_tx: watch::Sender<LocalSnapshot>,
    // Signaled to make the update-sending task retry after a send failure.
    resume_updates: watch::Sender<()>,
    _maintain_remote_snapshot: Task<Option<()>>,
}
281
/// Events emitted by a [`Worktree`] model.
pub enum Event {
    /// Entries changed on disk or via an applied remote update.
    UpdatedEntries(HashMap<Arc<Path>, PathChange>),
    /// Git repositories were added, removed, or rescanned.
    UpdatedGitRepositories(Vec<RepositoryEntry>),
}
286
/// Makes `Worktree` usable as a model entity that emits [`Event`]s.
impl Entity for Worktree {
    type Event = Event;
}
290
291impl Worktree {
    /// Creates a worktree rooted at `path` on the local filesystem, spawning
    /// a background scanner that keeps its snapshot in sync with disk.
    pub async fn local(
        client: Arc<Client>,
        path: impl Into<Arc<Path>>,
        visible: bool,
        fs: Arc<dyn Fs>,
        next_entry_id: Arc<AtomicUsize>,
        cx: &mut AsyncAppContext,
    ) -> Result<ModelHandle<Self>> {
        // After determining whether the root entry is a file or a directory, populate the
        // snapshot's "root name", which will be used for the purpose of fuzzy matching.
        let abs_path = path.into();
        let metadata = fs
            .metadata(&abs_path)
            .await
            .context("failed to stat worktree path")?;

        Ok(cx.add_model(move |cx: &mut ModelContext<Worktree>| {
            let root_name = abs_path
                .file_name()
                .map_or(String::new(), |f| f.to_string_lossy().to_string());

            let mut snapshot = LocalSnapshot {
                ignores_by_parent_abs_path: Default::default(),
                removed_entry_ids: Default::default(),
                git_repositories: Default::default(),
                next_entry_id,
                snapshot: Snapshot {
                    id: WorktreeId::from_usize(cx.model_id()),
                    abs_path: abs_path.clone(),
                    root_name: root_name.clone(),
                    root_char_bag: root_name.chars().map(|c| c.to_ascii_lowercase()).collect(),
                    entries_by_path: Default::default(),
                    entries_by_id: Default::default(),
                    repository_entries: Default::default(),
                    scan_id: 1,
                    completed_scan_id: 0,
                },
            };

            // Seed the snapshot with the root entry, if the path exists.
            if let Some(metadata) = metadata {
                snapshot.insert_entry(
                    Entry::new(
                        Arc::from(Path::new("")),
                        &metadata,
                        &snapshot.next_entry_id,
                        snapshot.root_char_bag,
                    ),
                    fs.as_ref(),
                );
            }

            let (path_changes_tx, path_changes_rx) = channel::unbounded();
            let (scan_states_tx, mut scan_states_rx) = mpsc::unbounded();

            // Relay scanner progress into the model: update the snapshot,
            // emit entry-change events, and track the `is_scanning` flag.
            cx.spawn_weak(|this, mut cx| async move {
                while let Some((state, this)) = scan_states_rx.next().await.zip(this.upgrade(&cx)) {
                    this.update(&mut cx, |this, cx| {
                        let this = this.as_local_mut().unwrap();
                        match state {
                            ScanState::Started => {
                                *this.is_scanning.0.borrow_mut() = true;
                            }
                            ScanState::Updated {
                                snapshot,
                                changes,
                                barrier,
                                scanning,
                            } => {
                                *this.is_scanning.0.borrow_mut() = scanning;
                                this.set_snapshot(snapshot, cx);
                                cx.emit(Event::UpdatedEntries(changes));
                                // Dropping the barrier releases whoever
                                // requested this rescan.
                                drop(barrier);
                            }
                        }
                        cx.notify();
                    });
                }
            })
            .detach();

            // Watch the filesystem (with a 100ms debounce) and scan on the
            // background executor.
            let background_scanner_task = cx.background().spawn({
                let fs = fs.clone();
                let snapshot = snapshot.clone();
                let background = cx.background().clone();
                async move {
                    let events = fs.watch(&abs_path, Duration::from_millis(100)).await;
                    BackgroundScanner::new(
                        snapshot,
                        fs,
                        scan_states_tx,
                        background,
                        path_changes_rx,
                    )
                    .run(events)
                    .await;
                }
            });

            Worktree::Local(LocalWorktree {
                snapshot,
                is_scanning: watch::channel_with(true),
                share: None,
                path_changes_tx,
                _background_scanner_task: background_scanner_task,
                diagnostics: Default::default(),
                diagnostic_summaries: Default::default(),
                client,
                fs,
                visible,
            })
        }))
    }
404
    /// Creates a worktree mirroring a collaborator's worktree. Updates arrive
    /// over RPC, are applied to `background_snapshot` off the main thread,
    /// and are then copied into the foreground snapshot.
    pub fn remote(
        project_remote_id: u64,
        replica_id: ReplicaId,
        worktree: proto::WorktreeMetadata,
        client: Arc<Client>,
        cx: &mut AppContext,
    ) -> ModelHandle<Self> {
        cx.add_model(|cx: &mut ModelContext<Self>| {
            let snapshot = Snapshot {
                id: WorktreeId(worktree.id as usize),
                abs_path: Arc::from(PathBuf::from(worktree.abs_path)),
                root_name: worktree.root_name.clone(),
                root_char_bag: worktree
                    .root_name
                    .chars()
                    .map(|c| c.to_ascii_lowercase())
                    .collect(),
                entries_by_path: Default::default(),
                entries_by_id: Default::default(),
                repository_entries: Default::default(),
                scan_id: 1,
                completed_scan_id: 0,
            };

            let (updates_tx, mut updates_rx) = mpsc::unbounded();
            let background_snapshot = Arc::new(Mutex::new(snapshot.clone()));
            let (mut snapshot_updated_tx, mut snapshot_updated_rx) = watch::channel();

            // Apply incoming updates on the background executor; failures are
            // logged rather than propagated.
            cx.background()
                .spawn({
                    let background_snapshot = background_snapshot.clone();
                    async move {
                        while let Some(update) = updates_rx.next().await {
                            if let Err(error) =
                                background_snapshot.lock().apply_remote_update(update)
                            {
                                log::error!("error applying worktree update: {}", error);
                            }
                            snapshot_updated_tx.send(()).await.ok();
                        }
                    }
                })
                .detach();

            // After each applied update, refresh the foreground snapshot and
            // wake subscribers whose awaited scan id has now been observed.
            cx.spawn_weak(|this, mut cx| async move {
                while (snapshot_updated_rx.recv().await).is_some() {
                    if let Some(this) = this.upgrade(&cx) {
                        this.update(&mut cx, |this, cx| {
                            let this = this.as_remote_mut().unwrap();
                            this.snapshot = this.background_snapshot.lock().clone();
                            cx.emit(Event::UpdatedEntries(Default::default()));
                            cx.notify();
                            while let Some((scan_id, _)) = this.snapshot_subscriptions.front() {
                                if this.observed_snapshot(*scan_id) {
                                    let (_, tx) = this.snapshot_subscriptions.pop_front().unwrap();
                                    let _ = tx.send(());
                                } else {
                                    break;
                                }
                            }
                        });
                    } else {
                        break;
                    }
                }
            })
            .detach();

            Worktree::Remote(RemoteWorktree {
                project_id: project_remote_id,
                replica_id,
                snapshot: snapshot.clone(),
                background_snapshot,
                updates_tx: Some(updates_tx),
                snapshot_subscriptions: Default::default(),
                client: client.clone(),
                diagnostic_summaries: Default::default(),
                visible: worktree.visible,
                disconnected: false,
            })
        })
    }
487
488 pub fn as_local(&self) -> Option<&LocalWorktree> {
489 if let Worktree::Local(worktree) = self {
490 Some(worktree)
491 } else {
492 None
493 }
494 }
495
496 pub fn as_remote(&self) -> Option<&RemoteWorktree> {
497 if let Worktree::Remote(worktree) = self {
498 Some(worktree)
499 } else {
500 None
501 }
502 }
503
504 pub fn as_local_mut(&mut self) -> Option<&mut LocalWorktree> {
505 if let Worktree::Local(worktree) = self {
506 Some(worktree)
507 } else {
508 None
509 }
510 }
511
512 pub fn as_remote_mut(&mut self) -> Option<&mut RemoteWorktree> {
513 if let Worktree::Remote(worktree) = self {
514 Some(worktree)
515 } else {
516 None
517 }
518 }
519
520 pub fn is_local(&self) -> bool {
521 matches!(self, Worktree::Local(_))
522 }
523
524 pub fn is_remote(&self) -> bool {
525 !self.is_local()
526 }
527
    /// A clone of this worktree's plain [`Snapshot`]. For local worktrees
    /// this discards the extra `LocalSnapshot` state.
    pub fn snapshot(&self) -> Snapshot {
        match self {
            Worktree::Local(worktree) => worktree.snapshot().snapshot,
            Worktree::Remote(worktree) => worktree.snapshot(),
        }
    }
534
535 pub fn scan_id(&self) -> usize {
536 match self {
537 Worktree::Local(worktree) => worktree.snapshot.scan_id,
538 Worktree::Remote(worktree) => worktree.snapshot.scan_id,
539 }
540 }
541
542 pub fn completed_scan_id(&self) -> usize {
543 match self {
544 Worktree::Local(worktree) => worktree.snapshot.completed_scan_id,
545 Worktree::Remote(worktree) => worktree.snapshot.completed_scan_id,
546 }
547 }
548
549 pub fn is_visible(&self) -> bool {
550 match self {
551 Worktree::Local(worktree) => worktree.visible,
552 Worktree::Remote(worktree) => worktree.visible,
553 }
554 }
555
556 pub fn replica_id(&self) -> ReplicaId {
557 match self {
558 Worktree::Local(_) => 0,
559 Worktree::Remote(worktree) => worktree.replica_id,
560 }
561 }
562
    /// Iterates all stored diagnostic summaries, flattened to one item per
    /// `(path, language server)` pair.
    pub fn diagnostic_summaries(
        &self,
    ) -> impl Iterator<Item = (Arc<Path>, LanguageServerId, DiagnosticSummary)> + '_ {
        match self {
            Worktree::Local(worktree) => &worktree.diagnostic_summaries,
            Worktree::Remote(worktree) => &worktree.diagnostic_summaries,
        }
        .iter()
        .flat_map(|(path, summaries)| {
            summaries
                .iter()
                .map(move |(&server_id, &summary)| (path.clone(), server_id, summary))
        })
    }
577
578 pub fn abs_path(&self) -> Arc<Path> {
579 match self {
580 Worktree::Local(worktree) => worktree.abs_path.clone(),
581 Worktree::Remote(worktree) => worktree.abs_path.clone(),
582 }
583 }
584}
585
586impl LocalWorktree {
    /// Whether the absolute `path` lies inside this worktree's root.
    pub fn contains_abs_path(&self, path: &Path) -> bool {
        path.starts_with(&self.abs_path)
    }
590
    /// Converts a worktree-relative path into an absolute one. An empty
    /// relative path (no file name) denotes the worktree root itself.
    fn absolutize(&self, path: &Path) -> PathBuf {
        if path.file_name().is_some() {
            self.abs_path.join(path)
        } else {
            self.abs_path.to_path_buf()
        }
    }
598
    /// Loads the file at `path` into a new [`Buffer`] model with the given
    /// remote `id`, eagerly computing its git diff against the index.
    pub(crate) fn load_buffer(
        &mut self,
        id: u64,
        path: &Path,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<ModelHandle<Buffer>>> {
        let path = Arc::from(path);
        cx.spawn(move |this, mut cx| async move {
            let (file, contents, diff_base) = this
                .update(&mut cx, |t, cx| t.as_local().unwrap().load(&path, cx))
                .await?;
            // Build the text buffer off the main thread before wrapping it
            // in a language-aware buffer model.
            let text_buffer = cx
                .background()
                .spawn(async move { text::Buffer::new(0, id, contents) })
                .await;
            Ok(cx.add_model(|cx| {
                let mut buffer = Buffer::build(text_buffer, diff_base, Some(Arc::new(file)));
                buffer.git_diff_recalc(cx);
                buffer
            }))
        })
    }
621
622 pub fn diagnostics_for_path(
623 &self,
624 path: &Path,
625 ) -> Vec<(
626 LanguageServerId,
627 Vec<DiagnosticEntry<Unclipped<PointUtf16>>>,
628 )> {
629 self.diagnostics.get(path).cloned().unwrap_or_default()
630 }
631
    /// Replaces the diagnostics produced by `server_id` for `worktree_path`,
    /// updating the per-path summary and notifying collaborators (when
    /// shared) if the summary changed. Returns whether the old or new
    /// summary was non-empty, i.e. whether anything observable changed.
    pub fn update_diagnostics(
        &mut self,
        server_id: LanguageServerId,
        worktree_path: Arc<Path>,
        diagnostics: Vec<DiagnosticEntry<Unclipped<PointUtf16>>>,
        _: &mut ModelContext<Worktree>,
    ) -> Result<bool> {
        let summaries_by_server_id = self
            .diagnostic_summaries
            .entry(worktree_path.clone())
            .or_default();

        let old_summary = summaries_by_server_id
            .remove(&server_id)
            .unwrap_or_default();

        let new_summary = DiagnosticSummary::new(&diagnostics);
        if new_summary.is_empty() {
            // No diagnostics left from this server: drop its slot, and drop
            // the whole path once no server reports anything for it.
            if let Some(diagnostics_by_server_id) = self.diagnostics.get_mut(&worktree_path) {
                if let Ok(ix) = diagnostics_by_server_id.binary_search_by_key(&server_id, |e| e.0) {
                    diagnostics_by_server_id.remove(ix);
                }
                if diagnostics_by_server_id.is_empty() {
                    self.diagnostics.remove(&worktree_path);
                }
            }
        } else {
            summaries_by_server_id.insert(server_id, new_summary);
            // The per-path Vec stays sorted by server id so binary search works.
            let diagnostics_by_server_id =
                self.diagnostics.entry(worktree_path.clone()).or_default();
            match diagnostics_by_server_id.binary_search_by_key(&server_id, |e| e.0) {
                Ok(ix) => {
                    diagnostics_by_server_id[ix] = (server_id, diagnostics);
                }
                Err(ix) => {
                    diagnostics_by_server_id.insert(ix, (server_id, diagnostics));
                }
            }
        }

        if !old_summary.is_empty() || !new_summary.is_empty() {
            if let Some(share) = self.share.as_ref() {
                self.client
                    .send(proto::UpdateDiagnosticSummary {
                        project_id: share.project_id,
                        worktree_id: self.id().to_proto(),
                        summary: Some(proto::DiagnosticSummary {
                            path: worktree_path.to_string_lossy().to_string(),
                            language_server_id: server_id.0 as u64,
                            error_count: new_summary.error_count as u32,
                            warning_count: new_summary.warning_count as u32,
                        }),
                    })
                    .log_err();
            }
        }

        Ok(!old_summary.is_empty() || !new_summary.is_empty())
    }
691
    /// Installs a snapshot produced by the background scanner, forwarding it
    /// to collaborators (when shared) and emitting an event if any git
    /// repositories changed between the old and new snapshots.
    fn set_snapshot(&mut self, new_snapshot: LocalSnapshot, cx: &mut ModelContext<Worktree>) {
        let updated_repos = Self::changed_repos(
            &self.snapshot.repository_entries,
            &new_snapshot.repository_entries,
        );
        self.snapshot = new_snapshot;

        if let Some(share) = self.share.as_mut() {
            *share.snapshots_tx.borrow_mut() = self.snapshot.clone();
        }

        if !updated_repos.is_empty() {
            cx.emit(Event::UpdatedGitRepositories(updated_repos));
        }
    }
707
    /// Computes the repository entries that differ (by work directory or
    /// scan id) between two snapshots' repository maps, in either direction:
    /// added, removed, or rescanned repositories.
    fn changed_repos(
        old_repos: &TreeMap<RepositoryWorkDirectory, RepositoryEntry>,
        new_repos: &TreeMap<RepositoryWorkDirectory, RepositoryEntry>,
    ) -> Vec<RepositoryEntry> {
        // Records every repo in `a` with no exact (work_directory, scan_id)
        // match in `b`.
        // NOTE(review): `find` advances `b` permanently, so this relies on
        // both iterators yielding repositories in a consistent order — TODO
        // confirm `TreeMap::values` ordering guarantees that.
        fn diff<'a>(
            a: impl Iterator<Item = &'a RepositoryEntry>,
            mut b: impl Iterator<Item = &'a RepositoryEntry>,
            updated: &mut HashMap<ProjectEntryId, RepositoryEntry>,
        ) {
            for a_repo in a {
                let matched = b.find(|b_repo| {
                    a_repo.work_directory == b_repo.work_directory
                        && a_repo.scan_id == b_repo.scan_id
                });

                if matched.is_none() {
                    updated.insert(*a_repo.work_directory, a_repo.clone());
                }
            }
        }

        // Keyed by work-directory id so a repo changed in both directions is
        // only reported once.
        let mut updated = HashMap::<ProjectEntryId, RepositoryEntry>::default();

        diff(old_repos.values(), new_repos.values(), &mut updated);
        diff(new_repos.values(), old_repos.values(), &mut updated);

        updated.into_values().collect()
    }
736
737 pub fn scan_complete(&self) -> impl Future<Output = ()> {
738 let mut is_scanning_rx = self.is_scanning.1.clone();
739 async move {
740 let mut is_scanning = is_scanning_rx.borrow().clone();
741 while is_scanning {
742 if let Some(value) = is_scanning_rx.recv().await {
743 is_scanning = value;
744 } else {
745 break;
746 }
747 }
748 }
749 }
750
    /// A clone of the full local snapshot, including git and ignore state.
    pub fn snapshot(&self) -> LocalSnapshot {
        self.snapshot.clone()
    }
754
    /// This worktree's identity and location, for sending to collaborators.
    pub fn metadata_proto(&self) -> proto::WorktreeMetadata {
        proto::WorktreeMetadata {
            id: self.id().to_proto(),
            root_name: self.root_name().to_string(),
            visible: self.visible,
            abs_path: self.abs_path().as_os_str().to_string_lossy().into(),
        }
    }
763
    /// Reads the file at the worktree-relative `path`, returning its `File`
    /// metadata, contents, and — when the file is inside a git repository —
    /// its index text to use as a diff base.
    fn load(
        &self,
        path: &Path,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<(File, String, Option<String>)>> {
        let handle = cx.handle();
        let path = Arc::from(path);
        let abs_path = self.absolutize(&path);
        let fs = self.fs.clone();
        let snapshot = self.snapshot();

        let mut index_task = None;

        // Kick off reading the git index text concurrently with the file read.
        if let Some(repo) = snapshot.repo_for(&path) {
            let repo_path = repo.work_directory.relativize(self, &path).unwrap();
            if let Some(repo) = self.git_repositories.get(&*repo.work_directory) {
                let repo = repo.repo_ptr.to_owned();
                index_task = Some(
                    cx.background()
                        .spawn(async move { repo.lock().load_index_text(&repo_path) }),
                );
            }
        }

        cx.spawn(|this, mut cx| async move {
            let text = fs.load(&abs_path).await?;

            let diff_base = if let Some(index_task) = index_task {
                index_task.await
            } else {
                None
            };

            // Eagerly populate the snapshot with an updated entry for the loaded file
            let entry = this
                .update(&mut cx, |this, cx| {
                    this.as_local().unwrap().refresh_entry(path, None, cx)
                })
                .await?;

            Ok((
                File {
                    entry_id: entry.id,
                    worktree: handle,
                    path: entry.path,
                    mtime: entry.mtime,
                    is_local: true,
                    is_deleted: false,
                },
                text,
                diff_base,
            ))
        })
    }
818
819 pub fn save_buffer(
820 &self,
821 buffer_handle: ModelHandle<Buffer>,
822 path: Arc<Path>,
823 has_changed_file: bool,
824 cx: &mut ModelContext<Worktree>,
825 ) -> Task<Result<(clock::Global, RopeFingerprint, SystemTime)>> {
826 let handle = cx.handle();
827 let buffer = buffer_handle.read(cx);
828
829 let rpc = self.client.clone();
830 let buffer_id = buffer.remote_id();
831 let project_id = self.share.as_ref().map(|share| share.project_id);
832
833 let text = buffer.as_rope().clone();
834 let fingerprint = text.fingerprint();
835 let version = buffer.version();
836 let save = self.write_file(path, text, buffer.line_ending(), cx);
837
838 cx.as_mut().spawn(|mut cx| async move {
839 let entry = save.await?;
840
841 if has_changed_file {
842 let new_file = Arc::new(File {
843 entry_id: entry.id,
844 worktree: handle,
845 path: entry.path,
846 mtime: entry.mtime,
847 is_local: true,
848 is_deleted: false,
849 });
850
851 if let Some(project_id) = project_id {
852 rpc.send(proto::UpdateBufferFile {
853 project_id,
854 buffer_id,
855 file: Some(new_file.to_proto()),
856 })
857 .log_err();
858 }
859
860 buffer_handle.update(&mut cx, |buffer, cx| {
861 if has_changed_file {
862 buffer.file_updated(new_file, cx).detach();
863 }
864 });
865 }
866
867 if let Some(project_id) = project_id {
868 rpc.send(proto::BufferSaved {
869 project_id,
870 buffer_id,
871 version: serialize_version(&version),
872 mtime: Some(entry.mtime.into()),
873 fingerprint: serialize_fingerprint(fingerprint),
874 })?;
875 }
876
877 buffer_handle.update(&mut cx, |buffer, cx| {
878 buffer.did_save(version.clone(), fingerprint, entry.mtime, cx);
879 });
880
881 Ok((version, fingerprint, entry.mtime))
882 })
883 }
884
    /// Creates an empty file or a directory at the worktree-relative `path`,
    /// then refreshes and returns the corresponding snapshot entry.
    pub fn create_entry(
        &self,
        path: impl Into<Arc<Path>>,
        is_dir: bool,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<Entry>> {
        let path = path.into();
        let abs_path = self.absolutize(&path);
        let fs = self.fs.clone();
        let write = cx.background().spawn(async move {
            if is_dir {
                fs.create_dir(&abs_path).await
            } else {
                // An empty file: default contents and line ending.
                fs.save(&abs_path, &Default::default(), Default::default())
                    .await
            }
        });

        cx.spawn(|this, mut cx| async move {
            write.await?;
            this.update(&mut cx, |this, cx| {
                this.as_local_mut().unwrap().refresh_entry(path, None, cx)
            })
            .await
        })
    }
911
    /// Writes `text` to the worktree-relative `path` with the given line
    /// ending, then refreshes and returns the file's snapshot entry.
    pub fn write_file(
        &self,
        path: impl Into<Arc<Path>>,
        text: Rope,
        line_ending: LineEnding,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<Entry>> {
        let path = path.into();
        let abs_path = self.absolutize(&path);
        let fs = self.fs.clone();
        let write = cx
            .background()
            .spawn(async move { fs.save(&abs_path, &text, line_ending).await });

        cx.spawn(|this, mut cx| async move {
            write.await?;
            this.update(&mut cx, |this, cx| {
                this.as_local_mut().unwrap().refresh_entry(path, None, cx)
            })
            .await
        })
    }
934
    /// Deletes the entry with `entry_id` from disk (recursively for
    /// directories), then waits for the scanner to observe the removal.
    /// Returns `None` if the entry is no longer in the snapshot.
    pub fn delete_entry(
        &self,
        entry_id: ProjectEntryId,
        cx: &mut ModelContext<Worktree>,
    ) -> Option<Task<Result<()>>> {
        let entry = self.entry_for_id(entry_id)?.clone();
        let abs_path = self.abs_path.clone();
        let fs = self.fs.clone();

        let delete = cx.background().spawn(async move {
            // Canonicalize the root so the path handed back to the scanner
            // matches the paths it watches.
            let mut abs_path = fs.canonicalize(&abs_path).await?;
            if entry.path.file_name().is_some() {
                abs_path = abs_path.join(&entry.path);
            }
            if entry.is_file() {
                fs.remove_file(&abs_path, Default::default()).await?;
            } else {
                fs.remove_dir(
                    &abs_path,
                    RemoveOptions {
                        recursive: true,
                        ignore_if_not_exists: false,
                    },
                )
                .await?;
            }
            anyhow::Ok(abs_path)
        });

        Some(cx.spawn(|this, mut cx| async move {
            let abs_path = delete.await?;
            // Ask the scanner to rescan the deleted path and wait for the
            // barrier so callers observe an updated snapshot.
            let (tx, mut rx) = barrier::channel();
            this.update(&mut cx, |this, _| {
                this.as_local_mut()
                    .unwrap()
                    .path_changes_tx
                    .try_send((vec![abs_path], tx))
            })?;
            rx.recv().await;
            Ok(())
        }))
    }
977
    /// Moves the entry with `entry_id` to `new_path` on disk, then refreshes
    /// both the old and new locations and returns the new snapshot entry.
    /// Returns `None` if the entry is no longer in the snapshot.
    pub fn rename_entry(
        &self,
        entry_id: ProjectEntryId,
        new_path: impl Into<Arc<Path>>,
        cx: &mut ModelContext<Worktree>,
    ) -> Option<Task<Result<Entry>>> {
        let old_path = self.entry_for_id(entry_id)?.path.clone();
        let new_path = new_path.into();
        let abs_old_path = self.absolutize(&old_path);
        let abs_new_path = self.absolutize(&new_path);
        let fs = self.fs.clone();
        let rename = cx.background().spawn(async move {
            fs.rename(&abs_old_path, &abs_new_path, Default::default())
                .await
        });

        Some(cx.spawn(|this, mut cx| async move {
            rename.await?;
            this.update(&mut cx, |this, cx| {
                this.as_local_mut()
                    .unwrap()
                    .refresh_entry(new_path.clone(), Some(old_path), cx)
            })
            .await
        }))
    }
1004
    /// Recursively copies the entry with `entry_id` to `new_path`, then
    /// refreshes and returns the new snapshot entry. Returns `None` if the
    /// entry is no longer in the snapshot.
    pub fn copy_entry(
        &self,
        entry_id: ProjectEntryId,
        new_path: impl Into<Arc<Path>>,
        cx: &mut ModelContext<Worktree>,
    ) -> Option<Task<Result<Entry>>> {
        let old_path = self.entry_for_id(entry_id)?.path.clone();
        let new_path = new_path.into();
        let abs_old_path = self.absolutize(&old_path);
        let abs_new_path = self.absolutize(&new_path);
        let fs = self.fs.clone();
        let copy = cx.background().spawn(async move {
            copy_recursive(
                fs.as_ref(),
                &abs_old_path,
                &abs_new_path,
                Default::default(),
            )
            .await
        });

        Some(cx.spawn(|this, mut cx| async move {
            copy.await?;
            this.update(&mut cx, |this, cx| {
                this.as_local_mut()
                    .unwrap()
                    .refresh_entry(new_path.clone(), None, cx)
            })
            .await
        }))
    }
1036
    /// Asks the background scanner to rescan `path` (and `old_path`, for
    /// renames), then returns the refreshed entry from the updated snapshot.
    fn refresh_entry(
        &self,
        path: Arc<Path>,
        old_path: Option<Arc<Path>>,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<Entry>> {
        let fs = self.fs.clone();
        let abs_root_path = self.abs_path.clone();
        let path_changes_tx = self.path_changes_tx.clone();
        cx.spawn_weak(move |this, mut cx| async move {
            let abs_path = fs.canonicalize(&abs_root_path).await?;
            let mut paths = Vec::with_capacity(2);
            // An empty relative path (no file name) denotes the root itself.
            paths.push(if path.file_name().is_some() {
                abs_path.join(&path)
            } else {
                abs_path.clone()
            });
            if let Some(old_path) = old_path {
                paths.push(if old_path.file_name().is_some() {
                    abs_path.join(&old_path)
                } else {
                    abs_path.clone()
                });
            }

            // Wait for the scanner to process the rescan request (signaled
            // when the barrier sender is dropped).
            let (tx, mut rx) = barrier::channel();
            path_changes_tx.try_send((paths, tx))?;
            rx.recv().await;
            this.upgrade(&cx)
                .ok_or_else(|| anyhow!("worktree was dropped"))?
                .update(&mut cx, |this, _| {
                    this.entry_for_path(path)
                        .cloned()
                        .ok_or_else(|| anyhow!("failed to read path after update"))
                })
        })
    }
1074
    /// Begins (or resumes) sharing this worktree with collaborators under
    /// `project_id`. The returned task resolves once the first snapshot
    /// update has been acknowledged by the server.
    pub fn share(&mut self, project_id: u64, cx: &mut ModelContext<Worktree>) -> Task<Result<()>> {
        let (share_tx, share_rx) = oneshot::channel();

        if let Some(share) = self.share.as_mut() {
            // Already sharing: resolve immediately and nudge the update loop
            // to retry any updates that previously failed to send.
            let _ = share_tx.send(());
            *share.resume_updates.borrow_mut() = ();
        } else {
            let (snapshots_tx, mut snapshots_rx) = watch::channel_with(self.snapshot());
            let (resume_updates_tx, mut resume_updates_rx) = watch::channel();
            let worktree_id = cx.model_id() as u64;

            // Push all known diagnostic summaries to the server up front.
            for (path, summaries) in &self.diagnostic_summaries {
                for (&server_id, summary) in summaries {
                    if let Err(e) = self.client.send(proto::UpdateDiagnosticSummary {
                        project_id,
                        worktree_id,
                        summary: Some(summary.to_proto(server_id, &path)),
                    }) {
                        return Task::ready(Err(e));
                    }
                }
            }

            let _maintain_remote_snapshot = cx.background().spawn({
                let client = self.client.clone();
                async move {
                    let mut share_tx = Some(share_tx);
                    // Start from an empty snapshot so the first update sent
                    // to the server describes the entire worktree.
                    let mut prev_snapshot = LocalSnapshot {
                        ignores_by_parent_abs_path: Default::default(),
                        removed_entry_ids: Default::default(),
                        next_entry_id: Default::default(),
                        git_repositories: Default::default(),
                        snapshot: Snapshot {
                            id: WorktreeId(worktree_id as usize),
                            abs_path: Path::new("").into(),
                            root_name: Default::default(),
                            root_char_bag: Default::default(),
                            entries_by_path: Default::default(),
                            entries_by_id: Default::default(),
                            repository_entries: Default::default(),
                            scan_id: 0,
                            completed_scan_id: 0,
                        },
                    };
                    while let Some(snapshot) = snapshots_rx.recv().await {
                        #[cfg(any(test, feature = "test-support"))]
                        const MAX_CHUNK_SIZE: usize = 2;
                        #[cfg(not(any(test, feature = "test-support")))]
                        const MAX_CHUNK_SIZE: usize = 256;

                        // Send the delta since the last acknowledged snapshot,
                        // split into bounded chunks.
                        let update =
                            snapshot.build_update(&prev_snapshot, project_id, worktree_id, true);
                        for update in proto::split_worktree_update(update, MAX_CHUNK_SIZE) {
                            let _ = resume_updates_rx.try_recv();
                            // Retry each chunk until it sends, pausing until
                            // `resume_updates` is signaled after a failure.
                            while let Err(error) = client.request(update.clone()).await {
                                log::error!("failed to send worktree update: {}", error);
                                log::info!("waiting to resume updates");
                                if resume_updates_rx.next().await.is_none() {
                                    return Ok(());
                                }
                            }
                        }

                        // First successful update: resolve the share task.
                        if let Some(share_tx) = share_tx.take() {
                            let _ = share_tx.send(());
                        }

                        prev_snapshot = snapshot;
                    }

                    Ok::<_, anyhow::Error>(())
                }
                .log_err()
            });

            self.share = Some(ShareState {
                project_id,
                snapshots_tx,
                resume_updates: resume_updates_tx,
                _maintain_remote_snapshot,
            });
        }

        cx.foreground()
            .spawn(async move { share_rx.await.map_err(|_| anyhow!("share ended")) })
    }
1161
1162 pub fn unshare(&mut self) {
1163 self.share.take();
1164 }
1165
1166 pub fn is_shared(&self) -> bool {
1167 self.share.is_some()
1168 }
1169
1170 pub fn load_index_text(
1171 &self,
1172 repo: RepositoryEntry,
1173 repo_path: RepoPath,
1174 cx: &mut ModelContext<Worktree>,
1175 ) -> Task<Option<String>> {
1176 let Some(git_ptr) = self.git_repositories.get(&repo.work_directory).map(|git_ptr| git_ptr.to_owned()) else {
1177 return Task::Ready(Some(None))
1178 };
1179 let git_ptr = git_ptr.repo_ptr;
1180
1181 cx.background()
1182 .spawn(async move { git_ptr.lock().load_index_text(&repo_path) })
1183 }
1184}
1185
impl RemoteWorktree {
    /// Returns a copy of the snapshot replicated from the host.
    fn snapshot(&self) -> Snapshot {
        self.snapshot.clone()
    }

    /// Called when the connection to the host is lost. Dropping `updates_tx`
    /// makes subsequent `update_from_remote` calls no-ops, and clearing the
    /// subscriptions drops their senders, which fails any pending
    /// `wait_for_snapshot` futures.
    pub fn disconnected_from_host(&mut self) {
        self.updates_tx.take();
        self.snapshot_subscriptions.clear();
        self.disconnected = true;
    }

    /// Asks the host to save the buffer's contents, then marks the local
    /// buffer as saved using the version, fingerprint, and mtime the host
    /// reports back.
    pub fn save_buffer(
        &self,
        buffer_handle: ModelHandle<Buffer>,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<(clock::Global, RopeFingerprint, SystemTime)>> {
        let buffer = buffer_handle.read(cx);
        let buffer_id = buffer.remote_id();
        let version = buffer.version();
        let rpc = self.client.clone();
        let project_id = self.project_id;
        cx.as_mut().spawn(|mut cx| async move {
            let response = rpc
                .request(proto::SaveBuffer {
                    project_id,
                    buffer_id,
                    version: serialize_version(&version),
                })
                .await?;
            let version = deserialize_version(&response.version);
            let fingerprint = deserialize_fingerprint(&response.fingerprint)?;
            let mtime = response
                .mtime
                .ok_or_else(|| anyhow!("missing mtime"))?
                .into();

            buffer_handle.update(&mut cx, |buffer, cx| {
                buffer.did_save(version.clone(), fingerprint, mtime, cx);
            });

            Ok((version, fingerprint, mtime))
        })
    }

    /// Queues a worktree update received from the host. Updates arriving
    /// after `disconnected_from_host` are silently dropped (the sender is
    /// gone).
    pub fn update_from_remote(&mut self, update: proto::UpdateWorktree) {
        if let Some(updates_tx) = &self.updates_tx {
            updates_tx
                .unbounded_send(update)
                .expect("consumer runs to completion");
        }
    }

    /// Whether this replica has fully applied the snapshot with the given
    /// scan id (or a newer one).
    fn observed_snapshot(&self, scan_id: usize) -> bool {
        self.completed_scan_id >= scan_id
    }

    /// Returns a future that resolves once this replica has caught up to
    /// `scan_id`. The future fails if the worktree disconnects first,
    /// because the corresponding sender is dropped and `rx.await` errors.
    fn wait_for_snapshot(&mut self, scan_id: usize) -> impl Future<Output = Result<()>> {
        let (tx, rx) = oneshot::channel();
        if self.observed_snapshot(scan_id) {
            // Already caught up; resolve immediately.
            let _ = tx.send(());
        } else if self.disconnected {
            // We will never catch up; dropping `tx` fails the future.
            drop(tx);
        } else {
            // Insert the waiter keeping the list sorted by scan id. `Ok` and
            // `Err` both give a valid insertion point, so duplicates of the
            // same scan id are fine.
            match self
                .snapshot_subscriptions
                .binary_search_by_key(&scan_id, |probe| probe.0)
            {
                Ok(ix) | Err(ix) => self.snapshot_subscriptions.insert(ix, (scan_id, tx)),
            }
        }

        async move {
            rx.await?;
            Ok(())
        }
    }

    /// Applies a diagnostic summary received from the host. An empty summary
    /// removes the server's entry for `path`, deleting the whole per-path map
    /// when no servers remain.
    pub fn update_diagnostic_summary(
        &mut self,
        path: Arc<Path>,
        summary: &proto::DiagnosticSummary,
    ) {
        let server_id = LanguageServerId(summary.language_server_id as usize);
        let summary = DiagnosticSummary {
            error_count: summary.error_count as usize,
            warning_count: summary.warning_count as usize,
        };

        if summary.is_empty() {
            if let Some(summaries) = self.diagnostic_summaries.get_mut(&path) {
                summaries.remove(&server_id);
                if summaries.is_empty() {
                    self.diagnostic_summaries.remove(&path);
                }
            }
        } else {
            self.diagnostic_summaries
                .entry(path)
                .or_default()
                .insert(server_id, summary);
        }
    }

    /// Inserts `entry` into the replicated snapshot once this replica has
    /// observed `scan_id`, returning the inserted entry.
    pub fn insert_entry(
        &mut self,
        entry: proto::Entry,
        scan_id: usize,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<Entry>> {
        let wait_for_snapshot = self.wait_for_snapshot(scan_id);
        cx.spawn(|this, mut cx| async move {
            wait_for_snapshot.await?;
            this.update(&mut cx, |worktree, _| {
                let worktree = worktree.as_remote_mut().unwrap();
                let mut snapshot = worktree.background_snapshot.lock();
                let entry = snapshot.insert_entry(entry);
                // Keep the foreground snapshot in sync with the background one.
                worktree.snapshot = snapshot.clone();
                entry
            })
        })
    }

    /// Deletes the entry with `id` from the replicated snapshot once this
    /// replica has observed `scan_id`.
    pub(crate) fn delete_entry(
        &mut self,
        id: ProjectEntryId,
        scan_id: usize,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<()>> {
        let wait_for_snapshot = self.wait_for_snapshot(scan_id);
        cx.spawn(|this, mut cx| async move {
            wait_for_snapshot.await?;
            this.update(&mut cx, |worktree, _| {
                let worktree = worktree.as_remote_mut().unwrap();
                let mut snapshot = worktree.background_snapshot.lock();
                snapshot.delete_entry(id);
                worktree.snapshot = snapshot.clone();
            });
            Ok(())
        })
    }
}
1327
1328impl Snapshot {
1329 pub fn id(&self) -> WorktreeId {
1330 self.id
1331 }
1332
1333 pub fn abs_path(&self) -> &Arc<Path> {
1334 &self.abs_path
1335 }
1336
1337 pub fn contains_entry(&self, entry_id: ProjectEntryId) -> bool {
1338 self.entries_by_id.get(&entry_id, &()).is_some()
1339 }
1340
1341 pub(crate) fn insert_entry(&mut self, entry: proto::Entry) -> Result<Entry> {
1342 let entry = Entry::try_from((&self.root_char_bag, entry))?;
1343 let old_entry = self.entries_by_id.insert_or_replace(
1344 PathEntry {
1345 id: entry.id,
1346 path: entry.path.clone(),
1347 is_ignored: entry.is_ignored,
1348 scan_id: 0,
1349 },
1350 &(),
1351 );
1352 if let Some(old_entry) = old_entry {
1353 self.entries_by_path.remove(&PathKey(old_entry.path), &());
1354 }
1355 self.entries_by_path.insert_or_replace(entry.clone(), &());
1356 Ok(entry)
1357 }
1358
1359 fn delete_entry(&mut self, entry_id: ProjectEntryId) -> Option<Arc<Path>> {
1360 let removed_entry = self.entries_by_id.remove(&entry_id, &())?;
1361 self.entries_by_path = {
1362 let mut cursor = self.entries_by_path.cursor();
1363 let mut new_entries_by_path =
1364 cursor.slice(&TraversalTarget::Path(&removed_entry.path), Bias::Left, &());
1365 while let Some(entry) = cursor.item() {
1366 if entry.path.starts_with(&removed_entry.path) {
1367 self.entries_by_id.remove(&entry.id, &());
1368 cursor.next(&());
1369 } else {
1370 break;
1371 }
1372 }
1373 new_entries_by_path.push_tree(cursor.suffix(&()), &());
1374 new_entries_by_path
1375 };
1376
1377 Some(removed_entry.path)
1378 }
1379
1380 pub(crate) fn apply_remote_update(&mut self, mut update: proto::UpdateWorktree) -> Result<()> {
1381 let mut entries_by_path_edits = Vec::new();
1382 let mut entries_by_id_edits = Vec::new();
1383 for entry_id in update.removed_entries {
1384 if let Some(entry) = self.entry_for_id(ProjectEntryId::from_proto(entry_id)) {
1385 entries_by_path_edits.push(Edit::Remove(PathKey(entry.path.clone())));
1386 entries_by_id_edits.push(Edit::Remove(entry.id));
1387 }
1388 }
1389
1390 for entry in update.updated_entries {
1391 let entry = Entry::try_from((&self.root_char_bag, entry))?;
1392 if let Some(PathEntry { path, .. }) = self.entries_by_id.get(&entry.id, &()) {
1393 entries_by_path_edits.push(Edit::Remove(PathKey(path.clone())));
1394 }
1395 entries_by_id_edits.push(Edit::Insert(PathEntry {
1396 id: entry.id,
1397 path: entry.path.clone(),
1398 is_ignored: entry.is_ignored,
1399 scan_id: 0,
1400 }));
1401 entries_by_path_edits.push(Edit::Insert(entry));
1402 }
1403
1404 self.entries_by_path.edit(entries_by_path_edits, &());
1405 self.entries_by_id.edit(entries_by_id_edits, &());
1406
1407 update.removed_repositories.sort_unstable();
1408 self.repository_entries.retain(|_, entry| {
1409 if let Ok(_) = update
1410 .removed_repositories
1411 .binary_search(&entry.work_directory.to_proto())
1412 {
1413 false
1414 } else {
1415 true
1416 }
1417 });
1418
1419 for repository in update.updated_repositories {
1420 let repository = RepositoryEntry {
1421 work_directory: ProjectEntryId::from_proto(repository.work_directory_id).into(),
1422 scan_id: repository.scan_id as usize,
1423 branch: repository.branch.map(Into::into),
1424 };
1425 // TODO: Double check this logic
1426 if let Some(entry) = self.entry_for_id(repository.work_directory_id()) {
1427 self.repository_entries
1428 .insert(RepositoryWorkDirectory(entry.path.clone()), repository)
1429 }
1430 }
1431
1432 self.scan_id = update.scan_id as usize;
1433 if update.is_last_update {
1434 self.completed_scan_id = update.scan_id as usize;
1435 }
1436
1437 Ok(())
1438 }
1439
1440 pub fn file_count(&self) -> usize {
1441 self.entries_by_path.summary().file_count
1442 }
1443
1444 pub fn visible_file_count(&self) -> usize {
1445 self.entries_by_path.summary().visible_file_count
1446 }
1447
1448 fn traverse_from_offset(
1449 &self,
1450 include_dirs: bool,
1451 include_ignored: bool,
1452 start_offset: usize,
1453 ) -> Traversal {
1454 let mut cursor = self.entries_by_path.cursor();
1455 cursor.seek(
1456 &TraversalTarget::Count {
1457 count: start_offset,
1458 include_dirs,
1459 include_ignored,
1460 },
1461 Bias::Right,
1462 &(),
1463 );
1464 Traversal {
1465 cursor,
1466 include_dirs,
1467 include_ignored,
1468 }
1469 }
1470
1471 fn traverse_from_path(
1472 &self,
1473 include_dirs: bool,
1474 include_ignored: bool,
1475 path: &Path,
1476 ) -> Traversal {
1477 let mut cursor = self.entries_by_path.cursor();
1478 cursor.seek(&TraversalTarget::Path(path), Bias::Left, &());
1479 Traversal {
1480 cursor,
1481 include_dirs,
1482 include_ignored,
1483 }
1484 }
1485
1486 pub fn files(&self, include_ignored: bool, start: usize) -> Traversal {
1487 self.traverse_from_offset(false, include_ignored, start)
1488 }
1489
1490 pub fn entries(&self, include_ignored: bool) -> Traversal {
1491 self.traverse_from_offset(true, include_ignored, 0)
1492 }
1493
1494 pub fn repositories(&self) -> impl Iterator<Item = &RepositoryEntry> {
1495 self.repository_entries.values()
1496 }
1497
1498 pub fn paths(&self) -> impl Iterator<Item = &Arc<Path>> {
1499 let empty_path = Path::new("");
1500 self.entries_by_path
1501 .cursor::<()>()
1502 .filter(move |entry| entry.path.as_ref() != empty_path)
1503 .map(|entry| &entry.path)
1504 }
1505
1506 fn child_entries<'a>(&'a self, parent_path: &'a Path) -> ChildEntriesIter<'a> {
1507 let mut cursor = self.entries_by_path.cursor();
1508 cursor.seek(&TraversalTarget::Path(parent_path), Bias::Right, &());
1509 let traversal = Traversal {
1510 cursor,
1511 include_dirs: true,
1512 include_ignored: true,
1513 };
1514 ChildEntriesIter {
1515 traversal,
1516 parent_path,
1517 }
1518 }
1519
1520 pub fn root_entry(&self) -> Option<&Entry> {
1521 self.entry_for_path("")
1522 }
1523
1524 pub fn root_name(&self) -> &str {
1525 &self.root_name
1526 }
1527
1528 pub fn root_git_entry(&self) -> Option<RepositoryEntry> {
1529 self.repository_entries
1530 .get(&RepositoryWorkDirectory(Path::new("").into()))
1531 .map(|entry| entry.to_owned())
1532 }
1533
1534 pub fn git_entries(&self) -> impl Iterator<Item = &RepositoryEntry> {
1535 self.repository_entries.values()
1536 }
1537
1538 pub fn scan_id(&self) -> usize {
1539 self.scan_id
1540 }
1541
1542 pub fn entry_for_path(&self, path: impl AsRef<Path>) -> Option<&Entry> {
1543 let path = path.as_ref();
1544 self.traverse_from_path(true, true, path)
1545 .entry()
1546 .and_then(|entry| {
1547 if entry.path.as_ref() == path {
1548 Some(entry)
1549 } else {
1550 None
1551 }
1552 })
1553 }
1554
1555 pub fn entry_for_id(&self, id: ProjectEntryId) -> Option<&Entry> {
1556 let entry = self.entries_by_id.get(&id, &())?;
1557 self.entry_for_path(&entry.path)
1558 }
1559
1560 pub fn inode_for_path(&self, path: impl AsRef<Path>) -> Option<u64> {
1561 self.entry_for_path(path.as_ref()).map(|e| e.inode)
1562 }
1563}
1564
1565impl LocalSnapshot {
1566 pub(crate) fn repo_for(&self, path: &Path) -> Option<RepositoryEntry> {
1567 let mut max_len = 0;
1568 let mut current_candidate = None;
1569 for (work_directory, repo) in (&self.repository_entries).iter() {
1570 if repo.contains(self, path) {
1571 if work_directory.0.as_os_str().len() >= max_len {
1572 current_candidate = Some(repo);
1573 max_len = work_directory.0.as_os_str().len();
1574 } else {
1575 break;
1576 }
1577 }
1578 }
1579
1580 current_candidate.map(|entry| entry.to_owned())
1581 }
1582
1583 pub(crate) fn repo_for_metadata(
1584 &self,
1585 path: &Path,
1586 ) -> Option<(RepositoryWorkDirectory, Arc<Mutex<dyn GitRepository>>)> {
1587 let (entry_id, local_repo) = self
1588 .git_repositories
1589 .iter()
1590 .find(|(_, repo)| repo.in_dot_git(path))?;
1591
1592 let work_dir = self
1593 .snapshot
1594 .repository_entries
1595 .iter()
1596 .find(|(_, entry)| *entry.work_directory == *entry_id)
1597 .and_then(|(_, entry)| entry.work_directory(self))?;
1598
1599 Some((work_dir, local_repo.repo_ptr.to_owned()))
1600 }
1601
1602 #[cfg(test)]
1603 pub(crate) fn build_initial_update(&self, project_id: u64) -> proto::UpdateWorktree {
1604 let root_name = self.root_name.clone();
1605 proto::UpdateWorktree {
1606 project_id,
1607 worktree_id: self.id().to_proto(),
1608 abs_path: self.abs_path().to_string_lossy().into(),
1609 root_name,
1610 updated_entries: self.entries_by_path.iter().map(Into::into).collect(),
1611 removed_entries: Default::default(),
1612 scan_id: self.scan_id as u64,
1613 is_last_update: true,
1614 updated_repositories: self.repository_entries.values().map(Into::into).collect(),
1615 removed_repositories: Default::default(),
1616 }
1617 }
1618
1619 pub(crate) fn build_update(
1620 &self,
1621 other: &Self,
1622 project_id: u64,
1623 worktree_id: u64,
1624 include_ignored: bool,
1625 ) -> proto::UpdateWorktree {
1626 let mut updated_entries = Vec::new();
1627 let mut removed_entries = Vec::new();
1628 let mut self_entries = self
1629 .entries_by_id
1630 .cursor::<()>()
1631 .filter(|e| include_ignored || !e.is_ignored)
1632 .peekable();
1633 let mut other_entries = other
1634 .entries_by_id
1635 .cursor::<()>()
1636 .filter(|e| include_ignored || !e.is_ignored)
1637 .peekable();
1638 loop {
1639 match (self_entries.peek(), other_entries.peek()) {
1640 (Some(self_entry), Some(other_entry)) => {
1641 match Ord::cmp(&self_entry.id, &other_entry.id) {
1642 Ordering::Less => {
1643 let entry = self.entry_for_id(self_entry.id).unwrap().into();
1644 updated_entries.push(entry);
1645 self_entries.next();
1646 }
1647 Ordering::Equal => {
1648 if self_entry.scan_id != other_entry.scan_id {
1649 let entry = self.entry_for_id(self_entry.id).unwrap().into();
1650 updated_entries.push(entry);
1651 }
1652
1653 self_entries.next();
1654 other_entries.next();
1655 }
1656 Ordering::Greater => {
1657 removed_entries.push(other_entry.id.to_proto());
1658 other_entries.next();
1659 }
1660 }
1661 }
1662 (Some(self_entry), None) => {
1663 let entry = self.entry_for_id(self_entry.id).unwrap().into();
1664 updated_entries.push(entry);
1665 self_entries.next();
1666 }
1667 (None, Some(other_entry)) => {
1668 removed_entries.push(other_entry.id.to_proto());
1669 other_entries.next();
1670 }
1671 (None, None) => break,
1672 }
1673 }
1674
1675 let mut updated_repositories: Vec<proto::RepositoryEntry> = Vec::new();
1676 let mut removed_repositories = Vec::new();
1677 let mut self_repos = self.snapshot.repository_entries.values().peekable();
1678 let mut other_repos = other.snapshot.repository_entries.values().peekable();
1679 loop {
1680 match (self_repos.peek(), other_repos.peek()) {
1681 (Some(self_repo), Some(other_repo)) => {
1682 match Ord::cmp(&self_repo.work_directory, &other_repo.work_directory) {
1683 Ordering::Less => {
1684 updated_repositories.push((*self_repo).into());
1685 self_repos.next();
1686 }
1687 Ordering::Equal => {
1688 if self_repo.scan_id != other_repo.scan_id {
1689 updated_repositories.push((*self_repo).into());
1690 }
1691
1692 self_repos.next();
1693 other_repos.next();
1694 }
1695 Ordering::Greater => {
1696 removed_repositories.push(other_repo.work_directory.to_proto());
1697 other_repos.next();
1698 }
1699 }
1700 }
1701 (Some(self_repo), None) => {
1702 updated_repositories.push((*self_repo).into());
1703 self_repos.next();
1704 }
1705 (None, Some(other_repo)) => {
1706 removed_repositories.push(other_repo.work_directory.to_proto());
1707 other_repos.next();
1708 }
1709 (None, None) => break,
1710 }
1711 }
1712
1713 proto::UpdateWorktree {
1714 project_id,
1715 worktree_id,
1716 abs_path: self.abs_path().to_string_lossy().into(),
1717 root_name: self.root_name().to_string(),
1718 updated_entries,
1719 removed_entries,
1720 scan_id: self.scan_id as u64,
1721 is_last_update: self.completed_scan_id == self.scan_id,
1722 updated_repositories,
1723 removed_repositories,
1724 }
1725 }
1726
1727 fn insert_entry(&mut self, mut entry: Entry, fs: &dyn Fs) -> Entry {
1728 if entry.is_file() && entry.path.file_name() == Some(&GITIGNORE) {
1729 let abs_path = self.abs_path.join(&entry.path);
1730 match smol::block_on(build_gitignore(&abs_path, fs)) {
1731 Ok(ignore) => {
1732 self.ignores_by_parent_abs_path.insert(
1733 abs_path.parent().unwrap().into(),
1734 (Arc::new(ignore), self.scan_id),
1735 );
1736 }
1737 Err(error) => {
1738 log::error!(
1739 "error loading .gitignore file {:?} - {:?}",
1740 &entry.path,
1741 error
1742 );
1743 }
1744 }
1745 }
1746
1747 self.reuse_entry_id(&mut entry);
1748
1749 if entry.kind == EntryKind::PendingDir {
1750 if let Some(existing_entry) =
1751 self.entries_by_path.get(&PathKey(entry.path.clone()), &())
1752 {
1753 entry.kind = existing_entry.kind;
1754 }
1755 }
1756
1757 let scan_id = self.scan_id;
1758 let removed = self.entries_by_path.insert_or_replace(entry.clone(), &());
1759 if let Some(removed) = removed {
1760 if removed.id != entry.id {
1761 self.entries_by_id.remove(&removed.id, &());
1762 }
1763 }
1764 self.entries_by_id.insert_or_replace(
1765 PathEntry {
1766 id: entry.id,
1767 path: entry.path.clone(),
1768 is_ignored: entry.is_ignored,
1769 scan_id,
1770 },
1771 &(),
1772 );
1773
1774 entry
1775 }
1776
1777 fn populate_dir(
1778 &mut self,
1779 parent_path: Arc<Path>,
1780 entries: impl IntoIterator<Item = Entry>,
1781 ignore: Option<Arc<Gitignore>>,
1782 fs: &dyn Fs,
1783 ) {
1784 let mut parent_entry = if let Some(parent_entry) =
1785 self.entries_by_path.get(&PathKey(parent_path.clone()), &())
1786 {
1787 parent_entry.clone()
1788 } else {
1789 log::warn!(
1790 "populating a directory {:?} that has been removed",
1791 parent_path
1792 );
1793 return;
1794 };
1795
1796 match parent_entry.kind {
1797 EntryKind::PendingDir => {
1798 parent_entry.kind = EntryKind::Dir;
1799 }
1800 EntryKind::Dir => {}
1801 _ => return,
1802 }
1803
1804 if let Some(ignore) = ignore {
1805 self.ignores_by_parent_abs_path.insert(
1806 self.abs_path.join(&parent_path).into(),
1807 (ignore, self.scan_id),
1808 );
1809 }
1810
1811 if parent_path.file_name() == Some(&DOT_GIT) {
1812 let abs_path = self.abs_path.join(&parent_path);
1813 let content_path: Arc<Path> = parent_path.parent().unwrap().into();
1814
1815 if let Some(work_dir_id) = self
1816 .entry_for_path(content_path.clone())
1817 .map(|entry| entry.id)
1818 {
1819 let key = RepositoryWorkDirectory(content_path.clone());
1820 if self.repository_entries.get(&key).is_none() {
1821 if let Some(repo) = fs.open_repo(abs_path.as_path()) {
1822 let repo_lock = repo.lock();
1823 self.repository_entries.insert(
1824 key.clone(),
1825 RepositoryEntry {
1826 work_directory: work_dir_id.into(),
1827 scan_id: 0,
1828 branch: repo_lock.branch_name().map(Into::into),
1829 },
1830 );
1831 drop(repo_lock);
1832
1833 self.git_repositories.insert(
1834 work_dir_id,
1835 LocalRepositoryEntry {
1836 repo_ptr: repo,
1837 git_dir_path: parent_path.clone(),
1838 },
1839 )
1840 }
1841 }
1842 }
1843 }
1844
1845 let mut entries_by_path_edits = vec![Edit::Insert(parent_entry)];
1846 let mut entries_by_id_edits = Vec::new();
1847
1848 for mut entry in entries {
1849 self.reuse_entry_id(&mut entry);
1850 entries_by_id_edits.push(Edit::Insert(PathEntry {
1851 id: entry.id,
1852 path: entry.path.clone(),
1853 is_ignored: entry.is_ignored,
1854 scan_id: self.scan_id,
1855 }));
1856 entries_by_path_edits.push(Edit::Insert(entry));
1857 }
1858
1859 self.entries_by_path.edit(entries_by_path_edits, &());
1860 self.entries_by_id.edit(entries_by_id_edits, &());
1861 }
1862
1863 fn reuse_entry_id(&mut self, entry: &mut Entry) {
1864 if let Some(removed_entry_id) = self.removed_entry_ids.remove(&entry.inode) {
1865 entry.id = removed_entry_id;
1866 } else if let Some(existing_entry) = self.entry_for_path(&entry.path) {
1867 entry.id = existing_entry.id;
1868 }
1869 }
1870
1871 fn remove_path(&mut self, path: &Path) {
1872 let mut new_entries;
1873 let removed_entries;
1874 {
1875 let mut cursor = self.entries_by_path.cursor::<TraversalProgress>();
1876 new_entries = cursor.slice(&TraversalTarget::Path(path), Bias::Left, &());
1877 removed_entries = cursor.slice(&TraversalTarget::PathSuccessor(path), Bias::Left, &());
1878 new_entries.push_tree(cursor.suffix(&()), &());
1879 }
1880 self.entries_by_path = new_entries;
1881
1882 let mut entries_by_id_edits = Vec::new();
1883 for entry in removed_entries.cursor::<()>() {
1884 let removed_entry_id = self
1885 .removed_entry_ids
1886 .entry(entry.inode)
1887 .or_insert(entry.id);
1888 *removed_entry_id = cmp::max(*removed_entry_id, entry.id);
1889 entries_by_id_edits.push(Edit::Remove(entry.id));
1890 }
1891 self.entries_by_id.edit(entries_by_id_edits, &());
1892
1893 if path.file_name() == Some(&GITIGNORE) {
1894 let abs_parent_path = self.abs_path.join(path.parent().unwrap());
1895 if let Some((_, scan_id)) = self
1896 .ignores_by_parent_abs_path
1897 .get_mut(abs_parent_path.as_path())
1898 {
1899 *scan_id = self.snapshot.scan_id;
1900 }
1901 } else if path.file_name() == Some(&DOT_GIT) {
1902 let repo_entry_key = RepositoryWorkDirectory(path.parent().unwrap().into());
1903 self.snapshot
1904 .repository_entries
1905 .update(&repo_entry_key, |repo| repo.scan_id = self.snapshot.scan_id);
1906 }
1907 }
1908
1909 fn ancestor_inodes_for_path(&self, path: &Path) -> TreeSet<u64> {
1910 let mut inodes = TreeSet::default();
1911 for ancestor in path.ancestors().skip(1) {
1912 if let Some(entry) = self.entry_for_path(ancestor) {
1913 inodes.insert(entry.inode);
1914 }
1915 }
1916 inodes
1917 }
1918
1919 fn ignore_stack_for_abs_path(&self, abs_path: &Path, is_dir: bool) -> Arc<IgnoreStack> {
1920 let mut new_ignores = Vec::new();
1921 for ancestor in abs_path.ancestors().skip(1) {
1922 if let Some((ignore, _)) = self.ignores_by_parent_abs_path.get(ancestor) {
1923 new_ignores.push((ancestor, Some(ignore.clone())));
1924 } else {
1925 new_ignores.push((ancestor, None));
1926 }
1927 }
1928
1929 let mut ignore_stack = IgnoreStack::none();
1930 for (parent_abs_path, ignore) in new_ignores.into_iter().rev() {
1931 if ignore_stack.is_abs_path_ignored(parent_abs_path, true) {
1932 ignore_stack = IgnoreStack::all();
1933 break;
1934 } else if let Some(ignore) = ignore {
1935 ignore_stack = ignore_stack.append(parent_abs_path.into(), ignore);
1936 }
1937 }
1938
1939 if ignore_stack.is_abs_path_ignored(abs_path, is_dir) {
1940 ignore_stack = IgnoreStack::all();
1941 }
1942
1943 ignore_stack
1944 }
1945}
1946
1947async fn build_gitignore(abs_path: &Path, fs: &dyn Fs) -> Result<Gitignore> {
1948 let contents = fs.load(abs_path).await?;
1949 let parent = abs_path.parent().unwrap_or_else(|| Path::new("/"));
1950 let mut builder = GitignoreBuilder::new(parent);
1951 for line in contents.lines() {
1952 builder.add_line(Some(abs_path.into()), line)?;
1953 }
1954 Ok(builder.build()?)
1955}
1956
1957impl WorktreeId {
1958 pub fn from_usize(handle_id: usize) -> Self {
1959 Self(handle_id)
1960 }
1961
1962 pub(crate) fn from_proto(id: u64) -> Self {
1963 Self(id as usize)
1964 }
1965
1966 pub fn to_proto(&self) -> u64 {
1967 self.0 as u64
1968 }
1969
1970 pub fn to_usize(&self) -> usize {
1971 self.0
1972 }
1973}
1974
1975impl fmt::Display for WorktreeId {
1976 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1977 self.0.fmt(f)
1978 }
1979}
1980
1981impl Deref for Worktree {
1982 type Target = Snapshot;
1983
1984 fn deref(&self) -> &Self::Target {
1985 match self {
1986 Worktree::Local(worktree) => &worktree.snapshot,
1987 Worktree::Remote(worktree) => &worktree.snapshot,
1988 }
1989 }
1990}
1991
1992impl Deref for LocalWorktree {
1993 type Target = LocalSnapshot;
1994
1995 fn deref(&self) -> &Self::Target {
1996 &self.snapshot
1997 }
1998}
1999
2000impl Deref for RemoteWorktree {
2001 type Target = Snapshot;
2002
2003 fn deref(&self) -> &Self::Target {
2004 &self.snapshot
2005 }
2006}
2007
2008impl fmt::Debug for LocalWorktree {
2009 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2010 self.snapshot.fmt(f)
2011 }
2012}
2013
2014impl fmt::Debug for Snapshot {
2015 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2016 struct EntriesById<'a>(&'a SumTree<PathEntry>);
2017 struct EntriesByPath<'a>(&'a SumTree<Entry>);
2018
2019 impl<'a> fmt::Debug for EntriesByPath<'a> {
2020 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2021 f.debug_map()
2022 .entries(self.0.iter().map(|entry| (&entry.path, entry.id)))
2023 .finish()
2024 }
2025 }
2026
2027 impl<'a> fmt::Debug for EntriesById<'a> {
2028 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2029 f.debug_list().entries(self.0.iter()).finish()
2030 }
2031 }
2032
2033 f.debug_struct("Snapshot")
2034 .field("id", &self.id)
2035 .field("root_name", &self.root_name)
2036 .field("entries_by_path", &EntriesByPath(&self.entries_by_path))
2037 .field("entries_by_id", &EntriesById(&self.entries_by_id))
2038 .finish()
2039 }
2040}
2041
/// A handle to a file within a [`Worktree`], as exposed to the language layer.
#[derive(Clone, PartialEq)]
pub struct File {
    /// The worktree containing this file.
    pub worktree: ModelHandle<Worktree>,
    /// The file's path, relative to the worktree root.
    pub path: Arc<Path>,
    /// Modification time recorded for the file.
    pub mtime: SystemTime,
    /// Id of the corresponding worktree entry.
    pub(crate) entry_id: ProjectEntryId,
    /// Whether the file belongs to a local (vs. remote) worktree.
    pub(crate) is_local: bool,
    /// Whether the file has been deleted from the worktree.
    pub(crate) is_deleted: bool,
}
2051
impl language::File for File {
    /// Returns `self` as a `LocalFile` when this file belongs to a local
    /// worktree; `None` for files replicated from a remote host.
    fn as_local(&self) -> Option<&dyn language::LocalFile> {
        if self.is_local {
            Some(self)
        } else {
            None
        }
    }

    fn mtime(&self) -> SystemTime {
        self.mtime
    }

    /// The file's path, relative to the worktree root.
    fn path(&self) -> &Arc<Path> {
        &self.path
    }

    /// The path shown to the user: the worktree's root name (or its
    /// abbreviated absolute path for invisible worktrees) joined with the
    /// worktree-relative path.
    fn full_path(&self, cx: &AppContext) -> PathBuf {
        let mut full_path = PathBuf::new();
        let worktree = self.worktree.read(cx);

        if worktree.is_visible() {
            full_path.push(worktree.root_name());
        } else {
            let path = worktree.abs_path();

            // Abbreviate paths under the user's home directory with "~".
            if worktree.is_local() && path.starts_with(HOME.as_path()) {
                full_path.push("~");
                full_path.push(path.strip_prefix(HOME.as_path()).unwrap());
            } else {
                full_path.push(path)
            }
        }

        // The root entry's relative path is empty; pushing it would be a
        // no-op, so skip it.
        if self.path.components().next().is_some() {
            full_path.push(&self.path);
        }

        full_path
    }

    /// Returns the last component of this handle's absolute path. If this handle refers to the root
    /// of its worktree, then this method will return the name of the worktree itself.
    fn file_name<'a>(&'a self, cx: &'a AppContext) -> &'a OsStr {
        self.path
            .file_name()
            .unwrap_or_else(|| OsStr::new(&self.worktree.read(cx).root_name))
    }

    fn is_deleted(&self) -> bool {
        self.is_deleted
    }

    fn as_any(&self) -> &dyn Any {
        self
    }

    /// Serializes this file for the wire.
    fn to_proto(&self) -> rpc::proto::File {
        rpc::proto::File {
            // The model handle's id doubles as the worktree id; see
            // `WorktreeId::from_usize`.
            worktree_id: self.worktree.id() as u64,
            entry_id: self.entry_id.to_proto(),
            path: self.path.to_string_lossy().into(),
            mtime: Some(self.mtime.into()),
            is_deleted: self.is_deleted,
        }
    }
}
2119
impl language::LocalFile for File {
    /// Absolute path of this file on the local filesystem.
    ///
    /// Panics (`unwrap`) if the worktree is not local; callers are expected
    /// to have checked `as_local` first.
    fn abs_path(&self, cx: &AppContext) -> PathBuf {
        self.worktree
            .read(cx)
            .as_local()
            .unwrap()
            .abs_path
            .join(&self.path)
    }

    /// Loads the file's contents from disk on a background thread.
    fn load(&self, cx: &AppContext) -> Task<Result<String>> {
        let worktree = self.worktree.read(cx).as_local().unwrap();
        let abs_path = worktree.absolutize(&self.path);
        let fs = worktree.fs.clone();
        cx.background()
            .spawn(async move { fs.load(&abs_path).await })
    }

    /// Notifies collaborators that this buffer was reloaded from disk.
    /// Only sends when the worktree is currently shared; send failures are
    /// logged and otherwise ignored (best effort).
    fn buffer_reloaded(
        &self,
        buffer_id: u64,
        version: &clock::Global,
        fingerprint: RopeFingerprint,
        line_ending: LineEnding,
        mtime: SystemTime,
        cx: &mut AppContext,
    ) {
        let worktree = self.worktree.read(cx).as_local().unwrap();
        if let Some(project_id) = worktree.share.as_ref().map(|share| share.project_id) {
            worktree
                .client
                .send(proto::BufferReloaded {
                    project_id,
                    buffer_id,
                    version: serialize_version(version),
                    mtime: Some(mtime.into()),
                    fingerprint: serialize_fingerprint(fingerprint),
                    line_ending: serialize_line_ending(line_ending) as i32,
                })
                .log_err();
        }
    }
}
2163
2164impl File {
2165 pub fn from_proto(
2166 proto: rpc::proto::File,
2167 worktree: ModelHandle<Worktree>,
2168 cx: &AppContext,
2169 ) -> Result<Self> {
2170 let worktree_id = worktree
2171 .read(cx)
2172 .as_remote()
2173 .ok_or_else(|| anyhow!("not remote"))?
2174 .id();
2175
2176 if worktree_id.to_proto() != proto.worktree_id {
2177 return Err(anyhow!("worktree id does not match file"));
2178 }
2179
2180 Ok(Self {
2181 worktree,
2182 path: Path::new(&proto.path).into(),
2183 mtime: proto.mtime.ok_or_else(|| anyhow!("no timestamp"))?.into(),
2184 entry_id: ProjectEntryId::from_proto(proto.entry_id),
2185 is_local: false,
2186 is_deleted: proto.is_deleted,
2187 })
2188 }
2189
2190 pub fn from_dyn(file: Option<&Arc<dyn language::File>>) -> Option<&Self> {
2191 file.and_then(|f| f.as_any().downcast_ref())
2192 }
2193
2194 pub fn worktree_id(&self, cx: &AppContext) -> WorktreeId {
2195 self.worktree.read(cx).id()
2196 }
2197
2198 pub fn project_entry_id(&self, _: &AppContext) -> Option<ProjectEntryId> {
2199 if self.is_deleted {
2200 None
2201 } else {
2202 Some(self.entry_id)
2203 }
2204 }
2205}
2206
/// A single file or directory within a worktree snapshot.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct Entry {
    /// Stable id; `reuse_entry_id` preserves it across re-scans (by path)
    /// and remove/re-create cycles (by inode).
    pub id: ProjectEntryId,
    /// File, directory, or not-yet-scanned directory.
    pub kind: EntryKind,
    /// Path relative to the worktree root.
    pub path: Arc<Path>,
    /// Filesystem inode number.
    pub inode: u64,
    /// Last modification time reported by the filesystem.
    pub mtime: SystemTime,
    /// Whether the entry is a symbolic link.
    pub is_symlink: bool,
    /// Whether the entry is excluded by a gitignore rule.
    pub is_ignored: bool,
}
2217
/// The kind of a worktree [`Entry`].
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum EntryKind {
    /// A directory that has been discovered but whose children have not yet
    /// been scanned (see `LocalSnapshot::populate_dir`).
    PendingDir,
    /// A fully scanned directory.
    Dir,
    /// A file, carrying a precomputed character bag used for fuzzy matching.
    File(CharBag),
}
2224
/// Describes how a path changed between two snapshots.
#[derive(Clone, Copy, Debug)]
pub enum PathChange {
    /// The path was created.
    Added,
    /// The path was deleted.
    Removed,
    /// The path's entry changed.
    Updated,
    /// The path was either created or updated.
    AddedOrUpdated,
}
2232
2233impl Entry {
2234 fn new(
2235 path: Arc<Path>,
2236 metadata: &fs::Metadata,
2237 next_entry_id: &AtomicUsize,
2238 root_char_bag: CharBag,
2239 ) -> Self {
2240 Self {
2241 id: ProjectEntryId::new(next_entry_id),
2242 kind: if metadata.is_dir {
2243 EntryKind::PendingDir
2244 } else {
2245 EntryKind::File(char_bag_for_path(root_char_bag, &path))
2246 },
2247 path,
2248 inode: metadata.inode,
2249 mtime: metadata.mtime,
2250 is_symlink: metadata.is_symlink,
2251 is_ignored: false,
2252 }
2253 }
2254
2255 pub fn is_dir(&self) -> bool {
2256 matches!(self.kind, EntryKind::Dir | EntryKind::PendingDir)
2257 }
2258
2259 pub fn is_file(&self) -> bool {
2260 matches!(self.kind, EntryKind::File(_))
2261 }
2262}
2263
2264impl sum_tree::Item for Entry {
2265 type Summary = EntrySummary;
2266
2267 fn summary(&self) -> Self::Summary {
2268 let visible_count = if self.is_ignored { 0 } else { 1 };
2269 let file_count;
2270 let visible_file_count;
2271 if self.is_file() {
2272 file_count = 1;
2273 visible_file_count = visible_count;
2274 } else {
2275 file_count = 0;
2276 visible_file_count = 0;
2277 }
2278
2279 EntrySummary {
2280 max_path: self.path.clone(),
2281 count: 1,
2282 visible_count,
2283 file_count,
2284 visible_file_count,
2285 }
2286 }
2287}
2288
2289impl sum_tree::KeyedItem for Entry {
2290 type Key = PathKey;
2291
2292 fn key(&self) -> Self::Key {
2293 PathKey(self.path.clone())
2294 }
2295}
2296
/// Sum-tree summary for [`Entry`]: aggregate counts plus the maximum path in
/// the summarized subtree (paths are the tree's sort key).
#[derive(Clone, Debug)]
pub struct EntrySummary {
    /// Largest (right-most) path in the subtree.
    max_path: Arc<Path>,
    /// Total number of entries.
    count: usize,
    /// Number of entries that are not gitignored.
    visible_count: usize,
    /// Number of entries that are files.
    file_count: usize,
    /// Number of entries that are files and not gitignored.
    visible_file_count: usize,
}
2305
2306impl Default for EntrySummary {
2307 fn default() -> Self {
2308 Self {
2309 max_path: Arc::from(Path::new("")),
2310 count: 0,
2311 visible_count: 0,
2312 file_count: 0,
2313 visible_file_count: 0,
2314 }
2315 }
2316}
2317
impl sum_tree::Summary for EntrySummary {
    type Context = ();

    /// Combines two adjacent summaries: counts add, and `max_path` becomes the
    /// right-hand side's (entries are ordered by path, so it is the greater).
    fn add_summary(&mut self, rhs: &Self, _: &()) {
        self.max_path = rhs.max_path.clone();
        self.count += rhs.count;
        self.visible_count += rhs.visible_count;
        self.file_count += rhs.file_count;
        self.visible_file_count += rhs.visible_file_count;
    }
}
2329
/// A secondary index record mapping an entry's id back to its path,
/// stored in `entries_by_id` alongside the path-ordered `entries_by_path`.
#[derive(Clone, Debug)]
struct PathEntry {
    id: ProjectEntryId,
    path: Arc<Path>,
    is_ignored: bool,
    /// The scan in which this record was last updated.
    scan_id: usize,
}
2337
impl sum_tree::Item for PathEntry {
    type Summary = PathEntrySummary;

    fn summary(&self) -> Self::Summary {
        PathEntrySummary { max_id: self.id }
    }
}
2345
impl sum_tree::KeyedItem for PathEntry {
    type Key = ProjectEntryId;

    /// `PathEntry` records are keyed by entry id, giving id → path lookup.
    fn key(&self) -> Self::Key {
        self.id
    }
}
2353
/// Summary for the id-keyed index: only tracks the greatest id in a range.
#[derive(Clone, Debug, Default)]
struct PathEntrySummary {
    max_id: ProjectEntryId,
}
2358
impl sum_tree::Summary for PathEntrySummary {
    type Context = ();

    /// The right-hand summary's id is the greater, since items are id-ordered.
    fn add_summary(&mut self, summary: &Self, _: &Self::Context) {
        self.max_id = summary.max_id;
    }
}
2366
/// Lets a cursor over the id-keyed index seek directly by `ProjectEntryId`.
impl<'a> sum_tree::Dimension<'a, PathEntrySummary> for ProjectEntryId {
    fn add_summary(&mut self, summary: &'a PathEntrySummary, _: &()) {
        *self = summary.max_id;
    }
}
2372
/// Newtype key used to order and seek worktree entries by path.
#[derive(Clone, Debug, Eq, PartialEq, Ord, PartialOrd)]
pub struct PathKey(Arc<Path>);
2375
impl Default for PathKey {
    /// The empty path, which sorts before every other path.
    fn default() -> Self {
        Self(Path::new("").into())
    }
}
2381
/// Lets a cursor over the path-ordered entries seek directly by `PathKey`.
impl<'a> sum_tree::Dimension<'a, EntrySummary> for PathKey {
    fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
        self.0 = summary.max_path.clone();
    }
}
2387
/// Scans a local worktree's directory tree on a background task, keeping
/// `snapshot` up to date with the filesystem and reporting progress and
/// change sets to the worktree through `status_updates_tx`.
struct BackgroundScanner {
    /// The authoritative, continuously-updated snapshot of the worktree.
    snapshot: Mutex<LocalSnapshot>,
    fs: Arc<dyn Fs>,
    /// Channel used to report scan state and completed change sets.
    status_updates_tx: UnboundedSender<ScanState>,
    executor: Arc<executor::Background>,
    /// Requests from the worktree to re-scan specific paths; each carries a
    /// barrier that is released once the refresh has been reported.
    refresh_requests_rx: channel::Receiver<(Vec<PathBuf>, barrier::Sender)>,
    /// The last snapshot reported to the worktree, plus the paths that have
    /// changed since then; consumed by `build_change_set`.
    prev_state: Mutex<(Snapshot, Vec<Arc<Path>>)>,
    /// Whether the initial recursive scan has completed.
    finished_initial_scan: bool,
}
2397
2398impl BackgroundScanner {
2399 fn new(
2400 snapshot: LocalSnapshot,
2401 fs: Arc<dyn Fs>,
2402 status_updates_tx: UnboundedSender<ScanState>,
2403 executor: Arc<executor::Background>,
2404 refresh_requests_rx: channel::Receiver<(Vec<PathBuf>, barrier::Sender)>,
2405 ) -> Self {
2406 Self {
2407 fs,
2408 status_updates_tx,
2409 executor,
2410 refresh_requests_rx,
2411 prev_state: Mutex::new((snapshot.snapshot.clone(), Vec::new())),
2412 snapshot: Mutex::new(snapshot),
2413 finished_initial_scan: false,
2414 }
2415 }
2416
    /// Entry point for the scanner task: performs the initial recursive scan,
    /// then loops forever, servicing path-refresh requests and filesystem
    /// events until either channel closes (i.e. the worktree is dropped).
    async fn run(
        &mut self,
        mut events_rx: Pin<Box<dyn Send + Stream<Item = Vec<fsevent::Event>>>>,
    ) {
        use futures::FutureExt as _;

        let (root_abs_path, root_inode) = {
            let snapshot = self.snapshot.lock();
            (
                snapshot.abs_path.clone(),
                snapshot.root_entry().map(|e| e.inode),
            )
        };

        // Populate ignores above the root.
        let ignore_stack;
        for ancestor in root_abs_path.ancestors().skip(1) {
            if let Ok(ignore) = build_gitignore(&ancestor.join(&*GITIGNORE), self.fs.as_ref()).await
            {
                self.snapshot
                    .lock()
                    .ignores_by_parent_abs_path
                    .insert(ancestor.into(), (ignore.into(), 0));
            }
        }
        {
            let mut snapshot = self.snapshot.lock();
            snapshot.scan_id += 1;
            ignore_stack = snapshot.ignore_stack_for_abs_path(&root_abs_path, true);
            // If an ancestor gitignore excludes everything under the root,
            // mark the root entry itself as ignored.
            if ignore_stack.is_all() {
                if let Some(mut root_entry) = snapshot.root_entry().cloned() {
                    root_entry.is_ignored = true;
                    snapshot.insert_entry(root_entry, self.fs.as_ref());
                }
            }
        };

        // Perform an initial scan of the directory.
        let (scan_job_tx, scan_job_rx) = channel::unbounded();
        smol::block_on(scan_job_tx.send(ScanJob {
            abs_path: root_abs_path,
            path: Arc::from(Path::new("")),
            ignore_stack,
            ancestor_inodes: TreeSet::from_ordered_entries(root_inode),
            scan_queue: scan_job_tx.clone(),
        }))
        .unwrap();
        // Dropping the sender lets `scan_dirs` terminate once all queued
        // jobs (including those enqueued by workers) are done.
        drop(scan_job_tx);
        self.scan_dirs(true, scan_job_rx).await;
        {
            let mut snapshot = self.snapshot.lock();
            snapshot.completed_scan_id = snapshot.scan_id;
        }
        self.send_status_update(false, None);

        // Process any FS events that occurred while performing the initial scan.
        // For these events, update events cannot be as precise, because we didn't
        // have the previous state loaded yet.
        if let Poll::Ready(Some(events)) = futures::poll!(events_rx.next()) {
            let mut paths = events.into_iter().map(|e| e.path).collect::<Vec<_>>();
            while let Poll::Ready(Some(more_events)) = futures::poll!(events_rx.next()) {
                paths.extend(more_events.into_iter().map(|e| e.path));
            }
            self.process_events(paths).await;
        }

        self.finished_initial_scan = true;

        // Continue processing events until the worktree is dropped.
        loop {
            select_biased! {
                // Process any path refresh requests from the worktree. Prioritize
                // these before handling changes reported by the filesystem.
                request = self.refresh_requests_rx.recv().fuse() => {
                    let Ok((paths, barrier)) = request else { break };
                    if !self.process_refresh_request(paths, barrier).await {
                        return;
                    }
                }

                events = events_rx.next().fuse() => {
                    let Some(events) = events else { break };
                    let mut paths = events.into_iter().map(|e| e.path).collect::<Vec<_>>();
                    // Coalesce any events that are already pending so they can
                    // all be processed in a single pass.
                    while let Poll::Ready(Some(more_events)) = futures::poll!(events_rx.next()) {
                        paths.extend(more_events.into_iter().map(|e| e.path));
                    }
                    self.process_events(paths).await;
                }
            }
        }
    }
2508
    /// Reloads the given paths and reports the result to the worktree,
    /// releasing `barrier` once the update is delivered. Returns false if the
    /// status-update channel has been closed.
    async fn process_refresh_request(&self, paths: Vec<PathBuf>, barrier: barrier::Sender) -> bool {
        self.reload_entries_for_paths(paths, None).await;
        self.send_status_update(false, Some(barrier))
    }
2513
    /// Handles a batch of filesystem event paths: reloads the affected
    /// entries, recursively rescans changed directories, refreshes ignore
    /// statuses, and prunes git repository state whose `.git` entries no
    /// longer exist. Finishes by sending a status update to the worktree.
    async fn process_events(&mut self, paths: Vec<PathBuf>) {
        let (scan_job_tx, scan_job_rx) = channel::unbounded();
        if let Some(mut paths) = self
            .reload_entries_for_paths(paths, Some(scan_job_tx.clone()))
            .await
        {
            paths.sort_unstable();
            // Accumulate the changed paths so the next status update can
            // compute a precise change set.
            util::extend_sorted(&mut self.prev_state.lock().1, paths, usize::MAX, Ord::cmp);
        }
        drop(scan_job_tx);
        self.scan_dirs(false, scan_job_rx).await;

        self.update_ignore_statuses().await;

        let mut snapshot = self.snapshot.lock();

        // Drop git repositories whose `.git` directory entry no longer exists.
        // (`mem::take` lets us mutate the map while also querying `snapshot`.)
        let mut git_repositories = mem::take(&mut snapshot.git_repositories);
        git_repositories.retain(|project_entry_id, _| {
            snapshot
                .entry_for_id(*project_entry_id)
                .map_or(false, |entry| entry.path.file_name() == Some(&DOT_GIT))
        });
        snapshot.git_repositories = git_repositories;

        // Likewise drop repository entries whose work directory no longer
        // contains a `.git` entry.
        let mut git_repository_entries = mem::take(&mut snapshot.snapshot.repository_entries);
        git_repository_entries.retain(|_, entry| {
            entry
                .work_directory(&snapshot)
                .map(|directory| {
                    snapshot
                        .entry_for_path((directory.as_ref()).join(".git"))
                        .is_some()
                })
                .unwrap_or(false)
        });
        snapshot.snapshot.repository_entries = git_repository_entries;

        snapshot.removed_entry_ids.clear();
        snapshot.completed_scan_id = snapshot.scan_id;

        drop(snapshot);

        self.send_status_update(false, None);
    }
2558
    /// Drains `scan_jobs_rx` using one worker per CPU, scanning directories in
    /// parallel. Workers also service refresh requests (prioritized over scan
    /// jobs) and, when `enable_progress_updates` is set, periodically send
    /// progress updates to the worktree.
    async fn scan_dirs(
        &self,
        enable_progress_updates: bool,
        scan_jobs_rx: channel::Receiver<ScanJob>,
    ) {
        use futures::FutureExt as _;

        if self
            .status_updates_tx
            .unbounded_send(ScanState::Started)
            .is_err()
        {
            // The worktree has been dropped; there is nobody to scan for.
            return;
        }

        let progress_update_count = AtomicUsize::new(0);
        self.executor
            .scoped(|scope| {
                for _ in 0..self.executor.num_cpus() {
                    scope.spawn(async {
                        let mut last_progress_update_count = 0;
                        let progress_update_timer = self.progress_timer(enable_progress_updates).fuse();
                        futures::pin_mut!(progress_update_timer);

                        loop {
                            select_biased! {
                                // Process any path refresh requests before moving on to process
                                // the scan queue, so that user operations are prioritized.
                                request = self.refresh_requests_rx.recv().fuse() => {
                                    let Ok((paths, barrier)) = request else { break };
                                    if !self.process_refresh_request(paths, barrier).await {
                                        return;
                                    }
                                }

                                // Send periodic progress updates to the worktree. Use an atomic counter
                                // to ensure that only one of the workers sends a progress update after
                                // the update interval elapses.
                                _ = progress_update_timer => {
                                    match progress_update_count.compare_exchange(
                                        last_progress_update_count,
                                        last_progress_update_count + 1,
                                        SeqCst,
                                        SeqCst
                                    ) {
                                        Ok(_) => {
                                            last_progress_update_count += 1;
                                            self.send_status_update(true, None);
                                        }
                                        Err(count) => {
                                            // Another worker already sent this update;
                                            // just catch up to its count.
                                            last_progress_update_count = count;
                                        }
                                    }
                                    progress_update_timer.set(self.progress_timer(enable_progress_updates).fuse());
                                }

                                // Recursively load directories from the file system.
                                job = scan_jobs_rx.recv().fuse() => {
                                    let Ok(job) = job else { break };
                                    if let Err(err) = self.scan_dir(&job).await {
                                        if job.path.as_ref() != Path::new("") {
                                            log::error!("error scanning directory {:?}: {}", job.abs_path, err);
                                        }
                                    }
                                }
                            }
                        }
                    })
                }
            })
            .await;
    }
2631
    /// Diffs the current snapshot against the previously-reported one and
    /// sends the resulting change set to the worktree. Returns false if the
    /// receiving side has been dropped.
    fn send_status_update(&self, scanning: bool, barrier: Option<barrier::Sender>) -> bool {
        // Lock order: `prev_state` first, then `snapshot` (briefly, to clone).
        let mut prev_state = self.prev_state.lock();
        let snapshot = self.snapshot.lock().clone();
        let mut old_snapshot = snapshot.snapshot.clone();
        // Install the new snapshot as the reported state, retrieving the old
        // one to diff against, and take the accumulated changed paths.
        mem::swap(&mut old_snapshot, &mut prev_state.0);
        let changed_paths = mem::take(&mut prev_state.1);
        let changes = self.build_change_set(&old_snapshot, &snapshot.snapshot, changed_paths);
        self.status_updates_tx
            .unbounded_send(ScanState::Updated {
                snapshot,
                changes,
                scanning,
                barrier,
            })
            .is_ok()
    }
2648
    /// Scans a single directory: builds entries for each child, tracks any
    /// `.gitignore` found there, and enqueues scan jobs for child directories
    /// (skipping those already seen on this path, to break symlink cycles).
    async fn scan_dir(&self, job: &ScanJob) -> Result<()> {
        let mut new_entries: Vec<Entry> = Vec::new();
        // One slot per directory child, in order; `None` marks a directory
        // that won't be recursed into (a detected symlink cycle).
        let mut new_jobs: Vec<Option<ScanJob>> = Vec::new();
        let mut ignore_stack = job.ignore_stack.clone();
        let mut new_ignore = None;
        let (root_abs_path, root_char_bag, next_entry_id) = {
            let snapshot = self.snapshot.lock();
            (
                snapshot.abs_path().clone(),
                snapshot.root_char_bag,
                snapshot.next_entry_id.clone(),
            )
        };
        let mut child_paths = self.fs.read_dir(&job.abs_path).await?;
        while let Some(child_abs_path) = child_paths.next().await {
            let child_abs_path: Arc<Path> = match child_abs_path {
                Ok(child_abs_path) => child_abs_path.into(),
                Err(error) => {
                    log::error!("error processing entry {:?}", error);
                    continue;
                }
            };

            let child_name = child_abs_path.file_name().unwrap();
            let child_path: Arc<Path> = job.path.join(child_name).into();
            let child_metadata = match self.fs.metadata(&child_abs_path).await {
                Ok(Some(metadata)) => metadata,
                Ok(None) => continue,
                Err(err) => {
                    log::error!("error processing {:?}: {:?}", child_abs_path, err);
                    continue;
                }
            };

            // If we find a .gitignore, add it to the stack of ignores used to determine which paths are ignored
            if child_name == *GITIGNORE {
                match build_gitignore(&child_abs_path, self.fs.as_ref()).await {
                    Ok(ignore) => {
                        let ignore = Arc::new(ignore);
                        ignore_stack = ignore_stack.append(job.abs_path.clone(), ignore.clone());
                        new_ignore = Some(ignore);
                    }
                    Err(error) => {
                        log::error!(
                            "error loading .gitignore file {:?} - {:?}",
                            child_name,
                            error
                        );
                    }
                }

                // Update ignore status of any child entries we've already processed to reflect the
                // ignore file in the current directory. Because `.gitignore` starts with a `.`, it
                // sorts near the beginning of the directory listing, so there should rarely be many
                // entries to revisit. Update the ignore stack associated with any new jobs as well.
                let mut new_jobs = new_jobs.iter_mut();
                for entry in &mut new_entries {
                    let entry_abs_path = root_abs_path.join(&entry.path);
                    entry.is_ignored =
                        ignore_stack.is_abs_path_ignored(&entry_abs_path, entry.is_dir());

                    if entry.is_dir() {
                        // Each directory entry has a corresponding slot in `new_jobs`.
                        if let Some(job) = new_jobs.next().expect("Missing scan job for entry") {
                            job.ignore_stack = if entry.is_ignored {
                                IgnoreStack::all()
                            } else {
                                ignore_stack.clone()
                            };
                        }
                    }
                }
            }

            let mut child_entry = Entry::new(
                child_path.clone(),
                &child_metadata,
                &next_entry_id,
                root_char_bag,
            );

            if child_entry.is_dir() {
                let is_ignored = ignore_stack.is_abs_path_ignored(&child_abs_path, true);
                child_entry.is_ignored = is_ignored;

                // Avoid recursing until crash in the case of a recursive symlink
                if !job.ancestor_inodes.contains(&child_entry.inode) {
                    let mut ancestor_inodes = job.ancestor_inodes.clone();
                    ancestor_inodes.insert(child_entry.inode);

                    new_jobs.push(Some(ScanJob {
                        abs_path: child_abs_path,
                        path: child_path,
                        ignore_stack: if is_ignored {
                            IgnoreStack::all()
                        } else {
                            ignore_stack.clone()
                        },
                        ancestor_inodes,
                        scan_queue: job.scan_queue.clone(),
                    }));
                } else {
                    new_jobs.push(None);
                }
            } else {
                child_entry.is_ignored = ignore_stack.is_abs_path_ignored(&child_abs_path, false);
            }

            new_entries.push(child_entry);
        }

        self.snapshot.lock().populate_dir(
            job.path.clone(),
            new_entries,
            new_ignore,
            self.fs.as_ref(),
        );

        // Enqueue jobs for the child directories only after the parent has
        // been populated in the snapshot.
        for new_job in new_jobs {
            if let Some(new_job) = new_job {
                job.scan_queue.send(new_job).await.unwrap();
            }
        }

        Ok(())
    }
2774
    /// Refreshes the entries for the given absolute paths by re-reading their
    /// metadata. When `scan_queue_tx` is provided, directories are rescanned
    /// recursively. Returns the affected worktree-relative paths, or `None`
    /// if the worktree root could not be canonicalized.
    async fn reload_entries_for_paths(
        &self,
        mut abs_paths: Vec<PathBuf>,
        scan_queue_tx: Option<Sender<ScanJob>>,
    ) -> Option<Vec<Arc<Path>>> {
        let doing_recursive_update = scan_queue_tx.is_some();

        // Sort so that parents precede children, then drop any path whose
        // ancestor is also in the list (it will be covered by the ancestor).
        abs_paths.sort_unstable();
        abs_paths.dedup_by(|a, b| a.starts_with(&b));

        let root_abs_path = self.snapshot.lock().abs_path.clone();
        let root_canonical_path = self.fs.canonicalize(&root_abs_path).await.log_err()?;
        // Fetch all metadata concurrently before taking the snapshot lock.
        let metadata = futures::future::join_all(
            abs_paths
                .iter()
                .map(|abs_path| self.fs.metadata(&abs_path))
                .collect::<Vec<_>>(),
        )
        .await;

        let mut snapshot = self.snapshot.lock();
        let is_idle = snapshot.completed_scan_id == snapshot.scan_id;
        snapshot.scan_id += 1;
        if is_idle && !doing_recursive_update {
            snapshot.completed_scan_id = snapshot.scan_id;
        }

        // Remove any entries for paths that no longer exist or are being recursively
        // refreshed. Do this before adding any new entries, so that renames can be
        // detected regardless of the order of the paths.
        let mut event_paths = Vec::<Arc<Path>>::with_capacity(abs_paths.len());
        for (abs_path, metadata) in abs_paths.iter().zip(metadata.iter()) {
            if let Ok(path) = abs_path.strip_prefix(&root_canonical_path) {
                if matches!(metadata, Ok(None)) || doing_recursive_update {
                    snapshot.remove_path(path);
                }
                event_paths.push(path.into());
            } else {
                log::error!(
                    "unexpected event {:?} for root path {:?}",
                    abs_path,
                    root_canonical_path
                );
            }
        }

        for (path, metadata) in event_paths.iter().cloned().zip(metadata.into_iter()) {
            let abs_path: Arc<Path> = root_abs_path.join(&path).into();

            match metadata {
                Ok(Some(metadata)) => {
                    let ignore_stack =
                        snapshot.ignore_stack_for_abs_path(&abs_path, metadata.is_dir);
                    let mut fs_entry = Entry::new(
                        path.clone(),
                        &metadata,
                        snapshot.next_entry_id.as_ref(),
                        snapshot.root_char_bag,
                    );
                    fs_entry.is_ignored = ignore_stack.is_all();
                    snapshot.insert_entry(fs_entry, self.fs.as_ref());

                    let scan_id = snapshot.scan_id;

                    // If the path lives inside a repository's `.git` directory,
                    // reload that repository's index and branch name.
                    let repo_with_path_in_dotgit = snapshot.repo_for_metadata(&path);
                    if let Some((key, repo)) = repo_with_path_in_dotgit {
                        let repo = repo.lock();
                        repo.reload_index();
                        let branch = repo.branch_name();

                        snapshot.repository_entries.update(&key, |entry| {
                            entry.scan_id = scan_id;
                            entry.branch = branch.map(Into::into)
                        });
                    }

                    if let Some(scan_queue_tx) = &scan_queue_tx {
                        let mut ancestor_inodes = snapshot.ancestor_inodes_for_path(&path);
                        // Guard against recursive symlinks, as in `scan_dir`.
                        if metadata.is_dir && !ancestor_inodes.contains(&metadata.inode) {
                            ancestor_inodes.insert(metadata.inode);
                            smol::block_on(scan_queue_tx.send(ScanJob {
                                abs_path,
                                path,
                                ignore_stack,
                                ancestor_inodes,
                                scan_queue: scan_queue_tx.clone(),
                            }))
                            .unwrap();
                        }
                    }
                }
                Ok(None) => {}
                Err(err) => {
                    // TODO - create a special 'error' entry in the entries tree to mark this
                    log::error!("error reading file on event {:?}", err);
                }
            }
        }

        Some(event_paths)
    }
2876
    /// Recomputes ignore statuses after gitignore files change: removes
    /// entries for deleted gitignores, then re-walks (in parallel) every
    /// directory whose gitignore was modified in the current scan.
    async fn update_ignore_statuses(&self) {
        use futures::FutureExt as _;

        // Work on a clone of the snapshot so the lock isn't held while walking.
        let mut snapshot = self.snapshot.lock().clone();
        let mut ignores_to_update = Vec::new();
        let mut ignores_to_delete = Vec::new();
        for (parent_abs_path, (_, scan_id)) in &snapshot.ignores_by_parent_abs_path {
            if let Ok(parent_path) = parent_abs_path.strip_prefix(&snapshot.abs_path) {
                // A scan_id newer than the completed scan means this gitignore
                // changed during the current scan.
                if *scan_id > snapshot.completed_scan_id
                    && snapshot.entry_for_path(parent_path).is_some()
                {
                    ignores_to_update.push(parent_abs_path.clone());
                }

                let ignore_path = parent_path.join(&*GITIGNORE);
                if snapshot.entry_for_path(ignore_path).is_none() {
                    ignores_to_delete.push(parent_abs_path.clone());
                }
            }
        }

        for parent_abs_path in ignores_to_delete {
            snapshot.ignores_by_parent_abs_path.remove(&parent_abs_path);
            self.snapshot
                .lock()
                .ignores_by_parent_abs_path
                .remove(&parent_abs_path);
        }

        let (ignore_queue_tx, ignore_queue_rx) = channel::unbounded();
        ignores_to_update.sort_unstable();
        let mut ignores_to_update = ignores_to_update.into_iter().peekable();
        while let Some(parent_abs_path) = ignores_to_update.next() {
            // Skip descendants of this path; the recursive walk covers them.
            while ignores_to_update
                .peek()
                .map_or(false, |p| p.starts_with(&parent_abs_path))
            {
                ignores_to_update.next().unwrap();
            }

            let ignore_stack = snapshot.ignore_stack_for_abs_path(&parent_abs_path, true);
            smol::block_on(ignore_queue_tx.send(UpdateIgnoreStatusJob {
                abs_path: parent_abs_path,
                ignore_stack,
                ignore_queue: ignore_queue_tx.clone(),
            }))
            .unwrap();
        }
        drop(ignore_queue_tx);

        self.executor
            .scoped(|scope| {
                for _ in 0..self.executor.num_cpus() {
                    scope.spawn(async {
                        loop {
                            select_biased! {
                                // Process any path refresh requests before moving on to process
                                // the queue of ignore statuses.
                                request = self.refresh_requests_rx.recv().fuse() => {
                                    let Ok((paths, barrier)) = request else { break };
                                    if !self.process_refresh_request(paths, barrier).await {
                                        return;
                                    }
                                }

                                // Recursively process directories whose ignores have changed.
                                job = ignore_queue_rx.recv().fuse() => {
                                    let Ok(job) = job else { break };
                                    self.update_ignore_status(job, &snapshot).await;
                                }
                            }
                        }
                    });
                }
            })
            .await;
    }
2954
    /// Recomputes the ignore status of one directory's children, recording
    /// edits for any entries whose status flipped and enqueueing jobs for
    /// child directories.
    async fn update_ignore_status(&self, job: UpdateIgnoreStatusJob, snapshot: &LocalSnapshot) {
        let mut ignore_stack = job.ignore_stack;
        if let Some((ignore, _)) = snapshot.ignores_by_parent_abs_path.get(&job.abs_path) {
            ignore_stack = ignore_stack.append(job.abs_path.clone(), ignore.clone());
        }

        let mut entries_by_id_edits = Vec::new();
        let mut entries_by_path_edits = Vec::new();
        let path = job.abs_path.strip_prefix(&snapshot.abs_path).unwrap();
        for mut entry in snapshot.child_entries(path).cloned() {
            let was_ignored = entry.is_ignored;
            let abs_path = snapshot.abs_path().join(&entry.path);
            entry.is_ignored = ignore_stack.is_abs_path_ignored(&abs_path, entry.is_dir());
            if entry.is_dir() {
                // Everything under an ignored directory is ignored.
                let child_ignore_stack = if entry.is_ignored {
                    IgnoreStack::all()
                } else {
                    ignore_stack.clone()
                };
                job.ignore_queue
                    .send(UpdateIgnoreStatusJob {
                        abs_path: abs_path.into(),
                        ignore_stack: child_ignore_stack,
                        ignore_queue: job.ignore_queue.clone(),
                    })
                    .await
                    .unwrap();
            }

            // Only record an edit when the status actually changed, keeping
            // both indices (by path and by id) consistent.
            if entry.is_ignored != was_ignored {
                let mut path_entry = snapshot.entries_by_id.get(&entry.id, &()).unwrap().clone();
                path_entry.scan_id = snapshot.scan_id;
                path_entry.is_ignored = entry.is_ignored;
                entries_by_id_edits.push(Edit::Insert(path_entry));
                entries_by_path_edits.push(Edit::Insert(entry));
            }
        }

        let mut snapshot = self.snapshot.lock();
        snapshot.entries_by_path.edit(entries_by_path_edits, &());
        snapshot.entries_by_id.edit(entries_by_id_edits, &());
    }
2997
    /// Computes the set of per-path changes between two snapshots, limited to
    /// the given event paths (and their descendants). Uses a pair of cursors
    /// walking both snapshots in path order, merge-style.
    fn build_change_set(
        &self,
        old_snapshot: &Snapshot,
        new_snapshot: &Snapshot,
        event_paths: Vec<Arc<Path>>,
    ) -> HashMap<Arc<Path>, PathChange> {
        use PathChange::{Added, AddedOrUpdated, Removed, Updated};

        let mut changes = HashMap::default();
        let mut old_paths = old_snapshot.entries_by_path.cursor::<PathKey>();
        let mut new_paths = new_snapshot.entries_by_path.cursor::<PathKey>();
        let received_before_initialized = !self.finished_initial_scan;

        for path in event_paths {
            let path = PathKey(path);
            old_paths.seek(&path, Bias::Left, &());
            new_paths.seek(&path, Bias::Left, &());

            loop {
                match (old_paths.item(), new_paths.item()) {
                    (Some(old_entry), Some(new_entry)) => {
                        // Stop once both cursors have moved past the event
                        // path and its subtree.
                        if old_entry.path > path.0
                            && new_entry.path > path.0
                            && !old_entry.path.starts_with(&path.0)
                            && !new_entry.path.starts_with(&path.0)
                        {
                            break;
                        }

                        // Merge step: a path present only on the old side was
                        // removed; only on the new side, added; on both, it
                        // may have been updated.
                        match Ord::cmp(&old_entry.path, &new_entry.path) {
                            Ordering::Less => {
                                changes.insert(old_entry.path.clone(), Removed);
                                old_paths.next(&());
                            }
                            Ordering::Equal => {
                                if received_before_initialized {
                                    // If the worktree was not fully initialized when this event was generated,
                                    // we can't know whether this entry was added during the scan or whether
                                    // it was merely updated.
                                    changes.insert(new_entry.path.clone(), AddedOrUpdated);
                                } else if old_entry.mtime != new_entry.mtime {
                                    changes.insert(new_entry.path.clone(), Updated);
                                }
                                old_paths.next(&());
                                new_paths.next(&());
                            }
                            Ordering::Greater => {
                                changes.insert(new_entry.path.clone(), Added);
                                new_paths.next(&());
                            }
                        }
                    }
                    (Some(old_entry), None) => {
                        changes.insert(old_entry.path.clone(), Removed);
                        old_paths.next(&());
                    }
                    (None, Some(new_entry)) => {
                        changes.insert(new_entry.path.clone(), Added);
                        new_paths.next(&());
                    }
                    (None, None) => break,
                }
            }
        }
        changes
    }
3064
3065 async fn progress_timer(&self, running: bool) {
3066 if !running {
3067 return futures::future::pending().await;
3068 }
3069
3070 #[cfg(any(test, feature = "test-support"))]
3071 if self.fs.is_fake() {
3072 return self.executor.simulate_random_delay().await;
3073 }
3074
3075 smol::Timer::after(Duration::from_millis(100)).await;
3076 }
3077}
3078
3079fn char_bag_for_path(root_char_bag: CharBag, path: &Path) -> CharBag {
3080 let mut result = root_char_bag;
3081 result.extend(
3082 path.to_string_lossy()
3083 .chars()
3084 .map(|c| c.to_ascii_lowercase()),
3085 );
3086 result
3087}
3088
/// A unit of work for the parallel directory scan: one directory to read.
struct ScanJob {
    abs_path: Arc<Path>,
    /// The directory's worktree-relative path.
    path: Arc<Path>,
    ignore_stack: Arc<IgnoreStack>,
    /// Sender for enqueueing jobs for child directories.
    scan_queue: Sender<ScanJob>,
    /// Inodes of all ancestor directories, used to detect symlink cycles.
    ancestor_inodes: TreeSet<u64>,
}
3096
/// A unit of work for the parallel ignore-status refresh: one directory
/// whose children's ignore statuses should be recomputed.
struct UpdateIgnoreStatusJob {
    abs_path: Arc<Path>,
    ignore_stack: Arc<IgnoreStack>,
    /// Sender for enqueueing jobs for child directories.
    ignore_queue: Sender<UpdateIgnoreStatusJob>,
}
3102
/// Test-support extension methods for worktree model handles.
pub trait WorktreeHandle {
    /// Flushes pending FS events by mutating the worktree's directory and
    /// waiting for the resulting events to be fully processed.
    #[cfg(any(test, feature = "test-support"))]
    fn flush_fs_events<'a>(
        &self,
        cx: &'a gpui::TestAppContext,
    ) -> futures::future::LocalBoxFuture<'a, ()>;
}
3110
impl WorktreeHandle for ModelHandle<Worktree> {
    // The worktree's FS event stream sometimes delivers "redundant" events for FS changes that
    // occurred before the worktree was constructed. These events can cause the worktree to perform
    // extra directory scans, and emit extra scan-state notifications.
    //
    // This function mutates the worktree's directory and waits for those mutations to be picked up,
    // to ensure that all redundant FS events have already been processed.
    #[cfg(any(test, feature = "test-support"))]
    fn flush_fs_events<'a>(
        &self,
        cx: &'a gpui::TestAppContext,
    ) -> futures::future::LocalBoxFuture<'a, ()> {
        use smol::future::FutureExt;

        let filename = "fs-event-sentinel";
        let tree = self.clone();
        let (fs, root_path) = self.read_with(cx, |tree, _| {
            let tree = tree.as_local().unwrap();
            (tree.fs.clone(), tree.abs_path().clone())
        });

        async move {
            // Create a sentinel file and wait for it to appear in the tree...
            fs.create_file(&root_path.join(filename), Default::default())
                .await
                .unwrap();
            tree.condition(cx, |tree, _| tree.entry_for_path(filename).is_some())
                .await;

            // ...then delete it and wait for the deletion to be observed.
            fs.remove_file(&root_path.join(filename), Default::default())
                .await
                .unwrap();
            tree.condition(cx, |tree, _| tree.entry_for_path(filename).is_none())
                .await;

            cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
                .await;
        }
        .boxed_local()
    }
}
3151
/// Cursor dimension tracking how many entries (broken down by visibility and
/// file-ness) precede the current position in a traversal.
#[derive(Clone, Debug)]
struct TraversalProgress<'a> {
    max_path: &'a Path,
    count: usize,
    visible_count: usize,
    file_count: usize,
    visible_file_count: usize,
}
3160
3161impl<'a> TraversalProgress<'a> {
3162 fn count(&self, include_dirs: bool, include_ignored: bool) -> usize {
3163 match (include_ignored, include_dirs) {
3164 (true, true) => self.count,
3165 (true, false) => self.file_count,
3166 (false, true) => self.visible_count,
3167 (false, false) => self.visible_file_count,
3168 }
3169 }
3170}
3171
/// Accumulates entry-count summaries as a cursor advances through the tree.
impl<'a> sum_tree::Dimension<'a, EntrySummary> for TraversalProgress<'a> {
    fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
        self.max_path = summary.max_path.as_ref();
        self.count += summary.count;
        self.visible_count += summary.visible_count;
        self.file_count += summary.file_count;
        self.visible_file_count += summary.visible_file_count;
    }
}
3181
impl<'a> Default for TraversalProgress<'a> {
    /// The zero position: no entries traversed yet.
    fn default() -> Self {
        Self {
            max_path: Path::new(""),
            count: 0,
            visible_count: 0,
            file_count: 0,
            visible_file_count: 0,
        }
    }
}
3193
/// An ordered traversal over a snapshot's entries, optionally filtering out
/// directories and/or ignored entries.
pub struct Traversal<'a> {
    cursor: sum_tree::Cursor<'a, Entry, TraversalProgress<'a>>,
    include_ignored: bool,
    include_dirs: bool,
}
3199
impl<'a> Traversal<'a> {
    /// Advances to the next entry included by this traversal's filters.
    pub fn advance(&mut self) -> bool {
        self.advance_to_offset(self.offset() + 1)
    }

    /// Seeks forward to the entry at `offset`, where offsets are counted
    /// according to the traversal's `include_dirs`/`include_ignored` settings.
    pub fn advance_to_offset(&mut self, offset: usize) -> bool {
        self.cursor.seek_forward(
            &TraversalTarget::Count {
                count: offset,
                include_dirs: self.include_dirs,
                include_ignored: self.include_ignored,
            },
            Bias::Right,
            &(),
        )
    }

    /// Skips over the current entry and all of its descendants, stopping at
    /// the next entry that passes the traversal's filters. Returns false if
    /// no such entry exists.
    pub fn advance_to_sibling(&mut self) -> bool {
        while let Some(entry) = self.cursor.item() {
            self.cursor.seek_forward(
                &TraversalTarget::PathSuccessor(&entry.path),
                Bias::Left,
                &(),
            );
            if let Some(entry) = self.cursor.item() {
                if (self.include_dirs || !entry.is_dir())
                    && (self.include_ignored || !entry.is_ignored)
                {
                    return true;
                }
            }
        }
        false
    }

    /// The entry at the traversal's current position, if any.
    pub fn entry(&self) -> Option<&'a Entry> {
        self.cursor.item()
    }

    /// The current position, counted according to the traversal's filters.
    pub fn offset(&self) -> usize {
        self.cursor
            .start()
            .count(self.include_dirs, self.include_ignored)
    }
}
3245
3246impl<'a> Iterator for Traversal<'a> {
3247 type Item = &'a Entry;
3248
3249 fn next(&mut self) -> Option<Self::Item> {
3250 if let Some(item) = self.entry() {
3251 self.advance();
3252 Some(item)
3253 } else {
3254 None
3255 }
3256 }
3257}
3258
/// Seek targets for positioning a [`Traversal`]'s cursor.
#[derive(Debug)]
enum TraversalTarget {
    /// Seek to the given path.
    Path(&'a Path),
    /// Seek to the first path that is neither `path` nor one of its descendants.
    PathSuccessor(&'a Path),
    /// Seek to the entry at the given filtered offset.
    Count {
        count: usize,
        include_ignored: bool,
        include_dirs: bool,
    },
}
3269
3270impl<'a, 'b> SeekTarget<'a, EntrySummary, TraversalProgress<'a>> for TraversalTarget<'b> {
3271 fn cmp(&self, cursor_location: &TraversalProgress<'a>, _: &()) -> Ordering {
3272 match self {
3273 TraversalTarget::Path(path) => path.cmp(&cursor_location.max_path),
3274 TraversalTarget::PathSuccessor(path) => {
3275 if !cursor_location.max_path.starts_with(path) {
3276 Ordering::Equal
3277 } else {
3278 Ordering::Greater
3279 }
3280 }
3281 TraversalTarget::Count {
3282 count,
3283 include_dirs,
3284 include_ignored,
3285 } => Ord::cmp(
3286 count,
3287 &cursor_location.count(*include_dirs, *include_ignored),
3288 ),
3289 }
3290 }
3291}
3292
/// Iterates over the immediate children of `parent_path`, skipping each
/// child's descendants via `Traversal::advance_to_sibling`.
struct ChildEntriesIter<'a> {
    parent_path: &'a Path,
    traversal: Traversal<'a>,
}
3297
3298impl<'a> Iterator for ChildEntriesIter<'a> {
3299 type Item = &'a Entry;
3300
3301 fn next(&mut self) -> Option<Self::Item> {
3302 if let Some(item) = self.traversal.entry() {
3303 if item.path.starts_with(&self.parent_path) {
3304 self.traversal.advance_to_sibling();
3305 return Some(item);
3306 }
3307 }
3308 None
3309 }
3310}
3311
/// Serializes an entry for transmission to remote collaborators.
impl<'a> From<&'a Entry> for proto::Entry {
    fn from(entry: &'a Entry) -> Self {
        Self {
            id: entry.id.to_proto(),
            is_dir: entry.is_dir(),
            path: entry.path.to_string_lossy().into(),
            inode: entry.inode,
            mtime: Some(entry.mtime.into()),
            is_symlink: entry.is_symlink,
            is_ignored: entry.is_ignored,
        }
    }
}
3325
3326impl<'a> TryFrom<(&'a CharBag, proto::Entry)> for Entry {
3327 type Error = anyhow::Error;
3328
3329 fn try_from((root_char_bag, entry): (&'a CharBag, proto::Entry)) -> Result<Self> {
3330 if let Some(mtime) = entry.mtime {
3331 let kind = if entry.is_dir {
3332 EntryKind::Dir
3333 } else {
3334 let mut char_bag = *root_char_bag;
3335 char_bag.extend(entry.path.chars().map(|c| c.to_ascii_lowercase()));
3336 EntryKind::File(char_bag)
3337 };
3338 let path: Arc<Path> = PathBuf::from(entry.path).into();
3339 Ok(Entry {
3340 id: ProjectEntryId::from_proto(entry.id),
3341 kind,
3342 path,
3343 inode: entry.inode,
3344 mtime: mtime.into(),
3345 is_symlink: entry.is_symlink,
3346 is_ignored: entry.is_ignored,
3347 })
3348 } else {
3349 Err(anyhow!(
3350 "missing mtime in remote worktree entry {:?}",
3351 entry.path
3352 ))
3353 }
3354 }
3355}
3356
3357#[cfg(test)]
3358mod tests {
3359 use super::*;
3360 use fs::{FakeFs, RealFs};
3361 use gpui::{executor::Deterministic, TestAppContext};
3362 use pretty_assertions::assert_eq;
3363 use rand::prelude::*;
3364 use serde_json::json;
3365 use std::{env, fmt::Write};
3366 use util::{http::FakeHttpClient, test::temp_tree};
3367
    /// Verifies that entry traversal respects gitignore: `a/b` is ignored by
    /// the root `.gitignore` and must only appear when ignored entries are
    /// requested.
    #[gpui::test]
    async fn test_traversal(cx: &mut TestAppContext) {
        let fs = FakeFs::new(cx.background());
        fs.insert_tree(
            "/root",
            json!({
                ".gitignore": "a/b\n",
                "a": {
                    "b": "",
                    "c": "",
                }
            }),
        )
        .await;

        let http_client = FakeHttpClient::with_404_response();
        let client = cx.read(|cx| Client::new(http_client, cx));

        let tree = Worktree::local(
            client,
            Path::new("/root"),
            true,
            fs,
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;

        tree.read_with(cx, |tree, _| {
            // Without ignored entries, `a/b` is filtered out.
            assert_eq!(
                tree.entries(false)
                    .map(|entry| entry.path.as_ref())
                    .collect::<Vec<_>>(),
                vec![
                    Path::new(""),
                    Path::new(".gitignore"),
                    Path::new("a"),
                    Path::new("a/c"),
                ]
            );
            // With ignored entries, `a/b` is included.
            assert_eq!(
                tree.entries(true)
                    .map(|entry| entry.path.as_ref())
                    .collect::<Vec<_>>(),
                vec![
                    Path::new(""),
                    Path::new(".gitignore"),
                    Path::new("a"),
                    Path::new("a/b"),
                    Path::new("a/c"),
                ]
            );
        })
    }
3425
    #[gpui::test(iterations = 10)]
    async fn test_circular_symlinks(executor: Arc<Deterministic>, cx: &mut TestAppContext) {
        // Symlinks pointing back up the tree ("lib/a/lib" -> "..") must show
        // up as entries without being traversed into, so the initial scan
        // terminates instead of recursing forever.
        let fs = FakeFs::new(cx.background());
        fs.insert_tree(
            "/root",
            json!({
                "lib": {
                    "a": {
                        "a.txt": ""
                    },
                    "b": {
                        "b.txt": ""
                    }
                }
            }),
        )
        .await;
        fs.insert_symlink("/root/lib/a/lib", "..".into()).await;
        fs.insert_symlink("/root/lib/b/lib", "..".into()).await;

        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
        let tree = Worktree::local(
            client,
            Path::new("/root"),
            true,
            fs.clone(),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();

        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;

        // The symlinks appear as leaf entries; no children were scanned
        // beneath them.
        tree.read_with(cx, |tree, _| {
            assert_eq!(
                tree.entries(false)
                    .map(|entry| entry.path.as_ref())
                    .collect::<Vec<_>>(),
                vec![
                    Path::new(""),
                    Path::new("lib"),
                    Path::new("lib/a"),
                    Path::new("lib/a/a.txt"),
                    Path::new("lib/a/lib"),
                    Path::new("lib/b"),
                    Path::new("lib/b/b.txt"),
                    Path::new("lib/b/lib"),
                ]
            );
        });

        // Renaming a symlink must be picked up by the rescan without
        // disturbing the other entries.
        fs.rename(
            Path::new("/root/lib/a/lib"),
            Path::new("/root/lib/a/lib-2"),
            Default::default(),
        )
        .await
        .unwrap();
        executor.run_until_parked();
        tree.read_with(cx, |tree, _| {
            assert_eq!(
                tree.entries(false)
                    .map(|entry| entry.path.as_ref())
                    .collect::<Vec<_>>(),
                vec![
                    Path::new(""),
                    Path::new("lib"),
                    Path::new("lib/a"),
                    Path::new("lib/a/a.txt"),
                    Path::new("lib/a/lib-2"),
                    Path::new("lib/b"),
                    Path::new("lib/b/b.txt"),
                    Path::new("lib/b/lib"),
                ]
            );
        });
    }
3505
    #[gpui::test]
    async fn test_rescan_with_gitignore(cx: &mut TestAppContext) {
        // Uses the real filesystem. The worktree root is "tree", but a
        // .gitignore in the *parent* directory (outside the worktree) must
        // still apply to entries inside it, both during the initial scan and
        // for files created afterwards.
        let parent_dir = temp_tree(json!({
            ".gitignore": "ancestor-ignored-file1\nancestor-ignored-file2\n",
            "tree": {
                ".git": {},
                ".gitignore": "ignored-dir\n",
                "tracked-dir": {
                    "tracked-file1": "",
                    "ancestor-ignored-file1": "",
                },
                "ignored-dir": {
                    "ignored-file1": ""
                }
            }
        }));
        let dir = parent_dir.path().join("tree");

        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));

        let tree = Worktree::local(
            client,
            dir.as_path(),
            true,
            Arc::new(RealFs),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;
        tree.flush_fs_events(cx).await;
        // Initial scan: ignore status reflects both the worktree's own
        // .gitignore and the ancestor .gitignore.
        cx.read(|cx| {
            let tree = tree.read(cx);
            assert!(
                !tree
                    .entry_for_path("tracked-dir/tracked-file1")
                    .unwrap()
                    .is_ignored
            );
            assert!(
                tree.entry_for_path("tracked-dir/ancestor-ignored-file1")
                    .unwrap()
                    .is_ignored
            );
            assert!(
                tree.entry_for_path("ignored-dir/ignored-file1")
                    .unwrap()
                    .is_ignored
            );
        });

        // Create new files after the scan; the rescan must assign them the
        // correct ignore status too.
        std::fs::write(dir.join("tracked-dir/tracked-file2"), "").unwrap();
        std::fs::write(dir.join("tracked-dir/ancestor-ignored-file2"), "").unwrap();
        std::fs::write(dir.join("ignored-dir/ignored-file2"), "").unwrap();
        tree.flush_fs_events(cx).await;
        cx.read(|cx| {
            let tree = tree.read(cx);
            assert!(
                !tree
                    .entry_for_path("tracked-dir/tracked-file2")
                    .unwrap()
                    .is_ignored
            );
            assert!(
                tree.entry_for_path("tracked-dir/ancestor-ignored-file2")
                    .unwrap()
                    .is_ignored
            );
            assert!(
                tree.entry_for_path("ignored-dir/ignored-file2")
                    .unwrap()
                    .is_ignored
            );
            // The .git directory itself is always treated as ignored.
            assert!(tree.entry_for_path(".git").unwrap().is_ignored);
        });
    }
3584
    #[gpui::test]
    async fn test_git_repository_for_path(cx: &mut TestAppContext) {
        // Verifies `repo_for`: paths map to the repository of their nearest
        // containing .git directory (a nested dependency repo shadows the
        // outer one), changes inside .git bump the repo's scan id, and
        // removing .git makes the repo disappear.
        let root = temp_tree(json!({
            "dir1": {
                ".git": {},
                "deps": {
                    "dep1": {
                        ".git": {},
                        "src": {
                            "a.txt": ""
                        }
                    }
                },
                "src": {
                    "b.txt": ""
                }
            },
            "c.txt": "",
        }));

        let http_client = FakeHttpClient::with_404_response();
        let client = cx.read(|cx| Client::new(http_client, cx));
        let tree = Worktree::local(
            client,
            root.path(),
            true,
            Arc::new(RealFs),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();

        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;
        tree.flush_fs_events(cx).await;

        tree.read_with(cx, |tree, _cx| {
            let tree = tree.as_local().unwrap();

            // "c.txt" sits outside any repository.
            assert!(tree.repo_for("c.txt".as_ref()).is_none());

            // "dir1/src/b.txt" belongs to the outer repo rooted at "dir1".
            let entry = tree.repo_for("dir1/src/b.txt".as_ref()).unwrap();
            assert_eq!(
                entry
                    .work_directory(tree)
                    .map(|directory| directory.as_ref().to_owned()),
                Some(Path::new("dir1").to_owned())
            );

            // Files under the nested dependency repo resolve to it, not to
            // the enclosing "dir1" repo.
            let entry = tree.repo_for("dir1/deps/dep1/src/a.txt".as_ref()).unwrap();
            assert_eq!(
                entry
                    .work_directory(tree)
                    .map(|directory| directory.as_ref().to_owned()),
                Some(Path::new("dir1/deps/dep1").to_owned())
            );
        });

        let original_scan_id = tree.read_with(cx, |tree, _cx| {
            let tree = tree.as_local().unwrap();
            let entry = tree.repo_for("dir1/src/b.txt".as_ref()).unwrap();
            entry.scan_id
        });

        // Touching a file inside .git must register as a repository change.
        std::fs::write(root.path().join("dir1/.git/random_new_file"), "hello").unwrap();
        tree.flush_fs_events(cx).await;

        tree.read_with(cx, |tree, _cx| {
            let tree = tree.as_local().unwrap();
            let new_scan_id = {
                let entry = tree.repo_for("dir1/src/b.txt".as_ref()).unwrap();
                entry.scan_id
            };
            assert_ne!(
                original_scan_id, new_scan_id,
                "original {original_scan_id}, new {new_scan_id}"
            );
        });

        // Deleting .git removes the repository association entirely.
        std::fs::remove_dir_all(root.path().join("dir1/.git")).unwrap();
        tree.flush_fs_events(cx).await;

        tree.read_with(cx, |tree, _cx| {
            let tree = tree.as_local().unwrap();

            assert!(tree.repo_for("dir1/src/b.txt".as_ref()).is_none());
        });
    }
3674
3675 #[test]
3676 fn test_changed_repos() {
3677 fn fake_entry(work_dir_id: usize, scan_id: usize) -> RepositoryEntry {
3678 RepositoryEntry {
3679 scan_id,
3680 work_directory: ProjectEntryId(work_dir_id).into(),
3681 branch: None,
3682 }
3683 }
3684
3685 let mut prev_repos = TreeMap::<RepositoryWorkDirectory, RepositoryEntry>::default();
3686 prev_repos.insert(
3687 RepositoryWorkDirectory(Path::new("don't-care-1").into()),
3688 fake_entry(1, 0),
3689 );
3690 prev_repos.insert(
3691 RepositoryWorkDirectory(Path::new("don't-care-2").into()),
3692 fake_entry(2, 0),
3693 );
3694 prev_repos.insert(
3695 RepositoryWorkDirectory(Path::new("don't-care-3").into()),
3696 fake_entry(3, 0),
3697 );
3698
3699 let mut new_repos = TreeMap::<RepositoryWorkDirectory, RepositoryEntry>::default();
3700 new_repos.insert(
3701 RepositoryWorkDirectory(Path::new("don't-care-4").into()),
3702 fake_entry(2, 1),
3703 );
3704 new_repos.insert(
3705 RepositoryWorkDirectory(Path::new("don't-care-5").into()),
3706 fake_entry(3, 0),
3707 );
3708 new_repos.insert(
3709 RepositoryWorkDirectory(Path::new("don't-care-6").into()),
3710 fake_entry(4, 0),
3711 );
3712
3713 let res = LocalWorktree::changed_repos(&prev_repos, &new_repos);
3714
3715 // Deletion retained
3716 assert!(res
3717 .iter()
3718 .find(|repo| repo.work_directory.0 .0 == 1 && repo.scan_id == 0)
3719 .is_some());
3720
3721 // Update retained
3722 assert!(res
3723 .iter()
3724 .find(|repo| repo.work_directory.0 .0 == 2 && repo.scan_id == 1)
3725 .is_some());
3726
3727 // Addition retained
3728 assert!(res
3729 .iter()
3730 .find(|repo| repo.work_directory.0 .0 == 4 && repo.scan_id == 0)
3731 .is_some());
3732
3733 // Nochange, not retained
3734 assert!(res
3735 .iter()
3736 .find(|repo| repo.work_directory.0 .0 == 3 && repo.scan_id == 0)
3737 .is_none());
3738 }
3739
    #[gpui::test]
    async fn test_write_file(cx: &mut TestAppContext) {
        // Writing files through the worktree must create entries for them,
        // including inside gitignored directories, with the correct
        // `is_ignored` flag on each.
        let dir = temp_tree(json!({
            ".git": {},
            ".gitignore": "ignored-dir\n",
            "tracked-dir": {},
            "ignored-dir": {}
        }));

        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));

        let tree = Worktree::local(
            client,
            dir.path(),
            true,
            Arc::new(RealFs),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;
        tree.flush_fs_events(cx).await;

        tree.update(cx, |tree, cx| {
            tree.as_local().unwrap().write_file(
                Path::new("tracked-dir/file.txt"),
                "hello".into(),
                Default::default(),
                cx,
            )
        })
        .await
        .unwrap();
        tree.update(cx, |tree, cx| {
            tree.as_local().unwrap().write_file(
                Path::new("ignored-dir/file.txt"),
                "world".into(),
                Default::default(),
                cx,
            )
        })
        .await
        .unwrap();

        // Both files exist as entries; only the one inside "ignored-dir" is
        // flagged ignored.
        tree.read_with(cx, |tree, _| {
            let tracked = tree.entry_for_path("tracked-dir/file.txt").unwrap();
            let ignored = tree.entry_for_path("ignored-dir/file.txt").unwrap();
            assert!(!tracked.is_ignored);
            assert!(ignored.is_ignored);
        });
    }
3793
    #[gpui::test(iterations = 30)]
    async fn test_create_directory_during_initial_scan(cx: &mut TestAppContext) {
        // Creating an entry ("a/e", whose parent "a" does not exist on disk)
        // while the initial scan is still running must still produce a
        // consistent snapshot, and that snapshot must be reproducible on a
        // replica via `build_update`/`apply_remote_update`.
        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));

        let fs = FakeFs::new(cx.background());
        fs.insert_tree(
            "/root",
            json!({
                "b": {},
                "c": {},
                "d": {},
            }),
        )
        .await;

        let tree = Worktree::local(
            client,
            "/root".as_ref(),
            true,
            fs,
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();

        // Snapshot taken before the new entry exists; acts as the replica's
        // starting state.
        let mut snapshot1 = tree.update(cx, |tree, _| tree.as_local().unwrap().snapshot());

        let entry = tree
            .update(cx, |tree, cx| {
                tree.as_local_mut()
                    .unwrap()
                    .create_entry("a/e".as_ref(), true, cx)
            })
            .await
            .unwrap();
        assert!(entry.is_dir());

        cx.foreground().run_until_parked();
        tree.read_with(cx, |tree, _| {
            assert_eq!(tree.entry_for_path("a/e").unwrap().kind, EntryKind::Dir);
        });

        // The replica snapshot, after applying the diff, must match the
        // worktree's current snapshot exactly.
        let snapshot2 = tree.update(cx, |tree, _| tree.as_local().unwrap().snapshot());
        let update = snapshot2.build_update(&snapshot1, 0, 0, true);
        snapshot1.apply_remote_update(update).unwrap();
        assert_eq!(snapshot1.to_vec(true), snapshot2.to_vec(true),);
    }
3842
    #[gpui::test(iterations = 100)]
    async fn test_random_worktree_operations_during_initial_scan(
        cx: &mut TestAppContext,
        mut rng: StdRng,
    ) {
        // Fuzz test: performs random worktree operations (create / rename /
        // delete / write) while the initial scan is still in progress, and
        // checks that snapshot invariants hold and that a replica snapshot
        // can always be brought up to date via remote updates.
        let operations = env::var("OPERATIONS")
            .map(|o| o.parse().unwrap())
            .unwrap_or(5);
        let initial_entries = env::var("INITIAL_ENTRIES")
            .map(|o| o.parse().unwrap())
            .unwrap_or(20);

        // Seed the fake filesystem with a random tree.
        let root_dir = Path::new("/test");
        let fs = FakeFs::new(cx.background()) as Arc<dyn Fs>;
        fs.as_fake().insert_tree(root_dir, json!({})).await;
        for _ in 0..initial_entries {
            randomly_mutate_fs(&fs, root_dir, 1.0, &mut rng).await;
        }
        log::info!("generated initial tree");

        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
        let worktree = Worktree::local(
            client.clone(),
            root_dir,
            true,
            fs.clone(),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();

        // Replica snapshot captured before the scan has completed.
        let mut snapshot = worktree.update(cx, |tree, _| tree.as_local().unwrap().snapshot());

        for _ in 0..operations {
            worktree
                .update(cx, |worktree, cx| {
                    randomly_mutate_worktree(worktree, &mut rng, cx)
                })
                .await
                .log_err();
            // Invariants must hold after every operation, even mid-scan.
            worktree.read_with(cx, |tree, _| {
                tree.as_local().unwrap().snapshot.check_invariants()
            });

            // Occasionally sync the replica and verify it converges.
            if rng.gen_bool(0.6) {
                let new_snapshot =
                    worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
                let update = new_snapshot.build_update(&snapshot, 0, 0, true);
                snapshot.apply_remote_update(update.clone()).unwrap();
                assert_eq!(
                    snapshot.to_vec(true),
                    new_snapshot.to_vec(true),
                    "incorrect snapshot after update {:?}",
                    update
                );
            }
        }

        // Let the initial scan finish, then re-check invariants and do a
        // final replica sync.
        worktree
            .update(cx, |tree, _| tree.as_local_mut().unwrap().scan_complete())
            .await;
        worktree.read_with(cx, |tree, _| {
            tree.as_local().unwrap().snapshot.check_invariants()
        });

        let new_snapshot = worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
        let update = new_snapshot.build_update(&snapshot, 0, 0, true);
        snapshot.apply_remote_update(update.clone()).unwrap();
        assert_eq!(
            snapshot.to_vec(true),
            new_snapshot.to_vec(true),
            "incorrect snapshot after update {:?}",
            update
        );
    }
3919
3920 #[gpui::test(iterations = 100)]
3921 async fn test_random_worktree_changes(cx: &mut TestAppContext, mut rng: StdRng) {
3922 let operations = env::var("OPERATIONS")
3923 .map(|o| o.parse().unwrap())
3924 .unwrap_or(40);
3925 let initial_entries = env::var("INITIAL_ENTRIES")
3926 .map(|o| o.parse().unwrap())
3927 .unwrap_or(20);
3928
3929 let root_dir = Path::new("/test");
3930 let fs = FakeFs::new(cx.background()) as Arc<dyn Fs>;
3931 fs.as_fake().insert_tree(root_dir, json!({})).await;
3932 for _ in 0..initial_entries {
3933 randomly_mutate_fs(&fs, root_dir, 1.0, &mut rng).await;
3934 }
3935 log::info!("generated initial tree");
3936
3937 let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
3938 let worktree = Worktree::local(
3939 client.clone(),
3940 root_dir,
3941 true,
3942 fs.clone(),
3943 Default::default(),
3944 &mut cx.to_async(),
3945 )
3946 .await
3947 .unwrap();
3948
3949 worktree
3950 .update(cx, |tree, _| tree.as_local_mut().unwrap().scan_complete())
3951 .await;
3952
3953 // After the initial scan is complete, the `UpdatedEntries` event can
3954 // be used to follow along with all changes to the worktree's snapshot.
3955 worktree.update(cx, |tree, cx| {
3956 let mut paths = tree
3957 .as_local()
3958 .unwrap()
3959 .paths()
3960 .cloned()
3961 .collect::<Vec<_>>();
3962
3963 cx.subscribe(&worktree, move |tree, _, event, _| {
3964 if let Event::UpdatedEntries(changes) = event {
3965 for (path, change_type) in changes.iter() {
3966 let path = path.clone();
3967 let ix = match paths.binary_search(&path) {
3968 Ok(ix) | Err(ix) => ix,
3969 };
3970 match change_type {
3971 PathChange::Added => {
3972 assert_ne!(paths.get(ix), Some(&path));
3973 paths.insert(ix, path);
3974 }
3975 PathChange::Removed => {
3976 assert_eq!(paths.get(ix), Some(&path));
3977 paths.remove(ix);
3978 }
3979 PathChange::Updated => {
3980 assert_eq!(paths.get(ix), Some(&path));
3981 }
3982 PathChange::AddedOrUpdated => {
3983 if paths[ix] != path {
3984 paths.insert(ix, path);
3985 }
3986 }
3987 }
3988 }
3989 let new_paths = tree.paths().cloned().collect::<Vec<_>>();
3990 assert_eq!(paths, new_paths, "incorrect changes: {:?}", changes);
3991 }
3992 })
3993 .detach();
3994 });
3995
3996 let mut snapshots = Vec::new();
3997 let mut mutations_len = operations;
3998 while mutations_len > 1 {
3999 randomly_mutate_fs(&fs, root_dir, 1.0, &mut rng).await;
4000 let buffered_event_count = fs.as_fake().buffered_event_count().await;
4001 if buffered_event_count > 0 && rng.gen_bool(0.3) {
4002 let len = rng.gen_range(0..=buffered_event_count);
4003 log::info!("flushing {} events", len);
4004 fs.as_fake().flush_events(len).await;
4005 } else {
4006 randomly_mutate_fs(&fs, root_dir, 0.6, &mut rng).await;
4007 mutations_len -= 1;
4008 }
4009
4010 cx.foreground().run_until_parked();
4011 if rng.gen_bool(0.2) {
4012 log::info!("storing snapshot {}", snapshots.len());
4013 let snapshot =
4014 worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
4015 snapshots.push(snapshot);
4016 }
4017 }
4018
4019 log::info!("quiescing");
4020 fs.as_fake().flush_events(usize::MAX).await;
4021 cx.foreground().run_until_parked();
4022 let snapshot = worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
4023 snapshot.check_invariants();
4024
4025 {
4026 let new_worktree = Worktree::local(
4027 client.clone(),
4028 root_dir,
4029 true,
4030 fs.clone(),
4031 Default::default(),
4032 &mut cx.to_async(),
4033 )
4034 .await
4035 .unwrap();
4036 new_worktree
4037 .update(cx, |tree, _| tree.as_local_mut().unwrap().scan_complete())
4038 .await;
4039 let new_snapshot =
4040 new_worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
4041 assert_eq!(snapshot.to_vec(true), new_snapshot.to_vec(true));
4042 }
4043
4044 for (i, mut prev_snapshot) in snapshots.into_iter().enumerate() {
4045 let include_ignored = rng.gen::<bool>();
4046 if !include_ignored {
4047 let mut entries_by_path_edits = Vec::new();
4048 let mut entries_by_id_edits = Vec::new();
4049 for entry in prev_snapshot
4050 .entries_by_id
4051 .cursor::<()>()
4052 .filter(|e| e.is_ignored)
4053 {
4054 entries_by_path_edits.push(Edit::Remove(PathKey(entry.path.clone())));
4055 entries_by_id_edits.push(Edit::Remove(entry.id));
4056 }
4057
4058 prev_snapshot
4059 .entries_by_path
4060 .edit(entries_by_path_edits, &());
4061 prev_snapshot.entries_by_id.edit(entries_by_id_edits, &());
4062 }
4063
4064 let update = snapshot.build_update(&prev_snapshot, 0, 0, include_ignored);
4065 prev_snapshot.apply_remote_update(update.clone()).unwrap();
4066 assert_eq!(
4067 prev_snapshot.to_vec(include_ignored),
4068 snapshot.to_vec(include_ignored),
4069 "wrong update for snapshot {i}. update: {:?}",
4070 update
4071 );
4072 }
4073 }
4074
    // Performs one random operation on the worktree through its public API:
    // ~1/3 delete an entry, ~1/3 rename an entry to a random location, and
    // the remainder create a child entry or overwrite a file's contents.
    // Returns a task that resolves when the operation has been applied.
    fn randomly_mutate_worktree(
        worktree: &mut Worktree,
        rng: &mut impl Rng,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<()>> {
        let worktree = worktree.as_local_mut().unwrap();
        let snapshot = worktree.snapshot();
        let entry = snapshot.entries(false).choose(rng).unwrap();

        match rng.gen_range(0_u32..100) {
            // Delete — but never the worktree root (the empty path).
            0..=33 if entry.path.as_ref() != Path::new("") => {
                log::info!("deleting entry {:?} ({})", entry.path, entry.id.0);
                worktree.delete_entry(entry.id, cx).unwrap()
            }
            // Rename to a random name under a randomly chosen directory.
            ..=66 if entry.path.as_ref() != Path::new("") => {
                let other_entry = snapshot.entries(false).choose(rng).unwrap();
                let new_parent_path = if other_entry.is_dir() {
                    other_entry.path.clone()
                } else {
                    other_entry.path.parent().unwrap().into()
                };
                let mut new_path = new_parent_path.join(gen_name(rng));
                // Avoid moving an entry into itself; fall back to a
                // root-level destination.
                if new_path.starts_with(&entry.path) {
                    new_path = gen_name(rng).into();
                }

                log::info!(
                    "renaming entry {:?} ({}) to {:?}",
                    entry.path,
                    entry.id.0,
                    new_path
                );
                let task = worktree.rename_entry(entry.id, new_path, cx).unwrap();
                cx.foreground().spawn(async move {
                    task.await?;
                    Ok(())
                })
            }
            // Create a child (for directories) or overwrite contents (for
            // files).
            _ => {
                let task = if entry.is_dir() {
                    let child_path = entry.path.join(gen_name(rng));
                    let is_dir = rng.gen_bool(0.3);
                    log::info!(
                        "creating {} at {:?}",
                        if is_dir { "dir" } else { "file" },
                        child_path,
                    );
                    worktree.create_entry(child_path, is_dir, cx)
                } else {
                    log::info!("overwriting file {:?} ({})", entry.path, entry.id.0);
                    worktree.write_file(entry.path.clone(), "".into(), Default::default(), cx)
                };
                cx.foreground().spawn(async move {
                    task.await?;
                    Ok(())
                })
            }
        }
    }
4134
    // Applies one random mutation directly to the fake filesystem under
    // `root_path`: creates a file or directory (with probability
    // `insertion_probability`), occasionally writes a random `.gitignore`,
    // or renames/deletes an existing entry.
    async fn randomly_mutate_fs(
        fs: &Arc<dyn Fs>,
        root_path: &Path,
        insertion_probability: f64,
        rng: &mut impl Rng,
    ) {
        // Partition the existing paths under `root_path` into files and
        // directories (the root itself ends up in `dirs`).
        let mut files = Vec::new();
        let mut dirs = Vec::new();
        for path in fs.as_fake().paths() {
            if path.starts_with(root_path) {
                if fs.is_file(&path).await {
                    files.push(path);
                } else {
                    dirs.push(path);
                }
            }
        }

        // Force an insertion when the tree is effectively empty (only the
        // root directory and no files), so the fuzzer always has material.
        if (files.is_empty() && dirs.len() == 1) || rng.gen_bool(insertion_probability) {
            let path = dirs.choose(rng).unwrap();
            let new_path = path.join(gen_name(rng));

            if rng.gen() {
                log::info!(
                    "creating dir {:?}",
                    new_path.strip_prefix(root_path).unwrap()
                );
                fs.create_dir(&new_path).await.unwrap();
            } else {
                log::info!(
                    "creating file {:?}",
                    new_path.strip_prefix(root_path).unwrap()
                );
                fs.create_file(&new_path, Default::default()).await.unwrap();
            }
        } else if rng.gen_bool(0.05) {
            // Write a `.gitignore` in a random directory, ignoring a random
            // subset of the files and subdirectories beneath it.
            let ignore_dir_path = dirs.choose(rng).unwrap();
            let ignore_path = ignore_dir_path.join(&*GITIGNORE);

            let subdirs = dirs
                .iter()
                .filter(|d| d.starts_with(&ignore_dir_path))
                .cloned()
                .collect::<Vec<_>>();
            let subfiles = files
                .iter()
                .filter(|d| d.starts_with(&ignore_dir_path))
                .cloned()
                .collect::<Vec<_>>();
            let files_to_ignore = {
                let len = rng.gen_range(0..=subfiles.len());
                subfiles.choose_multiple(rng, len)
            };
            // NOTE: the exclusive upper bound here means at least one subdir
            // (which includes `ignore_dir_path` itself) is never ignored.
            let dirs_to_ignore = {
                let len = rng.gen_range(0..subdirs.len());
                subdirs.choose_multiple(rng, len)
            };

            // One ignore pattern per line, relative to the ignore file's
            // directory.
            let mut ignore_contents = String::new();
            for path_to_ignore in files_to_ignore.chain(dirs_to_ignore) {
                writeln!(
                    ignore_contents,
                    "{}",
                    path_to_ignore
                        .strip_prefix(&ignore_dir_path)
                        .unwrap()
                        .to_str()
                        .unwrap()
                )
                .unwrap();
            }
            log::info!(
                "creating gitignore {:?} with contents:\n{}",
                ignore_path.strip_prefix(&root_path).unwrap(),
                ignore_contents
            );
            fs.save(
                &ignore_path,
                &ignore_contents.as_str().into(),
                Default::default(),
            )
            .await
            .unwrap();
        } else {
            // Rename or delete an existing entry. `dirs[1..]` skips the
            // first directory (the root), so the root is never moved or
            // removed.
            let old_path = {
                let file_path = files.choose(rng);
                let dir_path = dirs[1..].choose(rng);
                file_path.into_iter().chain(dir_path).choose(rng).unwrap()
            };

            let is_rename = rng.gen();
            if is_rename {
                // Pick a destination parent that is not inside the entry
                // being moved, to avoid moving a directory into itself.
                let new_path_parent = dirs
                    .iter()
                    .filter(|d| !d.starts_with(old_path))
                    .choose(rng)
                    .unwrap();

                // Sometimes replace an existing directory wholesale instead
                // of creating a new name beneath it.
                let overwrite_existing_dir =
                    !old_path.starts_with(&new_path_parent) && rng.gen_bool(0.3);
                let new_path = if overwrite_existing_dir {
                    fs.remove_dir(
                        &new_path_parent,
                        RemoveOptions {
                            recursive: true,
                            ignore_if_not_exists: true,
                        },
                    )
                    .await
                    .unwrap();
                    new_path_parent.to_path_buf()
                } else {
                    new_path_parent.join(gen_name(rng))
                };

                log::info!(
                    "renaming {:?} to {}{:?}",
                    old_path.strip_prefix(&root_path).unwrap(),
                    if overwrite_existing_dir {
                        "overwrite "
                    } else {
                        ""
                    },
                    new_path.strip_prefix(&root_path).unwrap()
                );
                fs.rename(
                    &old_path,
                    &new_path,
                    fs::RenameOptions {
                        overwrite: true,
                        ignore_if_exists: true,
                    },
                )
                .await
                .unwrap();
            } else if fs.is_file(&old_path).await {
                log::info!(
                    "deleting file {:?}",
                    old_path.strip_prefix(&root_path).unwrap()
                );
                fs.remove_file(old_path, Default::default()).await.unwrap();
            } else {
                log::info!(
                    "deleting dir {:?}",
                    old_path.strip_prefix(&root_path).unwrap()
                );
                fs.remove_dir(
                    &old_path,
                    RemoveOptions {
                        recursive: true,
                        ignore_if_not_exists: true,
                    },
                )
                .await
                .unwrap();
            }
        }
    }
4293
4294 fn gen_name(rng: &mut impl Rng) -> String {
4295 (0..6)
4296 .map(|_| rng.sample(rand::distributions::Alphanumeric))
4297 .map(char::from)
4298 .collect()
4299 }
4300
    impl LocalSnapshot {
        // Test-only consistency checks over the snapshot's internal state.
        fn check_invariants(&self) {
            // The two indices must describe the same set of (path, id) pairs.
            assert_eq!(
                self.entries_by_path
                    .cursor::<()>()
                    .map(|e| (&e.path, e.id))
                    .collect::<Vec<_>>(),
                self.entries_by_id
                    .cursor::<()>()
                    .map(|e| (&e.path, e.id))
                    .collect::<collections::BTreeSet<_>>()
                    .into_iter()
                    .collect::<Vec<_>>(),
                "entries_by_path and entries_by_id are inconsistent"
            );

            // The file iterators must visit exactly the file entries of
            // entries_by_path, in the same order, with/without ignored files.
            let mut files = self.files(true, 0);
            let mut visible_files = self.files(false, 0);
            for entry in self.entries_by_path.cursor::<()>() {
                if entry.is_file() {
                    assert_eq!(files.next().unwrap().inode, entry.inode);
                    if !entry.is_ignored {
                        assert_eq!(visible_files.next().unwrap().inode, entry.inode);
                    }
                }
            }

            assert!(files.next().is_none());
            assert!(visible_files.next().is_none());

            // Walk the tree via `child_entries`, inserting each directory's
            // children at the top of the stack so the resulting order is a
            // depth-first pre-order; it must match the cursor's path order.
            let mut bfs_paths = Vec::new();
            let mut stack = vec![Path::new("")];
            while let Some(path) = stack.pop() {
                bfs_paths.push(path);
                let ix = stack.len();
                for child_entry in self.child_entries(path) {
                    stack.insert(ix, &child_entry.path);
                }
            }

            let dfs_paths_via_iter = self
                .entries_by_path
                .cursor::<()>()
                .map(|e| e.path.as_ref())
                .collect::<Vec<_>>();
            assert_eq!(bfs_paths, dfs_paths_via_iter);

            // The `entries` traversal must agree with the raw cursor order.
            let dfs_paths_via_traversal = self
                .entries(true)
                .map(|e| e.path.as_ref())
                .collect::<Vec<_>>();
            assert_eq!(dfs_paths_via_traversal, dfs_paths_via_iter);

            // Every tracked .gitignore must correspond to real entries: its
            // parent directory and the ignore file itself.
            for ignore_parent_abs_path in self.ignores_by_parent_abs_path.keys() {
                let ignore_parent_path =
                    ignore_parent_abs_path.strip_prefix(&self.abs_path).unwrap();
                assert!(self.entry_for_path(&ignore_parent_path).is_some());
                assert!(self
                    .entry_for_path(ignore_parent_path.join(&*GITIGNORE))
                    .is_some());
            }
        }

        // Flattens the snapshot into sorted (path, inode, is_ignored)
        // tuples for order-insensitive comparison between snapshots.
        fn to_vec(&self, include_ignored: bool) -> Vec<(&Path, u64, bool)> {
            let mut paths = Vec::new();
            for entry in self.entries_by_path.cursor::<()>() {
                if include_ignored || !entry.is_ignored {
                    paths.push((entry.path.as_ref(), entry.inode, entry.is_ignored));
                }
            }
            paths.sort_by(|a, b| a.0.cmp(b.0));
            paths
        }
    }
4375}