1use crate::{
2 copy_recursive, ignore::IgnoreStack, DiagnosticSummary, ProjectEntryId, RemoveOptions,
3};
4use ::ignore::gitignore::{Gitignore, GitignoreBuilder};
5use anyhow::{anyhow, Context, Result};
6use client::{proto, Client};
7use clock::ReplicaId;
8use collections::{HashMap, VecDeque};
9use fs::{repository::GitRepository, Fs, LineEnding};
10use futures::{
11 channel::{
12 mpsc::{self, UnboundedSender},
13 oneshot,
14 },
15 select_biased,
16 task::Poll,
17 Stream, StreamExt,
18};
19use fuzzy::CharBag;
20use git::{DOT_GIT, GITIGNORE};
21use gpui::{executor, AppContext, AsyncAppContext, Entity, ModelContext, ModelHandle, Task};
22use language::{
23 proto::{
24 deserialize_fingerprint, deserialize_version, serialize_fingerprint, serialize_line_ending,
25 serialize_version,
26 },
27 Buffer, DiagnosticEntry, File as _, PointUtf16, Rope, RopeFingerprint, Unclipped,
28};
29use lsp::LanguageServerId;
30use parking_lot::Mutex;
31use postage::{
32 barrier,
33 prelude::{Sink as _, Stream as _},
34 watch,
35};
36use smol::channel::{self, Sender};
37use std::{
38 any::Any,
39 cmp::{self, Ordering},
40 convert::TryFrom,
41 ffi::OsStr,
42 fmt,
43 future::Future,
44 mem,
45 ops::{Deref, DerefMut},
46 path::{Path, PathBuf},
47 pin::Pin,
48 sync::{
49 atomic::{AtomicUsize, Ordering::SeqCst},
50 Arc,
51 },
52 time::{Duration, SystemTime},
53};
54use sum_tree::{Bias, Edit, SeekTarget, SumTree, TreeMap, TreeSet};
55use util::{paths::HOME, ResultExt, TryFutureExt};
56
/// Stable identifier for a worktree within a project.
/// Newtype over the worktree model's id (see `WorktreeId::from_usize(cx.model_id())`
/// in `Worktree::local`).
#[derive(Copy, Clone, PartialEq, Eq, Debug, Hash, PartialOrd, Ord)]
pub struct WorktreeId(usize);
59
/// A directory tree tracked by the project: either scanned from the local
/// filesystem, or mirrored from a remote collaborator over RPC.
pub enum Worktree {
    Local(LocalWorktree),
    Remote(RemoteWorktree),
}
64
/// A worktree backed by the local filesystem, kept up to date by a
/// background scanner task.
pub struct LocalWorktree {
    snapshot: LocalSnapshot,
    /// Sends batches of paths for the background scanner to rescan; the
    /// barrier sender is dropped (releasing waiters) once they are processed.
    path_changes_tx: channel::Sender<(Vec<PathBuf>, barrier::Sender)>,
    /// Watch channel flipped by `ScanState` messages; see `scan_complete`.
    is_scanning: (watch::Sender<bool>, watch::Receiver<bool>),
    _background_scanner_task: Task<()>,
    /// Present while this worktree is shared with collaborators.
    share: Option<ShareState>,
    /// Full diagnostics per path, as a vector sorted by language server id
    /// (kept sorted by `update_diagnostics` for binary search).
    diagnostics: HashMap<
        Arc<Path>,
        Vec<(
            LanguageServerId,
            Vec<DiagnosticEntry<Unclipped<PointUtf16>>>,
        )>,
    >,
    /// Error/warning counts per path, per language server.
    diagnostic_summaries: HashMap<Arc<Path>, HashMap<LanguageServerId, DiagnosticSummary>>,
    client: Arc<Client>,
    fs: Arc<dyn Fs>,
    visible: bool,
}
83
/// A worktree mirrored from a remote collaborator.
pub struct RemoteWorktree {
    /// Latest snapshot applied on the main thread.
    snapshot: Snapshot,
    /// Snapshot mutated by the background task that applies remote updates.
    background_snapshot: Arc<Mutex<Snapshot>>,
    project_id: u64,
    client: Arc<Client>,
    /// Incoming `UpdateWorktree` messages; `None` after `disconnected_from_host`.
    updates_tx: Option<UnboundedSender<proto::UpdateWorktree>>,
    /// Tasks waiting for a given scan id to be observed; drained from the
    /// front as snapshots arrive (presumably kept in ascending scan-id order —
    /// TODO confirm at the push site).
    snapshot_subscriptions: VecDeque<(usize, oneshot::Sender<()>)>,
    replica_id: ReplicaId,
    diagnostic_summaries: HashMap<Arc<Path>, HashMap<LanguageServerId, DiagnosticSummary>>,
    visible: bool,
    disconnected: bool,
}
96
/// An immutable view of a worktree's entries, cloned freely between the
/// foreground thread, background tasks, and collaborators.
#[derive(Clone)]
pub struct Snapshot {
    id: WorktreeId,
    abs_path: Arc<Path>,
    /// Name of the root entry, used for the purpose of fuzzy matching.
    root_name: String,
    root_char_bag: CharBag,
    /// Entries ordered by path.
    entries_by_path: SumTree<Entry>,
    /// Index of the same entries keyed by id.
    entries_by_id: SumTree<PathEntry>,
    repository_entries: TreeMap<RepositoryWorkDirectory, RepositoryEntry>,

    /// A number that increases every time the worktree begins scanning
    /// a set of paths from the filesystem. This scanning could be caused
    /// by some operation performed on the worktree, such as reading or
    /// writing a file, or by an event reported by the filesystem.
    scan_id: usize,

    /// The latest scan id that has completed, and whose preceding scans
    /// have all completed. The current `scan_id` could be more than one
    /// greater than the `completed_scan_id` if operations are performed
    /// on the worktree while it is processing a file-system event.
    completed_scan_id: usize,
}
119
/// State of one git repository, as recorded in a worktree snapshot.
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct RepositoryEntry {
    /// Scan id associated with this repository state; compared in
    /// `LocalWorktree::changed_repos` to detect changes.
    pub(crate) scan_id: usize,
    pub(crate) work_directory: WorkDirectoryEntry,
    /// Current branch name, if known.
    pub(crate) branch: Option<Arc<str>>,
}
126
127impl RepositoryEntry {
128 pub fn branch(&self) -> Option<Arc<str>> {
129 self.branch.clone()
130 }
131
132 pub fn work_directory_id(&self) -> ProjectEntryId {
133 *self.work_directory
134 }
135
136 pub fn work_directory(&self, snapshot: &Snapshot) -> Option<RepositoryWorkDirectory> {
137 snapshot
138 .entry_for_id(self.work_directory_id())
139 .map(|entry| RepositoryWorkDirectory(entry.path.clone()))
140 }
141
142 pub(crate) fn contains(&self, snapshot: &Snapshot, path: &Path) -> bool {
143 self.work_directory.contains(snapshot, path)
144 }
145}
146
147impl From<&RepositoryEntry> for proto::RepositoryEntry {
148 fn from(value: &RepositoryEntry) -> Self {
149 proto::RepositoryEntry {
150 scan_id: value.scan_id as u64,
151 work_directory_id: value.work_directory.to_proto(),
152 branch: value.branch.as_ref().map(|str| str.to_string()),
153 }
154 }
155}
156
/// This path corresponds to the 'content path' (the folder that contains the .git)
/// — stored relative to the worktree root (it is built from `Entry::path`).
#[derive(Clone, Debug, Ord, PartialOrd, Eq, PartialEq)]
pub struct RepositoryWorkDirectory(Arc<Path>);
160
161impl Default for RepositoryWorkDirectory {
162 fn default() -> Self {
163 RepositoryWorkDirectory(Arc::from(Path::new("")))
164 }
165}
166
/// Newtype over the `ProjectEntryId` of a repository's working directory.
#[derive(Clone, Debug, Ord, PartialOrd, Eq, PartialEq)]
pub struct WorkDirectoryEntry(ProjectEntryId);
169
170impl WorkDirectoryEntry {
171 // Note that these paths should be relative to the worktree root.
172 pub(crate) fn contains(&self, snapshot: &Snapshot, path: &Path) -> bool {
173 snapshot
174 .entry_for_id(self.0)
175 .map(|entry| path.starts_with(&entry.path))
176 .unwrap_or(false)
177 }
178
179 pub(crate) fn relativize(&self, worktree: &Snapshot, path: &Path) -> Option<RepoPath> {
180 worktree.entry_for_id(self.0).and_then(|entry| {
181 path.strip_prefix(&entry.path)
182 .ok()
183 .map(move |path| RepoPath(path.to_owned()))
184 })
185 }
186}
187
impl Deref for WorkDirectoryEntry {
    type Target = ProjectEntryId;

    // Exposes the wrapped project entry id directly.
    fn deref(&self) -> &Self::Target {
        &self.0
    }
}
195
196impl<'a> From<ProjectEntryId> for WorkDirectoryEntry {
197 fn from(value: ProjectEntryId) -> Self {
198 WorkDirectoryEntry(value)
199 }
200}
201
/// A path relative to a git repository's working directory
/// (produced by `WorkDirectoryEntry::relativize`).
#[derive(Clone, Debug, Ord, PartialOrd, Eq, PartialEq)]
pub struct RepoPath(PathBuf);
204
205impl AsRef<Path> for RepoPath {
206 fn as_ref(&self) -> &Path {
207 self.0.as_ref()
208 }
209}
210
impl Deref for RepoPath {
    type Target = PathBuf;

    // Exposes `PathBuf` methods directly on `RepoPath`.
    fn deref(&self) -> &Self::Target {
        &self.0
    }
}
218
impl AsRef<Path> for RepositoryWorkDirectory {
    // Borrows the inner `Arc<Path>` as a `Path`.
    fn as_ref(&self) -> &Path {
        self.0.as_ref()
    }
}
224
/// A `Snapshot` plus the local-only state the scanner needs to maintain it.
#[derive(Debug, Clone)]
pub struct LocalSnapshot {
    /// Parsed `.gitignore` files keyed by the absolute path of the directory
    /// containing them; the `usize` is presumably the scan id at which each
    /// was loaded — TODO confirm at the insertion site.
    ignores_by_parent_abs_path: HashMap<Arc<Path>, (Arc<Gitignore>, usize)>,
    // The ProjectEntryId corresponds to the entry for the .git dir
    // work_directory_id
    git_repositories: TreeMap<ProjectEntryId, LocalRepositoryEntry>,
    removed_entry_ids: HashMap<u64, ProjectEntryId>,
    /// Shared counter used to allocate ids for newly discovered entries.
    next_entry_id: Arc<AtomicUsize>,
    snapshot: Snapshot,
}
235
/// A handle to an open local git repository plus the location of its `.git`.
#[derive(Debug, Clone)]
pub struct LocalRepositoryEntry {
    /// Shared handle to the underlying repository implementation.
    pub(crate) repo_ptr: Arc<Mutex<dyn GitRepository>>,
    /// Path to the actual .git folder.
    /// Note: if .git is a file, this points to the folder indicated by the .git file
    pub(crate) git_dir_path: Arc<Path>,
}
243
impl LocalRepositoryEntry {
    // Note that this path should be relative to the worktree root.
    /// Whether `path` lies inside this repository's `.git` directory.
    pub(crate) fn in_dot_git(&self, path: &Path) -> bool {
        path.starts_with(self.git_dir_path.as_ref())
    }
}
250
impl Deref for LocalSnapshot {
    type Target = Snapshot;

    // Lets a `LocalSnapshot` be used anywhere a `Snapshot` is expected.
    fn deref(&self) -> &Self::Target {
        &self.snapshot
    }
}
258
impl DerefMut for LocalSnapshot {
    // Mutable access to the wrapped `Snapshot`.
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.snapshot
    }
}
264
/// Messages sent from the background scanner to the `LocalWorktree` model.
enum ScanState {
    /// A scan of some set of paths has begun.
    Started,
    /// The scanner produced a new snapshot; `scanning` is true while further
    /// updates for the same scan are still expected.
    Updated {
        snapshot: LocalSnapshot,
        changes: HashMap<Arc<Path>, PathChange>,
        /// Dropped after the update is applied, releasing any caller waiting
        /// on this batch of paths.
        barrier: Option<barrier::Sender>,
        scanning: bool,
    },
}
274
/// State kept while a local worktree is shared with remote collaborators.
struct ShareState {
    project_id: u64,
    /// Receives each new snapshot to be streamed to the server.
    snapshots_tx: watch::Sender<LocalSnapshot>,
    /// Signalled to make the update loop retry after a failed request.
    resume_updates: watch::Sender<()>,
    /// Background task that sends snapshot updates; cancelled on drop.
    _maintain_remote_snapshot: Task<Option<()>>,
}
281
/// Events emitted by a `Worktree` model.
pub enum Event {
    /// Entries changed, mapped to the kind of change per path.
    UpdatedEntries(HashMap<Arc<Path>, PathChange>),
    /// Git repository entries changed between snapshots.
    UpdatedGitRepositories(Vec<RepositoryEntry>),
}
286
// Makes `Worktree` a gpui model that emits `Event`s.
impl Entity for Worktree {
    type Event = Event;
}
290
291impl Worktree {
    /// Creates a worktree rooted at `path` on the local filesystem.
    ///
    /// Stats the root eagerly (failing if it cannot be statted), then spawns
    /// a background scanner that discovers the rest of the tree and watches
    /// for filesystem events.
    pub async fn local(
        client: Arc<Client>,
        path: impl Into<Arc<Path>>,
        visible: bool,
        fs: Arc<dyn Fs>,
        next_entry_id: Arc<AtomicUsize>,
        cx: &mut AsyncAppContext,
    ) -> Result<ModelHandle<Self>> {
        // After determining whether the root entry is a file or a directory, populate the
        // snapshot's "root name", which will be used for the purpose of fuzzy matching.
        let abs_path = path.into();
        let metadata = fs
            .metadata(&abs_path)
            .await
            .context("failed to stat worktree path")?;

        Ok(cx.add_model(move |cx: &mut ModelContext<Worktree>| {
            let root_name = abs_path
                .file_name()
                .map_or(String::new(), |f| f.to_string_lossy().to_string());

            let mut snapshot = LocalSnapshot {
                ignores_by_parent_abs_path: Default::default(),
                removed_entry_ids: Default::default(),
                git_repositories: Default::default(),
                next_entry_id,
                snapshot: Snapshot {
                    id: WorktreeId::from_usize(cx.model_id()),
                    abs_path: abs_path.clone(),
                    root_name: root_name.clone(),
                    root_char_bag: root_name.chars().map(|c| c.to_ascii_lowercase()).collect(),
                    entries_by_path: Default::default(),
                    entries_by_id: Default::default(),
                    repository_entries: Default::default(),
                    // scan_id 1 with completed_scan_id 0 marks the initial
                    // scan as in progress.
                    scan_id: 1,
                    completed_scan_id: 0,
                },
            };

            if let Some(metadata) = metadata {
                // Seed the snapshot with the root entry so the worktree is
                // usable before the background scan completes.
                snapshot.insert_entry(
                    Entry::new(
                        Arc::from(Path::new("")),
                        &metadata,
                        &snapshot.next_entry_id,
                        snapshot.root_char_bag,
                    ),
                    fs.as_ref(),
                );
            }

            let (path_changes_tx, path_changes_rx) = channel::unbounded();
            let (scan_states_tx, mut scan_states_rx) = mpsc::unbounded();

            // Forward scan states from the background scanner into this
            // model; the loop ends once the model or the channel is dropped.
            cx.spawn_weak(|this, mut cx| async move {
                while let Some((state, this)) = scan_states_rx.next().await.zip(this.upgrade(&cx)) {
                    this.update(&mut cx, |this, cx| {
                        let this = this.as_local_mut().unwrap();
                        match state {
                            ScanState::Started => {
                                *this.is_scanning.0.borrow_mut() = true;
                            }
                            ScanState::Updated {
                                snapshot,
                                changes,
                                barrier,
                                scanning,
                            } => {
                                *this.is_scanning.0.borrow_mut() = scanning;
                                this.set_snapshot(snapshot, cx);
                                cx.emit(Event::UpdatedEntries(changes));
                                // Dropping the barrier unblocks callers
                                // waiting on this batch of path changes.
                                drop(barrier);
                            }
                        }
                        cx.notify();
                    });
                }
            })
            .detach();

            let background_scanner_task = cx.background().spawn({
                let fs = fs.clone();
                let snapshot = snapshot.clone();
                let background = cx.background().clone();
                async move {
                    // Watch the root with a 100ms debounce before scanning.
                    let events = fs.watch(&abs_path, Duration::from_millis(100)).await;
                    BackgroundScanner::new(
                        snapshot,
                        fs,
                        scan_states_tx,
                        background,
                        path_changes_rx,
                    )
                    .run(events)
                    .await;
                }
            });

            Worktree::Local(LocalWorktree {
                snapshot,
                is_scanning: watch::channel_with(true),
                share: None,
                path_changes_tx,
                _background_scanner_task: background_scanner_task,
                diagnostics: Default::default(),
                diagnostic_summaries: Default::default(),
                client,
                fs,
                visible,
            })
        }))
    }
404
    /// Creates a worktree that mirrors a collaborator's worktree.
    ///
    /// Incoming `UpdateWorktree` messages are applied to a background
    /// snapshot, which is then copied into the model on the main thread.
    pub fn remote(
        project_remote_id: u64,
        replica_id: ReplicaId,
        worktree: proto::WorktreeMetadata,
        client: Arc<Client>,
        cx: &mut AppContext,
    ) -> ModelHandle<Self> {
        cx.add_model(|cx: &mut ModelContext<Self>| {
            let snapshot = Snapshot {
                id: WorktreeId(worktree.id as usize),
                abs_path: Arc::from(PathBuf::from(worktree.abs_path)),
                root_name: worktree.root_name.clone(),
                root_char_bag: worktree
                    .root_name
                    .chars()
                    .map(|c| c.to_ascii_lowercase())
                    .collect(),
                entries_by_path: Default::default(),
                entries_by_id: Default::default(),
                repository_entries: Default::default(),
                scan_id: 1,
                completed_scan_id: 0,
            };

            let (updates_tx, mut updates_rx) = mpsc::unbounded();
            let background_snapshot = Arc::new(Mutex::new(snapshot.clone()));
            let (mut snapshot_updated_tx, mut snapshot_updated_rx) = watch::channel();

            // Apply remote updates off the main thread, then signal that a
            // new snapshot is available.
            cx.background()
                .spawn({
                    let background_snapshot = background_snapshot.clone();
                    async move {
                        while let Some(update) = updates_rx.next().await {
                            if let Err(error) =
                                background_snapshot.lock().apply_remote_update(update)
                            {
                                log::error!("error applying worktree update: {}", error);
                            }
                            snapshot_updated_tx.send(()).await.ok();
                        }
                    }
                })
                .detach();

            // Copy each new background snapshot into the model, and wake any
            // tasks whose awaited scan id has now been observed.
            cx.spawn_weak(|this, mut cx| async move {
                while (snapshot_updated_rx.recv().await).is_some() {
                    if let Some(this) = this.upgrade(&cx) {
                        this.update(&mut cx, |this, cx| {
                            let this = this.as_remote_mut().unwrap();
                            this.snapshot = this.background_snapshot.lock().clone();
                            cx.emit(Event::UpdatedEntries(Default::default()));
                            cx.notify();
                            while let Some((scan_id, _)) = this.snapshot_subscriptions.front() {
                                if this.observed_snapshot(*scan_id) {
                                    let (_, tx) = this.snapshot_subscriptions.pop_front().unwrap();
                                    let _ = tx.send(());
                                } else {
                                    break;
                                }
                            }
                        });
                    } else {
                        break;
                    }
                }
            })
            .detach();

            Worktree::Remote(RemoteWorktree {
                project_id: project_remote_id,
                replica_id,
                snapshot: snapshot.clone(),
                background_snapshot,
                updates_tx: Some(updates_tx),
                snapshot_subscriptions: Default::default(),
                client: client.clone(),
                diagnostic_summaries: Default::default(),
                visible: worktree.visible,
                disconnected: false,
            })
        })
    }
487
488 pub fn as_local(&self) -> Option<&LocalWorktree> {
489 if let Worktree::Local(worktree) = self {
490 Some(worktree)
491 } else {
492 None
493 }
494 }
495
496 pub fn as_remote(&self) -> Option<&RemoteWorktree> {
497 if let Worktree::Remote(worktree) = self {
498 Some(worktree)
499 } else {
500 None
501 }
502 }
503
504 pub fn as_local_mut(&mut self) -> Option<&mut LocalWorktree> {
505 if let Worktree::Local(worktree) = self {
506 Some(worktree)
507 } else {
508 None
509 }
510 }
511
512 pub fn as_remote_mut(&mut self) -> Option<&mut RemoteWorktree> {
513 if let Worktree::Remote(worktree) = self {
514 Some(worktree)
515 } else {
516 None
517 }
518 }
519
520 pub fn is_local(&self) -> bool {
521 matches!(self, Worktree::Local(_))
522 }
523
524 pub fn is_remote(&self) -> bool {
525 !self.is_local()
526 }
527
    /// A copy of the current snapshot, discarding any local-only state.
    pub fn snapshot(&self) -> Snapshot {
        match self {
            Worktree::Local(worktree) => worktree.snapshot().snapshot,
            Worktree::Remote(worktree) => worktree.snapshot(),
        }
    }
534
535 pub fn scan_id(&self) -> usize {
536 match self {
537 Worktree::Local(worktree) => worktree.snapshot.scan_id,
538 Worktree::Remote(worktree) => worktree.snapshot.scan_id,
539 }
540 }
541
542 pub fn completed_scan_id(&self) -> usize {
543 match self {
544 Worktree::Local(worktree) => worktree.snapshot.completed_scan_id,
545 Worktree::Remote(worktree) => worktree.snapshot.completed_scan_id,
546 }
547 }
548
549 pub fn is_visible(&self) -> bool {
550 match self {
551 Worktree::Local(worktree) => worktree.visible,
552 Worktree::Remote(worktree) => worktree.visible,
553 }
554 }
555
556 pub fn replica_id(&self) -> ReplicaId {
557 match self {
558 Worktree::Local(_) => 0,
559 Worktree::Remote(worktree) => worktree.replica_id,
560 }
561 }
562
563 pub fn diagnostic_summaries(
564 &self,
565 ) -> impl Iterator<Item = (Arc<Path>, LanguageServerId, DiagnosticSummary)> + '_ {
566 match self {
567 Worktree::Local(worktree) => &worktree.diagnostic_summaries,
568 Worktree::Remote(worktree) => &worktree.diagnostic_summaries,
569 }
570 .iter()
571 .flat_map(|(path, summaries)| {
572 summaries
573 .iter()
574 .map(move |(&server_id, &summary)| (path.clone(), server_id, summary))
575 })
576 }
577
578 pub fn abs_path(&self) -> Arc<Path> {
579 match self {
580 Worktree::Local(worktree) => worktree.abs_path.clone(),
581 Worktree::Remote(worktree) => worktree.abs_path.clone(),
582 }
583 }
584}
585
586impl LocalWorktree {
    /// Whether the absolute path `path` lies inside this worktree's root.
    pub fn contains_abs_path(&self, path: &Path) -> bool {
        path.starts_with(&self.abs_path)
    }
590
591 fn absolutize(&self, path: &Path) -> PathBuf {
592 if path.file_name().is_some() {
593 self.abs_path.join(path)
594 } else {
595 self.abs_path.to_path_buf()
596 }
597 }
598
    /// Loads the file at `path` into a new `Buffer` model with remote id
    /// `id`, including its git diff base when the file is in a repository.
    pub(crate) fn load_buffer(
        &mut self,
        id: u64,
        path: &Path,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<ModelHandle<Buffer>>> {
        let path = Arc::from(path);
        cx.spawn(move |this, mut cx| async move {
            let (file, contents, diff_base) = this
                .update(&mut cx, |t, cx| t.as_local().unwrap().load(&path, cx))
                .await?;
            // Construct the text buffer on the background thread pool.
            let text_buffer = cx
                .background()
                .spawn(async move { text::Buffer::new(0, id, contents) })
                .await;
            Ok(cx.add_model(|cx| {
                let mut buffer = Buffer::build(text_buffer, diff_base, Some(Arc::new(file)));
                buffer.git_diff_recalc(cx);
                buffer
            }))
        })
    }
621
622 pub fn diagnostics_for_path(
623 &self,
624 path: &Path,
625 ) -> Vec<(
626 LanguageServerId,
627 Vec<DiagnosticEntry<Unclipped<PointUtf16>>>,
628 )> {
629 self.diagnostics.get(path).cloned().unwrap_or_default()
630 }
631
    /// Replaces the diagnostics produced by `server_id` for `worktree_path`.
    ///
    /// Returns `Ok(true)` when the path's summary changed (either the old or
    /// the new summary is non-empty). When the worktree is shared, the new
    /// summary is also sent to collaborators.
    pub fn update_diagnostics(
        &mut self,
        server_id: LanguageServerId,
        worktree_path: Arc<Path>,
        diagnostics: Vec<DiagnosticEntry<Unclipped<PointUtf16>>>,
        _: &mut ModelContext<Worktree>,
    ) -> Result<bool> {
        let summaries_by_server_id = self
            .diagnostic_summaries
            .entry(worktree_path.clone())
            .or_default();

        let old_summary = summaries_by_server_id
            .remove(&server_id)
            .unwrap_or_default();

        let new_summary = DiagnosticSummary::new(&diagnostics);
        if new_summary.is_empty() {
            // No diagnostics left from this server: drop its slot, and drop
            // the whole path entry once no server has diagnostics for it.
            if let Some(diagnostics_by_server_id) = self.diagnostics.get_mut(&worktree_path) {
                if let Ok(ix) = diagnostics_by_server_id.binary_search_by_key(&server_id, |e| e.0) {
                    diagnostics_by_server_id.remove(ix);
                }
                if diagnostics_by_server_id.is_empty() {
                    self.diagnostics.remove(&worktree_path);
                }
            }
        } else {
            summaries_by_server_id.insert(server_id, new_summary);
            // The per-path vector is kept sorted by server id so binary
            // search stays valid.
            let diagnostics_by_server_id =
                self.diagnostics.entry(worktree_path.clone()).or_default();
            match diagnostics_by_server_id.binary_search_by_key(&server_id, |e| e.0) {
                Ok(ix) => {
                    diagnostics_by_server_id[ix] = (server_id, diagnostics);
                }
                Err(ix) => {
                    diagnostics_by_server_id.insert(ix, (server_id, diagnostics));
                }
            }
        }

        if !old_summary.is_empty() || !new_summary.is_empty() {
            if let Some(share) = self.share.as_ref() {
                self.client
                    .send(proto::UpdateDiagnosticSummary {
                        project_id: share.project_id,
                        worktree_id: self.id().to_proto(),
                        summary: Some(proto::DiagnosticSummary {
                            path: worktree_path.to_string_lossy().to_string(),
                            language_server_id: server_id.0 as u64,
                            error_count: new_summary.error_count as u32,
                            warning_count: new_summary.warning_count as u32,
                        }),
                    })
                    .log_err();
            }
        }

        Ok(!old_summary.is_empty() || !new_summary.is_empty())
    }
691
    /// Installs a snapshot produced by the background scanner: forwards it to
    /// collaborators when shared, and emits an event for any git repositories
    /// that changed between the old and new snapshots.
    fn set_snapshot(&mut self, new_snapshot: LocalSnapshot, cx: &mut ModelContext<Worktree>) {
        let updated_repos = Self::changed_repos(
            &self.snapshot.repository_entries,
            &new_snapshot.repository_entries,
        );
        self.snapshot = new_snapshot;

        if let Some(share) = self.share.as_mut() {
            // Publishing to the watch wakes the snapshot-maintenance task.
            *share.snapshots_tx.borrow_mut() = self.snapshot.clone();
        }

        if !updated_repos.is_empty() {
            cx.emit(Event::UpdatedGitRepositories(updated_repos));
        }
    }
707
    /// Computes the symmetric difference between two sets of repository
    /// entries, treating two entries as equal when both their work directory
    /// and scan id match. Keying the result by work-directory id ensures each
    /// changed repository is reported only once.
    fn changed_repos(
        old_repos: &TreeMap<RepositoryWorkDirectory, RepositoryEntry>,
        new_repos: &TreeMap<RepositoryWorkDirectory, RepositoryEntry>,
    ) -> Vec<RepositoryEntry> {
        // Records entries of `a` that have no identical counterpart in `b`.
        // NOTE(review): `b` is consumed as it is searched, so this relies on
        // both maps yielding entries in the same sorted order — confirm.
        fn diff<'a>(
            a: impl Iterator<Item = &'a RepositoryEntry>,
            mut b: impl Iterator<Item = &'a RepositoryEntry>,
            updated: &mut HashMap<ProjectEntryId, RepositoryEntry>,
        ) {
            for a_repo in a {
                let matched = b.find(|b_repo| {
                    a_repo.work_directory == b_repo.work_directory
                        && a_repo.scan_id == b_repo.scan_id
                });

                if matched.is_none() {
                    updated.insert(*a_repo.work_directory, a_repo.clone());
                }
            }
        }

        let mut updated = HashMap::<ProjectEntryId, RepositoryEntry>::default();

        diff(old_repos.values(), new_repos.values(), &mut updated);
        diff(new_repos.values(), old_repos.values(), &mut updated);

        updated.into_values().collect()
    }
736
737 pub fn scan_complete(&self) -> impl Future<Output = ()> {
738 let mut is_scanning_rx = self.is_scanning.1.clone();
739 async move {
740 let mut is_scanning = is_scanning_rx.borrow().clone();
741 while is_scanning {
742 if let Some(value) = is_scanning_rx.recv().await {
743 is_scanning = value;
744 } else {
745 break;
746 }
747 }
748 }
749 }
750
    /// A copy of the current local snapshot.
    pub fn snapshot(&self) -> LocalSnapshot {
        self.snapshot.clone()
    }
754
755 pub fn metadata_proto(&self) -> proto::WorktreeMetadata {
756 proto::WorktreeMetadata {
757 id: self.id().to_proto(),
758 root_name: self.root_name().to_string(),
759 visible: self.visible,
760 abs_path: self.abs_path().as_os_str().to_string_lossy().into(),
761 }
762 }
763
    /// Reads the file at `path` from disk, returning its `File` handle, its
    /// contents, and — when the file is inside a git repository — the text of
    /// the file in the git index, to serve as a diff base.
    fn load(
        &self,
        path: &Path,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<(File, String, Option<String>)>> {
        let handle = cx.handle();
        let path = Arc::from(path);
        let abs_path = self.absolutize(&path);
        let fs = self.fs.clone();
        let snapshot = self.snapshot();

        let mut index_task = None;

        // Kick off loading the index text concurrently with the file read.
        if let Some(repo) = snapshot.repo_for(&path) {
            let repo_path = repo.work_directory.relativize(self, &path).unwrap();
            if let Some(repo) = self.git_repositories.get(&*repo.work_directory) {
                let repo = repo.repo_ptr.to_owned();
                index_task = Some(
                    cx.background()
                        .spawn(async move { repo.lock().load_index_text(&repo_path) }),
                );
            }
        }

        cx.spawn(|this, mut cx| async move {
            let text = fs.load(&abs_path).await?;

            let diff_base = if let Some(index_task) = index_task {
                index_task.await
            } else {
                None
            };

            // Eagerly populate the snapshot with an updated entry for the loaded file
            let entry = this
                .update(&mut cx, |this, cx| {
                    this.as_local().unwrap().refresh_entry(path, None, cx)
                })
                .await?;

            Ok((
                File {
                    entry_id: entry.id,
                    worktree: handle,
                    path: entry.path,
                    mtime: entry.mtime,
                    is_local: true,
                    is_deleted: false,
                },
                text,
                diff_base,
            ))
        })
    }
818
819 pub fn save_buffer(
820 &self,
821 buffer_handle: ModelHandle<Buffer>,
822 path: Arc<Path>,
823 has_changed_file: bool,
824 cx: &mut ModelContext<Worktree>,
825 ) -> Task<Result<(clock::Global, RopeFingerprint, SystemTime)>> {
826 let handle = cx.handle();
827 let buffer = buffer_handle.read(cx);
828
829 let rpc = self.client.clone();
830 let buffer_id = buffer.remote_id();
831 let project_id = self.share.as_ref().map(|share| share.project_id);
832
833 let text = buffer.as_rope().clone();
834 let fingerprint = text.fingerprint();
835 let version = buffer.version();
836 let save = self.write_file(path, text, buffer.line_ending(), cx);
837
838 cx.as_mut().spawn(|mut cx| async move {
839 let entry = save.await?;
840
841 if has_changed_file {
842 let new_file = Arc::new(File {
843 entry_id: entry.id,
844 worktree: handle,
845 path: entry.path,
846 mtime: entry.mtime,
847 is_local: true,
848 is_deleted: false,
849 });
850
851 if let Some(project_id) = project_id {
852 rpc.send(proto::UpdateBufferFile {
853 project_id,
854 buffer_id,
855 file: Some(new_file.to_proto()),
856 })
857 .log_err();
858 }
859
860 buffer_handle.update(&mut cx, |buffer, cx| {
861 if has_changed_file {
862 buffer.file_updated(new_file, cx).detach();
863 }
864 });
865 }
866
867 if let Some(project_id) = project_id {
868 rpc.send(proto::BufferSaved {
869 project_id,
870 buffer_id,
871 version: serialize_version(&version),
872 mtime: Some(entry.mtime.into()),
873 fingerprint: serialize_fingerprint(fingerprint),
874 })?;
875 }
876
877 buffer_handle.update(&mut cx, |buffer, cx| {
878 buffer.did_save(version.clone(), fingerprint, entry.mtime, cx);
879 });
880
881 Ok((version, fingerprint, entry.mtime))
882 })
883 }
884
    /// Creates a file or directory at the worktree-relative `path`, then
    /// returns the freshly scanned entry for it.
    pub fn create_entry(
        &self,
        path: impl Into<Arc<Path>>,
        is_dir: bool,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<Entry>> {
        let path = path.into();
        let abs_path = self.absolutize(&path);
        let fs = self.fs.clone();
        let write = cx.background().spawn(async move {
            if is_dir {
                fs.create_dir(&abs_path).await
            } else {
                // Create an empty file with default contents/line endings.
                fs.save(&abs_path, &Default::default(), Default::default())
                    .await
            }
        });

        cx.spawn(|this, mut cx| async move {
            write.await?;
            this.update(&mut cx, |this, cx| {
                this.as_local_mut().unwrap().refresh_entry(path, None, cx)
            })
            .await
        })
    }
911
    /// Writes `text` to the worktree-relative `path` with the given line
    /// ending, then returns the freshly scanned entry for it.
    pub fn write_file(
        &self,
        path: impl Into<Arc<Path>>,
        text: Rope,
        line_ending: LineEnding,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<Entry>> {
        let path = path.into();
        let abs_path = self.absolutize(&path);
        let fs = self.fs.clone();
        let write = cx
            .background()
            .spawn(async move { fs.save(&abs_path, &text, line_ending).await });

        cx.spawn(|this, mut cx| async move {
            write.await?;
            this.update(&mut cx, |this, cx| {
                this.as_local_mut().unwrap().refresh_entry(path, None, cx)
            })
            .await
        })
    }
934
    /// Deletes the file or directory for `entry_id` from disk, then waits for
    /// the background scanner to process the removal. Returns `None` when the
    /// entry no longer exists in the snapshot.
    pub fn delete_entry(
        &self,
        entry_id: ProjectEntryId,
        cx: &mut ModelContext<Worktree>,
    ) -> Option<Task<Result<()>>> {
        let entry = self.entry_for_id(entry_id)?.clone();
        let abs_path = self.abs_path.clone();
        let fs = self.fs.clone();

        let delete = cx.background().spawn(async move {
            // Canonicalize the root first so the path handed back to the
            // scanner matches the watcher's form.
            let mut abs_path = fs.canonicalize(&abs_path).await?;
            if entry.path.file_name().is_some() {
                abs_path = abs_path.join(&entry.path);
            }
            if entry.is_file() {
                fs.remove_file(&abs_path, Default::default()).await?;
            } else {
                fs.remove_dir(
                    &abs_path,
                    RemoveOptions {
                        recursive: true,
                        ignore_if_not_exists: false,
                    },
                )
                .await?;
            }
            anyhow::Ok(abs_path)
        });

        Some(cx.spawn(|this, mut cx| async move {
            let abs_path = delete.await?;
            let (tx, mut rx) = barrier::channel();
            this.update(&mut cx, |this, _| {
                this.as_local_mut()
                    .unwrap()
                    .path_changes_tx
                    .try_send((vec![abs_path], tx))
            })?;
            // Resolves once the scanner drops the barrier for this batch.
            rx.recv().await;
            Ok(())
        }))
    }
977
    /// Moves the entry for `entry_id` to `new_path` on disk, then returns the
    /// freshly scanned entry. Returns `None` when the entry no longer exists
    /// in the snapshot.
    pub fn rename_entry(
        &self,
        entry_id: ProjectEntryId,
        new_path: impl Into<Arc<Path>>,
        cx: &mut ModelContext<Worktree>,
    ) -> Option<Task<Result<Entry>>> {
        let old_path = self.entry_for_id(entry_id)?.path.clone();
        let new_path = new_path.into();
        let abs_old_path = self.absolutize(&old_path);
        let abs_new_path = self.absolutize(&new_path);
        let fs = self.fs.clone();
        let rename = cx.background().spawn(async move {
            fs.rename(&abs_old_path, &abs_new_path, Default::default())
                .await
        });

        Some(cx.spawn(|this, mut cx| async move {
            rename.await?;
            // Refresh both locations so the scanner observes the removal at
            // the old path and the addition at the new one.
            this.update(&mut cx, |this, cx| {
                this.as_local_mut()
                    .unwrap()
                    .refresh_entry(new_path.clone(), Some(old_path), cx)
            })
            .await
        }))
    }
1004
    /// Recursively copies the entry for `entry_id` to `new_path` on disk,
    /// then returns the freshly scanned entry. Returns `None` when the entry
    /// no longer exists in the snapshot.
    pub fn copy_entry(
        &self,
        entry_id: ProjectEntryId,
        new_path: impl Into<Arc<Path>>,
        cx: &mut ModelContext<Worktree>,
    ) -> Option<Task<Result<Entry>>> {
        let old_path = self.entry_for_id(entry_id)?.path.clone();
        let new_path = new_path.into();
        let abs_old_path = self.absolutize(&old_path);
        let abs_new_path = self.absolutize(&new_path);
        let fs = self.fs.clone();
        let copy = cx.background().spawn(async move {
            copy_recursive(
                fs.as_ref(),
                &abs_old_path,
                &abs_new_path,
                Default::default(),
            )
            .await
        });

        Some(cx.spawn(|this, mut cx| async move {
            copy.await?;
            this.update(&mut cx, |this, cx| {
                this.as_local_mut()
                    .unwrap()
                    .refresh_entry(new_path.clone(), None, cx)
            })
            .await
        }))
    }
1036
    /// Asks the background scanner to rescan `path` (and `old_path`, when the
    /// entry was moved), then returns the refreshed entry from the updated
    /// snapshot.
    fn refresh_entry(
        &self,
        path: Arc<Path>,
        old_path: Option<Arc<Path>>,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<Entry>> {
        let fs = self.fs.clone();
        let abs_root_path = self.abs_path.clone();
        let path_changes_tx = self.path_changes_tx.clone();
        cx.spawn_weak(move |this, mut cx| async move {
            let abs_path = fs.canonicalize(&abs_root_path).await?;
            let mut paths = Vec::with_capacity(2);
            // A path with no file name (the empty path) denotes the root.
            paths.push(if path.file_name().is_some() {
                abs_path.join(&path)
            } else {
                abs_path.clone()
            });
            if let Some(old_path) = old_path {
                paths.push(if old_path.file_name().is_some() {
                    abs_path.join(&old_path)
                } else {
                    abs_path.clone()
                });
            }

            // Wait for the scanner to drop the barrier, signalling that the
            // requested paths have been rescanned.
            let (tx, mut rx) = barrier::channel();
            path_changes_tx.try_send((paths, tx))?;
            rx.recv().await;
            this.upgrade(&cx)
                .ok_or_else(|| anyhow!("worktree was dropped"))?
                .update(&mut cx, |this, _| {
                    this.entry_for_path(path)
                        .cloned()
                        .ok_or_else(|| anyhow!("failed to read path after update"))
                })
        })
    }
1074
    /// Starts (or resumes) sharing this worktree with collaborators in
    /// `project_id`.
    ///
    /// Sends current diagnostic summaries up front, then spawns a task that
    /// streams each new snapshot to the server as chunked `UpdateWorktree`
    /// requests. The returned task resolves once the first snapshot has been
    /// fully sent.
    pub fn share(&mut self, project_id: u64, cx: &mut ModelContext<Worktree>) -> Task<Result<()>> {
        let (share_tx, share_rx) = oneshot::channel();

        if let Some(share) = self.share.as_mut() {
            // Already shared: resolve immediately and nudge the update loop
            // to resume sending.
            let _ = share_tx.send(());
            *share.resume_updates.borrow_mut() = ();
        } else {
            let (snapshots_tx, mut snapshots_rx) = watch::channel_with(self.snapshot());
            let (resume_updates_tx, mut resume_updates_rx) = watch::channel();
            let worktree_id = cx.model_id() as u64;

            for (path, summaries) in &self.diagnostic_summaries {
                for (&server_id, summary) in summaries {
                    if let Err(e) = self.client.send(proto::UpdateDiagnosticSummary {
                        project_id,
                        worktree_id,
                        summary: Some(summary.to_proto(server_id, &path)),
                    }) {
                        return Task::ready(Err(e));
                    }
                }
            }

            let _maintain_remote_snapshot = cx.background().spawn({
                let client = self.client.clone();
                async move {
                    let mut share_tx = Some(share_tx);
                    // Diff against an empty snapshot first, so the initial
                    // update transmits the entire worktree.
                    let mut prev_snapshot = LocalSnapshot {
                        ignores_by_parent_abs_path: Default::default(),
                        removed_entry_ids: Default::default(),
                        next_entry_id: Default::default(),
                        git_repositories: Default::default(),
                        snapshot: Snapshot {
                            id: WorktreeId(worktree_id as usize),
                            abs_path: Path::new("").into(),
                            root_name: Default::default(),
                            root_char_bag: Default::default(),
                            entries_by_path: Default::default(),
                            entries_by_id: Default::default(),
                            repository_entries: Default::default(),
                            scan_id: 0,
                            completed_scan_id: 0,
                        },
                    };
                    while let Some(snapshot) = snapshots_rx.recv().await {
                        // Small chunks in tests exercise the splitting logic.
                        #[cfg(any(test, feature = "test-support"))]
                        const MAX_CHUNK_SIZE: usize = 2;
                        #[cfg(not(any(test, feature = "test-support")))]
                        const MAX_CHUNK_SIZE: usize = 256;

                        let update =
                            snapshot.build_update(&prev_snapshot, project_id, worktree_id, true);
                        for update in proto::split_worktree_update(update, MAX_CHUNK_SIZE) {
                            let _ = resume_updates_rx.try_recv();
                            // Retry each chunk until it succeeds, or give up
                            // when the resume channel closes.
                            while let Err(error) = client.request(update.clone()).await {
                                log::error!("failed to send worktree update: {}", error);
                                log::info!("waiting to resume updates");
                                if resume_updates_rx.next().await.is_none() {
                                    return Ok(());
                                }
                            }
                        }

                        // First snapshot fully sent: resolve the share task.
                        if let Some(share_tx) = share_tx.take() {
                            let _ = share_tx.send(());
                        }

                        prev_snapshot = snapshot;
                    }

                    Ok::<_, anyhow::Error>(())
                }
                .log_err()
            });

            self.share = Some(ShareState {
                project_id,
                snapshots_tx,
                resume_updates: resume_updates_tx,
                _maintain_remote_snapshot,
            });
        }

        cx.foreground()
            .spawn(async move { share_rx.await.map_err(|_| anyhow!("share ended")) })
    }
1161
1162 pub fn unshare(&mut self) {
1163 self.share.take();
1164 }
1165
1166 pub fn is_shared(&self) -> bool {
1167 self.share.is_some()
1168 }
1169
1170 pub fn load_index_text(
1171 &self,
1172 repo: RepositoryEntry,
1173 repo_path: RepoPath,
1174 cx: &mut ModelContext<Worktree>,
1175 ) -> Task<Option<String>> {
1176 let Some(git_ptr) = self.git_repositories.get(&repo.work_directory).map(|git_ptr| git_ptr.to_owned()) else {
1177 return Task::Ready(Some(None))
1178 };
1179 let git_ptr = git_ptr.repo_ptr;
1180
1181 cx.background()
1182 .spawn(async move { git_ptr.lock().load_index_text(&repo_path) })
1183 }
1184}
1185
impl RemoteWorktree {
    /// Returns a clone of the currently published snapshot.
    fn snapshot(&self) -> Snapshot {
        self.snapshot.clone()
    }

    /// Called when the connection to the host is lost: stops accepting
    /// further remote updates and drops all pending snapshot subscriptions
    /// (dropping their oneshot senders wakes any `wait_for_snapshot` futures
    /// with a cancellation error).
    pub fn disconnected_from_host(&mut self) {
        self.updates_tx.take();
        self.snapshot_subscriptions.clear();
        self.disconnected = true;
    }

    /// Asks the host to save the given buffer via RPC.
    ///
    /// On success, notifies the buffer (`did_save`) with the version,
    /// fingerprint, and mtime echoed back by the host, and returns that
    /// triple to the caller.
    pub fn save_buffer(
        &self,
        buffer_handle: ModelHandle<Buffer>,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<(clock::Global, RopeFingerprint, SystemTime)>> {
        let buffer = buffer_handle.read(cx);
        let buffer_id = buffer.remote_id();
        let version = buffer.version();
        let rpc = self.client.clone();
        let project_id = self.project_id;
        cx.as_mut().spawn(|mut cx| async move {
            let response = rpc
                .request(proto::SaveBuffer {
                    project_id,
                    buffer_id,
                    version: serialize_version(&version),
                })
                .await?;
            // Prefer the authoritative values from the host's response over
            // what we sent.
            let version = deserialize_version(&response.version);
            let fingerprint = deserialize_fingerprint(&response.fingerprint)?;
            let mtime = response
                .mtime
                .ok_or_else(|| anyhow!("missing mtime"))?
                .into();

            buffer_handle.update(&mut cx, |buffer, cx| {
                buffer.did_save(version.clone(), fingerprint, mtime, cx);
            });

            Ok((version, fingerprint, mtime))
        })
    }

    /// Enqueues a worktree update received from the host. Silently dropped
    /// after `disconnected_from_host` has cleared the sender.
    pub fn update_from_remote(&mut self, update: proto::UpdateWorktree) {
        if let Some(updates_tx) = &self.updates_tx {
            updates_tx
                .unbounded_send(update)
                .expect("consumer runs to completion");
        }
    }

    /// Whether a snapshot with at least the given scan id has been fully applied.
    fn observed_snapshot(&self, scan_id: usize) -> bool {
        self.completed_scan_id >= scan_id
    }

    /// Returns a future that resolves once this worktree has observed a
    /// snapshot with at least `scan_id`. Resolves immediately if already
    /// observed; fails immediately (sender dropped) if disconnected.
    fn wait_for_snapshot(&mut self, scan_id: usize) -> impl Future<Output = Result<()>> {
        let (tx, rx) = oneshot::channel();
        if self.observed_snapshot(scan_id) {
            let _ = tx.send(());
        } else if self.disconnected {
            drop(tx);
        } else {
            // Keep the subscription list sorted by scan id so waiters can be
            // completed in order as snapshots arrive.
            match self
                .snapshot_subscriptions
                .binary_search_by_key(&scan_id, |probe| probe.0)
            {
                Ok(ix) | Err(ix) => self.snapshot_subscriptions.insert(ix, (scan_id, tx)),
            }
        }

        async move {
            rx.await?;
            Ok(())
        }
    }

    /// Applies a diagnostic summary for `path` received from the host.
    /// Empty summaries remove the server's entry (and the whole path entry
    /// once no server has diagnostics for it), so the map only holds paths
    /// that actually have diagnostics.
    pub fn update_diagnostic_summary(
        &mut self,
        path: Arc<Path>,
        summary: &proto::DiagnosticSummary,
    ) {
        let server_id = LanguageServerId(summary.language_server_id as usize);
        let summary = DiagnosticSummary {
            error_count: summary.error_count as usize,
            warning_count: summary.warning_count as usize,
        };

        if summary.is_empty() {
            if let Some(summaries) = self.diagnostic_summaries.get_mut(&path) {
                summaries.remove(&server_id);
                if summaries.is_empty() {
                    self.diagnostic_summaries.remove(&path);
                }
            }
        } else {
            self.diagnostic_summaries
                .entry(path)
                .or_default()
                .insert(server_id, summary);
        }
    }

    /// Inserts an entry into the snapshot once the given scan has been
    /// observed, then republishes the background snapshot to the foreground
    /// copy.
    pub fn insert_entry(
        &mut self,
        entry: proto::Entry,
        scan_id: usize,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<Entry>> {
        let wait_for_snapshot = self.wait_for_snapshot(scan_id);
        cx.spawn(|this, mut cx| async move {
            wait_for_snapshot.await?;
            this.update(&mut cx, |worktree, _| {
                let worktree = worktree.as_remote_mut().unwrap();
                let mut snapshot = worktree.background_snapshot.lock();
                let entry = snapshot.insert_entry(entry);
                worktree.snapshot = snapshot.clone();
                entry
            })
        })
    }

    /// Deletes the entry with the given id once the given scan has been
    /// observed, then republishes the background snapshot.
    pub(crate) fn delete_entry(
        &mut self,
        id: ProjectEntryId,
        scan_id: usize,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<()>> {
        let wait_for_snapshot = self.wait_for_snapshot(scan_id);
        cx.spawn(|this, mut cx| async move {
            wait_for_snapshot.await?;
            this.update(&mut cx, |worktree, _| {
                let worktree = worktree.as_remote_mut().unwrap();
                let mut snapshot = worktree.background_snapshot.lock();
                snapshot.delete_entry(id);
                worktree.snapshot = snapshot.clone();
            });
            Ok(())
        })
    }
}
1327
1328impl Snapshot {
1329 pub fn id(&self) -> WorktreeId {
1330 self.id
1331 }
1332
1333 pub fn abs_path(&self) -> &Arc<Path> {
1334 &self.abs_path
1335 }
1336
1337 pub fn contains_entry(&self, entry_id: ProjectEntryId) -> bool {
1338 self.entries_by_id.get(&entry_id, &()).is_some()
1339 }
1340
1341 pub(crate) fn insert_entry(&mut self, entry: proto::Entry) -> Result<Entry> {
1342 let entry = Entry::try_from((&self.root_char_bag, entry))?;
1343 let old_entry = self.entries_by_id.insert_or_replace(
1344 PathEntry {
1345 id: entry.id,
1346 path: entry.path.clone(),
1347 is_ignored: entry.is_ignored,
1348 scan_id: 0,
1349 },
1350 &(),
1351 );
1352 if let Some(old_entry) = old_entry {
1353 self.entries_by_path.remove(&PathKey(old_entry.path), &());
1354 }
1355 self.entries_by_path.insert_or_replace(entry.clone(), &());
1356 Ok(entry)
1357 }
1358
1359 fn delete_entry(&mut self, entry_id: ProjectEntryId) -> Option<Arc<Path>> {
1360 let removed_entry = self.entries_by_id.remove(&entry_id, &())?;
1361 self.entries_by_path = {
1362 let mut cursor = self.entries_by_path.cursor();
1363 let mut new_entries_by_path =
1364 cursor.slice(&TraversalTarget::Path(&removed_entry.path), Bias::Left, &());
1365 while let Some(entry) = cursor.item() {
1366 if entry.path.starts_with(&removed_entry.path) {
1367 self.entries_by_id.remove(&entry.id, &());
1368 cursor.next(&());
1369 } else {
1370 break;
1371 }
1372 }
1373 new_entries_by_path.push_tree(cursor.suffix(&()), &());
1374 new_entries_by_path
1375 };
1376
1377 Some(removed_entry.path)
1378 }
1379
1380 pub(crate) fn apply_remote_update(&mut self, mut update: proto::UpdateWorktree) -> Result<()> {
1381 let mut entries_by_path_edits = Vec::new();
1382 let mut entries_by_id_edits = Vec::new();
1383 for entry_id in update.removed_entries {
1384 if let Some(entry) = self.entry_for_id(ProjectEntryId::from_proto(entry_id)) {
1385 entries_by_path_edits.push(Edit::Remove(PathKey(entry.path.clone())));
1386 entries_by_id_edits.push(Edit::Remove(entry.id));
1387 }
1388 }
1389
1390 for entry in update.updated_entries {
1391 let entry = Entry::try_from((&self.root_char_bag, entry))?;
1392 if let Some(PathEntry { path, .. }) = self.entries_by_id.get(&entry.id, &()) {
1393 entries_by_path_edits.push(Edit::Remove(PathKey(path.clone())));
1394 }
1395 entries_by_id_edits.push(Edit::Insert(PathEntry {
1396 id: entry.id,
1397 path: entry.path.clone(),
1398 is_ignored: entry.is_ignored,
1399 scan_id: 0,
1400 }));
1401 entries_by_path_edits.push(Edit::Insert(entry));
1402 }
1403
1404 self.entries_by_path.edit(entries_by_path_edits, &());
1405 self.entries_by_id.edit(entries_by_id_edits, &());
1406
1407 update.removed_repositories.sort_unstable();
1408 self.repository_entries.retain(|_, entry| {
1409 if let Ok(_) = update
1410 .removed_repositories
1411 .binary_search(&entry.work_directory.to_proto())
1412 {
1413 false
1414 } else {
1415 true
1416 }
1417 });
1418
1419 for repository in update.updated_repositories {
1420 let repository = RepositoryEntry {
1421 work_directory: ProjectEntryId::from_proto(repository.work_directory_id).into(),
1422 scan_id: repository.scan_id as usize,
1423 branch: repository.branch.map(Into::into),
1424 };
1425 if let Some(entry) = self.entry_for_id(repository.work_directory_id()) {
1426 self.repository_entries
1427 .insert(RepositoryWorkDirectory(entry.path.clone()), repository)
1428 } else {
1429 log::error!("no work directory entry for repository {:?}", repository)
1430 }
1431 }
1432
1433 self.scan_id = update.scan_id as usize;
1434 if update.is_last_update {
1435 self.completed_scan_id = update.scan_id as usize;
1436 }
1437
1438 Ok(())
1439 }
1440
1441 pub fn file_count(&self) -> usize {
1442 self.entries_by_path.summary().file_count
1443 }
1444
1445 pub fn visible_file_count(&self) -> usize {
1446 self.entries_by_path.summary().visible_file_count
1447 }
1448
1449 fn traverse_from_offset(
1450 &self,
1451 include_dirs: bool,
1452 include_ignored: bool,
1453 start_offset: usize,
1454 ) -> Traversal {
1455 let mut cursor = self.entries_by_path.cursor();
1456 cursor.seek(
1457 &TraversalTarget::Count {
1458 count: start_offset,
1459 include_dirs,
1460 include_ignored,
1461 },
1462 Bias::Right,
1463 &(),
1464 );
1465 Traversal {
1466 cursor,
1467 include_dirs,
1468 include_ignored,
1469 }
1470 }
1471
1472 fn traverse_from_path(
1473 &self,
1474 include_dirs: bool,
1475 include_ignored: bool,
1476 path: &Path,
1477 ) -> Traversal {
1478 let mut cursor = self.entries_by_path.cursor();
1479 cursor.seek(&TraversalTarget::Path(path), Bias::Left, &());
1480 Traversal {
1481 cursor,
1482 include_dirs,
1483 include_ignored,
1484 }
1485 }
1486
1487 pub fn files(&self, include_ignored: bool, start: usize) -> Traversal {
1488 self.traverse_from_offset(false, include_ignored, start)
1489 }
1490
1491 pub fn entries(&self, include_ignored: bool) -> Traversal {
1492 self.traverse_from_offset(true, include_ignored, 0)
1493 }
1494
1495 pub fn repositories(&self) -> impl Iterator<Item = &RepositoryEntry> {
1496 self.repository_entries.values()
1497 }
1498
1499 pub fn paths(&self) -> impl Iterator<Item = &Arc<Path>> {
1500 let empty_path = Path::new("");
1501 self.entries_by_path
1502 .cursor::<()>()
1503 .filter(move |entry| entry.path.as_ref() != empty_path)
1504 .map(|entry| &entry.path)
1505 }
1506
1507 fn child_entries<'a>(&'a self, parent_path: &'a Path) -> ChildEntriesIter<'a> {
1508 let mut cursor = self.entries_by_path.cursor();
1509 cursor.seek(&TraversalTarget::Path(parent_path), Bias::Right, &());
1510 let traversal = Traversal {
1511 cursor,
1512 include_dirs: true,
1513 include_ignored: true,
1514 };
1515 ChildEntriesIter {
1516 traversal,
1517 parent_path,
1518 }
1519 }
1520
1521 pub fn root_entry(&self) -> Option<&Entry> {
1522 self.entry_for_path("")
1523 }
1524
1525 pub fn root_name(&self) -> &str {
1526 &self.root_name
1527 }
1528
1529 pub fn root_git_entry(&self) -> Option<RepositoryEntry> {
1530 self.repository_entries
1531 .get(&RepositoryWorkDirectory(Path::new("").into()))
1532 .map(|entry| entry.to_owned())
1533 }
1534
1535 pub fn git_entries(&self) -> impl Iterator<Item = &RepositoryEntry> {
1536 self.repository_entries.values()
1537 }
1538
1539 pub fn scan_id(&self) -> usize {
1540 self.scan_id
1541 }
1542
1543 pub fn entry_for_path(&self, path: impl AsRef<Path>) -> Option<&Entry> {
1544 let path = path.as_ref();
1545 self.traverse_from_path(true, true, path)
1546 .entry()
1547 .and_then(|entry| {
1548 if entry.path.as_ref() == path {
1549 Some(entry)
1550 } else {
1551 None
1552 }
1553 })
1554 }
1555
1556 pub fn entry_for_id(&self, id: ProjectEntryId) -> Option<&Entry> {
1557 let entry = self.entries_by_id.get(&id, &())?;
1558 self.entry_for_path(&entry.path)
1559 }
1560
1561 pub fn inode_for_path(&self, path: impl AsRef<Path>) -> Option<u64> {
1562 self.entry_for_path(path.as_ref()).map(|e| e.inode)
1563 }
1564}
1565
impl LocalSnapshot {
    /// Returns the repository entry that should govern `path`: among all
    /// repositories whose work directory contains the path, the one with the
    /// longest work-directory path wins.
    pub(crate) fn repo_for(&self, path: &Path) -> Option<RepositoryEntry> {
        let mut max_len = 0;
        let mut current_candidate = None;
        for (work_directory, repo) in (&self.repository_entries).iter() {
            if repo.contains(self, path) {
                if work_directory.0.as_os_str().len() >= max_len {
                    current_candidate = Some(repo);
                    max_len = work_directory.0.as_os_str().len();
                } else {
                    // NOTE(review): stops at the first containing repo whose
                    // work directory is shorter than the best so far — this
                    // presumes iteration order makes deeper matches come
                    // first-to-last; confirm against `TreeMap` ordering.
                    break;
                }
            }
        }

        current_candidate.map(|entry| entry.to_owned())
    }

    /// Given a path inside some repository's `.git` directory, returns that
    /// repository's work-directory key and a handle to the underlying git
    /// repository.
    pub(crate) fn repo_for_metadata(
        &self,
        path: &Path,
    ) -> Option<(RepositoryWorkDirectory, Arc<Mutex<dyn GitRepository>>)> {
        let (entry_id, local_repo) = self
            .git_repositories
            .iter()
            .find(|(_, repo)| repo.in_dot_git(path))?;

        // Map the work-directory entry id back to its repository entry to
        // recover the work-directory path key.
        let work_dir = self
            .snapshot
            .repository_entries
            .iter()
            .find(|(_, entry)| *entry.work_directory == *entry_id)
            .and_then(|(_, entry)| entry.work_directory(self))?;

        Some((work_dir, local_repo.repo_ptr.to_owned()))
    }

    /// Builds an update message describing this entire snapshot from scratch
    /// (no removals; every entry is an insertion). Test-only helper.
    #[cfg(test)]
    pub(crate) fn build_initial_update(&self, project_id: u64) -> proto::UpdateWorktree {
        let root_name = self.root_name.clone();
        proto::UpdateWorktree {
            project_id,
            worktree_id: self.id().to_proto(),
            abs_path: self.abs_path().to_string_lossy().into(),
            root_name,
            updated_entries: self.entries_by_path.iter().map(Into::into).collect(),
            removed_entries: Default::default(),
            scan_id: self.scan_id as u64,
            is_last_update: true,
            updated_repositories: self.repository_entries.values().map(Into::into).collect(),
            removed_repositories: Default::default(),
        }
    }

    /// Builds an update message containing the difference between this
    /// snapshot and `other` (an earlier snapshot): a merge-join over the
    /// id-ordered entry trees, emitting updates for new/changed entries and
    /// removals for entries only present in `other`. The same diff is then
    /// computed for repository entries, keyed by work directory.
    pub(crate) fn build_update(
        &self,
        other: &Self,
        project_id: u64,
        worktree_id: u64,
        include_ignored: bool,
    ) -> proto::UpdateWorktree {
        let mut updated_entries = Vec::new();
        let mut removed_entries = Vec::new();
        let mut self_entries = self
            .entries_by_id
            .cursor::<()>()
            .filter(|e| include_ignored || !e.is_ignored)
            .peekable();
        let mut other_entries = other
            .entries_by_id
            .cursor::<()>()
            .filter(|e| include_ignored || !e.is_ignored)
            .peekable();
        loop {
            match (self_entries.peek(), other_entries.peek()) {
                (Some(self_entry), Some(other_entry)) => {
                    match Ord::cmp(&self_entry.id, &other_entry.id) {
                        Ordering::Less => {
                            // Entry exists only in the new snapshot: added.
                            let entry = self.entry_for_id(self_entry.id).unwrap().into();
                            updated_entries.push(entry);
                            self_entries.next();
                        }
                        Ordering::Equal => {
                            // Present in both: only emit it if its scan id
                            // changed (i.e. it was touched by a newer scan).
                            if self_entry.scan_id != other_entry.scan_id {
                                let entry = self.entry_for_id(self_entry.id).unwrap().into();
                                updated_entries.push(entry);
                            }

                            self_entries.next();
                            other_entries.next();
                        }
                        Ordering::Greater => {
                            // Entry exists only in the old snapshot: removed.
                            removed_entries.push(other_entry.id.to_proto());
                            other_entries.next();
                        }
                    }
                }
                (Some(self_entry), None) => {
                    let entry = self.entry_for_id(self_entry.id).unwrap().into();
                    updated_entries.push(entry);
                    self_entries.next();
                }
                (None, Some(other_entry)) => {
                    removed_entries.push(other_entry.id.to_proto());
                    other_entries.next();
                }
                (None, None) => break,
            }
        }

        // Same merge-join diff over repository entries, ordered by work directory.
        let mut updated_repositories: Vec<proto::RepositoryEntry> = Vec::new();
        let mut removed_repositories = Vec::new();
        let mut self_repos = self.snapshot.repository_entries.values().peekable();
        let mut other_repos = other.snapshot.repository_entries.values().peekable();
        loop {
            match (self_repos.peek(), other_repos.peek()) {
                (Some(self_repo), Some(other_repo)) => {
                    match Ord::cmp(&self_repo.work_directory, &other_repo.work_directory) {
                        Ordering::Less => {
                            updated_repositories.push((*self_repo).into());
                            self_repos.next();
                        }
                        Ordering::Equal => {
                            if self_repo.scan_id != other_repo.scan_id {
                                updated_repositories.push((*self_repo).into());
                            }

                            self_repos.next();
                            other_repos.next();
                        }
                        Ordering::Greater => {
                            removed_repositories.push(other_repo.work_directory.to_proto());
                            other_repos.next();
                        }
                    }
                }
                (Some(self_repo), None) => {
                    updated_repositories.push((*self_repo).into());
                    self_repos.next();
                }
                (None, Some(other_repo)) => {
                    removed_repositories.push(other_repo.work_directory.to_proto());
                    other_repos.next();
                }
                (None, None) => break,
            }
        }

        proto::UpdateWorktree {
            project_id,
            worktree_id,
            abs_path: self.abs_path().to_string_lossy().into(),
            root_name: self.root_name().to_string(),
            updated_entries,
            removed_entries,
            scan_id: self.scan_id as u64,
            is_last_update: self.completed_scan_id == self.scan_id,
            updated_repositories,
            removed_repositories,
        }
    }

    /// Inserts a single entry discovered by the scanner. Parses `.gitignore`
    /// files as they appear, reuses entry ids for paths/inodes seen before,
    /// and preserves an existing `Dir` kind when re-inserting a `PendingDir`.
    fn insert_entry(&mut self, mut entry: Entry, fs: &dyn Fs) -> Entry {
        if entry.is_file() && entry.path.file_name() == Some(&GITIGNORE) {
            let abs_path = self.abs_path.join(&entry.path);
            // Parse the ignore file synchronously so the ignore state is
            // current before any sibling entries are processed.
            match smol::block_on(build_gitignore(&abs_path, fs)) {
                Ok(ignore) => {
                    self.ignores_by_parent_abs_path.insert(
                        abs_path.parent().unwrap().into(),
                        (Arc::new(ignore), self.scan_id),
                    );
                }
                Err(error) => {
                    log::error!(
                        "error loading .gitignore file {:?} - {:?}",
                        &entry.path,
                        error
                    );
                }
            }
        }

        self.reuse_entry_id(&mut entry);

        // Don't let a freshly-scanned `PendingDir` downgrade a directory that
        // was already fully populated.
        if entry.kind == EntryKind::PendingDir {
            if let Some(existing_entry) =
                self.entries_by_path.get(&PathKey(entry.path.clone()), &())
            {
                entry.kind = existing_entry.kind;
            }
        }

        let scan_id = self.scan_id;
        let removed = self.entries_by_path.insert_or_replace(entry.clone(), &());
        if let Some(removed) = removed {
            // A different entry previously occupied this path; drop its id.
            if removed.id != entry.id {
                self.entries_by_id.remove(&removed.id, &());
            }
        }
        self.entries_by_id.insert_or_replace(
            PathEntry {
                id: entry.id,
                path: entry.path.clone(),
                is_ignored: entry.is_ignored,
                scan_id,
            },
            &(),
        );

        entry
    }

    /// Records the scanned contents of a directory: marks the parent as a
    /// fully-loaded `Dir`, registers its `.gitignore` (if any), detects a
    /// `.git` directory and registers the corresponding repository, and
    /// inserts all child entries.
    fn populate_dir(
        &mut self,
        parent_path: Arc<Path>,
        entries: impl IntoIterator<Item = Entry>,
        ignore: Option<Arc<Gitignore>>,
        fs: &dyn Fs,
    ) {
        let mut parent_entry = if let Some(parent_entry) =
            self.entries_by_path.get(&PathKey(parent_path.clone()), &())
        {
            parent_entry.clone()
        } else {
            log::warn!(
                "populating a directory {:?} that has been removed",
                parent_path
            );
            return;
        };

        match parent_entry.kind {
            EntryKind::PendingDir => {
                parent_entry.kind = EntryKind::Dir;
            }
            EntryKind::Dir => {}
            // Parent is not a directory; nothing sensible to populate.
            _ => return,
        }

        if let Some(ignore) = ignore {
            self.ignores_by_parent_abs_path.insert(
                self.abs_path.join(&parent_path).into(),
                (ignore, self.scan_id),
            );
        }

        if parent_path.file_name() == Some(&DOT_GIT) {
            let abs_path = self.abs_path.join(&parent_path);
            // The repository's work directory is the parent of `.git`.
            let content_path: Arc<Path> = parent_path.parent().unwrap().into();

            if let Some(work_dir_id) = self
                .entry_for_path(content_path.clone())
                .map(|entry| entry.id)
            {
                let key = RepositoryWorkDirectory(content_path.clone());
                // Only open the repository the first time we see it.
                if self.repository_entries.get(&key).is_none() {
                    if let Some(repo) = fs.open_repo(abs_path.as_path()) {
                        let repo_lock = repo.lock();
                        self.repository_entries.insert(
                            key.clone(),
                            RepositoryEntry {
                                work_directory: work_dir_id.into(),
                                scan_id: 0,
                                branch: repo_lock.branch_name().map(Into::into),
                            },
                        );
                        drop(repo_lock);

                        self.git_repositories.insert(
                            work_dir_id,
                            LocalRepositoryEntry {
                                repo_ptr: repo,
                                git_dir_path: parent_path.clone(),
                            },
                        )
                    }
                }
            }
        }

        let mut entries_by_path_edits = vec![Edit::Insert(parent_entry)];
        let mut entries_by_id_edits = Vec::new();

        for mut entry in entries {
            self.reuse_entry_id(&mut entry);
            entries_by_id_edits.push(Edit::Insert(PathEntry {
                id: entry.id,
                path: entry.path.clone(),
                is_ignored: entry.is_ignored,
                scan_id: self.scan_id,
            }));
            entries_by_path_edits.push(Edit::Insert(entry));
        }

        self.entries_by_path.edit(entries_by_path_edits, &());
        self.entries_by_id.edit(entries_by_id_edits, &());
    }

    /// Gives `entry` a stable id: reuse the id of a recently-removed entry
    /// with the same inode (rename/move detection), or the id of an existing
    /// entry at the same path.
    fn reuse_entry_id(&mut self, entry: &mut Entry) {
        if let Some(removed_entry_id) = self.removed_entry_ids.remove(&entry.inode) {
            entry.id = removed_entry_id;
        } else if let Some(existing_entry) = self.entry_for_path(&entry.path) {
            entry.id = existing_entry.id;
        }
    }

    /// Removes `path` and everything beneath it, remembering removed ids by
    /// inode so they can be reused if the files reappear. Also bumps the
    /// scan id on any `.gitignore` or repository entry affected by the
    /// removal.
    fn remove_path(&mut self, path: &Path) {
        let mut new_entries;
        let removed_entries;
        {
            let mut cursor = self.entries_by_path.cursor::<TraversalProgress>();
            // Split the tree into [before `path`], [`path` and descendants],
            // and [after], then rebuild without the middle slice.
            new_entries = cursor.slice(&TraversalTarget::Path(path), Bias::Left, &());
            removed_entries = cursor.slice(&TraversalTarget::PathSuccessor(path), Bias::Left, &());
            new_entries.push_tree(cursor.suffix(&()), &());
        }
        self.entries_by_path = new_entries;

        let mut entries_by_id_edits = Vec::new();
        for entry in removed_entries.cursor::<()>() {
            // Keep the highest id seen per inode so a reappearing file gets
            // its most recent identity back.
            let removed_entry_id = self
                .removed_entry_ids
                .entry(entry.inode)
                .or_insert(entry.id);
            *removed_entry_id = cmp::max(*removed_entry_id, entry.id);
            entries_by_id_edits.push(Edit::Remove(entry.id));
        }
        self.entries_by_id.edit(entries_by_id_edits, &());

        if path.file_name() == Some(&GITIGNORE) {
            // Mark the parent directory's ignore data as stale.
            let abs_parent_path = self.abs_path.join(path.parent().unwrap());
            if let Some((_, scan_id)) = self
                .ignores_by_parent_abs_path
                .get_mut(abs_parent_path.as_path())
            {
                *scan_id = self.snapshot.scan_id;
            }
        } else if path.file_name() == Some(&DOT_GIT) {
            // Mark the owning repository entry as touched by this scan.
            let repo_entry_key = RepositoryWorkDirectory(path.parent().unwrap().into());
            self.snapshot
                .repository_entries
                .update(&repo_entry_key, |repo| repo.scan_id = self.snapshot.scan_id);
        }
    }

    /// Collects the inodes of all known ancestors of `path` (excluding `path`
    /// itself); used by the scanner to detect symlink cycles.
    fn ancestor_inodes_for_path(&self, path: &Path) -> TreeSet<u64> {
        let mut inodes = TreeSet::default();
        for ancestor in path.ancestors().skip(1) {
            if let Some(entry) = self.entry_for_path(ancestor) {
                inodes.insert(entry.inode);
            }
        }
        inodes
    }

    /// Builds the stack of gitignores governing `abs_path` by walking its
    /// ancestors from the root down, short-circuiting to "ignore everything"
    /// as soon as any ancestor directory is itself ignored.
    fn ignore_stack_for_abs_path(&self, abs_path: &Path, is_dir: bool) -> Arc<IgnoreStack> {
        let mut new_ignores = Vec::new();
        for ancestor in abs_path.ancestors().skip(1) {
            if let Some((ignore, _)) = self.ignores_by_parent_abs_path.get(ancestor) {
                new_ignores.push((ancestor, Some(ignore.clone())));
            } else {
                new_ignores.push((ancestor, None));
            }
        }

        let mut ignore_stack = IgnoreStack::none();
        // `new_ignores` was collected leaf-to-root, so reverse to apply
        // outermost ignores first.
        for (parent_abs_path, ignore) in new_ignores.into_iter().rev() {
            if ignore_stack.is_abs_path_ignored(parent_abs_path, true) {
                ignore_stack = IgnoreStack::all();
                break;
            } else if let Some(ignore) = ignore {
                ignore_stack = ignore_stack.append(parent_abs_path.into(), ignore);
            }
        }

        if ignore_stack.is_abs_path_ignored(abs_path, is_dir) {
            ignore_stack = IgnoreStack::all();
        }

        ignore_stack
    }
}
1947
1948async fn build_gitignore(abs_path: &Path, fs: &dyn Fs) -> Result<Gitignore> {
1949 let contents = fs.load(abs_path).await?;
1950 let parent = abs_path.parent().unwrap_or_else(|| Path::new("/"));
1951 let mut builder = GitignoreBuilder::new(parent);
1952 for line in contents.lines() {
1953 builder.add_line(Some(abs_path.into()), line)?;
1954 }
1955 Ok(builder.build()?)
1956}
1957
1958impl WorktreeId {
1959 pub fn from_usize(handle_id: usize) -> Self {
1960 Self(handle_id)
1961 }
1962
1963 pub(crate) fn from_proto(id: u64) -> Self {
1964 Self(id as usize)
1965 }
1966
1967 pub fn to_proto(&self) -> u64 {
1968 self.0 as u64
1969 }
1970
1971 pub fn to_usize(&self) -> usize {
1972 self.0
1973 }
1974}
1975
1976impl fmt::Display for WorktreeId {
1977 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1978 self.0.fmt(f)
1979 }
1980}
1981
1982impl Deref for Worktree {
1983 type Target = Snapshot;
1984
1985 fn deref(&self) -> &Self::Target {
1986 match self {
1987 Worktree::Local(worktree) => &worktree.snapshot,
1988 Worktree::Remote(worktree) => &worktree.snapshot,
1989 }
1990 }
1991}
1992
impl Deref for LocalWorktree {
    type Target = LocalSnapshot;

    // A local worktree dereferences to its (local) snapshot.
    fn deref(&self) -> &Self::Target {
        &self.snapshot
    }
}
2000
impl Deref for RemoteWorktree {
    type Target = Snapshot;

    // A remote worktree dereferences to its published snapshot.
    fn deref(&self) -> &Self::Target {
        &self.snapshot
    }
}
2008
2009impl fmt::Debug for LocalWorktree {
2010 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2011 self.snapshot.fmt(f)
2012 }
2013}
2014
2015impl fmt::Debug for Snapshot {
2016 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2017 struct EntriesById<'a>(&'a SumTree<PathEntry>);
2018 struct EntriesByPath<'a>(&'a SumTree<Entry>);
2019
2020 impl<'a> fmt::Debug for EntriesByPath<'a> {
2021 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2022 f.debug_map()
2023 .entries(self.0.iter().map(|entry| (&entry.path, entry.id)))
2024 .finish()
2025 }
2026 }
2027
2028 impl<'a> fmt::Debug for EntriesById<'a> {
2029 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2030 f.debug_list().entries(self.0.iter()).finish()
2031 }
2032 }
2033
2034 f.debug_struct("Snapshot")
2035 .field("id", &self.id)
2036 .field("root_name", &self.root_name)
2037 .field("entries_by_path", &EntriesByPath(&self.entries_by_path))
2038 .field("entries_by_id", &EntriesById(&self.entries_by_id))
2039 .finish()
2040 }
2041}
2042
/// A file belonging to a worktree, as exposed to the language layer.
#[derive(Clone, PartialEq)]
pub struct File {
    /// The worktree containing this file.
    pub worktree: ModelHandle<Worktree>,
    /// Path of the file relative to the worktree root.
    pub path: Arc<Path>,
    /// Last-known modification time of the file.
    pub mtime: SystemTime,
    // Project entry backing this file; see `project_entry_id`, which hides it
    // once `is_deleted` is set.
    pub(crate) entry_id: ProjectEntryId,
    // Whether the owning worktree is local (enables the `LocalFile` interface).
    pub(crate) is_local: bool,
    // Set once the underlying file has been deleted.
    pub(crate) is_deleted: bool,
}
2052
impl language::File for File {
    // Expose the `LocalFile` interface only for files in a local worktree.
    fn as_local(&self) -> Option<&dyn language::LocalFile> {
        if self.is_local {
            Some(self)
        } else {
            None
        }
    }

    fn mtime(&self) -> SystemTime {
        self.mtime
    }

    fn path(&self) -> &Arc<Path> {
        &self.path
    }

    /// Builds a path suitable for display: for visible worktrees the path is
    /// rooted at the worktree's name; for non-visible local worktrees the
    /// absolute path is used, with the home directory abbreviated to `~`.
    fn full_path(&self, cx: &AppContext) -> PathBuf {
        let mut full_path = PathBuf::new();
        let worktree = self.worktree.read(cx);

        if worktree.is_visible() {
            full_path.push(worktree.root_name());
        } else {
            let path = worktree.abs_path();

            if worktree.is_local() && path.starts_with(HOME.as_path()) {
                full_path.push("~");
                full_path.push(path.strip_prefix(HOME.as_path()).unwrap());
            } else {
                full_path.push(path)
            }
        }

        // An empty relative path means this file *is* the worktree root.
        if self.path.components().next().is_some() {
            full_path.push(&self.path);
        }

        full_path
    }

    /// Returns the last component of this handle's absolute path. If this handle refers to the root
    /// of its worktree, then this method will return the name of the worktree itself.
    fn file_name<'a>(&'a self, cx: &'a AppContext) -> &'a OsStr {
        self.path
            .file_name()
            .unwrap_or_else(|| OsStr::new(&self.worktree.read(cx).root_name))
    }

    fn is_deleted(&self) -> bool {
        self.is_deleted
    }

    fn as_any(&self) -> &dyn Any {
        self
    }

    fn to_proto(&self) -> rpc::proto::File {
        rpc::proto::File {
            // NOTE(review): uses the model handle's id as the worktree id —
            // `WorktreeId::from_usize` suggests the two are kept equal by
            // construction; confirm this invariant.
            worktree_id: self.worktree.id() as u64,
            entry_id: self.entry_id.to_proto(),
            path: self.path.to_string_lossy().into(),
            mtime: Some(self.mtime.into()),
            is_deleted: self.is_deleted,
        }
    }
}
2120
impl language::LocalFile for File {
    /// Absolute path of this file on disk: the worktree's absolute root path
    /// joined with the file's relative path.
    fn abs_path(&self, cx: &AppContext) -> PathBuf {
        self.worktree
            .read(cx)
            .as_local()
            .unwrap()
            .abs_path
            .join(&self.path)
    }

    /// Loads the file's contents from disk on a background thread.
    fn load(&self, cx: &AppContext) -> Task<Result<String>> {
        let worktree = self.worktree.read(cx).as_local().unwrap();
        let abs_path = worktree.absolutize(&self.path);
        let fs = worktree.fs.clone();
        cx.background()
            .spawn(async move { fs.load(&abs_path).await })
    }

    /// Notifies collaborators that this buffer was reloaded from disk. Only
    /// sends the message when the worktree is currently shared; send failures
    /// are logged and ignored.
    fn buffer_reloaded(
        &self,
        buffer_id: u64,
        version: &clock::Global,
        fingerprint: RopeFingerprint,
        line_ending: LineEnding,
        mtime: SystemTime,
        cx: &mut AppContext,
    ) {
        let worktree = self.worktree.read(cx).as_local().unwrap();
        if let Some(project_id) = worktree.share.as_ref().map(|share| share.project_id) {
            worktree
                .client
                .send(proto::BufferReloaded {
                    project_id,
                    buffer_id,
                    version: serialize_version(version),
                    mtime: Some(mtime.into()),
                    fingerprint: serialize_fingerprint(fingerprint),
                    line_ending: serialize_line_ending(line_ending) as i32,
                })
                .log_err();
        }
    }
}
2164
2165impl File {
2166 pub fn from_proto(
2167 proto: rpc::proto::File,
2168 worktree: ModelHandle<Worktree>,
2169 cx: &AppContext,
2170 ) -> Result<Self> {
2171 let worktree_id = worktree
2172 .read(cx)
2173 .as_remote()
2174 .ok_or_else(|| anyhow!("not remote"))?
2175 .id();
2176
2177 if worktree_id.to_proto() != proto.worktree_id {
2178 return Err(anyhow!("worktree id does not match file"));
2179 }
2180
2181 Ok(Self {
2182 worktree,
2183 path: Path::new(&proto.path).into(),
2184 mtime: proto.mtime.ok_or_else(|| anyhow!("no timestamp"))?.into(),
2185 entry_id: ProjectEntryId::from_proto(proto.entry_id),
2186 is_local: false,
2187 is_deleted: proto.is_deleted,
2188 })
2189 }
2190
2191 pub fn from_dyn(file: Option<&Arc<dyn language::File>>) -> Option<&Self> {
2192 file.and_then(|f| f.as_any().downcast_ref())
2193 }
2194
2195 pub fn worktree_id(&self, cx: &AppContext) -> WorktreeId {
2196 self.worktree.read(cx).id()
2197 }
2198
2199 pub fn project_entry_id(&self, _: &AppContext) -> Option<ProjectEntryId> {
2200 if self.is_deleted {
2201 None
2202 } else {
2203 Some(self.entry_id)
2204 }
2205 }
2206}
2207
/// A single filesystem entry (file or directory) within a worktree snapshot.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct Entry {
    /// Stable id, preserved across renames when possible (see `reuse_entry_id`).
    pub id: ProjectEntryId,
    /// File vs. (possibly still-pending) directory.
    pub kind: EntryKind,
    /// Path relative to the worktree root.
    pub path: Arc<Path>,
    /// Filesystem inode, used to re-identify entries that reappear.
    pub inode: u64,
    /// Modification time reported by the filesystem.
    pub mtime: SystemTime,
    pub is_symlink: bool,
    /// Whether this entry is covered by the gitignore stack.
    pub is_ignored: bool,
}
2218
/// The kind of a worktree entry.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum EntryKind {
    /// A directory whose children have not been scanned yet; upgraded to
    /// `Dir` by `populate_dir`.
    PendingDir,
    /// A fully-scanned directory.
    Dir,
    /// A file, carrying the character bag used for fuzzy matching.
    File(CharBag),
}
2225
/// Describes how a path changed between two snapshots.
#[derive(Clone, Copy, Debug)]
pub enum PathChange {
    Added,
    Removed,
    Updated,
    /// The path was either added or updated; the two cases were not distinguished.
    AddedOrUpdated,
}
2233
2234impl Entry {
2235 fn new(
2236 path: Arc<Path>,
2237 metadata: &fs::Metadata,
2238 next_entry_id: &AtomicUsize,
2239 root_char_bag: CharBag,
2240 ) -> Self {
2241 Self {
2242 id: ProjectEntryId::new(next_entry_id),
2243 kind: if metadata.is_dir {
2244 EntryKind::PendingDir
2245 } else {
2246 EntryKind::File(char_bag_for_path(root_char_bag, &path))
2247 },
2248 path,
2249 inode: metadata.inode,
2250 mtime: metadata.mtime,
2251 is_symlink: metadata.is_symlink,
2252 is_ignored: false,
2253 }
2254 }
2255
2256 pub fn is_dir(&self) -> bool {
2257 matches!(self.kind, EntryKind::Dir | EntryKind::PendingDir)
2258 }
2259
2260 pub fn is_file(&self) -> bool {
2261 matches!(self.kind, EntryKind::File(_))
2262 }
2263}
2264
2265impl sum_tree::Item for Entry {
2266 type Summary = EntrySummary;
2267
2268 fn summary(&self) -> Self::Summary {
2269 let visible_count = if self.is_ignored { 0 } else { 1 };
2270 let file_count;
2271 let visible_file_count;
2272 if self.is_file() {
2273 file_count = 1;
2274 visible_file_count = visible_count;
2275 } else {
2276 file_count = 0;
2277 visible_file_count = 0;
2278 }
2279
2280 EntrySummary {
2281 max_path: self.path.clone(),
2282 count: 1,
2283 visible_count,
2284 file_count,
2285 visible_file_count,
2286 }
2287 }
2288}
2289
impl sum_tree::KeyedItem for Entry {
    type Key = PathKey;

    // Entries are keyed (and therefore ordered) by their path.
    fn key(&self) -> Self::Key {
        PathKey(self.path.clone())
    }
}
2297
/// Aggregate statistics over a range of `Entry` items in the path-keyed tree.
#[derive(Clone, Debug)]
pub struct EntrySummary {
    // Greatest (rightmost) path in the summarized range.
    max_path: Arc<Path>,
    // Total number of entries.
    count: usize,
    // Entries that are not ignored.
    visible_count: usize,
    // Entries that are files.
    file_count: usize,
    // Entries that are non-ignored files.
    visible_file_count: usize,
}
2306
impl Default for EntrySummary {
    /// The identity summary: empty path and zero counts, suitable as the
    /// starting value when accumulating summaries.
    fn default() -> Self {
        Self {
            max_path: Arc::from(Path::new("")),
            count: 0,
            visible_count: 0,
            file_count: 0,
            visible_file_count: 0,
        }
    }
}
2318
impl sum_tree::Summary for EntrySummary {
    type Context = ();

    /// Combines two adjacent summaries: counts add, and the right-hand
    /// summary's `max_path` wins since it covers the later range.
    fn add_summary(&mut self, rhs: &Self, _: &()) {
        self.max_path = rhs.max_path.clone();
        self.count += rhs.count;
        self.visible_count += rhs.visible_count;
        self.file_count += rhs.file_count;
        self.visible_file_count += rhs.visible_file_count;
    }
}
2330
/// An entry in the id-keyed tree (`entries_by_id`), mapping a stable
/// `ProjectEntryId` to the entry's current path and ignore status.
#[derive(Clone, Debug)]
struct PathEntry {
    id: ProjectEntryId,
    path: Arc<Path>,
    is_ignored: bool,
    /// The scan in which this entry was last touched.
    scan_id: usize,
}
2338
impl sum_tree::Item for PathEntry {
    type Summary = PathEntrySummary;

    /// A single path entry's summary is just its own id.
    fn summary(&self) -> Self::Summary {
        PathEntrySummary { max_id: self.id }
    }
}
2346
impl sum_tree::KeyedItem for PathEntry {
    type Key = ProjectEntryId;

    /// Path entries are keyed by their project entry id.
    fn key(&self) -> Self::Key {
        self.id
    }
}
2354
/// Sum-tree summary for `PathEntry`: the greatest id in the range.
#[derive(Clone, Debug, Default)]
struct PathEntrySummary {
    max_id: ProjectEntryId,
}
2359
impl sum_tree::Summary for PathEntrySummary {
    type Context = ();

    /// The right-hand summary covers later items, so its `max_id` wins.
    fn add_summary(&mut self, summary: &Self, _: &Self::Context) {
        self.max_id = summary.max_id;
    }
}
2367
/// Lets cursors over the id-keyed tree seek by `ProjectEntryId`.
impl<'a> sum_tree::Dimension<'a, PathEntrySummary> for ProjectEntryId {
    fn add_summary(&mut self, summary: &'a PathEntrySummary, _: &()) {
        *self = summary.max_id;
    }
}
2373
/// Newtype key ordering worktree entries by their relative path.
#[derive(Clone, Debug, Eq, PartialEq, Ord, PartialOrd)]
pub struct PathKey(Arc<Path>);
2376
2377impl Default for PathKey {
2378 fn default() -> Self {
2379 Self(Path::new("").into())
2380 }
2381}
2382
/// Lets cursors over the path-keyed entry tree seek by `PathKey`.
impl<'a> sum_tree::Dimension<'a, EntrySummary> for PathKey {
    fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
        self.0 = summary.max_path.clone();
    }
}
2388
/// Scans the worktree's directory on a background task, keeping the snapshot
/// in sync with the filesystem and reporting progress over a channel.
struct BackgroundScanner {
    /// The authoritative, continuously updated state of the worktree.
    snapshot: Mutex<LocalSnapshot>,
    fs: Arc<dyn Fs>,
    /// Where scan progress and updated snapshots are reported.
    status_updates_tx: UnboundedSender<ScanState>,
    executor: Arc<executor::Background>,
    /// Requests to re-scan specific paths, each paired with a barrier to
    /// signal completion back to the requester.
    refresh_requests_rx: channel::Receiver<(Vec<PathBuf>, barrier::Sender)>,
    /// The last snapshot sent to subscribers, plus the paths changed since
    /// then; used to compute change sets.
    prev_state: Mutex<(Snapshot, Vec<Arc<Path>>)>,
    /// Whether the initial recursive scan has completed.
    finished_initial_scan: bool,
}
2398
impl BackgroundScanner {
    fn new(
        snapshot: LocalSnapshot,
        fs: Arc<dyn Fs>,
        status_updates_tx: UnboundedSender<ScanState>,
        executor: Arc<executor::Background>,
        refresh_requests_rx: channel::Receiver<(Vec<PathBuf>, barrier::Sender)>,
    ) -> Self {
        Self {
            fs,
            status_updates_tx,
            executor,
            refresh_requests_rx,
            // Seed the "previous" state with a clone of the initial snapshot,
            // so the first status update is diffed against the starting state.
            prev_state: Mutex::new((snapshot.snapshot.clone(), Vec::new())),
            snapshot: Mutex::new(snapshot),
            finished_initial_scan: false,
        }
    }

    /// Drives the scanner: performs the initial recursive scan of the root,
    /// then loops handling refresh requests and batched FS events until the
    /// channels close (i.e. the worktree is dropped).
    async fn run(
        &mut self,
        mut events_rx: Pin<Box<dyn Send + Stream<Item = Vec<fsevent::Event>>>>,
    ) {
        use futures::FutureExt as _;

        let (root_abs_path, root_inode) = {
            let snapshot = self.snapshot.lock();
            (
                snapshot.abs_path.clone(),
                snapshot.root_entry().map(|e| e.inode),
            )
        };

        // Populate ignores above the root.
        let ignore_stack;
        for ancestor in root_abs_path.ancestors().skip(1) {
            if let Ok(ignore) = build_gitignore(&ancestor.join(&*GITIGNORE), self.fs.as_ref()).await
            {
                self.snapshot
                    .lock()
                    .ignores_by_parent_abs_path
                    .insert(ancestor.into(), (ignore.into(), 0));
            }
        }
        {
            let mut snapshot = self.snapshot.lock();
            snapshot.scan_id += 1;
            ignore_stack = snapshot.ignore_stack_for_abs_path(&root_abs_path, true);
            // If an ancestor gitignore matches the root itself, mark the root
            // entry as ignored before scanning.
            if ignore_stack.is_all() {
                if let Some(mut root_entry) = snapshot.root_entry().cloned() {
                    root_entry.is_ignored = true;
                    snapshot.insert_entry(root_entry, self.fs.as_ref());
                }
            }
        };

        // Perform an initial scan of the directory.
        let (scan_job_tx, scan_job_rx) = channel::unbounded();
        smol::block_on(scan_job_tx.send(ScanJob {
            abs_path: root_abs_path,
            path: Arc::from(Path::new("")),
            ignore_stack,
            ancestor_inodes: TreeSet::from_ordered_entries(root_inode),
            scan_queue: scan_job_tx.clone(),
        }))
        .unwrap();
        // Dropping the sender lets `scan_dirs` finish once all queued jobs
        // (including jobs enqueued by other jobs) are exhausted.
        drop(scan_job_tx);
        self.scan_dirs(true, scan_job_rx).await;
        {
            let mut snapshot = self.snapshot.lock();
            snapshot.completed_scan_id = snapshot.scan_id;
        }
        self.send_status_update(false, None);

        // Process any FS events that occurred while performing the initial scan.
        // For these events, update events cannot be as precise, because we didn't
        // have the previous state loaded yet.
        if let Poll::Ready(Some(events)) = futures::poll!(events_rx.next()) {
            let mut paths = events.into_iter().map(|e| e.path).collect::<Vec<_>>();
            // Drain every batch that is already ready, so they're processed
            // together.
            while let Poll::Ready(Some(more_events)) = futures::poll!(events_rx.next()) {
                paths.extend(more_events.into_iter().map(|e| e.path));
            }
            self.process_events(paths).await;
        }

        self.finished_initial_scan = true;

        // Continue processing events until the worktree is dropped.
        loop {
            select_biased! {
                // Process any path refresh requests from the worktree. Prioritize
                // these before handling changes reported by the filesystem.
                request = self.refresh_requests_rx.recv().fuse() => {
                    let Ok((paths, barrier)) = request else { break };
                    if !self.process_refresh_request(paths, barrier).await {
                        return;
                    }
                }

                events = events_rx.next().fuse() => {
                    let Some(events) = events else { break };
                    let mut paths = events.into_iter().map(|e| e.path).collect::<Vec<_>>();
                    // Coalesce all immediately-available event batches.
                    while let Poll::Ready(Some(more_events)) = futures::poll!(events_rx.next()) {
                        paths.extend(more_events.into_iter().map(|e| e.path));
                    }
                    self.process_events(paths).await;
                }
            }
        }
    }

    /// Reloads the given paths and reports a status update, releasing
    /// `barrier` so the requester can observe completion. Returns false if
    /// the status channel is closed.
    async fn process_refresh_request(&self, paths: Vec<PathBuf>, barrier: barrier::Sender) -> bool {
        self.reload_entries_for_paths(paths, None).await;
        self.send_status_update(false, Some(barrier))
    }

    /// Handles a batch of FS events: reloads the affected entries (scanning
    /// changed directories recursively), refreshes ignore statuses, prunes
    /// git repositories whose `.git` entry vanished, and emits an update.
    async fn process_events(&mut self, paths: Vec<PathBuf>) {
        let (scan_job_tx, scan_job_rx) = channel::unbounded();
        if let Some(mut paths) = self
            .reload_entries_for_paths(paths, Some(scan_job_tx.clone()))
            .await
        {
            // Record the changed paths (kept sorted) for the next change set.
            paths.sort_unstable();
            util::extend_sorted(&mut self.prev_state.lock().1, paths, usize::MAX, Ord::cmp);
        }
        drop(scan_job_tx);
        self.scan_dirs(false, scan_job_rx).await;

        self.update_ignore_statuses().await;

        let mut snapshot = self.snapshot.lock();

        // Temporarily take the map so the retain closure can borrow `snapshot`
        // immutably while filtering. Keep only repositories whose work
        // directory still contains a `.git` entry.
        let mut git_repositories = mem::take(&mut snapshot.git_repositories);
        git_repositories.retain(|work_directory_id, _| {
            snapshot
                .entry_for_id(*work_directory_id)
                .map_or(false, |entry| {
                    snapshot.entry_for_path(entry.path.join(*DOT_GIT)).is_some()
                })
        });
        snapshot.git_repositories = git_repositories;

        // Likewise, drop repository entries whose backing repository was
        // removed above.
        let mut git_repository_entries = mem::take(&mut snapshot.snapshot.repository_entries);
        git_repository_entries.retain(|_, entry| {
            snapshot
                .git_repositories
                .get(&entry.work_directory.0)
                .is_some()
        });
        snapshot.snapshot.repository_entries = git_repository_entries;

        snapshot.removed_entry_ids.clear();
        snapshot.completed_scan_id = snapshot.scan_id;

        drop(snapshot);

        self.send_status_update(false, None);
    }

    /// Processes queued `ScanJob`s on all CPUs until the queue is drained,
    /// interleaving refresh requests and (optionally) periodic progress
    /// updates.
    async fn scan_dirs(
        &self,
        enable_progress_updates: bool,
        scan_jobs_rx: channel::Receiver<ScanJob>,
    ) {
        use futures::FutureExt as _;

        if self
            .status_updates_tx
            .unbounded_send(ScanState::Started)
            .is_err()
        {
            // The worktree stopped listening; no point scanning.
            return;
        }

        let progress_update_count = AtomicUsize::new(0);
        self.executor
            .scoped(|scope| {
                for _ in 0..self.executor.num_cpus() {
                    scope.spawn(async {
                        let mut last_progress_update_count = 0;
                        let progress_update_timer = self.progress_timer(enable_progress_updates).fuse();
                        futures::pin_mut!(progress_update_timer);

                        loop {
                            select_biased! {
                                // Process any path refresh requests before moving on to process
                                // the scan queue, so that user operations are prioritized.
                                request = self.refresh_requests_rx.recv().fuse() => {
                                    let Ok((paths, barrier)) = request else { break };
                                    if !self.process_refresh_request(paths, barrier).await {
                                        return;
                                    }
                                }

                                // Send periodic progress updates to the worktree. Use an atomic counter
                                // to ensure that only one of the workers sends a progress update after
                                // the update interval elapses.
                                _ = progress_update_timer => {
                                    match progress_update_count.compare_exchange(
                                        last_progress_update_count,
                                        last_progress_update_count + 1,
                                        SeqCst,
                                        SeqCst
                                    ) {
                                        Ok(_) => {
                                            last_progress_update_count += 1;
                                            self.send_status_update(true, None);
                                        }
                                        Err(count) => {
                                            // Another worker already reported for this
                                            // interval; adopt its count.
                                            last_progress_update_count = count;
                                        }
                                    }
                                    progress_update_timer.set(self.progress_timer(enable_progress_updates).fuse());
                                }

                                // Recursively load directories from the file system.
                                job = scan_jobs_rx.recv().fuse() => {
                                    let Ok(job) = job else { break };
                                    if let Err(err) = self.scan_dir(&job).await {
                                        if job.path.as_ref() != Path::new("") {
                                            log::error!("error scanning directory {:?}: {}", job.abs_path, err);
                                        }
                                    }
                                }
                            }
                        }
                    })
                }
            })
            .await;
    }

    /// Sends the current snapshot plus a change set (diffed against the last
    /// reported state) to the worktree. Returns false if the receiver is gone.
    fn send_status_update(&self, scanning: bool, barrier: Option<barrier::Sender>) -> bool {
        let mut prev_state = self.prev_state.lock();
        let snapshot = self.snapshot.lock().clone();
        // Swap the stored previous snapshot with the current one, and take
        // the accumulated changed paths.
        let mut old_snapshot = snapshot.snapshot.clone();
        mem::swap(&mut old_snapshot, &mut prev_state.0);
        let changed_paths = mem::take(&mut prev_state.1);
        let changes = self.build_change_set(&old_snapshot, &snapshot.snapshot, changed_paths);
        self.status_updates_tx
            .unbounded_send(ScanState::Updated {
                snapshot,
                changes,
                scanning,
                barrier,
            })
            .is_ok()
    }

    /// Reads one directory's children, building entries for them and
    /// enqueueing a `ScanJob` for each non-cyclic subdirectory.
    async fn scan_dir(&self, job: &ScanJob) -> Result<()> {
        let mut new_entries: Vec<Entry> = Vec::new();
        // One slot per directory entry, in order; `None` marks a directory
        // that won't be recursed into (cyclic symlink).
        let mut new_jobs: Vec<Option<ScanJob>> = Vec::new();
        let mut ignore_stack = job.ignore_stack.clone();
        let mut new_ignore = None;
        let (root_abs_path, root_char_bag, next_entry_id) = {
            let snapshot = self.snapshot.lock();
            (
                snapshot.abs_path().clone(),
                snapshot.root_char_bag,
                snapshot.next_entry_id.clone(),
            )
        };
        let mut child_paths = self.fs.read_dir(&job.abs_path).await?;
        while let Some(child_abs_path) = child_paths.next().await {
            let child_abs_path: Arc<Path> = match child_abs_path {
                Ok(child_abs_path) => child_abs_path.into(),
                Err(error) => {
                    log::error!("error processing entry {:?}", error);
                    continue;
                }
            };

            let child_name = child_abs_path.file_name().unwrap();
            let child_path: Arc<Path> = job.path.join(child_name).into();
            let child_metadata = match self.fs.metadata(&child_abs_path).await {
                Ok(Some(metadata)) => metadata,
                Ok(None) => continue,
                Err(err) => {
                    log::error!("error processing {:?}: {:?}", child_abs_path, err);
                    continue;
                }
            };

            // If we find a .gitignore, add it to the stack of ignores used to determine which paths are ignored
            if child_name == *GITIGNORE {
                match build_gitignore(&child_abs_path, self.fs.as_ref()).await {
                    Ok(ignore) => {
                        let ignore = Arc::new(ignore);
                        ignore_stack = ignore_stack.append(job.abs_path.clone(), ignore.clone());
                        new_ignore = Some(ignore);
                    }
                    Err(error) => {
                        log::error!(
                            "error loading .gitignore file {:?} - {:?}",
                            child_name,
                            error
                        );
                    }
                }

                // Update the ignore status of any child entries we've already
                // processed to reflect the ignore file in the current directory.
                // Because `.gitignore` starts with a `.`, few entries usually
                // precede it in the listing, so this fix-up pass is short.
                // Update the ignore stack associated with any new jobs as well.
                let mut new_jobs = new_jobs.iter_mut();
                for entry in &mut new_entries {
                    let entry_abs_path = root_abs_path.join(&entry.path);
                    entry.is_ignored =
                        ignore_stack.is_abs_path_ignored(&entry_abs_path, entry.is_dir());

                    if entry.is_dir() {
                        // Jobs were pushed in the same order as directory
                        // entries, so this iterator stays in lockstep.
                        if let Some(job) = new_jobs.next().expect("Missing scan job for entry") {
                            job.ignore_stack = if entry.is_ignored {
                                IgnoreStack::all()
                            } else {
                                ignore_stack.clone()
                            };
                        }
                    }
                }
            }

            let mut child_entry = Entry::new(
                child_path.clone(),
                &child_metadata,
                &next_entry_id,
                root_char_bag,
            );

            if child_entry.is_dir() {
                let is_ignored = ignore_stack.is_abs_path_ignored(&child_abs_path, true);
                child_entry.is_ignored = is_ignored;

                // Avoid recursing until crash in the case of a recursive symlink
                if !job.ancestor_inodes.contains(&child_entry.inode) {
                    let mut ancestor_inodes = job.ancestor_inodes.clone();
                    ancestor_inodes.insert(child_entry.inode);

                    new_jobs.push(Some(ScanJob {
                        abs_path: child_abs_path,
                        path: child_path,
                        ignore_stack: if is_ignored {
                            IgnoreStack::all()
                        } else {
                            ignore_stack.clone()
                        },
                        ancestor_inodes,
                        scan_queue: job.scan_queue.clone(),
                    }));
                } else {
                    new_jobs.push(None);
                }
            } else {
                child_entry.is_ignored = ignore_stack.is_abs_path_ignored(&child_abs_path, false);
            }

            new_entries.push(child_entry);
        }

        self.snapshot.lock().populate_dir(
            job.path.clone(),
            new_entries,
            new_ignore,
            self.fs.as_ref(),
        );

        // Enqueue the subdirectory jobs only after the parent is populated.
        for new_job in new_jobs {
            if let Some(new_job) = new_job {
                job.scan_queue.send(new_job).await.unwrap();
            }
        }

        Ok(())
    }

    /// Re-reads metadata for the given absolute paths and updates the
    /// snapshot accordingly. When `scan_queue_tx` is provided, changed
    /// directories are enqueued for recursive scanning. Returns the affected
    /// worktree-relative paths, or `None` if the root can't be canonicalized.
    async fn reload_entries_for_paths(
        &self,
        mut abs_paths: Vec<PathBuf>,
        scan_queue_tx: Option<Sender<ScanJob>>,
    ) -> Option<Vec<Arc<Path>>> {
        let doing_recursive_update = scan_queue_tx.is_some();

        // Sort, then drop any path that lies under an earlier path in the
        // list: reloading the ancestor covers its descendants.
        abs_paths.sort_unstable();
        abs_paths.dedup_by(|a, b| a.starts_with(&b));

        let root_abs_path = self.snapshot.lock().abs_path.clone();
        let root_canonical_path = self.fs.canonicalize(&root_abs_path).await.log_err()?;
        // Fetch metadata for all paths concurrently, preserving order.
        let metadata = futures::future::join_all(
            abs_paths
                .iter()
                .map(|abs_path| self.fs.metadata(&abs_path))
                .collect::<Vec<_>>(),
        )
        .await;

        let mut snapshot = self.snapshot.lock();
        let is_idle = snapshot.completed_scan_id == snapshot.scan_id;
        snapshot.scan_id += 1;
        if is_idle && !doing_recursive_update {
            snapshot.completed_scan_id = snapshot.scan_id;
        }

        // Remove any entries for paths that no longer exist or are being recursively
        // refreshed. Do this before adding any new entries, so that renames can be
        // detected regardless of the order of the paths.
        let mut event_paths = Vec::<Arc<Path>>::with_capacity(abs_paths.len());
        for (abs_path, metadata) in abs_paths.iter().zip(metadata.iter()) {
            if let Ok(path) = abs_path.strip_prefix(&root_canonical_path) {
                if matches!(metadata, Ok(None)) || doing_recursive_update {
                    snapshot.remove_path(path);
                }
                event_paths.push(path.into());
            } else {
                log::error!(
                    "unexpected event {:?} for root path {:?}",
                    abs_path,
                    root_canonical_path
                );
            }
        }

        for (path, metadata) in event_paths.iter().cloned().zip(metadata.into_iter()) {
            let abs_path: Arc<Path> = root_abs_path.join(&path).into();

            match metadata {
                Ok(Some(metadata)) => {
                    let ignore_stack =
                        snapshot.ignore_stack_for_abs_path(&abs_path, metadata.is_dir);
                    let mut fs_entry = Entry::new(
                        path.clone(),
                        &metadata,
                        snapshot.next_entry_id.as_ref(),
                        snapshot.root_char_bag,
                    );
                    fs_entry.is_ignored = ignore_stack.is_all();
                    snapshot.insert_entry(fs_entry, self.fs.as_ref());

                    let scan_id = snapshot.scan_id;

                    // If this path lies within a repository's `.git`
                    // directory, reload that repository's index and branch.
                    let repo_with_path_in_dotgit = snapshot.repo_for_metadata(&path);
                    if let Some((key, repo)) = repo_with_path_in_dotgit {
                        let repo = repo.lock();
                        repo.reload_index();
                        let branch = repo.branch_name();

                        snapshot.repository_entries.update(&key, |entry| {
                            entry.scan_id = scan_id;
                            entry.branch = branch.map(Into::into)
                        });
                    }

                    if let Some(scan_queue_tx) = &scan_queue_tx {
                        let mut ancestor_inodes = snapshot.ancestor_inodes_for_path(&path);
                        // Only recurse into directories that don't form a
                        // symlink cycle with an ancestor.
                        if metadata.is_dir && !ancestor_inodes.contains(&metadata.inode) {
                            ancestor_inodes.insert(metadata.inode);
                            smol::block_on(scan_queue_tx.send(ScanJob {
                                abs_path,
                                path,
                                ignore_stack,
                                ancestor_inodes,
                                scan_queue: scan_queue_tx.clone(),
                            }))
                            .unwrap();
                        }
                    }
                }
                Ok(None) => {}
                Err(err) => {
                    // TODO - create a special 'error' entry in the entries tree to mark this
                    log::error!("error reading file on event {:?}", err);
                }
            }
        }

        Some(event_paths)
    }

    /// Re-applies gitignore files that changed during the current scan,
    /// recursively recomputing the ignore status of affected entries, and
    /// drops records of gitignore files that no longer exist.
    async fn update_ignore_statuses(&self) {
        use futures::FutureExt as _;

        let mut snapshot = self.snapshot.lock().clone();
        let mut ignores_to_update = Vec::new();
        let mut ignores_to_delete = Vec::new();
        for (parent_abs_path, (_, scan_id)) in &snapshot.ignores_by_parent_abs_path {
            if let Ok(parent_path) = parent_abs_path.strip_prefix(&snapshot.abs_path) {
                // A gitignore touched after the last completed scan needs its
                // subtree's ignore statuses recomputed.
                if *scan_id > snapshot.completed_scan_id
                    && snapshot.entry_for_path(parent_path).is_some()
                {
                    ignores_to_update.push(parent_abs_path.clone());
                }

                let ignore_path = parent_path.join(&*GITIGNORE);
                if snapshot.entry_for_path(ignore_path).is_none() {
                    ignores_to_delete.push(parent_abs_path.clone());
                }
            }
        }

        for parent_abs_path in ignores_to_delete {
            // Remove from both the local clone and the shared snapshot.
            snapshot.ignores_by_parent_abs_path.remove(&parent_abs_path);
            self.snapshot
                .lock()
                .ignores_by_parent_abs_path
                .remove(&parent_abs_path);
        }

        let (ignore_queue_tx, ignore_queue_rx) = channel::unbounded();
        ignores_to_update.sort_unstable();
        let mut ignores_to_update = ignores_to_update.into_iter().peekable();
        while let Some(parent_abs_path) = ignores_to_update.next() {
            // Skip any paths nested under this one; the recursive job for the
            // ancestor will cover them.
            while ignores_to_update
                .peek()
                .map_or(false, |p| p.starts_with(&parent_abs_path))
            {
                ignores_to_update.next().unwrap();
            }

            let ignore_stack = snapshot.ignore_stack_for_abs_path(&parent_abs_path, true);
            smol::block_on(ignore_queue_tx.send(UpdateIgnoreStatusJob {
                abs_path: parent_abs_path,
                ignore_stack,
                ignore_queue: ignore_queue_tx.clone(),
            }))
            .unwrap();
        }
        drop(ignore_queue_tx);

        self.executor
            .scoped(|scope| {
                for _ in 0..self.executor.num_cpus() {
                    scope.spawn(async {
                        loop {
                            select_biased! {
                                // Process any path refresh requests before moving on to process
                                // the queue of ignore statuses.
                                request = self.refresh_requests_rx.recv().fuse() => {
                                    let Ok((paths, barrier)) = request else { break };
                                    if !self.process_refresh_request(paths, barrier).await {
                                        return;
                                    }
                                }

                                // Recursively process directories whose ignores have changed.
                                job = ignore_queue_rx.recv().fuse() => {
                                    let Ok(job) = job else { break };
                                    self.update_ignore_status(job, &snapshot).await;
                                }
                            }
                        }
                    });
                }
            })
            .await;
    }

    /// Recomputes the ignore status of one directory's children, writing any
    /// changes back to the shared snapshot and enqueueing child directories.
    async fn update_ignore_status(&self, job: UpdateIgnoreStatusJob, snapshot: &LocalSnapshot) {
        let mut ignore_stack = job.ignore_stack;
        if let Some((ignore, _)) = snapshot.ignores_by_parent_abs_path.get(&job.abs_path) {
            ignore_stack = ignore_stack.append(job.abs_path.clone(), ignore.clone());
        }

        let mut entries_by_id_edits = Vec::new();
        let mut entries_by_path_edits = Vec::new();
        let path = job.abs_path.strip_prefix(&snapshot.abs_path).unwrap();
        for mut entry in snapshot.child_entries(path).cloned() {
            let was_ignored = entry.is_ignored;
            let abs_path = snapshot.abs_path().join(&entry.path);
            entry.is_ignored = ignore_stack.is_abs_path_ignored(&abs_path, entry.is_dir());
            if entry.is_dir() {
                let child_ignore_stack = if entry.is_ignored {
                    IgnoreStack::all()
                } else {
                    ignore_stack.clone()
                };
                job.ignore_queue
                    .send(UpdateIgnoreStatusJob {
                        abs_path: abs_path.into(),
                        ignore_stack: child_ignore_stack,
                        ignore_queue: job.ignore_queue.clone(),
                    })
                    .await
                    .unwrap();
            }

            // Only record edits for entries whose status actually flipped.
            if entry.is_ignored != was_ignored {
                let mut path_entry = snapshot.entries_by_id.get(&entry.id, &()).unwrap().clone();
                path_entry.scan_id = snapshot.scan_id;
                path_entry.is_ignored = entry.is_ignored;
                entries_by_id_edits.push(Edit::Insert(path_entry));
                entries_by_path_edits.push(Edit::Insert(entry));
            }
        }

        let mut snapshot = self.snapshot.lock();
        snapshot.entries_by_path.edit(entries_by_path_edits, &());
        snapshot.entries_by_id.edit(entries_by_id_edits, &());
    }

    /// Diffs the entries at (and under) each changed path between the old
    /// and new snapshots, classifying each difference as added, removed, or
    /// updated.
    fn build_change_set(
        &self,
        old_snapshot: &Snapshot,
        new_snapshot: &Snapshot,
        event_paths: Vec<Arc<Path>>,
    ) -> HashMap<Arc<Path>, PathChange> {
        use PathChange::{Added, AddedOrUpdated, Removed, Updated};

        let mut changes = HashMap::default();
        let mut old_paths = old_snapshot.entries_by_path.cursor::<PathKey>();
        let mut new_paths = new_snapshot.entries_by_path.cursor::<PathKey>();
        let received_before_initialized = !self.finished_initial_scan;

        for path in event_paths {
            let path = PathKey(path);
            old_paths.seek(&path, Bias::Left, &());
            new_paths.seek(&path, Bias::Left, &());

            // Walk both cursors in lockstep (a classic sorted merge) until
            // both have moved past the event path's subtree.
            loop {
                match (old_paths.item(), new_paths.item()) {
                    (Some(old_entry), Some(new_entry)) => {
                        if old_entry.path > path.0
                            && new_entry.path > path.0
                            && !old_entry.path.starts_with(&path.0)
                            && !new_entry.path.starts_with(&path.0)
                        {
                            break;
                        }

                        match Ord::cmp(&old_entry.path, &new_entry.path) {
                            Ordering::Less => {
                                changes.insert(old_entry.path.clone(), Removed);
                                old_paths.next(&());
                            }
                            Ordering::Equal => {
                                if received_before_initialized {
                                    // If the worktree was not fully initialized when this event was generated,
                                    // we can't know whether this entry was added during the scan or whether
                                    // it was merely updated.
                                    changes.insert(new_entry.path.clone(), AddedOrUpdated);
                                } else if old_entry.mtime != new_entry.mtime {
                                    changes.insert(new_entry.path.clone(), Updated);
                                }
                                old_paths.next(&());
                                new_paths.next(&());
                            }
                            Ordering::Greater => {
                                changes.insert(new_entry.path.clone(), Added);
                                new_paths.next(&());
                            }
                        }
                    }
                    (Some(old_entry), None) => {
                        changes.insert(old_entry.path.clone(), Removed);
                        old_paths.next(&());
                    }
                    (None, Some(new_entry)) => {
                        changes.insert(new_entry.path.clone(), Added);
                        new_paths.next(&());
                    }
                    (None, None) => break,
                }
            }
        }
        changes
    }

    /// Waits for the next progress-update interval. Never resolves when
    /// `running` is false; uses a randomized delay with fake filesystems in
    /// tests.
    async fn progress_timer(&self, running: bool) {
        if !running {
            return futures::future::pending().await;
        }

        #[cfg(any(test, feature = "test-support"))]
        if self.fs.is_fake() {
            return self.executor.simulate_random_delay().await;
        }

        smol::Timer::after(Duration::from_millis(100)).await;
    }
}
3077
3078fn char_bag_for_path(root_char_bag: CharBag, path: &Path) -> CharBag {
3079 let mut result = root_char_bag;
3080 result.extend(
3081 path.to_string_lossy()
3082 .chars()
3083 .map(|c| c.to_ascii_lowercase()),
3084 );
3085 result
3086}
3087
/// A unit of work for the directory scanner: one directory to read.
struct ScanJob {
    abs_path: Arc<Path>,
    /// The directory's path relative to the worktree root.
    path: Arc<Path>,
    /// Gitignore rules in effect for this directory's contents.
    ignore_stack: Arc<IgnoreStack>,
    /// Sender for enqueueing child-directory jobs onto the same queue.
    scan_queue: Sender<ScanJob>,
    /// Inodes of all ancestor directories, used to detect symlink cycles.
    ancestor_inodes: TreeSet<u64>,
}
3095
/// A unit of work for recomputing ignore statuses: one directory whose
/// effective gitignore rules changed.
struct UpdateIgnoreStatusJob {
    abs_path: Arc<Path>,
    /// Gitignore rules in effect above this directory.
    ignore_stack: Arc<IgnoreStack>,
    /// Sender for enqueueing child-directory jobs onto the same queue.
    ignore_queue: Sender<UpdateIgnoreStatusJob>,
}
3101
/// Test-support extensions for worktree handles.
pub trait WorktreeHandle {
    /// Waits until all FS events that predate this call have been processed
    /// by the worktree's scanner. Test-only.
    #[cfg(any(test, feature = "test-support"))]
    fn flush_fs_events<'a>(
        &self,
        cx: &'a gpui::TestAppContext,
    ) -> futures::future::LocalBoxFuture<'a, ()>;
}
3109
impl WorktreeHandle for ModelHandle<Worktree> {
    // The worktree's FS event stream sometimes delivers "redundant" events for FS changes that
    // occurred before the worktree was constructed. These events can cause the worktree to perform
    // extra directory scans, and emit extra scan-state notifications.
    //
    // This function mutates the worktree's directory and waits for those mutations to be picked up,
    // to ensure that all redundant FS events have already been processed.
    #[cfg(any(test, feature = "test-support"))]
    fn flush_fs_events<'a>(
        &self,
        cx: &'a gpui::TestAppContext,
    ) -> futures::future::LocalBoxFuture<'a, ()> {
        use smol::future::FutureExt;

        let filename = "fs-event-sentinel";
        let tree = self.clone();
        let (fs, root_path) = self.read_with(cx, |tree, _| {
            let tree = tree.as_local().unwrap();
            (tree.fs.clone(), tree.abs_path().clone())
        });

        async move {
            // Create a sentinel file and wait until the scanner observes it...
            fs.create_file(&root_path.join(filename), Default::default())
                .await
                .unwrap();
            tree.condition(cx, |tree, _| tree.entry_for_path(filename).is_some())
                .await;

            // ...then delete it and wait for the removal to be observed too.
            fs.remove_file(&root_path.join(filename), Default::default())
                .await
                .unwrap();
            tree.condition(cx, |tree, _| tree.entry_for_path(filename).is_none())
                .await;

            cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
                .await;
        }
        .boxed_local()
    }
}
3150
/// Cursor dimension tracking how many entries (in various categories) a
/// traversal has passed so far.
#[derive(Clone, Debug)]
struct TraversalProgress<'a> {
    /// The greatest path passed so far.
    max_path: &'a Path,
    /// Total entries passed.
    count: usize,
    /// Non-ignored entries passed.
    visible_count: usize,
    /// File entries passed.
    file_count: usize,
    /// Non-ignored file entries passed.
    visible_file_count: usize,
}
3159
3160impl<'a> TraversalProgress<'a> {
3161 fn count(&self, include_dirs: bool, include_ignored: bool) -> usize {
3162 match (include_ignored, include_dirs) {
3163 (true, true) => self.count,
3164 (true, false) => self.file_count,
3165 (false, true) => self.visible_count,
3166 (false, false) => self.visible_file_count,
3167 }
3168 }
3169}
3170
/// Accumulates entry-tree summaries into traversal progress as the cursor
/// advances.
impl<'a> sum_tree::Dimension<'a, EntrySummary> for TraversalProgress<'a> {
    fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
        self.max_path = summary.max_path.as_ref();
        self.count += summary.count;
        self.visible_count += summary.visible_count;
        self.file_count += summary.file_count;
        self.visible_file_count += summary.visible_file_count;
    }
}
3180
impl<'a> Default for TraversalProgress<'a> {
    /// Zero progress: empty path and all counts at zero.
    fn default() -> Self {
        Self {
            max_path: Path::new(""),
            count: 0,
            visible_count: 0,
            file_count: 0,
            visible_file_count: 0,
        }
    }
}
3192
/// An iterator-like cursor over worktree entries, optionally filtering out
/// directories and/or ignored entries.
pub struct Traversal<'a> {
    cursor: sum_tree::Cursor<'a, Entry, TraversalProgress<'a>>,
    include_ignored: bool,
    include_dirs: bool,
}
3198
impl<'a> Traversal<'a> {
    /// Steps to the next entry matching this traversal's filters. Returns
    /// whether the cursor moved.
    pub fn advance(&mut self) -> bool {
        self.advance_to_offset(self.offset() + 1)
    }

    /// Seeks forward to the given offset in filtered-entry space.
    pub fn advance_to_offset(&mut self, offset: usize) -> bool {
        self.cursor.seek_forward(
            &TraversalTarget::Count {
                count: offset,
                include_dirs: self.include_dirs,
                include_ignored: self.include_ignored,
            },
            Bias::Right,
            &(),
        )
    }

    /// Skips past the current entry's entire subtree, landing on the next
    /// entry (matching the filters) that is not a descendant of it.
    pub fn advance_to_sibling(&mut self) -> bool {
        while let Some(entry) = self.cursor.item() {
            self.cursor.seek_forward(
                &TraversalTarget::PathSuccessor(&entry.path),
                Bias::Left,
                &(),
            );
            if let Some(entry) = self.cursor.item() {
                // Keep seeking if the landing entry is filtered out.
                if (self.include_dirs || !entry.is_dir())
                    && (self.include_ignored || !entry.is_ignored)
                {
                    return true;
                }
            }
        }
        false
    }

    /// The entry the cursor is currently positioned on, if any.
    pub fn entry(&self) -> Option<&'a Entry> {
        self.cursor.item()
    }

    /// The cursor's current position in filtered-entry space.
    pub fn offset(&self) -> usize {
        self.cursor
            .start()
            .count(self.include_dirs, self.include_ignored)
    }
}
3244
3245impl<'a> Iterator for Traversal<'a> {
3246 type Item = &'a Entry;
3247
3248 fn next(&mut self) -> Option<Self::Item> {
3249 if let Some(item) = self.entry() {
3250 self.advance();
3251 Some(item)
3252 } else {
3253 None
3254 }
3255 }
3256}
3257
/// A seek target within the entry tree.
#[derive(Debug)]
enum TraversalTarget<'a> {
    /// Seek to the given path.
    Path(&'a Path),
    /// Seek to the first entry that is not the path itself nor a descendant
    /// of it.
    PathSuccessor(&'a Path),
    /// Seek to the given offset in filtered-entry space.
    Count {
        count: usize,
        include_ignored: bool,
        include_dirs: bool,
    },
}
3268
impl<'a, 'b> SeekTarget<'a, EntrySummary, TraversalProgress<'a>> for TraversalTarget<'b> {
    /// Orders this target relative to the cursor's current progress, which
    /// is how the sum-tree cursor decides where to stop.
    fn cmp(&self, cursor_location: &TraversalProgress<'a>, _: &()) -> Ordering {
        match self {
            TraversalTarget::Path(path) => path.cmp(&cursor_location.max_path),
            TraversalTarget::PathSuccessor(path) => {
                // "Equal" as soon as the cursor has left the path's subtree,
                // so the seek stops at the first non-descendant entry.
                if !cursor_location.max_path.starts_with(path) {
                    Ordering::Equal
                } else {
                    Ordering::Greater
                }
            }
            TraversalTarget::Count {
                count,
                include_dirs,
                include_ignored,
            } => Ord::cmp(
                count,
                &cursor_location.count(*include_dirs, *include_ignored),
            ),
        }
    }
}
3291
/// Iterates over the immediate children of one directory, skipping each
/// child's descendants.
struct ChildEntriesIter<'a> {
    parent_path: &'a Path,
    traversal: Traversal<'a>,
}
3296
3297impl<'a> Iterator for ChildEntriesIter<'a> {
3298 type Item = &'a Entry;
3299
3300 fn next(&mut self) -> Option<Self::Item> {
3301 if let Some(item) = self.traversal.entry() {
3302 if item.path.starts_with(&self.parent_path) {
3303 self.traversal.advance_to_sibling();
3304 return Some(item);
3305 }
3306 }
3307 None
3308 }
3309}
3310
/// Serializes an entry for transmission to remote collaborators. Paths are
/// converted lossily to strings.
impl<'a> From<&'a Entry> for proto::Entry {
    fn from(entry: &'a Entry) -> Self {
        Self {
            id: entry.id.to_proto(),
            is_dir: entry.is_dir(),
            path: entry.path.to_string_lossy().into(),
            inode: entry.inode,
            mtime: Some(entry.mtime.into()),
            is_symlink: entry.is_symlink,
            is_ignored: entry.is_ignored,
        }
    }
}
3324
3325impl<'a> TryFrom<(&'a CharBag, proto::Entry)> for Entry {
3326 type Error = anyhow::Error;
3327
3328 fn try_from((root_char_bag, entry): (&'a CharBag, proto::Entry)) -> Result<Self> {
3329 if let Some(mtime) = entry.mtime {
3330 let kind = if entry.is_dir {
3331 EntryKind::Dir
3332 } else {
3333 let mut char_bag = *root_char_bag;
3334 char_bag.extend(entry.path.chars().map(|c| c.to_ascii_lowercase()));
3335 EntryKind::File(char_bag)
3336 };
3337 let path: Arc<Path> = PathBuf::from(entry.path).into();
3338 Ok(Entry {
3339 id: ProjectEntryId::from_proto(entry.id),
3340 kind,
3341 path,
3342 inode: entry.inode,
3343 mtime: mtime.into(),
3344 is_symlink: entry.is_symlink,
3345 is_ignored: entry.is_ignored,
3346 })
3347 } else {
3348 Err(anyhow!(
3349 "missing mtime in remote worktree entry {:?}",
3350 entry.path
3351 ))
3352 }
3353 }
3354}
3355
3356#[cfg(test)]
3357mod tests {
3358 use super::*;
3359 use fs::{FakeFs, RealFs};
3360 use gpui::{executor::Deterministic, TestAppContext};
3361 use pretty_assertions::assert_eq;
3362 use rand::prelude::*;
3363 use serde_json::json;
3364 use std::{env, fmt::Write};
3365 use util::{http::FakeHttpClient, test::temp_tree};
3366
3367 #[gpui::test]
3368 async fn test_traversal(cx: &mut TestAppContext) {
3369 let fs = FakeFs::new(cx.background());
3370 fs.insert_tree(
3371 "/root",
3372 json!({
3373 ".gitignore": "a/b\n",
3374 "a": {
3375 "b": "",
3376 "c": "",
3377 }
3378 }),
3379 )
3380 .await;
3381
3382 let http_client = FakeHttpClient::with_404_response();
3383 let client = cx.read(|cx| Client::new(http_client, cx));
3384
3385 let tree = Worktree::local(
3386 client,
3387 Path::new("/root"),
3388 true,
3389 fs,
3390 Default::default(),
3391 &mut cx.to_async(),
3392 )
3393 .await
3394 .unwrap();
3395 cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
3396 .await;
3397
3398 tree.read_with(cx, |tree, _| {
3399 assert_eq!(
3400 tree.entries(false)
3401 .map(|entry| entry.path.as_ref())
3402 .collect::<Vec<_>>(),
3403 vec![
3404 Path::new(""),
3405 Path::new(".gitignore"),
3406 Path::new("a"),
3407 Path::new("a/c"),
3408 ]
3409 );
3410 assert_eq!(
3411 tree.entries(true)
3412 .map(|entry| entry.path.as_ref())
3413 .collect::<Vec<_>>(),
3414 vec![
3415 Path::new(""),
3416 Path::new(".gitignore"),
3417 Path::new("a"),
3418 Path::new("a/b"),
3419 Path::new("a/c"),
3420 ]
3421 );
3422 })
3423 }
3424
    // Verifies that symlinks forming cycles are listed as entries but never
    // followed, so the scan terminates, and that renaming such a symlink is
    // picked up by a rescan.
    #[gpui::test(iterations = 10)]
    async fn test_circular_symlinks(executor: Arc<Deterministic>, cx: &mut TestAppContext) {
        let fs = FakeFs::new(cx.background());
        fs.insert_tree(
            "/root",
            json!({
                "lib": {
                    "a": {
                        "a.txt": ""
                    },
                    "b": {
                        "b.txt": ""
                    }
                }
            }),
        )
        .await;
        // Each symlink points back at its grandparent (`/root/lib`), so
        // following it would recurse forever.
        fs.insert_symlink("/root/lib/a/lib", "..".into()).await;
        fs.insert_symlink("/root/lib/b/lib", "..".into()).await;

        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
        let tree = Worktree::local(
            client,
            Path::new("/root"),
            true,
            fs.clone(),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();

        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;

        tree.read_with(cx, |tree, _| {
            // The symlinks themselves appear as entries, but nothing beneath
            // `lib/a/lib` or `lib/b/lib` is listed — the scan did not descend
            // into them.
            assert_eq!(
                tree.entries(false)
                    .map(|entry| entry.path.as_ref())
                    .collect::<Vec<_>>(),
                vec![
                    Path::new(""),
                    Path::new("lib"),
                    Path::new("lib/a"),
                    Path::new("lib/a/a.txt"),
                    Path::new("lib/a/lib"),
                    Path::new("lib/b"),
                    Path::new("lib/b/b.txt"),
                    Path::new("lib/b/lib"),
                ]
            );
        });

        // Renaming one of the symlinks must be reflected after the rescan.
        fs.rename(
            Path::new("/root/lib/a/lib"),
            Path::new("/root/lib/a/lib-2"),
            Default::default(),
        )
        .await
        .unwrap();
        executor.run_until_parked();
        tree.read_with(cx, |tree, _| {
            assert_eq!(
                tree.entries(false)
                    .map(|entry| entry.path.as_ref())
                    .collect::<Vec<_>>(),
                vec![
                    Path::new(""),
                    Path::new("lib"),
                    Path::new("lib/a"),
                    Path::new("lib/a/a.txt"),
                    Path::new("lib/a/lib-2"),
                    Path::new("lib/b"),
                    Path::new("lib/b/b.txt"),
                    Path::new("lib/b/lib"),
                ]
            );
        });
    }
3504
    // Verifies gitignore handling on a real filesystem: a `.gitignore` in an
    // ancestor directory *outside* the worktree root still applies, and
    // ignore statuses are recomputed for files created after the initial scan.
    #[gpui::test]
    async fn test_rescan_with_gitignore(cx: &mut TestAppContext) {
        // The worktree is rooted at `tree`; `parent_dir`'s `.gitignore`
        // lives above the root but must still mask the ancestor-ignored files.
        let parent_dir = temp_tree(json!({
            ".gitignore": "ancestor-ignored-file1\nancestor-ignored-file2\n",
            "tree": {
                ".git": {},
                ".gitignore": "ignored-dir\n",
                "tracked-dir": {
                    "tracked-file1": "",
                    "ancestor-ignored-file1": "",
                },
                "ignored-dir": {
                    "ignored-file1": ""
                }
            }
        }));
        let dir = parent_dir.path().join("tree");

        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));

        let tree = Worktree::local(
            client,
            dir.as_path(),
            true,
            Arc::new(RealFs),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;
        tree.flush_fs_events(cx).await;
        cx.read(|cx| {
            let tree = tree.read(cx);
            assert!(
                !tree
                    .entry_for_path("tracked-dir/tracked-file1")
                    .unwrap()
                    .is_ignored
            );
            // Ignored by the `.gitignore` above the worktree root.
            assert!(
                tree.entry_for_path("tracked-dir/ancestor-ignored-file1")
                    .unwrap()
                    .is_ignored
            );
            // Ignored by the worktree's own `.gitignore`.
            assert!(
                tree.entry_for_path("ignored-dir/ignored-file1")
                    .unwrap()
                    .is_ignored
            );
        });

        // Create new files after the initial scan; their ignore statuses must
        // be computed by the rescan triggered by the fs events.
        std::fs::write(dir.join("tracked-dir/tracked-file2"), "").unwrap();
        std::fs::write(dir.join("tracked-dir/ancestor-ignored-file2"), "").unwrap();
        std::fs::write(dir.join("ignored-dir/ignored-file2"), "").unwrap();
        tree.flush_fs_events(cx).await;
        cx.read(|cx| {
            let tree = tree.read(cx);
            assert!(
                !tree
                    .entry_for_path("tracked-dir/tracked-file2")
                    .unwrap()
                    .is_ignored
            );
            assert!(
                tree.entry_for_path("tracked-dir/ancestor-ignored-file2")
                    .unwrap()
                    .is_ignored
            );
            assert!(
                tree.entry_for_path("ignored-dir/ignored-file2")
                    .unwrap()
                    .is_ignored
            );
            // The `.git` directory itself is always treated as ignored.
            assert!(tree.entry_for_path(".git").unwrap().is_ignored);
        });
    }
3583
    // Verifies mapping of paths to git repositories: innermost repo wins for
    // nested repos, paths outside any repo map to none, changes inside `.git`
    // bump the repo's scan id, and deleting `.git` removes the repo.
    #[gpui::test]
    async fn test_git_repository_for_path(cx: &mut TestAppContext) {
        // Two nested repositories: the outer one at `dir1`, an inner one at
        // `dir1/deps/dep1`. `c.txt` lives outside both.
        let root = temp_tree(json!({
            "dir1": {
                ".git": {},
                "deps": {
                    "dep1": {
                        ".git": {},
                        "src": {
                            "a.txt": ""
                        }
                    }
                },
                "src": {
                    "b.txt": ""
                }
            },
            "c.txt": "",
        }));

        let http_client = FakeHttpClient::with_404_response();
        let client = cx.read(|cx| Client::new(http_client, cx));
        let tree = Worktree::local(
            client,
            root.path(),
            true,
            Arc::new(RealFs),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();

        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;
        tree.flush_fs_events(cx).await;

        tree.read_with(cx, |tree, _cx| {
            let tree = tree.as_local().unwrap();

            // Outside any repository.
            assert!(tree.repo_for("c.txt".as_ref()).is_none());

            // A file in the outer repo maps to work directory `dir1`.
            let entry = tree.repo_for("dir1/src/b.txt".as_ref()).unwrap();
            assert_eq!(
                entry
                    .work_directory(tree)
                    .map(|directory| directory.as_ref().to_owned()),
                Some(Path::new("dir1").to_owned())
            );

            // A file in the nested repo maps to the innermost repository.
            let entry = tree.repo_for("dir1/deps/dep1/src/a.txt".as_ref()).unwrap();
            assert_eq!(
                entry
                    .work_directory(tree)
                    .map(|directory| directory.as_ref().to_owned()),
                Some(Path::new("dir1/deps/dep1").to_owned())
            );
        });

        let original_scan_id = tree.read_with(cx, |tree, _cx| {
            let tree = tree.as_local().unwrap();
            let entry = tree.repo_for("dir1/src/b.txt".as_ref()).unwrap();
            entry.scan_id
        });

        // Touching a file inside `.git` must bump the repository's scan id.
        std::fs::write(root.path().join("dir1/.git/random_new_file"), "hello").unwrap();
        tree.flush_fs_events(cx).await;

        tree.read_with(cx, |tree, _cx| {
            let tree = tree.as_local().unwrap();
            let new_scan_id = {
                let entry = tree.repo_for("dir1/src/b.txt".as_ref()).unwrap();
                entry.scan_id
            };
            assert_ne!(
                original_scan_id, new_scan_id,
                "original {original_scan_id}, new {new_scan_id}"
            );
        });

        // Deleting `.git` makes the repository disappear.
        std::fs::remove_dir_all(root.path().join("dir1/.git")).unwrap();
        tree.flush_fs_events(cx).await;

        tree.read_with(cx, |tree, _cx| {
            let tree = tree.as_local().unwrap();

            assert!(tree.repo_for("dir1/src/b.txt".as_ref()).is_none());
        });
    }
3673
3674 #[test]
3675 fn test_changed_repos() {
3676 fn fake_entry(work_dir_id: usize, scan_id: usize) -> RepositoryEntry {
3677 RepositoryEntry {
3678 scan_id,
3679 work_directory: ProjectEntryId(work_dir_id).into(),
3680 branch: None,
3681 }
3682 }
3683
3684 let mut prev_repos = TreeMap::<RepositoryWorkDirectory, RepositoryEntry>::default();
3685 prev_repos.insert(
3686 RepositoryWorkDirectory(Path::new("don't-care-1").into()),
3687 fake_entry(1, 0),
3688 );
3689 prev_repos.insert(
3690 RepositoryWorkDirectory(Path::new("don't-care-2").into()),
3691 fake_entry(2, 0),
3692 );
3693 prev_repos.insert(
3694 RepositoryWorkDirectory(Path::new("don't-care-3").into()),
3695 fake_entry(3, 0),
3696 );
3697
3698 let mut new_repos = TreeMap::<RepositoryWorkDirectory, RepositoryEntry>::default();
3699 new_repos.insert(
3700 RepositoryWorkDirectory(Path::new("don't-care-4").into()),
3701 fake_entry(2, 1),
3702 );
3703 new_repos.insert(
3704 RepositoryWorkDirectory(Path::new("don't-care-5").into()),
3705 fake_entry(3, 0),
3706 );
3707 new_repos.insert(
3708 RepositoryWorkDirectory(Path::new("don't-care-6").into()),
3709 fake_entry(4, 0),
3710 );
3711
3712 let res = LocalWorktree::changed_repos(&prev_repos, &new_repos);
3713
3714 // Deletion retained
3715 assert!(res
3716 .iter()
3717 .find(|repo| repo.work_directory.0 .0 == 1 && repo.scan_id == 0)
3718 .is_some());
3719
3720 // Update retained
3721 assert!(res
3722 .iter()
3723 .find(|repo| repo.work_directory.0 .0 == 2 && repo.scan_id == 1)
3724 .is_some());
3725
3726 // Addition retained
3727 assert!(res
3728 .iter()
3729 .find(|repo| repo.work_directory.0 .0 == 4 && repo.scan_id == 0)
3730 .is_some());
3731
3732 // Nochange, not retained
3733 assert!(res
3734 .iter()
3735 .find(|repo| repo.work_directory.0 .0 == 3 && repo.scan_id == 0)
3736 .is_none());
3737 }
3738
3739 #[gpui::test]
3740 async fn test_write_file(cx: &mut TestAppContext) {
3741 let dir = temp_tree(json!({
3742 ".git": {},
3743 ".gitignore": "ignored-dir\n",
3744 "tracked-dir": {},
3745 "ignored-dir": {}
3746 }));
3747
3748 let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
3749
3750 let tree = Worktree::local(
3751 client,
3752 dir.path(),
3753 true,
3754 Arc::new(RealFs),
3755 Default::default(),
3756 &mut cx.to_async(),
3757 )
3758 .await
3759 .unwrap();
3760 cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
3761 .await;
3762 tree.flush_fs_events(cx).await;
3763
3764 tree.update(cx, |tree, cx| {
3765 tree.as_local().unwrap().write_file(
3766 Path::new("tracked-dir/file.txt"),
3767 "hello".into(),
3768 Default::default(),
3769 cx,
3770 )
3771 })
3772 .await
3773 .unwrap();
3774 tree.update(cx, |tree, cx| {
3775 tree.as_local().unwrap().write_file(
3776 Path::new("ignored-dir/file.txt"),
3777 "world".into(),
3778 Default::default(),
3779 cx,
3780 )
3781 })
3782 .await
3783 .unwrap();
3784
3785 tree.read_with(cx, |tree, _| {
3786 let tracked = tree.entry_for_path("tracked-dir/file.txt").unwrap();
3787 let ignored = tree.entry_for_path("ignored-dir/file.txt").unwrap();
3788 assert!(!tracked.is_ignored);
3789 assert!(ignored.is_ignored);
3790 });
3791 }
3792
    // Verifies that an entry can be created while the initial background scan
    // may still be running, and that snapshot updates built afterwards apply
    // cleanly to an earlier snapshot.
    #[gpui::test(iterations = 30)]
    async fn test_create_directory_during_initial_scan(cx: &mut TestAppContext) {
        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));

        let fs = FakeFs::new(cx.background());
        fs.insert_tree(
            "/root",
            json!({
                "b": {},
                "c": {},
                "d": {},
            }),
        )
        .await;

        let tree = Worktree::local(
            client,
            "/root".as_ref(),
            true,
            fs,
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();

        // Capture a snapshot before the mutation so we can later verify that
        // the diff-based update converges to the post-mutation state.
        let mut snapshot1 = tree.update(cx, |tree, _| tree.as_local().unwrap().snapshot());

        // Create `a/e`, whose parent `a` does not exist in the initial tree.
        // Note: scan_complete() is deliberately not awaited first.
        let entry = tree
            .update(cx, |tree, cx| {
                tree.as_local_mut()
                    .unwrap()
                    .create_entry("a/e".as_ref(), true, cx)
            })
            .await
            .unwrap();
        assert!(entry.is_dir());

        cx.foreground().run_until_parked();
        tree.read_with(cx, |tree, _| {
            assert_eq!(tree.entry_for_path("a/e").unwrap().kind, EntryKind::Dir);
        });

        // Applying the remote-style update to the old snapshot must reproduce
        // the new snapshot exactly.
        let snapshot2 = tree.update(cx, |tree, _| tree.as_local().unwrap().snapshot());
        let update = snapshot2.build_update(&snapshot1, 0, 0, true);
        snapshot1.apply_remote_update(update).unwrap();
        assert_eq!(snapshot1.to_vec(true), snapshot2.to_vec(true),);
    }
3841
    // Randomized test: mutates the worktree through its API while the initial
    // scan is still in progress, repeatedly checking snapshot invariants and
    // that diff-based updates keep a replica snapshot in sync.
    #[gpui::test(iterations = 100)]
    async fn test_random_worktree_operations_during_initial_scan(
        cx: &mut TestAppContext,
        mut rng: StdRng,
    ) {
        // Both knobs are overridable via env vars for longer soak runs.
        let operations = env::var("OPERATIONS")
            .map(|o| o.parse().unwrap())
            .unwrap_or(5);
        let initial_entries = env::var("INITIAL_ENTRIES")
            .map(|o| o.parse().unwrap())
            .unwrap_or(20);

        let root_dir = Path::new("/test");
        let fs = FakeFs::new(cx.background()) as Arc<dyn Fs>;
        fs.as_fake().insert_tree(root_dir, json!({})).await;
        for _ in 0..initial_entries {
            randomly_mutate_fs(&fs, root_dir, 1.0, &mut rng).await;
        }
        log::info!("generated initial tree");

        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
        let worktree = Worktree::local(
            client.clone(),
            root_dir,
            true,
            fs.clone(),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();

        // Replica snapshot that will be kept in sync purely via updates.
        let mut snapshot = worktree.update(cx, |tree, _| tree.as_local().unwrap().snapshot());

        for _ in 0..operations {
            // Mutations may legitimately fail (e.g. racing the scanner), so
            // errors are logged rather than unwrapped.
            worktree
                .update(cx, |worktree, cx| {
                    randomly_mutate_worktree(worktree, &mut rng, cx)
                })
                .await
                .log_err();
            worktree.read_with(cx, |tree, _| {
                tree.as_local().unwrap().snapshot.check_invariants()
            });

            // Occasionally sync the replica mid-scan and verify convergence.
            if rng.gen_bool(0.6) {
                let new_snapshot =
                    worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
                let update = new_snapshot.build_update(&snapshot, 0, 0, true);
                snapshot.apply_remote_update(update.clone()).unwrap();
                assert_eq!(
                    snapshot.to_vec(true),
                    new_snapshot.to_vec(true),
                    "incorrect snapshot after update {:?}",
                    update
                );
            }
        }

        // Once the scan finishes, invariants and replica convergence must
        // still hold.
        worktree
            .update(cx, |tree, _| tree.as_local_mut().unwrap().scan_complete())
            .await;
        worktree.read_with(cx, |tree, _| {
            tree.as_local().unwrap().snapshot.check_invariants()
        });

        let new_snapshot = worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
        let update = new_snapshot.build_update(&snapshot, 0, 0, true);
        snapshot.apply_remote_update(update.clone()).unwrap();
        assert_eq!(
            snapshot.to_vec(true),
            new_snapshot.to_vec(true),
            "incorrect snapshot after update {:?}",
            update
        );
    }
3918
    // Randomized test: applies random filesystem mutations with randomly
    // batched event delivery, and checks that (a) `UpdatedEntries` events are
    // sufficient to mirror the worktree's path list, (b) snapshot invariants
    // hold after quiescing, (c) a fresh scan agrees with the incrementally
    // maintained snapshot, and (d) updates built against older snapshots
    // converge.
    #[gpui::test(iterations = 100)]
    async fn test_random_worktree_changes(cx: &mut TestAppContext, mut rng: StdRng) {
        // Both knobs are overridable via env vars for longer soak runs.
        let operations = env::var("OPERATIONS")
            .map(|o| o.parse().unwrap())
            .unwrap_or(40);
        let initial_entries = env::var("INITIAL_ENTRIES")
            .map(|o| o.parse().unwrap())
            .unwrap_or(20);

        let root_dir = Path::new("/test");
        let fs = FakeFs::new(cx.background()) as Arc<dyn Fs>;
        fs.as_fake().insert_tree(root_dir, json!({})).await;
        for _ in 0..initial_entries {
            randomly_mutate_fs(&fs, root_dir, 1.0, &mut rng).await;
        }
        log::info!("generated initial tree");

        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
        let worktree = Worktree::local(
            client.clone(),
            root_dir,
            true,
            fs.clone(),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();

        worktree
            .update(cx, |tree, _| tree.as_local_mut().unwrap().scan_complete())
            .await;

        // After the initial scan is complete, the `UpdatedEntries` event can
        // be used to follow along with all changes to the worktree's snapshot.
        worktree.update(cx, |tree, cx| {
            // `paths` is kept sorted and updated purely from the events; after
            // every event batch it must match the worktree's actual paths.
            let mut paths = tree
                .as_local()
                .unwrap()
                .paths()
                .cloned()
                .collect::<Vec<_>>();

            cx.subscribe(&worktree, move |tree, _, event, _| {
                if let Event::UpdatedEntries(changes) = event {
                    for (path, change_type) in changes.iter() {
                        let path = path.clone();
                        // Insertion point (or position) of `path` in the
                        // sorted list, whether present or not.
                        let ix = match paths.binary_search(&path) {
                            Ok(ix) | Err(ix) => ix,
                        };
                        match change_type {
                            PathChange::Added => {
                                assert_ne!(paths.get(ix), Some(&path));
                                paths.insert(ix, path);
                            }
                            PathChange::Removed => {
                                assert_eq!(paths.get(ix), Some(&path));
                                paths.remove(ix);
                            }
                            PathChange::Updated => {
                                assert_eq!(paths.get(ix), Some(&path));
                            }
                            PathChange::AddedOrUpdated => {
                                // NOTE(review): direct index — panics if the
                                // path would sort past the end of `paths`
                                // (binary_search returning Err(len)). Confirm
                                // that can't happen here.
                                if paths[ix] != path {
                                    paths.insert(ix, path);
                                }
                            }
                        }
                    }
                    let new_paths = tree.paths().cloned().collect::<Vec<_>>();
                    assert_eq!(paths, new_paths, "incorrect changes: {:?}", changes);
                }
            })
            .detach();
        });

        // Mutate the fs, randomly choosing between flushing a prefix of the
        // buffered fs events and performing further mutations, so events
        // arrive in arbitrary batches.
        let mut snapshots = Vec::new();
        let mut mutations_len = operations;
        while mutations_len > 1 {
            randomly_mutate_fs(&fs, root_dir, 1.0, &mut rng).await;
            let buffered_event_count = fs.as_fake().buffered_event_count().await;
            if buffered_event_count > 0 && rng.gen_bool(0.3) {
                let len = rng.gen_range(0..=buffered_event_count);
                log::info!("flushing {} events", len);
                fs.as_fake().flush_events(len).await;
            } else {
                randomly_mutate_fs(&fs, root_dir, 0.6, &mut rng).await;
                mutations_len -= 1;
            }

            cx.foreground().run_until_parked();
            // Occasionally record an intermediate snapshot to diff against at
            // the end of the test.
            if rng.gen_bool(0.2) {
                log::info!("storing snapshot {}", snapshots.len());
                let snapshot =
                    worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
                snapshots.push(snapshot);
            }
        }

        log::info!("quiescing");
        fs.as_fake().flush_events(usize::MAX).await;
        cx.foreground().run_until_parked();
        let snapshot = worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
        snapshot.check_invariants();

        // A worktree scanned from scratch must agree with the incrementally
        // maintained one.
        {
            let new_worktree = Worktree::local(
                client.clone(),
                root_dir,
                true,
                fs.clone(),
                Default::default(),
                &mut cx.to_async(),
            )
            .await
            .unwrap();
            new_worktree
                .update(cx, |tree, _| tree.as_local_mut().unwrap().scan_complete())
                .await;
            let new_snapshot =
                new_worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
            assert_eq!(snapshot.to_vec(true), new_snapshot.to_vec(true));
        }

        // Every stored snapshot must converge to the final one when the
        // corresponding update is applied, with or without ignored entries.
        for (i, mut prev_snapshot) in snapshots.into_iter().enumerate() {
            let include_ignored = rng.gen::<bool>();
            if !include_ignored {
                // Strip ignored entries from the stored snapshot to simulate a
                // replica that never received them.
                let mut entries_by_path_edits = Vec::new();
                let mut entries_by_id_edits = Vec::new();
                for entry in prev_snapshot
                    .entries_by_id
                    .cursor::<()>()
                    .filter(|e| e.is_ignored)
                {
                    entries_by_path_edits.push(Edit::Remove(PathKey(entry.path.clone())));
                    entries_by_id_edits.push(Edit::Remove(entry.id));
                }

                prev_snapshot
                    .entries_by_path
                    .edit(entries_by_path_edits, &());
                prev_snapshot.entries_by_id.edit(entries_by_id_edits, &());
            }

            let update = snapshot.build_update(&prev_snapshot, 0, 0, include_ignored);
            prev_snapshot.apply_remote_update(update.clone()).unwrap();
            assert_eq!(
                prev_snapshot.to_vec(include_ignored),
                snapshot.to_vec(include_ignored),
                "wrong update for snapshot {i}. update: {:?}",
                update
            );
        }
    }
4073
    /// Performs one random mutation on the worktree through its public API:
    /// roughly a third deletes, a third renames, and the rest either creates a
    /// child entry or overwrites a file. Returns a task that resolves when the
    /// mutation (and any follow-up work it spawns) completes.
    fn randomly_mutate_worktree(
        worktree: &mut Worktree,
        rng: &mut impl Rng,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<()>> {
        let worktree = worktree.as_local_mut().unwrap();
        let snapshot = worktree.snapshot();
        // Pick a random existing (non-ignored) entry to operate on.
        let entry = snapshot.entries(false).choose(rng).unwrap();

        match rng.gen_range(0_u32..100) {
            // ~1/3: delete the entry — but never the worktree root itself.
            0..=33 if entry.path.as_ref() != Path::new("") => {
                log::info!("deleting entry {:?} ({})", entry.path, entry.id.0);
                worktree.delete_entry(entry.id, cx).unwrap()
            }
            // ~1/3: rename the entry under some other directory (again, never
            // the root). Falls back to a root-level name if the destination
            // would be inside the entry being moved.
            ..=66 if entry.path.as_ref() != Path::new("") => {
                let other_entry = snapshot.entries(false).choose(rng).unwrap();
                let new_parent_path = if other_entry.is_dir() {
                    other_entry.path.clone()
                } else {
                    other_entry.path.parent().unwrap().into()
                };
                let mut new_path = new_parent_path.join(gen_name(rng));
                if new_path.starts_with(&entry.path) {
                    new_path = gen_name(rng).into();
                }

                log::info!(
                    "renaming entry {:?} ({}) to {:?}",
                    entry.path,
                    entry.id.0,
                    new_path
                );
                let task = worktree.rename_entry(entry.id, new_path, cx).unwrap();
                // Discard the returned entry; callers only need success/failure.
                cx.foreground().spawn(async move {
                    task.await?;
                    Ok(())
                })
            }
            // Remainder: create a child under a directory, or overwrite a
            // file's contents with an empty string.
            _ => {
                let task = if entry.is_dir() {
                    let child_path = entry.path.join(gen_name(rng));
                    let is_dir = rng.gen_bool(0.3);
                    log::info!(
                        "creating {} at {:?}",
                        if is_dir { "dir" } else { "file" },
                        child_path,
                    );
                    worktree.create_entry(child_path, is_dir, cx)
                } else {
                    log::info!("overwriting file {:?} ({})", entry.path, entry.id.0);
                    worktree.write_file(entry.path.clone(), "".into(), Default::default(), cx)
                };
                cx.foreground().spawn(async move {
                    task.await?;
                    Ok(())
                })
            }
        }
    }
4133
    /// Performs one random mutation directly on the fake filesystem beneath
    /// `root_path`: with probability `insertion_probability` it creates a new
    /// file or directory; otherwise it occasionally (5%) writes a random
    /// `.gitignore`, and the rest of the time renames or deletes an existing
    /// path.
    async fn randomly_mutate_fs(
        fs: &Arc<dyn Fs>,
        root_path: &Path,
        insertion_probability: f64,
        rng: &mut impl Rng,
    ) {
        // Partition the existing paths under `root_path` into files and dirs.
        let mut files = Vec::new();
        let mut dirs = Vec::new();
        for path in fs.as_fake().paths() {
            if path.starts_with(root_path) {
                if fs.is_file(&path).await {
                    files.push(path);
                } else {
                    dirs.push(path);
                }
            }
        }

        // Force an insertion when the tree is effectively empty (only the
        // root dir remains), so the other branches always have a target.
        if (files.is_empty() && dirs.len() == 1) || rng.gen_bool(insertion_probability) {
            let path = dirs.choose(rng).unwrap();
            let new_path = path.join(gen_name(rng));

            if rng.gen() {
                log::info!(
                    "creating dir {:?}",
                    new_path.strip_prefix(root_path).unwrap()
                );
                fs.create_dir(&new_path).await.unwrap();
            } else {
                log::info!(
                    "creating file {:?}",
                    new_path.strip_prefix(root_path).unwrap()
                );
                fs.create_file(&new_path, Default::default()).await.unwrap();
            }
        } else if rng.gen_bool(0.05) {
            // Write a `.gitignore` into a random dir, listing a random subset
            // of the files and dirs beneath it (paths relative to that dir).
            let ignore_dir_path = dirs.choose(rng).unwrap();
            let ignore_path = ignore_dir_path.join(&*GITIGNORE);

            // `subdirs` always contains at least `ignore_dir_path` itself
            // (a path starts_with itself), so the ranges below are non-empty.
            let subdirs = dirs
                .iter()
                .filter(|d| d.starts_with(&ignore_dir_path))
                .cloned()
                .collect::<Vec<_>>();
            let subfiles = files
                .iter()
                .filter(|d| d.starts_with(&ignore_dir_path))
                .cloned()
                .collect::<Vec<_>>();
            // NOTE(review): files use an inclusive range (`0..=len`) while
            // dirs use an exclusive one (`0..len`) — presumably so that not
            // every subdir (including `ignore_dir_path` itself) can be
            // ignored at once; confirm the asymmetry is intentional.
            let files_to_ignore = {
                let len = rng.gen_range(0..=subfiles.len());
                subfiles.choose_multiple(rng, len)
            };
            let dirs_to_ignore = {
                let len = rng.gen_range(0..subdirs.len());
                subdirs.choose_multiple(rng, len)
            };

            let mut ignore_contents = String::new();
            for path_to_ignore in files_to_ignore.chain(dirs_to_ignore) {
                writeln!(
                    ignore_contents,
                    "{}",
                    path_to_ignore
                        .strip_prefix(&ignore_dir_path)
                        .unwrap()
                        .to_str()
                        .unwrap()
                )
                .unwrap();
            }
            log::info!(
                "creating gitignore {:?} with contents:\n{}",
                ignore_path.strip_prefix(&root_path).unwrap(),
                ignore_contents
            );
            fs.save(
                &ignore_path,
                &ignore_contents.as_str().into(),
                Default::default(),
            )
            .await
            .unwrap();
        } else {
            // Rename or delete a random existing file or dir. `dirs[1..]`
            // skips the first dir so it is never moved or removed — assumes
            // `paths()` yields the root first; TODO confirm.
            let old_path = {
                let file_path = files.choose(rng);
                let dir_path = dirs[1..].choose(rng);
                file_path.into_iter().chain(dir_path).choose(rng).unwrap()
            };

            let is_rename = rng.gen();
            if is_rename {
                // Pick a destination parent that is not inside the path being
                // moved (which would be a cycle).
                let new_path_parent = dirs
                    .iter()
                    .filter(|d| !d.starts_with(old_path))
                    .choose(rng)
                    .unwrap();

                // Sometimes replace the destination dir entirely instead of
                // creating a new child inside it.
                let overwrite_existing_dir =
                    !old_path.starts_with(&new_path_parent) && rng.gen_bool(0.3);
                let new_path = if overwrite_existing_dir {
                    fs.remove_dir(
                        &new_path_parent,
                        RemoveOptions {
                            recursive: true,
                            ignore_if_not_exists: true,
                        },
                    )
                    .await
                    .unwrap();
                    new_path_parent.to_path_buf()
                } else {
                    new_path_parent.join(gen_name(rng))
                };

                log::info!(
                    "renaming {:?} to {}{:?}",
                    old_path.strip_prefix(&root_path).unwrap(),
                    if overwrite_existing_dir {
                        "overwrite "
                    } else {
                        ""
                    },
                    new_path.strip_prefix(&root_path).unwrap()
                );
                fs.rename(
                    &old_path,
                    &new_path,
                    fs::RenameOptions {
                        overwrite: true,
                        ignore_if_exists: true,
                    },
                )
                .await
                .unwrap();
            } else if fs.is_file(&old_path).await {
                log::info!(
                    "deleting file {:?}",
                    old_path.strip_prefix(&root_path).unwrap()
                );
                fs.remove_file(old_path, Default::default()).await.unwrap();
            } else {
                log::info!(
                    "deleting dir {:?}",
                    old_path.strip_prefix(&root_path).unwrap()
                );
                fs.remove_dir(
                    &old_path,
                    RemoveOptions {
                        recursive: true,
                        ignore_if_not_exists: true,
                    },
                )
                .await
                .unwrap();
            }
        }
    }
4292
4293 fn gen_name(rng: &mut impl Rng) -> String {
4294 (0..6)
4295 .map(|_| rng.sample(rand::distributions::Alphanumeric))
4296 .map(char::from)
4297 .collect()
4298 }
4299
    impl LocalSnapshot {
        /// Asserts the internal consistency of the snapshot: the two entry
        /// indices agree, file iterators match the entry list, traversal
        /// orders agree, and every tracked gitignore has corresponding
        /// entries.
        fn check_invariants(&self) {
            // `entries_by_path` and `entries_by_id` must contain exactly the
            // same (path, id) pairs (the id index is compared as a sorted set).
            assert_eq!(
                self.entries_by_path
                    .cursor::<()>()
                    .map(|e| (&e.path, e.id))
                    .collect::<Vec<_>>(),
                self.entries_by_id
                    .cursor::<()>()
                    .map(|e| (&e.path, e.id))
                    .collect::<collections::BTreeSet<_>>()
                    .into_iter()
                    .collect::<Vec<_>>(),
                "entries_by_path and entries_by_id are inconsistent"
            );

            // `files(true, ..)` must yield every file entry in order, and
            // `files(false, ..)` only the non-ignored ones.
            let mut files = self.files(true, 0);
            let mut visible_files = self.files(false, 0);
            for entry in self.entries_by_path.cursor::<()>() {
                if entry.is_file() {
                    assert_eq!(files.next().unwrap().inode, entry.inode);
                    if !entry.is_ignored {
                        assert_eq!(visible_files.next().unwrap().inode, entry.inode);
                    }
                }
            }

            assert!(files.next().is_none());
            assert!(visible_files.next().is_none());

            // Walk the tree via `child_entries`. Note: despite the name,
            // inserting each node's children at the old stack length and
            // popping from the end visits entries in depth-first preorder —
            // which is why it can be compared against the cursor order below.
            let mut bfs_paths = Vec::new();
            let mut stack = vec![Path::new("")];
            while let Some(path) = stack.pop() {
                bfs_paths.push(path);
                let ix = stack.len();
                for child_entry in self.child_entries(path) {
                    stack.insert(ix, &child_entry.path);
                }
            }

            let dfs_paths_via_iter = self
                .entries_by_path
                .cursor::<()>()
                .map(|e| e.path.as_ref())
                .collect::<Vec<_>>();
            assert_eq!(bfs_paths, dfs_paths_via_iter);

            // The public traversal API must agree with the raw cursor order.
            let dfs_paths_via_traversal = self
                .entries(true)
                .map(|e| e.path.as_ref())
                .collect::<Vec<_>>();
            assert_eq!(dfs_paths_via_traversal, dfs_paths_via_iter);

            // Every directory with a tracked gitignore must itself have an
            // entry, as must its `.gitignore` file.
            for ignore_parent_abs_path in self.ignores_by_parent_abs_path.keys() {
                let ignore_parent_path =
                    ignore_parent_abs_path.strip_prefix(&self.abs_path).unwrap();
                assert!(self.entry_for_path(&ignore_parent_path).is_some());
                assert!(self
                    .entry_for_path(ignore_parent_path.join(&*GITIGNORE))
                    .is_some());
            }
        }

        /// Flattens the snapshot into `(path, inode, is_ignored)` tuples,
        /// sorted by path, optionally excluding ignored entries — used for
        /// order-insensitive snapshot comparisons in tests.
        fn to_vec(&self, include_ignored: bool) -> Vec<(&Path, u64, bool)> {
            let mut paths = Vec::new();
            for entry in self.entries_by_path.cursor::<()>() {
                if include_ignored || !entry.is_ignored {
                    paths.push((entry.path.as_ref(), entry.inode, entry.is_ignored));
                }
            }
            paths.sort_by(|a, b| a.0.cmp(b.0));
            paths
        }
    }
4374}