1use crate::{
2 copy_recursive, ignore::IgnoreStack, DiagnosticSummary, ProjectEntryId, RemoveOptions,
3};
4use ::ignore::gitignore::{Gitignore, GitignoreBuilder};
5use anyhow::{anyhow, Context, Result};
6use client::{proto, Client};
7use clock::ReplicaId;
8use collections::{HashMap, VecDeque};
9use fs::{repository::GitRepository, Fs, LineEnding};
10use futures::{
11 channel::{
12 mpsc::{self, UnboundedSender},
13 oneshot,
14 },
15 select_biased,
16 task::Poll,
17 Stream, StreamExt,
18};
19use fuzzy::CharBag;
20use git::{DOT_GIT, GITIGNORE};
21use gpui::{executor, AppContext, AsyncAppContext, Entity, ModelContext, ModelHandle, Task};
22use language::{
23 proto::{
24 deserialize_fingerprint, deserialize_version, serialize_fingerprint, serialize_line_ending,
25 serialize_version,
26 },
27 Buffer, DiagnosticEntry, File as _, PointUtf16, Rope, RopeFingerprint, Unclipped,
28};
29use lsp::LanguageServerId;
30use parking_lot::Mutex;
31use postage::{
32 barrier,
33 prelude::{Sink as _, Stream as _},
34 watch,
35};
36use smol::channel::{self, Sender};
37use std::{
38 any::Any,
39 cmp::{self, Ordering},
40 convert::TryFrom,
41 ffi::OsStr,
42 fmt,
43 future::Future,
44 mem,
45 ops::{Deref, DerefMut},
46 path::{Path, PathBuf},
47 pin::Pin,
48 sync::{
49 atomic::{AtomicUsize, Ordering::SeqCst},
50 Arc,
51 },
52 time::{Duration, SystemTime},
53};
54use sum_tree::{Bias, Edit, SeekTarget, SumTree, TreeMap, TreeSet};
55use util::{paths::HOME, ResultExt, TryFutureExt};
56
/// Identifier for a worktree within a project. Wraps the `usize` id of the
/// worktree's gpui model (see `WorktreeId::from_usize(cx.model_id())` below).
#[derive(Copy, Clone, PartialEq, Eq, Debug, Hash, PartialOrd, Ord)]
pub struct WorktreeId(usize);
59
/// A tree of files, either backed by the local file system (`Local`) or
/// mirrored from a collaborator's machine over RPC (`Remote`).
pub enum Worktree {
    Local(LocalWorktree),
    Remote(RemoteWorktree),
}
64
/// A worktree whose files live on the local file system and whose snapshot is
/// kept up to date by a background scanner task.
pub struct LocalWorktree {
    snapshot: LocalSnapshot,
    /// Requests asking the background scanner to rescan specific paths; the
    /// paired `barrier::Sender` is dropped when the covering scan completes,
    /// unblocking the waiter.
    path_changes_tx: channel::Sender<(Vec<PathBuf>, barrier::Sender)>,
    /// Watch pair tracking whether a background scan is currently in flight.
    is_scanning: (watch::Sender<bool>, watch::Receiver<bool>),
    // Held only to keep the scanner task alive for the worktree's lifetime.
    _background_scanner_task: Task<()>,
    /// Present while this worktree is shared with remote collaborators.
    share: Option<ShareState>,
    /// Full diagnostic entries per worktree-relative path, kept sorted by
    /// language server id (see `update_diagnostics`).
    diagnostics: HashMap<
        Arc<Path>,
        Vec<(
            LanguageServerId,
            Vec<DiagnosticEntry<Unclipped<PointUtf16>>>,
        )>,
    >,
    /// Per-path, per-language-server diagnostic counts.
    diagnostic_summaries: HashMap<Arc<Path>, HashMap<LanguageServerId, DiagnosticSummary>>,
    client: Arc<Client>,
    fs: Arc<dyn Fs>,
    visible: bool,
}
83
/// A read-only mirror of a worktree hosted on another machine. Updates arrive
/// as `proto::UpdateWorktree` messages and are applied to `background_snapshot`
/// before being copied into `snapshot` on the model's thread.
pub struct RemoteWorktree {
    snapshot: Snapshot,
    /// Snapshot mutated off the main thread as remote updates are applied.
    background_snapshot: Arc<Mutex<Snapshot>>,
    project_id: u64,
    client: Arc<Client>,
    /// `None` once the host has disconnected (see `disconnected_from_host`).
    updates_tx: Option<UnboundedSender<proto::UpdateWorktree>>,
    /// Tasks waiting for a particular scan id to be observed, resolved in
    /// FIFO order as snapshots arrive.
    snapshot_subscriptions: VecDeque<(usize, oneshot::Sender<()>)>,
    replica_id: ReplicaId,
    diagnostic_summaries: HashMap<Arc<Path>, HashMap<LanguageServerId, DiagnosticSummary>>,
    visible: bool,
    disconnected: bool,
}
96
/// An immutable-in-spirit view of a worktree's contents at a point in time.
/// Cheap to clone; entries are stored in copy-on-write sum trees.
#[derive(Clone)]
pub struct Snapshot {
    id: WorktreeId,
    abs_path: Arc<Path>,
    /// File name of the root, used for fuzzy matching.
    root_name: String,
    root_char_bag: CharBag,
    entries_by_path: SumTree<Entry>,
    entries_by_id: SumTree<PathEntry>,
    /// Git repositories discovered in the tree, keyed by their work directory.
    repository_entries: TreeMap<RepositoryWorkDirectory, RepositoryEntry>,

    /// A number that increases every time the worktree begins scanning
    /// a set of paths from the filesystem. This scanning could be caused
    /// by some operation performed on the worktree, such as reading or
    /// writing a file, or by an event reported by the filesystem.
    scan_id: usize,

    /// The latest scan id that has completed, and whose preceding scans
    /// have all completed. The current `scan_id` could be more than one
    /// greater than the `completed_scan_id` if operations are performed
    /// on the worktree while it is processing a file-system event.
    completed_scan_id: usize,
}
119
/// Metadata about a git repository found within the worktree, as stored in the
/// snapshot and sent to collaborators (see the `proto::RepositoryEntry` conversion).
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct RepositoryEntry {
    /// Scan id at which this repository entry was last updated; used by
    /// `LocalWorktree::changed_repos` to detect changes between snapshots.
    pub(crate) scan_id: usize,
    /// Project entry id of the repository's working directory.
    pub(crate) work_directory_id: ProjectEntryId,
    /// Current branch name, if one was detected.
    pub(crate) branch: Option<Arc<str>>,
}
126
127impl RepositoryEntry {
128 pub fn branch(&self) -> Option<Arc<str>> {
129 self.branch.clone()
130 }
131
132 pub fn work_directory_id(&self) -> ProjectEntryId {
133 self.work_directory_id
134 }
135}
136
137impl From<&RepositoryEntry> for proto::RepositoryEntry {
138 fn from(value: &RepositoryEntry) -> Self {
139 proto::RepositoryEntry {
140 scan_id: value.scan_id as u64,
141 work_directory_id: value.work_directory_id.to_proto(),
142 branch: value.branch.as_ref().map(|str| str.to_string()),
143 }
144 }
145}
146
/// The repository's 'content path': the worktree-relative path of the folder
/// that contains the `.git` directory.
#[derive(Clone, Debug, Ord, PartialOrd, Eq, PartialEq)]
pub struct RepositoryWorkDirectory(Arc<Path>);
150
151impl RepositoryWorkDirectory {
152 // Note that these paths should be relative to the worktree root.
153 pub(crate) fn contains(&self, path: &Path) -> bool {
154 path.starts_with(self.0.as_ref())
155 }
156
157 pub(crate) fn relativize(&self, path: &Path) -> Option<RepoPath> {
158 path.strip_prefix(self.0.as_ref())
159 .ok()
160 .map(move |path| RepoPath(path.to_owned()))
161 }
162}
163
/// Lets a work directory be used anywhere a `&Path` is expected.
impl Deref for RepositoryWorkDirectory {
    type Target = Path;

    fn deref(&self) -> &Self::Target {
        self.0.as_ref()
    }
}
171
172impl<'a> From<&'a str> for RepositoryWorkDirectory {
173 fn from(value: &'a str) -> Self {
174 RepositoryWorkDirectory(Path::new(value).into())
175 }
176}
177
178impl Default for RepositoryWorkDirectory {
179 fn default() -> Self {
180 RepositoryWorkDirectory(Arc::from(Path::new("")))
181 }
182}
183
/// A path relative to a git repository's working directory (see
/// `RepositoryWorkDirectory::relativize`).
#[derive(Clone, Debug, Ord, PartialOrd, Eq, PartialEq)]
pub struct RepoPath(PathBuf);
186
187impl AsRef<Path> for RepoPath {
188 fn as_ref(&self) -> &Path {
189 self.0.as_ref()
190 }
191}
192
/// Lets a `RepoPath` be used anywhere a `&PathBuf` is expected.
impl Deref for RepoPath {
    type Target = PathBuf;

    fn deref(&self) -> &Self::Target {
        &self.0
    }
}
200
/// Allows a work directory to be passed to APIs accepting `impl AsRef<Path>`.
impl AsRef<Path> for RepositoryWorkDirectory {
    fn as_ref(&self) -> &Path {
        self.0.as_ref()
    }
}
206
/// A `Snapshot` augmented with local-only state: gitignore caches, open git
/// repositories, and bookkeeping used by the background scanner.
#[derive(Debug, Clone)]
pub struct LocalSnapshot {
    /// Parsed `.gitignore` files keyed by the absolute path of the directory
    /// containing them, paired with the scan id at which they were loaded.
    ignores_by_parent_abs_path: HashMap<Arc<Path>, (Arc<Gitignore>, usize)>,
    // The ProjectEntryId corresponds to the entry for the .git dir
    // work_directory_id
    git_repositories: TreeMap<ProjectEntryId, LocalRepositoryEntry>,
    removed_entry_ids: HashMap<u64, ProjectEntryId>,
    next_entry_id: Arc<AtomicUsize>,
    snapshot: Snapshot,
}
217
/// A handle to an open git repository on the local file system.
#[derive(Debug, Clone)]
pub struct LocalRepositoryEntry {
    /// Shared handle to the underlying git repository implementation.
    pub(crate) repo_ptr: Arc<Mutex<dyn GitRepository>>,
    /// Path to the actual .git folder.
    /// Note: if .git is a file, this points to the folder indicated by the .git file
    pub(crate) git_dir_path: Arc<Path>,
}
225
226impl LocalRepositoryEntry {
227 // Note that this path should be relative to the worktree root.
228 pub(crate) fn in_dot_git(&self, path: &Path) -> bool {
229 path.starts_with(self.git_dir_path.as_ref())
230 }
231}
232
/// Exposes the shared `Snapshot` API on `LocalSnapshot` directly.
impl Deref for LocalSnapshot {
    type Target = Snapshot;

    fn deref(&self) -> &Self::Target {
        &self.snapshot
    }
}
240
/// Mutable counterpart of the `Deref` impl above.
impl DerefMut for LocalSnapshot {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.snapshot
    }
}
246
/// Messages sent from the background scanner to the worktree model.
enum ScanState {
    /// A scan of one or more paths has begun.
    Started,
    /// The scanner produced an updated snapshot.
    Updated {
        snapshot: LocalSnapshot,
        /// Paths that changed relative to the previous snapshot.
        changes: HashMap<Arc<Path>, PathChange>,
        /// Dropped after the snapshot is applied, unblocking any caller
        /// waiting on `path_changes_tx`.
        barrier: Option<barrier::Sender>,
        /// Whether the scanner is still working after this update.
        scanning: bool,
    },
}
256
/// State held while a local worktree is shared with collaborators.
struct ShareState {
    project_id: u64,
    /// Publishes each new snapshot to the task that streams updates to the server.
    snapshots_tx: watch::Sender<LocalSnapshot>,
    /// Signalled to retry sending updates after a failed request (see `share`).
    resume_updates: watch::Sender<()>,
    // Held only to keep the update-streaming task alive while shared.
    _maintain_remote_snapshot: Task<Option<()>>,
}
263
/// Events emitted by the `Worktree` model.
pub enum Event {
    /// Entries changed on disk or via a remote update, keyed by path.
    UpdatedEntries(HashMap<Arc<Path>, PathChange>),
    /// Git repositories were added, removed, or changed.
    UpdatedGitRepositories(Vec<RepositoryEntry>),
}
268
/// Registers `Worktree` as a gpui model entity that emits `Event`s.
impl Entity for Worktree {
    type Event = Event;
}
272
impl Worktree {
    /// Creates a local worktree rooted at `path`.
    ///
    /// Stats the root up front (to learn whether it is a file or directory),
    /// builds the initial snapshot, wires a channel that forwards background
    /// scan states into the model, and spawns the background scanner that
    /// watches the file system for changes.
    pub async fn local(
        client: Arc<Client>,
        path: impl Into<Arc<Path>>,
        visible: bool,
        fs: Arc<dyn Fs>,
        next_entry_id: Arc<AtomicUsize>,
        cx: &mut AsyncAppContext,
    ) -> Result<ModelHandle<Self>> {
        // After determining whether the root entry is a file or a directory, populate the
        // snapshot's "root name", which will be used for the purpose of fuzzy matching.
        let abs_path = path.into();
        let metadata = fs
            .metadata(&abs_path)
            .await
            .context("failed to stat worktree path")?;

        Ok(cx.add_model(move |cx: &mut ModelContext<Worktree>| {
            let root_name = abs_path
                .file_name()
                .map_or(String::new(), |f| f.to_string_lossy().to_string());

            let mut snapshot = LocalSnapshot {
                ignores_by_parent_abs_path: Default::default(),
                removed_entry_ids: Default::default(),
                git_repositories: Default::default(),
                next_entry_id,
                snapshot: Snapshot {
                    id: WorktreeId::from_usize(cx.model_id()),
                    abs_path: abs_path.clone(),
                    root_name: root_name.clone(),
                    root_char_bag: root_name.chars().map(|c| c.to_ascii_lowercase()).collect(),
                    entries_by_path: Default::default(),
                    entries_by_id: Default::default(),
                    repository_entries: Default::default(),
                    // Start at 1 with nothing completed yet, so the first
                    // scan is observably in progress.
                    scan_id: 1,
                    completed_scan_id: 0,
                },
            };

            // Seed the snapshot with the root entry so the tree is non-empty
            // before the first background scan completes.
            if let Some(metadata) = metadata {
                snapshot.insert_entry(
                    Entry::new(
                        Arc::from(Path::new("")),
                        &metadata,
                        &snapshot.next_entry_id,
                        snapshot.root_char_bag,
                    ),
                    fs.as_ref(),
                );
            }

            let (path_changes_tx, path_changes_rx) = channel::unbounded();
            let (scan_states_tx, mut scan_states_rx) = mpsc::unbounded();

            // Forward scan states from the background scanner to this model:
            // toggle `is_scanning`, apply new snapshots, and emit events.
            // The weak handle lets the loop end when the model is dropped.
            cx.spawn_weak(|this, mut cx| async move {
                while let Some((state, this)) = scan_states_rx.next().await.zip(this.upgrade(&cx)) {
                    this.update(&mut cx, |this, cx| {
                        let this = this.as_local_mut().unwrap();
                        match state {
                            ScanState::Started => {
                                *this.is_scanning.0.borrow_mut() = true;
                            }
                            ScanState::Updated {
                                snapshot,
                                changes,
                                barrier,
                                scanning,
                            } => {
                                *this.is_scanning.0.borrow_mut() = scanning;
                                this.set_snapshot(snapshot, cx);
                                cx.emit(Event::UpdatedEntries(changes));
                                // Dropping the barrier unblocks whoever
                                // requested this rescan.
                                drop(barrier);
                            }
                        }
                        cx.notify();
                    });
                }
            })
            .detach();

            // Run the scanner itself on the background executor, fed by
            // file-system events (debounced to 100ms) and explicit rescan
            // requests from `path_changes_rx`.
            let background_scanner_task = cx.background().spawn({
                let fs = fs.clone();
                let snapshot = snapshot.clone();
                let background = cx.background().clone();
                async move {
                    let events = fs.watch(&abs_path, Duration::from_millis(100)).await;
                    BackgroundScanner::new(
                        snapshot,
                        fs,
                        scan_states_tx,
                        background,
                        path_changes_rx,
                    )
                    .run(events)
                    .await;
                }
            });

            Worktree::Local(LocalWorktree {
                snapshot,
                is_scanning: watch::channel_with(true),
                share: None,
                path_changes_tx,
                _background_scanner_task: background_scanner_task,
                diagnostics: Default::default(),
                diagnostic_summaries: Default::default(),
                client,
                fs,
                visible,
            })
        }))
    }

    /// Creates a worktree that mirrors a remote host's worktree.
    ///
    /// Remote updates are applied to a background snapshot off the main
    /// thread, then copied into the foreground snapshot on the model's
    /// thread, waking any tasks waiting on a particular scan id.
    pub fn remote(
        project_remote_id: u64,
        replica_id: ReplicaId,
        worktree: proto::WorktreeMetadata,
        client: Arc<Client>,
        cx: &mut AppContext,
    ) -> ModelHandle<Self> {
        cx.add_model(|cx: &mut ModelContext<Self>| {
            let snapshot = Snapshot {
                id: WorktreeId(worktree.id as usize),
                abs_path: Arc::from(PathBuf::from(worktree.abs_path)),
                root_name: worktree.root_name.clone(),
                root_char_bag: worktree
                    .root_name
                    .chars()
                    .map(|c| c.to_ascii_lowercase())
                    .collect(),
                entries_by_path: Default::default(),
                entries_by_id: Default::default(),
                repository_entries: Default::default(),
                scan_id: 1,
                completed_scan_id: 0,
            };

            let (updates_tx, mut updates_rx) = mpsc::unbounded();
            let background_snapshot = Arc::new(Mutex::new(snapshot.clone()));
            let (mut snapshot_updated_tx, mut snapshot_updated_rx) = watch::channel();

            // Apply incoming protobuf updates to the background snapshot,
            // signalling `snapshot_updated_tx` after each one. Errors are
            // logged rather than propagated so one bad update doesn't stall
            // the stream.
            cx.background()
                .spawn({
                    let background_snapshot = background_snapshot.clone();
                    async move {
                        while let Some(update) = updates_rx.next().await {
                            if let Err(error) =
                                background_snapshot.lock().apply_remote_update(update)
                            {
                                log::error!("error applying worktree update: {}", error);
                            }
                            snapshot_updated_tx.send(()).await.ok();
                        }
                    }
                })
                .detach();

            // Copy the background snapshot into the model whenever it
            // changes, and resolve snapshot subscriptions in FIFO order as
            // their scan ids become observed.
            cx.spawn_weak(|this, mut cx| async move {
                while (snapshot_updated_rx.recv().await).is_some() {
                    if let Some(this) = this.upgrade(&cx) {
                        this.update(&mut cx, |this, cx| {
                            let this = this.as_remote_mut().unwrap();
                            this.snapshot = this.background_snapshot.lock().clone();
                            cx.emit(Event::UpdatedEntries(Default::default()));
                            cx.notify();
                            while let Some((scan_id, _)) = this.snapshot_subscriptions.front() {
                                if this.observed_snapshot(*scan_id) {
                                    let (_, tx) = this.snapshot_subscriptions.pop_front().unwrap();
                                    let _ = tx.send(());
                                } else {
                                    break;
                                }
                            }
                        });
                    } else {
                        // Model was dropped; stop mirroring.
                        break;
                    }
                }
            })
            .detach();

            Worktree::Remote(RemoteWorktree {
                project_id: project_remote_id,
                replica_id,
                snapshot: snapshot.clone(),
                background_snapshot,
                updates_tx: Some(updates_tx),
                snapshot_subscriptions: Default::default(),
                client: client.clone(),
                diagnostic_summaries: Default::default(),
                visible: worktree.visible,
                disconnected: false,
            })
        })
    }

    /// Returns the local variant, if this worktree is local.
    pub fn as_local(&self) -> Option<&LocalWorktree> {
        if let Worktree::Local(worktree) = self {
            Some(worktree)
        } else {
            None
        }
    }

    /// Returns the remote variant, if this worktree is remote.
    pub fn as_remote(&self) -> Option<&RemoteWorktree> {
        if let Worktree::Remote(worktree) = self {
            Some(worktree)
        } else {
            None
        }
    }

    /// Mutable counterpart of [`Self::as_local`].
    pub fn as_local_mut(&mut self) -> Option<&mut LocalWorktree> {
        if let Worktree::Local(worktree) = self {
            Some(worktree)
        } else {
            None
        }
    }

    /// Mutable counterpart of [`Self::as_remote`].
    pub fn as_remote_mut(&mut self) -> Option<&mut RemoteWorktree> {
        if let Worktree::Remote(worktree) = self {
            Some(worktree)
        } else {
            None
        }
    }

    pub fn is_local(&self) -> bool {
        matches!(self, Worktree::Local(_))
    }

    pub fn is_remote(&self) -> bool {
        !self.is_local()
    }

    /// Returns a clone of the current shared snapshot, regardless of variant.
    pub fn snapshot(&self) -> Snapshot {
        match self {
            Worktree::Local(worktree) => worktree.snapshot().snapshot,
            Worktree::Remote(worktree) => worktree.snapshot(),
        }
    }

    /// The id of the most recently started scan (see `Snapshot::scan_id`).
    pub fn scan_id(&self) -> usize {
        match self {
            Worktree::Local(worktree) => worktree.snapshot.scan_id,
            Worktree::Remote(worktree) => worktree.snapshot.scan_id,
        }
    }

    /// The most recent scan id whose work (and all prior scans) has finished.
    pub fn completed_scan_id(&self) -> usize {
        match self {
            Worktree::Local(worktree) => worktree.snapshot.completed_scan_id,
            Worktree::Remote(worktree) => worktree.snapshot.completed_scan_id,
        }
    }

    pub fn is_visible(&self) -> bool {
        match self {
            Worktree::Local(worktree) => worktree.visible,
            Worktree::Remote(worktree) => worktree.visible,
        }
    }

    /// Replica id of this worktree's owner; local worktrees are replica 0.
    pub fn replica_id(&self) -> ReplicaId {
        match self {
            Worktree::Local(_) => 0,
            Worktree::Remote(worktree) => worktree.replica_id,
        }
    }

    /// Iterates over every (path, language server, summary) triple of
    /// diagnostics known to this worktree.
    pub fn diagnostic_summaries(
        &self,
    ) -> impl Iterator<Item = (Arc<Path>, LanguageServerId, DiagnosticSummary)> + '_ {
        match self {
            Worktree::Local(worktree) => &worktree.diagnostic_summaries,
            Worktree::Remote(worktree) => &worktree.diagnostic_summaries,
        }
        .iter()
        .flat_map(|(path, summaries)| {
            summaries
                .iter()
                .map(move |(&server_id, &summary)| (path.clone(), server_id, summary))
        })
    }

    /// The absolute path of the worktree root on its host machine.
    pub fn abs_path(&self) -> Arc<Path> {
        match self {
            Worktree::Local(worktree) => worktree.abs_path.clone(),
            Worktree::Remote(worktree) => worktree.abs_path.clone(),
        }
    }
}
567
568impl LocalWorktree {
569 pub fn contains_abs_path(&self, path: &Path) -> bool {
570 path.starts_with(&self.abs_path)
571 }
572
573 fn absolutize(&self, path: &Path) -> PathBuf {
574 if path.file_name().is_some() {
575 self.abs_path.join(path)
576 } else {
577 self.abs_path.to_path_buf()
578 }
579 }
580
581 pub(crate) fn load_buffer(
582 &mut self,
583 id: u64,
584 path: &Path,
585 cx: &mut ModelContext<Worktree>,
586 ) -> Task<Result<ModelHandle<Buffer>>> {
587 let path = Arc::from(path);
588 cx.spawn(move |this, mut cx| async move {
589 let (file, contents, diff_base) = this
590 .update(&mut cx, |t, cx| t.as_local().unwrap().load(&path, cx))
591 .await?;
592 let text_buffer = cx
593 .background()
594 .spawn(async move { text::Buffer::new(0, id, contents) })
595 .await;
596 Ok(cx.add_model(|cx| {
597 let mut buffer = Buffer::build(text_buffer, diff_base, Some(Arc::new(file)));
598 buffer.git_diff_recalc(cx);
599 buffer
600 }))
601 })
602 }
603
604 pub fn diagnostics_for_path(
605 &self,
606 path: &Path,
607 ) -> Vec<(
608 LanguageServerId,
609 Vec<DiagnosticEntry<Unclipped<PointUtf16>>>,
610 )> {
611 self.diagnostics.get(path).cloned().unwrap_or_default()
612 }
613
614 pub fn update_diagnostics(
615 &mut self,
616 server_id: LanguageServerId,
617 worktree_path: Arc<Path>,
618 diagnostics: Vec<DiagnosticEntry<Unclipped<PointUtf16>>>,
619 _: &mut ModelContext<Worktree>,
620 ) -> Result<bool> {
621 let summaries_by_server_id = self
622 .diagnostic_summaries
623 .entry(worktree_path.clone())
624 .or_default();
625
626 let old_summary = summaries_by_server_id
627 .remove(&server_id)
628 .unwrap_or_default();
629
630 let new_summary = DiagnosticSummary::new(&diagnostics);
631 if new_summary.is_empty() {
632 if let Some(diagnostics_by_server_id) = self.diagnostics.get_mut(&worktree_path) {
633 if let Ok(ix) = diagnostics_by_server_id.binary_search_by_key(&server_id, |e| e.0) {
634 diagnostics_by_server_id.remove(ix);
635 }
636 if diagnostics_by_server_id.is_empty() {
637 self.diagnostics.remove(&worktree_path);
638 }
639 }
640 } else {
641 summaries_by_server_id.insert(server_id, new_summary);
642 let diagnostics_by_server_id =
643 self.diagnostics.entry(worktree_path.clone()).or_default();
644 match diagnostics_by_server_id.binary_search_by_key(&server_id, |e| e.0) {
645 Ok(ix) => {
646 diagnostics_by_server_id[ix] = (server_id, diagnostics);
647 }
648 Err(ix) => {
649 diagnostics_by_server_id.insert(ix, (server_id, diagnostics));
650 }
651 }
652 }
653
654 if !old_summary.is_empty() || !new_summary.is_empty() {
655 if let Some(share) = self.share.as_ref() {
656 self.client
657 .send(proto::UpdateDiagnosticSummary {
658 project_id: share.project_id,
659 worktree_id: self.id().to_proto(),
660 summary: Some(proto::DiagnosticSummary {
661 path: worktree_path.to_string_lossy().to_string(),
662 language_server_id: server_id.0 as u64,
663 error_count: new_summary.error_count as u32,
664 warning_count: new_summary.warning_count as u32,
665 }),
666 })
667 .log_err();
668 }
669 }
670
671 Ok(!old_summary.is_empty() || !new_summary.is_empty())
672 }
673
674 fn set_snapshot(&mut self, new_snapshot: LocalSnapshot, cx: &mut ModelContext<Worktree>) {
675 let updated_repos = Self::changed_repos(
676 &self.snapshot.repository_entries,
677 &new_snapshot.repository_entries,
678 );
679 self.snapshot = new_snapshot;
680
681 if let Some(share) = self.share.as_mut() {
682 *share.snapshots_tx.borrow_mut() = self.snapshot.clone();
683 }
684
685 if !updated_repos.is_empty() {
686 cx.emit(Event::UpdatedGitRepositories(updated_repos));
687 }
688 }
689
690 fn changed_repos(
691 old_repos: &TreeMap<RepositoryWorkDirectory, RepositoryEntry>,
692 new_repos: &TreeMap<RepositoryWorkDirectory, RepositoryEntry>,
693 ) -> Vec<RepositoryEntry> {
694 fn diff<'a>(
695 a: impl Iterator<Item = &'a RepositoryEntry>,
696 mut b: impl Iterator<Item = &'a RepositoryEntry>,
697 updated: &mut HashMap<ProjectEntryId, RepositoryEntry>,
698 ) {
699 for a_repo in a {
700 let matched = b.find(|b_repo| {
701 a_repo.work_directory_id == b_repo.work_directory_id
702 && a_repo.scan_id == b_repo.scan_id
703 });
704
705 if matched.is_none() {
706 updated.insert(a_repo.work_directory_id, a_repo.clone());
707 }
708 }
709 }
710
711 let mut updated = HashMap::<ProjectEntryId, RepositoryEntry>::default();
712
713 diff(old_repos.values(), new_repos.values(), &mut updated);
714 diff(new_repos.values(), old_repos.values(), &mut updated);
715
716 updated.into_values().collect()
717 }
718
719 pub fn scan_complete(&self) -> impl Future<Output = ()> {
720 let mut is_scanning_rx = self.is_scanning.1.clone();
721 async move {
722 let mut is_scanning = is_scanning_rx.borrow().clone();
723 while is_scanning {
724 if let Some(value) = is_scanning_rx.recv().await {
725 is_scanning = value;
726 } else {
727 break;
728 }
729 }
730 }
731 }
732
733 pub fn snapshot(&self) -> LocalSnapshot {
734 self.snapshot.clone()
735 }
736
737 pub fn metadata_proto(&self) -> proto::WorktreeMetadata {
738 proto::WorktreeMetadata {
739 id: self.id().to_proto(),
740 root_name: self.root_name().to_string(),
741 visible: self.visible,
742 abs_path: self.abs_path().as_os_str().to_string_lossy().into(),
743 }
744 }
745
746 fn load(
747 &self,
748 path: &Path,
749 cx: &mut ModelContext<Worktree>,
750 ) -> Task<Result<(File, String, Option<String>)>> {
751 let handle = cx.handle();
752 let path = Arc::from(path);
753 let abs_path = self.absolutize(&path);
754 let fs = self.fs.clone();
755 let snapshot = self.snapshot();
756
757 let mut index_task = None;
758
759 if let Some(repo) = snapshot.repo_for(&path) {
760 let repo_path = repo.work_directory.relativize(&path).unwrap();
761 if let Some(repo) = self.git_repositories.get(&repo.dot_git_entry_id) {
762 let repo = repo.repo_ptr.to_owned();
763 index_task = Some(
764 cx.background()
765 .spawn(async move { repo.lock().load_index_text(&repo_path) }),
766 );
767 }
768 }
769
770 cx.spawn(|this, mut cx| async move {
771 let text = fs.load(&abs_path).await?;
772
773 let diff_base = if let Some(index_task) = index_task {
774 index_task.await
775 } else {
776 None
777 };
778
779 // Eagerly populate the snapshot with an updated entry for the loaded file
780 let entry = this
781 .update(&mut cx, |this, cx| {
782 this.as_local().unwrap().refresh_entry(path, None, cx)
783 })
784 .await?;
785
786 Ok((
787 File {
788 entry_id: entry.id,
789 worktree: handle,
790 path: entry.path,
791 mtime: entry.mtime,
792 is_local: true,
793 is_deleted: false,
794 },
795 text,
796 diff_base,
797 ))
798 })
799 }
800
801 pub fn save_buffer(
802 &self,
803 buffer_handle: ModelHandle<Buffer>,
804 path: Arc<Path>,
805 has_changed_file: bool,
806 cx: &mut ModelContext<Worktree>,
807 ) -> Task<Result<(clock::Global, RopeFingerprint, SystemTime)>> {
808 let handle = cx.handle();
809 let buffer = buffer_handle.read(cx);
810
811 let rpc = self.client.clone();
812 let buffer_id = buffer.remote_id();
813 let project_id = self.share.as_ref().map(|share| share.project_id);
814
815 let text = buffer.as_rope().clone();
816 let fingerprint = text.fingerprint();
817 let version = buffer.version();
818 let save = self.write_file(path, text, buffer.line_ending(), cx);
819
820 cx.as_mut().spawn(|mut cx| async move {
821 let entry = save.await?;
822
823 if has_changed_file {
824 let new_file = Arc::new(File {
825 entry_id: entry.id,
826 worktree: handle,
827 path: entry.path,
828 mtime: entry.mtime,
829 is_local: true,
830 is_deleted: false,
831 });
832
833 if let Some(project_id) = project_id {
834 rpc.send(proto::UpdateBufferFile {
835 project_id,
836 buffer_id,
837 file: Some(new_file.to_proto()),
838 })
839 .log_err();
840 }
841
842 buffer_handle.update(&mut cx, |buffer, cx| {
843 if has_changed_file {
844 buffer.file_updated(new_file, cx).detach();
845 }
846 });
847 }
848
849 if let Some(project_id) = project_id {
850 rpc.send(proto::BufferSaved {
851 project_id,
852 buffer_id,
853 version: serialize_version(&version),
854 mtime: Some(entry.mtime.into()),
855 fingerprint: serialize_fingerprint(fingerprint),
856 })?;
857 }
858
859 buffer_handle.update(&mut cx, |buffer, cx| {
860 buffer.did_save(version.clone(), fingerprint, entry.mtime, cx);
861 });
862
863 Ok((version, fingerprint, entry.mtime))
864 })
865 }
866
867 pub fn create_entry(
868 &self,
869 path: impl Into<Arc<Path>>,
870 is_dir: bool,
871 cx: &mut ModelContext<Worktree>,
872 ) -> Task<Result<Entry>> {
873 let path = path.into();
874 let abs_path = self.absolutize(&path);
875 let fs = self.fs.clone();
876 let write = cx.background().spawn(async move {
877 if is_dir {
878 fs.create_dir(&abs_path).await
879 } else {
880 fs.save(&abs_path, &Default::default(), Default::default())
881 .await
882 }
883 });
884
885 cx.spawn(|this, mut cx| async move {
886 write.await?;
887 this.update(&mut cx, |this, cx| {
888 this.as_local_mut().unwrap().refresh_entry(path, None, cx)
889 })
890 .await
891 })
892 }
893
894 pub fn write_file(
895 &self,
896 path: impl Into<Arc<Path>>,
897 text: Rope,
898 line_ending: LineEnding,
899 cx: &mut ModelContext<Worktree>,
900 ) -> Task<Result<Entry>> {
901 let path = path.into();
902 let abs_path = self.absolutize(&path);
903 let fs = self.fs.clone();
904 let write = cx
905 .background()
906 .spawn(async move { fs.save(&abs_path, &text, line_ending).await });
907
908 cx.spawn(|this, mut cx| async move {
909 write.await?;
910 this.update(&mut cx, |this, cx| {
911 this.as_local_mut().unwrap().refresh_entry(path, None, cx)
912 })
913 .await
914 })
915 }
916
917 pub fn delete_entry(
918 &self,
919 entry_id: ProjectEntryId,
920 cx: &mut ModelContext<Worktree>,
921 ) -> Option<Task<Result<()>>> {
922 let entry = self.entry_for_id(entry_id)?.clone();
923 let abs_path = self.abs_path.clone();
924 let fs = self.fs.clone();
925
926 let delete = cx.background().spawn(async move {
927 let mut abs_path = fs.canonicalize(&abs_path).await?;
928 if entry.path.file_name().is_some() {
929 abs_path = abs_path.join(&entry.path);
930 }
931 if entry.is_file() {
932 fs.remove_file(&abs_path, Default::default()).await?;
933 } else {
934 fs.remove_dir(
935 &abs_path,
936 RemoveOptions {
937 recursive: true,
938 ignore_if_not_exists: false,
939 },
940 )
941 .await?;
942 }
943 anyhow::Ok(abs_path)
944 });
945
946 Some(cx.spawn(|this, mut cx| async move {
947 let abs_path = delete.await?;
948 let (tx, mut rx) = barrier::channel();
949 this.update(&mut cx, |this, _| {
950 this.as_local_mut()
951 .unwrap()
952 .path_changes_tx
953 .try_send((vec![abs_path], tx))
954 })?;
955 rx.recv().await;
956 Ok(())
957 }))
958 }
959
960 pub fn rename_entry(
961 &self,
962 entry_id: ProjectEntryId,
963 new_path: impl Into<Arc<Path>>,
964 cx: &mut ModelContext<Worktree>,
965 ) -> Option<Task<Result<Entry>>> {
966 let old_path = self.entry_for_id(entry_id)?.path.clone();
967 let new_path = new_path.into();
968 let abs_old_path = self.absolutize(&old_path);
969 let abs_new_path = self.absolutize(&new_path);
970 let fs = self.fs.clone();
971 let rename = cx.background().spawn(async move {
972 fs.rename(&abs_old_path, &abs_new_path, Default::default())
973 .await
974 });
975
976 Some(cx.spawn(|this, mut cx| async move {
977 rename.await?;
978 this.update(&mut cx, |this, cx| {
979 this.as_local_mut()
980 .unwrap()
981 .refresh_entry(new_path.clone(), Some(old_path), cx)
982 })
983 .await
984 }))
985 }
986
987 pub fn copy_entry(
988 &self,
989 entry_id: ProjectEntryId,
990 new_path: impl Into<Arc<Path>>,
991 cx: &mut ModelContext<Worktree>,
992 ) -> Option<Task<Result<Entry>>> {
993 let old_path = self.entry_for_id(entry_id)?.path.clone();
994 let new_path = new_path.into();
995 let abs_old_path = self.absolutize(&old_path);
996 let abs_new_path = self.absolutize(&new_path);
997 let fs = self.fs.clone();
998 let copy = cx.background().spawn(async move {
999 copy_recursive(
1000 fs.as_ref(),
1001 &abs_old_path,
1002 &abs_new_path,
1003 Default::default(),
1004 )
1005 .await
1006 });
1007
1008 Some(cx.spawn(|this, mut cx| async move {
1009 copy.await?;
1010 this.update(&mut cx, |this, cx| {
1011 this.as_local_mut()
1012 .unwrap()
1013 .refresh_entry(new_path.clone(), None, cx)
1014 })
1015 .await
1016 }))
1017 }
1018
1019 fn refresh_entry(
1020 &self,
1021 path: Arc<Path>,
1022 old_path: Option<Arc<Path>>,
1023 cx: &mut ModelContext<Worktree>,
1024 ) -> Task<Result<Entry>> {
1025 let fs = self.fs.clone();
1026 let abs_root_path = self.abs_path.clone();
1027 let path_changes_tx = self.path_changes_tx.clone();
1028 cx.spawn_weak(move |this, mut cx| async move {
1029 let abs_path = fs.canonicalize(&abs_root_path).await?;
1030 let mut paths = Vec::with_capacity(2);
1031 paths.push(if path.file_name().is_some() {
1032 abs_path.join(&path)
1033 } else {
1034 abs_path.clone()
1035 });
1036 if let Some(old_path) = old_path {
1037 paths.push(if old_path.file_name().is_some() {
1038 abs_path.join(&old_path)
1039 } else {
1040 abs_path.clone()
1041 });
1042 }
1043
1044 let (tx, mut rx) = barrier::channel();
1045 path_changes_tx.try_send((paths, tx))?;
1046 rx.recv().await;
1047 this.upgrade(&cx)
1048 .ok_or_else(|| anyhow!("worktree was dropped"))?
1049 .update(&mut cx, |this, _| {
1050 this.entry_for_path(path)
1051 .cloned()
1052 .ok_or_else(|| anyhow!("failed to read path after update"))
1053 })
1054 })
1055 }
1056
1057 pub fn share(&mut self, project_id: u64, cx: &mut ModelContext<Worktree>) -> Task<Result<()>> {
1058 let (share_tx, share_rx) = oneshot::channel();
1059
1060 if let Some(share) = self.share.as_mut() {
1061 let _ = share_tx.send(());
1062 *share.resume_updates.borrow_mut() = ();
1063 } else {
1064 let (snapshots_tx, mut snapshots_rx) = watch::channel_with(self.snapshot());
1065 let (resume_updates_tx, mut resume_updates_rx) = watch::channel();
1066 let worktree_id = cx.model_id() as u64;
1067
1068 for (path, summaries) in &self.diagnostic_summaries {
1069 for (&server_id, summary) in summaries {
1070 if let Err(e) = self.client.send(proto::UpdateDiagnosticSummary {
1071 project_id,
1072 worktree_id,
1073 summary: Some(summary.to_proto(server_id, &path)),
1074 }) {
1075 return Task::ready(Err(e));
1076 }
1077 }
1078 }
1079
1080 let _maintain_remote_snapshot = cx.background().spawn({
1081 let client = self.client.clone();
1082 async move {
1083 let mut share_tx = Some(share_tx);
1084 let mut prev_snapshot = LocalSnapshot {
1085 ignores_by_parent_abs_path: Default::default(),
1086 removed_entry_ids: Default::default(),
1087 next_entry_id: Default::default(),
1088 git_repositories: Default::default(),
1089 snapshot: Snapshot {
1090 id: WorktreeId(worktree_id as usize),
1091 abs_path: Path::new("").into(),
1092 root_name: Default::default(),
1093 root_char_bag: Default::default(),
1094 entries_by_path: Default::default(),
1095 entries_by_id: Default::default(),
1096 repository_entries: Default::default(),
1097 scan_id: 0,
1098 completed_scan_id: 0,
1099 },
1100 };
1101 while let Some(snapshot) = snapshots_rx.recv().await {
1102 #[cfg(any(test, feature = "test-support"))]
1103 const MAX_CHUNK_SIZE: usize = 2;
1104 #[cfg(not(any(test, feature = "test-support")))]
1105 const MAX_CHUNK_SIZE: usize = 256;
1106
1107 let update =
1108 snapshot.build_update(&prev_snapshot, project_id, worktree_id, true);
1109 for update in proto::split_worktree_update(update, MAX_CHUNK_SIZE) {
1110 let _ = resume_updates_rx.try_recv();
1111 while let Err(error) = client.request(update.clone()).await {
1112 log::error!("failed to send worktree update: {}", error);
1113 log::info!("waiting to resume updates");
1114 if resume_updates_rx.next().await.is_none() {
1115 return Ok(());
1116 }
1117 }
1118 }
1119
1120 if let Some(share_tx) = share_tx.take() {
1121 let _ = share_tx.send(());
1122 }
1123
1124 prev_snapshot = snapshot;
1125 }
1126
1127 Ok::<_, anyhow::Error>(())
1128 }
1129 .log_err()
1130 });
1131
1132 self.share = Some(ShareState {
1133 project_id,
1134 snapshots_tx,
1135 resume_updates: resume_updates_tx,
1136 _maintain_remote_snapshot,
1137 });
1138 }
1139
1140 cx.foreground()
1141 .spawn(async move { share_rx.await.map_err(|_| anyhow!("share ended")) })
1142 }
1143
1144 pub fn unshare(&mut self) {
1145 self.share.take();
1146 }
1147
1148 pub fn is_shared(&self) -> bool {
1149 self.share.is_some()
1150 }
1151
1152 pub fn load_index_text(
1153 &self,
1154 repo: RepositoryEntry,
1155 repo_path: RepoPath,
1156 cx: &mut ModelContext<Worktree>,
1157 ) -> Task<Option<String>> {
1158 let Some(git_ptr) = self.git_repositories.get(&repo.work_directory_id).map(|git_ptr| git_ptr.to_owned()) else {
1159 return Task::Ready(Some(None))
1160 };
1161 let git_ptr = git_ptr.repo_ptr;
1162
1163 cx.background()
1164 .spawn(async move { git_ptr.lock().load_index_text(&repo_path) })
1165 }
1166}
1167
impl RemoteWorktree {
    /// Returns a copy of the current snapshot of this worktree.
    fn snapshot(&self) -> Snapshot {
        self.snapshot.clone()
    }

    /// Marks this worktree as disconnected: stops accepting remote updates
    /// and cancels any pending waits for future snapshots.
    pub fn disconnected_from_host(&mut self) {
        // Dropping the sender lets the update-consuming task run to completion.
        self.updates_tx.take();
        // Dropping the oneshot senders wakes all `wait_for_snapshot` futures
        // with an error.
        self.snapshot_subscriptions.clear();
        self.disconnected = true;
    }

    /// Asks the host to save the given buffer, then applies the returned
    /// version, fingerprint, and mtime to the local buffer.
    pub fn save_buffer(
        &self,
        buffer_handle: ModelHandle<Buffer>,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<(clock::Global, RopeFingerprint, SystemTime)>> {
        let buffer = buffer_handle.read(cx);
        let buffer_id = buffer.remote_id();
        let version = buffer.version();
        let rpc = self.client.clone();
        let project_id = self.project_id;
        cx.as_mut().spawn(|mut cx| async move {
            let response = rpc
                .request(proto::SaveBuffer {
                    project_id,
                    buffer_id,
                    version: serialize_version(&version),
                })
                .await?;
            let version = deserialize_version(&response.version);
            let fingerprint = deserialize_fingerprint(&response.fingerprint)?;
            let mtime = response
                .mtime
                .ok_or_else(|| anyhow!("missing mtime"))?
                .into();

            // Record the save on the buffer itself before reporting back.
            buffer_handle.update(&mut cx, |buffer, cx| {
                buffer.did_save(version.clone(), fingerprint, mtime, cx);
            });

            Ok((version, fingerprint, mtime))
        })
    }

    /// Queues a worktree update received from the host. Silently ignored
    /// after `disconnected_from_host` has dropped the channel.
    pub fn update_from_remote(&mut self, update: proto::UpdateWorktree) {
        if let Some(updates_tx) = &self.updates_tx {
            updates_tx
                .unbounded_send(update)
                .expect("consumer runs to completion");
        }
    }

    /// Whether a snapshot with at least the given scan id has been fully
    /// applied to this worktree.
    fn observed_snapshot(&self, scan_id: usize) -> bool {
        self.completed_scan_id >= scan_id
    }

    /// Returns a future that resolves once a snapshot with the given scan id
    /// has been observed, or errors if the worktree disconnects first.
    fn wait_for_snapshot(&mut self, scan_id: usize) -> impl Future<Output = Result<()>> {
        let (tx, rx) = oneshot::channel();
        if self.observed_snapshot(scan_id) {
            // Already observed; resolve immediately.
            let _ = tx.send(());
        } else if self.disconnected {
            // Dropping the sender makes `rx.await` below return an error.
            drop(tx);
        } else {
            // Keep subscriptions sorted by scan id so they can be drained in
            // order as snapshots arrive. `Ok` and `Err` both yield the right
            // insertion point.
            match self
                .snapshot_subscriptions
                .binary_search_by_key(&scan_id, |probe| probe.0)
            {
                Ok(ix) | Err(ix) => self.snapshot_subscriptions.insert(ix, (scan_id, tx)),
            }
        }

        async move {
            rx.await?;
            Ok(())
        }
    }

    /// Applies a diagnostic summary received from the host for `path`,
    /// removing the record entirely when the summary is empty.
    pub fn update_diagnostic_summary(
        &mut self,
        path: Arc<Path>,
        summary: &proto::DiagnosticSummary,
    ) {
        let server_id = LanguageServerId(summary.language_server_id as usize);
        let summary = DiagnosticSummary {
            error_count: summary.error_count as usize,
            warning_count: summary.warning_count as usize,
        };

        if summary.is_empty() {
            // No diagnostics left from this server; also drop the path entry
            // once no server has diagnostics for it.
            if let Some(summaries) = self.diagnostic_summaries.get_mut(&path) {
                summaries.remove(&server_id);
                if summaries.is_empty() {
                    self.diagnostic_summaries.remove(&path);
                }
            }
        } else {
            self.diagnostic_summaries
                .entry(path)
                .or_default()
                .insert(server_id, summary);
        }
    }

    /// Inserts an entry received from the host once the snapshot for
    /// `scan_id` has been observed, keeping the foreground snapshot in sync
    /// with the background one.
    pub fn insert_entry(
        &mut self,
        entry: proto::Entry,
        scan_id: usize,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<Entry>> {
        let wait_for_snapshot = self.wait_for_snapshot(scan_id);
        cx.spawn(|this, mut cx| async move {
            wait_for_snapshot.await?;
            this.update(&mut cx, |worktree, _| {
                let worktree = worktree.as_remote_mut().unwrap();
                let mut snapshot = worktree.background_snapshot.lock();
                let entry = snapshot.insert_entry(entry);
                worktree.snapshot = snapshot.clone();
                entry
            })
        })
    }

    /// Deletes an entry once the snapshot for `scan_id` has been observed,
    /// mirroring the change into the foreground snapshot.
    pub(crate) fn delete_entry(
        &mut self,
        id: ProjectEntryId,
        scan_id: usize,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<()>> {
        let wait_for_snapshot = self.wait_for_snapshot(scan_id);
        cx.spawn(|this, mut cx| async move {
            wait_for_snapshot.await?;
            this.update(&mut cx, |worktree, _| {
                let worktree = worktree.as_remote_mut().unwrap();
                let mut snapshot = worktree.background_snapshot.lock();
                snapshot.delete_entry(id);
                worktree.snapshot = snapshot.clone();
            });
            Ok(())
        })
    }
}
1309
1310impl Snapshot {
1311 pub fn id(&self) -> WorktreeId {
1312 self.id
1313 }
1314
1315 pub fn abs_path(&self) -> &Arc<Path> {
1316 &self.abs_path
1317 }
1318
1319 pub fn contains_entry(&self, entry_id: ProjectEntryId) -> bool {
1320 self.entries_by_id.get(&entry_id, &()).is_some()
1321 }
1322
1323 pub(crate) fn insert_entry(&mut self, entry: proto::Entry) -> Result<Entry> {
1324 let entry = Entry::try_from((&self.root_char_bag, entry))?;
1325 let old_entry = self.entries_by_id.insert_or_replace(
1326 PathEntry {
1327 id: entry.id,
1328 path: entry.path.clone(),
1329 is_ignored: entry.is_ignored,
1330 scan_id: 0,
1331 },
1332 &(),
1333 );
1334 if let Some(old_entry) = old_entry {
1335 self.entries_by_path.remove(&PathKey(old_entry.path), &());
1336 }
1337 self.entries_by_path.insert_or_replace(entry.clone(), &());
1338 Ok(entry)
1339 }
1340
1341 fn delete_entry(&mut self, entry_id: ProjectEntryId) -> Option<Arc<Path>> {
1342 let removed_entry = self.entries_by_id.remove(&entry_id, &())?;
1343 self.entries_by_path = {
1344 let mut cursor = self.entries_by_path.cursor();
1345 let mut new_entries_by_path =
1346 cursor.slice(&TraversalTarget::Path(&removed_entry.path), Bias::Left, &());
1347 while let Some(entry) = cursor.item() {
1348 if entry.path.starts_with(&removed_entry.path) {
1349 self.entries_by_id.remove(&entry.id, &());
1350 cursor.next(&());
1351 } else {
1352 break;
1353 }
1354 }
1355 new_entries_by_path.push_tree(cursor.suffix(&()), &());
1356 new_entries_by_path
1357 };
1358
1359 Some(removed_entry.path)
1360 }
1361
1362 pub(crate) fn apply_remote_update(&mut self, mut update: proto::UpdateWorktree) -> Result<()> {
1363 let mut entries_by_path_edits = Vec::new();
1364 let mut entries_by_id_edits = Vec::new();
1365 for entry_id in update.removed_entries {
1366 if let Some(entry) = self.entry_for_id(ProjectEntryId::from_proto(entry_id)) {
1367 entries_by_path_edits.push(Edit::Remove(PathKey(entry.path.clone())));
1368 entries_by_id_edits.push(Edit::Remove(entry.id));
1369 }
1370 }
1371
1372 for entry in update.updated_entries {
1373 let entry = Entry::try_from((&self.root_char_bag, entry))?;
1374 if let Some(PathEntry { path, .. }) = self.entries_by_id.get(&entry.id, &()) {
1375 entries_by_path_edits.push(Edit::Remove(PathKey(path.clone())));
1376 }
1377 entries_by_id_edits.push(Edit::Insert(PathEntry {
1378 id: entry.id,
1379 path: entry.path.clone(),
1380 is_ignored: entry.is_ignored,
1381 scan_id: 0,
1382 }));
1383 entries_by_path_edits.push(Edit::Insert(entry));
1384 }
1385
1386 self.entries_by_path.edit(entries_by_path_edits, &());
1387 self.entries_by_id.edit(entries_by_id_edits, &());
1388
1389 update.removed_repositories.sort_unstable();
1390 self.repository_entries.retain(|_, entry| {
1391 if let Ok(_) = update
1392 .removed_repositories
1393 .binary_search(&entry.dot_git_entry_id.to_proto())
1394 {
1395 false
1396 } else {
1397 true
1398 }
1399 });
1400
1401 for repository in update.updated_repositories {
1402 let repository = RepositoryEntry {
1403 dot_git_entry_id: ProjectEntryId::from_proto(repository.dot_git_entry_id),
1404 work_directory: RepositoryWorkDirectory(
1405 Path::new(&repository.work_directory).into(),
1406 ),
1407 scan_id: repository.scan_id as usize,
1408 branch: repository.branch.map(Into::into),
1409 };
1410 self.repository_entries
1411 .insert(repository.work_directory.clone(), repository)
1412 }
1413
1414 self.scan_id = update.scan_id as usize;
1415 if update.is_last_update {
1416 self.completed_scan_id = update.scan_id as usize;
1417 }
1418
1419 Ok(())
1420 }
1421
1422 pub fn file_count(&self) -> usize {
1423 self.entries_by_path.summary().file_count
1424 }
1425
1426 pub fn visible_file_count(&self) -> usize {
1427 self.entries_by_path.summary().visible_file_count
1428 }
1429
1430 fn traverse_from_offset(
1431 &self,
1432 include_dirs: bool,
1433 include_ignored: bool,
1434 start_offset: usize,
1435 ) -> Traversal {
1436 let mut cursor = self.entries_by_path.cursor();
1437 cursor.seek(
1438 &TraversalTarget::Count {
1439 count: start_offset,
1440 include_dirs,
1441 include_ignored,
1442 },
1443 Bias::Right,
1444 &(),
1445 );
1446 Traversal {
1447 cursor,
1448 include_dirs,
1449 include_ignored,
1450 }
1451 }
1452
1453 fn traverse_from_path(
1454 &self,
1455 include_dirs: bool,
1456 include_ignored: bool,
1457 path: &Path,
1458 ) -> Traversal {
1459 let mut cursor = self.entries_by_path.cursor();
1460 cursor.seek(&TraversalTarget::Path(path), Bias::Left, &());
1461 Traversal {
1462 cursor,
1463 include_dirs,
1464 include_ignored,
1465 }
1466 }
1467
1468 pub fn files(&self, include_ignored: bool, start: usize) -> Traversal {
1469 self.traverse_from_offset(false, include_ignored, start)
1470 }
1471
1472 pub fn entries(&self, include_ignored: bool) -> Traversal {
1473 self.traverse_from_offset(true, include_ignored, 0)
1474 }
1475
1476 pub fn repositories(&self) -> impl Iterator<Item = &RepositoryEntry> {
1477 self.repository_entries.values()
1478 }
1479
1480 pub fn paths(&self) -> impl Iterator<Item = &Arc<Path>> {
1481 let empty_path = Path::new("");
1482 self.entries_by_path
1483 .cursor::<()>()
1484 .filter(move |entry| entry.path.as_ref() != empty_path)
1485 .map(|entry| &entry.path)
1486 }
1487
1488 fn child_entries<'a>(&'a self, parent_path: &'a Path) -> ChildEntriesIter<'a> {
1489 let mut cursor = self.entries_by_path.cursor();
1490 cursor.seek(&TraversalTarget::Path(parent_path), Bias::Right, &());
1491 let traversal = Traversal {
1492 cursor,
1493 include_dirs: true,
1494 include_ignored: true,
1495 };
1496 ChildEntriesIter {
1497 traversal,
1498 parent_path,
1499 }
1500 }
1501
1502 pub fn root_entry(&self) -> Option<&Entry> {
1503 self.entry_for_path("")
1504 }
1505
1506 pub fn root_name(&self) -> &str {
1507 &self.root_name
1508 }
1509
1510 pub fn root_git_entry(&self) -> Option<RepositoryEntry> {
1511 self.repository_entries
1512 .get(&"".into())
1513 .map(|entry| entry.to_owned())
1514 }
1515
1516 pub fn git_entries(&self) -> impl Iterator<Item = &RepositoryEntry> {
1517 self.repository_entries.values()
1518 }
1519
1520 pub fn scan_id(&self) -> usize {
1521 self.scan_id
1522 }
1523
1524 pub fn entry_for_path(&self, path: impl AsRef<Path>) -> Option<&Entry> {
1525 let path = path.as_ref();
1526 self.traverse_from_path(true, true, path)
1527 .entry()
1528 .and_then(|entry| {
1529 if entry.path.as_ref() == path {
1530 Some(entry)
1531 } else {
1532 None
1533 }
1534 })
1535 }
1536
1537 pub fn entry_for_id(&self, id: ProjectEntryId) -> Option<&Entry> {
1538 let entry = self.entries_by_id.get(&id, &())?;
1539 self.entry_for_path(&entry.path)
1540 }
1541
1542 pub fn inode_for_path(&self, path: impl AsRef<Path>) -> Option<u64> {
1543 self.entry_for_path(path.as_ref()).map(|e| e.inode)
1544 }
1545}
1546
impl LocalSnapshot {
    /// Returns the repository whose work directory contains `path`,
    /// preferring the longest (deepest) matching work directory.
    ///
    /// NOTE(review): the `else break` assumes that once work-directory
    /// lengths stop growing, no later entry can match — this relies on the
    /// iteration order of `repository_entries`; confirm against `TreeMap`'s
    /// key ordering.
    pub(crate) fn repo_for(&self, path: &Path) -> Option<RepositoryEntry> {
        let mut max_len = 0;
        let mut current_candidate = None;
        for (work_directory, repo) in (&self.repository_entries).iter() {
            if work_directory.contains(path) {
                if work_directory.0.as_os_str().len() >= max_len {
                    current_candidate = Some(repo);
                    max_len = work_directory.0.as_os_str().len();
                } else {
                    break;
                }
            }
        }

        current_candidate.map(|entry| entry.to_owned())
    }

    /// Given a path inside some repository's `.git` directory, returns that
    /// repository's work directory along with a handle to the repository.
    pub(crate) fn repo_for_metadata(
        &self,
        path: &Path,
    ) -> Option<(RepositoryWorkDirectory, Arc<Mutex<dyn GitRepository>>)> {
        // Find the live repository whose .git dir contains `path`…
        let (entry_id, local_repo) = self
            .git_repositories
            .iter()
            .find(|(_, repo)| repo.in_dot_git(path))?;

        // …then map its entry id back to the recorded work directory.
        let work_dir = self
            .snapshot
            .repository_entries
            .iter()
            .find(|(_, entry)| entry.dot_git_entry_id == *entry_id)
            .map(|(_, entry)| entry.work_directory.to_owned())?;

        Some((work_dir, local_repo.repo_ptr.to_owned()))
    }

    /// Builds an update message containing this snapshot's entire contents,
    /// as if sent to a client that has seen nothing yet. Test-only.
    #[cfg(test)]
    pub(crate) fn build_initial_update(&self, project_id: u64) -> proto::UpdateWorktree {
        let root_name = self.root_name.clone();
        proto::UpdateWorktree {
            project_id,
            worktree_id: self.id().to_proto(),
            abs_path: self.abs_path().to_string_lossy().into(),
            root_name,
            updated_entries: self.entries_by_path.iter().map(Into::into).collect(),
            removed_entries: Default::default(),
            scan_id: self.scan_id as u64,
            is_last_update: true,
            updated_repositories: self.repository_entries.values().map(Into::into).collect(),
            removed_repositories: Default::default(),
        }
    }

    /// Computes the delta between this snapshot and an older one (`other`)
    /// as an update message: entries/repositories added, changed (detected
    /// via scan id), or removed.
    pub(crate) fn build_update(
        &self,
        other: &Self,
        project_id: u64,
        worktree_id: u64,
        include_ignored: bool,
    ) -> proto::UpdateWorktree {
        let mut updated_entries = Vec::new();
        let mut removed_entries = Vec::new();
        // Merge-join the two id-ordered entry sequences.
        let mut self_entries = self
            .entries_by_id
            .cursor::<()>()
            .filter(|e| include_ignored || !e.is_ignored)
            .peekable();
        let mut other_entries = other
            .entries_by_id
            .cursor::<()>()
            .filter(|e| include_ignored || !e.is_ignored)
            .peekable();
        loop {
            match (self_entries.peek(), other_entries.peek()) {
                (Some(self_entry), Some(other_entry)) => {
                    match Ord::cmp(&self_entry.id, &other_entry.id) {
                        // Only in `self`: newly added entry.
                        Ordering::Less => {
                            let entry = self.entry_for_id(self_entry.id).unwrap().into();
                            updated_entries.push(entry);
                            self_entries.next();
                        }
                        // In both: changed only if its scan id moved.
                        Ordering::Equal => {
                            if self_entry.scan_id != other_entry.scan_id {
                                let entry = self.entry_for_id(self_entry.id).unwrap().into();
                                updated_entries.push(entry);
                            }

                            self_entries.next();
                            other_entries.next();
                        }
                        // Only in `other`: entry was removed.
                        Ordering::Greater => {
                            removed_entries.push(other_entry.id.to_proto());
                            other_entries.next();
                        }
                    }
                }
                (Some(self_entry), None) => {
                    let entry = self.entry_for_id(self_entry.id).unwrap().into();
                    updated_entries.push(entry);
                    self_entries.next();
                }
                (None, Some(other_entry)) => {
                    removed_entries.push(other_entry.id.to_proto());
                    other_entries.next();
                }
                (None, None) => break,
            }
        }

        // Same merge-join, this time over work-directory-ordered repositories.
        let mut updated_repositories: Vec<proto::RepositoryEntry> = Vec::new();
        let mut removed_repositories = Vec::new();
        let mut self_repos = self.snapshot.repository_entries.values().peekable();
        let mut other_repos = other.snapshot.repository_entries.values().peekable();
        loop {
            match (self_repos.peek(), other_repos.peek()) {
                (Some(self_repo), Some(other_repo)) => {
                    match Ord::cmp(&self_repo.work_directory, &other_repo.work_directory) {
                        Ordering::Less => {
                            updated_repositories.push((*self_repo).into());
                            self_repos.next();
                        }
                        Ordering::Equal => {
                            if self_repo.scan_id != other_repo.scan_id {
                                updated_repositories.push((*self_repo).into());
                            }

                            self_repos.next();
                            other_repos.next();
                        }
                        Ordering::Greater => {
                            removed_repositories.push(other_repo.dot_git_entry_id.to_proto());
                            other_repos.next();
                        }
                    }
                }
                (Some(self_repo), None) => {
                    updated_repositories.push((*self_repo).into());
                    self_repos.next();
                }
                (None, Some(other_repo)) => {
                    removed_repositories.push(other_repo.dot_git_entry_id.to_proto());
                    other_repos.next();
                }
                (None, None) => break,
            }
        }

        proto::UpdateWorktree {
            project_id,
            worktree_id,
            abs_path: self.abs_path().to_string_lossy().into(),
            root_name: self.root_name().to_string(),
            updated_entries,
            removed_entries,
            scan_id: self.scan_id as u64,
            is_last_update: self.completed_scan_id == self.scan_id,
            updated_repositories,
            removed_repositories,
        }
    }

    /// Inserts a single entry discovered on disk, reusing the id of any
    /// existing entry at the same path and reloading `.gitignore` contents
    /// when the entry is a gitignore file.
    fn insert_entry(&mut self, mut entry: Entry, fs: &dyn Fs) -> Entry {
        if entry.is_file() && entry.path.file_name() == Some(&GITIGNORE) {
            let abs_path = self.abs_path.join(&entry.path);
            // Parse the ignore file synchronously so the ignore state is
            // current before the entry lands in the snapshot.
            match smol::block_on(build_gitignore(&abs_path, fs)) {
                Ok(ignore) => {
                    self.ignores_by_parent_abs_path.insert(
                        abs_path.parent().unwrap().into(),
                        (Arc::new(ignore), self.scan_id),
                    );
                }
                Err(error) => {
                    log::error!(
                        "error loading .gitignore file {:?} - {:?}",
                        &entry.path,
                        error
                    );
                }
            }
        }

        self.reuse_entry_id(&mut entry);

        // Don't demote an already-scanned directory back to pending.
        if entry.kind == EntryKind::PendingDir {
            if let Some(existing_entry) =
                self.entries_by_path.get(&PathKey(entry.path.clone()), &())
            {
                entry.kind = existing_entry.kind;
            }
        }

        let scan_id = self.scan_id;
        let removed = self.entries_by_path.insert_or_replace(entry.clone(), &());
        // If a different entry previously occupied this path, drop its id
        // record so the two trees stay consistent.
        if let Some(removed) = removed {
            if removed.id != entry.id {
                self.entries_by_id.remove(&removed.id, &());
            }
        }
        self.entries_by_id.insert_or_replace(
            PathEntry {
                id: entry.id,
                path: entry.path.clone(),
                is_ignored: entry.is_ignored,
                scan_id,
            },
            &(),
        );

        entry
    }

    /// Records the children of a just-scanned directory, flipping it from
    /// pending to scanned, registering its `.gitignore` (if any), and —
    /// when the directory is a `.git` dir — registering the repository.
    fn populate_dir(
        &mut self,
        parent_path: Arc<Path>,
        entries: impl IntoIterator<Item = Entry>,
        ignore: Option<Arc<Gitignore>>,
        fs: &dyn Fs,
    ) {
        let mut parent_entry = if let Some(parent_entry) =
            self.entries_by_path.get(&PathKey(parent_path.clone()), &())
        {
            parent_entry.clone()
        } else {
            // The directory was deleted between being queued and scanned.
            log::warn!(
                "populating a directory {:?} that has been removed",
                parent_path
            );
            return;
        };

        match parent_entry.kind {
            EntryKind::PendingDir => {
                parent_entry.kind = EntryKind::Dir;
            }
            EntryKind::Dir => {}
            // Not a directory anymore (e.g. replaced by a file): bail.
            _ => return,
        }

        if let Some(ignore) = ignore {
            self.ignores_by_parent_abs_path.insert(
                self.abs_path.join(&parent_path).into(),
                (ignore, self.scan_id),
            );
        }

        if parent_path.file_name() == Some(&DOT_GIT) {
            let abs_path = self.abs_path.join(&parent_path);
            // The repository's work directory is the parent of its .git dir.
            let content_path: Arc<Path> = parent_path.parent().unwrap().into();

            let key = RepositoryWorkDirectory(content_path.clone());
            if self.repository_entries.get(&key).is_none() {
                if let Some(repo) = fs.open_repo(abs_path.as_path()) {
                    let repo_lock = repo.lock();
                    self.repository_entries.insert(
                        key.clone(),
                        RepositoryEntry {
                            dot_git_entry_id: parent_entry.id,
                            work_directory: key,
                            scan_id: 0,
                            branch: repo_lock.branch_name().map(Into::into),
                        },
                    );
                    drop(repo_lock);

                    self.git_repositories.insert(
                        parent_entry.id,
                        LocalRepositoryEntry {
                            repo_ptr: repo,
                            git_dir_path: parent_path.clone(),
                        },
                    )
                }
            }
        }

        // Re-insert the parent (now marked scanned) together with all of its
        // children in a single batched edit per tree.
        let mut entries_by_path_edits = vec![Edit::Insert(parent_entry)];
        let mut entries_by_id_edits = Vec::new();

        for mut entry in entries {
            self.reuse_entry_id(&mut entry);
            entries_by_id_edits.push(Edit::Insert(PathEntry {
                id: entry.id,
                path: entry.path.clone(),
                is_ignored: entry.is_ignored,
                scan_id: self.scan_id,
            }));
            entries_by_path_edits.push(Edit::Insert(entry));
        }

        self.entries_by_path.edit(entries_by_path_edits, &());
        self.entries_by_id.edit(entries_by_id_edits, &());
    }

    /// Keeps entry ids stable across rescans: prefer the id of an entry with
    /// the same inode that was recently removed (a rename/move), otherwise
    /// the id of whatever entry currently occupies the same path.
    fn reuse_entry_id(&mut self, entry: &mut Entry) {
        if let Some(removed_entry_id) = self.removed_entry_ids.remove(&entry.inode) {
            entry.id = removed_entry_id;
        } else if let Some(existing_entry) = self.entry_for_path(&entry.path) {
            entry.id = existing_entry.id;
        }
    }

    /// Removes the entry at `path` and everything beneath it, remembering
    /// removed ids by inode so they can be reused if the file reappears.
    fn remove_path(&mut self, path: &Path) {
        let mut new_entries;
        let removed_entries;
        {
            // Split the tree into [before `path`], [`path` and descendants],
            // and [after], keeping the outer two.
            let mut cursor = self.entries_by_path.cursor::<TraversalProgress>();
            new_entries = cursor.slice(&TraversalTarget::Path(path), Bias::Left, &());
            removed_entries = cursor.slice(&TraversalTarget::PathSuccessor(path), Bias::Left, &());
            new_entries.push_tree(cursor.suffix(&()), &());
        }
        self.entries_by_path = new_entries;

        let mut entries_by_id_edits = Vec::new();
        for entry in removed_entries.cursor::<()>() {
            // Track the highest removed id per inode for potential reuse.
            let removed_entry_id = self
                .removed_entry_ids
                .entry(entry.inode)
                .or_insert(entry.id);
            *removed_entry_id = cmp::max(*removed_entry_id, entry.id);
            entries_by_id_edits.push(Edit::Remove(entry.id));
        }
        self.entries_by_id.edit(entries_by_id_edits, &());

        if path.file_name() == Some(&GITIGNORE) {
            // Mark the parent's ignore data as stale for this scan.
            let abs_parent_path = self.abs_path.join(path.parent().unwrap());
            if let Some((_, scan_id)) = self
                .ignores_by_parent_abs_path
                .get_mut(abs_parent_path.as_path())
            {
                *scan_id = self.snapshot.scan_id;
            }
        } else if path.file_name() == Some(&DOT_GIT) {
            // Mark the repository rooted at the parent as changed.
            let repo_entry_key = RepositoryWorkDirectory(path.parent().unwrap().into());
            self.snapshot
                .repository_entries
                .update(&repo_entry_key, |repo| repo.scan_id = self.snapshot.scan_id);
        }
    }

    /// Collects the inodes of all known ancestors of `path` (excluding
    /// `path` itself), used for symlink-cycle detection during scanning.
    fn ancestor_inodes_for_path(&self, path: &Path) -> TreeSet<u64> {
        let mut inodes = TreeSet::default();
        for ancestor in path.ancestors().skip(1) {
            if let Some(entry) = self.entry_for_path(ancestor) {
                inodes.insert(entry.inode);
            }
        }
        inodes
    }

    /// Builds the stack of gitignores that applies to `abs_path`, walking
    /// from the root down and short-circuiting to "ignore everything" as
    /// soon as an ancestor directory is itself ignored.
    fn ignore_stack_for_abs_path(&self, abs_path: &Path, is_dir: bool) -> Arc<IgnoreStack> {
        let mut new_ignores = Vec::new();
        // Collect ancestors innermost-first; reversed below to outermost-first.
        for ancestor in abs_path.ancestors().skip(1) {
            if let Some((ignore, _)) = self.ignores_by_parent_abs_path.get(ancestor) {
                new_ignores.push((ancestor, Some(ignore.clone())));
            } else {
                new_ignores.push((ancestor, None));
            }
        }

        let mut ignore_stack = IgnoreStack::none();
        for (parent_abs_path, ignore) in new_ignores.into_iter().rev() {
            if ignore_stack.is_abs_path_ignored(parent_abs_path, true) {
                ignore_stack = IgnoreStack::all();
                break;
            } else if let Some(ignore) = ignore {
                ignore_stack = ignore_stack.append(parent_abs_path.into(), ignore);
            }
        }

        if ignore_stack.is_abs_path_ignored(abs_path, is_dir) {
            ignore_stack = IgnoreStack::all();
        }

        ignore_stack
    }
}
1924
1925async fn build_gitignore(abs_path: &Path, fs: &dyn Fs) -> Result<Gitignore> {
1926 let contents = fs.load(abs_path).await?;
1927 let parent = abs_path.parent().unwrap_or_else(|| Path::new("/"));
1928 let mut builder = GitignoreBuilder::new(parent);
1929 for line in contents.lines() {
1930 builder.add_line(Some(abs_path.into()), line)?;
1931 }
1932 Ok(builder.build()?)
1933}
1934
1935impl WorktreeId {
1936 pub fn from_usize(handle_id: usize) -> Self {
1937 Self(handle_id)
1938 }
1939
1940 pub(crate) fn from_proto(id: u64) -> Self {
1941 Self(id as usize)
1942 }
1943
1944 pub fn to_proto(&self) -> u64 {
1945 self.0 as u64
1946 }
1947
1948 pub fn to_usize(&self) -> usize {
1949 self.0
1950 }
1951}
1952
1953impl fmt::Display for WorktreeId {
1954 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1955 self.0.fmt(f)
1956 }
1957}
1958
1959impl Deref for Worktree {
1960 type Target = Snapshot;
1961
1962 fn deref(&self) -> &Self::Target {
1963 match self {
1964 Worktree::Local(worktree) => &worktree.snapshot,
1965 Worktree::Remote(worktree) => &worktree.snapshot,
1966 }
1967 }
1968}
1969
1970impl Deref for LocalWorktree {
1971 type Target = LocalSnapshot;
1972
1973 fn deref(&self) -> &Self::Target {
1974 &self.snapshot
1975 }
1976}
1977
1978impl Deref for RemoteWorktree {
1979 type Target = Snapshot;
1980
1981 fn deref(&self) -> &Self::Target {
1982 &self.snapshot
1983 }
1984}
1985
1986impl fmt::Debug for LocalWorktree {
1987 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1988 self.snapshot.fmt(f)
1989 }
1990}
1991
1992impl fmt::Debug for Snapshot {
1993 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1994 struct EntriesById<'a>(&'a SumTree<PathEntry>);
1995 struct EntriesByPath<'a>(&'a SumTree<Entry>);
1996
1997 impl<'a> fmt::Debug for EntriesByPath<'a> {
1998 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1999 f.debug_map()
2000 .entries(self.0.iter().map(|entry| (&entry.path, entry.id)))
2001 .finish()
2002 }
2003 }
2004
2005 impl<'a> fmt::Debug for EntriesById<'a> {
2006 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2007 f.debug_list().entries(self.0.iter()).finish()
2008 }
2009 }
2010
2011 f.debug_struct("Snapshot")
2012 .field("id", &self.id)
2013 .field("root_name", &self.root_name)
2014 .field("entries_by_path", &EntriesByPath(&self.entries_by_path))
2015 .field("entries_by_id", &EntriesById(&self.entries_by_id))
2016 .finish()
2017 }
2018}
2019
/// A handle to a file (or deleted file) within a worktree, implementing
/// `language::File` so buffers can refer back to their backing worktree.
#[derive(Clone, PartialEq)]
pub struct File {
    /// The worktree containing this file.
    pub worktree: ModelHandle<Worktree>,
    /// Path of the file, relative to the worktree root.
    pub path: Arc<Path>,
    /// Last-known modification time of the file.
    pub mtime: SystemTime,
    /// Id of the corresponding worktree entry.
    pub(crate) entry_id: ProjectEntryId,
    /// Whether the worktree is local (vs. a remote collaborator's).
    pub(crate) is_local: bool,
    /// Whether the file has been deleted from disk.
    pub(crate) is_deleted: bool,
}
2029
2030impl language::File for File {
2031 fn as_local(&self) -> Option<&dyn language::LocalFile> {
2032 if self.is_local {
2033 Some(self)
2034 } else {
2035 None
2036 }
2037 }
2038
2039 fn mtime(&self) -> SystemTime {
2040 self.mtime
2041 }
2042
2043 fn path(&self) -> &Arc<Path> {
2044 &self.path
2045 }
2046
2047 fn full_path(&self, cx: &AppContext) -> PathBuf {
2048 let mut full_path = PathBuf::new();
2049 let worktree = self.worktree.read(cx);
2050
2051 if worktree.is_visible() {
2052 full_path.push(worktree.root_name());
2053 } else {
2054 let path = worktree.abs_path();
2055
2056 if worktree.is_local() && path.starts_with(HOME.as_path()) {
2057 full_path.push("~");
2058 full_path.push(path.strip_prefix(HOME.as_path()).unwrap());
2059 } else {
2060 full_path.push(path)
2061 }
2062 }
2063
2064 if self.path.components().next().is_some() {
2065 full_path.push(&self.path);
2066 }
2067
2068 full_path
2069 }
2070
2071 /// Returns the last component of this handle's absolute path. If this handle refers to the root
2072 /// of its worktree, then this method will return the name of the worktree itself.
2073 fn file_name<'a>(&'a self, cx: &'a AppContext) -> &'a OsStr {
2074 self.path
2075 .file_name()
2076 .unwrap_or_else(|| OsStr::new(&self.worktree.read(cx).root_name))
2077 }
2078
2079 fn is_deleted(&self) -> bool {
2080 self.is_deleted
2081 }
2082
2083 fn as_any(&self) -> &dyn Any {
2084 self
2085 }
2086
2087 fn to_proto(&self) -> rpc::proto::File {
2088 rpc::proto::File {
2089 worktree_id: self.worktree.id() as u64,
2090 entry_id: self.entry_id.to_proto(),
2091 path: self.path.to_string_lossy().into(),
2092 mtime: Some(self.mtime.into()),
2093 is_deleted: self.is_deleted,
2094 }
2095 }
2096}
2097
2098impl language::LocalFile for File {
2099 fn abs_path(&self, cx: &AppContext) -> PathBuf {
2100 self.worktree
2101 .read(cx)
2102 .as_local()
2103 .unwrap()
2104 .abs_path
2105 .join(&self.path)
2106 }
2107
2108 fn load(&self, cx: &AppContext) -> Task<Result<String>> {
2109 let worktree = self.worktree.read(cx).as_local().unwrap();
2110 let abs_path = worktree.absolutize(&self.path);
2111 let fs = worktree.fs.clone();
2112 cx.background()
2113 .spawn(async move { fs.load(&abs_path).await })
2114 }
2115
2116 fn buffer_reloaded(
2117 &self,
2118 buffer_id: u64,
2119 version: &clock::Global,
2120 fingerprint: RopeFingerprint,
2121 line_ending: LineEnding,
2122 mtime: SystemTime,
2123 cx: &mut AppContext,
2124 ) {
2125 let worktree = self.worktree.read(cx).as_local().unwrap();
2126 if let Some(project_id) = worktree.share.as_ref().map(|share| share.project_id) {
2127 worktree
2128 .client
2129 .send(proto::BufferReloaded {
2130 project_id,
2131 buffer_id,
2132 version: serialize_version(version),
2133 mtime: Some(mtime.into()),
2134 fingerprint: serialize_fingerprint(fingerprint),
2135 line_ending: serialize_line_ending(line_ending) as i32,
2136 })
2137 .log_err();
2138 }
2139 }
2140}
2141
2142impl File {
2143 pub fn from_proto(
2144 proto: rpc::proto::File,
2145 worktree: ModelHandle<Worktree>,
2146 cx: &AppContext,
2147 ) -> Result<Self> {
2148 let worktree_id = worktree
2149 .read(cx)
2150 .as_remote()
2151 .ok_or_else(|| anyhow!("not remote"))?
2152 .id();
2153
2154 if worktree_id.to_proto() != proto.worktree_id {
2155 return Err(anyhow!("worktree id does not match file"));
2156 }
2157
2158 Ok(Self {
2159 worktree,
2160 path: Path::new(&proto.path).into(),
2161 mtime: proto.mtime.ok_or_else(|| anyhow!("no timestamp"))?.into(),
2162 entry_id: ProjectEntryId::from_proto(proto.entry_id),
2163 is_local: false,
2164 is_deleted: proto.is_deleted,
2165 })
2166 }
2167
2168 pub fn from_dyn(file: Option<&Arc<dyn language::File>>) -> Option<&Self> {
2169 file.and_then(|f| f.as_any().downcast_ref())
2170 }
2171
2172 pub fn worktree_id(&self, cx: &AppContext) -> WorktreeId {
2173 self.worktree.read(cx).id()
2174 }
2175
2176 pub fn project_entry_id(&self, _: &AppContext) -> Option<ProjectEntryId> {
2177 if self.is_deleted {
2178 None
2179 } else {
2180 Some(self.entry_id)
2181 }
2182 }
2183}
2184
/// A single file or directory within a worktree snapshot.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct Entry {
    /// Stable id that survives renames (see `reuse_entry_id`).
    pub id: ProjectEntryId,
    /// File, scanned directory, or not-yet-scanned directory.
    pub kind: EntryKind,
    /// Path relative to the worktree root.
    pub path: Arc<Path>,
    /// Filesystem inode of the entry.
    pub inode: u64,
    /// Last-known modification time.
    pub mtime: SystemTime,
    /// Whether the entry is a symbolic link.
    pub is_symlink: bool,
    /// Whether the entry is excluded by a gitignore rule.
    pub is_ignored: bool,
}
2195
/// The kind of a worktree entry.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum EntryKind {
    /// A directory whose children have not been scanned yet.
    PendingDir,
    /// A fully scanned directory.
    Dir,
    /// A file, with the set of characters in its path used for fuzzy matching.
    File(CharBag),
}
2202
/// Describes how a path changed between two snapshots.
#[derive(Clone, Copy, Debug)]
pub enum PathChange {
    /// The path did not exist before.
    Added,
    /// The path no longer exists.
    Removed,
    /// The path existed before and changed.
    Updated,
    /// The path exists now; whether it existed before is not known.
    AddedOrUpdated,
}
2210
2211impl Entry {
2212 fn new(
2213 path: Arc<Path>,
2214 metadata: &fs::Metadata,
2215 next_entry_id: &AtomicUsize,
2216 root_char_bag: CharBag,
2217 ) -> Self {
2218 Self {
2219 id: ProjectEntryId::new(next_entry_id),
2220 kind: if metadata.is_dir {
2221 EntryKind::PendingDir
2222 } else {
2223 EntryKind::File(char_bag_for_path(root_char_bag, &path))
2224 },
2225 path,
2226 inode: metadata.inode,
2227 mtime: metadata.mtime,
2228 is_symlink: metadata.is_symlink,
2229 is_ignored: false,
2230 }
2231 }
2232
2233 pub fn is_dir(&self) -> bool {
2234 matches!(self.kind, EntryKind::Dir | EntryKind::PendingDir)
2235 }
2236
2237 pub fn is_file(&self) -> bool {
2238 matches!(self.kind, EntryKind::File(_))
2239 }
2240}
2241
2242impl sum_tree::Item for Entry {
2243 type Summary = EntrySummary;
2244
2245 fn summary(&self) -> Self::Summary {
2246 let visible_count = if self.is_ignored { 0 } else { 1 };
2247 let file_count;
2248 let visible_file_count;
2249 if self.is_file() {
2250 file_count = 1;
2251 visible_file_count = visible_count;
2252 } else {
2253 file_count = 0;
2254 visible_file_count = 0;
2255 }
2256
2257 EntrySummary {
2258 max_path: self.path.clone(),
2259 count: 1,
2260 visible_count,
2261 file_count,
2262 visible_file_count,
2263 }
2264 }
2265}
2266
2267impl sum_tree::KeyedItem for Entry {
2268 type Key = PathKey;
2269
2270 fn key(&self) -> Self::Key {
2271 PathKey(self.path.clone())
2272 }
2273}
2274
/// Aggregated statistics over a span of `Entry` items in the path-keyed tree.
#[derive(Clone, Debug)]
pub struct EntrySummary {
    /// The greatest path in the summarized span.
    max_path: Arc<Path>,
    /// Total number of entries.
    count: usize,
    /// Number of non-ignored entries.
    visible_count: usize,
    /// Number of file entries.
    file_count: usize,
    /// Number of non-ignored file entries.
    visible_file_count: usize,
}
2283
2284impl Default for EntrySummary {
2285 fn default() -> Self {
2286 Self {
2287 max_path: Arc::from(Path::new("")),
2288 count: 0,
2289 visible_count: 0,
2290 file_count: 0,
2291 visible_file_count: 0,
2292 }
2293 }
2294}
2295
2296impl sum_tree::Summary for EntrySummary {
2297 type Context = ();
2298
2299 fn add_summary(&mut self, rhs: &Self, _: &()) {
2300 self.max_path = rhs.max_path.clone();
2301 self.count += rhs.count;
2302 self.visible_count += rhs.visible_count;
2303 self.file_count += rhs.file_count;
2304 self.visible_file_count += rhs.visible_file_count;
2305 }
2306}
2307
/// An entry record keyed by id, mirroring the path-keyed `Entry` tree so
/// entries can be looked up and diffed by id.
#[derive(Clone, Debug)]
struct PathEntry {
    /// Id of the corresponding `Entry`.
    id: ProjectEntryId,
    /// Path of the corresponding `Entry`.
    path: Arc<Path>,
    /// Whether the entry is gitignored.
    is_ignored: bool,
    /// Scan id of the last scan that touched this entry.
    scan_id: usize,
}
2315
impl sum_tree::Item for PathEntry {
    type Summary = PathEntrySummary;

    /// Summaries carry only the maximum id in the span.
    fn summary(&self) -> Self::Summary {
        PathEntrySummary { max_id: self.id }
    }
}
2323
2324impl sum_tree::KeyedItem for PathEntry {
2325 type Key = ProjectEntryId;
2326
2327 fn key(&self) -> Self::Key {
2328 self.id
2329 }
2330}
2331
/// Summary for the id-keyed entry tree: tracks the maximum entry id in a range.
#[derive(Clone, Debug, Default)]
struct PathEntrySummary {
    // Largest `ProjectEntryId` in the summarized range.
    max_id: ProjectEntryId,
}
2336
2337impl sum_tree::Summary for PathEntrySummary {
2338 type Context = ();
2339
2340 fn add_summary(&mut self, summary: &Self, _: &Self::Context) {
2341 self.max_id = summary.max_id;
2342 }
2343}
2344
2345impl<'a> sum_tree::Dimension<'a, PathEntrySummary> for ProjectEntryId {
2346 fn add_summary(&mut self, summary: &'a PathEntrySummary, _: &()) {
2347 *self = summary.max_id;
2348 }
2349}
2350
/// Ordering key for the path-keyed entry tree: a worktree-relative path.
#[derive(Clone, Debug, Eq, PartialEq, Ord, PartialOrd)]
pub struct PathKey(Arc<Path>);
2353
2354impl Default for PathKey {
2355 fn default() -> Self {
2356 Self(Path::new("").into())
2357 }
2358}
2359
2360impl<'a> sum_tree::Dimension<'a, EntrySummary> for PathKey {
2361 fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
2362 self.0 = summary.max_path.clone();
2363 }
2364}
2365
/// Scans a local worktree's directory tree on background worker tasks, keeping
/// `snapshot` up to date and reporting progress via `status_updates_tx`.
struct BackgroundScanner {
    // Current state of the worktree; shared across the scanner's worker tasks.
    snapshot: Mutex<LocalSnapshot>,
    // Filesystem abstraction used for all directory, metadata, and gitignore reads.
    fs: Arc<dyn Fs>,
    // Channel over which `ScanState` progress/update messages are emitted.
    status_updates_tx: UnboundedSender<ScanState>,
    // Executor used to spawn one scan worker per CPU.
    executor: Arc<executor::Background>,
    // Requests from the worktree to rescan specific paths; the accompanying
    // barrier sender is released once the refresh has been processed.
    refresh_requests_rx: channel::Receiver<(Vec<PathBuf>, barrier::Sender)>,
    // Snapshot as of the last status update, plus the paths changed since then;
    // used by `send_status_update` to compute change sets.
    prev_state: Mutex<(Snapshot, Vec<Arc<Path>>)>,
    // True once the initial recursive scan has completed.
    finished_initial_scan: bool,
}
2375
impl BackgroundScanner {
    /// Creates a scanner over `snapshot`. The `prev_state` diff base is seeded
    /// from the initial snapshot.
    fn new(
        snapshot: LocalSnapshot,
        fs: Arc<dyn Fs>,
        status_updates_tx: UnboundedSender<ScanState>,
        executor: Arc<executor::Background>,
        refresh_requests_rx: channel::Receiver<(Vec<PathBuf>, barrier::Sender)>,
    ) -> Self {
        Self {
            fs,
            status_updates_tx,
            executor,
            refresh_requests_rx,
            prev_state: Mutex::new((snapshot.snapshot.clone(), Vec::new())),
            snapshot: Mutex::new(snapshot),
            finished_initial_scan: false,
        }
    }

    /// Main entry point: loads ancestor gitignores, performs the initial
    /// recursive scan, then processes filesystem events and refresh requests
    /// until the event stream or the request channel closes.
    async fn run(
        &mut self,
        mut events_rx: Pin<Box<dyn Send + Stream<Item = Vec<fsevent::Event>>>>,
    ) {
        use futures::FutureExt as _;

        let (root_abs_path, root_inode) = {
            let snapshot = self.snapshot.lock();
            (
                snapshot.abs_path.clone(),
                snapshot.root_entry().map(|e| e.inode),
            )
        };

        // Populate ignores above the root.
        let ignore_stack;
        for ancestor in root_abs_path.ancestors().skip(1) {
            if let Ok(ignore) = build_gitignore(&ancestor.join(&*GITIGNORE), self.fs.as_ref()).await
            {
                self.snapshot
                    .lock()
                    .ignores_by_parent_abs_path
                    .insert(ancestor.into(), (ignore.into(), 0));
            }
        }
        {
            let mut snapshot = self.snapshot.lock();
            snapshot.scan_id += 1;
            ignore_stack = snapshot.ignore_stack_for_abs_path(&root_abs_path, true);
            // If an ancestor gitignore matches the worktree root itself, mark
            // the root entry as ignored.
            if ignore_stack.is_all() {
                if let Some(mut root_entry) = snapshot.root_entry().cloned() {
                    root_entry.is_ignored = true;
                    snapshot.insert_entry(root_entry, self.fs.as_ref());
                }
            }
        };

        // Perform an initial scan of the directory.
        let (scan_job_tx, scan_job_rx) = channel::unbounded();
        smol::block_on(scan_job_tx.send(ScanJob {
            abs_path: root_abs_path,
            path: Arc::from(Path::new("")),
            ignore_stack,
            ancestor_inodes: TreeSet::from_ordered_entries(root_inode),
            scan_queue: scan_job_tx.clone(),
        }))
        .unwrap();
        drop(scan_job_tx);
        self.scan_dirs(true, scan_job_rx).await;
        {
            let mut snapshot = self.snapshot.lock();
            snapshot.completed_scan_id = snapshot.scan_id;
        }
        self.send_status_update(false, None);

        // Process any FS events that occurred while performing the initial scan.
        // For these events, update events cannot be as precise, because we didn't
        // have the previous state loaded yet.
        if let Poll::Ready(Some(events)) = futures::poll!(events_rx.next()) {
            let mut paths = events.into_iter().map(|e| e.path).collect::<Vec<_>>();
            // Drain all events that are already available, so they can be
            // processed as a single batch.
            while let Poll::Ready(Some(more_events)) = futures::poll!(events_rx.next()) {
                paths.extend(more_events.into_iter().map(|e| e.path));
            }
            self.process_events(paths).await;
        }

        self.finished_initial_scan = true;

        // Continue processing events until the worktree is dropped.
        loop {
            select_biased! {
                // Process any path refresh requests from the worktree. Prioritize
                // these before handling changes reported by the filesystem.
                request = self.refresh_requests_rx.recv().fuse() => {
                    let Ok((paths, barrier)) = request else { break };
                    if !self.process_refresh_request(paths, barrier).await {
                        return;
                    }
                }

                events = events_rx.next().fuse() => {
                    let Some(events) = events else { break };
                    let mut paths = events.into_iter().map(|e| e.path).collect::<Vec<_>>();
                    // Batch up any further events that are immediately available.
                    while let Poll::Ready(Some(more_events)) = futures::poll!(events_rx.next()) {
                        paths.extend(more_events.into_iter().map(|e| e.path));
                    }
                    self.process_events(paths).await;
                }
            }
        }
    }

    /// Reloads the given paths and emits a status update holding `barrier`.
    /// Returns false if the status update could not be sent (i.e. the receiver
    /// was dropped), signalling the caller to shut down.
    async fn process_refresh_request(&self, paths: Vec<PathBuf>, barrier: barrier::Sender) -> bool {
        self.reload_entries_for_paths(paths, None).await;
        self.send_status_update(false, Some(barrier))
    }

    /// Handles a batch of FS event paths: reloads the affected entries
    /// (recursively rescanning directories), refreshes ignore statuses, prunes
    /// git repository state for removed `.git` entries, and emits a status update.
    async fn process_events(&mut self, paths: Vec<PathBuf>) {
        let (scan_job_tx, scan_job_rx) = channel::unbounded();
        if let Some(mut paths) = self
            .reload_entries_for_paths(paths, Some(scan_job_tx.clone()))
            .await
        {
            // Record which paths changed so the next status update can compute
            // a precise change set.
            paths.sort_unstable();
            util::extend_sorted(&mut self.prev_state.lock().1, paths, usize::MAX, Ord::cmp);
        }
        drop(scan_job_tx);
        self.scan_dirs(false, scan_job_rx).await;

        self.update_ignore_statuses().await;

        let mut snapshot = self.snapshot.lock();

        // Drop git repositories whose `.git` entry no longer exists.
        let mut git_repositories = mem::take(&mut snapshot.git_repositories);
        git_repositories.retain(|project_entry_id, _| {
            snapshot
                .entry_for_id(*project_entry_id)
                .map_or(false, |entry| entry.path.file_name() == Some(&DOT_GIT))
        });
        snapshot.git_repositories = git_repositories;

        // Likewise drop repository entries whose `.git` entry id is gone.
        let mut git_repository_entries = mem::take(&mut snapshot.snapshot.repository_entries);
        git_repository_entries.retain(|_, entry| snapshot.contains_entry(entry.dot_git_entry_id));
        snapshot.snapshot.repository_entries = git_repository_entries;

        snapshot.removed_entry_ids.clear();
        snapshot.completed_scan_id = snapshot.scan_id;

        drop(snapshot);

        self.send_status_update(false, None);
    }

    /// Drains `scan_jobs_rx` using one worker task per CPU, scanning each
    /// queued directory. While scanning, workers also service refresh requests
    /// and (optionally) send periodic progress updates.
    async fn scan_dirs(
        &self,
        enable_progress_updates: bool,
        scan_jobs_rx: channel::Receiver<ScanJob>,
    ) {
        use futures::FutureExt as _;

        if self
            .status_updates_tx
            .unbounded_send(ScanState::Started)
            .is_err()
        {
            return;
        }

        let progress_update_count = AtomicUsize::new(0);
        self.executor
            .scoped(|scope| {
                for _ in 0..self.executor.num_cpus() {
                    scope.spawn(async {
                        let mut last_progress_update_count = 0;
                        let progress_update_timer = self.progress_timer(enable_progress_updates).fuse();
                        futures::pin_mut!(progress_update_timer);

                        loop {
                            select_biased! {
                                // Process any path refresh requests before moving on to process
                                // the scan queue, so that user operations are prioritized.
                                request = self.refresh_requests_rx.recv().fuse() => {
                                    let Ok((paths, barrier)) = request else { break };
                                    if !self.process_refresh_request(paths, barrier).await {
                                        return;
                                    }
                                }

                                // Send periodic progress updates to the worktree. Use an atomic counter
                                // to ensure that only one of the workers sends a progress update after
                                // the update interval elapses.
                                _ = progress_update_timer => {
                                    match progress_update_count.compare_exchange(
                                        last_progress_update_count,
                                        last_progress_update_count + 1,
                                        SeqCst,
                                        SeqCst
                                    ) {
                                        Ok(_) => {
                                            last_progress_update_count += 1;
                                            self.send_status_update(true, None);
                                        }
                                        Err(count) => {
                                            // Another worker already sent this update; just
                                            // catch up to the shared counter.
                                            last_progress_update_count = count;
                                        }
                                    }
                                    progress_update_timer.set(self.progress_timer(enable_progress_updates).fuse());
                                }

                                // Recursively load directories from the file system.
                                job = scan_jobs_rx.recv().fuse() => {
                                    let Ok(job) = job else { break };
                                    if let Err(err) = self.scan_dir(&job).await {
                                        if job.path.as_ref() != Path::new("") {
                                            log::error!("error scanning directory {:?}: {}", job.abs_path, err);
                                        }
                                    }
                                }
                            }
                        }
                    })
                }
            })
            .await;
    }

    /// Emits a `ScanState::Updated` message containing the current snapshot and
    /// the change set relative to the previously reported snapshot. Returns
    /// whether the send succeeded.
    fn send_status_update(&self, scanning: bool, barrier: Option<barrier::Sender>) -> bool {
        let mut prev_state = self.prev_state.lock();
        let snapshot = self.snapshot.lock().clone();
        // Swap the current snapshot into `prev_state`, retrieving the old diff
        // base and the list of paths changed since the last update.
        let mut old_snapshot = snapshot.snapshot.clone();
        mem::swap(&mut old_snapshot, &mut prev_state.0);
        let changed_paths = mem::take(&mut prev_state.1);
        let changes = self.build_change_set(&old_snapshot, &snapshot.snapshot, changed_paths);
        self.status_updates_tx
            .unbounded_send(ScanState::Updated {
                snapshot,
                changes,
                scanning,
                barrier,
            })
            .is_ok()
    }

    /// Reads the directory described by `job`, inserting its children into the
    /// snapshot and enqueueing scan jobs for any subdirectories.
    async fn scan_dir(&self, job: &ScanJob) -> Result<()> {
        let mut new_entries: Vec<Entry> = Vec::new();
        // One element per directory child, in the same order as `new_entries`'
        // directories; `None` marks a directory that must not be descended into.
        let mut new_jobs: Vec<Option<ScanJob>> = Vec::new();
        let mut ignore_stack = job.ignore_stack.clone();
        let mut new_ignore = None;
        let (root_abs_path, root_char_bag, next_entry_id) = {
            let snapshot = self.snapshot.lock();
            (
                snapshot.abs_path().clone(),
                snapshot.root_char_bag,
                snapshot.next_entry_id.clone(),
            )
        };
        let mut child_paths = self.fs.read_dir(&job.abs_path).await?;
        while let Some(child_abs_path) = child_paths.next().await {
            let child_abs_path: Arc<Path> = match child_abs_path {
                Ok(child_abs_path) => child_abs_path.into(),
                Err(error) => {
                    log::error!("error processing entry {:?}", error);
                    continue;
                }
            };

            let child_name = child_abs_path.file_name().unwrap();
            let child_path: Arc<Path> = job.path.join(child_name).into();
            let child_metadata = match self.fs.metadata(&child_abs_path).await {
                Ok(Some(metadata)) => metadata,
                Ok(None) => continue,
                Err(err) => {
                    log::error!("error processing {:?}: {:?}", child_abs_path, err);
                    continue;
                }
            };

            // If we find a .gitignore, add it to the stack of ignores used to determine which paths are ignored
            if child_name == *GITIGNORE {
                match build_gitignore(&child_abs_path, self.fs.as_ref()).await {
                    Ok(ignore) => {
                        let ignore = Arc::new(ignore);
                        ignore_stack = ignore_stack.append(job.abs_path.clone(), ignore.clone());
                        new_ignore = Some(ignore);
                    }
                    Err(error) => {
                        log::error!(
                            "error loading .gitignore file {:?} - {:?}",
                            child_name,
                            error
                        );
                    }
                }

                // Update ignore status of any child entries we've already processed to reflect
                // the ignore file in the current directory. Because `.gitignore` starts with
                // a `.`, it sorts near the beginning of the directory listing, so there should
                // rarely be many of these. Update the ignore stack associated with any new
                // jobs as well.
                let mut new_jobs = new_jobs.iter_mut();
                for entry in &mut new_entries {
                    let entry_abs_path = root_abs_path.join(&entry.path);
                    entry.is_ignored =
                        ignore_stack.is_abs_path_ignored(&entry_abs_path, entry.is_dir());

                    if entry.is_dir() {
                        if let Some(job) = new_jobs.next().expect("Missing scan job for entry") {
                            job.ignore_stack = if entry.is_ignored {
                                IgnoreStack::all()
                            } else {
                                ignore_stack.clone()
                            };
                        }
                    }
                }
            }

            let mut child_entry = Entry::new(
                child_path.clone(),
                &child_metadata,
                &next_entry_id,
                root_char_bag,
            );

            if child_entry.is_dir() {
                let is_ignored = ignore_stack.is_abs_path_ignored(&child_abs_path, true);
                child_entry.is_ignored = is_ignored;

                // Avoid recursing until crash in the case of a recursive symlink
                if !job.ancestor_inodes.contains(&child_entry.inode) {
                    let mut ancestor_inodes = job.ancestor_inodes.clone();
                    ancestor_inodes.insert(child_entry.inode);

                    new_jobs.push(Some(ScanJob {
                        abs_path: child_abs_path,
                        path: child_path,
                        ignore_stack: if is_ignored {
                            IgnoreStack::all()
                        } else {
                            ignore_stack.clone()
                        },
                        ancestor_inodes,
                        scan_queue: job.scan_queue.clone(),
                    }));
                } else {
                    new_jobs.push(None);
                }
            } else {
                child_entry.is_ignored = ignore_stack.is_abs_path_ignored(&child_abs_path, false);
            }

            new_entries.push(child_entry);
        }

        self.snapshot.lock().populate_dir(
            job.path.clone(),
            new_entries,
            new_ignore,
            self.fs.as_ref(),
        );

        for new_job in new_jobs {
            if let Some(new_job) = new_job {
                job.scan_queue.send(new_job).await.unwrap();
            }
        }

        Ok(())
    }

    /// Refreshes the snapshot entries for the given absolute paths. When
    /// `scan_queue_tx` is provided, directories are also enqueued for a
    /// recursive rescan. Returns the affected worktree-relative paths, or
    /// `None` if the worktree root could not be canonicalized.
    async fn reload_entries_for_paths(
        &self,
        mut abs_paths: Vec<PathBuf>,
        scan_queue_tx: Option<Sender<ScanJob>>,
    ) -> Option<Vec<Arc<Path>>> {
        let doing_recursive_update = scan_queue_tx.is_some();

        // After sorting, drop any path that is a descendant of the path that
        // precedes it, since the ancestor's rescan covers it.
        abs_paths.sort_unstable();
        abs_paths.dedup_by(|a, b| a.starts_with(&b));

        let root_abs_path = self.snapshot.lock().abs_path.clone();
        let root_canonical_path = self.fs.canonicalize(&root_abs_path).await.log_err()?;
        let metadata = futures::future::join_all(
            abs_paths
                .iter()
                .map(|abs_path| self.fs.metadata(&abs_path))
                .collect::<Vec<_>>(),
        )
        .await;

        let mut snapshot = self.snapshot.lock();
        let is_idle = snapshot.completed_scan_id == snapshot.scan_id;
        snapshot.scan_id += 1;
        if is_idle && !doing_recursive_update {
            snapshot.completed_scan_id = snapshot.scan_id;
        }

        // Remove any entries for paths that no longer exist or are being recursively
        // refreshed. Do this before adding any new entries, so that renames can be
        // detected regardless of the order of the paths.
        let mut event_paths = Vec::<Arc<Path>>::with_capacity(abs_paths.len());
        for (abs_path, metadata) in abs_paths.iter().zip(metadata.iter()) {
            if let Ok(path) = abs_path.strip_prefix(&root_canonical_path) {
                if matches!(metadata, Ok(None)) || doing_recursive_update {
                    snapshot.remove_path(path);
                }
                event_paths.push(path.into());
            } else {
                log::error!(
                    "unexpected event {:?} for root path {:?}",
                    abs_path,
                    root_canonical_path
                );
            }
        }

        for (path, metadata) in event_paths.iter().cloned().zip(metadata.into_iter()) {
            let abs_path: Arc<Path> = root_abs_path.join(&path).into();

            match metadata {
                Ok(Some(metadata)) => {
                    let ignore_stack =
                        snapshot.ignore_stack_for_abs_path(&abs_path, metadata.is_dir);
                    let mut fs_entry = Entry::new(
                        path.clone(),
                        &metadata,
                        snapshot.next_entry_id.as_ref(),
                        snapshot.root_char_bag,
                    );
                    fs_entry.is_ignored = ignore_stack.is_all();
                    snapshot.insert_entry(fs_entry, self.fs.as_ref());

                    let scan_id = snapshot.scan_id;

                    // If this path lives inside a repository's `.git` directory,
                    // reload that repository's index and branch information.
                    let repo_with_path_in_dotgit = snapshot.repo_for_metadata(&path);
                    if let Some((key, repo)) = repo_with_path_in_dotgit {
                        let repo = repo.lock();
                        repo.reload_index();
                        let branch = repo.branch_name();

                        snapshot.repository_entries.update(&key, |entry| {
                            entry.scan_id = scan_id;
                            entry.branch = branch.map(Into::into)
                        });
                    }

                    if let Some(scan_queue_tx) = &scan_queue_tx {
                        let mut ancestor_inodes = snapshot.ancestor_inodes_for_path(&path);
                        // Skip directories already on the ancestor chain to
                        // avoid looping on recursive symlinks.
                        if metadata.is_dir && !ancestor_inodes.contains(&metadata.inode) {
                            ancestor_inodes.insert(metadata.inode);
                            smol::block_on(scan_queue_tx.send(ScanJob {
                                abs_path,
                                path,
                                ignore_stack,
                                ancestor_inodes,
                                scan_queue: scan_queue_tx.clone(),
                            }))
                            .unwrap();
                        }
                    }
                }
                Ok(None) => {}
                Err(err) => {
                    // TODO - create a special 'error' entry in the entries tree to mark this
                    log::error!("error reading file on event {:?}", err);
                }
            }
        }

        Some(event_paths)
    }

    /// Re-applies gitignore rules after `.gitignore` files have changed:
    /// removes stale ignore records, then recursively updates the ignore status
    /// of entries beneath each changed ignore file using a pool of workers.
    async fn update_ignore_statuses(&self) {
        use futures::FutureExt as _;

        // Work on a local clone of the snapshot; the shared snapshot is only
        // mutated when removing stale ignore records and in `update_ignore_status`.
        let mut snapshot = self.snapshot.lock().clone();
        let mut ignores_to_update = Vec::new();
        let mut ignores_to_delete = Vec::new();
        for (parent_abs_path, (_, scan_id)) in &snapshot.ignores_by_parent_abs_path {
            if let Ok(parent_path) = parent_abs_path.strip_prefix(&snapshot.abs_path) {
                if *scan_id > snapshot.completed_scan_id
                    && snapshot.entry_for_path(parent_path).is_some()
                {
                    ignores_to_update.push(parent_abs_path.clone());
                }

                let ignore_path = parent_path.join(&*GITIGNORE);
                if snapshot.entry_for_path(ignore_path).is_none() {
                    ignores_to_delete.push(parent_abs_path.clone());
                }
            }
        }

        for parent_abs_path in ignores_to_delete {
            snapshot.ignores_by_parent_abs_path.remove(&parent_abs_path);
            self.snapshot
                .lock()
                .ignores_by_parent_abs_path
                .remove(&parent_abs_path);
        }

        let (ignore_queue_tx, ignore_queue_rx) = channel::unbounded();
        ignores_to_update.sort_unstable();
        let mut ignores_to_update = ignores_to_update.into_iter().peekable();
        while let Some(parent_abs_path) = ignores_to_update.next() {
            // Skip descendants of this path; the recursive update starting at
            // the ancestor will cover them.
            while ignores_to_update
                .peek()
                .map_or(false, |p| p.starts_with(&parent_abs_path))
            {
                ignores_to_update.next().unwrap();
            }

            let ignore_stack = snapshot.ignore_stack_for_abs_path(&parent_abs_path, true);
            smol::block_on(ignore_queue_tx.send(UpdateIgnoreStatusJob {
                abs_path: parent_abs_path,
                ignore_stack,
                ignore_queue: ignore_queue_tx.clone(),
            }))
            .unwrap();
        }
        drop(ignore_queue_tx);

        self.executor
            .scoped(|scope| {
                for _ in 0..self.executor.num_cpus() {
                    scope.spawn(async {
                        loop {
                            select_biased! {
                                // Process any path refresh requests before moving on to process
                                // the queue of ignore statuses.
                                request = self.refresh_requests_rx.recv().fuse() => {
                                    let Ok((paths, barrier)) = request else { break };
                                    if !self.process_refresh_request(paths, barrier).await {
                                        return;
                                    }
                                }

                                // Recursively process directories whose ignores have changed.
                                job = ignore_queue_rx.recv().fuse() => {
                                    let Ok(job) = job else { break };
                                    self.update_ignore_status(job, &snapshot).await;
                                }
                            }
                        }
                    });
                }
            })
            .await;
    }

    /// Recomputes the ignore status of the direct children of `job.abs_path`,
    /// enqueueing a job for each child directory and writing any changed
    /// entries back into the shared snapshot.
    async fn update_ignore_status(&self, job: UpdateIgnoreStatusJob, snapshot: &LocalSnapshot) {
        let mut ignore_stack = job.ignore_stack;
        if let Some((ignore, _)) = snapshot.ignores_by_parent_abs_path.get(&job.abs_path) {
            ignore_stack = ignore_stack.append(job.abs_path.clone(), ignore.clone());
        }

        let mut entries_by_id_edits = Vec::new();
        let mut entries_by_path_edits = Vec::new();
        let path = job.abs_path.strip_prefix(&snapshot.abs_path).unwrap();
        for mut entry in snapshot.child_entries(path).cloned() {
            let was_ignored = entry.is_ignored;
            let abs_path = snapshot.abs_path().join(&entry.path);
            entry.is_ignored = ignore_stack.is_abs_path_ignored(&abs_path, entry.is_dir());
            if entry.is_dir() {
                let child_ignore_stack = if entry.is_ignored {
                    IgnoreStack::all()
                } else {
                    ignore_stack.clone()
                };
                job.ignore_queue
                    .send(UpdateIgnoreStatusJob {
                        abs_path: abs_path.into(),
                        ignore_stack: child_ignore_stack,
                        ignore_queue: job.ignore_queue.clone(),
                    })
                    .await
                    .unwrap();
            }

            // Only record an edit when the ignore status actually changed.
            if entry.is_ignored != was_ignored {
                let mut path_entry = snapshot.entries_by_id.get(&entry.id, &()).unwrap().clone();
                path_entry.scan_id = snapshot.scan_id;
                path_entry.is_ignored = entry.is_ignored;
                entries_by_id_edits.push(Edit::Insert(path_entry));
                entries_by_path_edits.push(Edit::Insert(entry));
            }
        }

        let mut snapshot = self.snapshot.lock();
        snapshot.entries_by_path.edit(entries_by_path_edits, &());
        snapshot.entries_by_id.edit(entries_by_id_edits, &());
    }

    /// Computes, for each path in `event_paths`, how the entries at and beneath
    /// that path differ between `old_snapshot` and `new_snapshot`.
    fn build_change_set(
        &self,
        old_snapshot: &Snapshot,
        new_snapshot: &Snapshot,
        event_paths: Vec<Arc<Path>>,
    ) -> HashMap<Arc<Path>, PathChange> {
        use PathChange::{Added, AddedOrUpdated, Removed, Updated};

        let mut changes = HashMap::default();
        let mut old_paths = old_snapshot.entries_by_path.cursor::<PathKey>();
        let mut new_paths = new_snapshot.entries_by_path.cursor::<PathKey>();
        let received_before_initialized = !self.finished_initial_scan;

        for path in event_paths {
            let path = PathKey(path);
            old_paths.seek(&path, Bias::Left, &());
            new_paths.seek(&path, Bias::Left, &());

            // Walk both cursors in lockstep, diffing the entries at `path` and
            // all of its descendants.
            loop {
                match (old_paths.item(), new_paths.item()) {
                    (Some(old_entry), Some(new_entry)) => {
                        // Stop once both cursors have moved past the event path's subtree.
                        if old_entry.path > path.0
                            && new_entry.path > path.0
                            && !old_entry.path.starts_with(&path.0)
                            && !new_entry.path.starts_with(&path.0)
                        {
                            break;
                        }

                        match Ord::cmp(&old_entry.path, &new_entry.path) {
                            Ordering::Less => {
                                changes.insert(old_entry.path.clone(), Removed);
                                old_paths.next(&());
                            }
                            Ordering::Equal => {
                                if received_before_initialized {
                                    // If the worktree was not fully initialized when this event was generated,
                                    // we can't know whether this entry was added during the scan or whether
                                    // it was merely updated.
                                    changes.insert(new_entry.path.clone(), AddedOrUpdated);
                                } else if old_entry.mtime != new_entry.mtime {
                                    changes.insert(new_entry.path.clone(), Updated);
                                }
                                old_paths.next(&());
                                new_paths.next(&());
                            }
                            Ordering::Greater => {
                                changes.insert(new_entry.path.clone(), Added);
                                new_paths.next(&());
                            }
                        }
                    }
                    (Some(old_entry), None) => {
                        changes.insert(old_entry.path.clone(), Removed);
                        old_paths.next(&());
                    }
                    (None, Some(new_entry)) => {
                        changes.insert(new_entry.path.clone(), Added);
                        new_paths.next(&());
                    }
                    (None, None) => break,
                }
            }
        }
        changes
    }

    /// Timer used to pace progress updates. Never resolves when `running` is
    /// false; uses a simulated random delay under fake-FS tests.
    async fn progress_timer(&self, running: bool) {
        if !running {
            return futures::future::pending().await;
        }

        #[cfg(any(test, feature = "test-support"))]
        if self.fs.is_fake() {
            return self.executor.simulate_random_delay().await;
        }

        smol::Timer::after(Duration::from_millis(100)).await;
    }
}
3047
3048fn char_bag_for_path(root_char_bag: CharBag, path: &Path) -> CharBag {
3049 let mut result = root_char_bag;
3050 result.extend(
3051 path.to_string_lossy()
3052 .chars()
3053 .map(|c| c.to_ascii_lowercase()),
3054 );
3055 result
3056}
3057
/// A unit of work for the directory-scanning workers: one directory to read.
struct ScanJob {
    // Absolute path of the directory to scan.
    abs_path: Arc<Path>,
    // The same directory, relative to the worktree root.
    path: Arc<Path>,
    // Gitignore rules in effect for this directory's children.
    ignore_stack: Arc<IgnoreStack>,
    // Queue on which jobs for subdirectories are enqueued.
    scan_queue: Sender<ScanJob>,
    // Inodes of all ancestor directories, used to detect recursive symlinks.
    ancestor_inodes: TreeSet<u64>,
}
3065
/// A unit of work for the ignore-status workers: one directory whose children
/// need their gitignore status recomputed.
struct UpdateIgnoreStatusJob {
    // Absolute path of the directory to process.
    abs_path: Arc<Path>,
    // Gitignore rules in effect above this directory.
    ignore_stack: Arc<IgnoreStack>,
    // Queue on which jobs for subdirectories are enqueued.
    ignore_queue: Sender<UpdateIgnoreStatusJob>,
}
3071
/// Test-support extension methods for worktree model handles.
pub trait WorktreeHandle {
    // Waits until all pending FS events for the worktree have been processed.
    // Only available in test builds.
    #[cfg(any(test, feature = "test-support"))]
    fn flush_fs_events<'a>(
        &self,
        cx: &'a gpui::TestAppContext,
    ) -> futures::future::LocalBoxFuture<'a, ()>;
}
3079
impl WorktreeHandle for ModelHandle<Worktree> {
    // The worktree's FS event stream sometimes delivers "redundant" events for FS changes that
    // occurred before the worktree was constructed. These events can cause the worktree to perform
    // extra directory scans, and emit extra scan-state notifications.
    //
    // This function mutates the worktree's directory and waits for those mutations to be picked up,
    // to ensure that all redundant FS events have already been processed.
    #[cfg(any(test, feature = "test-support"))]
    fn flush_fs_events<'a>(
        &self,
        cx: &'a gpui::TestAppContext,
    ) -> futures::future::LocalBoxFuture<'a, ()> {
        use smol::future::FutureExt;

        let filename = "fs-event-sentinel";
        let tree = self.clone();
        let (fs, root_path) = self.read_with(cx, |tree, _| {
            let tree = tree.as_local().unwrap();
            (tree.fs.clone(), tree.abs_path().clone())
        });

        async move {
            // Create a sentinel file and wait for the scanner to observe it...
            fs.create_file(&root_path.join(filename), Default::default())
                .await
                .unwrap();
            tree.condition(cx, |tree, _| tree.entry_for_path(filename).is_some())
                .await;

            // ...then remove it and wait for that to be observed as well.
            fs.remove_file(&root_path.join(filename), Default::default())
                .await
                .unwrap();
            tree.condition(cx, |tree, _| tree.entry_for_path(filename).is_none())
                .await;

            // Finally, wait for any in-flight scan to finish.
            cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
                .await;
        }
        .boxed_local()
    }
}
3120
/// Cursor dimension used by `Traversal`: tracks how many entries of each
/// category precede the cursor, along with the maximum path seen.
#[derive(Clone, Debug)]
struct TraversalProgress<'a> {
    // Path of the right-most entry summarized so far.
    max_path: &'a Path,
    // Total entries traversed.
    count: usize,
    // Non-ignored entries traversed.
    visible_count: usize,
    // File entries traversed (ignored or not).
    file_count: usize,
    // Non-ignored file entries traversed.
    visible_file_count: usize,
}
3129
3130impl<'a> TraversalProgress<'a> {
3131 fn count(&self, include_dirs: bool, include_ignored: bool) -> usize {
3132 match (include_ignored, include_dirs) {
3133 (true, true) => self.count,
3134 (true, false) => self.file_count,
3135 (false, true) => self.visible_count,
3136 (false, false) => self.visible_file_count,
3137 }
3138 }
3139}
3140
3141impl<'a> sum_tree::Dimension<'a, EntrySummary> for TraversalProgress<'a> {
3142 fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
3143 self.max_path = summary.max_path.as_ref();
3144 self.count += summary.count;
3145 self.visible_count += summary.visible_count;
3146 self.file_count += summary.file_count;
3147 self.visible_file_count += summary.visible_file_count;
3148 }
3149}
3150
3151impl<'a> Default for TraversalProgress<'a> {
3152 fn default() -> Self {
3153 Self {
3154 max_path: Path::new(""),
3155 count: 0,
3156 visible_count: 0,
3157 file_count: 0,
3158 visible_file_count: 0,
3159 }
3160 }
3161}
3162
/// An iterator over a snapshot's entries in path order, optionally filtering
/// out directories and/or ignored entries.
pub struct Traversal<'a> {
    // Cursor over the path-keyed entry tree.
    cursor: sum_tree::Cursor<'a, Entry, TraversalProgress<'a>>,
    // Whether gitignored entries are yielded.
    include_ignored: bool,
    // Whether directory entries are yielded.
    include_dirs: bool,
}
3168
impl<'a> Traversal<'a> {
    /// Moves to the next entry that passes this traversal's filters.
    pub fn advance(&mut self) -> bool {
        self.advance_to_offset(self.offset() + 1)
    }

    /// Seeks forward until `offset()` (counted over filtered entries) reaches
    /// `offset`, returning the underlying cursor's seek result.
    pub fn advance_to_offset(&mut self, offset: usize) -> bool {
        self.cursor.seek_forward(
            &TraversalTarget::Count {
                count: offset,
                include_dirs: self.include_dirs,
                include_ignored: self.include_ignored,
            },
            Bias::Right,
            &(),
        )
    }

    /// Skips past the current entry and all of its descendants, stopping at
    /// the next entry outside that subtree that passes the filters. Returns
    /// false if no such entry exists.
    pub fn advance_to_sibling(&mut self) -> bool {
        while let Some(entry) = self.cursor.item() {
            // Seek to the first entry that is not under the current entry's path.
            self.cursor.seek_forward(
                &TraversalTarget::PathSuccessor(&entry.path),
                Bias::Left,
                &(),
            );
            if let Some(entry) = self.cursor.item() {
                if (self.include_dirs || !entry.is_dir())
                    && (self.include_ignored || !entry.is_ignored)
                {
                    return true;
                }
            }
        }
        false
    }

    /// Returns the entry at the cursor's current position, if any.
    pub fn entry(&self) -> Option<&'a Entry> {
        self.cursor.item()
    }

    /// Returns the number of filtered entries preceding the cursor.
    pub fn offset(&self) -> usize {
        self.cursor
            .start()
            .count(self.include_dirs, self.include_ignored)
    }
}
3214
3215impl<'a> Iterator for Traversal<'a> {
3216 type Item = &'a Entry;
3217
3218 fn next(&mut self) -> Option<Self::Item> {
3219 if let Some(item) = self.entry() {
3220 self.advance();
3221 Some(item)
3222 } else {
3223 None
3224 }
3225 }
3226}
3227
/// Seek targets for `Traversal`'s cursor.
#[derive(Debug)]
enum TraversalTarget<'a> {
    // Seek to the given path.
    Path(&'a Path),
    // Seek to the first entry that is not the given path or one of its descendants.
    PathSuccessor(&'a Path),
    // Seek to the entry at the given offset, counted under the given filters.
    Count {
        count: usize,
        include_ignored: bool,
        include_dirs: bool,
    },
}
3238
3239impl<'a, 'b> SeekTarget<'a, EntrySummary, TraversalProgress<'a>> for TraversalTarget<'b> {
3240 fn cmp(&self, cursor_location: &TraversalProgress<'a>, _: &()) -> Ordering {
3241 match self {
3242 TraversalTarget::Path(path) => path.cmp(&cursor_location.max_path),
3243 TraversalTarget::PathSuccessor(path) => {
3244 if !cursor_location.max_path.starts_with(path) {
3245 Ordering::Equal
3246 } else {
3247 Ordering::Greater
3248 }
3249 }
3250 TraversalTarget::Count {
3251 count,
3252 include_dirs,
3253 include_ignored,
3254 } => Ord::cmp(
3255 count,
3256 &cursor_location.count(*include_dirs, *include_ignored),
3257 ),
3258 }
3259 }
3260}
3261
/// Iterates over the entries beneath `parent_path`, skipping the contents of
/// nested subdirectories by advancing sibling-to-sibling.
struct ChildEntriesIter<'a> {
    // Path whose children are yielded.
    parent_path: &'a Path,
    // Underlying entry traversal, positioned within the parent's subtree.
    traversal: Traversal<'a>,
}
3266
3267impl<'a> Iterator for ChildEntriesIter<'a> {
3268 type Item = &'a Entry;
3269
3270 fn next(&mut self) -> Option<Self::Item> {
3271 if let Some(item) = self.traversal.entry() {
3272 if item.path.starts_with(&self.parent_path) {
3273 self.traversal.advance_to_sibling();
3274 return Some(item);
3275 }
3276 }
3277 None
3278 }
3279}
3280
3281impl<'a> From<&'a Entry> for proto::Entry {
3282 fn from(entry: &'a Entry) -> Self {
3283 Self {
3284 id: entry.id.to_proto(),
3285 is_dir: entry.is_dir(),
3286 path: entry.path.to_string_lossy().into(),
3287 inode: entry.inode,
3288 mtime: Some(entry.mtime.into()),
3289 is_symlink: entry.is_symlink,
3290 is_ignored: entry.is_ignored,
3291 }
3292 }
3293}
3294
3295impl<'a> TryFrom<(&'a CharBag, proto::Entry)> for Entry {
3296 type Error = anyhow::Error;
3297
3298 fn try_from((root_char_bag, entry): (&'a CharBag, proto::Entry)) -> Result<Self> {
3299 if let Some(mtime) = entry.mtime {
3300 let kind = if entry.is_dir {
3301 EntryKind::Dir
3302 } else {
3303 let mut char_bag = *root_char_bag;
3304 char_bag.extend(entry.path.chars().map(|c| c.to_ascii_lowercase()));
3305 EntryKind::File(char_bag)
3306 };
3307 let path: Arc<Path> = PathBuf::from(entry.path).into();
3308 Ok(Entry {
3309 id: ProjectEntryId::from_proto(entry.id),
3310 kind,
3311 path,
3312 inode: entry.inode,
3313 mtime: mtime.into(),
3314 is_symlink: entry.is_symlink,
3315 is_ignored: entry.is_ignored,
3316 })
3317 } else {
3318 Err(anyhow!(
3319 "missing mtime in remote worktree entry {:?}",
3320 entry.path
3321 ))
3322 }
3323 }
3324}
3325
3326#[cfg(test)]
3327mod tests {
3328 use super::*;
3329 use fs::{FakeFs, RealFs};
3330 use gpui::{executor::Deterministic, TestAppContext};
3331 use pretty_assertions::assert_eq;
3332 use rand::prelude::*;
3333 use serde_json::json;
3334 use std::{env, fmt::Write};
3335 use util::{http::FakeHttpClient, test::temp_tree};
3336
    // Verifies that entry traversal respects the `include_ignored` flag:
    // `a/b` is gitignored and must be omitted unless ignored entries are requested.
    #[gpui::test]
    async fn test_traversal(cx: &mut TestAppContext) {
        let fs = FakeFs::new(cx.background());
        fs.insert_tree(
            "/root",
            json!({
                ".gitignore": "a/b\n",
                "a": {
                    "b": "",
                    "c": "",
                }
            }),
        )
        .await;

        let http_client = FakeHttpClient::with_404_response();
        let client = cx.read(|cx| Client::new(http_client, cx));

        let tree = Worktree::local(
            client,
            Path::new("/root"),
            true,
            fs,
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();
        // Wait for the initial scan to finish before asserting on the entries.
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;

        tree.read_with(cx, |tree, _| {
            // Without ignored entries, `a/b` is hidden.
            assert_eq!(
                tree.entries(false)
                    .map(|entry| entry.path.as_ref())
                    .collect::<Vec<_>>(),
                vec![
                    Path::new(""),
                    Path::new(".gitignore"),
                    Path::new("a"),
                    Path::new("a/c"),
                ]
            );
            // With ignored entries included, `a/b` appears.
            assert_eq!(
                tree.entries(true)
                    .map(|entry| entry.path.as_ref())
                    .collect::<Vec<_>>(),
                vec![
                    Path::new(""),
                    Path::new(".gitignore"),
                    Path::new("a"),
                    Path::new("a/b"),
                    Path::new("a/c"),
                ]
            );
        })
    }
3394
    // Verifies that the scanner records circular symlinks as entries without
    // recursing into them infinitely, and that renaming such a symlink is
    // picked up on rescan.
    #[gpui::test(iterations = 10)]
    async fn test_circular_symlinks(executor: Arc<Deterministic>, cx: &mut TestAppContext) {
        let fs = FakeFs::new(cx.background());
        fs.insert_tree(
            "/root",
            json!({
                "lib": {
                    "a": {
                        "a.txt": ""
                    },
                    "b": {
                        "b.txt": ""
                    }
                }
            }),
        )
        .await;
        // Each of these symlinks points back at its own parent directory.
        fs.insert_symlink("/root/lib/a/lib", "..".into()).await;
        fs.insert_symlink("/root/lib/b/lib", "..".into()).await;

        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
        let tree = Worktree::local(
            client,
            Path::new("/root"),
            true,
            fs.clone(),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();

        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;

        // The symlinks appear as entries, but their targets are not re-scanned.
        tree.read_with(cx, |tree, _| {
            assert_eq!(
                tree.entries(false)
                    .map(|entry| entry.path.as_ref())
                    .collect::<Vec<_>>(),
                vec![
                    Path::new(""),
                    Path::new("lib"),
                    Path::new("lib/a"),
                    Path::new("lib/a/a.txt"),
                    Path::new("lib/a/lib"),
                    Path::new("lib/b"),
                    Path::new("lib/b/b.txt"),
                    Path::new("lib/b/lib"),
                ]
            );
        });

        // Renaming a circular symlink is observed as a rename on rescan.
        fs.rename(
            Path::new("/root/lib/a/lib"),
            Path::new("/root/lib/a/lib-2"),
            Default::default(),
        )
        .await
        .unwrap();
        executor.run_until_parked();
        tree.read_with(cx, |tree, _| {
            assert_eq!(
                tree.entries(false)
                    .map(|entry| entry.path.as_ref())
                    .collect::<Vec<_>>(),
                vec![
                    Path::new(""),
                    Path::new("lib"),
                    Path::new("lib/a"),
                    Path::new("lib/a/a.txt"),
                    Path::new("lib/a/lib-2"),
                    Path::new("lib/b"),
                    Path::new("lib/b/b.txt"),
                    Path::new("lib/b/lib"),
                ]
            );
        });
    }
3474
    // Verifies that gitignore rules — including rules from a `.gitignore`
    // located *above* the worktree root — are applied during both the initial
    // scan and subsequent rescans triggered by fs events.
    #[gpui::test]
    async fn test_rescan_with_gitignore(cx: &mut TestAppContext) {
        // The worktree is rooted at "tree"; the "ancestor-ignored-file*" rules
        // live in the parent directory's `.gitignore`, outside the worktree.
        let parent_dir = temp_tree(json!({
            ".gitignore": "ancestor-ignored-file1\nancestor-ignored-file2\n",
            "tree": {
                ".git": {},
                ".gitignore": "ignored-dir\n",
                "tracked-dir": {
                    "tracked-file1": "",
                    "ancestor-ignored-file1": "",
                },
                "ignored-dir": {
                    "ignored-file1": ""
                }
            }
        }));
        let dir = parent_dir.path().join("tree");

        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));

        let tree = Worktree::local(
            client,
            dir.as_path(),
            true,
            Arc::new(RealFs),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;
        tree.flush_fs_events(cx).await;
        // After the initial scan, both the ancestor's and the worktree's own
        // ignore rules must have been applied.
        cx.read(|cx| {
            let tree = tree.read(cx);
            assert!(
                !tree
                    .entry_for_path("tracked-dir/tracked-file1")
                    .unwrap()
                    .is_ignored
            );
            assert!(
                tree.entry_for_path("tracked-dir/ancestor-ignored-file1")
                    .unwrap()
                    .is_ignored
            );
            assert!(
                tree.entry_for_path("ignored-dir/ignored-file1")
                    .unwrap()
                    .is_ignored
            );
        });

        // Create new files on disk and let the watcher deliver the events;
        // the rescan must classify the new entries with the same rules.
        std::fs::write(dir.join("tracked-dir/tracked-file2"), "").unwrap();
        std::fs::write(dir.join("tracked-dir/ancestor-ignored-file2"), "").unwrap();
        std::fs::write(dir.join("ignored-dir/ignored-file2"), "").unwrap();
        tree.flush_fs_events(cx).await;
        cx.read(|cx| {
            let tree = tree.read(cx);
            assert!(
                !tree
                    .entry_for_path("tracked-dir/tracked-file2")
                    .unwrap()
                    .is_ignored
            );
            assert!(
                tree.entry_for_path("tracked-dir/ancestor-ignored-file2")
                    .unwrap()
                    .is_ignored
            );
            assert!(
                tree.entry_for_path("ignored-dir/ignored-file2")
                    .unwrap()
                    .is_ignored
            );
            // The `.git` directory itself is reported as ignored.
            assert!(tree.entry_for_path(".git").unwrap().is_ignored);
        });
    }
3553
    // Verifies the mapping from worktree paths to git repositories: nested
    // repositories take precedence, changes inside `.git` bump the repo's
    // scan id, and deleting `.git` removes the association.
    #[gpui::test]
    async fn test_git_repository_for_path(cx: &mut TestAppContext) {
        // `dir1` is a repository that contains a nested repository at
        // `dir1/deps/dep1`; `c.txt` lies outside any repository.
        let root = temp_tree(json!({
            "dir1": {
                ".git": {},
                "deps": {
                    "dep1": {
                        ".git": {},
                        "src": {
                            "a.txt": ""
                        }
                    }
                },
                "src": {
                    "b.txt": ""
                }
            },
            "c.txt": "",
        }));

        let http_client = FakeHttpClient::with_404_response();
        let client = cx.read(|cx| Client::new(http_client, cx));
        let tree = Worktree::local(
            client,
            root.path(),
            true,
            Arc::new(RealFs),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();

        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;
        tree.flush_fs_events(cx).await;

        tree.read_with(cx, |tree, _cx| {
            let tree = tree.as_local().unwrap();

            // A file outside every repository has no associated repo.
            assert!(tree.repo_for("c.txt".as_ref()).is_none());

            // A file in the outer repository maps to `dir1`'s `.git`.
            let entry = tree.repo_for("dir1/src/b.txt".as_ref()).unwrap();
            assert_eq!(entry.work_directory.0.as_ref(), Path::new("dir1"));
            assert_eq!(
                tree.entry_for_id(entry.dot_git_entry_id)
                    .unwrap()
                    .path
                    .as_ref(),
                Path::new("dir1/.git")
            );

            // A file inside the nested repository maps to the innermost repo,
            // not the enclosing one.
            let entry = tree.repo_for("dir1/deps/dep1/src/a.txt".as_ref()).unwrap();
            assert_eq!(entry.work_directory.deref(), Path::new("dir1/deps/dep1"));
            assert_eq!(
                tree.entry_for_id(entry.dot_git_entry_id)
                    .unwrap()
                    .path
                    .as_ref(),
                Path::new("dir1/deps/dep1/.git"),
            );
        });

        let original_scan_id = tree.read_with(cx, |tree, _cx| {
            let tree = tree.as_local().unwrap();
            let entry = tree.repo_for("dir1/src/b.txt".as_ref()).unwrap();
            entry.scan_id
        });

        // Touching a file inside `.git` must bump the repository's scan id.
        std::fs::write(root.path().join("dir1/.git/random_new_file"), "hello").unwrap();
        tree.flush_fs_events(cx).await;

        tree.read_with(cx, |tree, _cx| {
            let tree = tree.as_local().unwrap();
            let new_scan_id = {
                let entry = tree.repo_for("dir1/src/b.txt".as_ref()).unwrap();
                entry.scan_id
            };
            assert_ne!(
                original_scan_id, new_scan_id,
                "original {original_scan_id}, new {new_scan_id}"
            );
        });

        // Removing `.git` dissolves the repository association entirely.
        std::fs::remove_dir_all(root.path().join("dir1/.git")).unwrap();
        tree.flush_fs_events(cx).await;

        tree.read_with(cx, |tree, _cx| {
            let tree = tree.as_local().unwrap();

            assert!(tree.repo_for("dir1/src/b.txt".as_ref()).is_none());
        });
    }
3647
3648 #[test]
3649 fn test_changed_repos() {
3650 fn fake_entry(dot_git_id: usize, scan_id: usize) -> RepositoryEntry {
3651 RepositoryEntry {
3652 scan_id,
3653 dot_git_entry_id: ProjectEntryId(dot_git_id),
3654 work_directory: RepositoryWorkDirectory(
3655 Path::new(&format!("don't-care-{}", scan_id)).into(),
3656 ),
3657 branch: None,
3658 }
3659 }
3660
3661 let mut prev_repos = TreeMap::<RepositoryWorkDirectory, RepositoryEntry>::default();
3662 prev_repos.insert(
3663 RepositoryWorkDirectory(Path::new("don't-care-1").into()),
3664 fake_entry(1, 0),
3665 );
3666 prev_repos.insert(
3667 RepositoryWorkDirectory(Path::new("don't-care-2").into()),
3668 fake_entry(2, 0),
3669 );
3670 prev_repos.insert(
3671 RepositoryWorkDirectory(Path::new("don't-care-3").into()),
3672 fake_entry(3, 0),
3673 );
3674
3675 let mut new_repos = TreeMap::<RepositoryWorkDirectory, RepositoryEntry>::default();
3676 new_repos.insert(
3677 RepositoryWorkDirectory(Path::new("don't-care-4").into()),
3678 fake_entry(2, 1),
3679 );
3680 new_repos.insert(
3681 RepositoryWorkDirectory(Path::new("don't-care-5").into()),
3682 fake_entry(3, 0),
3683 );
3684 new_repos.insert(
3685 RepositoryWorkDirectory(Path::new("don't-care-6").into()),
3686 fake_entry(4, 0),
3687 );
3688
3689 let res = LocalWorktree::changed_repos(&prev_repos, &new_repos);
3690
3691 // Deletion retained
3692 assert!(res
3693 .iter()
3694 .find(|repo| repo.dot_git_entry_id.0 == 1 && repo.scan_id == 0)
3695 .is_some());
3696
3697 // Update retained
3698 assert!(res
3699 .iter()
3700 .find(|repo| repo.dot_git_entry_id.0 == 2 && repo.scan_id == 1)
3701 .is_some());
3702
3703 // Addition retained
3704 assert!(res
3705 .iter()
3706 .find(|repo| repo.dot_git_entry_id.0 == 4 && repo.scan_id == 0)
3707 .is_some());
3708
3709 // Nochange, not retained
3710 assert!(res
3711 .iter()
3712 .find(|repo| repo.dot_git_entry_id.0 == 3 && repo.scan_id == 0)
3713 .is_none());
3714 }
3715
3716 #[gpui::test]
3717 async fn test_write_file(cx: &mut TestAppContext) {
3718 let dir = temp_tree(json!({
3719 ".git": {},
3720 ".gitignore": "ignored-dir\n",
3721 "tracked-dir": {},
3722 "ignored-dir": {}
3723 }));
3724
3725 let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
3726
3727 let tree = Worktree::local(
3728 client,
3729 dir.path(),
3730 true,
3731 Arc::new(RealFs),
3732 Default::default(),
3733 &mut cx.to_async(),
3734 )
3735 .await
3736 .unwrap();
3737 cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
3738 .await;
3739 tree.flush_fs_events(cx).await;
3740
3741 tree.update(cx, |tree, cx| {
3742 tree.as_local().unwrap().write_file(
3743 Path::new("tracked-dir/file.txt"),
3744 "hello".into(),
3745 Default::default(),
3746 cx,
3747 )
3748 })
3749 .await
3750 .unwrap();
3751 tree.update(cx, |tree, cx| {
3752 tree.as_local().unwrap().write_file(
3753 Path::new("ignored-dir/file.txt"),
3754 "world".into(),
3755 Default::default(),
3756 cx,
3757 )
3758 })
3759 .await
3760 .unwrap();
3761
3762 tree.read_with(cx, |tree, _| {
3763 let tracked = tree.entry_for_path("tracked-dir/file.txt").unwrap();
3764 let ignored = tree.entry_for_path("ignored-dir/file.txt").unwrap();
3765 assert!(!tracked.is_ignored);
3766 assert!(ignored.is_ignored);
3767 });
3768 }
3769
    // Verifies that creating an entry while the initial scan may still be in
    // flight leaves the snapshot consistent; the run is repeated
    // (iterations = 30) to exercise different interleavings with the scanner.
    #[gpui::test(iterations = 30)]
    async fn test_create_directory_during_initial_scan(cx: &mut TestAppContext) {
        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));

        let fs = FakeFs::new(cx.background());
        fs.insert_tree(
            "/root",
            json!({
                "b": {},
                "c": {},
                "d": {},
            }),
        )
        .await;

        let tree = Worktree::local(
            client,
            "/root".as_ref(),
            true,
            fs,
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();

        // Baseline snapshot taken before waiting for the scan to complete.
        let mut snapshot1 = tree.update(cx, |tree, _| tree.as_local().unwrap().snapshot());

        // Create "a/e" (a directory, per the `true` flag) concurrently with
        // the background scan.
        let entry = tree
            .update(cx, |tree, cx| {
                tree.as_local_mut()
                    .unwrap()
                    .create_entry("a/e".as_ref(), true, cx)
            })
            .await
            .unwrap();
        assert!(entry.is_dir());

        cx.foreground().run_until_parked();
        tree.read_with(cx, |tree, _| {
            assert_eq!(tree.entry_for_path("a/e").unwrap().kind, EntryKind::Dir);
        });

        // Replaying the delta between the two snapshots onto the baseline
        // must reproduce the final state exactly.
        let snapshot2 = tree.update(cx, |tree, _| tree.as_local().unwrap().snapshot());
        let update = snapshot2.build_update(&snapshot1, 0, 0, true);
        snapshot1.apply_remote_update(update).unwrap();
        assert_eq!(snapshot1.to_vec(true), snapshot2.to_vec(true),);
    }
3818
    // Fuzz test: mutates the worktree while its initial scan is still running,
    // checking snapshot invariants and incremental-update round-trips after
    // each operation.
    #[gpui::test(iterations = 100)]
    async fn test_random_worktree_operations_during_initial_scan(
        cx: &mut TestAppContext,
        mut rng: StdRng,
    ) {
        // Both knobs are overridable from the environment for local fuzzing.
        let operations = env::var("OPERATIONS")
            .map(|o| o.parse().unwrap())
            .unwrap_or(5);
        let initial_entries = env::var("INITIAL_ENTRIES")
            .map(|o| o.parse().unwrap())
            .unwrap_or(20);

        // Seed the fake fs with a random tree.
        let root_dir = Path::new("/test");
        let fs = FakeFs::new(cx.background()) as Arc<dyn Fs>;
        fs.as_fake().insert_tree(root_dir, json!({})).await;
        for _ in 0..initial_entries {
            randomly_mutate_fs(&fs, root_dir, 1.0, &mut rng).await;
        }
        log::info!("generated initial tree");

        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
        let worktree = Worktree::local(
            client.clone(),
            root_dir,
            true,
            fs.clone(),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();

        // Baseline snapshot, deliberately captured before the scan completes.
        let mut snapshot = worktree.update(cx, |tree, _| tree.as_local().unwrap().snapshot());

        for _ in 0..operations {
            // Individual operations may legitimately fail (e.g. the random
            // target disappeared); log and continue.
            worktree
                .update(cx, |worktree, cx| {
                    randomly_mutate_worktree(worktree, &mut rng, cx)
                })
                .await
                .log_err();
            worktree.read_with(cx, |tree, _| {
                tree.as_local().unwrap().snapshot.check_invariants()
            });

            if rng.gen_bool(0.6) {
                // Replaying the computed delta onto the old snapshot must
                // reproduce the new snapshot exactly.
                let new_snapshot =
                    worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
                let update = new_snapshot.build_update(&snapshot, 0, 0, true);
                snapshot.apply_remote_update(update.clone()).unwrap();
                assert_eq!(
                    snapshot.to_vec(true),
                    new_snapshot.to_vec(true),
                    "incorrect snapshot after update {:?}",
                    update
                );
            }
        }

        // Let the initial scan finish, then re-check the invariants and the
        // final incremental update.
        worktree
            .update(cx, |tree, _| tree.as_local_mut().unwrap().scan_complete())
            .await;
        worktree.read_with(cx, |tree, _| {
            tree.as_local().unwrap().snapshot.check_invariants()
        });

        let new_snapshot = worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
        let update = new_snapshot.build_update(&snapshot, 0, 0, true);
        snapshot.apply_remote_update(update.clone()).unwrap();
        assert_eq!(
            snapshot.to_vec(true),
            new_snapshot.to_vec(true),
            "incorrect snapshot after update {:?}",
            update
        );
    }
3895
    // Fuzz test: applies random fs mutations with partially-flushed event
    // batches, verifying (1) `UpdatedEntries` events, (2) snapshot invariants,
    // (3) agreement with a fresh scan, and (4) incremental updates from
    // arbitrary historical snapshots.
    #[gpui::test(iterations = 100)]
    async fn test_random_worktree_changes(cx: &mut TestAppContext, mut rng: StdRng) {
        // Both knobs are overridable from the environment for local fuzzing.
        let operations = env::var("OPERATIONS")
            .map(|o| o.parse().unwrap())
            .unwrap_or(40);
        let initial_entries = env::var("INITIAL_ENTRIES")
            .map(|o| o.parse().unwrap())
            .unwrap_or(20);

        // Seed the fake fs with a random tree.
        let root_dir = Path::new("/test");
        let fs = FakeFs::new(cx.background()) as Arc<dyn Fs>;
        fs.as_fake().insert_tree(root_dir, json!({})).await;
        for _ in 0..initial_entries {
            randomly_mutate_fs(&fs, root_dir, 1.0, &mut rng).await;
        }
        log::info!("generated initial tree");

        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
        let worktree = Worktree::local(
            client.clone(),
            root_dir,
            true,
            fs.clone(),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();

        worktree
            .update(cx, |tree, _| tree.as_local_mut().unwrap().scan_complete())
            .await;

        // After the initial scan is complete, the `UpdatedEntries` event can
        // be used to follow along with all changes to the worktree's snapshot.
        worktree.update(cx, |tree, cx| {
            // `paths` mirrors the worktree's sorted path list; each event is
            // applied to the mirror and the result compared against the tree.
            let mut paths = tree
                .as_local()
                .unwrap()
                .paths()
                .cloned()
                .collect::<Vec<_>>();

            cx.subscribe(&worktree, move |tree, _, event, _| {
                if let Event::UpdatedEntries(changes) = event {
                    for (path, change_type) in changes.iter() {
                        let path = path.clone();
                        // Insertion point (or current position) of `path` in
                        // the sorted mirror.
                        let ix = match paths.binary_search(&path) {
                            Ok(ix) | Err(ix) => ix,
                        };
                        match change_type {
                            PathChange::Added => {
                                assert_ne!(paths.get(ix), Some(&path));
                                paths.insert(ix, path);
                            }
                            PathChange::Removed => {
                                assert_eq!(paths.get(ix), Some(&path));
                                paths.remove(ix);
                            }
                            PathChange::Updated => {
                                assert_eq!(paths.get(ix), Some(&path));
                            }
                            PathChange::AddedOrUpdated => {
                                if paths[ix] != path {
                                    paths.insert(ix, path);
                                }
                            }
                        }
                    }
                    let new_paths = tree.paths().cloned().collect::<Vec<_>>();
                    assert_eq!(paths, new_paths, "incorrect changes: {:?}", changes);
                }
            })
            .detach();
        });

        // Interleave fs mutations with partial flushes of the buffered fs
        // events, so the scanner observes changes in arbitrary batches.
        let mut snapshots = Vec::new();
        let mut mutations_len = operations;
        while mutations_len > 1 {
            randomly_mutate_fs(&fs, root_dir, 1.0, &mut rng).await;
            let buffered_event_count = fs.as_fake().buffered_event_count().await;
            if buffered_event_count > 0 && rng.gen_bool(0.3) {
                let len = rng.gen_range(0..=buffered_event_count);
                log::info!("flushing {} events", len);
                fs.as_fake().flush_events(len).await;
            } else {
                randomly_mutate_fs(&fs, root_dir, 0.6, &mut rng).await;
                mutations_len -= 1;
            }

            cx.foreground().run_until_parked();
            // Occasionally store an intermediate snapshot for the
            // incremental-update checks at the end of the test.
            if rng.gen_bool(0.2) {
                log::info!("storing snapshot {}", snapshots.len());
                let snapshot =
                    worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
                snapshots.push(snapshot);
            }
        }

        // Flush everything and let the worktree settle before verifying.
        log::info!("quiescing");
        fs.as_fake().flush_events(usize::MAX).await;
        cx.foreground().run_until_parked();
        let snapshot = worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
        snapshot.check_invariants();

        // The incrementally-maintained snapshot must match a fresh scan of
        // the same file system.
        {
            let new_worktree = Worktree::local(
                client.clone(),
                root_dir,
                true,
                fs.clone(),
                Default::default(),
                &mut cx.to_async(),
            )
            .await
            .unwrap();
            new_worktree
                .update(cx, |tree, _| tree.as_local_mut().unwrap().scan_complete())
                .await;
            let new_snapshot =
                new_worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
            assert_eq!(snapshot.to_vec(true), new_snapshot.to_vec(true));
        }

        // Every stored historical snapshot must be updatable to the final
        // state, with or without ignored entries included.
        for (i, mut prev_snapshot) in snapshots.into_iter().enumerate() {
            let include_ignored = rng.gen::<bool>();
            if !include_ignored {
                // Strip ignored entries from the historical snapshot so it
                // matches what a remote peer without ignored entries would
                // hold.
                let mut entries_by_path_edits = Vec::new();
                let mut entries_by_id_edits = Vec::new();
                for entry in prev_snapshot
                    .entries_by_id
                    .cursor::<()>()
                    .filter(|e| e.is_ignored)
                {
                    entries_by_path_edits.push(Edit::Remove(PathKey(entry.path.clone())));
                    entries_by_id_edits.push(Edit::Remove(entry.id));
                }

                prev_snapshot
                    .entries_by_path
                    .edit(entries_by_path_edits, &());
                prev_snapshot.entries_by_id.edit(entries_by_id_edits, &());
            }

            let update = snapshot.build_update(&prev_snapshot, 0, 0, include_ignored);
            prev_snapshot.apply_remote_update(update.clone()).unwrap();
            assert_eq!(
                prev_snapshot.to_vec(include_ignored),
                snapshot.to_vec(include_ignored),
                "wrong update for snapshot {i}. update: {:?}",
                update
            );
        }
    }
4050
    // Applies one random operation (delete, rename, or create/overwrite) to a
    // randomly chosen entry of the worktree, returning the resulting task.
    fn randomly_mutate_worktree(
        worktree: &mut Worktree,
        rng: &mut impl Rng,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<()>> {
        let worktree = worktree.as_local_mut().unwrap();
        let snapshot = worktree.snapshot();
        let entry = snapshot.entries(false).choose(rng).unwrap();

        match rng.gen_range(0_u32..100) {
            // ~1/3: delete the chosen entry (never the worktree root).
            0..=33 if entry.path.as_ref() != Path::new("") => {
                log::info!("deleting entry {:?} ({})", entry.path, entry.id.0);
                worktree.delete_entry(entry.id, cx).unwrap()
            }
            // ~1/3: rename the entry under some other entry's directory.
            // If the chosen entry is the root, both guards fail and control
            // falls through to the create/overwrite arm below.
            ..=66 if entry.path.as_ref() != Path::new("") => {
                let other_entry = snapshot.entries(false).choose(rng).unwrap();
                let new_parent_path = if other_entry.is_dir() {
                    other_entry.path.clone()
                } else {
                    other_entry.path.parent().unwrap().into()
                };
                let mut new_path = new_parent_path.join(gen_name(rng));
                // Avoid moving an entry into its own subtree; fall back to a
                // fresh top-level name instead.
                if new_path.starts_with(&entry.path) {
                    new_path = gen_name(rng).into();
                }

                log::info!(
                    "renaming entry {:?} ({}) to {:?}",
                    entry.path,
                    entry.id.0,
                    new_path
                );
                let task = worktree.rename_entry(entry.id, new_path, cx).unwrap();
                cx.foreground().spawn(async move {
                    task.await?;
                    Ok(())
                })
            }
            // Remainder: create a child under a directory, or overwrite a
            // file's contents with an empty string.
            _ => {
                let task = if entry.is_dir() {
                    let child_path = entry.path.join(gen_name(rng));
                    let is_dir = rng.gen_bool(0.3);
                    log::info!(
                        "creating {} at {:?}",
                        if is_dir { "dir" } else { "file" },
                        child_path,
                    );
                    worktree.create_entry(child_path, is_dir, cx)
                } else {
                    log::info!("overwriting file {:?} ({})", entry.path, entry.id.0);
                    worktree.write_file(entry.path.clone(), "".into(), Default::default(), cx)
                };
                cx.foreground().spawn(async move {
                    task.await?;
                    Ok(())
                })
            }
        }
    }
4110
    // Applies one random mutation to the fake file system beneath `root_path`:
    // with probability `insertion_probability` creates a new file or
    // directory; with 5% probability writes a random `.gitignore`; otherwise
    // renames or deletes an existing entry.
    async fn randomly_mutate_fs(
        fs: &Arc<dyn Fs>,
        root_path: &Path,
        insertion_probability: f64,
        rng: &mut impl Rng,
    ) {
        // Partition the existing paths under `root_path` into files and dirs
        // (the root itself always lands in `dirs`).
        let mut files = Vec::new();
        let mut dirs = Vec::new();
        for path in fs.as_fake().paths() {
            if path.starts_with(root_path) {
                if fs.is_file(&path).await {
                    files.push(path);
                } else {
                    dirs.push(path);
                }
            }
        }

        // Force an insertion when only the root directory exists, so the
        // rename/delete branch below always has something to pick from.
        if (files.is_empty() && dirs.len() == 1) || rng.gen_bool(insertion_probability) {
            let path = dirs.choose(rng).unwrap();
            let new_path = path.join(gen_name(rng));

            if rng.gen() {
                log::info!(
                    "creating dir {:?}",
                    new_path.strip_prefix(root_path).unwrap()
                );
                fs.create_dir(&new_path).await.unwrap();
            } else {
                log::info!(
                    "creating file {:?}",
                    new_path.strip_prefix(root_path).unwrap()
                );
                fs.create_file(&new_path, Default::default()).await.unwrap();
            }
        } else if rng.gen_bool(0.05) {
            // Write a `.gitignore` in a random directory, listing a random
            // subset of the files and subdirectories beneath it (paths are
            // written relative to the ignore file's directory).
            let ignore_dir_path = dirs.choose(rng).unwrap();
            let ignore_path = ignore_dir_path.join(&*GITIGNORE);

            let subdirs = dirs
                .iter()
                .filter(|d| d.starts_with(&ignore_dir_path))
                .cloned()
                .collect::<Vec<_>>();
            let subfiles = files
                .iter()
                .filter(|d| d.starts_with(&ignore_dir_path))
                .cloned()
                .collect::<Vec<_>>();
            let files_to_ignore = {
                let len = rng.gen_range(0..=subfiles.len());
                subfiles.choose_multiple(rng, len)
            };
            let dirs_to_ignore = {
                let len = rng.gen_range(0..subdirs.len());
                subdirs.choose_multiple(rng, len)
            };

            let mut ignore_contents = String::new();
            for path_to_ignore in files_to_ignore.chain(dirs_to_ignore) {
                writeln!(
                    ignore_contents,
                    "{}",
                    path_to_ignore
                        .strip_prefix(&ignore_dir_path)
                        .unwrap()
                        .to_str()
                        .unwrap()
                )
                .unwrap();
            }
            log::info!(
                "creating gitignore {:?} with contents:\n{}",
                ignore_path.strip_prefix(&root_path).unwrap(),
                ignore_contents
            );
            fs.save(
                &ignore_path,
                &ignore_contents.as_str().into(),
                Default::default(),
            )
            .await
            .unwrap();
        } else {
            // Pick an existing file or non-root directory to rename/delete
            // (`dirs[1..]` excludes the root, which must never be removed).
            let old_path = {
                let file_path = files.choose(rng);
                let dir_path = dirs[1..].choose(rng);
                file_path.into_iter().chain(dir_path).choose(rng).unwrap()
            };

            let is_rename = rng.gen();
            if is_rename {
                // Move the entry under a directory outside its own subtree.
                let new_path_parent = dirs
                    .iter()
                    .filter(|d| !d.starts_with(old_path))
                    .choose(rng)
                    .unwrap();

                // Sometimes remove the destination directory first, so the
                // rename replaces an existing entry wholesale.
                let overwrite_existing_dir =
                    !old_path.starts_with(&new_path_parent) && rng.gen_bool(0.3);
                let new_path = if overwrite_existing_dir {
                    fs.remove_dir(
                        &new_path_parent,
                        RemoveOptions {
                            recursive: true,
                            ignore_if_not_exists: true,
                        },
                    )
                    .await
                    .unwrap();
                    new_path_parent.to_path_buf()
                } else {
                    new_path_parent.join(gen_name(rng))
                };

                log::info!(
                    "renaming {:?} to {}{:?}",
                    old_path.strip_prefix(&root_path).unwrap(),
                    if overwrite_existing_dir {
                        "overwrite "
                    } else {
                        ""
                    },
                    new_path.strip_prefix(&root_path).unwrap()
                );
                fs.rename(
                    &old_path,
                    &new_path,
                    fs::RenameOptions {
                        overwrite: true,
                        ignore_if_exists: true,
                    },
                )
                .await
                .unwrap();
            } else if fs.is_file(&old_path).await {
                log::info!(
                    "deleting file {:?}",
                    old_path.strip_prefix(&root_path).unwrap()
                );
                fs.remove_file(old_path, Default::default()).await.unwrap();
            } else {
                log::info!(
                    "deleting dir {:?}",
                    old_path.strip_prefix(&root_path).unwrap()
                );
                fs.remove_dir(
                    &old_path,
                    RemoveOptions {
                        recursive: true,
                        ignore_if_not_exists: true,
                    },
                )
                .await
                .unwrap();
            }
        }
    }
4269
4270 fn gen_name(rng: &mut impl Rng) -> String {
4271 (0..6)
4272 .map(|_| rng.sample(rand::distributions::Alphanumeric))
4273 .map(char::from)
4274 .collect()
4275 }
4276
    impl LocalSnapshot {
        // Asserts the internal consistency of this snapshot (test-only).
        fn check_invariants(&self) {
            // `entries_by_path` and `entries_by_id` must index exactly the
            // same set of (path, id) pairs.
            assert_eq!(
                self.entries_by_path
                    .cursor::<()>()
                    .map(|e| (&e.path, e.id))
                    .collect::<Vec<_>>(),
                self.entries_by_id
                    .cursor::<()>()
                    .map(|e| (&e.path, e.id))
                    .collect::<collections::BTreeSet<_>>()
                    .into_iter()
                    .collect::<Vec<_>>(),
                "entries_by_path and entries_by_id are inconsistent"
            );

            // The `files` traversals must visit exactly the file entries, in
            // the same order as the path-ordered cursor; the non-ignored
            // traversal must additionally skip ignored entries.
            let mut files = self.files(true, 0);
            let mut visible_files = self.files(false, 0);
            for entry in self.entries_by_path.cursor::<()>() {
                if entry.is_file() {
                    assert_eq!(files.next().unwrap().inode, entry.inode);
                    if !entry.is_ignored {
                        assert_eq!(visible_files.next().unwrap().inode, entry.inode);
                    }
                }
            }

            // Both traversals must be exhausted once the cursor is done.
            assert!(files.next().is_none());
            assert!(visible_files.next().is_none());

            // Walk the tree via `child_entries`. Despite the variable name,
            // the insert-at-`ix` bookkeeping makes this a depth-first preorder
            // walk, which is asserted below to match the cursor's order.
            let mut bfs_paths = Vec::new();
            let mut stack = vec![Path::new("")];
            while let Some(path) = stack.pop() {
                bfs_paths.push(path);
                let ix = stack.len();
                for child_entry in self.child_entries(path) {
                    stack.insert(ix, &child_entry.path);
                }
            }

            let dfs_paths_via_iter = self
                .entries_by_path
                .cursor::<()>()
                .map(|e| e.path.as_ref())
                .collect::<Vec<_>>();
            assert_eq!(bfs_paths, dfs_paths_via_iter);

            // The public `entries` traversal must agree with the raw cursor.
            let dfs_paths_via_traversal = self
                .entries(true)
                .map(|e| e.path.as_ref())
                .collect::<Vec<_>>();
            assert_eq!(dfs_paths_via_traversal, dfs_paths_via_iter);

            // Every tracked `.gitignore` parent directory must still exist in
            // the tree, along with the `.gitignore` file itself.
            for ignore_parent_abs_path in self.ignores_by_parent_abs_path.keys() {
                let ignore_parent_path =
                    ignore_parent_abs_path.strip_prefix(&self.abs_path).unwrap();
                assert!(self.entry_for_path(&ignore_parent_path).is_some());
                assert!(self
                    .entry_for_path(ignore_parent_path.join(&*GITIGNORE))
                    .is_some());
            }
        }

        // Flattens the snapshot into sorted `(path, inode, is_ignored)`
        // tuples for comparisons in tests, optionally excluding ignored
        // entries (test-only).
        fn to_vec(&self, include_ignored: bool) -> Vec<(&Path, u64, bool)> {
            let mut paths = Vec::new();
            for entry in self.entries_by_path.cursor::<()>() {
                if include_ignored || !entry.is_ignored {
                    paths.push((entry.path.as_ref(), entry.inode, entry.is_ignored));
                }
            }
            paths.sort_by(|a, b| a.0.cmp(b.0));
            paths
        }
    }
4351}