1use super::{ignore::IgnoreStack, DiagnosticSummary};
2use crate::{copy_recursive, ProjectEntryId, RemoveOptions};
3use ::ignore::gitignore::{Gitignore, GitignoreBuilder};
4use anyhow::{anyhow, Context, Result};
5use client::{proto, Client};
6use clock::ReplicaId;
7use collections::{HashMap, VecDeque};
8use fs::{repository::GitRepository, Fs};
9use fs::{HomeDir, LineEnding};
10use futures::{
11 channel::{
12 mpsc::{self, UnboundedSender},
13 oneshot,
14 },
15 Stream, StreamExt,
16};
17use fuzzy::CharBag;
18use git::{DOT_GIT, GITIGNORE};
19use gpui::{
20 executor, AppContext, AsyncAppContext, Entity, ModelContext, ModelHandle, MutableAppContext,
21 Task,
22};
23use language::Unclipped;
24use language::{
25 proto::{deserialize_version, serialize_line_ending, serialize_version},
26 Buffer, DiagnosticEntry, PointUtf16, Rope,
27};
28use parking_lot::Mutex;
29use postage::{
30 prelude::{Sink as _, Stream as _},
31 watch,
32};
33
34use smol::channel::{self, Sender};
35use std::{
36 any::Any,
37 cmp::{self, Ordering},
38 convert::TryFrom,
39 ffi::{OsStr, OsString},
40 fmt,
41 future::Future,
42 mem,
43 ops::{Deref, DerefMut},
44 path::{Path, PathBuf},
45 sync::{atomic::AtomicUsize, Arc},
46 task::Poll,
47 time::{Duration, SystemTime},
48};
49use sum_tree::{Bias, Edit, SeekTarget, SumTree, TreeMap, TreeSet};
50use util::{ResultExt, TryFutureExt};
51
/// Identifier for a worktree, derived from the gpui model id of the
/// `Worktree` model that owns it.
#[derive(Copy, Clone, PartialEq, Eq, Debug, Hash, PartialOrd, Ord)]
pub struct WorktreeId(usize);
54
/// A directory tree that the project operates on: either one backed by the
/// local filesystem, or a replica of a collaborator's worktree.
#[allow(clippy::large_enum_variant)]
pub enum Worktree {
    Local(LocalWorktree),
    Remote(RemoteWorktree),
}
60
/// A worktree backed by the local filesystem, kept up to date by a
/// background scanner task.
pub struct LocalWorktree {
    // Foreground copy of the snapshot; refreshed from `background_snapshot`.
    snapshot: LocalSnapshot,
    // Snapshot mutated by the background scanner; shared via mutex.
    background_snapshot: Arc<Mutex<LocalSnapshot>>,
    last_scan_state_rx: watch::Receiver<ScanState>,
    _background_scanner_task: Option<Task<()>>,
    // Pending debounce task scheduled while the initial scan is running.
    poll_task: Option<Task<()>>,
    // Present while this worktree is shared with collaborators.
    share: Option<ShareState>,
    diagnostics: HashMap<Arc<Path>, Vec<DiagnosticEntry<Unclipped<PointUtf16>>>>,
    diagnostic_summaries: TreeMap<PathKey, DiagnosticSummary>,
    client: Arc<Client>,
    fs: Arc<dyn Fs>,
    visible: bool,
}
74
/// A replica of a worktree hosted by a remote collaborator, kept up to date
/// by applying `proto::UpdateWorktree` messages.
pub struct RemoteWorktree {
    pub snapshot: Snapshot,
    pub(crate) background_snapshot: Arc<Mutex<Snapshot>>,
    project_id: u64,
    client: Arc<Client>,
    // Taken (set to None) when the host disconnects.
    updates_tx: Option<UnboundedSender<proto::UpdateWorktree>>,
    // Waiters keyed by scan id, kept sorted; resolved once that scan is observed.
    snapshot_subscriptions: VecDeque<(usize, oneshot::Sender<()>)>,
    replica_id: ReplicaId,
    diagnostic_summaries: TreeMap<PathKey, DiagnosticSummary>,
    visible: bool,
    disconnected: bool,
}
87
/// An immutable view of a worktree's entries at a point in time.
#[derive(Clone)]
pub struct Snapshot {
    id: WorktreeId,
    abs_path: Arc<Path>,
    root_name: String,
    // Lowercased characters of `root_name`, used for fuzzy matching.
    root_char_bag: CharBag,
    entries_by_path: SumTree<Entry>,
    entries_by_id: SumTree<PathEntry>,
    // Incremented on every filesystem scan that mutates the snapshot.
    scan_id: usize,
    is_complete: bool,
}
99
/// A git repository discovered within the worktree.
#[derive(Clone)]
pub struct GitRepositoryEntry {
    pub(crate) repo: Arc<Mutex<dyn GitRepository>>,

    // Scan id of the worktree scan that last touched this repository.
    pub(crate) scan_id: usize,
    // Path to folder containing the .git file or directory
    pub(crate) content_path: Arc<Path>,
    // Path to the actual .git folder.
    // Note: if .git is a file, this points to the folder indicated by the .git file
    pub(crate) git_dir_path: Arc<Path>,
}
111
// Manual Debug impl: `dyn GitRepository` is not Debug, so the `repo` field
// is represented by a placeholder string instead.
impl std::fmt::Debug for GitRepositoryEntry {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("GitRepositoryEntry")
            .field("content_path", &self.content_path)
            .field("git_dir_path", &self.git_dir_path)
            .field("libgit_repository", &"LibGitRepository")
            .finish()
    }
}
121
/// Snapshot of a local worktree, extending [`Snapshot`] (via `Deref`) with
/// state that only exists for locally-scanned trees.
pub struct LocalSnapshot {
    // Parsed .gitignore per parent directory, tagged with the scan id that
    // last saw it.
    ignores_by_parent_abs_path: HashMap<Arc<Path>, (Arc<Gitignore>, usize)>,
    git_repositories: Vec<GitRepositoryEntry>,
    // Maps removed inode numbers to their old entry ids so that re-created
    // paths can keep a stable ProjectEntryId.
    removed_entry_ids: HashMap<u64, ProjectEntryId>,
    next_entry_id: Arc<AtomicUsize>,
    snapshot: Snapshot,
    // Count of entries per file extension.
    extension_counts: HashMap<OsString, usize>,
}
130
131impl Clone for LocalSnapshot {
132 fn clone(&self) -> Self {
133 Self {
134 ignores_by_parent_abs_path: self.ignores_by_parent_abs_path.clone(),
135 git_repositories: self.git_repositories.iter().cloned().collect(),
136 removed_entry_ids: self.removed_entry_ids.clone(),
137 next_entry_id: self.next_entry_id.clone(),
138 snapshot: self.snapshot.clone(),
139 extension_counts: self.extension_counts.clone(),
140 }
141 }
142}
143
144impl Deref for LocalSnapshot {
145 type Target = Snapshot;
146
147 fn deref(&self) -> &Self::Target {
148 &self.snapshot
149 }
150}
151
152impl DerefMut for LocalSnapshot {
153 fn deref_mut(&mut self) -> &mut Self::Target {
154 &mut self.snapshot
155 }
156}
157
#[derive(Clone, Debug)]
enum ScanState {
    /// No scan is in progress; the snapshot matches the filesystem.
    Idle,
    /// The worktree is performing its initial scan of the filesystem.
    Initializing,
    /// The worktree is updating in response to filesystem events.
    Updating,
    /// The scanner failed; the error is shared so the state stays `Clone`.
    Err(Arc<anyhow::Error>),
}
167
/// State held while a local worktree is shared with collaborators; dropping
/// it cancels the background tasks that stream updates to the server.
struct ShareState {
    project_id: u64,
    // Receives each new snapshot; the maintain task diffs and uploads it.
    snapshots_tx: watch::Sender<LocalSnapshot>,
    diagnostic_summaries_tx: mpsc::UnboundedSender<(Arc<Path>, DiagnosticSummary)>,
    _maintain_remote_snapshot: Task<Option<()>>,
    _maintain_remote_diagnostic_summaries: Task<()>,
}
175
/// Events emitted by a [`Worktree`] model.
pub enum Event {
    /// The set of entries in the snapshot changed.
    UpdatedEntries,
    /// The listed git repositories were added, removed, or rescanned.
    UpdatedGitRepositories(Vec<GitRepositoryEntry>),
}
180
// Registers `Worktree` as a gpui model that emits `Event`s.
impl Entity for Worktree {
    type Event = Event;
}
184
185impl Worktree {
    /// Creates a worktree rooted at `path` on the local filesystem and starts
    /// the background scanner task that keeps its snapshot in sync.
    pub async fn local(
        client: Arc<Client>,
        path: impl Into<Arc<Path>>,
        visible: bool,
        fs: Arc<dyn Fs>,
        next_entry_id: Arc<AtomicUsize>,
        cx: &mut AsyncAppContext,
    ) -> Result<ModelHandle<Self>> {
        let (tree, scan_states_tx) =
            LocalWorktree::create(client, path, visible, fs.clone(), next_entry_id, cx).await?;
        tree.update(cx, |tree, cx| {
            let tree = tree.as_local_mut().unwrap();
            let abs_path = tree.abs_path().clone();
            let background_snapshot = tree.background_snapshot.clone();
            let background = cx.background().clone();
            // The scanner task lives as long as the worktree; dropping the
            // worktree drops the task and stops scanning.
            tree._background_scanner_task = Some(cx.background().spawn(async move {
                // Debounce filesystem events with a 100ms latency window.
                let events = fs.watch(&abs_path, Duration::from_millis(100)).await;
                let scanner =
                    BackgroundScanner::new(background_snapshot, scan_states_tx, fs, background);
                scanner.run(events).await;
            }));
        });
        Ok(tree)
    }
210
    /// Creates a replica of a collaborator's worktree from its metadata.
    ///
    /// Spawns two tasks: a background task that applies incoming
    /// `UpdateWorktree` messages to the shared snapshot, and a foreground task
    /// that publishes each applied update to the model and resolves any
    /// snapshot subscriptions that have now been observed.
    pub fn remote(
        project_remote_id: u64,
        replica_id: ReplicaId,
        worktree: proto::WorktreeMetadata,
        client: Arc<Client>,
        cx: &mut MutableAppContext,
    ) -> ModelHandle<Self> {
        let remote_id = worktree.id;
        // Lowercase the root name for case-insensitive fuzzy matching.
        let root_char_bag: CharBag = worktree
            .root_name
            .chars()
            .map(|c| c.to_ascii_lowercase())
            .collect();
        let root_name = worktree.root_name.clone();
        let visible = worktree.visible;

        let abs_path = PathBuf::from(worktree.abs_path);
        let snapshot = Snapshot {
            id: WorktreeId(remote_id as usize),
            abs_path: Arc::from(abs_path.deref()),
            root_name,
            root_char_bag,
            entries_by_path: Default::default(),
            entries_by_id: Default::default(),
            scan_id: 0,
            // Entries arrive incrementally via updates; not complete yet.
            is_complete: false,
        };

        let (updates_tx, mut updates_rx) = mpsc::unbounded();
        let background_snapshot = Arc::new(Mutex::new(snapshot.clone()));
        let (mut snapshot_updated_tx, mut snapshot_updated_rx) = watch::channel();
        let worktree_handle = cx.add_model(|_: &mut ModelContext<Worktree>| {
            Worktree::Remote(RemoteWorktree {
                project_id: project_remote_id,
                replica_id,
                snapshot: snapshot.clone(),
                background_snapshot: background_snapshot.clone(),
                updates_tx: Some(updates_tx),
                snapshot_subscriptions: Default::default(),
                client: client.clone(),
                diagnostic_summaries: Default::default(),
                visible,
                disconnected: false,
            })
        });

        // Apply protocol updates off the main thread, then signal observers.
        cx.background()
            .spawn(async move {
                while let Some(update) = updates_rx.next().await {
                    if let Err(error) = background_snapshot.lock().apply_remote_update(update) {
                        log::error!("error applying worktree update: {}", error);
                    }
                    snapshot_updated_tx.send(()).await.ok();
                }
            })
            .detach();

        // On each applied update, refresh the foreground snapshot and wake
        // any `wait_for_snapshot` subscribers whose scan id is now observed.
        cx.spawn(|mut cx| {
            let this = worktree_handle.downgrade();
            async move {
                while (snapshot_updated_rx.recv().await).is_some() {
                    if let Some(this) = this.upgrade(&cx) {
                        this.update(&mut cx, |this, cx| {
                            this.poll_snapshot(cx);
                            let this = this.as_remote_mut().unwrap();
                            // Subscriptions are sorted by scan id, so resolve
                            // from the front until one is still pending.
                            while let Some((scan_id, _)) = this.snapshot_subscriptions.front() {
                                if this.observed_snapshot(*scan_id) {
                                    let (_, tx) = this.snapshot_subscriptions.pop_front().unwrap();
                                    let _ = tx.send(());
                                } else {
                                    break;
                                }
                            }
                        });
                    } else {
                        break;
                    }
                }
            }
        })
        .detach();

        worktree_handle
    }
295
296 pub fn as_local(&self) -> Option<&LocalWorktree> {
297 if let Worktree::Local(worktree) = self {
298 Some(worktree)
299 } else {
300 None
301 }
302 }
303
304 pub fn as_remote(&self) -> Option<&RemoteWorktree> {
305 if let Worktree::Remote(worktree) = self {
306 Some(worktree)
307 } else {
308 None
309 }
310 }
311
312 pub fn as_local_mut(&mut self) -> Option<&mut LocalWorktree> {
313 if let Worktree::Local(worktree) = self {
314 Some(worktree)
315 } else {
316 None
317 }
318 }
319
320 pub fn as_remote_mut(&mut self) -> Option<&mut RemoteWorktree> {
321 if let Worktree::Remote(worktree) = self {
322 Some(worktree)
323 } else {
324 None
325 }
326 }
327
328 pub fn is_local(&self) -> bool {
329 matches!(self, Worktree::Local(_))
330 }
331
332 pub fn is_remote(&self) -> bool {
333 !self.is_local()
334 }
335
336 pub fn snapshot(&self) -> Snapshot {
337 match self {
338 Worktree::Local(worktree) => worktree.snapshot().snapshot,
339 Worktree::Remote(worktree) => worktree.snapshot(),
340 }
341 }
342
343 pub fn scan_id(&self) -> usize {
344 match self {
345 Worktree::Local(worktree) => worktree.snapshot.scan_id,
346 Worktree::Remote(worktree) => worktree.snapshot.scan_id,
347 }
348 }
349
350 pub fn is_visible(&self) -> bool {
351 match self {
352 Worktree::Local(worktree) => worktree.visible,
353 Worktree::Remote(worktree) => worktree.visible,
354 }
355 }
356
357 pub fn replica_id(&self) -> ReplicaId {
358 match self {
359 Worktree::Local(_) => 0,
360 Worktree::Remote(worktree) => worktree.replica_id,
361 }
362 }
363
364 pub fn diagnostic_summaries(
365 &self,
366 ) -> impl Iterator<Item = (Arc<Path>, DiagnosticSummary)> + '_ {
367 match self {
368 Worktree::Local(worktree) => &worktree.diagnostic_summaries,
369 Worktree::Remote(worktree) => &worktree.diagnostic_summaries,
370 }
371 .iter()
372 .map(|(path, summary)| (path.0.clone(), *summary))
373 }
374
375 fn poll_snapshot(&mut self, cx: &mut ModelContext<Self>) {
376 match self {
377 Self::Local(worktree) => worktree.poll_snapshot(false, cx),
378 Self::Remote(worktree) => worktree.poll_snapshot(cx),
379 };
380 }
381
382 pub fn abs_path(&self) -> Arc<Path> {
383 match self {
384 Worktree::Local(worktree) => worktree.abs_path.clone(),
385 Worktree::Remote(worktree) => worktree.abs_path.clone(),
386 }
387 }
388}
389
390impl LocalWorktree {
    /// Builds the `LocalWorktree` model and its initial snapshot.
    ///
    /// Returns the model handle together with the sender the background
    /// scanner uses to report [`ScanState`] transitions. Does not start the
    /// scanner itself; see [`Worktree::local`].
    async fn create(
        client: Arc<Client>,
        path: impl Into<Arc<Path>>,
        visible: bool,
        fs: Arc<dyn Fs>,
        next_entry_id: Arc<AtomicUsize>,
        cx: &mut AsyncAppContext,
    ) -> Result<(ModelHandle<Worktree>, UnboundedSender<ScanState>)> {
        let abs_path = path.into();
        // The root entry's worktree-relative path is always empty.
        let path: Arc<Path> = Arc::from(Path::new(""));

        // After determining whether the root entry is a file or a directory, populate the
        // snapshot's "root name", which will be used for the purpose of fuzzy matching.
        let root_name = abs_path
            .file_name()
            .map_or(String::new(), |f| f.to_string_lossy().to_string());
        let root_char_bag = root_name.chars().map(|c| c.to_ascii_lowercase()).collect();
        let metadata = fs
            .metadata(&abs_path)
            .await
            .context("failed to stat worktree path")?;

        let (scan_states_tx, mut scan_states_rx) = mpsc::unbounded();
        let (mut last_scan_state_tx, last_scan_state_rx) =
            watch::channel_with(ScanState::Initializing);
        let tree = cx.add_model(move |cx: &mut ModelContext<Worktree>| {
            let mut snapshot = LocalSnapshot {
                ignores_by_parent_abs_path: Default::default(),
                git_repositories: Default::default(),
                removed_entry_ids: Default::default(),
                next_entry_id,
                snapshot: Snapshot {
                    id: WorktreeId::from_usize(cx.model_id()),
                    abs_path,
                    root_name: root_name.clone(),
                    root_char_bag,
                    entries_by_path: Default::default(),
                    entries_by_id: Default::default(),
                    scan_id: 0,
                    is_complete: true,
                },
                extension_counts: Default::default(),
            };
            // Seed the snapshot with the root entry if the path exists.
            if let Some(metadata) = metadata {
                let entry = Entry::new(
                    path,
                    &metadata,
                    &snapshot.next_entry_id,
                    snapshot.root_char_bag,
                );
                snapshot.insert_entry(entry, fs.as_ref());
            }

            let tree = Self {
                snapshot: snapshot.clone(),
                background_snapshot: Arc::new(Mutex::new(snapshot)),
                last_scan_state_rx,
                _background_scanner_task: None,
                share: None,
                poll_task: None,
                diagnostics: Default::default(),
                diagnostic_summaries: Default::default(),
                client,
                fs,
                visible,
            };

            // Forward scan-state changes into the watch channel and refresh
            // the foreground snapshot on every transition.
            cx.spawn_weak(|this, mut cx| async move {
                while let Some(scan_state) = scan_states_rx.next().await {
                    if let Some(this) = this.upgrade(&cx) {
                        last_scan_state_tx.blocking_send(scan_state).ok();
                        this.update(&mut cx, |this, cx| this.poll_snapshot(cx));
                    } else {
                        break;
                    }
                }
            })
            .detach();

            Worktree::Local(tree)
        });

        Ok((tree, scan_states_tx))
    }
475
476 pub fn contains_abs_path(&self, path: &Path) -> bool {
477 path.starts_with(&self.abs_path)
478 }
479
480 fn absolutize(&self, path: &Path) -> PathBuf {
481 if path.file_name().is_some() {
482 self.abs_path.join(path)
483 } else {
484 self.abs_path.to_path_buf()
485 }
486 }
487
    /// Loads the file at `path` into a new [`Buffer`] model, including its
    /// git diff base, and computes the initial diff.
    pub(crate) fn load_buffer(
        &mut self,
        path: &Path,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<ModelHandle<Buffer>>> {
        let path = Arc::from(path);
        cx.spawn(move |this, mut cx| async move {
            let (file, contents, diff_base) = this
                .update(&mut cx, |t, cx| t.as_local().unwrap().load(&path, cx))
                .await?;
            Ok(cx.add_model(|cx| {
                // Replica id 0: buffers originate on the host.
                let mut buffer = Buffer::from_file(0, contents, diff_base, Arc::new(file), cx);
                buffer.git_diff_recalc(cx);
                buffer
            }))
        })
    }
505
506 pub fn diagnostics_for_path(
507 &self,
508 path: &Path,
509 ) -> Option<Vec<DiagnosticEntry<Unclipped<PointUtf16>>>> {
510 self.diagnostics.get(path).cloned()
511 }
512
513 pub fn update_diagnostics(
514 &mut self,
515 language_server_id: usize,
516 worktree_path: Arc<Path>,
517 diagnostics: Vec<DiagnosticEntry<Unclipped<PointUtf16>>>,
518 _: &mut ModelContext<Worktree>,
519 ) -> Result<bool> {
520 self.diagnostics.remove(&worktree_path);
521 let old_summary = self
522 .diagnostic_summaries
523 .remove(&PathKey(worktree_path.clone()))
524 .unwrap_or_default();
525 let new_summary = DiagnosticSummary::new(language_server_id, &diagnostics);
526 if !new_summary.is_empty() {
527 self.diagnostic_summaries
528 .insert(PathKey(worktree_path.clone()), new_summary);
529 self.diagnostics.insert(worktree_path.clone(), diagnostics);
530 }
531
532 let updated = !old_summary.is_empty() || !new_summary.is_empty();
533 if updated {
534 if let Some(share) = self.share.as_ref() {
535 let _ = share
536 .diagnostic_summaries_tx
537 .unbounded_send((worktree_path.clone(), new_summary));
538 }
539 }
540
541 Ok(updated)
542 }
543
    /// Refreshes the foreground snapshot from the background scanner's copy.
    ///
    /// While the initial scan is still running, schedules itself to poll
    /// again after a short delay; once idle, a single refresh suffices. When
    /// `force` is set, the snapshot is copied even mid-scan.
    fn poll_snapshot(&mut self, force: bool, cx: &mut ModelContext<Worktree>) {
        // Cancel any previously scheduled poll; we are polling now.
        self.poll_task.take();

        match self.scan_state() {
            ScanState::Idle => {
                let new_snapshot = self.background_snapshot.lock().clone();
                let updated_repos = Self::changed_repos(
                    &self.snapshot.git_repositories,
                    &new_snapshot.git_repositories,
                );
                self.snapshot = new_snapshot;

                // Push the fresh snapshot to collaborators if shared.
                if let Some(share) = self.share.as_mut() {
                    *share.snapshots_tx.borrow_mut() = self.snapshot.clone();
                }

                cx.emit(Event::UpdatedEntries);

                if !updated_repos.is_empty() {
                    cx.emit(Event::UpdatedGitRepositories(updated_repos));
                }
            }

            ScanState::Initializing => {
                let is_fake_fs = self.fs.is_fake();

                let new_snapshot = self.background_snapshot.lock().clone();
                let updated_repos = Self::changed_repos(
                    &self.snapshot.git_repositories,
                    &new_snapshot.git_repositories,
                );
                self.snapshot = new_snapshot;

                // Keep polling while the initial scan makes progress. Tests
                // use a random delay instead of a real timer.
                self.poll_task = Some(cx.spawn_weak(|this, mut cx| async move {
                    if is_fake_fs {
                        #[cfg(any(test, feature = "test-support"))]
                        cx.background().simulate_random_delay().await;
                    } else {
                        smol::Timer::after(Duration::from_millis(100)).await;
                    }
                    if let Some(this) = this.upgrade(&cx) {
                        this.update(&mut cx, |this, cx| this.poll_snapshot(cx));
                    }
                }));

                cx.emit(Event::UpdatedEntries);

                if !updated_repos.is_empty() {
                    cx.emit(Event::UpdatedGitRepositories(updated_repos));
                }
            }

            // Updating / Err: only refresh when explicitly forced.
            _ => {
                if force {
                    self.snapshot = self.background_snapshot.lock().clone();
                }
            }
        }

        cx.notify();
    }
605
606 fn changed_repos(
607 old_repos: &[GitRepositoryEntry],
608 new_repos: &[GitRepositoryEntry],
609 ) -> Vec<GitRepositoryEntry> {
610 fn diff<'a>(
611 a: &'a [GitRepositoryEntry],
612 b: &'a [GitRepositoryEntry],
613 updated: &mut HashMap<&'a Path, GitRepositoryEntry>,
614 ) {
615 for a_repo in a {
616 let matched = b.iter().find(|b_repo| {
617 a_repo.git_dir_path == b_repo.git_dir_path && a_repo.scan_id == b_repo.scan_id
618 });
619
620 if matched.is_none() {
621 updated.insert(a_repo.git_dir_path.as_ref(), a_repo.clone());
622 }
623 }
624 }
625
626 let mut updated = HashMap::<&Path, GitRepositoryEntry>::default();
627
628 diff(old_repos, new_repos, &mut updated);
629 diff(new_repos, old_repos, &mut updated);
630
631 updated.into_values().collect()
632 }
633
634 pub fn scan_complete(&self) -> impl Future<Output = ()> {
635 let mut scan_state_rx = self.last_scan_state_rx.clone();
636 async move {
637 let mut scan_state = Some(scan_state_rx.borrow().clone());
638 while let Some(ScanState::Initializing | ScanState::Updating) = scan_state {
639 scan_state = scan_state_rx.recv().await;
640 }
641 }
642 }
643
644 fn scan_state(&self) -> ScanState {
645 self.last_scan_state_rx.borrow().clone()
646 }
647
648 pub fn snapshot(&self) -> LocalSnapshot {
649 self.snapshot.clone()
650 }
651
652 pub fn metadata_proto(&self) -> proto::WorktreeMetadata {
653 proto::WorktreeMetadata {
654 id: self.id().to_proto(),
655 root_name: self.root_name().to_string(),
656 visible: self.visible,
657 abs_path: self.abs_path().as_os_str().to_string_lossy().into(),
658 }
659 }
660
    /// Loads the text of `path` from disk along with its git index content
    /// (the diff base), and refreshes the snapshot entry for the file.
    ///
    /// Returns the [`File`] handle, the file contents, and the diff base
    /// (`None` when the file is not tracked by a repository).
    fn load(
        &self,
        path: &Path,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<(File, String, Option<String>)>> {
        let handle = cx.handle();
        let path = Arc::from(path);
        let abs_path = self.absolutize(&path);
        let fs = self.fs.clone();
        let snapshot = self.snapshot();

        cx.spawn(|this, mut cx| async move {
            let text = fs.load(&abs_path).await?;

            // Read the index text for the file's repo, if it belongs to one.
            let diff_base = if let Some(repo) = snapshot.repo_for(&path) {
                if let Ok(repo_relative) = path.strip_prefix(repo.content_path) {
                    let repo_relative = repo_relative.to_owned();
                    cx.background()
                        .spawn(async move { repo.repo.lock().load_index_text(&repo_relative) })
                        .await
                } else {
                    None
                }
            } else {
                None
            };

            // Eagerly populate the snapshot with an updated entry for the loaded file
            let entry = this
                .update(&mut cx, |this, cx| {
                    this.as_local()
                        .unwrap()
                        .refresh_entry(path, abs_path, None, cx)
                })
                .await?;

            Ok((
                File {
                    entry_id: entry.id,
                    worktree: handle,
                    path: entry.path,
                    mtime: entry.mtime,
                    is_local: true,
                    is_deleted: false,
                },
                text,
                diff_base,
            ))
        })
    }
711
    /// Writes the buffer's contents to `path` and notifies the buffer of the
    /// save (new file handle, mtime, and fingerprint).
    pub fn save_buffer_as(
        &self,
        buffer_handle: ModelHandle<Buffer>,
        path: impl Into<Arc<Path>>,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<()>> {
        let buffer = buffer_handle.read(cx);
        // Capture text/version before the async write so the fingerprint and
        // version reported to `did_save` match what was actually written.
        let text = buffer.as_rope().clone();
        let fingerprint = text.fingerprint();
        let version = buffer.version();
        let save = self.write_file(path, text, buffer.line_ending(), cx);
        let handle = cx.handle();
        cx.as_mut().spawn(|mut cx| async move {
            let entry = save.await?;
            let file = File {
                entry_id: entry.id,
                worktree: handle,
                path: entry.path,
                mtime: entry.mtime,
                is_local: true,
                is_deleted: false,
            };

            buffer_handle.update(&mut cx, |buffer, cx| {
                buffer.did_save(version, fingerprint, file.mtime, Some(Arc::new(file)), cx);
            });

            Ok(())
        })
    }
742
743 pub fn create_entry(
744 &self,
745 path: impl Into<Arc<Path>>,
746 is_dir: bool,
747 cx: &mut ModelContext<Worktree>,
748 ) -> Task<Result<Entry>> {
749 self.write_entry_internal(
750 path,
751 if is_dir {
752 None
753 } else {
754 Some(Default::default())
755 },
756 cx,
757 )
758 }
759
760 pub fn write_file(
761 &self,
762 path: impl Into<Arc<Path>>,
763 text: Rope,
764 line_ending: LineEnding,
765 cx: &mut ModelContext<Worktree>,
766 ) -> Task<Result<Entry>> {
767 self.write_entry_internal(path, Some((text, line_ending)), cx)
768 }
769
    /// Deletes the file or directory for `entry_id` from disk and from the
    /// snapshot. Returns `None` if no such entry exists.
    pub fn delete_entry(
        &self,
        entry_id: ProjectEntryId,
        cx: &mut ModelContext<Worktree>,
    ) -> Option<Task<Result<()>>> {
        let entry = self.entry_for_id(entry_id)?.clone();
        let abs_path = self.absolutize(&entry.path);
        let delete = cx.background().spawn({
            let fs = self.fs.clone();
            let abs_path = abs_path;
            async move {
                if entry.is_file() {
                    fs.remove_file(&abs_path, Default::default()).await
                } else {
                    // Directories are removed recursively.
                    fs.remove_dir(
                        &abs_path,
                        RemoveOptions {
                            recursive: true,
                            ignore_if_not_exists: false,
                        },
                    )
                    .await
                }
            }
        });

        Some(cx.spawn(|this, mut cx| async move {
            delete.await?;
            this.update(&mut cx, |this, cx| {
                let this = this.as_local_mut().unwrap();
                // Update the background snapshot eagerly rather than waiting
                // for the filesystem watcher to observe the deletion.
                {
                    let mut snapshot = this.background_snapshot.lock();
                    snapshot.delete_entry(entry_id);
                }
                this.poll_snapshot(true, cx);
            });
            Ok(())
        }))
    }
809
    /// Renames the entry for `entry_id` to `new_path` on disk, then refreshes
    /// the snapshot entry. Returns `None` if no such entry exists.
    pub fn rename_entry(
        &self,
        entry_id: ProjectEntryId,
        new_path: impl Into<Arc<Path>>,
        cx: &mut ModelContext<Worktree>,
    ) -> Option<Task<Result<Entry>>> {
        let old_path = self.entry_for_id(entry_id)?.path.clone();
        let new_path = new_path.into();
        let abs_old_path = self.absolutize(&old_path);
        let abs_new_path = self.absolutize(&new_path);
        let rename = cx.background().spawn({
            let fs = self.fs.clone();
            let abs_new_path = abs_new_path.clone();
            async move {
                fs.rename(&abs_old_path, &abs_new_path, Default::default())
                    .await
            }
        });

        Some(cx.spawn(|this, mut cx| async move {
            rename.await?;
            // Passing `old_path` makes refresh_entry drop the old snapshot
            // entry while inserting the new one.
            let entry = this
                .update(&mut cx, |this, cx| {
                    this.as_local_mut().unwrap().refresh_entry(
                        new_path.clone(),
                        abs_new_path,
                        Some(old_path),
                        cx,
                    )
                })
                .await?;
            Ok(entry)
        }))
    }
844
    /// Recursively copies the entry for `entry_id` to `new_path`, then
    /// refreshes the snapshot entry. Returns `None` if no such entry exists.
    pub fn copy_entry(
        &self,
        entry_id: ProjectEntryId,
        new_path: impl Into<Arc<Path>>,
        cx: &mut ModelContext<Worktree>,
    ) -> Option<Task<Result<Entry>>> {
        let old_path = self.entry_for_id(entry_id)?.path.clone();
        let new_path = new_path.into();
        let abs_old_path = self.absolutize(&old_path);
        let abs_new_path = self.absolutize(&new_path);
        let copy = cx.background().spawn({
            let fs = self.fs.clone();
            let abs_new_path = abs_new_path.clone();
            async move {
                copy_recursive(
                    fs.as_ref(),
                    &abs_old_path,
                    &abs_new_path,
                    Default::default(),
                )
                .await
            }
        });

        Some(cx.spawn(|this, mut cx| async move {
            copy.await?;
            // Unlike rename, the source entry is kept: no old path to remove.
            let entry = this
                .update(&mut cx, |this, cx| {
                    this.as_local_mut().unwrap().refresh_entry(
                        new_path.clone(),
                        abs_new_path,
                        None,
                        cx,
                    )
                })
                .await?;
            Ok(entry)
        }))
    }
884
    /// Shared implementation of entry creation: writes a file (when
    /// `text_if_file` is `Some`) or creates a directory, then refreshes the
    /// snapshot entry for the path.
    fn write_entry_internal(
        &self,
        path: impl Into<Arc<Path>>,
        text_if_file: Option<(Rope, LineEnding)>,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<Entry>> {
        let path = path.into();
        let abs_path = self.absolutize(&path);
        let write = cx.background().spawn({
            let fs = self.fs.clone();
            let abs_path = abs_path.clone();
            async move {
                if let Some((text, line_ending)) = text_if_file {
                    fs.save(&abs_path, &text, line_ending).await
                } else {
                    fs.create_dir(&abs_path).await
                }
            }
        });

        cx.spawn(|this, mut cx| async move {
            write.await?;
            let entry = this
                .update(&mut cx, |this, cx| {
                    this.as_local_mut()
                        .unwrap()
                        .refresh_entry(path, abs_path, None, cx)
                })
                .await?;
            Ok(entry)
        })
    }
917
    /// Re-stats `abs_path` and upserts its entry into the background
    /// snapshot, removing `old_path`'s entry first when provided (rename).
    ///
    /// Fails if the path no longer exists or the worktree has been dropped.
    fn refresh_entry(
        &self,
        path: Arc<Path>,
        abs_path: PathBuf,
        old_path: Option<Arc<Path>>,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<Entry>> {
        let fs = self.fs.clone();
        let root_char_bag;
        let next_entry_id;
        // Copy what we need out of the background snapshot up front so the
        // lock is not held across the await points below.
        {
            let snapshot = self.background_snapshot.lock();
            root_char_bag = snapshot.root_char_bag;
            next_entry_id = snapshot.next_entry_id.clone();
        }
        cx.spawn_weak(|this, mut cx| async move {
            let metadata = fs
                .metadata(&abs_path)
                .await?
                .ok_or_else(|| anyhow!("could not read saved file metadata"))?;
            let this = this
                .upgrade(&cx)
                .ok_or_else(|| anyhow!("worktree was dropped"))?;
            this.update(&mut cx, |this, cx| {
                let this = this.as_local_mut().unwrap();
                let inserted_entry;
                {
                    let mut snapshot = this.background_snapshot.lock();
                    let mut entry = Entry::new(path, &metadata, &next_entry_id, root_char_bag);
                    // Recompute ignore status; the file may have moved into
                    // or out of an ignored directory.
                    entry.is_ignored = snapshot
                        .ignore_stack_for_abs_path(&abs_path, entry.is_dir())
                        .is_abs_path_ignored(&abs_path, entry.is_dir());
                    if let Some(old_path) = old_path {
                        snapshot.remove_path(&old_path);
                    }
                    inserted_entry = snapshot.insert_entry(entry, fs.as_ref());
                    snapshot.scan_id += 1;
                }
                this.poll_snapshot(true, cx);
                Ok(inserted_entry)
            })
        })
    }
961
    /// Starts sharing this worktree with collaborators on `project_id`.
    ///
    /// Spawns one task that uploads the initial snapshot and then streams
    /// incremental snapshot diffs, and another that streams diagnostic
    /// summary updates. The returned task resolves once the initial snapshot
    /// has been sent (or fails if that send fails). Idempotent: sharing an
    /// already-shared worktree resolves immediately.
    pub fn share(&mut self, project_id: u64, cx: &mut ModelContext<Worktree>) -> Task<Result<()>> {
        let (share_tx, share_rx) = oneshot::channel();

        if self.share.is_some() {
            let _ = share_tx.send(Ok(()));
        } else {
            let (snapshots_tx, mut snapshots_rx) = watch::channel_with(self.snapshot());
            let worktree_id = cx.model_id() as u64;

            let maintain_remote_snapshot = cx.background().spawn({
                let rpc = self.client.clone();
                async move {
                    // The first received snapshot is sent in full; subsequent
                    // snapshots are diffed against the previous one.
                    let mut prev_snapshot = match snapshots_rx.recv().await {
                        Some(snapshot) => {
                            let update = proto::UpdateWorktree {
                                project_id,
                                worktree_id,
                                abs_path: snapshot.abs_path().to_string_lossy().into(),
                                root_name: snapshot.root_name().to_string(),
                                updated_entries: snapshot
                                    .entries_by_path
                                    .iter()
                                    .map(Into::into)
                                    .collect(),
                                removed_entries: Default::default(),
                                scan_id: snapshot.scan_id as u64,
                                is_last_update: true,
                            };
                            if let Err(error) = send_worktree_update(&rpc, update).await {
                                let _ = share_tx.send(Err(error));
                                return Err(anyhow!("failed to send initial update worktree"));
                            } else {
                                let _ = share_tx.send(Ok(()));
                                snapshot
                            }
                        }
                        None => {
                            share_tx
                                .send(Err(anyhow!("worktree dropped before share completed")))
                                .ok();
                            return Err(anyhow!("failed to send initial update worktree"));
                        }
                    };

                    while let Some(snapshot) = snapshots_rx.recv().await {
                        send_worktree_update(
                            &rpc,
                            snapshot.build_update(&prev_snapshot, project_id, worktree_id, true),
                        )
                        .await?;
                        prev_snapshot = snapshot;
                    }

                    Ok::<_, anyhow::Error>(())
                }
                .log_err()
            });

            let (diagnostic_summaries_tx, mut diagnostic_summaries_rx) = mpsc::unbounded();
            // Seed the channel with the current summaries so collaborators
            // receive pre-existing diagnostics, not just future updates.
            for (path, summary) in self.diagnostic_summaries.iter() {
                let _ = diagnostic_summaries_tx.unbounded_send((path.0.clone(), summary.clone()));
            }
            let maintain_remote_diagnostic_summaries = cx.background().spawn({
                let rpc = self.client.clone();
                async move {
                    while let Some((path, summary)) = diagnostic_summaries_rx.next().await {
                        rpc.request(proto::UpdateDiagnosticSummary {
                            project_id,
                            worktree_id,
                            summary: Some(summary.to_proto(&path)),
                        })
                        .await
                        .log_err();
                    }
                }
            });

            self.share = Some(ShareState {
                project_id,
                snapshots_tx,
                diagnostic_summaries_tx,
                _maintain_remote_snapshot: maintain_remote_snapshot,
                _maintain_remote_diagnostic_summaries: maintain_remote_diagnostic_summaries,
            });
        }

        cx.foreground().spawn(async move {
            share_rx
                .await
                .unwrap_or_else(|_| Err(anyhow!("share ended")))
        })
    }
1054
1055 pub fn unshare(&mut self) {
1056 self.share.take();
1057 }
1058
1059 pub fn is_shared(&self) -> bool {
1060 self.share.is_some()
1061 }
1062}
1063
1064impl RemoteWorktree {
1065 fn snapshot(&self) -> Snapshot {
1066 self.snapshot.clone()
1067 }
1068
1069 fn poll_snapshot(&mut self, cx: &mut ModelContext<Worktree>) {
1070 self.snapshot = self.background_snapshot.lock().clone();
1071 cx.emit(Event::UpdatedEntries);
1072 cx.notify();
1073 }
1074
1075 pub fn disconnected_from_host(&mut self) {
1076 self.updates_tx.take();
1077 self.snapshot_subscriptions.clear();
1078 self.disconnected = true;
1079 }
1080
1081 pub fn update_from_remote(&mut self, update: proto::UpdateWorktree) {
1082 if let Some(updates_tx) = &self.updates_tx {
1083 updates_tx
1084 .unbounded_send(update)
1085 .expect("consumer runs to completion");
1086 }
1087 }
1088
1089 fn observed_snapshot(&self, scan_id: usize) -> bool {
1090 self.scan_id > scan_id || (self.scan_id == scan_id && self.is_complete)
1091 }
1092
    /// Returns a future that resolves once the scan with `scan_id` has been
    /// fully observed. Errors if the worktree disconnects first.
    fn wait_for_snapshot(&mut self, scan_id: usize) -> impl Future<Output = Result<()>> {
        let (tx, rx) = oneshot::channel();
        if self.observed_snapshot(scan_id) {
            // Already observed: resolve immediately.
            let _ = tx.send(());
        } else if self.disconnected {
            // Dropping the sender makes `rx.await` fail below.
            drop(tx);
        } else {
            // Insert keeping `snapshot_subscriptions` sorted by scan id, so
            // the resolver in `Worktree::remote` can pop from the front.
            match self
                .snapshot_subscriptions
                .binary_search_by_key(&scan_id, |probe| probe.0)
            {
                Ok(ix) | Err(ix) => self.snapshot_subscriptions.insert(ix, (scan_id, tx)),
            }
        }

        async move {
            rx.await?;
            Ok(())
        }
    }
1113
1114 pub fn update_diagnostic_summary(
1115 &mut self,
1116 path: Arc<Path>,
1117 summary: &proto::DiagnosticSummary,
1118 ) {
1119 let summary = DiagnosticSummary {
1120 language_server_id: summary.language_server_id as usize,
1121 error_count: summary.error_count as usize,
1122 warning_count: summary.warning_count as usize,
1123 };
1124 if summary.is_empty() {
1125 self.diagnostic_summaries.remove(&PathKey(path));
1126 } else {
1127 self.diagnostic_summaries.insert(PathKey(path), summary);
1128 }
1129 }
1130
    /// Inserts an entry received from the host once the snapshot for
    /// `scan_id` has been observed, keeping foreground and background
    /// snapshots in sync.
    pub fn insert_entry(
        &mut self,
        entry: proto::Entry,
        scan_id: usize,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<Entry>> {
        let wait_for_snapshot = self.wait_for_snapshot(scan_id);
        cx.spawn(|this, mut cx| async move {
            wait_for_snapshot.await?;
            this.update(&mut cx, |worktree, _| {
                let worktree = worktree.as_remote_mut().unwrap();
                let mut snapshot = worktree.background_snapshot.lock();
                let entry = snapshot.insert_entry(entry);
                worktree.snapshot = snapshot.clone();
                entry
            })
        })
    }
1149
    /// Deletes the entry with `id` once the snapshot for `scan_id` has been
    /// observed, keeping foreground and background snapshots in sync.
    pub(crate) fn delete_entry(
        &mut self,
        id: ProjectEntryId,
        scan_id: usize,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<()>> {
        let wait_for_snapshot = self.wait_for_snapshot(scan_id);
        cx.spawn(|this, mut cx| async move {
            wait_for_snapshot.await?;
            this.update(&mut cx, |worktree, _| {
                let worktree = worktree.as_remote_mut().unwrap();
                let mut snapshot = worktree.background_snapshot.lock();
                snapshot.delete_entry(id);
                worktree.snapshot = snapshot.clone();
            });
            Ok(())
        })
    }
1168}
1169
impl Snapshot {
    /// Returns this worktree's id.
    pub fn id(&self) -> WorktreeId {
        self.id
    }

    /// The absolute path of the worktree root on the host filesystem.
    pub fn abs_path(&self) -> &Arc<Path> {
        &self.abs_path
    }

    /// Whether an entry with the given id exists in this snapshot.
    pub fn contains_entry(&self, entry_id: ProjectEntryId) -> bool {
        self.entries_by_id.get(&entry_id, &()).is_some()
    }

    /// Inserts (or replaces) a single entry received over the wire,
    /// keeping the by-id and by-path trees consistent with each other.
    pub(crate) fn insert_entry(&mut self, entry: proto::Entry) -> Result<Entry> {
        let entry = Entry::try_from((&self.root_char_bag, entry))?;
        let old_entry = self.entries_by_id.insert_or_replace(
            PathEntry {
                id: entry.id,
                path: entry.path.clone(),
                is_ignored: entry.is_ignored,
                // scan_id is not meaningful for entries applied remotely.
                scan_id: 0,
            },
            &(),
        );
        // If this id was previously stored under a different path, remove
        // the stale by-path record before inserting the new one.
        if let Some(old_entry) = old_entry {
            self.entries_by_path.remove(&PathKey(old_entry.path), &());
        }
        self.entries_by_path.insert_or_replace(entry.clone(), &());
        Ok(entry)
    }

    /// Removes the entry with the given id together with every entry
    /// beneath its path. Returns `false` when the id is unknown.
    fn delete_entry(&mut self, entry_id: ProjectEntryId) -> bool {
        if let Some(removed_entry) = self.entries_by_id.remove(&entry_id, &()) {
            self.entries_by_path = {
                let mut cursor = self.entries_by_path.cursor();
                let mut new_entries_by_path =
                    cursor.slice(&TraversalTarget::Path(&removed_entry.path), Bias::Left, &());
                // Walk past the removed entry and all of its descendants,
                // dropping each one from the by-id tree as we go.
                while let Some(entry) = cursor.item() {
                    if entry.path.starts_with(&removed_entry.path) {
                        self.entries_by_id.remove(&entry.id, &());
                        cursor.next(&());
                    } else {
                        break;
                    }
                }
                new_entries_by_path.push_tree(cursor.suffix(&()), &());
                new_entries_by_path
            };

            true
        } else {
            false
        }
    }

    /// Applies a batch of removals and upserts received from the host,
    /// then advances this snapshot's scan id and completeness flag.
    pub(crate) fn apply_remote_update(&mut self, update: proto::UpdateWorktree) -> Result<()> {
        let mut entries_by_path_edits = Vec::new();
        let mut entries_by_id_edits = Vec::new();
        for entry_id in update.removed_entries {
            let entry = self
                .entry_for_id(ProjectEntryId::from_proto(entry_id))
                .ok_or_else(|| anyhow!("unknown entry {}", entry_id))?;
            entries_by_path_edits.push(Edit::Remove(PathKey(entry.path.clone())));
            entries_by_id_edits.push(Edit::Remove(entry.id));
        }

        for entry in update.updated_entries {
            let entry = Entry::try_from((&self.root_char_bag, entry))?;
            // An updated entry may have moved; remove the old path record
            // so the by-path tree doesn't keep a duplicate.
            if let Some(PathEntry { path, .. }) = self.entries_by_id.get(&entry.id, &()) {
                entries_by_path_edits.push(Edit::Remove(PathKey(path.clone())));
            }
            entries_by_id_edits.push(Edit::Insert(PathEntry {
                id: entry.id,
                path: entry.path.clone(),
                is_ignored: entry.is_ignored,
                scan_id: 0,
            }));
            entries_by_path_edits.push(Edit::Insert(entry));
        }

        self.entries_by_path.edit(entries_by_path_edits, &());
        self.entries_by_id.edit(entries_by_id_edits, &());
        self.scan_id = update.scan_id as usize;
        self.is_complete = update.is_last_update;

        Ok(())
    }

    /// Total number of file entries (ignored files included).
    pub fn file_count(&self) -> usize {
        self.entries_by_path.summary().file_count
    }

    /// Number of file entries that are not gitignored.
    pub fn visible_file_count(&self) -> usize {
        self.entries_by_path.summary().visible_file_count
    }

    /// Returns a traversal positioned at the `start_offset`-th entry,
    /// counting only entries that match the include flags.
    fn traverse_from_offset(
        &self,
        include_dirs: bool,
        include_ignored: bool,
        start_offset: usize,
    ) -> Traversal {
        let mut cursor = self.entries_by_path.cursor();
        cursor.seek(
            &TraversalTarget::Count {
                count: start_offset,
                include_dirs,
                include_ignored,
            },
            Bias::Right,
            &(),
        );
        Traversal {
            cursor,
            include_dirs,
            include_ignored,
        }
    }

    /// Returns a traversal positioned at the entry for `path` (or the
    /// first entry after it, if `path` has no entry).
    fn traverse_from_path(
        &self,
        include_dirs: bool,
        include_ignored: bool,
        path: &Path,
    ) -> Traversal {
        let mut cursor = self.entries_by_path.cursor();
        cursor.seek(&TraversalTarget::Path(path), Bias::Left, &());
        Traversal {
            cursor,
            include_dirs,
            include_ignored,
        }
    }

    /// Iterates over file entries, starting at offset `start`.
    pub fn files(&self, include_ignored: bool, start: usize) -> Traversal {
        self.traverse_from_offset(false, include_ignored, start)
    }

    /// Iterates over all entries, files and directories alike.
    pub fn entries(&self, include_ignored: bool) -> Traversal {
        self.traverse_from_offset(true, include_ignored, 0)
    }

    /// Iterates over the paths of all entries, excluding the root entry
    /// (whose path is empty).
    pub fn paths(&self) -> impl Iterator<Item = &Arc<Path>> {
        let empty_path = Path::new("");
        self.entries_by_path
            .cursor::<()>()
            .filter(move |entry| entry.path.as_ref() != empty_path)
            .map(|entry| &entry.path)
    }

    /// Iterates over the direct children of the directory at `parent_path`.
    fn child_entries<'a>(&'a self, parent_path: &'a Path) -> ChildEntriesIter<'a> {
        let mut cursor = self.entries_by_path.cursor();
        // Bias::Right positions the cursor just past the parent itself.
        cursor.seek(&TraversalTarget::Path(parent_path), Bias::Right, &());
        let traversal = Traversal {
            cursor,
            include_dirs: true,
            include_ignored: true,
        };
        ChildEntriesIter {
            traversal,
            parent_path,
        }
    }

    /// The entry for the worktree root, if the tree has been scanned.
    pub fn root_entry(&self) -> Option<&Entry> {
        self.entry_for_path("")
    }

    /// The file name of the worktree root.
    pub fn root_name(&self) -> &str {
        &self.root_name
    }

    /// The id of the scan that produced this snapshot.
    pub fn scan_id(&self) -> usize {
        self.scan_id
    }

    /// Looks up the entry at exactly `path`, if any.
    pub fn entry_for_path(&self, path: impl AsRef<Path>) -> Option<&Entry> {
        let path = path.as_ref();
        self.traverse_from_path(true, true, path)
            .entry()
            .and_then(|entry| {
                // The traversal may land on a later entry; only accept an
                // exact path match.
                if entry.path.as_ref() == path {
                    Some(entry)
                } else {
                    None
                }
            })
    }

    /// Looks up an entry by its project-wide id.
    pub fn entry_for_id(&self, id: ProjectEntryId) -> Option<&Entry> {
        let entry = self.entries_by_id.get(&id, &())?;
        self.entry_for_path(&entry.path)
    }

    /// Returns the inode of the entry at `path`, if any.
    pub fn inode_for_path(&self, path: impl AsRef<Path>) -> Option<u64> {
        self.entry_for_path(path.as_ref()).map(|e| e.inode)
    }
}
1368
impl LocalSnapshot {
    /// Counts of non-ignored entries per file extension.
    pub fn extension_counts(&self) -> &HashMap<OsString, usize> {
        &self.extension_counts
    }

    // Gives the most specific git repository for a given path
    pub(crate) fn repo_for(&self, path: &Path) -> Option<GitRepositoryEntry> {
        self.git_repositories
            .iter()
            .rev() //git_repository is ordered lexicographically
            .find(|repo| repo.manages(path))
            .cloned()
    }

    /// Finds the repository whose `.git` directory contains `path`, if any.
    pub(crate) fn in_dot_git(&mut self, path: &Path) -> Option<&mut GitRepositoryEntry> {
        // Git repositories cannot be nested, so we don't need to reverse the order
        self.git_repositories
            .iter_mut()
            .find(|repo| repo.in_dot_git(path))
    }

    /// Builds an update message describing the entire snapshot, as sent to
    /// a guest that has no prior state.
    #[cfg(test)]
    pub(crate) fn build_initial_update(&self, project_id: u64) -> proto::UpdateWorktree {
        let root_name = self.root_name.clone();
        proto::UpdateWorktree {
            project_id,
            worktree_id: self.id().to_proto(),
            abs_path: self.abs_path().to_string_lossy().into(),
            root_name,
            updated_entries: self.entries_by_path.iter().map(Into::into).collect(),
            removed_entries: Default::default(),
            scan_id: self.scan_id as u64,
            is_last_update: true,
        }
    }

    /// Builds an update message containing the difference between `self`
    /// and the older snapshot `other`, by merging their id-ordered entry
    /// sequences.
    pub(crate) fn build_update(
        &self,
        other: &Self,
        project_id: u64,
        worktree_id: u64,
        include_ignored: bool,
    ) -> proto::UpdateWorktree {
        let mut updated_entries = Vec::new();
        let mut removed_entries = Vec::new();
        let mut self_entries = self
            .entries_by_id
            .cursor::<()>()
            .filter(|e| include_ignored || !e.is_ignored)
            .peekable();
        let mut other_entries = other
            .entries_by_id
            .cursor::<()>()
            .filter(|e| include_ignored || !e.is_ignored)
            .peekable();
        // Merge-join the two sequences on entry id: ids only in `self` are
        // additions, ids only in `other` are removals, and matching ids are
        // updates when their scan ids differ.
        loop {
            match (self_entries.peek(), other_entries.peek()) {
                (Some(self_entry), Some(other_entry)) => {
                    match Ord::cmp(&self_entry.id, &other_entry.id) {
                        Ordering::Less => {
                            let entry = self.entry_for_id(self_entry.id).unwrap().into();
                            updated_entries.push(entry);
                            self_entries.next();
                        }
                        Ordering::Equal => {
                            if self_entry.scan_id != other_entry.scan_id {
                                let entry = self.entry_for_id(self_entry.id).unwrap().into();
                                updated_entries.push(entry);
                            }

                            self_entries.next();
                            other_entries.next();
                        }
                        Ordering::Greater => {
                            removed_entries.push(other_entry.id.to_proto());
                            other_entries.next();
                        }
                    }
                }
                (Some(self_entry), None) => {
                    let entry = self.entry_for_id(self_entry.id).unwrap().into();
                    updated_entries.push(entry);
                    self_entries.next();
                }
                (None, Some(other_entry)) => {
                    removed_entries.push(other_entry.id.to_proto());
                    other_entries.next();
                }
                (None, None) => break,
            }
        }

        proto::UpdateWorktree {
            project_id,
            worktree_id,
            abs_path: self.abs_path().to_string_lossy().into(),
            root_name: self.root_name().to_string(),
            updated_entries,
            removed_entries,
            scan_id: self.scan_id as u64,
            is_last_update: true,
        }
    }

    /// Inserts a locally-scanned entry, loading it as a gitignore file when
    /// appropriate and keeping the extension counts in sync.
    fn insert_entry(&mut self, mut entry: Entry, fs: &dyn Fs) -> Entry {
        if entry.is_file() && entry.path.file_name() == Some(&GITIGNORE) {
            let abs_path = self.abs_path.join(&entry.path);
            match smol::block_on(build_gitignore(&abs_path, fs)) {
                Ok(ignore) => {
                    self.ignores_by_parent_abs_path.insert(
                        abs_path.parent().unwrap().into(),
                        (Arc::new(ignore), self.scan_id),
                    );
                }
                Err(error) => {
                    log::error!(
                        "error loading .gitignore file {:?} - {:?}",
                        &entry.path,
                        error
                    );
                }
            }
        }

        self.reuse_entry_id(&mut entry);

        // Don't demote a directory that was already fully scanned back to
        // the pending state.
        if entry.kind == EntryKind::PendingDir {
            if let Some(existing_entry) =
                self.entries_by_path.get(&PathKey(entry.path.clone()), &())
            {
                entry.kind = existing_entry.kind;
            }
        }

        self.entries_by_path.insert_or_replace(entry.clone(), &());
        let scan_id = self.scan_id;
        let removed_entry = self.entries_by_id.insert_or_replace(
            PathEntry {
                id: entry.id,
                path: entry.path.clone(),
                is_ignored: entry.is_ignored,
                scan_id,
            },
            &(),
        );

        if let Some(removed_entry) = removed_entry {
            self.dec_extension_count(&removed_entry.path, removed_entry.is_ignored);
        }
        self.inc_extension_count(&entry.path, entry.is_ignored);

        entry
    }

    /// Records the scanned children of the directory at `parent_path`,
    /// marking the parent as fully scanned and registering any gitignore
    /// or git repository found within it.
    fn populate_dir(
        &mut self,
        parent_path: Arc<Path>,
        entries: impl IntoIterator<Item = Entry>,
        ignore: Option<Arc<Gitignore>>,
        fs: &dyn Fs,
    ) {
        let mut parent_entry = if let Some(parent_entry) =
            self.entries_by_path.get(&PathKey(parent_path.clone()), &())
        {
            parent_entry.clone()
        } else {
            log::warn!(
                "populating a directory {:?} that has been removed",
                parent_path
            );
            return;
        };

        if let Some(ignore) = ignore {
            self.ignores_by_parent_abs_path.insert(
                self.abs_path.join(&parent_path).into(),
                (ignore, self.scan_id),
            );
        }
        // The parent must still be pending — populate_dir runs exactly once
        // per directory per scan.
        if matches!(parent_entry.kind, EntryKind::PendingDir) {
            parent_entry.kind = EntryKind::Dir;
        } else {
            unreachable!();
        }

        if parent_path.file_name() == Some(&DOT_GIT) {
            let abs_path = self.abs_path.join(&parent_path);
            let content_path: Arc<Path> = parent_path.parent().unwrap().into();
            // Only register the repository if it isn't already known.
            if let Err(ix) = self
                .git_repositories
                .binary_search_by_key(&&content_path, |repo| &repo.content_path)
            {
                if let Some(repo) = fs.open_repo(abs_path.as_path()) {
                    self.git_repositories.insert(
                        ix,
                        GitRepositoryEntry {
                            repo,
                            scan_id: 0,
                            content_path,
                            git_dir_path: parent_path,
                        },
                    );
                }
            }
        }

        let mut entries_by_path_edits = vec![Edit::Insert(parent_entry)];
        let mut entries_by_id_edits = Vec::new();

        for mut entry in entries {
            self.reuse_entry_id(&mut entry);
            self.inc_extension_count(&entry.path, entry.is_ignored);
            entries_by_id_edits.push(Edit::Insert(PathEntry {
                id: entry.id,
                path: entry.path.clone(),
                is_ignored: entry.is_ignored,
                scan_id: self.scan_id,
            }));
            entries_by_path_edits.push(Edit::Insert(entry));
        }

        self.entries_by_path.edit(entries_by_path_edits, &());
        let removed_entries = self.entries_by_id.edit(entries_by_id_edits, &());

        for removed_entry in removed_entries {
            self.dec_extension_count(&removed_entry.path, removed_entry.is_ignored);
        }
    }

    /// Bumps the count for `path`'s extension; ignored entries don't count.
    fn inc_extension_count(&mut self, path: &Path, ignored: bool) {
        if !ignored {
            if let Some(extension) = path.extension() {
                if let Some(count) = self.extension_counts.get_mut(extension) {
                    *count += 1;
                } else {
                    self.extension_counts.insert(extension.into(), 1);
                }
            }
        }
    }

    /// Decrements the count for `path`'s extension.
    /// NOTE(review): entries that reach zero are kept in the map rather
    /// than removed — presumably harmless, but confirm callers don't
    /// assume absence means zero.
    fn dec_extension_count(&mut self, path: &Path, ignored: bool) {
        if !ignored {
            if let Some(extension) = path.extension() {
                if let Some(count) = self.extension_counts.get_mut(extension) {
                    *count -= 1;
                }
            }
        }
    }

    /// Preserves a stable id for `entry`: reuse the id of a recently
    /// removed entry with the same inode (a rename), or of the existing
    /// entry at the same path.
    fn reuse_entry_id(&mut self, entry: &mut Entry) {
        if let Some(removed_entry_id) = self.removed_entry_ids.remove(&entry.inode) {
            entry.id = removed_entry_id;
        } else if let Some(existing_entry) = self.entry_for_path(&entry.path) {
            entry.id = existing_entry.id;
        }
    }

    /// Removes `path` and everything beneath it from the snapshot,
    /// remembering removed ids by inode so renames can reuse them, and
    /// refreshing gitignore/git-repo bookkeeping when those files go away.
    fn remove_path(&mut self, path: &Path) {
        let mut new_entries;
        let removed_entries;
        {
            let mut cursor = self.entries_by_path.cursor::<TraversalProgress>();
            new_entries = cursor.slice(&TraversalTarget::Path(path), Bias::Left, &());
            removed_entries = cursor.slice(&TraversalTarget::PathSuccessor(path), Bias::Left, &());
            new_entries.push_tree(cursor.suffix(&()), &());
        }
        self.entries_by_path = new_entries;

        let mut entries_by_id_edits = Vec::new();
        for entry in removed_entries.cursor::<()>() {
            // Keep the greatest removed id per inode for potential reuse.
            let removed_entry_id = self
                .removed_entry_ids
                .entry(entry.inode)
                .or_insert(entry.id);
            *removed_entry_id = cmp::max(*removed_entry_id, entry.id);
            entries_by_id_edits.push(Edit::Remove(entry.id));
            self.dec_extension_count(&entry.path, entry.is_ignored);
        }
        self.entries_by_id.edit(entries_by_id_edits, &());

        if path.file_name() == Some(&GITIGNORE) {
            let abs_parent_path = self.abs_path.join(path.parent().unwrap());
            if let Some((_, scan_id)) = self
                .ignores_by_parent_abs_path
                .get_mut(abs_parent_path.as_path())
            {
                *scan_id = self.snapshot.scan_id;
            }
        } else if path.file_name() == Some(&DOT_GIT) {
            let parent_path = path.parent().unwrap();
            if let Ok(ix) = self
                .git_repositories
                .binary_search_by_key(&parent_path, |repo| repo.git_dir_path.as_ref())
            {
                self.git_repositories[ix].scan_id = self.snapshot.scan_id;
            }
        }
    }

    /// Collects the inodes of every known ancestor directory of `path`.
    fn ancestor_inodes_for_path(&self, path: &Path) -> TreeSet<u64> {
        let mut inodes = TreeSet::default();
        for ancestor in path.ancestors().skip(1) {
            if let Some(entry) = self.entry_for_path(ancestor) {
                inodes.insert(entry.inode);
            }
        }
        inodes
    }

    /// Builds the stack of gitignores that applies to `abs_path` by walking
    /// its ancestors from the outermost inward.
    fn ignore_stack_for_abs_path(&self, abs_path: &Path, is_dir: bool) -> Arc<IgnoreStack> {
        let mut new_ignores = Vec::new();
        for ancestor in abs_path.ancestors().skip(1) {
            if let Some((ignore, _)) = self.ignores_by_parent_abs_path.get(ancestor) {
                new_ignores.push((ancestor, Some(ignore.clone())));
            } else {
                new_ignores.push((ancestor, None));
            }
        }

        let mut ignore_stack = IgnoreStack::none();
        // Apply from root to leaf; once a directory is ignored, everything
        // below it is too, so the stack collapses to `all`.
        for (parent_abs_path, ignore) in new_ignores.into_iter().rev() {
            if ignore_stack.is_abs_path_ignored(parent_abs_path, true) {
                ignore_stack = IgnoreStack::all();
                break;
            } else if let Some(ignore) = ignore {
                ignore_stack = ignore_stack.append(parent_abs_path.into(), ignore);
            }
        }

        if ignore_stack.is_abs_path_ignored(abs_path, is_dir) {
            ignore_stack = IgnoreStack::all();
        }

        ignore_stack
    }

    /// All git repositories found in this worktree, ordered by path.
    pub fn git_repo_entries(&self) -> &[GitRepositoryEntry] {
        &self.git_repositories
    }
}
1711
impl GitRepositoryEntry {
    // Returns true if this repository's working directory contains `path`.
    // Note that these paths should be relative to the worktree root.
    pub(crate) fn manages(&self, path: &Path) -> bool {
        path.starts_with(self.content_path.as_ref())
    }

    // Returns true if `path` lies inside this repository's `.git` directory.
    // Note that this path should be relative to the worktree root.
    pub(crate) fn in_dot_git(&self, path: &Path) -> bool {
        path.starts_with(self.git_dir_path.as_ref())
    }
}
1723
1724async fn build_gitignore(abs_path: &Path, fs: &dyn Fs) -> Result<Gitignore> {
1725 let contents = fs.load(abs_path).await?;
1726 let parent = abs_path.parent().unwrap_or_else(|| Path::new("/"));
1727 let mut builder = GitignoreBuilder::new(parent);
1728 for line in contents.lines() {
1729 builder.add_line(Some(abs_path.into()), line)?;
1730 }
1731 Ok(builder.build()?)
1732}
1733
1734impl WorktreeId {
1735 pub fn from_usize(handle_id: usize) -> Self {
1736 Self(handle_id)
1737 }
1738
1739 pub(crate) fn from_proto(id: u64) -> Self {
1740 Self(id as usize)
1741 }
1742
1743 pub fn to_proto(&self) -> u64 {
1744 self.0 as u64
1745 }
1746
1747 pub fn to_usize(&self) -> usize {
1748 self.0
1749 }
1750}
1751
1752impl fmt::Display for WorktreeId {
1753 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1754 self.0.fmt(f)
1755 }
1756}
1757
1758impl Deref for Worktree {
1759 type Target = Snapshot;
1760
1761 fn deref(&self) -> &Self::Target {
1762 match self {
1763 Worktree::Local(worktree) => &worktree.snapshot,
1764 Worktree::Remote(worktree) => &worktree.snapshot,
1765 }
1766 }
1767}
1768
1769impl Deref for LocalWorktree {
1770 type Target = LocalSnapshot;
1771
1772 fn deref(&self) -> &Self::Target {
1773 &self.snapshot
1774 }
1775}
1776
1777impl Deref for RemoteWorktree {
1778 type Target = Snapshot;
1779
1780 fn deref(&self) -> &Self::Target {
1781 &self.snapshot
1782 }
1783}
1784
1785impl fmt::Debug for LocalWorktree {
1786 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1787 self.snapshot.fmt(f)
1788 }
1789}
1790
1791impl fmt::Debug for Snapshot {
1792 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1793 struct EntriesById<'a>(&'a SumTree<PathEntry>);
1794 struct EntriesByPath<'a>(&'a SumTree<Entry>);
1795
1796 impl<'a> fmt::Debug for EntriesByPath<'a> {
1797 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1798 f.debug_map()
1799 .entries(self.0.iter().map(|entry| (&entry.path, entry.id)))
1800 .finish()
1801 }
1802 }
1803
1804 impl<'a> fmt::Debug for EntriesById<'a> {
1805 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1806 f.debug_list().entries(self.0.iter()).finish()
1807 }
1808 }
1809
1810 f.debug_struct("Snapshot")
1811 .field("id", &self.id)
1812 .field("root_name", &self.root_name)
1813 .field("entries_by_path", &EntriesByPath(&self.entries_by_path))
1814 .field("entries_by_id", &EntriesById(&self.entries_by_id))
1815 .finish()
1816 }
1817}
1818
/// A handle to a file within a worktree, as seen by open buffers.
#[derive(Clone, PartialEq)]
pub struct File {
    pub worktree: ModelHandle<Worktree>,
    // Path of the file, relative to the worktree root.
    pub path: Arc<Path>,
    pub mtime: SystemTime,
    pub(crate) entry_id: ProjectEntryId,
    // True when the owning worktree is local (as opposed to remote).
    pub(crate) is_local: bool,
    pub(crate) is_deleted: bool,
}
1828
impl language::File for File {
    /// Returns `self` as a local file only when the owning worktree is local.
    fn as_local(&self) -> Option<&dyn language::LocalFile> {
        if self.is_local {
            Some(self)
        } else {
            None
        }
    }

    fn mtime(&self) -> SystemTime {
        self.mtime
    }

    fn path(&self) -> &Arc<Path> {
        &self.path
    }

    /// Builds a user-facing path: the worktree's name (for visible
    /// worktrees) or its abbreviated absolute path, joined with this
    /// file's worktree-relative path.
    fn full_path(&self, cx: &AppContext) -> PathBuf {
        let mut full_path = PathBuf::new();
        let worktree = self.worktree.read(cx);

        if worktree.is_visible() {
            full_path.push(worktree.root_name());
        } else {
            let path = worktree.abs_path();

            // Abbreviate paths under the home directory with "~".
            if worktree.is_local() && path.starts_with(cx.global::<HomeDir>().as_path()) {
                full_path.push("~");
                full_path.push(path.strip_prefix(cx.global::<HomeDir>().as_path()).unwrap());
            } else {
                full_path.push(path)
            }
        }

        // The root entry itself has an empty relative path; don't append it.
        if self.path.components().next().is_some() {
            full_path.push(&self.path);
        }

        full_path
    }

    /// Returns the last component of this handle's absolute path. If this handle refers to the root
    /// of its worktree, then this method will return the name of the worktree itself.
    fn file_name<'a>(&'a self, cx: &'a AppContext) -> &'a OsStr {
        self.path
            .file_name()
            .unwrap_or_else(|| OsStr::new(&self.worktree.read(cx).root_name))
    }

    fn is_deleted(&self) -> bool {
        self.is_deleted
    }

    /// Saves the buffer's contents. For a local worktree the text is written
    /// to disk (and collaborators are notified when the project is shared);
    /// for a remote worktree the save is requested from the host over RPC.
    ///
    /// Resolves to the saved version, its fingerprint, and the new mtime.
    fn save(
        &self,
        buffer_id: u64,
        text: Rope,
        version: clock::Global,
        line_ending: LineEnding,
        cx: &mut MutableAppContext,
    ) -> Task<Result<(clock::Global, String, SystemTime)>> {
        self.worktree.update(cx, |worktree, cx| match worktree {
            Worktree::Local(worktree) => {
                let rpc = worktree.client.clone();
                // project_id is present only while the project is shared.
                let project_id = worktree.share.as_ref().map(|share| share.project_id);
                let fingerprint = text.fingerprint();
                let save = worktree.write_file(self.path.clone(), text, line_ending, cx);
                cx.background().spawn(async move {
                    let entry = save.await?;
                    if let Some(project_id) = project_id {
                        rpc.send(proto::BufferSaved {
                            project_id,
                            buffer_id,
                            version: serialize_version(&version),
                            mtime: Some(entry.mtime.into()),
                            fingerprint: fingerprint.clone(),
                        })?;
                    }
                    Ok((version, fingerprint, entry.mtime))
                })
            }
            Worktree::Remote(worktree) => {
                let rpc = worktree.client.clone();
                let project_id = worktree.project_id;
                cx.foreground().spawn(async move {
                    let response = rpc
                        .request(proto::SaveBuffer {
                            project_id,
                            buffer_id,
                            version: serialize_version(&version),
                        })
                        .await?;
                    let version = deserialize_version(response.version);
                    let mtime = response
                        .mtime
                        .ok_or_else(|| anyhow!("missing mtime"))?
                        .into();
                    Ok((version, response.fingerprint, mtime))
                })
            }
        })
    }

    fn as_any(&self) -> &dyn Any {
        self
    }

    /// Serializes this file handle for transmission to remote collaborators.
    fn to_proto(&self) -> rpc::proto::File {
        rpc::proto::File {
            worktree_id: self.worktree.id() as u64,
            entry_id: self.entry_id.to_proto(),
            path: self.path.to_string_lossy().into(),
            mtime: Some(self.mtime.into()),
            is_deleted: self.is_deleted,
        }
    }
}
1946
impl language::LocalFile for File {
    /// The file's absolute path: the worktree root joined with the
    /// worktree-relative path.
    fn abs_path(&self, cx: &AppContext) -> PathBuf {
        self.worktree
            .read(cx)
            .as_local()
            .unwrap()
            .abs_path
            .join(&self.path)
    }

    /// Loads the file's contents from disk on a background thread.
    fn load(&self, cx: &AppContext) -> Task<Result<String>> {
        let worktree = self.worktree.read(cx).as_local().unwrap();
        let abs_path = worktree.absolutize(&self.path);
        let fs = worktree.fs.clone();
        cx.background()
            .spawn(async move { fs.load(&abs_path).await })
    }

    /// Notifies remote collaborators that the buffer was reloaded from
    /// disk. Only sends when the project is currently shared; send
    /// failures are logged and otherwise ignored.
    fn buffer_reloaded(
        &self,
        buffer_id: u64,
        version: &clock::Global,
        fingerprint: String,
        line_ending: LineEnding,
        mtime: SystemTime,
        cx: &mut MutableAppContext,
    ) {
        let worktree = self.worktree.read(cx).as_local().unwrap();
        if let Some(project_id) = worktree.share.as_ref().map(|share| share.project_id) {
            worktree
                .client
                .send(proto::BufferReloaded {
                    project_id,
                    buffer_id,
                    version: serialize_version(version),
                    mtime: Some(mtime.into()),
                    fingerprint,
                    line_ending: serialize_line_ending(line_ending) as i32,
                })
                .log_err();
        }
    }
}
1990
1991impl File {
1992 pub fn from_proto(
1993 proto: rpc::proto::File,
1994 worktree: ModelHandle<Worktree>,
1995 cx: &AppContext,
1996 ) -> Result<Self> {
1997 let worktree_id = worktree
1998 .read(cx)
1999 .as_remote()
2000 .ok_or_else(|| anyhow!("not remote"))?
2001 .id();
2002
2003 if worktree_id.to_proto() != proto.worktree_id {
2004 return Err(anyhow!("worktree id does not match file"));
2005 }
2006
2007 Ok(Self {
2008 worktree,
2009 path: Path::new(&proto.path).into(),
2010 mtime: proto.mtime.ok_or_else(|| anyhow!("no timestamp"))?.into(),
2011 entry_id: ProjectEntryId::from_proto(proto.entry_id),
2012 is_local: false,
2013 is_deleted: proto.is_deleted,
2014 })
2015 }
2016
2017 pub fn from_dyn(file: Option<&dyn language::File>) -> Option<&Self> {
2018 file.and_then(|f| f.as_any().downcast_ref())
2019 }
2020
2021 pub fn worktree_id(&self, cx: &AppContext) -> WorktreeId {
2022 self.worktree.read(cx).id()
2023 }
2024
2025 pub fn project_entry_id(&self, _: &AppContext) -> Option<ProjectEntryId> {
2026 if self.is_deleted {
2027 None
2028 } else {
2029 Some(self.entry_id)
2030 }
2031 }
2032}
2033
/// A single file or directory within a worktree snapshot.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct Entry {
    pub id: ProjectEntryId,
    pub kind: EntryKind,
    // Path relative to the worktree root.
    pub path: Arc<Path>,
    pub inode: u64,
    pub mtime: SystemTime,
    pub is_symlink: bool,
    // True when matched by a gitignore rule.
    pub is_ignored: bool,
}
2044
/// The kind of a worktree entry.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum EntryKind {
    // A directory whose children have not been scanned yet.
    PendingDir,
    Dir,
    // A file, carrying the character bag used for fuzzy matching.
    File(CharBag),
}
2051
2052impl Entry {
2053 fn new(
2054 path: Arc<Path>,
2055 metadata: &fs::Metadata,
2056 next_entry_id: &AtomicUsize,
2057 root_char_bag: CharBag,
2058 ) -> Self {
2059 Self {
2060 id: ProjectEntryId::new(next_entry_id),
2061 kind: if metadata.is_dir {
2062 EntryKind::PendingDir
2063 } else {
2064 EntryKind::File(char_bag_for_path(root_char_bag, &path))
2065 },
2066 path,
2067 inode: metadata.inode,
2068 mtime: metadata.mtime,
2069 is_symlink: metadata.is_symlink,
2070 is_ignored: false,
2071 }
2072 }
2073
2074 pub fn is_dir(&self) -> bool {
2075 matches!(self.kind, EntryKind::Dir | EntryKind::PendingDir)
2076 }
2077
2078 pub fn is_file(&self) -> bool {
2079 matches!(self.kind, EntryKind::File(_))
2080 }
2081}
2082
2083impl sum_tree::Item for Entry {
2084 type Summary = EntrySummary;
2085
2086 fn summary(&self) -> Self::Summary {
2087 let visible_count = if self.is_ignored { 0 } else { 1 };
2088 let file_count;
2089 let visible_file_count;
2090 if self.is_file() {
2091 file_count = 1;
2092 visible_file_count = visible_count;
2093 } else {
2094 file_count = 0;
2095 visible_file_count = 0;
2096 }
2097
2098 EntrySummary {
2099 max_path: self.path.clone(),
2100 count: 1,
2101 visible_count,
2102 file_count,
2103 visible_file_count,
2104 }
2105 }
2106}
2107
2108impl sum_tree::KeyedItem for Entry {
2109 type Key = PathKey;
2110
2111 fn key(&self) -> Self::Key {
2112 PathKey(self.path.clone())
2113 }
2114}
2115
/// Aggregate statistics for a subtree of `Entry` items.
#[derive(Clone, Debug)]
pub struct EntrySummary {
    // The greatest (i.e. rightmost) path in the subtree.
    max_path: Arc<Path>,
    count: usize,
    // Entries that are not gitignored.
    visible_count: usize,
    file_count: usize,
    // Files that are not gitignored.
    visible_file_count: usize,
}
2124
2125impl Default for EntrySummary {
2126 fn default() -> Self {
2127 Self {
2128 max_path: Arc::from(Path::new("")),
2129 count: 0,
2130 visible_count: 0,
2131 file_count: 0,
2132 visible_file_count: 0,
2133 }
2134 }
2135}
2136
2137impl sum_tree::Summary for EntrySummary {
2138 type Context = ();
2139
2140 fn add_summary(&mut self, rhs: &Self, _: &()) {
2141 self.max_path = rhs.max_path.clone();
2142 self.count += rhs.count;
2143 self.visible_count += rhs.visible_count;
2144 self.file_count += rhs.file_count;
2145 self.visible_file_count += rhs.visible_file_count;
2146 }
2147}
2148
/// An id-ordered record of an entry, used for lookups by `ProjectEntryId`.
#[derive(Clone, Debug)]
struct PathEntry {
    id: ProjectEntryId,
    path: Arc<Path>,
    is_ignored: bool,
    // The scan in which this entry was last touched.
    scan_id: usize,
}
2156
impl sum_tree::Item for PathEntry {
    type Summary = PathEntrySummary;

    // The summary only tracks the greatest id in the subtree.
    fn summary(&self) -> Self::Summary {
        PathEntrySummary { max_id: self.id }
    }
}
2164
impl sum_tree::KeyedItem for PathEntry {
    type Key = ProjectEntryId;

    // PathEntry items are keyed (and therefore ordered) by entry id.
    fn key(&self) -> Self::Key {
        self.id
    }
}
2172
/// Summary for `PathEntry` subtrees: the greatest entry id they contain.
#[derive(Clone, Debug, Default)]
struct PathEntrySummary {
    max_id: ProjectEntryId,
}
2177
impl sum_tree::Summary for PathEntrySummary {
    type Context = ();

    // The right subtree always holds the greater ids.
    fn add_summary(&mut self, summary: &Self, _: &Self::Context) {
        self.max_id = summary.max_id;
    }
}
2185
// Allows seeking through the by-id tree directly by `ProjectEntryId`.
impl<'a> sum_tree::Dimension<'a, PathEntrySummary> for ProjectEntryId {
    fn add_summary(&mut self, summary: &'a PathEntrySummary, _: &()) {
        *self = summary.max_id;
    }
}
2191
/// A path used as the ordering key of the by-path entry tree.
#[derive(Clone, Debug, Eq, PartialEq, Ord, PartialOrd)]
pub struct PathKey(Arc<Path>);
2194
2195impl Default for PathKey {
2196 fn default() -> Self {
2197 Self(Path::new("").into())
2198 }
2199}
2200
// Allows seeking through the by-path tree directly by `PathKey`.
impl<'a> sum_tree::Dimension<'a, EntrySummary> for PathKey {
    fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
        self.0 = summary.max_path.clone();
    }
}
2206
/// Scans the filesystem on background threads, keeping a shared
/// `LocalSnapshot` up to date and reporting progress via `notify`.
struct BackgroundScanner {
    fs: Arc<dyn Fs>,
    // Shared with the owning worktree; mutated under the lock.
    snapshot: Arc<Mutex<LocalSnapshot>>,
    // Channel used to report `ScanState` transitions to the worktree.
    notify: UnboundedSender<ScanState>,
    executor: Arc<executor::Background>,
}
2213
2214impl BackgroundScanner {
2215 fn new(
2216 snapshot: Arc<Mutex<LocalSnapshot>>,
2217 notify: UnboundedSender<ScanState>,
2218 fs: Arc<dyn Fs>,
2219 executor: Arc<executor::Background>,
2220 ) -> Self {
2221 Self {
2222 fs,
2223 snapshot,
2224 notify,
2225 executor,
2226 }
2227 }
2228
2229 fn abs_path(&self) -> Arc<Path> {
2230 self.snapshot.lock().abs_path.clone()
2231 }
2232
2233 fn snapshot(&self) -> LocalSnapshot {
2234 self.snapshot.lock().clone()
2235 }
2236
    /// Main loop: performs the initial recursive scan, then processes
    /// batches of filesystem events until either the event stream ends or
    /// the receiving side of `notify` is dropped (every send-error branch
    /// bails out of the loop).
    async fn run(mut self, events_rx: impl Stream<Item = Vec<fsevent::Event>>) {
        if self.notify.unbounded_send(ScanState::Initializing).is_err() {
            return;
        }

        // An error during the initial scan is reported but does not stop
        // the scanner; we still fall through to event processing.
        if let Err(err) = self.scan_dirs().await {
            if self
                .notify
                .unbounded_send(ScanState::Err(Arc::new(err)))
                .is_err()
            {
                return;
            }
        }

        if self.notify.unbounded_send(ScanState::Idle).is_err() {
            return;
        }

        futures::pin_mut!(events_rx);

        while let Some(mut events) = events_rx.next().await {
            // Coalesce any events already queued into a single batch so the
            // snapshot is updated (and subscribers notified) once per burst.
            while let Poll::Ready(Some(additional_events)) = futures::poll!(events_rx.next()) {
                events.extend(additional_events);
            }

            if self.notify.unbounded_send(ScanState::Updating).is_err() {
                break;
            }

            if !self.process_events(events).await {
                break;
            }

            if self.notify.unbounded_send(ScanState::Idle).is_err() {
                break;
            }
        }
    }
2276
    /// Performs the initial recursive scan of the worktree: loads ignore
    /// files above the root, then walks the directory tree in parallel
    /// using a work queue with one worker per CPU.
    async fn scan_dirs(&mut self) -> Result<()> {
        let root_char_bag;
        let root_abs_path;
        let root_inode;
        let is_dir;
        let next_entry_id;
        {
            // Copy what we need out of the snapshot, holding the lock briefly.
            let snapshot = self.snapshot.lock();
            root_char_bag = snapshot.root_char_bag;
            root_abs_path = snapshot.abs_path.clone();
            root_inode = snapshot.root_entry().map(|e| e.inode);
            is_dir = snapshot.root_entry().map_or(false, |e| e.is_dir());
            next_entry_id = snapshot.next_entry_id.clone();
        };

        // Populate ignores above the root.
        for ancestor in root_abs_path.ancestors().skip(1) {
            if let Ok(ignore) = build_gitignore(&ancestor.join(&*GITIGNORE), self.fs.as_ref()).await
            {
                self.snapshot
                    .lock()
                    .ignores_by_parent_abs_path
                    .insert(ancestor.into(), (ignore.into(), 0));
            }
        }

        let ignore_stack = {
            let mut snapshot = self.snapshot.lock();
            let ignore_stack = snapshot.ignore_stack_for_abs_path(&root_abs_path, true);
            // If an ancestor gitignore covers the root itself, mark the root
            // entry as ignored up front.
            if ignore_stack.is_all() {
                if let Some(mut root_entry) = snapshot.root_entry().cloned() {
                    root_entry.is_ignored = true;
                    snapshot.insert_entry(root_entry, self.fs.as_ref());
                }
            }
            ignore_stack
        };

        if is_dir {
            let path: Arc<Path> = Arc::from(Path::new(""));
            let mut ancestor_inodes = TreeSet::default();
            if let Some(root_inode) = root_inode {
                ancestor_inodes.insert(root_inode);
            }

            // Seed the work queue with the root job. Each job carries a clone
            // of the sender so workers can enqueue subdirectories; dropping
            // our copy lets the channel close once all jobs are done.
            let (tx, rx) = channel::unbounded();
            self.executor
                .block(tx.send(ScanJob {
                    abs_path: root_abs_path.to_path_buf(),
                    path,
                    ignore_stack,
                    ancestor_inodes,
                    scan_queue: tx.clone(),
                }))
                .unwrap();
            drop(tx);

            self.executor
                .scoped(|scope| {
                    for _ in 0..self.executor.num_cpus() {
                        scope.spawn(async {
                            while let Ok(job) = rx.recv().await {
                                // Scan errors are logged per directory rather
                                // than aborting the whole scan.
                                if let Err(err) = self
                                    .scan_dir(root_char_bag, next_entry_id.clone(), &job)
                                    .await
                                {
                                    log::error!("error scanning {:?}: {}", job.abs_path, err);
                                }
                            }
                        });
                    }
                })
                .await;
        }

        Ok(())
    }
2354
    /// Scans a single directory, inserting entries for its children into the
    /// snapshot and enqueuing `ScanJob`s for any subdirectories.
    async fn scan_dir(
        &self,
        root_char_bag: CharBag,
        next_entry_id: Arc<AtomicUsize>,
        job: &ScanJob,
    ) -> Result<()> {
        let mut new_entries: Vec<Entry> = Vec::new();
        let mut new_jobs: Vec<ScanJob> = Vec::new();
        let mut ignore_stack = job.ignore_stack.clone();
        let mut new_ignore = None;

        let mut child_paths = self.fs.read_dir(&job.abs_path).await?;
        while let Some(child_abs_path) = child_paths.next().await {
            let child_abs_path = match child_abs_path {
                Ok(child_abs_path) => child_abs_path,
                Err(error) => {
                    log::error!("error processing entry {:?}", error);
                    continue;
                }
            };
            let child_name = child_abs_path.file_name().unwrap();
            let child_path: Arc<Path> = job.path.join(child_name).into();
            let child_metadata = match self.fs.metadata(&child_abs_path).await {
                Ok(Some(metadata)) => metadata,
                // The child disappeared between listing and stat; skip it.
                Ok(None) => continue,
                Err(err) => {
                    log::error!("error processing {:?}: {:?}", child_abs_path, err);
                    continue;
                }
            };

            // If we find a .gitignore, add it to the stack of ignores used to determine which paths are ignored
            if child_name == *GITIGNORE {
                match build_gitignore(&child_abs_path, self.fs.as_ref()).await {
                    Ok(ignore) => {
                        let ignore = Arc::new(ignore);
                        ignore_stack =
                            ignore_stack.append(job.abs_path.as_path().into(), ignore.clone());
                        new_ignore = Some(ignore);
                    }
                    Err(error) => {
                        log::error!(
                            "error loading .gitignore file {:?} - {:?}",
                            child_name,
                            error
                        );
                    }
                }

                // Update the ignore status of any child entries we've already
                // processed to reflect the ignore file in the current directory.
                // Because `.gitignore` starts with a `.`, it typically appears
                // early in the listing, so there should rarely be many such
                // entries. Update the ignore stack associated with any new jobs
                // as well.
                let mut new_jobs = new_jobs.iter_mut();
                for entry in &mut new_entries {
                    let entry_abs_path = self.abs_path().join(&entry.path);
                    entry.is_ignored =
                        ignore_stack.is_abs_path_ignored(&entry_abs_path, entry.is_dir());
                    // Jobs were pushed in the same order as directory entries,
                    // so each directory entry pairs with the next queued job.
                    if entry.is_dir() {
                        new_jobs.next().unwrap().ignore_stack = if entry.is_ignored {
                            IgnoreStack::all()
                        } else {
                            ignore_stack.clone()
                        };
                    }
                }
            }

            let mut child_entry = Entry::new(
                child_path.clone(),
                &child_metadata,
                &next_entry_id,
                root_char_bag,
            );

            if child_entry.is_dir() {
                let is_ignored = ignore_stack.is_abs_path_ignored(&child_abs_path, true);
                child_entry.is_ignored = is_ignored;

                // Only descend if this directory's inode isn't already on the
                // path from the root — this guards against symlink cycles.
                if !job.ancestor_inodes.contains(&child_entry.inode) {
                    let mut ancestor_inodes = job.ancestor_inodes.clone();
                    ancestor_inodes.insert(child_entry.inode);
                    new_jobs.push(ScanJob {
                        abs_path: child_abs_path,
                        path: child_path,
                        ignore_stack: if is_ignored {
                            IgnoreStack::all()
                        } else {
                            ignore_stack.clone()
                        },
                        ancestor_inodes,
                        scan_queue: job.scan_queue.clone(),
                    });
                }
            } else {
                child_entry.is_ignored = ignore_stack.is_abs_path_ignored(&child_abs_path, false);
            }

            new_entries.push(child_entry);
        }

        // Commit this directory's children to the snapshot in one batch, then
        // hand the subdirectory jobs to the worker pool.
        self.snapshot.lock().populate_dir(
            job.path.clone(),
            new_entries,
            new_ignore,
            self.fs.as_ref(),
        );
        for new_job in new_jobs {
            job.scan_queue.send(new_job).await.unwrap();
        }

        Ok(())
    }
2468
    /// Applies a batch of file-system events to the snapshot, rescans any
    /// directories created by the batch, then refreshes ignore statuses and
    /// git repositories. Returns `false` if the worktree root can no longer
    /// be canonicalized, which stops the scanner's event loop.
    async fn process_events(&mut self, mut events: Vec<fsevent::Event>) -> bool {
        // Sort events by path, then drop any event whose path lies inside an
        // earlier event's path — rescanning the ancestor covers it.
        events.sort_unstable_by(|a, b| a.path.cmp(&b.path));
        events.dedup_by(|a, b| a.path.starts_with(&b.path));

        let root_char_bag;
        let root_abs_path;
        let next_entry_id;
        {
            let snapshot = self.snapshot.lock();
            root_char_bag = snapshot.root_char_bag;
            root_abs_path = snapshot.abs_path.clone();
            next_entry_id = snapshot.next_entry_id.clone();
        }

        // Event paths are canonical, so canonicalize the root to strip them
        // against it below. Failure here means the root is gone.
        let root_canonical_path = if let Ok(path) = self.fs.canonicalize(&root_abs_path).await {
            path
        } else {
            return false;
        };
        // Stat all event paths concurrently; results pair with `events` by index.
        let metadata = futures::future::join_all(
            events
                .iter()
                .map(|event| self.fs.metadata(&event.path))
                .collect::<Vec<_>>(),
        )
        .await;

        // Hold the snapshot lock while clearing and re-inserting the root entries
        // for each event. This way, the snapshot is not observable to the foreground
        // thread while this operation is in-progress.
        let (scan_queue_tx, scan_queue_rx) = channel::unbounded();
        {
            let mut snapshot = self.snapshot.lock();
            snapshot.scan_id += 1;
            for event in &events {
                if let Ok(path) = event.path.strip_prefix(&root_canonical_path) {
                    snapshot.remove_path(path);
                }
            }

            for (event, metadata) in events.into_iter().zip(metadata.into_iter()) {
                let path: Arc<Path> = match event.path.strip_prefix(&root_canonical_path) {
                    Ok(path) => Arc::from(path.to_path_buf()),
                    Err(_) => {
                        log::error!(
                            "unexpected event {:?} for root path {:?}",
                            event.path,
                            root_canonical_path
                        );
                        continue;
                    }
                };
                let abs_path = root_abs_path.join(&path);

                match metadata {
                    Ok(Some(metadata)) => {
                        let ignore_stack =
                            snapshot.ignore_stack_for_abs_path(&abs_path, metadata.is_dir);
                        let mut fs_entry = Entry::new(
                            path.clone(),
                            &metadata,
                            snapshot.next_entry_id.as_ref(),
                            snapshot.root_char_bag,
                        );
                        fs_entry.is_ignored = ignore_stack.is_all();
                        snapshot.insert_entry(fs_entry, self.fs.as_ref());

                        // A change inside a `.git` directory may mean the
                        // repository's index changed; reload it and record the
                        // scan in which that happened.
                        let scan_id = snapshot.scan_id;
                        if let Some(repo) = snapshot.in_dot_git(&path) {
                            repo.repo.lock().reload_index();
                            repo.scan_id = scan_id;
                        }

                        // Directories need a recursive rescan, unless their
                        // inode is already an ancestor (symlink cycle guard).
                        let mut ancestor_inodes = snapshot.ancestor_inodes_for_path(&path);
                        if metadata.is_dir && !ancestor_inodes.contains(&metadata.inode) {
                            ancestor_inodes.insert(metadata.inode);
                            self.executor
                                .block(scan_queue_tx.send(ScanJob {
                                    abs_path,
                                    path,
                                    ignore_stack,
                                    ancestor_inodes,
                                    scan_queue: scan_queue_tx.clone(),
                                }))
                                .unwrap();
                        }
                    }
                    // The path no longer exists; it was removed above.
                    Ok(None) => {}
                    Err(err) => {
                        // TODO - create a special 'error' entry in the entries tree to mark this
                        log::error!("error reading file on event {:?}", err);
                    }
                }
            }
            // Drop our sender so the workers below terminate once every
            // queued job (and the jobs they spawn) has been processed.
            drop(scan_queue_tx);
        }

        // Scan any directories that were created as part of this event batch.
        self.executor
            .scoped(|scope| {
                for _ in 0..self.executor.num_cpus() {
                    scope.spawn(async {
                        while let Ok(job) = scan_queue_rx.recv().await {
                            if let Err(err) = self
                                .scan_dir(root_char_bag, next_entry_id.clone(), &job)
                                .await
                            {
                                log::error!("error scanning {:?}: {}", job.abs_path, err);
                            }
                        }
                    });
                }
            })
            .await;

        // Attempt to detect renames only over a single batch of file-system events.
        self.snapshot.lock().removed_entry_ids.clear();

        self.update_ignore_statuses().await;
        self.update_git_repositories();
        true
    }
2591
    /// Re-evaluates ignore status for subtrees whose `.gitignore` was
    /// (re)loaded during the current scan, and discards cached gitignores
    /// whose files have been deleted.
    async fn update_ignore_statuses(&self) {
        // Work against a local copy of the snapshot (see `snapshot()`);
        // updates are written back to the shared snapshot by
        // `update_ignore_status` below.
        let mut snapshot = self.snapshot();

        let mut ignores_to_update = Vec::new();
        let mut ignores_to_delete = Vec::new();
        for (parent_abs_path, (_, scan_id)) in &snapshot.ignores_by_parent_abs_path {
            if let Ok(parent_path) = parent_abs_path.strip_prefix(&snapshot.abs_path) {
                // A matching scan id means this gitignore changed in the
                // current scan, so its whole subtree needs re-evaluation.
                if *scan_id == snapshot.scan_id && snapshot.entry_for_path(parent_path).is_some() {
                    ignores_to_update.push(parent_abs_path.clone());
                }

                // The `.gitignore` file itself is gone: drop the cached rules.
                let ignore_path = parent_path.join(&*GITIGNORE);
                if snapshot.entry_for_path(ignore_path).is_none() {
                    ignores_to_delete.push(parent_abs_path.clone());
                }
            }
        }

        for parent_abs_path in ignores_to_delete {
            // Remove from both the local copy and the shared snapshot.
            snapshot.ignores_by_parent_abs_path.remove(&parent_abs_path);
            self.snapshot
                .lock()
                .ignores_by_parent_abs_path
                .remove(&parent_abs_path);
        }

        let (ignore_queue_tx, ignore_queue_rx) = channel::unbounded();
        ignores_to_update.sort_unstable();
        let mut ignores_to_update = ignores_to_update.into_iter().peekable();
        while let Some(parent_abs_path) = ignores_to_update.next() {
            // Skip paths contained within one we're already going to process;
            // the recursive update of the ancestor covers them.
            while ignores_to_update
                .peek()
                .map_or(false, |p| p.starts_with(&parent_abs_path))
            {
                ignores_to_update.next().unwrap();
            }

            let ignore_stack = snapshot.ignore_stack_for_abs_path(&parent_abs_path, true);
            ignore_queue_tx
                .send(UpdateIgnoreStatusJob {
                    abs_path: parent_abs_path,
                    ignore_stack,
                    ignore_queue: ignore_queue_tx.clone(),
                })
                .await
                .unwrap();
        }
        // Close the queue so the worker tasks below exit when all jobs
        // (including recursively-spawned ones) are finished.
        drop(ignore_queue_tx);

        self.executor
            .scoped(|scope| {
                for _ in 0..self.executor.num_cpus() {
                    scope.spawn(async {
                        while let Ok(job) = ignore_queue_rx.recv().await {
                            self.update_ignore_status(job, &snapshot).await;
                        }
                    });
                }
            })
            .await;
    }
2653
2654 fn update_git_repositories(&self) {
2655 let mut snapshot = self.snapshot.lock();
2656 let mut git_repositories = mem::take(&mut snapshot.git_repositories);
2657 git_repositories.retain(|repo| snapshot.entry_for_path(&repo.git_dir_path).is_some());
2658 snapshot.git_repositories = git_repositories;
2659 }
2660
    /// Recomputes the ignore status of every entry directly inside
    /// `job.abs_path`, enqueuing further jobs for subdirectories, and writes
    /// any changed entries back to the shared snapshot.
    async fn update_ignore_status(&self, job: UpdateIgnoreStatusJob, snapshot: &LocalSnapshot) {
        let mut ignore_stack = job.ignore_stack;
        // If this directory has its own cached gitignore, push it onto the stack.
        if let Some((ignore, _)) = snapshot.ignores_by_parent_abs_path.get(&job.abs_path) {
            ignore_stack = ignore_stack.append(job.abs_path.clone(), ignore.clone());
        }

        let mut entries_by_id_edits = Vec::new();
        let mut entries_by_path_edits = Vec::new();
        let path = job.abs_path.strip_prefix(&snapshot.abs_path).unwrap();
        for mut entry in snapshot.child_entries(path).cloned() {
            let was_ignored = entry.is_ignored;
            let abs_path = self.abs_path().join(&entry.path);
            entry.is_ignored = ignore_stack.is_abs_path_ignored(&abs_path, entry.is_dir());
            if entry.is_dir() {
                let child_ignore_stack = if entry.is_ignored {
                    IgnoreStack::all()
                } else {
                    ignore_stack.clone()
                };
                // Recurse into the subdirectory via the shared work queue.
                job.ignore_queue
                    .send(UpdateIgnoreStatusJob {
                        abs_path: abs_path.into(),
                        ignore_stack: child_ignore_stack,
                        ignore_queue: job.ignore_queue.clone(),
                    })
                    .await
                    .unwrap();
            }

            // Only record edits for entries whose ignore status actually changed.
            if entry.is_ignored != was_ignored {
                let mut path_entry = snapshot.entries_by_id.get(&entry.id, &()).unwrap().clone();
                path_entry.scan_id = snapshot.scan_id;
                path_entry.is_ignored = entry.is_ignored;
                entries_by_id_edits.push(Edit::Insert(path_entry));
                entries_by_path_edits.push(Edit::Insert(entry));
            }
        }

        // Apply the accumulated edits to the shared snapshot in one batch.
        let mut snapshot = self.snapshot.lock();
        snapshot.entries_by_path.edit(entries_by_path_edits, &());
        snapshot.entries_by_id.edit(entries_by_id_edits, &());
    }
2703}
2704
2705fn char_bag_for_path(root_char_bag: CharBag, path: &Path) -> CharBag {
2706 let mut result = root_char_bag;
2707 result.extend(
2708 path.to_string_lossy()
2709 .chars()
2710 .map(|c| c.to_ascii_lowercase()),
2711 );
2712 result
2713}
2714
/// A unit of work for the background scanner's worker pool: list one
/// directory's children and enqueue follow-up jobs for its subdirectories.
struct ScanJob {
    /// Absolute path of the directory to scan.
    abs_path: PathBuf,
    /// The same directory, relative to the worktree root.
    path: Arc<Path>,
    /// Ignore rules inherited from the directory's ancestors.
    ignore_stack: Arc<IgnoreStack>,
    /// Queue shared with the worker pool; jobs for subdirectories are sent here.
    scan_queue: Sender<ScanJob>,
    /// Inodes of the directories on the path from the root, used to avoid
    /// re-descending into a directory already being visited (e.g. through a
    /// symlink cycle).
    ancestor_inodes: TreeSet<u64>,
}
2722
/// A unit of work for re-evaluating ignore status: recompute the status of
/// the entries directly inside `abs_path`, enqueuing jobs for subdirectories.
struct UpdateIgnoreStatusJob {
    /// Absolute path of the directory whose children should be re-evaluated.
    abs_path: Arc<Path>,
    /// Ignore rules in effect for this directory's ancestors.
    ignore_stack: Arc<IgnoreStack>,
    /// Queue shared with the worker pool; jobs for subdirectories are sent here.
    ignore_queue: Sender<UpdateIgnoreStatusJob>,
}
2728
/// Test-support extension methods for worktree model handles.
pub trait WorktreeHandle {
    /// Waits until all file-system events already queued at the time of the
    /// call have been processed by the worktree.
    #[cfg(any(test, feature = "test-support"))]
    fn flush_fs_events<'a>(
        &self,
        cx: &'a gpui::TestAppContext,
    ) -> futures::future::LocalBoxFuture<'a, ()>;
}
2736
impl WorktreeHandle for ModelHandle<Worktree> {
    // The worktree's FS event stream sometimes delivers "redundant" events for FS changes that
    // occurred before the worktree was constructed. These events can cause the worktree to perform
    // extra directory scans, and emit extra scan-state notifications.
    //
    // This function mutates the worktree's directory and waits for those mutations to be picked up,
    // to ensure that all redundant FS events have already been processed.
    #[cfg(any(test, feature = "test-support"))]
    fn flush_fs_events<'a>(
        &self,
        cx: &'a gpui::TestAppContext,
    ) -> futures::future::LocalBoxFuture<'a, ()> {
        use smol::future::FutureExt;

        // Create and then delete a sentinel file, waiting for the worktree to
        // observe each mutation before proceeding.
        let filename = "fs-event-sentinel";
        let tree = self.clone();
        let (fs, root_path) = self.read_with(cx, |tree, _| {
            let tree = tree.as_local().unwrap();
            (tree.fs.clone(), tree.abs_path().clone())
        });

        async move {
            fs.create_file(&root_path.join(filename), Default::default())
                .await
                .unwrap();
            tree.condition(cx, |tree, _| tree.entry_for_path(filename).is_some())
                .await;

            fs.remove_file(&root_path.join(filename), Default::default())
                .await
                .unwrap();
            tree.condition(cx, |tree, _| tree.entry_for_path(filename).is_none())
                .await;

            // Finally, wait for any scan triggered by the sentinel to finish.
            cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
                .await;
        }
        .boxed_local()
    }
}
2777
/// Running totals accumulated while traversing the entry tree; used as a
/// `sum_tree` dimension so cursors can seek by path or by filtered entry count.
#[derive(Clone, Debug)]
struct TraversalProgress<'a> {
    /// The greatest entry path seen so far in traversal order.
    max_path: &'a Path,
    /// Total entries traversed.
    count: usize,
    /// Entries that are not ignored.
    visible_count: usize,
    /// File (non-directory) entries.
    file_count: usize,
    /// File entries that are not ignored.
    visible_file_count: usize,
}
2786
2787impl<'a> TraversalProgress<'a> {
2788 fn count(&self, include_dirs: bool, include_ignored: bool) -> usize {
2789 match (include_ignored, include_dirs) {
2790 (true, true) => self.count,
2791 (true, false) => self.file_count,
2792 (false, true) => self.visible_count,
2793 (false, false) => self.visible_file_count,
2794 }
2795 }
2796}
2797
impl<'a> sum_tree::Dimension<'a, EntrySummary> for TraversalProgress<'a> {
    // Folds a subtree's summary into the running totals, which is what allows
    // cursors to seek by path or by any of the filtered entry counts.
    fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
        self.max_path = summary.max_path.as_ref();
        self.count += summary.count;
        self.visible_count += summary.visible_count;
        self.file_count += summary.file_count;
        self.visible_file_count += summary.visible_file_count;
    }
}
2807
impl<'a> Default for TraversalProgress<'a> {
    // The zero state: positioned before any entry, with all counts at zero.
    fn default() -> Self {
        Self {
            max_path: Path::new(""),
            count: 0,
            visible_count: 0,
            file_count: 0,
            visible_file_count: 0,
        }
    }
}
2819
/// A filtered, cursor-based iterator over the entries of a worktree snapshot.
pub struct Traversal<'a> {
    cursor: sum_tree::Cursor<'a, Entry, TraversalProgress<'a>>,
    // Whether ignored entries are yielded.
    include_ignored: bool,
    // Whether directory entries are yielded.
    include_dirs: bool,
}
2825
impl<'a> Traversal<'a> {
    /// Advances to the next entry matching this traversal's filters.
    /// Returns `false` when the end of the tree is reached.
    pub fn advance(&mut self) -> bool {
        self.advance_to_offset(self.offset() + 1)
    }

    /// Seeks forward so that `offset()` becomes `offset`, counting only the
    /// entries included by this traversal's filters.
    pub fn advance_to_offset(&mut self, offset: usize) -> bool {
        self.cursor.seek_forward(
            &TraversalTarget::Count {
                count: offset,
                include_dirs: self.include_dirs,
                include_ignored: self.include_ignored,
            },
            Bias::Right,
            &(),
        )
    }

    /// Skips past all descendants of the current entry, stopping at the next
    /// entry that is outside its subtree and matches the filters.
    pub fn advance_to_sibling(&mut self) -> bool {
        while let Some(entry) = self.cursor.item() {
            // Jump past the current entry's subtree.
            self.cursor.seek_forward(
                &TraversalTarget::PathSuccessor(&entry.path),
                Bias::Left,
                &(),
            );
            // Keep looping until the landing entry satisfies the filters.
            if let Some(entry) = self.cursor.item() {
                if (self.include_dirs || !entry.is_dir())
                    && (self.include_ignored || !entry.is_ignored)
                {
                    return true;
                }
            }
        }
        false
    }

    /// The entry the traversal is currently positioned on, if any.
    pub fn entry(&self) -> Option<&'a Entry> {
        self.cursor.item()
    }

    /// The number of filter-matching entries preceding the current position.
    pub fn offset(&self) -> usize {
        self.cursor
            .start()
            .count(self.include_dirs, self.include_ignored)
    }
}
2871
2872impl<'a> Iterator for Traversal<'a> {
2873 type Item = &'a Entry;
2874
2875 fn next(&mut self) -> Option<Self::Item> {
2876 if let Some(item) = self.entry() {
2877 self.advance();
2878 Some(item)
2879 } else {
2880 None
2881 }
2882 }
2883}
2884
/// A seek target for a `Traversal`'s cursor.
#[derive(Debug)]
enum TraversalTarget<'a> {
    /// Seek to the given path.
    Path(&'a Path),
    /// Seek to the first entry after the subtree rooted at the given path.
    PathSuccessor(&'a Path),
    /// Seek to the nth entry, counting only entries matching the filters.
    Count {
        count: usize,
        include_ignored: bool,
        include_dirs: bool,
    },
}
2895
2896impl<'a, 'b> SeekTarget<'a, EntrySummary, TraversalProgress<'a>> for TraversalTarget<'b> {
2897 fn cmp(&self, cursor_location: &TraversalProgress<'a>, _: &()) -> Ordering {
2898 match self {
2899 TraversalTarget::Path(path) => path.cmp(&cursor_location.max_path),
2900 TraversalTarget::PathSuccessor(path) => {
2901 if !cursor_location.max_path.starts_with(path) {
2902 Ordering::Equal
2903 } else {
2904 Ordering::Greater
2905 }
2906 }
2907 TraversalTarget::Count {
2908 count,
2909 include_dirs,
2910 include_ignored,
2911 } => Ord::cmp(
2912 count,
2913 &cursor_location.count(*include_dirs, *include_ignored),
2914 ),
2915 }
2916 }
2917}
2918
/// Iterates over the direct children of `parent_path` within a traversal,
/// skipping each child's descendants.
struct ChildEntriesIter<'a> {
    parent_path: &'a Path,
    traversal: Traversal<'a>,
}
2923
2924impl<'a> Iterator for ChildEntriesIter<'a> {
2925 type Item = &'a Entry;
2926
2927 fn next(&mut self) -> Option<Self::Item> {
2928 if let Some(item) = self.traversal.entry() {
2929 if item.path.starts_with(&self.parent_path) {
2930 self.traversal.advance_to_sibling();
2931 return Some(item);
2932 }
2933 }
2934 None
2935 }
2936}
2937
impl<'a> From<&'a Entry> for proto::Entry {
    /// Converts a worktree entry into its wire representation for
    /// transmission to remote collaborators.
    fn from(entry: &'a Entry) -> Self {
        Self {
            id: entry.id.to_proto(),
            is_dir: entry.is_dir(),
            // Paths are sent as (lossy) UTF-8 strings.
            path: entry.path.to_string_lossy().into(),
            inode: entry.inode,
            mtime: Some(entry.mtime.into()),
            is_symlink: entry.is_symlink,
            is_ignored: entry.is_ignored,
        }
    }
}
2951
2952impl<'a> TryFrom<(&'a CharBag, proto::Entry)> for Entry {
2953 type Error = anyhow::Error;
2954
2955 fn try_from((root_char_bag, entry): (&'a CharBag, proto::Entry)) -> Result<Self> {
2956 if let Some(mtime) = entry.mtime {
2957 let kind = if entry.is_dir {
2958 EntryKind::Dir
2959 } else {
2960 let mut char_bag = *root_char_bag;
2961 char_bag.extend(entry.path.chars().map(|c| c.to_ascii_lowercase()));
2962 EntryKind::File(char_bag)
2963 };
2964 let path: Arc<Path> = PathBuf::from(entry.path).into();
2965 Ok(Entry {
2966 id: ProjectEntryId::from_proto(entry.id),
2967 kind,
2968 path,
2969 inode: entry.inode,
2970 mtime: mtime.into(),
2971 is_symlink: entry.is_symlink,
2972 is_ignored: entry.is_ignored,
2973 })
2974 } else {
2975 Err(anyhow!(
2976 "missing mtime in remote worktree entry {:?}",
2977 entry.path
2978 ))
2979 }
2980 }
2981}
2982
/// Sends a worktree update to the server, splitting it into chunks so a single
/// request never carries more than `MAX_CHUNK_SIZE` entries.
async fn send_worktree_update(client: &Arc<Client>, update: proto::UpdateWorktree) -> Result<()> {
    // A tiny chunk size in tests exercises the chunking logic.
    #[cfg(any(test, feature = "test-support"))]
    const MAX_CHUNK_SIZE: usize = 2;
    #[cfg(not(any(test, feature = "test-support")))]
    const MAX_CHUNK_SIZE: usize = 256;

    for update in proto::split_worktree_update(update, MAX_CHUNK_SIZE) {
        client.request(update).await?;
    }

    Ok(())
}
2995
2996#[cfg(test)]
2997mod tests {
2998 use super::*;
2999 use anyhow::Result;
3000 use client::test::FakeHttpClient;
3001 use fs::repository::FakeGitRepository;
3002 use fs::{FakeFs, RealFs};
3003 use gpui::{executor::Deterministic, TestAppContext};
3004 use rand::prelude::*;
3005 use serde_json::json;
3006 use std::{
3007 env,
3008 fmt::Write,
3009 time::{SystemTime, UNIX_EPOCH},
3010 };
3011
3012 use util::test::temp_tree;
3013
    // Verifies that traversal respects gitignore rules: `entries(false)` skips
    // the ignored `a/b` file while `entries(true)` includes it.
    #[gpui::test]
    async fn test_traversal(cx: &mut TestAppContext) {
        let fs = FakeFs::new(cx.background());
        fs.insert_tree(
            "/root",
            json!({
                ".gitignore": "a/b\n",
                "a": {
                    "b": "",
                    "c": "",
                }
            }),
        )
        .await;

        let http_client = FakeHttpClient::with_404_response();
        let client = cx.read(|cx| Client::new(http_client, cx));

        let tree = Worktree::local(
            client,
            Arc::from(Path::new("/root")),
            true,
            fs,
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();
        // Wait for the initial scan to finish before asserting on entries.
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;

        tree.read_with(cx, |tree, _| {
            assert_eq!(
                tree.entries(false)
                    .map(|entry| entry.path.as_ref())
                    .collect::<Vec<_>>(),
                vec![
                    Path::new(""),
                    Path::new(".gitignore"),
                    Path::new("a"),
                    Path::new("a/c"),
                ]
            );
            assert_eq!(
                tree.entries(true)
                    .map(|entry| entry.path.as_ref())
                    .collect::<Vec<_>>(),
                vec![
                    Path::new(""),
                    Path::new(".gitignore"),
                    Path::new("a"),
                    Path::new("a/b"),
                    Path::new("a/c"),
                ]
            );
        })
    }
3071
    // Verifies that symlinks forming a cycle don't cause infinite scanning:
    // the symlinked directories appear as entries but are not descended into,
    // and renaming one of them is picked up by a rescan.
    #[gpui::test(iterations = 10)]
    async fn test_circular_symlinks(executor: Arc<Deterministic>, cx: &mut TestAppContext) {
        let fs = FakeFs::new(cx.background());
        fs.insert_tree(
            "/root",
            json!({
                "lib": {
                    "a": {
                        "a.txt": ""
                    },
                    "b": {
                        "b.txt": ""
                    }
                }
            }),
        )
        .await;
        // Each symlink points back up at its parent, creating cycles.
        fs.insert_symlink("/root/lib/a/lib", "..".into()).await;
        fs.insert_symlink("/root/lib/b/lib", "..".into()).await;

        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
        let tree = Worktree::local(
            client,
            Arc::from(Path::new("/root")),
            true,
            fs.clone(),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();

        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;

        tree.read_with(cx, |tree, _| {
            assert_eq!(
                tree.entries(false)
                    .map(|entry| entry.path.as_ref())
                    .collect::<Vec<_>>(),
                vec![
                    Path::new(""),
                    Path::new("lib"),
                    Path::new("lib/a"),
                    Path::new("lib/a/a.txt"),
                    Path::new("lib/a/lib"),
                    Path::new("lib/b"),
                    Path::new("lib/b/b.txt"),
                    Path::new("lib/b/lib"),
                ]
            );
        });

        fs.rename(
            Path::new("/root/lib/a/lib"),
            Path::new("/root/lib/a/lib-2"),
            Default::default(),
        )
        .await
        .unwrap();
        executor.run_until_parked();
        tree.read_with(cx, |tree, _| {
            assert_eq!(
                tree.entries(false)
                    .map(|entry| entry.path.as_ref())
                    .collect::<Vec<_>>(),
                vec![
                    Path::new(""),
                    Path::new("lib"),
                    Path::new("lib/a"),
                    Path::new("lib/a/a.txt"),
                    Path::new("lib/a/lib-2"),
                    Path::new("lib/b"),
                    Path::new("lib/b/b.txt"),
                    Path::new("lib/b/lib"),
                ]
            );
        });
    }
3151
    // Verifies ignore status on the real file system, both for the initial
    // scan and for files created afterwards, including rules inherited from a
    // gitignore in an ancestor of the worktree root.
    #[gpui::test]
    async fn test_rescan_with_gitignore(cx: &mut TestAppContext) {
        let parent_dir = temp_tree(json!({
            ".gitignore": "ancestor-ignored-file1\nancestor-ignored-file2\n",
            "tree": {
                ".git": {},
                ".gitignore": "ignored-dir\n",
                "tracked-dir": {
                    "tracked-file1": "",
                    "ancestor-ignored-file1": "",
                },
                "ignored-dir": {
                    "ignored-file1": ""
                }
            }
        }));
        let dir = parent_dir.path().join("tree");

        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));

        let tree = Worktree::local(
            client,
            dir.as_path(),
            true,
            Arc::new(RealFs),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;
        tree.flush_fs_events(cx).await;
        // Initial scan: statuses come from both the root's and the ancestor's
        // gitignore files.
        cx.read(|cx| {
            let tree = tree.read(cx);
            assert!(
                !tree
                    .entry_for_path("tracked-dir/tracked-file1")
                    .unwrap()
                    .is_ignored
            );
            assert!(
                tree.entry_for_path("tracked-dir/ancestor-ignored-file1")
                    .unwrap()
                    .is_ignored
            );
            assert!(
                tree.entry_for_path("ignored-dir/ignored-file1")
                    .unwrap()
                    .is_ignored
            );
        });

        // Files created after the initial scan must get the same treatment.
        std::fs::write(dir.join("tracked-dir/tracked-file2"), "").unwrap();
        std::fs::write(dir.join("tracked-dir/ancestor-ignored-file2"), "").unwrap();
        std::fs::write(dir.join("ignored-dir/ignored-file2"), "").unwrap();
        tree.flush_fs_events(cx).await;
        cx.read(|cx| {
            let tree = tree.read(cx);
            assert!(
                !tree
                    .entry_for_path("tracked-dir/tracked-file2")
                    .unwrap()
                    .is_ignored
            );
            assert!(
                tree.entry_for_path("tracked-dir/ancestor-ignored-file2")
                    .unwrap()
                    .is_ignored
            );
            assert!(
                tree.entry_for_path("ignored-dir/ignored-file2")
                    .unwrap()
                    .is_ignored
            );
            assert!(tree.entry_for_path(".git").unwrap().is_ignored);
        });
    }
3230
    // Verifies that paths resolve to their innermost enclosing git repository,
    // that changes inside `.git` bump the repository's scan id, and that
    // deleting `.git` removes the repository.
    #[gpui::test]
    async fn test_git_repository_for_path(cx: &mut TestAppContext) {
        let root = temp_tree(json!({
            "dir1": {
                ".git": {},
                "deps": {
                    "dep1": {
                        ".git": {},
                        "src": {
                            "a.txt": ""
                        }
                    }
                },
                "src": {
                    "b.txt": ""
                }
            },
            "c.txt": "",
        }));

        let http_client = FakeHttpClient::with_404_response();
        let client = cx.read(|cx| Client::new(http_client, cx));
        let tree = Worktree::local(
            client,
            root.path(),
            true,
            Arc::new(RealFs),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();

        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;
        tree.flush_fs_events(cx).await;

        tree.read_with(cx, |tree, _cx| {
            let tree = tree.as_local().unwrap();

            // Outside any repository.
            assert!(tree.repo_for("c.txt".as_ref()).is_none());

            let repo = tree.repo_for("dir1/src/b.txt".as_ref()).unwrap();
            assert_eq!(repo.content_path.as_ref(), Path::new("dir1"));
            assert_eq!(repo.git_dir_path.as_ref(), Path::new("dir1/.git"));

            // A nested repository wins over its enclosing one.
            let repo = tree.repo_for("dir1/deps/dep1/src/a.txt".as_ref()).unwrap();
            assert_eq!(repo.content_path.as_ref(), Path::new("dir1/deps/dep1"));
            assert_eq!(repo.git_dir_path.as_ref(), Path::new("dir1/deps/dep1/.git"),);
        });

        let original_scan_id = tree.read_with(cx, |tree, _cx| {
            let tree = tree.as_local().unwrap();
            tree.repo_for("dir1/src/b.txt".as_ref()).unwrap().scan_id
        });

        // A change inside `.git` must bump the repository's scan id.
        std::fs::write(root.path().join("dir1/.git/random_new_file"), "hello").unwrap();
        tree.flush_fs_events(cx).await;

        tree.read_with(cx, |tree, _cx| {
            let tree = tree.as_local().unwrap();
            let new_scan_id = tree.repo_for("dir1/src/b.txt".as_ref()).unwrap().scan_id;
            assert_ne!(
                original_scan_id, new_scan_id,
                "original {original_scan_id}, new {new_scan_id}"
            );
        });

        // Deleting `.git` must remove the repository.
        std::fs::remove_dir_all(root.path().join("dir1/.git")).unwrap();
        tree.flush_fs_events(cx).await;

        tree.read_with(cx, |tree, _cx| {
            let tree = tree.as_local().unwrap();

            assert!(tree.repo_for("dir1/src/b.txt".as_ref()).is_none());
        });
    }
3308
3309 #[test]
3310 fn test_changed_repos() {
3311 fn fake_entry(git_dir_path: impl AsRef<Path>, scan_id: usize) -> GitRepositoryEntry {
3312 GitRepositoryEntry {
3313 repo: Arc::new(Mutex::new(FakeGitRepository::default())),
3314 scan_id,
3315 content_path: git_dir_path.as_ref().parent().unwrap().into(),
3316 git_dir_path: git_dir_path.as_ref().into(),
3317 }
3318 }
3319
3320 let prev_repos: Vec<GitRepositoryEntry> = vec![
3321 fake_entry("/.git", 0),
3322 fake_entry("/a/.git", 0),
3323 fake_entry("/a/b/.git", 0),
3324 ];
3325
3326 let new_repos: Vec<GitRepositoryEntry> = vec![
3327 fake_entry("/a/.git", 1),
3328 fake_entry("/a/b/.git", 0),
3329 fake_entry("/a/c/.git", 0),
3330 ];
3331
3332 let res = LocalWorktree::changed_repos(&prev_repos, &new_repos);
3333
3334 // Deletion retained
3335 assert!(res
3336 .iter()
3337 .find(|repo| repo.git_dir_path.as_ref() == Path::new("/.git") && repo.scan_id == 0)
3338 .is_some());
3339
3340 // Update retained
3341 assert!(res
3342 .iter()
3343 .find(|repo| repo.git_dir_path.as_ref() == Path::new("/a/.git") && repo.scan_id == 1)
3344 .is_some());
3345
3346 // Addition retained
3347 assert!(res
3348 .iter()
3349 .find(|repo| repo.git_dir_path.as_ref() == Path::new("/a/c/.git") && repo.scan_id == 0)
3350 .is_some());
3351
3352 // Nochange, not retained
3353 assert!(res
3354 .iter()
3355 .find(|repo| repo.git_dir_path.as_ref() == Path::new("/a/b/.git") && repo.scan_id == 0)
3356 .is_none());
3357 }
3358
    // Verifies that files written through the worktree appear as entries with
    // the correct ignore status, even inside a gitignored directory.
    #[gpui::test]
    async fn test_write_file(cx: &mut TestAppContext) {
        let dir = temp_tree(json!({
            ".git": {},
            ".gitignore": "ignored-dir\n",
            "tracked-dir": {},
            "ignored-dir": {}
        }));

        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));

        let tree = Worktree::local(
            client,
            dir.path(),
            true,
            Arc::new(RealFs),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;
        tree.flush_fs_events(cx).await;

        tree.update(cx, |tree, cx| {
            tree.as_local().unwrap().write_file(
                Path::new("tracked-dir/file.txt"),
                "hello".into(),
                Default::default(),
                cx,
            )
        })
        .await
        .unwrap();
        tree.update(cx, |tree, cx| {
            tree.as_local().unwrap().write_file(
                Path::new("ignored-dir/file.txt"),
                "world".into(),
                Default::default(),
                cx,
            )
        })
        .await
        .unwrap();

        tree.read_with(cx, |tree, _| {
            let tracked = tree.entry_for_path("tracked-dir/file.txt").unwrap();
            let ignored = tree.entry_for_path("ignored-dir/file.txt").unwrap();
            assert!(!tracked.is_ignored);
            assert!(ignored.is_ignored);
        });
    }
3412
    // Verifies that `create_entry` with `is_directory = true` produces a
    // directory entry that survives the subsequent scan.
    #[gpui::test(iterations = 30)]
    async fn test_create_directory(cx: &mut TestAppContext) {
        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));

        let fs = FakeFs::new(cx.background());
        fs.insert_tree(
            "/a",
            json!({
                "b": {},
                "c": {},
                "d": {},
            }),
        )
        .await;

        let tree = Worktree::local(
            client,
            "/a".as_ref(),
            true,
            fs,
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();

        let entry = tree
            .update(cx, |tree, cx| {
                tree.as_local_mut()
                    .unwrap()
                    .create_entry("a/e".as_ref(), true, cx)
            })
            .await
            .unwrap();
        assert!(entry.is_dir());

        // Let any follow-up scan work settle before asserting.
        cx.foreground().run_until_parked();
        tree.read_with(cx, |tree, _| {
            assert_eq!(tree.entry_for_path("a/e").unwrap().kind, EntryKind::Dir);
        });
    }
3454
    /// Fuzz test for the background scanner: randomly mutates a real temp
    /// directory while delivering the resulting fs events to a
    /// `BackgroundScanner` in randomly-sized batches, then checks that
    /// (a) the scanner's snapshot always satisfies its invariants,
    /// (b) a fresh scan of the final directory state matches the
    /// incrementally-maintained snapshot, and (c) `build_update` /
    /// `apply_remote_update` can bring any earlier snapshot up to date.
    #[gpui::test(iterations = 100)]
    fn test_random(mut rng: StdRng) {
        // Workload knobs, overridable from the environment for longer runs.
        let operations = env::var("OPERATIONS")
            .map(|o| o.parse().unwrap())
            .unwrap_or(40);
        let initial_entries = env::var("INITIAL_ENTRIES")
            .map(|o| o.parse().unwrap())
            .unwrap_or(20);

        // Seed a real on-disk directory with a random tree of files/dirs.
        // insertion_probability = 1.0 forces every mutation to be a creation.
        let root_dir = tempdir::TempDir::new("worktree-test").unwrap();
        for _ in 0..initial_entries {
            randomly_mutate_tree(root_dir.path(), 1.0, &mut rng).unwrap();
        }
        log::info!("Generated initial tree");

        let (notify_tx, _notify_rx) = mpsc::unbounded();
        let fs = Arc::new(RealFs);
        let next_entry_id = Arc::new(AtomicUsize::new(0));
        // Hand-build an empty snapshot rooted at the temp dir; the scanner
        // will populate it via `scan_dirs` below.
        let mut initial_snapshot = LocalSnapshot {
            removed_entry_ids: Default::default(),
            ignores_by_parent_abs_path: Default::default(),
            git_repositories: Default::default(),
            next_entry_id: next_entry_id.clone(),
            snapshot: Snapshot {
                id: WorktreeId::from_usize(0),
                entries_by_path: Default::default(),
                entries_by_id: Default::default(),
                abs_path: root_dir.path().into(),
                root_name: Default::default(),
                root_char_bag: Default::default(),
                scan_id: 0,
                is_complete: true,
            },
            extension_counts: Default::default(),
        };
        // Insert the root entry itself, based on real filesystem metadata.
        initial_snapshot.insert_entry(
            Entry::new(
                Path::new("").into(),
                &smol::block_on(fs.metadata(root_dir.path()))
                    .unwrap()
                    .unwrap(),
                &next_entry_id,
                Default::default(),
            ),
            fs.as_ref(),
        );
        let mut scanner = BackgroundScanner::new(
            Arc::new(Mutex::new(initial_snapshot.clone())),
            notify_tx,
            fs.clone(),
            Arc::new(gpui::executor::Background::new()),
        );
        smol::block_on(scanner.scan_dirs()).unwrap();
        scanner.snapshot().check_invariants();

        // Main fuzz loop: interleave disk mutations with event delivery.
        // Events accumulate in `events` and are delivered in random-length
        // prefixes, exercising partial/batched event processing.
        let mut events = Vec::new();
        let mut snapshots = Vec::new();
        let mut mutations_len = operations;
        while mutations_len > 1 {
            if !events.is_empty() && rng.gen_bool(0.4) {
                let len = rng.gen_range(0..=events.len());
                let to_deliver = events.drain(0..len).collect::<Vec<_>>();
                log::info!("Delivering events: {:#?}", to_deliver);
                smol::block_on(scanner.process_events(to_deliver));
                scanner.snapshot().check_invariants();
            } else {
                events.extend(randomly_mutate_tree(root_dir.path(), 0.6, &mut rng).unwrap());
                mutations_len -= 1;
            }

            // Occasionally capture a point-in-time snapshot, to be caught up
            // via `build_update` in the replay phase below.
            if rng.gen_bool(0.2) {
                snapshots.push(scanner.snapshot());
            }
        }
        // Deliver any remaining undelivered events so the scanner quiesces.
        log::info!("Quiescing: {:#?}", events);
        smol::block_on(scanner.process_events(events));
        scanner.snapshot().check_invariants();

        // A brand-new scanner starting from the pristine initial snapshot
        // must converge to the same view of the (now-mutated) directory.
        let (notify_tx, _notify_rx) = mpsc::unbounded();
        let mut new_scanner = BackgroundScanner::new(
            Arc::new(Mutex::new(initial_snapshot)),
            notify_tx,
            scanner.fs.clone(),
            scanner.executor.clone(),
        );
        smol::block_on(new_scanner.scan_dirs()).unwrap();
        assert_eq!(
            scanner.snapshot().to_vec(true),
            new_scanner.snapshot().to_vec(true)
        );

        // Replay phase: each captured snapshot, optionally stripped of its
        // ignored entries, must reach the current state when the incremental
        // update built against it is applied.
        for mut prev_snapshot in snapshots {
            let include_ignored = rng.gen::<bool>();
            if !include_ignored {
                // Remove all ignored entries from both indices, simulating a
                // client that never tracked ignored files.
                let mut entries_by_path_edits = Vec::new();
                let mut entries_by_id_edits = Vec::new();
                for entry in prev_snapshot
                    .entries_by_id
                    .cursor::<()>()
                    .filter(|e| e.is_ignored)
                {
                    entries_by_path_edits.push(Edit::Remove(PathKey(entry.path.clone())));
                    entries_by_id_edits.push(Edit::Remove(entry.id));
                }

                prev_snapshot
                    .entries_by_path
                    .edit(entries_by_path_edits, &());
                prev_snapshot.entries_by_id.edit(entries_by_id_edits, &());
            }

            let update = scanner
                .snapshot()
                .build_update(&prev_snapshot, 0, 0, include_ignored);
            prev_snapshot.apply_remote_update(update).unwrap();
            // `to_vec(true)` on the updated snapshot is safe: when ignored
            // entries were excluded, the snapshot simply doesn't contain any.
            assert_eq!(
                prev_snapshot.to_vec(true),
                scanner.snapshot().to_vec(include_ignored)
            );
        }
    }
3576
    /// Applies one random mutation to the real directory tree under
    /// `root_path` and returns the `fsevent::Event`s a filesystem watcher
    /// would have reported for it.
    ///
    /// The mutation is chosen as follows:
    /// - with probability `insertion_probability` (or always, when the tree
    ///   is still effectively empty): create a new file or directory;
    /// - otherwise, with probability 0.05: write a random `.gitignore`;
    /// - otherwise: rename or delete an existing file or directory.
    fn randomly_mutate_tree(
        root_path: &Path,
        insertion_probability: f64,
        rng: &mut impl Rng,
    ) -> Result<Vec<fsevent::Event>> {
        let root_path = root_path.canonicalize().unwrap();
        let (dirs, files) = read_dir_recursive(root_path.clone());

        let mut events = Vec::new();
        let mut record_event = |path: PathBuf| {
            events.push(fsevent::Event {
                // Only uniqueness/ordering of event ids matters for the test,
                // so a coarse seconds-since-epoch stamp is sufficient.
                event_id: SystemTime::now()
                    .duration_since(UNIX_EPOCH)
                    .unwrap()
                    .as_secs(),
                flags: fsevent::StreamFlags::empty(),
                path,
            });
        };

        // `dirs` always contains at least the root itself, so `choose` below
        // cannot fail. The first condition forces creation while the tree has
        // nothing but the root.
        if (files.is_empty() && dirs.len() == 1) || rng.gen_bool(insertion_probability) {
            let path = dirs.choose(rng).unwrap();
            let new_path = path.join(gen_name(rng));

            if rng.gen() {
                log::info!("Creating dir {:?}", new_path.strip_prefix(root_path)?);
                std::fs::create_dir(&new_path)?;
            } else {
                log::info!("Creating file {:?}", new_path.strip_prefix(root_path)?);
                std::fs::write(&new_path, "")?;
            }
            record_event(new_path);
        } else if rng.gen_bool(0.05) {
            // Write a `.gitignore` listing a random subset of the files and
            // directories beneath a randomly-chosen directory.
            let ignore_dir_path = dirs.choose(rng).unwrap();
            let ignore_path = ignore_dir_path.join(&*GITIGNORE);

            let (subdirs, subfiles) = read_dir_recursive(ignore_dir_path.clone());
            let files_to_ignore = {
                let len = rng.gen_range(0..=subfiles.len());
                subfiles.choose_multiple(rng, len)
            };
            let dirs_to_ignore = {
                // Exclusive upper bound: never ignore every subdir (and in
                // particular never the chosen dir itself, `subdirs[0]`).
                let len = rng.gen_range(0..subdirs.len());
                subdirs.choose_multiple(rng, len)
            };

            let mut ignore_contents = String::new();
            for path_to_ignore in files_to_ignore.chain(dirs_to_ignore) {
                writeln!(
                    ignore_contents,
                    "{}",
                    path_to_ignore
                        .strip_prefix(&ignore_dir_path)?
                        .to_str()
                        .unwrap()
                )
                .unwrap();
            }
            log::info!(
                "Creating {:?} with contents:\n{}",
                ignore_path.strip_prefix(&root_path)?,
                ignore_contents
            );
            std::fs::write(&ignore_path, ignore_contents).unwrap();
            record_event(ignore_path);
        } else {
            // Pick any existing file, or any directory except the root
            // (`dirs[1..]`), to rename or delete.
            let old_path = {
                let file_path = files.choose(rng);
                let dir_path = dirs[1..].choose(rng);
                file_path.into_iter().chain(dir_path).choose(rng).unwrap()
            };

            let is_rename = rng.gen();
            if is_rename {
                // Never move a directory into its own subtree.
                let new_path_parent = dirs
                    .iter()
                    .filter(|d| !d.starts_with(old_path))
                    .choose(rng)
                    .unwrap();

                // Sometimes replace an unrelated existing directory entirely,
                // exercising the overwrite-on-rename code path.
                let overwrite_existing_dir =
                    !old_path.starts_with(&new_path_parent) && rng.gen_bool(0.3);
                let new_path = if overwrite_existing_dir {
                    std::fs::remove_dir_all(&new_path_parent).ok();
                    new_path_parent.to_path_buf()
                } else {
                    new_path_parent.join(gen_name(rng))
                };

                log::info!(
                    "Renaming {:?} to {}{:?}",
                    old_path.strip_prefix(&root_path)?,
                    if overwrite_existing_dir {
                        "overwrite "
                    } else {
                        ""
                    },
                    new_path.strip_prefix(&root_path)?
                );
                std::fs::rename(&old_path, &new_path)?;
                record_event(old_path.clone());
                record_event(new_path);
            } else if old_path.is_dir() {
                // Deleting a directory reports events for everything inside
                // it, mimicking what a recursive watcher would emit.
                let (dirs, files) = read_dir_recursive(old_path.clone());

                log::info!("Deleting dir {:?}", old_path.strip_prefix(&root_path)?);
                std::fs::remove_dir_all(&old_path).unwrap();
                for file in files {
                    record_event(file);
                }
                for dir in dirs {
                    record_event(dir);
                }
            } else {
                log::info!("Deleting file {:?}", old_path.strip_prefix(&root_path)?);
                std::fs::remove_file(old_path).unwrap();
                record_event(old_path.clone());
            }
        }

        Ok(events)
    }
3699
3700 fn read_dir_recursive(path: PathBuf) -> (Vec<PathBuf>, Vec<PathBuf>) {
3701 let child_entries = std::fs::read_dir(&path).unwrap();
3702 let mut dirs = vec![path];
3703 let mut files = Vec::new();
3704 for child_entry in child_entries {
3705 let child_path = child_entry.unwrap().path();
3706 if child_path.is_dir() {
3707 let (child_dirs, child_files) = read_dir_recursive(child_path);
3708 dirs.extend(child_dirs);
3709 files.extend(child_files);
3710 } else {
3711 files.push(child_path);
3712 }
3713 }
3714 (dirs, files)
3715 }
3716
3717 fn gen_name(rng: &mut impl Rng) -> String {
3718 (0..6)
3719 .map(|_| rng.sample(rand::distributions::Alphanumeric))
3720 .map(char::from)
3721 .collect()
3722 }
3723
    // Test-only helpers for validating a snapshot's internal consistency.
    impl LocalSnapshot {
        /// Panics unless the snapshot's redundant indices and iterators all
        /// agree with each other: file iterators vs. the path-ordered entry
        /// tree, stack traversal vs. cursor vs. `entries()`, ignore-file
        /// bookkeeping, and cached extension counts.
        fn check_invariants(&self) {
            // The `files` iterators must yield exactly the file entries (and,
            // for `visible_files`, the non-ignored ones) in path order.
            let mut files = self.files(true, 0);
            let mut visible_files = self.files(false, 0);
            for entry in self.entries_by_path.cursor::<()>() {
                if entry.is_file() {
                    assert_eq!(files.next().unwrap().inode, entry.inode);
                    if !entry.is_ignored {
                        assert_eq!(visible_files.next().unwrap().inode, entry.inode);
                    }
                }
            }
            assert!(files.next().is_none());
            assert!(visible_files.next().is_none());

            // NOTE: despite the name, this stack traversal visits entries in
            // depth-first order — each node's children are inserted at the
            // pre-loop stack length and popped from the back, so a node's
            // subtree is fully visited before its next sibling. That is what
            // makes it comparable to the path-ordered cursor below.
            let mut bfs_paths = Vec::new();
            let mut stack = vec![Path::new("")];
            while let Some(path) = stack.pop() {
                bfs_paths.push(path);
                let ix = stack.len();
                for child_entry in self.child_entries(path) {
                    stack.insert(ix, &child_entry.path);
                }
            }

            let dfs_paths_via_iter = self
                .entries_by_path
                .cursor::<()>()
                .map(|e| e.path.as_ref())
                .collect::<Vec<_>>();
            assert_eq!(bfs_paths, dfs_paths_via_iter);

            // `entries(true)` (including ignored) must agree with the raw
            // cursor order.
            let dfs_paths_via_traversal = self
                .entries(true)
                .map(|e| e.path.as_ref())
                .collect::<Vec<_>>();
            assert_eq!(dfs_paths_via_traversal, dfs_paths_via_iter);

            // Every tracked ignore file must correspond to an existing
            // directory entry containing an existing `.gitignore` entry.
            for ignore_parent_abs_path in self.ignores_by_parent_abs_path.keys() {
                let ignore_parent_path =
                    ignore_parent_abs_path.strip_prefix(&self.abs_path).unwrap();
                assert!(self.entry_for_path(&ignore_parent_path).is_some());
                assert!(self
                    .entry_for_path(ignore_parent_path.join(&*GITIGNORE))
                    .is_some());
            }

            // Ensure extension counts are correct. They are recomputed here
            // from `entries(false)`, i.e. they track non-ignored entries only.
            let mut expected_extension_counts = HashMap::default();
            for extension in self.entries(false).filter_map(|e| e.path.extension()) {
                *expected_extension_counts
                    .entry(extension.into())
                    .or_insert(0) += 1;
            }
            assert_eq!(self.extension_counts, expected_extension_counts);
        }

        /// Flattens the snapshot into sorted `(path, inode, is_ignored)`
        /// triples for order-insensitive equality comparisons in tests.
        /// When `include_ignored` is false, ignored entries are omitted.
        fn to_vec(&self, include_ignored: bool) -> Vec<(&Path, u64, bool)> {
            let mut paths = Vec::new();
            for entry in self.entries_by_path.cursor::<()>() {
                if include_ignored || !entry.is_ignored {
                    paths.push((entry.path.as_ref(), entry.inode, entry.is_ignored));
                }
            }
            paths.sort_by(|a, b| a.0.cmp(b.0));
            paths
        }
    }
3792}