1use super::{ignore::IgnoreStack, DiagnosticSummary};
2use crate::{copy_recursive, ProjectEntryId, RemoveOptions};
3use ::ignore::gitignore::{Gitignore, GitignoreBuilder};
4use anyhow::{anyhow, Context, Result};
5use client::{proto, Client};
6use clock::ReplicaId;
7use collections::{HashMap, VecDeque};
8use fs::{repository::GitRepository, Fs};
9use fs::{HomeDir, LineEnding};
10use futures::{
11 channel::{
12 mpsc::{self, UnboundedSender},
13 oneshot,
14 },
15 Stream, StreamExt,
16};
17use fuzzy::CharBag;
18use git::{DOT_GIT, GITIGNORE};
19use gpui::{
20 executor, AppContext, AsyncAppContext, Entity, ModelContext, ModelHandle, MutableAppContext,
21 Task,
22};
23use language::{
24 proto::{deserialize_version, serialize_line_ending, serialize_version},
25 Buffer, DiagnosticEntry, PointUtf16, Rope,
26};
27use parking_lot::Mutex;
28use postage::{
29 prelude::{Sink as _, Stream as _},
30 watch,
31};
32
33use smol::channel::{self, Sender};
34use std::{
35 any::Any,
36 cmp::{self, Ordering},
37 convert::TryFrom,
38 ffi::{OsStr, OsString},
39 fmt,
40 future::Future,
41 mem,
42 ops::{Deref, DerefMut},
43 path::{Path, PathBuf},
44 sync::{atomic::AtomicUsize, Arc},
45 task::Poll,
46 time::{Duration, SystemTime},
47};
48use sum_tree::{Bias, Edit, SeekTarget, SumTree, TreeMap, TreeSet};
49use util::{ResultExt, TryFutureExt};
50
/// Identifier for a worktree, derived from the model id of the `Worktree`
/// model that owns it (see `WorktreeId::from_usize` in `create`).
#[derive(Copy, Clone, PartialEq, Eq, Debug, Hash, PartialOrd, Ord)]
pub struct WorktreeId(usize);
53
/// A directory tree tracked by the project: either one living on the local
/// filesystem, or one mirrored from a remote collaborator's machine.
#[allow(clippy::large_enum_variant)]
pub enum Worktree {
    Local(LocalWorktree),
    Remote(RemoteWorktree),
}
59
/// A worktree backed by the local filesystem, kept up to date by a
/// background scanner task.
pub struct LocalWorktree {
    // Foreground copy of the snapshot, refreshed from `background_snapshot`
    // by `poll_snapshot`.
    snapshot: LocalSnapshot,
    // Snapshot mutated by the background scanner and by eager edits.
    background_snapshot: Arc<Mutex<LocalSnapshot>>,
    // Latest scan state reported by the background scanner.
    last_scan_state_rx: watch::Receiver<ScanState>,
    _background_scanner_task: Option<Task<()>>,
    // Pending re-poll task scheduled while the initial scan is running.
    poll_task: Option<Task<()>>,
    // Present while this worktree is shared with remote collaborators.
    share: Option<ShareState>,
    diagnostics: HashMap<Arc<Path>, Vec<DiagnosticEntry<PointUtf16>>>,
    diagnostic_summaries: TreeMap<PathKey, DiagnosticSummary>,
    client: Arc<Client>,
    fs: Arc<dyn Fs>,
    visible: bool,
}
73
/// A worktree mirrored from a remote collaborator's machine via
/// `proto::UpdateWorktree` messages.
pub struct RemoteWorktree {
    pub snapshot: Snapshot,
    // Snapshot that incoming remote updates are applied to on a
    // background task; copied into `snapshot` by `poll_snapshot`.
    pub(crate) background_snapshot: Arc<Mutex<Snapshot>>,
    project_id: u64,
    client: Arc<Client>,
    // Feeds updates to the background task; `None` after the host
    // disconnects (see `disconnected_from_host`).
    updates_tx: Option<UnboundedSender<proto::UpdateWorktree>>,
    // Wakers for callers awaiting a given scan id, kept sorted by scan id.
    snapshot_subscriptions: VecDeque<(usize, oneshot::Sender<()>)>,
    replica_id: ReplicaId,
    diagnostic_summaries: TreeMap<PathKey, DiagnosticSummary>,
    visible: bool,
}
85
/// A point-in-time view of a worktree's entries, shared by local and
/// remote worktrees.
#[derive(Clone)]
pub struct Snapshot {
    id: WorktreeId,
    // Absolute path of the worktree root on the host machine.
    abs_path: Arc<Path>,
    root_name: String,
    // Lowercased characters of `root_name`, used for fuzzy matching.
    root_char_bag: CharBag,
    // Entries ordered by path.
    entries_by_path: SumTree<Entry>,
    // Entries ordered by id, for id-based lookup.
    entries_by_id: SumTree<PathEntry>,
    // Incremented each time the tree is rescanned.
    scan_id: usize,
    // Whether the scan that produced this snapshot ran to completion.
    is_complete: bool,
}
97
/// A git repository discovered inside the worktree.
#[derive(Clone)]
pub struct GitRepositoryEntry {
    // Handle to the underlying git repository implementation.
    pub(crate) repo: Arc<Mutex<dyn GitRepository>>,

    // Scan id at which this repository entry was last updated.
    pub(crate) scan_id: usize,
    // Path to folder containing the .git file or directory
    pub(crate) content_path: Arc<Path>,
    // Path to the actual .git folder.
    // Note: if .git is a file, this points to the folder indicated by the .git file
    pub(crate) git_dir_path: Arc<Path>,
}
109
110impl std::fmt::Debug for GitRepositoryEntry {
111 fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
112 f.debug_struct("GitRepositoryEntry")
113 .field("content_path", &self.content_path)
114 .field("git_dir_path", &self.git_dir_path)
115 .field("libgit_repository", &"LibGitRepository")
116 .finish()
117 }
118}
119
/// A `Snapshot` plus the local-only bookkeeping maintained by the scanner.
pub struct LocalSnapshot {
    // Parsed `.gitignore` files keyed by the absolute path of the directory
    // containing them, paired with the scan id at which they were read.
    ignores_by_parent_abs_path: HashMap<Arc<Path>, (Arc<Gitignore>, usize)>,
    git_repositories: Vec<GitRepositoryEntry>,
    // NOTE(review): keys appear to identify removed filesystem entries
    // (presumably inode numbers) so recreated files keep a stable
    // `ProjectEntryId` — confirm against the scanner code.
    removed_entry_ids: HashMap<u64, ProjectEntryId>,
    // Shared counter used to mint ids for newly-discovered entries.
    next_entry_id: Arc<AtomicUsize>,
    snapshot: Snapshot,
    // Count of entries per file extension.
    extension_counts: HashMap<OsString, usize>,
}
128
129impl Clone for LocalSnapshot {
130 fn clone(&self) -> Self {
131 Self {
132 ignores_by_parent_abs_path: self.ignores_by_parent_abs_path.clone(),
133 git_repositories: self.git_repositories.iter().cloned().collect(),
134 removed_entry_ids: self.removed_entry_ids.clone(),
135 next_entry_id: self.next_entry_id.clone(),
136 snapshot: self.snapshot.clone(),
137 extension_counts: self.extension_counts.clone(),
138 }
139 }
140}
141
/// Lets a `LocalSnapshot` be used anywhere a `Snapshot` is expected.
impl Deref for LocalSnapshot {
    type Target = Snapshot;

    fn deref(&self) -> &Self::Target {
        &self.snapshot
    }
}
149
/// Mutable counterpart of the `Deref` impl above.
impl DerefMut for LocalSnapshot {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.snapshot
    }
}
155
/// Lifecycle of the background scanner, broadcast over a watch channel.
#[derive(Clone, Debug)]
enum ScanState {
    /// No scan is currently in progress.
    Idle,
    /// The worktree is performing its initial scan of the filesystem.
    Initializing,
    /// The worktree is updating in response to filesystem events.
    Updating,
    /// Scanning failed with the given error.
    Err(Arc<anyhow::Error>),
}
165
/// State kept while a local worktree is shared with remote collaborators.
struct ShareState {
    project_id: u64,
    // Latest snapshot to replicate; watched by the snapshot task below.
    snapshots_tx: watch::Sender<LocalSnapshot>,
    // Per-path diagnostic summaries queued for forwarding to collaborators.
    diagnostic_summaries_tx: mpsc::UnboundedSender<(Arc<Path>, DiagnosticSummary)>,
    // Dropping these tasks (i.e. dropping `ShareState`) stops replication.
    _maintain_remote_snapshot: Task<Option<()>>,
    _maintain_remote_diagnostic_summaries: Task<()>,
}
173
/// Events emitted by a `Worktree` model.
pub enum Event {
    /// The set of entries in the snapshot changed.
    UpdatedEntries,
    /// The listed git repositories were added, removed, or rescanned.
    UpdatedGitRepositories(Vec<GitRepositoryEntry>),
}
178
/// Makes `Worktree` a gpui model that emits `Event`s.
impl Entity for Worktree {
    type Event = Event;
}
182
183impl Worktree {
    /// Creates a worktree for a local filesystem path and spawns the
    /// background task that scans it and reacts to filesystem events.
    pub async fn local(
        client: Arc<Client>,
        path: impl Into<Arc<Path>>,
        visible: bool,
        fs: Arc<dyn Fs>,
        next_entry_id: Arc<AtomicUsize>,
        cx: &mut AsyncAppContext,
    ) -> Result<ModelHandle<Self>> {
        let (tree, scan_states_tx) =
            LocalWorktree::create(client, path, visible, fs.clone(), next_entry_id, cx).await?;
        tree.update(cx, |tree, cx| {
            let tree = tree.as_local_mut().unwrap();
            let abs_path = tree.abs_path().clone();
            let background_snapshot = tree.background_snapshot.clone();
            let background = cx.background().clone();
            tree._background_scanner_task = Some(cx.background().spawn(async move {
                // Filesystem events are debounced by 100ms before scanning.
                let events = fs.watch(&abs_path, Duration::from_millis(100)).await;
                let scanner =
                    BackgroundScanner::new(background_snapshot, scan_states_tx, fs, background);
                scanner.run(events).await;
            }));
        });
        Ok(tree)
    }
208
    /// Creates a worktree that mirrors one on a remote host. Incoming
    /// `proto::UpdateWorktree` messages are applied to a background
    /// snapshot, which is then polled into the foreground snapshot.
    pub fn remote(
        project_remote_id: u64,
        replica_id: ReplicaId,
        worktree: proto::WorktreeMetadata,
        client: Arc<Client>,
        cx: &mut MutableAppContext,
    ) -> ModelHandle<Self> {
        let remote_id = worktree.id;
        // Lowercased characters of the root name, used for fuzzy matching.
        let root_char_bag: CharBag = worktree
            .root_name
            .chars()
            .map(|c| c.to_ascii_lowercase())
            .collect();
        let root_name = worktree.root_name.clone();
        let visible = worktree.visible;

        let abs_path = PathBuf::from(worktree.abs_path);
        // The snapshot starts empty and incomplete; entries arrive via
        // `update_from_remote`.
        let snapshot = Snapshot {
            id: WorktreeId(remote_id as usize),
            abs_path: Arc::from(abs_path.deref()),
            root_name,
            root_char_bag,
            entries_by_path: Default::default(),
            entries_by_id: Default::default(),
            scan_id: 0,
            is_complete: false,
        };

        let (updates_tx, mut updates_rx) = mpsc::unbounded();
        let background_snapshot = Arc::new(Mutex::new(snapshot.clone()));
        let (mut snapshot_updated_tx, mut snapshot_updated_rx) = watch::channel();
        let worktree_handle = cx.add_model(|_: &mut ModelContext<Worktree>| {
            Worktree::Remote(RemoteWorktree {
                project_id: project_remote_id,
                replica_id,
                snapshot: snapshot.clone(),
                background_snapshot: background_snapshot.clone(),
                updates_tx: Some(updates_tx),
                snapshot_subscriptions: Default::default(),
                client: client.clone(),
                diagnostic_summaries: Default::default(),
                visible,
            })
        });

        // Apply queued updates to the background snapshot off the main
        // thread, signalling `snapshot_updated_tx` after each one.
        cx.background()
            .spawn(async move {
                while let Some(update) = updates_rx.next().await {
                    if let Err(error) = background_snapshot.lock().apply_remote_update(update) {
                        log::error!("error applying worktree update: {}", error);
                    }
                    snapshot_updated_tx.send(()).await.ok();
                }
            })
            .detach();

        // On the foreground, poll the snapshot after each applied update
        // and resolve any subscriptions whose scan id is now observed.
        cx.spawn(|mut cx| {
            let this = worktree_handle.downgrade();
            async move {
                while (snapshot_updated_rx.recv().await).is_some() {
                    if let Some(this) = this.upgrade(&cx) {
                        this.update(&mut cx, |this, cx| {
                            this.poll_snapshot(cx);
                            let this = this.as_remote_mut().unwrap();
                            // Subscriptions are sorted by scan id, so stop
                            // at the first one that is not yet observed.
                            while let Some((scan_id, _)) = this.snapshot_subscriptions.front() {
                                if this.observed_snapshot(*scan_id) {
                                    let (_, tx) = this.snapshot_subscriptions.pop_front().unwrap();
                                    let _ = tx.send(());
                                } else {
                                    break;
                                }
                            }
                        });
                    } else {
                        break;
                    }
                }
            }
        })
        .detach();

        worktree_handle
    }
292
293 pub fn as_local(&self) -> Option<&LocalWorktree> {
294 if let Worktree::Local(worktree) = self {
295 Some(worktree)
296 } else {
297 None
298 }
299 }
300
301 pub fn as_remote(&self) -> Option<&RemoteWorktree> {
302 if let Worktree::Remote(worktree) = self {
303 Some(worktree)
304 } else {
305 None
306 }
307 }
308
309 pub fn as_local_mut(&mut self) -> Option<&mut LocalWorktree> {
310 if let Worktree::Local(worktree) = self {
311 Some(worktree)
312 } else {
313 None
314 }
315 }
316
317 pub fn as_remote_mut(&mut self) -> Option<&mut RemoteWorktree> {
318 if let Worktree::Remote(worktree) = self {
319 Some(worktree)
320 } else {
321 None
322 }
323 }
324
325 pub fn is_local(&self) -> bool {
326 matches!(self, Worktree::Local(_))
327 }
328
329 pub fn is_remote(&self) -> bool {
330 !self.is_local()
331 }
332
333 pub fn snapshot(&self) -> Snapshot {
334 match self {
335 Worktree::Local(worktree) => worktree.snapshot().snapshot,
336 Worktree::Remote(worktree) => worktree.snapshot(),
337 }
338 }
339
340 pub fn scan_id(&self) -> usize {
341 match self {
342 Worktree::Local(worktree) => worktree.snapshot.scan_id,
343 Worktree::Remote(worktree) => worktree.snapshot.scan_id,
344 }
345 }
346
347 pub fn is_visible(&self) -> bool {
348 match self {
349 Worktree::Local(worktree) => worktree.visible,
350 Worktree::Remote(worktree) => worktree.visible,
351 }
352 }
353
354 pub fn replica_id(&self) -> ReplicaId {
355 match self {
356 Worktree::Local(_) => 0,
357 Worktree::Remote(worktree) => worktree.replica_id,
358 }
359 }
360
361 pub fn diagnostic_summaries(
362 &self,
363 ) -> impl Iterator<Item = (Arc<Path>, DiagnosticSummary)> + '_ {
364 match self {
365 Worktree::Local(worktree) => &worktree.diagnostic_summaries,
366 Worktree::Remote(worktree) => &worktree.diagnostic_summaries,
367 }
368 .iter()
369 .filter(|(_, summary)| !summary.is_empty())
370 .map(|(path, summary)| (path.0.clone(), *summary))
371 }
372
373 fn poll_snapshot(&mut self, cx: &mut ModelContext<Self>) {
374 match self {
375 Self::Local(worktree) => worktree.poll_snapshot(false, cx),
376 Self::Remote(worktree) => worktree.poll_snapshot(cx),
377 };
378 }
379
380 pub fn abs_path(&self) -> Arc<Path> {
381 match self {
382 Worktree::Local(worktree) => worktree.abs_path.clone(),
383 Worktree::Remote(worktree) => worktree.abs_path.clone(),
384 }
385 }
386}
387
388impl LocalWorktree {
    /// Builds the `LocalWorktree` model for `path`: stats the root to seed
    /// the snapshot with its entry, and wires up the channel over which the
    /// background scanner will report its `ScanState`.
    async fn create(
        client: Arc<Client>,
        path: impl Into<Arc<Path>>,
        visible: bool,
        fs: Arc<dyn Fs>,
        next_entry_id: Arc<AtomicUsize>,
        cx: &mut AsyncAppContext,
    ) -> Result<(ModelHandle<Worktree>, UnboundedSender<ScanState>)> {
        let abs_path = path.into();
        // The root entry's worktree-relative path is the empty path.
        let path: Arc<Path> = Arc::from(Path::new(""));

        // After determining whether the root entry is a file or a directory, populate the
        // snapshot's "root name", which will be used for the purpose of fuzzy matching.
        let root_name = abs_path
            .file_name()
            .map_or(String::new(), |f| f.to_string_lossy().to_string());
        let root_char_bag = root_name.chars().map(|c| c.to_ascii_lowercase()).collect();
        let metadata = fs
            .metadata(&abs_path)
            .await
            .context("failed to stat worktree path")?;

        let (scan_states_tx, mut scan_states_rx) = mpsc::unbounded();
        let (mut last_scan_state_tx, last_scan_state_rx) =
            watch::channel_with(ScanState::Initializing);
        let tree = cx.add_model(move |cx: &mut ModelContext<Worktree>| {
            let mut snapshot = LocalSnapshot {
                ignores_by_parent_abs_path: Default::default(),
                git_repositories: Default::default(),
                removed_entry_ids: Default::default(),
                next_entry_id,
                snapshot: Snapshot {
                    id: WorktreeId::from_usize(cx.model_id()),
                    abs_path,
                    root_name: root_name.clone(),
                    root_char_bag,
                    entries_by_path: Default::default(),
                    entries_by_id: Default::default(),
                    scan_id: 0,
                    is_complete: true,
                },
                extension_counts: Default::default(),
            };
            // Seed the snapshot with the root entry, if the path exists.
            if let Some(metadata) = metadata {
                let entry = Entry::new(
                    path,
                    &metadata,
                    &snapshot.next_entry_id,
                    snapshot.root_char_bag,
                );
                snapshot.insert_entry(entry, fs.as_ref());
            }

            let tree = Self {
                snapshot: snapshot.clone(),
                background_snapshot: Arc::new(Mutex::new(snapshot)),
                last_scan_state_rx,
                _background_scanner_task: None,
                share: None,
                poll_task: None,
                diagnostics: Default::default(),
                diagnostic_summaries: Default::default(),
                client,
                fs,
                visible,
            };

            // Forward each reported scan state into `last_scan_state_rx`
            // and poll the snapshot in response.
            cx.spawn_weak(|this, mut cx| async move {
                while let Some(scan_state) = scan_states_rx.next().await {
                    if let Some(this) = this.upgrade(&cx) {
                        last_scan_state_tx.blocking_send(scan_state).ok();
                        this.update(&mut cx, |this, cx| this.poll_snapshot(cx));
                    } else {
                        break;
                    }
                }
            })
            .detach();

            Worktree::Local(tree)
        });

        Ok((tree, scan_states_tx))
    }
473
474 pub fn contains_abs_path(&self, path: &Path) -> bool {
475 path.starts_with(&self.abs_path)
476 }
477
478 fn absolutize(&self, path: &Path) -> PathBuf {
479 if path.file_name().is_some() {
480 self.abs_path.join(path)
481 } else {
482 self.abs_path.to_path_buf()
483 }
484 }
485
    /// Loads the file at `path` and constructs a `Buffer` model from its
    /// contents and git diff base, kicking off an initial git-diff
    /// computation.
    pub(crate) fn load_buffer(
        &mut self,
        path: &Path,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<ModelHandle<Buffer>>> {
        let path = Arc::from(path);
        cx.spawn(move |this, mut cx| async move {
            let (file, contents, diff_base) = this
                .update(&mut cx, |t, cx| t.as_local().unwrap().load(&path, cx))
                .await?;
            Ok(cx.add_model(|cx| {
                // 0 is the local replica id (see `Worktree::replica_id`).
                let mut buffer = Buffer::from_file(0, contents, diff_base, Arc::new(file), cx);
                buffer.git_diff_recalc(cx);
                buffer
            }))
        })
    }
503
504 pub fn diagnostics_for_path(&self, path: &Path) -> Option<Vec<DiagnosticEntry<PointUtf16>>> {
505 self.diagnostics.get(path).cloned()
506 }
507
    /// Replaces the stored diagnostics for `worktree_path` with
    /// `diagnostics` and recomputes the path's summary. Returns whether
    /// anything observable changed (either the old or the new summary is
    /// non-empty); if so and the worktree is shared, the new summary is
    /// forwarded to collaborators.
    pub fn update_diagnostics(
        &mut self,
        language_server_id: usize,
        worktree_path: Arc<Path>,
        diagnostics: Vec<DiagnosticEntry<PointUtf16>>,
        _: &mut ModelContext<Worktree>,
    ) -> Result<bool> {
        self.diagnostics.remove(&worktree_path);
        let old_summary = self
            .diagnostic_summaries
            .remove(&PathKey(worktree_path.clone()))
            .unwrap_or_default();
        // Bump the version so receivers can discard stale summaries.
        let new_summary =
            DiagnosticSummary::new(language_server_id, old_summary.version + 1, &diagnostics);
        // Empty summaries are not stored; the removals above already
        // cleared the previous state.
        if !new_summary.is_empty() {
            self.diagnostic_summaries
                .insert(PathKey(worktree_path.clone()), new_summary);
            self.diagnostics.insert(worktree_path.clone(), diagnostics);
        }

        let updated = !old_summary.is_empty() || !new_summary.is_empty();
        if updated {
            if let Some(share) = self.share.as_ref() {
                let _ = share
                    .diagnostic_summaries_tx
                    .unbounded_send((worktree_path.clone(), new_summary));
            }
        }

        Ok(updated)
    }
539
    /// Publishes the background scanner's snapshot to the foreground,
    /// depending on the current scan state:
    /// - `Idle`: adopt the background snapshot and notify observers.
    /// - `Initializing`: adopt it too, and schedule another poll shortly,
    ///   since the initial scan is still filling the snapshot in.
    /// - otherwise: adopt it only when `force` is set.
    fn poll_snapshot(&mut self, force: bool, cx: &mut ModelContext<Worktree>) {
        // Cancel any previously-scheduled re-poll.
        self.poll_task.take();

        match self.scan_state() {
            ScanState::Idle => {
                let new_snapshot = self.background_snapshot.lock().clone();
                let updated_repos = Self::changed_repos(
                    &self.snapshot.git_repositories,
                    &new_snapshot.git_repositories,
                );
                self.snapshot = new_snapshot;

                // Keep remote collaborators up to date.
                if let Some(share) = self.share.as_mut() {
                    *share.snapshots_tx.borrow_mut() = self.snapshot.clone();
                }

                cx.emit(Event::UpdatedEntries);

                if !updated_repos.is_empty() {
                    cx.emit(Event::UpdatedGitRepositories(updated_repos));
                }
            }

            ScanState::Initializing => {
                let is_fake_fs = self.fs.is_fake();

                let new_snapshot = self.background_snapshot.lock().clone();
                let updated_repos = Self::changed_repos(
                    &self.snapshot.git_repositories,
                    &new_snapshot.git_repositories,
                );
                self.snapshot = new_snapshot;

                // Poll again after a short delay (or a simulated delay on a
                // fake filesystem in tests) to pick up more of the
                // in-progress initial scan.
                self.poll_task = Some(cx.spawn_weak(|this, mut cx| async move {
                    if is_fake_fs {
                        #[cfg(any(test, feature = "test-support"))]
                        cx.background().simulate_random_delay().await;
                    } else {
                        smol::Timer::after(Duration::from_millis(100)).await;
                    }
                    if let Some(this) = this.upgrade(&cx) {
                        this.update(&mut cx, |this, cx| this.poll_snapshot(cx));
                    }
                }));

                cx.emit(Event::UpdatedEntries);

                if !updated_repos.is_empty() {
                    cx.emit(Event::UpdatedGitRepositories(updated_repos));
                }
            }

            _ => {
                if force {
                    self.snapshot = self.background_snapshot.lock().clone();
                }
            }
        }

        cx.notify();
    }
601
602 fn changed_repos(
603 old_repos: &[GitRepositoryEntry],
604 new_repos: &[GitRepositoryEntry],
605 ) -> Vec<GitRepositoryEntry> {
606 fn diff<'a>(
607 a: &'a [GitRepositoryEntry],
608 b: &'a [GitRepositoryEntry],
609 updated: &mut HashMap<&'a Path, GitRepositoryEntry>,
610 ) {
611 for a_repo in a {
612 let matched = b.iter().find(|b_repo| {
613 a_repo.git_dir_path == b_repo.git_dir_path && a_repo.scan_id == b_repo.scan_id
614 });
615
616 if matched.is_none() {
617 updated.insert(a_repo.git_dir_path.as_ref(), a_repo.clone());
618 }
619 }
620 }
621
622 let mut updated = HashMap::<&Path, GitRepositoryEntry>::default();
623
624 diff(old_repos, new_repos, &mut updated);
625 diff(new_repos, old_repos, &mut updated);
626
627 updated.into_values().collect()
628 }
629
630 pub fn scan_complete(&self) -> impl Future<Output = ()> {
631 let mut scan_state_rx = self.last_scan_state_rx.clone();
632 async move {
633 let mut scan_state = Some(scan_state_rx.borrow().clone());
634 while let Some(ScanState::Initializing | ScanState::Updating) = scan_state {
635 scan_state = scan_state_rx.recv().await;
636 }
637 }
638 }
639
    /// The most recent scan state reported by the background scanner.
    fn scan_state(&self) -> ScanState {
        self.last_scan_state_rx.borrow().clone()
    }
643
    /// Returns a clone of the current foreground snapshot.
    pub fn snapshot(&self) -> LocalSnapshot {
        self.snapshot.clone()
    }
647
648 pub fn metadata_proto(&self) -> proto::WorktreeMetadata {
649 proto::WorktreeMetadata {
650 id: self.id().to_proto(),
651 root_name: self.root_name().to_string(),
652 visible: self.visible,
653 abs_path: self.abs_path().as_os_str().to_string_lossy().into(),
654 }
655 }
656
    /// Loads the file at the worktree-relative `path`, returning its
    /// `File` metadata, its text, and — when the file is inside a git
    /// repository — the text of the version in the git index (the diff
    /// base).
    fn load(
        &self,
        path: &Path,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<(File, String, Option<String>)>> {
        let handle = cx.handle();
        let path = Arc::from(path);
        let abs_path = self.absolutize(&path);
        let fs = self.fs.clone();
        let snapshot = self.snapshot();

        cx.spawn(|this, mut cx| async move {
            let text = fs.load(&abs_path).await?;

            let diff_base = if let Some(repo) = snapshot.repo_for(&path) {
                if let Ok(repo_relative) = path.strip_prefix(repo.content_path) {
                    let repo_relative = repo_relative.to_owned();
                    // Reading the git index may block, so do it on the
                    // background executor.
                    cx.background()
                        .spawn(async move { repo.repo.lock().load_index_text(&repo_relative) })
                        .await
                } else {
                    None
                }
            } else {
                None
            };

            // Eagerly populate the snapshot with an updated entry for the loaded file
            let entry = this
                .update(&mut cx, |this, cx| {
                    this.as_local()
                        .unwrap()
                        .refresh_entry(path, abs_path, None, cx)
                })
                .await?;

            Ok((
                File {
                    entry_id: entry.id,
                    worktree: handle,
                    path: entry.path,
                    mtime: entry.mtime,
                    is_local: true,
                    is_deleted: false,
                },
                text,
                diff_base,
            ))
        })
    }
707
    /// Writes the buffer's current contents to `path` within this
    /// worktree, then notifies the buffer that it was saved, handing it a
    /// fresh `File` built from the resulting entry.
    pub fn save_buffer_as(
        &self,
        buffer_handle: ModelHandle<Buffer>,
        path: impl Into<Arc<Path>>,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<()>> {
        let buffer = buffer_handle.read(cx);
        // Capture text, fingerprint, and version before the async write so
        // `did_save` reflects the state that was actually written.
        let text = buffer.as_rope().clone();
        let fingerprint = text.fingerprint();
        let version = buffer.version();
        let save = self.write_file(path, text, buffer.line_ending(), cx);
        let handle = cx.handle();
        cx.as_mut().spawn(|mut cx| async move {
            let entry = save.await?;
            let file = File {
                entry_id: entry.id,
                worktree: handle,
                path: entry.path,
                mtime: entry.mtime,
                is_local: true,
                is_deleted: false,
            };

            buffer_handle.update(&mut cx, |buffer, cx| {
                buffer.did_save(version, fingerprint, file.mtime, Some(Arc::new(file)), cx);
            });

            Ok(())
        })
    }
738
739 pub fn create_entry(
740 &self,
741 path: impl Into<Arc<Path>>,
742 is_dir: bool,
743 cx: &mut ModelContext<Worktree>,
744 ) -> Task<Result<Entry>> {
745 self.write_entry_internal(
746 path,
747 if is_dir {
748 None
749 } else {
750 Some(Default::default())
751 },
752 cx,
753 )
754 }
755
    /// Saves `text` to `path` with the given line ending, creating the
    /// file if necessary, and returns the refreshed entry.
    pub fn write_file(
        &self,
        path: impl Into<Arc<Path>>,
        text: Rope,
        line_ending: LineEnding,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<Entry>> {
        self.write_entry_internal(path, Some((text, line_ending)), cx)
    }
765
    /// Deletes the entry with the given id from disk (recursively for
    /// directories), then removes it from the snapshot. Returns `None` if
    /// no entry with that id exists.
    pub fn delete_entry(
        &self,
        entry_id: ProjectEntryId,
        cx: &mut ModelContext<Worktree>,
    ) -> Option<Task<Result<()>>> {
        let entry = self.entry_for_id(entry_id)?.clone();
        let abs_path = self.absolutize(&entry.path);
        let delete = cx.background().spawn({
            let fs = self.fs.clone();
            let abs_path = abs_path;
            async move {
                if entry.is_file() {
                    fs.remove_file(&abs_path, Default::default()).await
                } else {
                    fs.remove_dir(
                        &abs_path,
                        RemoveOptions {
                            recursive: true,
                            ignore_if_not_exists: false,
                        },
                    )
                    .await
                }
            }
        });

        Some(cx.spawn(|this, mut cx| async move {
            delete.await?;
            this.update(&mut cx, |this, cx| {
                let this = this.as_local_mut().unwrap();
                {
                    // Remove the entry eagerly instead of waiting for the
                    // filesystem watcher to observe the deletion.
                    let mut snapshot = this.background_snapshot.lock();
                    snapshot.delete_entry(entry_id);
                }
                this.poll_snapshot(true, cx);
            });
            Ok(())
        }))
    }
805
    /// Renames the entry with the given id to `new_path` on disk, then
    /// refreshes the snapshot (removing the old path and inserting the new
    /// one). Returns `None` if no entry with that id exists.
    pub fn rename_entry(
        &self,
        entry_id: ProjectEntryId,
        new_path: impl Into<Arc<Path>>,
        cx: &mut ModelContext<Worktree>,
    ) -> Option<Task<Result<Entry>>> {
        let old_path = self.entry_for_id(entry_id)?.path.clone();
        let new_path = new_path.into();
        let abs_old_path = self.absolutize(&old_path);
        let abs_new_path = self.absolutize(&new_path);
        let rename = cx.background().spawn({
            let fs = self.fs.clone();
            let abs_new_path = abs_new_path.clone();
            async move {
                fs.rename(&abs_old_path, &abs_new_path, Default::default())
                    .await
            }
        });

        Some(cx.spawn(|this, mut cx| async move {
            rename.await?;
            // Passing `old_path` makes `refresh_entry` drop the stale
            // entry at the previous location.
            let entry = this
                .update(&mut cx, |this, cx| {
                    this.as_local_mut().unwrap().refresh_entry(
                        new_path.clone(),
                        abs_new_path,
                        Some(old_path),
                        cx,
                    )
                })
                .await?;
            Ok(entry)
        }))
    }
840
    /// Recursively copies the entry with the given id to `new_path` on
    /// disk, then refreshes the snapshot with the copy's entry. Returns
    /// `None` if no entry with that id exists.
    pub fn copy_entry(
        &self,
        entry_id: ProjectEntryId,
        new_path: impl Into<Arc<Path>>,
        cx: &mut ModelContext<Worktree>,
    ) -> Option<Task<Result<Entry>>> {
        let old_path = self.entry_for_id(entry_id)?.path.clone();
        let new_path = new_path.into();
        let abs_old_path = self.absolutize(&old_path);
        let abs_new_path = self.absolutize(&new_path);
        let copy = cx.background().spawn({
            let fs = self.fs.clone();
            let abs_new_path = abs_new_path.clone();
            async move {
                copy_recursive(
                    fs.as_ref(),
                    &abs_old_path,
                    &abs_new_path,
                    Default::default(),
                )
                .await
            }
        });

        Some(cx.spawn(|this, mut cx| async move {
            copy.await?;
            let entry = this
                .update(&mut cx, |this, cx| {
                    this.as_local_mut().unwrap().refresh_entry(
                        new_path.clone(),
                        abs_new_path,
                        None,
                        cx,
                    )
                })
                .await?;
            Ok(entry)
        }))
    }
880
    /// Shared implementation of `create_entry` and `write_file`: writes a
    /// file (when `text_if_file` is `Some`) or creates a directory at
    /// `path`, then refreshes the affected snapshot entry.
    fn write_entry_internal(
        &self,
        path: impl Into<Arc<Path>>,
        text_if_file: Option<(Rope, LineEnding)>,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<Entry>> {
        let path = path.into();
        let abs_path = self.absolutize(&path);
        let write = cx.background().spawn({
            let fs = self.fs.clone();
            let abs_path = abs_path.clone();
            async move {
                if let Some((text, line_ending)) = text_if_file {
                    fs.save(&abs_path, &text, line_ending).await
                } else {
                    fs.create_dir(&abs_path).await
                }
            }
        });

        cx.spawn(|this, mut cx| async move {
            write.await?;
            let entry = this
                .update(&mut cx, |this, cx| {
                    this.as_local_mut()
                        .unwrap()
                        .refresh_entry(path, abs_path, None, cx)
                })
                .await?;
            Ok(entry)
        })
    }
913
    /// Stats `abs_path` and inserts a fresh entry for `path` into the
    /// background snapshot (first removing `old_path`, for renames), then
    /// force-polls so the foreground snapshot reflects the change. Fails
    /// if the file no longer exists or the worktree was dropped.
    fn refresh_entry(
        &self,
        path: Arc<Path>,
        abs_path: PathBuf,
        old_path: Option<Arc<Path>>,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<Entry>> {
        let fs = self.fs.clone();
        let root_char_bag;
        let next_entry_id;
        {
            // Grab what we need from the background snapshot up front so
            // the lock isn't held across the await below.
            let snapshot = self.background_snapshot.lock();
            root_char_bag = snapshot.root_char_bag;
            next_entry_id = snapshot.next_entry_id.clone();
        }
        cx.spawn_weak(|this, mut cx| async move {
            let metadata = fs
                .metadata(&abs_path)
                .await?
                .ok_or_else(|| anyhow!("could not read saved file metadata"))?;
            let this = this
                .upgrade(&cx)
                .ok_or_else(|| anyhow!("worktree was dropped"))?;
            this.update(&mut cx, |this, cx| {
                let this = this.as_local_mut().unwrap();
                let inserted_entry;
                {
                    let mut snapshot = this.background_snapshot.lock();
                    let mut entry = Entry::new(path, &metadata, &next_entry_id, root_char_bag);
                    // Recompute ignored status from the gitignore stack.
                    entry.is_ignored = snapshot
                        .ignore_stack_for_abs_path(&abs_path, entry.is_dir())
                        .is_abs_path_ignored(&abs_path, entry.is_dir());
                    if let Some(old_path) = old_path {
                        snapshot.remove_path(&old_path);
                    }
                    inserted_entry = snapshot.insert_entry(entry, fs.as_ref());
                    snapshot.scan_id += 1;
                }
                this.poll_snapshot(true, cx);
                Ok(inserted_entry)
            })
        })
    }
957
958 pub fn share(&mut self, project_id: u64, cx: &mut ModelContext<Worktree>) -> Task<Result<()>> {
959 let (share_tx, share_rx) = oneshot::channel();
960
961 if self.share.is_some() {
962 let _ = share_tx.send(Ok(()));
963 } else {
964 let (snapshots_tx, mut snapshots_rx) = watch::channel_with(self.snapshot());
965 let worktree_id = cx.model_id() as u64;
966
967 let maintain_remote_snapshot = cx.background().spawn({
968 let rpc = self.client.clone();
969 async move {
970 let mut prev_snapshot = match snapshots_rx.recv().await {
971 Some(snapshot) => {
972 let update = proto::UpdateWorktree {
973 project_id,
974 worktree_id,
975 abs_path: snapshot.abs_path().to_string_lossy().into(),
976 root_name: snapshot.root_name().to_string(),
977 updated_entries: snapshot
978 .entries_by_path
979 .iter()
980 .map(Into::into)
981 .collect(),
982 removed_entries: Default::default(),
983 scan_id: snapshot.scan_id as u64,
984 is_last_update: true,
985 };
986 if let Err(error) = send_worktree_update(&rpc, update).await {
987 let _ = share_tx.send(Err(error));
988 return Err(anyhow!("failed to send initial update worktree"));
989 } else {
990 let _ = share_tx.send(Ok(()));
991 snapshot
992 }
993 }
994 None => {
995 share_tx
996 .send(Err(anyhow!("worktree dropped before share completed")))
997 .ok();
998 return Err(anyhow!("failed to send initial update worktree"));
999 }
1000 };
1001
1002 while let Some(snapshot) = snapshots_rx.recv().await {
1003 send_worktree_update(
1004 &rpc,
1005 snapshot.build_update(&prev_snapshot, project_id, worktree_id, true),
1006 )
1007 .await?;
1008 prev_snapshot = snapshot;
1009 }
1010
1011 Ok::<_, anyhow::Error>(())
1012 }
1013 .log_err()
1014 });
1015
1016 let (diagnostic_summaries_tx, mut diagnostic_summaries_rx) = mpsc::unbounded();
1017 for (path, summary) in self.diagnostic_summaries.iter() {
1018 let _ = diagnostic_summaries_tx.unbounded_send((path.0.clone(), summary.clone()));
1019 }
1020 let maintain_remote_diagnostic_summaries = cx.background().spawn({
1021 let rpc = self.client.clone();
1022 async move {
1023 while let Some((path, summary)) = diagnostic_summaries_rx.next().await {
1024 rpc.request(proto::UpdateDiagnosticSummary {
1025 project_id,
1026 worktree_id,
1027 summary: Some(summary.to_proto(&path)),
1028 })
1029 .await
1030 .log_err();
1031 }
1032 }
1033 });
1034
1035 self.share = Some(ShareState {
1036 project_id,
1037 snapshots_tx,
1038 diagnostic_summaries_tx,
1039 _maintain_remote_snapshot: maintain_remote_snapshot,
1040 _maintain_remote_diagnostic_summaries: maintain_remote_diagnostic_summaries,
1041 });
1042 }
1043
1044 cx.foreground().spawn(async move {
1045 share_rx
1046 .await
1047 .unwrap_or_else(|_| Err(anyhow!("share ended")))
1048 })
1049 }
1050
1051 pub fn unshare(&mut self) {
1052 self.share.take();
1053 }
1054
    /// Whether this worktree is currently shared with collaborators.
    pub fn is_shared(&self) -> bool {
        self.share.is_some()
    }
1058}
1059
1060impl RemoteWorktree {
    /// Returns a clone of the current foreground snapshot.
    fn snapshot(&self) -> Snapshot {
        self.snapshot.clone()
    }
1064
    /// Copies the background snapshot (which remote updates are applied
    /// to) into the foreground and notifies observers.
    fn poll_snapshot(&mut self, cx: &mut ModelContext<Worktree>) {
        self.snapshot = self.background_snapshot.lock().clone();
        cx.emit(Event::UpdatedEntries);
        cx.notify();
    }
1070
    /// Called when the connection to the host is lost: stops accepting
    /// further updates and drops all pending snapshot subscriptions.
    pub fn disconnected_from_host(&mut self) {
        self.updates_tx.take();
        self.snapshot_subscriptions.clear();
    }
1075
    /// Enqueues an update received from the host to be applied to the
    /// background snapshot. No-op once `disconnected_from_host` has run.
    pub fn update_from_remote(&mut self, update: proto::UpdateWorktree) {
        if let Some(updates_tx) = &self.updates_tx {
            updates_tx
                .unbounded_send(update)
                .expect("consumer runs to completion");
        }
    }
1083
1084 fn observed_snapshot(&self, scan_id: usize) -> bool {
1085 self.scan_id > scan_id || (self.scan_id == scan_id && self.is_complete)
1086 }
1087
    /// Returns a future that resolves once this worktree has observed a
    /// snapshot with at least the given scan id. Pending subscriptions are
    /// kept sorted by scan id so they can be resolved in order.
    fn wait_for_snapshot(&mut self, scan_id: usize) -> impl Future<Output = ()> {
        let (tx, rx) = oneshot::channel();
        if self.observed_snapshot(scan_id) {
            // Already observed: resolve immediately.
            let _ = tx.send(());
        } else {
            // Insert at the sorted position regardless of whether an equal
            // scan id is already present.
            match self
                .snapshot_subscriptions
                .binary_search_by_key(&scan_id, |probe| probe.0)
            {
                Ok(ix) | Err(ix) => self.snapshot_subscriptions.insert(ix, (scan_id, tx)),
            }
        }

        async move {
            // A dropped sender (e.g. on disconnect) also resolves the wait.
            let _ = rx.await;
        }
    }
1105
    /// Stores a diagnostic summary received from the host, ignoring it if
    /// a newer version for the same path is already present.
    pub fn update_diagnostic_summary(
        &mut self,
        path: Arc<Path>,
        summary: &proto::DiagnosticSummary,
    ) {
        let old_summary = self.diagnostic_summaries.get(&PathKey(path.clone()));
        let new_summary = DiagnosticSummary {
            language_server_id: summary.language_server_id as usize,
            error_count: summary.error_count as usize,
            warning_count: summary.warning_count as usize,
            version: summary.version as usize,
        };
        // Only move the version forward; stale summaries are dropped.
        if old_summary.map_or(true, |old_summary| {
            new_summary.version >= old_summary.version
        }) {
            self.diagnostic_summaries.insert(PathKey(path), new_summary);
        }
    }
1124
    /// Inserts an entry received over the wire once a snapshot with the
    /// given scan id has been observed, then syncs the foreground snapshot.
    pub fn insert_entry(
        &mut self,
        entry: proto::Entry,
        scan_id: usize,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<Entry>> {
        let wait_for_snapshot = self.wait_for_snapshot(scan_id);
        cx.spawn(|this, mut cx| async move {
            wait_for_snapshot.await;
            this.update(&mut cx, |worktree, _| {
                let worktree = worktree.as_remote_mut().unwrap();
                let mut snapshot = worktree.background_snapshot.lock();
                let entry = snapshot.insert_entry(entry);
                worktree.snapshot = snapshot.clone();
                entry
            })
        })
    }
1143
    /// Deletes the entry with the given id once a snapshot with the given
    /// scan id has been observed, then syncs the foreground snapshot.
    pub(crate) fn delete_entry(
        &mut self,
        id: ProjectEntryId,
        scan_id: usize,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<()>> {
        let wait_for_snapshot = self.wait_for_snapshot(scan_id);
        cx.spawn(|this, mut cx| async move {
            wait_for_snapshot.await;
            this.update(&mut cx, |worktree, _| {
                let worktree = worktree.as_remote_mut().unwrap();
                let mut snapshot = worktree.background_snapshot.lock();
                snapshot.delete_entry(id);
                worktree.snapshot = snapshot.clone();
            });
            Ok(())
        })
    }
1162}
1163
impl Snapshot {
    pub fn id(&self) -> WorktreeId {
        self.id
    }

    /// Returns whether an entry with the given id exists in this snapshot.
    pub fn contains_entry(&self, entry_id: ProjectEntryId) -> bool {
        self.entries_by_id.get(&entry_id, &()).is_some()
    }

    /// Inserts a single entry received over the wire, keeping the
    /// path-ordered and id-ordered trees in sync: if the id was already
    /// present under a different path, the stale path entry is removed.
    pub(crate) fn insert_entry(&mut self, entry: proto::Entry) -> Result<Entry> {
        let entry = Entry::try_from((&self.root_char_bag, entry))?;
        let old_entry = self.entries_by_id.insert_or_replace(
            PathEntry {
                id: entry.id,
                path: entry.path.clone(),
                is_ignored: entry.is_ignored,
                // NOTE(review): per-entry scan ids appear unused on this
                // (remote) side, so zero is stored — confirm intended.
                scan_id: 0,
            },
            &(),
        );
        if let Some(old_entry) = old_entry {
            self.entries_by_path.remove(&PathKey(old_entry.path), &());
        }
        self.entries_by_path.insert_or_replace(entry.clone(), &());
        Ok(entry)
    }

    /// Removes an entry and all of its descendants from both trees.
    /// Returns false if the id wasn't present.
    fn delete_entry(&mut self, entry_id: ProjectEntryId) -> bool {
        if let Some(removed_entry) = self.entries_by_id.remove(&entry_id, &()) {
            self.entries_by_path = {
                let mut cursor = self.entries_by_path.cursor();
                // Keep everything strictly before the removed path...
                let mut new_entries_by_path =
                    cursor.slice(&TraversalTarget::Path(&removed_entry.path), Bias::Left, &());
                // ...skip the removed entry plus its descendants, dropping
                // them from the id tree as we go...
                while let Some(entry) = cursor.item() {
                    if entry.path.starts_with(&removed_entry.path) {
                        self.entries_by_id.remove(&entry.id, &());
                        cursor.next(&());
                    } else {
                        break;
                    }
                }
                // ...and keep everything after them.
                new_entries_by_path.push_tree(cursor.suffix(&()), &());
                new_entries_by_path
            };

            true
        } else {
            false
        }
    }

    /// Applies a batch of removals and updates sent by the host, then
    /// advances this snapshot's scan id.
    pub(crate) fn apply_remote_update(&mut self, update: proto::UpdateWorktree) -> Result<()> {
        let mut entries_by_path_edits = Vec::new();
        let mut entries_by_id_edits = Vec::new();
        for entry_id in update.removed_entries {
            let entry = self
                .entry_for_id(ProjectEntryId::from_proto(entry_id))
                .ok_or_else(|| anyhow!("unknown entry {}", entry_id))?;
            entries_by_path_edits.push(Edit::Remove(PathKey(entry.path.clone())));
            entries_by_id_edits.push(Edit::Remove(entry.id));
        }

        for entry in update.updated_entries {
            let entry = Entry::try_from((&self.root_char_bag, entry))?;
            // An update may have moved the entry; remove the old path key.
            if let Some(PathEntry { path, .. }) = self.entries_by_id.get(&entry.id, &()) {
                entries_by_path_edits.push(Edit::Remove(PathKey(path.clone())));
            }
            entries_by_id_edits.push(Edit::Insert(PathEntry {
                id: entry.id,
                path: entry.path.clone(),
                is_ignored: entry.is_ignored,
                scan_id: 0,
            }));
            entries_by_path_edits.push(Edit::Insert(entry));
        }

        self.entries_by_path.edit(entries_by_path_edits, &());
        self.entries_by_id.edit(entries_by_id_edits, &());
        self.scan_id = update.scan_id as usize;
        self.is_complete = update.is_last_update;

        Ok(())
    }

    pub fn file_count(&self) -> usize {
        self.entries_by_path.summary().file_count
    }

    /// Number of non-ignored files.
    pub fn visible_file_count(&self) -> usize {
        self.entries_by_path.summary().visible_file_count
    }

    /// Returns a traversal positioned at the `start_offset`-th entry, where
    /// offsets count only the entry kinds selected by the two flags.
    fn traverse_from_offset(
        &self,
        include_dirs: bool,
        include_ignored: bool,
        start_offset: usize,
    ) -> Traversal {
        let mut cursor = self.entries_by_path.cursor();
        cursor.seek(
            &TraversalTarget::Count {
                count: start_offset,
                include_dirs,
                include_ignored,
            },
            Bias::Right,
            &(),
        );
        Traversal {
            cursor,
            include_dirs,
            include_ignored,
        }
    }

    /// Returns a traversal positioned at the entry with the given path
    /// (or at the first entry after it, if absent).
    fn traverse_from_path(
        &self,
        include_dirs: bool,
        include_ignored: bool,
        path: &Path,
    ) -> Traversal {
        let mut cursor = self.entries_by_path.cursor();
        cursor.seek(&TraversalTarget::Path(path), Bias::Left, &());
        Traversal {
            cursor,
            include_dirs,
            include_ignored,
        }
    }

    /// Iterates over file entries, starting at the `start`-th file.
    pub fn files(&self, include_ignored: bool, start: usize) -> Traversal {
        self.traverse_from_offset(false, include_ignored, start)
    }

    /// Iterates over all entries, directories included.
    pub fn entries(&self, include_ignored: bool) -> Traversal {
        self.traverse_from_offset(true, include_ignored, 0)
    }

    /// Iterates over the paths of all entries, excluding the root entry's
    /// empty path.
    pub fn paths(&self) -> impl Iterator<Item = &Arc<Path>> {
        let empty_path = Path::new("");
        self.entries_by_path
            .cursor::<()>()
            .filter(move |entry| entry.path.as_ref() != empty_path)
            .map(|entry| &entry.path)
    }

    /// Iterates over the direct children of `parent_path`.
    fn child_entries<'a>(&'a self, parent_path: &'a Path) -> ChildEntriesIter<'a> {
        let mut cursor = self.entries_by_path.cursor();
        cursor.seek(&TraversalTarget::Path(parent_path), Bias::Right, &());
        let traversal = Traversal {
            cursor,
            include_dirs: true,
            include_ignored: true,
        };
        ChildEntriesIter {
            traversal,
            parent_path,
        }
    }

    pub fn root_entry(&self) -> Option<&Entry> {
        self.entry_for_path("")
    }

    pub fn root_name(&self) -> &str {
        &self.root_name
    }

    pub fn scan_id(&self) -> usize {
        self.scan_id
    }

    /// Looks up the entry at exactly `path`, if any.
    pub fn entry_for_path(&self, path: impl AsRef<Path>) -> Option<&Entry> {
        let path = path.as_ref();
        self.traverse_from_path(true, true, path)
            .entry()
            .and_then(|entry| {
                // The traversal lands on the first entry at or after `path`;
                // only an exact match counts.
                if entry.path.as_ref() == path {
                    Some(entry)
                } else {
                    None
                }
            })
    }

    /// Resolves an entry id to the full entry via its recorded path.
    pub fn entry_for_id(&self, id: ProjectEntryId) -> Option<&Entry> {
        let entry = self.entries_by_id.get(&id, &())?;
        self.entry_for_path(&entry.path)
    }

    pub fn inode_for_path(&self, path: impl AsRef<Path>) -> Option<u64> {
        self.entry_for_path(path.as_ref()).map(|e| e.inode)
    }
}
1358
1359impl LocalSnapshot {
1360 pub fn abs_path(&self) -> &Arc<Path> {
1361 &self.abs_path
1362 }
1363
1364 pub fn extension_counts(&self) -> &HashMap<OsString, usize> {
1365 &self.extension_counts
1366 }
1367
1368 // Gives the most specific git repository for a given path
1369 pub(crate) fn repo_for(&self, path: &Path) -> Option<GitRepositoryEntry> {
1370 self.git_repositories
1371 .iter()
1372 .rev() //git_repository is ordered lexicographically
1373 .find(|repo| repo.manages(path))
1374 .cloned()
1375 }
1376
1377 pub(crate) fn in_dot_git(&mut self, path: &Path) -> Option<&mut GitRepositoryEntry> {
1378 // Git repositories cannot be nested, so we don't need to reverse the order
1379 self.git_repositories
1380 .iter_mut()
1381 .find(|repo| repo.in_dot_git(path))
1382 }
1383
1384 #[cfg(test)]
1385 pub(crate) fn build_initial_update(&self, project_id: u64) -> proto::UpdateWorktree {
1386 let root_name = self.root_name.clone();
1387 proto::UpdateWorktree {
1388 project_id,
1389 worktree_id: self.id().to_proto(),
1390 abs_path: self.abs_path().to_string_lossy().into(),
1391 root_name,
1392 updated_entries: self.entries_by_path.iter().map(Into::into).collect(),
1393 removed_entries: Default::default(),
1394 scan_id: self.scan_id as u64,
1395 is_last_update: true,
1396 }
1397 }
1398
1399 pub(crate) fn build_update(
1400 &self,
1401 other: &Self,
1402 project_id: u64,
1403 worktree_id: u64,
1404 include_ignored: bool,
1405 ) -> proto::UpdateWorktree {
1406 let mut updated_entries = Vec::new();
1407 let mut removed_entries = Vec::new();
1408 let mut self_entries = self
1409 .entries_by_id
1410 .cursor::<()>()
1411 .filter(|e| include_ignored || !e.is_ignored)
1412 .peekable();
1413 let mut other_entries = other
1414 .entries_by_id
1415 .cursor::<()>()
1416 .filter(|e| include_ignored || !e.is_ignored)
1417 .peekable();
1418 loop {
1419 match (self_entries.peek(), other_entries.peek()) {
1420 (Some(self_entry), Some(other_entry)) => {
1421 match Ord::cmp(&self_entry.id, &other_entry.id) {
1422 Ordering::Less => {
1423 let entry = self.entry_for_id(self_entry.id).unwrap().into();
1424 updated_entries.push(entry);
1425 self_entries.next();
1426 }
1427 Ordering::Equal => {
1428 if self_entry.scan_id != other_entry.scan_id {
1429 let entry = self.entry_for_id(self_entry.id).unwrap().into();
1430 updated_entries.push(entry);
1431 }
1432
1433 self_entries.next();
1434 other_entries.next();
1435 }
1436 Ordering::Greater => {
1437 removed_entries.push(other_entry.id.to_proto());
1438 other_entries.next();
1439 }
1440 }
1441 }
1442 (Some(self_entry), None) => {
1443 let entry = self.entry_for_id(self_entry.id).unwrap().into();
1444 updated_entries.push(entry);
1445 self_entries.next();
1446 }
1447 (None, Some(other_entry)) => {
1448 removed_entries.push(other_entry.id.to_proto());
1449 other_entries.next();
1450 }
1451 (None, None) => break,
1452 }
1453 }
1454
1455 proto::UpdateWorktree {
1456 project_id,
1457 worktree_id,
1458 abs_path: self.abs_path().to_string_lossy().into(),
1459 root_name: self.root_name().to_string(),
1460 updated_entries,
1461 removed_entries,
1462 scan_id: self.scan_id as u64,
1463 is_last_update: true,
1464 }
1465 }
1466
1467 fn insert_entry(&mut self, mut entry: Entry, fs: &dyn Fs) -> Entry {
1468 if entry.is_file() && entry.path.file_name() == Some(&GITIGNORE) {
1469 let abs_path = self.abs_path.join(&entry.path);
1470 match smol::block_on(build_gitignore(&abs_path, fs)) {
1471 Ok(ignore) => {
1472 self.ignores_by_parent_abs_path.insert(
1473 abs_path.parent().unwrap().into(),
1474 (Arc::new(ignore), self.scan_id),
1475 );
1476 }
1477 Err(error) => {
1478 log::error!(
1479 "error loading .gitignore file {:?} - {:?}",
1480 &entry.path,
1481 error
1482 );
1483 }
1484 }
1485 }
1486
1487 self.reuse_entry_id(&mut entry);
1488
1489 if entry.kind == EntryKind::PendingDir {
1490 if let Some(existing_entry) =
1491 self.entries_by_path.get(&PathKey(entry.path.clone()), &())
1492 {
1493 entry.kind = existing_entry.kind;
1494 }
1495 }
1496
1497 self.entries_by_path.insert_or_replace(entry.clone(), &());
1498 let scan_id = self.scan_id;
1499 let removed_entry = self.entries_by_id.insert_or_replace(
1500 PathEntry {
1501 id: entry.id,
1502 path: entry.path.clone(),
1503 is_ignored: entry.is_ignored,
1504 scan_id,
1505 },
1506 &(),
1507 );
1508
1509 if let Some(removed_entry) = removed_entry {
1510 self.dec_extension_count(&removed_entry.path, removed_entry.is_ignored);
1511 }
1512 self.inc_extension_count(&entry.path, entry.is_ignored);
1513
1514 entry
1515 }
1516
1517 fn populate_dir(
1518 &mut self,
1519 parent_path: Arc<Path>,
1520 entries: impl IntoIterator<Item = Entry>,
1521 ignore: Option<Arc<Gitignore>>,
1522 fs: &dyn Fs,
1523 ) {
1524 let mut parent_entry = if let Some(parent_entry) =
1525 self.entries_by_path.get(&PathKey(parent_path.clone()), &())
1526 {
1527 parent_entry.clone()
1528 } else {
1529 log::warn!(
1530 "populating a directory {:?} that has been removed",
1531 parent_path
1532 );
1533 return;
1534 };
1535
1536 if let Some(ignore) = ignore {
1537 self.ignores_by_parent_abs_path.insert(
1538 self.abs_path.join(&parent_path).into(),
1539 (ignore, self.scan_id),
1540 );
1541 }
1542 if matches!(parent_entry.kind, EntryKind::PendingDir) {
1543 parent_entry.kind = EntryKind::Dir;
1544 } else {
1545 unreachable!();
1546 }
1547
1548 if parent_path.file_name() == Some(&DOT_GIT) {
1549 let abs_path = self.abs_path.join(&parent_path);
1550 let content_path: Arc<Path> = parent_path.parent().unwrap().into();
1551 if let Err(ix) = self
1552 .git_repositories
1553 .binary_search_by_key(&&content_path, |repo| &repo.content_path)
1554 {
1555 if let Some(repo) = fs.open_repo(abs_path.as_path()) {
1556 self.git_repositories.insert(
1557 ix,
1558 GitRepositoryEntry {
1559 repo,
1560 scan_id: 0,
1561 content_path,
1562 git_dir_path: parent_path,
1563 },
1564 );
1565 }
1566 }
1567 }
1568
1569 let mut entries_by_path_edits = vec![Edit::Insert(parent_entry)];
1570 let mut entries_by_id_edits = Vec::new();
1571
1572 for mut entry in entries {
1573 self.reuse_entry_id(&mut entry);
1574 self.inc_extension_count(&entry.path, entry.is_ignored);
1575 entries_by_id_edits.push(Edit::Insert(PathEntry {
1576 id: entry.id,
1577 path: entry.path.clone(),
1578 is_ignored: entry.is_ignored,
1579 scan_id: self.scan_id,
1580 }));
1581 entries_by_path_edits.push(Edit::Insert(entry));
1582 }
1583
1584 self.entries_by_path.edit(entries_by_path_edits, &());
1585 let removed_entries = self.entries_by_id.edit(entries_by_id_edits, &());
1586
1587 for removed_entry in removed_entries {
1588 self.dec_extension_count(&removed_entry.path, removed_entry.is_ignored);
1589 }
1590 }
1591
1592 fn inc_extension_count(&mut self, path: &Path, ignored: bool) {
1593 if !ignored {
1594 if let Some(extension) = path.extension() {
1595 if let Some(count) = self.extension_counts.get_mut(extension) {
1596 *count += 1;
1597 } else {
1598 self.extension_counts.insert(extension.into(), 1);
1599 }
1600 }
1601 }
1602 }
1603
1604 fn dec_extension_count(&mut self, path: &Path, ignored: bool) {
1605 if !ignored {
1606 if let Some(extension) = path.extension() {
1607 if let Some(count) = self.extension_counts.get_mut(extension) {
1608 *count -= 1;
1609 }
1610 }
1611 }
1612 }
1613
1614 fn reuse_entry_id(&mut self, entry: &mut Entry) {
1615 if let Some(removed_entry_id) = self.removed_entry_ids.remove(&entry.inode) {
1616 entry.id = removed_entry_id;
1617 } else if let Some(existing_entry) = self.entry_for_path(&entry.path) {
1618 entry.id = existing_entry.id;
1619 }
1620 }
1621
1622 fn remove_path(&mut self, path: &Path) {
1623 let mut new_entries;
1624 let removed_entries;
1625 {
1626 let mut cursor = self.entries_by_path.cursor::<TraversalProgress>();
1627 new_entries = cursor.slice(&TraversalTarget::Path(path), Bias::Left, &());
1628 removed_entries = cursor.slice(&TraversalTarget::PathSuccessor(path), Bias::Left, &());
1629 new_entries.push_tree(cursor.suffix(&()), &());
1630 }
1631 self.entries_by_path = new_entries;
1632
1633 let mut entries_by_id_edits = Vec::new();
1634 for entry in removed_entries.cursor::<()>() {
1635 let removed_entry_id = self
1636 .removed_entry_ids
1637 .entry(entry.inode)
1638 .or_insert(entry.id);
1639 *removed_entry_id = cmp::max(*removed_entry_id, entry.id);
1640 entries_by_id_edits.push(Edit::Remove(entry.id));
1641 self.dec_extension_count(&entry.path, entry.is_ignored);
1642 }
1643 self.entries_by_id.edit(entries_by_id_edits, &());
1644
1645 if path.file_name() == Some(&GITIGNORE) {
1646 let abs_parent_path = self.abs_path.join(path.parent().unwrap());
1647 if let Some((_, scan_id)) = self
1648 .ignores_by_parent_abs_path
1649 .get_mut(abs_parent_path.as_path())
1650 {
1651 *scan_id = self.snapshot.scan_id;
1652 }
1653 } else if path.file_name() == Some(&DOT_GIT) {
1654 let parent_path = path.parent().unwrap();
1655 if let Ok(ix) = self
1656 .git_repositories
1657 .binary_search_by_key(&parent_path, |repo| repo.git_dir_path.as_ref())
1658 {
1659 self.git_repositories[ix].scan_id = self.snapshot.scan_id;
1660 }
1661 }
1662 }
1663
1664 fn ancestor_inodes_for_path(&self, path: &Path) -> TreeSet<u64> {
1665 let mut inodes = TreeSet::default();
1666 for ancestor in path.ancestors().skip(1) {
1667 if let Some(entry) = self.entry_for_path(ancestor) {
1668 inodes.insert(entry.inode);
1669 }
1670 }
1671 inodes
1672 }
1673
1674 fn ignore_stack_for_abs_path(&self, abs_path: &Path, is_dir: bool) -> Arc<IgnoreStack> {
1675 let mut new_ignores = Vec::new();
1676 for ancestor in abs_path.ancestors().skip(1) {
1677 if let Some((ignore, _)) = self.ignores_by_parent_abs_path.get(ancestor) {
1678 new_ignores.push((ancestor, Some(ignore.clone())));
1679 } else {
1680 new_ignores.push((ancestor, None));
1681 }
1682 }
1683
1684 let mut ignore_stack = IgnoreStack::none();
1685 for (parent_abs_path, ignore) in new_ignores.into_iter().rev() {
1686 if ignore_stack.is_abs_path_ignored(parent_abs_path, true) {
1687 ignore_stack = IgnoreStack::all();
1688 break;
1689 } else if let Some(ignore) = ignore {
1690 ignore_stack = ignore_stack.append(parent_abs_path.into(), ignore);
1691 }
1692 }
1693
1694 if ignore_stack.is_abs_path_ignored(abs_path, is_dir) {
1695 ignore_stack = IgnoreStack::all();
1696 }
1697
1698 ignore_stack
1699 }
1700
1701 pub fn git_repo_entries(&self) -> &[GitRepositoryEntry] {
1702 &self.git_repositories
1703 }
1704}
1705
1706impl GitRepositoryEntry {
1707 // Note that these paths should be relative to the worktree root.
1708 pub(crate) fn manages(&self, path: &Path) -> bool {
1709 path.starts_with(self.content_path.as_ref())
1710 }
1711
1712 // Note that theis path should be relative to the worktree root.
1713 pub(crate) fn in_dot_git(&self, path: &Path) -> bool {
1714 path.starts_with(self.git_dir_path.as_ref())
1715 }
1716}
1717
1718async fn build_gitignore(abs_path: &Path, fs: &dyn Fs) -> Result<Gitignore> {
1719 let contents = fs.load(abs_path).await?;
1720 let parent = abs_path.parent().unwrap_or_else(|| Path::new("/"));
1721 let mut builder = GitignoreBuilder::new(parent);
1722 for line in contents.lines() {
1723 builder.add_line(Some(abs_path.into()), line)?;
1724 }
1725 Ok(builder.build()?)
1726}
1727
1728impl WorktreeId {
1729 pub fn from_usize(handle_id: usize) -> Self {
1730 Self(handle_id)
1731 }
1732
1733 pub(crate) fn from_proto(id: u64) -> Self {
1734 Self(id as usize)
1735 }
1736
1737 pub fn to_proto(&self) -> u64 {
1738 self.0 as u64
1739 }
1740
1741 pub fn to_usize(&self) -> usize {
1742 self.0
1743 }
1744}
1745
1746impl fmt::Display for WorktreeId {
1747 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1748 self.0.fmt(f)
1749 }
1750}
1751
1752impl Deref for Worktree {
1753 type Target = Snapshot;
1754
1755 fn deref(&self) -> &Self::Target {
1756 match self {
1757 Worktree::Local(worktree) => &worktree.snapshot,
1758 Worktree::Remote(worktree) => &worktree.snapshot,
1759 }
1760 }
1761}
1762
1763impl Deref for LocalWorktree {
1764 type Target = LocalSnapshot;
1765
1766 fn deref(&self) -> &Self::Target {
1767 &self.snapshot
1768 }
1769}
1770
1771impl Deref for RemoteWorktree {
1772 type Target = Snapshot;
1773
1774 fn deref(&self) -> &Self::Target {
1775 &self.snapshot
1776 }
1777}
1778
impl fmt::Debug for LocalWorktree {
    // Delegates to the snapshot's formatting.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        self.snapshot.fmt(f)
    }
}
1784
1785impl fmt::Debug for Snapshot {
1786 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1787 struct EntriesById<'a>(&'a SumTree<PathEntry>);
1788 struct EntriesByPath<'a>(&'a SumTree<Entry>);
1789
1790 impl<'a> fmt::Debug for EntriesByPath<'a> {
1791 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1792 f.debug_map()
1793 .entries(self.0.iter().map(|entry| (&entry.path, entry.id)))
1794 .finish()
1795 }
1796 }
1797
1798 impl<'a> fmt::Debug for EntriesById<'a> {
1799 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1800 f.debug_list().entries(self.0.iter()).finish()
1801 }
1802 }
1803
1804 f.debug_struct("Snapshot")
1805 .field("id", &self.id)
1806 .field("root_name", &self.root_name)
1807 .field("entries_by_path", &EntriesByPath(&self.entries_by_path))
1808 .field("entries_by_id", &EntriesById(&self.entries_by_id))
1809 .finish()
1810 }
1811}
1812
/// A file belonging to a worktree, identified by its worktree-relative path.
#[derive(Clone, PartialEq)]
pub struct File {
    pub worktree: ModelHandle<Worktree>,
    // Path of the file relative to the worktree root.
    pub path: Arc<Path>,
    // Last-known modification time of the file on disk.
    pub mtime: SystemTime,
    pub(crate) entry_id: ProjectEntryId,
    // False when this file belongs to a remote (collaborator's) worktree.
    pub(crate) is_local: bool,
    pub(crate) is_deleted: bool,
}
1822
impl language::File for File {
    fn as_local(&self) -> Option<&dyn language::LocalFile> {
        if self.is_local {
            Some(self)
        } else {
            None
        }
    }

    fn mtime(&self) -> SystemTime {
        self.mtime
    }

    fn path(&self) -> &Arc<Path> {
        &self.path
    }

    /// Returns the path to display to the user: the worktree's root name (or
    /// an abbreviated absolute path, for invisible worktrees) joined with
    /// this file's worktree-relative path.
    fn full_path(&self, cx: &AppContext) -> PathBuf {
        let mut full_path = PathBuf::new();
        let worktree = self.worktree.read(cx);

        if worktree.is_visible() {
            full_path.push(worktree.root_name());
        } else {
            let path = worktree.abs_path();

            // Abbreviate local paths under the user's home directory as "~".
            if worktree.is_local() && path.starts_with(cx.global::<HomeDir>().as_path()) {
                full_path.push("~");
                full_path.push(path.strip_prefix(cx.global::<HomeDir>().as_path()).unwrap());
            } else {
                full_path.push(path)
            }
        }

        // An empty relative path means this file *is* the worktree root.
        if self.path.components().next().is_some() {
            full_path.push(&self.path);
        }

        full_path
    }

    /// Returns the last component of this handle's absolute path. If this handle refers to the root
    /// of its worktree, then this method will return the name of the worktree itself.
    fn file_name<'a>(&'a self, cx: &'a AppContext) -> &'a OsStr {
        self.path
            .file_name()
            .unwrap_or_else(|| OsStr::new(&self.worktree.read(cx).root_name))
    }

    fn is_deleted(&self) -> bool {
        self.is_deleted
    }

    /// Saves `text` as this file's contents. Local worktrees write to disk
    /// (notifying collaborators when the project is shared); remote worktrees
    /// ask the host to perform the save over RPC.
    fn save(
        &self,
        buffer_id: u64,
        text: Rope,
        version: clock::Global,
        line_ending: LineEnding,
        cx: &mut MutableAppContext,
    ) -> Task<Result<(clock::Global, String, SystemTime)>> {
        self.worktree.update(cx, |worktree, cx| match worktree {
            Worktree::Local(worktree) => {
                let rpc = worktree.client.clone();
                let project_id = worktree.share.as_ref().map(|share| share.project_id);
                let fingerprint = text.fingerprint();
                let save = worktree.write_file(self.path.clone(), text, line_ending, cx);
                cx.background().spawn(async move {
                    let entry = save.await?;
                    // Broadcast the save only when the project is shared.
                    if let Some(project_id) = project_id {
                        rpc.send(proto::BufferSaved {
                            project_id,
                            buffer_id,
                            version: serialize_version(&version),
                            mtime: Some(entry.mtime.into()),
                            fingerprint: fingerprint.clone(),
                        })?;
                    }
                    Ok((version, fingerprint, entry.mtime))
                })
            }
            Worktree::Remote(worktree) => {
                let rpc = worktree.client.clone();
                let project_id = worktree.project_id;
                cx.foreground().spawn(async move {
                    let response = rpc
                        .request(proto::SaveBuffer {
                            project_id,
                            buffer_id,
                            version: serialize_version(&version),
                        })
                        .await?;
                    let version = deserialize_version(response.version);
                    let mtime = response
                        .mtime
                        .ok_or_else(|| anyhow!("missing mtime"))?
                        .into();
                    Ok((version, response.fingerprint, mtime))
                })
            }
        })
    }

    fn as_any(&self) -> &dyn Any {
        self
    }

    /// Serializes this file for transmission to collaborators.
    fn to_proto(&self) -> rpc::proto::File {
        rpc::proto::File {
            worktree_id: self.worktree.id() as u64,
            entry_id: self.entry_id.to_proto(),
            path: self.path.to_string_lossy().into(),
            mtime: Some(self.mtime.into()),
            is_deleted: self.is_deleted,
        }
    }
}
1940
1941impl language::LocalFile for File {
1942 fn abs_path(&self, cx: &AppContext) -> PathBuf {
1943 self.worktree
1944 .read(cx)
1945 .as_local()
1946 .unwrap()
1947 .abs_path
1948 .join(&self.path)
1949 }
1950
1951 fn load(&self, cx: &AppContext) -> Task<Result<String>> {
1952 let worktree = self.worktree.read(cx).as_local().unwrap();
1953 let abs_path = worktree.absolutize(&self.path);
1954 let fs = worktree.fs.clone();
1955 cx.background()
1956 .spawn(async move { fs.load(&abs_path).await })
1957 }
1958
1959 fn buffer_reloaded(
1960 &self,
1961 buffer_id: u64,
1962 version: &clock::Global,
1963 fingerprint: String,
1964 line_ending: LineEnding,
1965 mtime: SystemTime,
1966 cx: &mut MutableAppContext,
1967 ) {
1968 let worktree = self.worktree.read(cx).as_local().unwrap();
1969 if let Some(project_id) = worktree.share.as_ref().map(|share| share.project_id) {
1970 worktree
1971 .client
1972 .send(proto::BufferReloaded {
1973 project_id,
1974 buffer_id,
1975 version: serialize_version(version),
1976 mtime: Some(mtime.into()),
1977 fingerprint,
1978 line_ending: serialize_line_ending(line_ending) as i32,
1979 })
1980 .log_err();
1981 }
1982 }
1983}
1984
1985impl File {
1986 pub fn from_proto(
1987 proto: rpc::proto::File,
1988 worktree: ModelHandle<Worktree>,
1989 cx: &AppContext,
1990 ) -> Result<Self> {
1991 let worktree_id = worktree
1992 .read(cx)
1993 .as_remote()
1994 .ok_or_else(|| anyhow!("not remote"))?
1995 .id();
1996
1997 if worktree_id.to_proto() != proto.worktree_id {
1998 return Err(anyhow!("worktree id does not match file"));
1999 }
2000
2001 Ok(Self {
2002 worktree,
2003 path: Path::new(&proto.path).into(),
2004 mtime: proto.mtime.ok_or_else(|| anyhow!("no timestamp"))?.into(),
2005 entry_id: ProjectEntryId::from_proto(proto.entry_id),
2006 is_local: false,
2007 is_deleted: proto.is_deleted,
2008 })
2009 }
2010
2011 pub fn from_dyn(file: Option<&dyn language::File>) -> Option<&Self> {
2012 file.and_then(|f| f.as_any().downcast_ref())
2013 }
2014
2015 pub fn worktree_id(&self, cx: &AppContext) -> WorktreeId {
2016 self.worktree.read(cx).id()
2017 }
2018
2019 pub fn project_entry_id(&self, _: &AppContext) -> Option<ProjectEntryId> {
2020 if self.is_deleted {
2021 None
2022 } else {
2023 Some(self.entry_id)
2024 }
2025 }
2026}
2027
/// A single file or directory within a worktree.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct Entry {
    pub id: ProjectEntryId,
    pub kind: EntryKind,
    // Path relative to the worktree root.
    pub path: Arc<Path>,
    pub inode: u64,
    pub mtime: SystemTime,
    pub is_symlink: bool,
    // True when the entry is matched by a gitignore rule in scope.
    pub is_ignored: bool,
}
2038
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum EntryKind {
    // A directory whose children have not been scanned yet.
    PendingDir,
    // A fully-scanned directory.
    Dir,
    // A file, carrying the character bag used for fuzzy path matching.
    File(CharBag),
}
2045
2046impl Entry {
2047 fn new(
2048 path: Arc<Path>,
2049 metadata: &fs::Metadata,
2050 next_entry_id: &AtomicUsize,
2051 root_char_bag: CharBag,
2052 ) -> Self {
2053 Self {
2054 id: ProjectEntryId::new(next_entry_id),
2055 kind: if metadata.is_dir {
2056 EntryKind::PendingDir
2057 } else {
2058 EntryKind::File(char_bag_for_path(root_char_bag, &path))
2059 },
2060 path,
2061 inode: metadata.inode,
2062 mtime: metadata.mtime,
2063 is_symlink: metadata.is_symlink,
2064 is_ignored: false,
2065 }
2066 }
2067
2068 pub fn is_dir(&self) -> bool {
2069 matches!(self.kind, EntryKind::Dir | EntryKind::PendingDir)
2070 }
2071
2072 pub fn is_file(&self) -> bool {
2073 matches!(self.kind, EntryKind::File(_))
2074 }
2075}
2076
2077impl sum_tree::Item for Entry {
2078 type Summary = EntrySummary;
2079
2080 fn summary(&self) -> Self::Summary {
2081 let visible_count = if self.is_ignored { 0 } else { 1 };
2082 let file_count;
2083 let visible_file_count;
2084 if self.is_file() {
2085 file_count = 1;
2086 visible_file_count = visible_count;
2087 } else {
2088 file_count = 0;
2089 visible_file_count = 0;
2090 }
2091
2092 EntrySummary {
2093 max_path: self.path.clone(),
2094 count: 1,
2095 visible_count,
2096 file_count,
2097 visible_file_count,
2098 }
2099 }
2100}
2101
2102impl sum_tree::KeyedItem for Entry {
2103 type Key = PathKey;
2104
2105 fn key(&self) -> Self::Key {
2106 PathKey(self.path.clone())
2107 }
2108}
2109
/// Aggregated statistics over a span of `Entry` items in the path tree.
#[derive(Clone, Debug)]
pub struct EntrySummary {
    // Greatest path in the summarized span.
    max_path: Arc<Path>,
    // Total number of entries.
    count: usize,
    // Entries that aren't gitignored.
    visible_count: usize,
    // File entries (as opposed to directories).
    file_count: usize,
    // File entries that aren't gitignored.
    visible_file_count: usize,
}
2118
2119impl Default for EntrySummary {
2120 fn default() -> Self {
2121 Self {
2122 max_path: Arc::from(Path::new("")),
2123 count: 0,
2124 visible_count: 0,
2125 file_count: 0,
2126 visible_file_count: 0,
2127 }
2128 }
2129}
2130
2131impl sum_tree::Summary for EntrySummary {
2132 type Context = ();
2133
2134 fn add_summary(&mut self, rhs: &Self, _: &()) {
2135 self.max_path = rhs.max_path.clone();
2136 self.count += rhs.count;
2137 self.visible_count += rhs.visible_count;
2138 self.file_count += rhs.file_count;
2139 self.visible_file_count += rhs.visible_file_count;
2140 }
2141}
2142
/// The id-ordered counterpart of `Entry`: maps an entry id back to its path.
#[derive(Clone, Debug)]
struct PathEntry {
    id: ProjectEntryId,
    // Path relative to the worktree root.
    path: Arc<Path>,
    is_ignored: bool,
    // Scan during which this entry was last updated.
    scan_id: usize,
}
2150
2151impl sum_tree::Item for PathEntry {
2152 type Summary = PathEntrySummary;
2153
2154 fn summary(&self) -> Self::Summary {
2155 PathEntrySummary { max_id: self.id }
2156 }
2157}
2158
2159impl sum_tree::KeyedItem for PathEntry {
2160 type Key = ProjectEntryId;
2161
2162 fn key(&self) -> Self::Key {
2163 self.id
2164 }
2165}
2166
/// Summary for the id-ordered tree: the greatest entry id in a span.
#[derive(Clone, Debug, Default)]
struct PathEntrySummary {
    max_id: ProjectEntryId,
}
2171
2172impl sum_tree::Summary for PathEntrySummary {
2173 type Context = ();
2174
2175 fn add_summary(&mut self, summary: &Self, _: &Self::Context) {
2176 self.max_id = summary.max_id;
2177 }
2178}
2179
impl<'a> sum_tree::Dimension<'a, PathEntrySummary> for ProjectEntryId {
    // Lets a cursor over the id tree seek by entry id.
    fn add_summary(&mut self, summary: &'a PathEntrySummary, _: &()) {
        *self = summary.max_id;
    }
}
2185
/// Sort key for `Entry` items: the entry's worktree-relative path.
#[derive(Clone, Debug, Eq, PartialEq, Ord, PartialOrd)]
pub struct PathKey(Arc<Path>);
2188
2189impl Default for PathKey {
2190 fn default() -> Self {
2191 Self(Path::new("").into())
2192 }
2193}
2194
2195impl<'a> sum_tree::Dimension<'a, EntrySummary> for PathKey {
2196 fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
2197 self.0 = summary.max_path.clone();
2198 }
2199}
2200
/// Scans a worktree's directory tree on background threads, keeping the
/// shared `LocalSnapshot` up to date and reporting progress over `notify`.
struct BackgroundScanner {
    fs: Arc<dyn Fs>,
    // Snapshot shared with the foreground `LocalWorktree`.
    snapshot: Arc<Mutex<LocalSnapshot>>,
    notify: UnboundedSender<ScanState>,
    executor: Arc<executor::Background>,
}
2207
2208impl BackgroundScanner {
2209 fn new(
2210 snapshot: Arc<Mutex<LocalSnapshot>>,
2211 notify: UnboundedSender<ScanState>,
2212 fs: Arc<dyn Fs>,
2213 executor: Arc<executor::Background>,
2214 ) -> Self {
2215 Self {
2216 fs,
2217 snapshot,
2218 notify,
2219 executor,
2220 }
2221 }
2222
2223 fn abs_path(&self) -> Arc<Path> {
2224 self.snapshot.lock().abs_path.clone()
2225 }
2226
2227 fn snapshot(&self) -> LocalSnapshot {
2228 self.snapshot.lock().clone()
2229 }
2230
    /// Drives the scanner: performs the initial recursive scan, then applies
    /// batches of filesystem events for as long as the `notify` receiver is
    /// alive (a failed send means the worktree was dropped, so we stop).
    async fn run(mut self, events_rx: impl Stream<Item = Vec<fsevent::Event>>) {
        if self.notify.unbounded_send(ScanState::Initializing).is_err() {
            return;
        }

        if let Err(err) = self.scan_dirs().await {
            if self
                .notify
                .unbounded_send(ScanState::Err(Arc::new(err)))
                .is_err()
            {
                return;
            }
        }

        if self.notify.unbounded_send(ScanState::Idle).is_err() {
            return;
        }

        futures::pin_mut!(events_rx);

        while let Some(mut events) = events_rx.next().await {
            // Coalesce any events already queued into one batch so they are
            // processed under a single Updating/Idle cycle.
            while let Poll::Ready(Some(additional_events)) = futures::poll!(events_rx.next()) {
                events.extend(additional_events);
            }

            if self.notify.unbounded_send(ScanState::Updating).is_err() {
                break;
            }

            if !self.process_events(events).await {
                break;
            }

            if self.notify.unbounded_send(ScanState::Idle).is_err() {
                break;
            }
        }
    }
2270
    /// Performs the initial recursive scan of the worktree root, fanning the
    /// directory traversal out over one worker task per CPU.
    async fn scan_dirs(&mut self) -> Result<()> {
        let root_char_bag;
        let root_abs_path;
        let root_inode;
        let is_dir;
        let next_entry_id;
        {
            // Copy everything we need out of the snapshot so the lock isn't
            // held across the awaits below.
            let snapshot = self.snapshot.lock();
            root_char_bag = snapshot.root_char_bag;
            root_abs_path = snapshot.abs_path.clone();
            root_inode = snapshot.root_entry().map(|e| e.inode);
            is_dir = snapshot.root_entry().map_or(false, |e| e.is_dir());
            next_entry_id = snapshot.next_entry_id.clone();
        };

        // Populate ignores above the root.
        for ancestor in root_abs_path.ancestors().skip(1) {
            if let Ok(ignore) = build_gitignore(&ancestor.join(&*GITIGNORE), self.fs.as_ref()).await
            {
                self.snapshot
                    .lock()
                    .ignores_by_parent_abs_path
                    .insert(ancestor.into(), (ignore.into(), 0));
            }
        }

        let ignore_stack = {
            let mut snapshot = self.snapshot.lock();
            let ignore_stack = snapshot.ignore_stack_for_abs_path(&root_abs_path, true);
            // If an ancestor gitignore excludes the root itself, mark the
            // root entry as ignored before descending.
            if ignore_stack.is_all() {
                if let Some(mut root_entry) = snapshot.root_entry().cloned() {
                    root_entry.is_ignored = true;
                    snapshot.insert_entry(root_entry, self.fs.as_ref());
                }
            }
            ignore_stack
        };

        if is_dir {
            let path: Arc<Path> = Arc::from(Path::new(""));
            let mut ancestor_inodes = TreeSet::default();
            if let Some(root_inode) = root_inode {
                ancestor_inodes.insert(root_inode);
            }

            // Seed the scan queue with the root directory. Each job carries
            // a clone of the sender so workers can enqueue subdirectories;
            // when the last in-flight job is done all senders are gone, the
            // channel closes, and the workers exit.
            let (tx, rx) = channel::unbounded();
            self.executor
                .block(tx.send(ScanJob {
                    abs_path: root_abs_path.to_path_buf(),
                    path,
                    ignore_stack,
                    ancestor_inodes,
                    scan_queue: tx.clone(),
                }))
                .unwrap();
            drop(tx);

            self.executor
                .scoped(|scope| {
                    for _ in 0..self.executor.num_cpus() {
                        scope.spawn(async {
                            while let Ok(job) = rx.recv().await {
                                if let Err(err) = self
                                    .scan_dir(root_char_bag, next_entry_id.clone(), &job)
                                    .await
                                {
                                    log::error!("error scanning {:?}: {}", job.abs_path, err);
                                }
                            }
                        });
                    }
                })
                .await;
        }

        Ok(())
    }
2348
2349 async fn scan_dir(
2350 &self,
2351 root_char_bag: CharBag,
2352 next_entry_id: Arc<AtomicUsize>,
2353 job: &ScanJob,
2354 ) -> Result<()> {
2355 let mut new_entries: Vec<Entry> = Vec::new();
2356 let mut new_jobs: Vec<ScanJob> = Vec::new();
2357 let mut ignore_stack = job.ignore_stack.clone();
2358 let mut new_ignore = None;
2359
2360 let mut child_paths = self.fs.read_dir(&job.abs_path).await?;
2361 while let Some(child_abs_path) = child_paths.next().await {
2362 let child_abs_path = match child_abs_path {
2363 Ok(child_abs_path) => child_abs_path,
2364 Err(error) => {
2365 log::error!("error processing entry {:?}", error);
2366 continue;
2367 }
2368 };
2369 let child_name = child_abs_path.file_name().unwrap();
2370 let child_path: Arc<Path> = job.path.join(child_name).into();
2371 let child_metadata = match self.fs.metadata(&child_abs_path).await {
2372 Ok(Some(metadata)) => metadata,
2373 Ok(None) => continue,
2374 Err(err) => {
2375 log::error!("error processing {:?}: {:?}", child_abs_path, err);
2376 continue;
2377 }
2378 };
2379
2380 // If we find a .gitignore, add it to the stack of ignores used to determine which paths are ignored
2381 if child_name == *GITIGNORE {
2382 match build_gitignore(&child_abs_path, self.fs.as_ref()).await {
2383 Ok(ignore) => {
2384 let ignore = Arc::new(ignore);
2385 ignore_stack =
2386 ignore_stack.append(job.abs_path.as_path().into(), ignore.clone());
2387 new_ignore = Some(ignore);
2388 }
2389 Err(error) => {
2390 log::error!(
2391 "error loading .gitignore file {:?} - {:?}",
2392 child_name,
2393 error
2394 );
2395 }
2396 }
2397
2398 // Update ignore status of any child entries we've already processed to reflect the
2399 // ignore file in the current directory. Because `.gitignore` starts with a `.`,
2400 // there should rarely be too numerous. Update the ignore stack associated with any
2401 // new jobs as well.
2402 let mut new_jobs = new_jobs.iter_mut();
2403 for entry in &mut new_entries {
2404 let entry_abs_path = self.abs_path().join(&entry.path);
2405 entry.is_ignored =
2406 ignore_stack.is_abs_path_ignored(&entry_abs_path, entry.is_dir());
2407 if entry.is_dir() {
2408 new_jobs.next().unwrap().ignore_stack = if entry.is_ignored {
2409 IgnoreStack::all()
2410 } else {
2411 ignore_stack.clone()
2412 };
2413 }
2414 }
2415 }
2416
2417 let mut child_entry = Entry::new(
2418 child_path.clone(),
2419 &child_metadata,
2420 &next_entry_id,
2421 root_char_bag,
2422 );
2423
2424 if child_entry.is_dir() {
2425 let is_ignored = ignore_stack.is_abs_path_ignored(&child_abs_path, true);
2426 child_entry.is_ignored = is_ignored;
2427
2428 if !job.ancestor_inodes.contains(&child_entry.inode) {
2429 let mut ancestor_inodes = job.ancestor_inodes.clone();
2430 ancestor_inodes.insert(child_entry.inode);
2431 new_jobs.push(ScanJob {
2432 abs_path: child_abs_path,
2433 path: child_path,
2434 ignore_stack: if is_ignored {
2435 IgnoreStack::all()
2436 } else {
2437 ignore_stack.clone()
2438 },
2439 ancestor_inodes,
2440 scan_queue: job.scan_queue.clone(),
2441 });
2442 }
2443 } else {
2444 child_entry.is_ignored = ignore_stack.is_abs_path_ignored(&child_abs_path, false);
2445 }
2446
2447 new_entries.push(child_entry);
2448 }
2449
2450 self.snapshot.lock().populate_dir(
2451 job.path.clone(),
2452 new_entries,
2453 new_ignore,
2454 self.fs.as_ref(),
2455 );
2456 for new_job in new_jobs {
2457 job.scan_queue.send(new_job).await.unwrap();
2458 }
2459
2460 Ok(())
2461 }
2462
    /// Applies a batch of file-system events to the snapshot: removes and
    /// re-inserts the affected entries, rescans any directories that appeared,
    /// then refreshes ignore statuses and git repositories. Returns `false`
    /// when the worktree root can no longer be canonicalized (e.g. it was
    /// deleted), which terminates the caller's event loop.
    async fn process_events(&mut self, mut events: Vec<fsevent::Event>) -> bool {
        // Sorting places descendants directly after their ancestors, so this
        // dedup keeps only the topmost path of each affected subtree.
        events.sort_unstable_by(|a, b| a.path.cmp(&b.path));
        events.dedup_by(|a, b| a.path.starts_with(&b.path));

        let root_char_bag;
        let root_abs_path;
        let next_entry_id;
        {
            let snapshot = self.snapshot.lock();
            root_char_bag = snapshot.root_char_bag;
            root_abs_path = snapshot.abs_path.clone();
            next_entry_id = snapshot.next_entry_id.clone();
        }

        let root_canonical_path = if let Ok(path) = self.fs.canonicalize(&root_abs_path).await {
            path
        } else {
            return false;
        };
        // Fetch metadata for all event paths concurrently, before taking the
        // snapshot lock.
        let metadata = futures::future::join_all(
            events
                .iter()
                .map(|event| self.fs.metadata(&event.path))
                .collect::<Vec<_>>(),
        )
        .await;

        // Hold the snapshot lock while clearing and re-inserting the root entries
        // for each event. This way, the snapshot is not observable to the foreground
        // thread while this operation is in-progress.
        let (scan_queue_tx, scan_queue_rx) = channel::unbounded();
        {
            let mut snapshot = self.snapshot.lock();
            snapshot.scan_id += 1;
            for event in &events {
                if let Ok(path) = event.path.strip_prefix(&root_canonical_path) {
                    snapshot.remove_path(path);
                }
            }

            for (event, metadata) in events.into_iter().zip(metadata.into_iter()) {
                let path: Arc<Path> = match event.path.strip_prefix(&root_canonical_path) {
                    Ok(path) => Arc::from(path.to_path_buf()),
                    Err(_) => {
                        log::error!(
                            "unexpected event {:?} for root path {:?}",
                            event.path,
                            root_canonical_path
                        );
                        continue;
                    }
                };
                let abs_path = root_abs_path.join(&path);

                match metadata {
                    Ok(Some(metadata)) => {
                        let ignore_stack =
                            snapshot.ignore_stack_for_abs_path(&abs_path, metadata.is_dir);
                        let mut fs_entry = Entry::new(
                            path.clone(),
                            &metadata,
                            snapshot.next_entry_id.as_ref(),
                            snapshot.root_char_bag,
                        );
                        fs_entry.is_ignored = ignore_stack.is_all();
                        snapshot.insert_entry(fs_entry, self.fs.as_ref());

                        // A change inside a `.git` directory invalidates that
                        // repository's cached index.
                        let scan_id = snapshot.scan_id;
                        if let Some(repo) = snapshot.in_dot_git(&path) {
                            repo.repo.lock().reload_index();
                            repo.scan_id = scan_id;
                        }

                        // Queue a scan job for new directories, unless doing so
                        // would recurse into a symlink cycle.
                        let mut ancestor_inodes = snapshot.ancestor_inodes_for_path(&path);
                        if metadata.is_dir && !ancestor_inodes.contains(&metadata.inode) {
                            ancestor_inodes.insert(metadata.inode);
                            self.executor
                                .block(scan_queue_tx.send(ScanJob {
                                    abs_path,
                                    path,
                                    ignore_stack,
                                    ancestor_inodes,
                                    scan_queue: scan_queue_tx.clone(),
                                }))
                                .unwrap();
                        }
                    }
                    Ok(None) => {}
                    Err(err) => {
                        // TODO - create a special 'error' entry in the entries tree to mark this
                        log::error!("error reading file on event {:?}", err);
                    }
                }
            }
            drop(scan_queue_tx);
        }

        // Scan any directories that were created as part of this event batch.
        self.executor
            .scoped(|scope| {
                for _ in 0..self.executor.num_cpus() {
                    scope.spawn(async {
                        while let Ok(job) = scan_queue_rx.recv().await {
                            if let Err(err) = self
                                .scan_dir(root_char_bag, next_entry_id.clone(), &job)
                                .await
                            {
                                log::error!("error scanning {:?}: {}", job.abs_path, err);
                            }
                        }
                    });
                }
            })
            .await;

        // Attempt to detect renames only over a single batch of file-system events.
        self.snapshot.lock().removed_entry_ids.clear();

        self.update_ignore_statuses().await;
        self.update_git_repositories();
        true
    }
2585
    /// Recomputes ignore statuses for subtrees whose `.gitignore` was touched
    /// in the current scan, and drops cached gitignores whose files no longer
    /// exist in the worktree.
    async fn update_ignore_statuses(&self) {
        let mut snapshot = self.snapshot();

        let mut ignores_to_update = Vec::new();
        let mut ignores_to_delete = Vec::new();
        for (parent_abs_path, (_, scan_id)) in &snapshot.ignores_by_parent_abs_path {
            if let Ok(parent_path) = parent_abs_path.strip_prefix(&snapshot.abs_path) {
                // A gitignore whose scan id matches the current scan changed
                // in this batch; its parent subtree needs re-evaluation.
                if *scan_id == snapshot.scan_id && snapshot.entry_for_path(parent_path).is_some() {
                    ignores_to_update.push(parent_abs_path.clone());
                }

                let ignore_path = parent_path.join(&*GITIGNORE);
                if snapshot.entry_for_path(ignore_path).is_none() {
                    ignores_to_delete.push(parent_abs_path.clone());
                }
            }
        }

        for parent_abs_path in ignores_to_delete {
            // Remove from both the local copy and the shared snapshot.
            snapshot.ignores_by_parent_abs_path.remove(&parent_abs_path);
            self.snapshot
                .lock()
                .ignores_by_parent_abs_path
                .remove(&parent_abs_path);
        }

        let (ignore_queue_tx, ignore_queue_rx) = channel::unbounded();
        ignores_to_update.sort_unstable();
        let mut ignores_to_update = ignores_to_update.into_iter().peekable();
        while let Some(parent_abs_path) = ignores_to_update.next() {
            // Skip paths nested inside `parent_abs_path`: updating the
            // ancestor covers its descendants recursively.
            while ignores_to_update
                .peek()
                .map_or(false, |p| p.starts_with(&parent_abs_path))
            {
                ignores_to_update.next().unwrap();
            }

            let ignore_stack = snapshot.ignore_stack_for_abs_path(&parent_abs_path, true);
            ignore_queue_tx
                .send(UpdateIgnoreStatusJob {
                    abs_path: parent_abs_path,
                    ignore_stack,
                    ignore_queue: ignore_queue_tx.clone(),
                })
                .await
                .unwrap();
        }
        drop(ignore_queue_tx);

        // Drain the queue on all CPUs; jobs enqueue child jobs, and the
        // channel closes once every sender (held by pending jobs) is dropped.
        self.executor
            .scoped(|scope| {
                for _ in 0..self.executor.num_cpus() {
                    scope.spawn(async {
                        while let Ok(job) = ignore_queue_rx.recv().await {
                            self.update_ignore_status(job, &snapshot).await;
                        }
                    });
                }
            })
            .await;
    }
2647
2648 fn update_git_repositories(&self) {
2649 let mut snapshot = self.snapshot.lock();
2650 let mut git_repositories = mem::take(&mut snapshot.git_repositories);
2651 git_repositories.retain(|repo| snapshot.entry_for_path(&repo.git_dir_path).is_some());
2652 snapshot.git_repositories = git_repositories;
2653 }
2654
    /// Recomputes the ignore status of the direct children of `job.abs_path`,
    /// enqueues a child job for every subdirectory, and writes edits for any
    /// entries whose status changed back into the shared snapshot.
    async fn update_ignore_status(&self, job: UpdateIgnoreStatusJob, snapshot: &LocalSnapshot) {
        let mut ignore_stack = job.ignore_stack;
        // If this directory itself contains a gitignore, its rules apply to
        // the children examined below.
        if let Some((ignore, _)) = snapshot.ignores_by_parent_abs_path.get(&job.abs_path) {
            ignore_stack = ignore_stack.append(job.abs_path.clone(), ignore.clone());
        }

        let mut entries_by_id_edits = Vec::new();
        let mut entries_by_path_edits = Vec::new();
        let path = job.abs_path.strip_prefix(&snapshot.abs_path).unwrap();
        for mut entry in snapshot.child_entries(path).cloned() {
            let was_ignored = entry.is_ignored;
            let abs_path = self.abs_path().join(&entry.path);
            entry.is_ignored = ignore_stack.is_abs_path_ignored(&abs_path, entry.is_dir());
            if entry.is_dir() {
                // A fully-ignored directory propagates `IgnoreStack::all()`
                // so its descendants are ignored without further matching.
                let child_ignore_stack = if entry.is_ignored {
                    IgnoreStack::all()
                } else {
                    ignore_stack.clone()
                };
                job.ignore_queue
                    .send(UpdateIgnoreStatusJob {
                        abs_path: abs_path.into(),
                        ignore_stack: child_ignore_stack,
                        ignore_queue: job.ignore_queue.clone(),
                    })
                    .await
                    .unwrap();
            }

            // Only record edits for entries whose status actually changed.
            if entry.is_ignored != was_ignored {
                let mut path_entry = snapshot.entries_by_id.get(&entry.id, &()).unwrap().clone();
                path_entry.scan_id = snapshot.scan_id;
                path_entry.is_ignored = entry.is_ignored;
                entries_by_id_edits.push(Edit::Insert(path_entry));
                entries_by_path_edits.push(Edit::Insert(entry));
            }
        }

        // Apply the accumulated edits to the shared snapshot in one batch.
        let mut snapshot = self.snapshot.lock();
        snapshot.entries_by_path.edit(entries_by_path_edits, &());
        snapshot.entries_by_id.edit(entries_by_id_edits, &());
    }
2697}
2698
2699fn char_bag_for_path(root_char_bag: CharBag, path: &Path) -> CharBag {
2700 let mut result = root_char_bag;
2701 result.extend(
2702 path.to_string_lossy()
2703 .chars()
2704 .map(|c| c.to_ascii_lowercase()),
2705 );
2706 result
2707}
2708
/// A unit of work for the background scanner: scan the directory at
/// `abs_path` and enqueue further jobs for its subdirectories.
struct ScanJob {
    // Absolute path of the directory to scan.
    abs_path: PathBuf,
    // The same directory, relative to the worktree root.
    path: Arc<Path>,
    // Gitignore rules in effect for this directory's contents.
    ignore_stack: Arc<IgnoreStack>,
    // Queue on which jobs for subdirectories are submitted.
    scan_queue: Sender<ScanJob>,
    // Inodes of ancestor directories, used to detect symlink cycles.
    ancestor_inodes: TreeSet<u64>,
}
2716
/// A unit of work for recomputing ignore statuses: update the entries
/// directly beneath `abs_path`, enqueueing child jobs for subdirectories.
struct UpdateIgnoreStatusJob {
    // Absolute path of the directory whose children are updated.
    abs_path: Arc<Path>,
    // Gitignore rules in effect above this directory.
    ignore_stack: Arc<IgnoreStack>,
    // Queue on which jobs for subdirectories are submitted.
    ignore_queue: Sender<UpdateIgnoreStatusJob>,
}
2722
/// Test-support extensions for worktree model handles.
pub trait WorktreeHandle {
    /// Waits until all file-system events predating this call have been
    /// processed by the worktree.
    #[cfg(any(test, feature = "test-support"))]
    fn flush_fs_events<'a>(
        &self,
        cx: &'a gpui::TestAppContext,
    ) -> futures::future::LocalBoxFuture<'a, ()>;
}
2730
impl WorktreeHandle for ModelHandle<Worktree> {
    // The worktree's FS event stream sometimes delivers "redundant" events for FS changes that
    // occurred before the worktree was constructed. These events can cause the worktree to perform
    // extra directory scans, and emit extra scan-state notifications.
    //
    // This function mutates the worktree's directory and waits for those mutations to be picked up,
    // to ensure that all redundant FS events have already been processed.
    #[cfg(any(test, feature = "test-support"))]
    fn flush_fs_events<'a>(
        &self,
        cx: &'a gpui::TestAppContext,
    ) -> futures::future::LocalBoxFuture<'a, ()> {
        use smol::future::FutureExt;

        let filename = "fs-event-sentinel";
        let tree = self.clone();
        let (fs, root_path) = self.read_with(cx, |tree, _| {
            let tree = tree.as_local().unwrap();
            (tree.fs.clone(), tree.abs_path().clone())
        });

        async move {
            // Create a sentinel file and wait for the worktree to observe it...
            fs.create_file(&root_path.join(filename), Default::default())
                .await
                .unwrap();
            tree.condition(cx, |tree, _| tree.entry_for_path(filename).is_some())
                .await;

            // ...then delete it and wait for that to be observed as well.
            fs.remove_file(&root_path.join(filename), Default::default())
                .await
                .unwrap();
            tree.condition(cx, |tree, _| tree.entry_for_path(filename).is_none())
                .await;

            // Finally, wait for any in-flight scan to finish.
            cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
                .await;
        }
        .boxed_local()
    }
}
2771
/// Seek dimension accumulated while traversing the entry tree: the rightmost
/// path seen so far, plus entry counts broken down by (visible, file).
#[derive(Clone, Debug)]
struct TraversalProgress<'a> {
    max_path: &'a Path,
    count: usize,
    visible_count: usize,
    file_count: usize,
    visible_file_count: usize,
}
2780
2781impl<'a> TraversalProgress<'a> {
2782 fn count(&self, include_dirs: bool, include_ignored: bool) -> usize {
2783 match (include_ignored, include_dirs) {
2784 (true, true) => self.count,
2785 (true, false) => self.file_count,
2786 (false, true) => self.visible_count,
2787 (false, false) => self.visible_file_count,
2788 }
2789 }
2790}
2791
impl<'a> sum_tree::Dimension<'a, EntrySummary> for TraversalProgress<'a> {
    // Folds an entry summary into the running progress: max_path advances to
    // the summary's max path and all counts are summed.
    fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
        self.max_path = summary.max_path.as_ref();
        self.count += summary.count;
        self.visible_count += summary.visible_count;
        self.file_count += summary.file_count;
        self.visible_file_count += summary.visible_file_count;
    }
}
2801
impl<'a> Default for TraversalProgress<'a> {
    // The empty progress: positioned before any entry, with all counts zero.
    fn default() -> Self {
        Self {
            max_path: Path::new(""),
            count: 0,
            visible_count: 0,
            file_count: 0,
            visible_file_count: 0,
        }
    }
}
2813
/// A filtered, cursor-based traversal over a snapshot's entries, optionally
/// including directories and/or ignored entries.
pub struct Traversal<'a> {
    cursor: sum_tree::Cursor<'a, Entry, TraversalProgress<'a>>,
    include_ignored: bool,
    include_dirs: bool,
}
2819
impl<'a> Traversal<'a> {
    /// Advances past the current entry. Returns whether the cursor moved.
    pub fn advance(&mut self) -> bool {
        self.advance_to_offset(self.offset() + 1)
    }

    /// Seeks forward to the entry at `offset`, counting only the entry kinds
    /// selected by this traversal's `include_dirs` / `include_ignored` flags.
    pub fn advance_to_offset(&mut self, offset: usize) -> bool {
        self.cursor.seek_forward(
            &TraversalTarget::Count {
                count: offset,
                include_dirs: self.include_dirs,
                include_ignored: self.include_ignored,
            },
            Bias::Right,
            &(),
        )
    }

    /// Skips the current entry and all of its descendants, stopping at the
    /// next entry that matches this traversal's filters. Returns whether
    /// such an entry was found.
    pub fn advance_to_sibling(&mut self) -> bool {
        while let Some(entry) = self.cursor.item() {
            // Seek just past `entry.path` and everything beneath it.
            self.cursor.seek_forward(
                &TraversalTarget::PathSuccessor(&entry.path),
                Bias::Left,
                &(),
            );
            if let Some(entry) = self.cursor.item() {
                if (self.include_dirs || !entry.is_dir())
                    && (self.include_ignored || !entry.is_ignored)
                {
                    return true;
                }
            }
        }
        false
    }

    /// The entry the cursor is currently positioned at, if any.
    pub fn entry(&self) -> Option<&'a Entry> {
        self.cursor.item()
    }

    /// The cursor's position, counted in the entry kinds selected by this
    /// traversal's filters.
    pub fn offset(&self) -> usize {
        self.cursor
            .start()
            .count(self.include_dirs, self.include_ignored)
    }
}
2865
2866impl<'a> Iterator for Traversal<'a> {
2867 type Item = &'a Entry;
2868
2869 fn next(&mut self) -> Option<Self::Item> {
2870 if let Some(item) = self.entry() {
2871 self.advance();
2872 Some(item)
2873 } else {
2874 None
2875 }
2876 }
2877}
2878
/// Seek targets for a `Traversal`'s cursor.
#[derive(Debug)]
enum TraversalTarget<'a> {
    // An exact path.
    Path(&'a Path),
    // The position just past a path and all of its descendants.
    PathSuccessor(&'a Path),
    // The n-th entry, counting only the selected entry kinds.
    Count {
        count: usize,
        include_ignored: bool,
        include_dirs: bool,
    },
}
2889
impl<'a, 'b> SeekTarget<'a, EntrySummary, TraversalProgress<'a>> for TraversalTarget<'b> {
    fn cmp(&self, cursor_location: &TraversalProgress<'a>, _: &()) -> Ordering {
        match self {
            // Exact-path seek: order the target against the rightmost path
            // the cursor has covered so far.
            TraversalTarget::Path(path) => path.cmp(&cursor_location.max_path),
            // Successor seek: report Equal as soon as the cursor's max path
            // is no longer inside `path`, i.e. past all of its descendants.
            TraversalTarget::PathSuccessor(path) => {
                if !cursor_location.max_path.starts_with(path) {
                    Ordering::Equal
                } else {
                    Ordering::Greater
                }
            }
            // Count seek: compare the target count with the number of
            // matching entries covered so far.
            TraversalTarget::Count {
                count,
                include_dirs,
                include_ignored,
            } => Ord::cmp(
                count,
                &cursor_location.count(*include_dirs, *include_ignored),
            ),
        }
    }
}
2912
/// Iterator over the direct children of `parent_path` within a traversal.
struct ChildEntriesIter<'a> {
    parent_path: &'a Path,
    traversal: Traversal<'a>,
}
2917
2918impl<'a> Iterator for ChildEntriesIter<'a> {
2919 type Item = &'a Entry;
2920
2921 fn next(&mut self) -> Option<Self::Item> {
2922 if let Some(item) = self.traversal.entry() {
2923 if item.path.starts_with(&self.parent_path) {
2924 self.traversal.advance_to_sibling();
2925 return Some(item);
2926 }
2927 }
2928 None
2929 }
2930}
2931
impl<'a> From<&'a Entry> for proto::Entry {
    /// Converts a local worktree entry into its protobuf representation for
    /// transmission to remote peers.
    fn from(entry: &'a Entry) -> Self {
        Self {
            id: entry.id.to_proto(),
            is_dir: entry.is_dir(),
            // Paths are sent as (lossy) UTF-8 strings over the wire.
            path: entry.path.to_string_lossy().into(),
            inode: entry.inode,
            mtime: Some(entry.mtime.into()),
            is_symlink: entry.is_symlink,
            is_ignored: entry.is_ignored,
        }
    }
}
2945
2946impl<'a> TryFrom<(&'a CharBag, proto::Entry)> for Entry {
2947 type Error = anyhow::Error;
2948
2949 fn try_from((root_char_bag, entry): (&'a CharBag, proto::Entry)) -> Result<Self> {
2950 if let Some(mtime) = entry.mtime {
2951 let kind = if entry.is_dir {
2952 EntryKind::Dir
2953 } else {
2954 let mut char_bag = *root_char_bag;
2955 char_bag.extend(entry.path.chars().map(|c| c.to_ascii_lowercase()));
2956 EntryKind::File(char_bag)
2957 };
2958 let path: Arc<Path> = PathBuf::from(entry.path).into();
2959 Ok(Entry {
2960 id: ProjectEntryId::from_proto(entry.id),
2961 kind,
2962 path,
2963 inode: entry.inode,
2964 mtime: mtime.into(),
2965 is_symlink: entry.is_symlink,
2966 is_ignored: entry.is_ignored,
2967 })
2968 } else {
2969 Err(anyhow!(
2970 "missing mtime in remote worktree entry {:?}",
2971 entry.path
2972 ))
2973 }
2974 }
2975}
2976
/// Sends a worktree update to the server, splitting it into chunks of at most
/// `MAX_CHUNK_SIZE` entries per request.
async fn send_worktree_update(client: &Arc<Client>, update: proto::UpdateWorktree) -> Result<()> {
    // Tiny chunks in tests exercise the splitting logic.
    #[cfg(any(test, feature = "test-support"))]
    const MAX_CHUNK_SIZE: usize = 2;
    #[cfg(not(any(test, feature = "test-support")))]
    const MAX_CHUNK_SIZE: usize = 256;

    for update in proto::split_worktree_update(update, MAX_CHUNK_SIZE) {
        client.request(update).await?;
    }

    Ok(())
}
2989
2990#[cfg(test)]
2991mod tests {
2992 use super::*;
2993 use anyhow::Result;
2994 use client::test::FakeHttpClient;
2995 use fs::repository::FakeGitRepository;
2996 use fs::{FakeFs, RealFs};
2997 use gpui::{executor::Deterministic, TestAppContext};
2998 use rand::prelude::*;
2999 use serde_json::json;
3000 use std::{
3001 env,
3002 fmt::Write,
3003 time::{SystemTime, UNIX_EPOCH},
3004 };
3005
3006 use util::test::temp_tree;
3007
    // Verifies that `entries(false)` hides gitignored entries ("a/b") while
    // `entries(true)` includes them.
    #[gpui::test]
    async fn test_traversal(cx: &mut TestAppContext) {
        let fs = FakeFs::new(cx.background());
        fs.insert_tree(
            "/root",
            json!({
                ".gitignore": "a/b\n",
                "a": {
                    "b": "",
                    "c": "",
                }
            }),
        )
        .await;

        let http_client = FakeHttpClient::with_404_response();
        let client = cx.read(|cx| Client::new(http_client, cx));

        let tree = Worktree::local(
            client,
            Arc::from(Path::new("/root")),
            true,
            fs,
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();
        // Wait for the initial background scan before asserting.
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;

        tree.read_with(cx, |tree, _| {
            assert_eq!(
                tree.entries(false)
                    .map(|entry| entry.path.as_ref())
                    .collect::<Vec<_>>(),
                vec![
                    Path::new(""),
                    Path::new(".gitignore"),
                    Path::new("a"),
                    Path::new("a/c"),
                ]
            );
            assert_eq!(
                tree.entries(true)
                    .map(|entry| entry.path.as_ref())
                    .collect::<Vec<_>>(),
                vec![
                    Path::new(""),
                    Path::new(".gitignore"),
                    Path::new("a"),
                    Path::new("a/b"),
                    Path::new("a/c"),
                ]
            );
        })
    }
3065
    // Verifies that symlinks forming a cycle (each dir links back to its
    // parent) are listed but not recursed into, and that renaming one of them
    // is picked up by the event loop.
    #[gpui::test(iterations = 10)]
    async fn test_circular_symlinks(executor: Arc<Deterministic>, cx: &mut TestAppContext) {
        let fs = FakeFs::new(cx.background());
        fs.insert_tree(
            "/root",
            json!({
                "lib": {
                    "a": {
                        "a.txt": ""
                    },
                    "b": {
                        "b.txt": ""
                    }
                }
            }),
        )
        .await;
        // Both symlinks point back up to "lib", creating cycles.
        fs.insert_symlink("/root/lib/a/lib", "..".into()).await;
        fs.insert_symlink("/root/lib/b/lib", "..".into()).await;

        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
        let tree = Worktree::local(
            client,
            Arc::from(Path::new("/root")),
            true,
            fs.clone(),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();

        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;

        tree.read_with(cx, |tree, _| {
            assert_eq!(
                tree.entries(false)
                    .map(|entry| entry.path.as_ref())
                    .collect::<Vec<_>>(),
                vec![
                    Path::new(""),
                    Path::new("lib"),
                    Path::new("lib/a"),
                    Path::new("lib/a/a.txt"),
                    Path::new("lib/a/lib"),
                    Path::new("lib/b"),
                    Path::new("lib/b/b.txt"),
                    Path::new("lib/b/lib"),
                ]
            );
        });

        // Rename one of the cyclic symlinks and verify the rescan sees it.
        fs.rename(
            Path::new("/root/lib/a/lib"),
            Path::new("/root/lib/a/lib-2"),
            Default::default(),
        )
        .await
        .unwrap();
        executor.run_until_parked();
        tree.read_with(cx, |tree, _| {
            assert_eq!(
                tree.entries(false)
                    .map(|entry| entry.path.as_ref())
                    .collect::<Vec<_>>(),
                vec![
                    Path::new(""),
                    Path::new("lib"),
                    Path::new("lib/a"),
                    Path::new("lib/a/a.txt"),
                    Path::new("lib/a/lib-2"),
                    Path::new("lib/b"),
                    Path::new("lib/b/b.txt"),
                    Path::new("lib/b/lib"),
                ]
            );
        });
    }
3145
    // Verifies ignore statuses on the real file system: ancestor gitignores
    // apply inside the worktree, and files created after the initial scan
    // also get correct statuses.
    #[gpui::test]
    async fn test_rescan_with_gitignore(cx: &mut TestAppContext) {
        let parent_dir = temp_tree(json!({
            ".gitignore": "ancestor-ignored-file1\nancestor-ignored-file2\n",
            "tree": {
                ".git": {},
                ".gitignore": "ignored-dir\n",
                "tracked-dir": {
                    "tracked-file1": "",
                    "ancestor-ignored-file1": "",
                },
                "ignored-dir": {
                    "ignored-file1": ""
                }
            }
        }));
        let dir = parent_dir.path().join("tree");

        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));

        let tree = Worktree::local(
            client,
            dir.as_path(),
            true,
            Arc::new(RealFs),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;
        tree.flush_fs_events(cx).await;
        // Statuses after the initial scan.
        cx.read(|cx| {
            let tree = tree.read(cx);
            assert!(
                !tree
                    .entry_for_path("tracked-dir/tracked-file1")
                    .unwrap()
                    .is_ignored
            );
            assert!(
                tree.entry_for_path("tracked-dir/ancestor-ignored-file1")
                    .unwrap()
                    .is_ignored
            );
            assert!(
                tree.entry_for_path("ignored-dir/ignored-file1")
                    .unwrap()
                    .is_ignored
            );
        });

        // Statuses for files created after the initial scan.
        std::fs::write(dir.join("tracked-dir/tracked-file2"), "").unwrap();
        std::fs::write(dir.join("tracked-dir/ancestor-ignored-file2"), "").unwrap();
        std::fs::write(dir.join("ignored-dir/ignored-file2"), "").unwrap();
        tree.flush_fs_events(cx).await;
        cx.read(|cx| {
            let tree = tree.read(cx);
            assert!(
                !tree
                    .entry_for_path("tracked-dir/tracked-file2")
                    .unwrap()
                    .is_ignored
            );
            assert!(
                tree.entry_for_path("tracked-dir/ancestor-ignored-file2")
                    .unwrap()
                    .is_ignored
            );
            assert!(
                tree.entry_for_path("ignored-dir/ignored-file2")
                    .unwrap()
                    .is_ignored
            );
            assert!(tree.entry_for_path(".git").unwrap().is_ignored);
        });
    }
3224
    // Verifies repository discovery (`repo_for`), including nested repos,
    // scan-id bumps on changes inside `.git`, and repo removal when `.git`
    // is deleted.
    #[gpui::test]
    async fn test_git_repository_for_path(cx: &mut TestAppContext) {
        let root = temp_tree(json!({
            "dir1": {
                ".git": {},
                "deps": {
                    "dep1": {
                        ".git": {},
                        "src": {
                            "a.txt": ""
                        }
                    }
                },
                "src": {
                    "b.txt": ""
                }
            },
            "c.txt": "",
        }));

        let http_client = FakeHttpClient::with_404_response();
        let client = cx.read(|cx| Client::new(http_client, cx));
        let tree = Worktree::local(
            client,
            root.path(),
            true,
            Arc::new(RealFs),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();

        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;
        tree.flush_fs_events(cx).await;

        tree.read_with(cx, |tree, _cx| {
            let tree = tree.as_local().unwrap();

            // A file outside any repository has no associated repo.
            assert!(tree.repo_for("c.txt".as_ref()).is_none());

            let repo = tree.repo_for("dir1/src/b.txt".as_ref()).unwrap();
            assert_eq!(repo.content_path.as_ref(), Path::new("dir1"));
            assert_eq!(repo.git_dir_path.as_ref(), Path::new("dir1/.git"));

            // A file in a nested repo maps to the innermost repository.
            let repo = tree.repo_for("dir1/deps/dep1/src/a.txt".as_ref()).unwrap();
            assert_eq!(repo.content_path.as_ref(), Path::new("dir1/deps/dep1"));
            assert_eq!(repo.git_dir_path.as_ref(), Path::new("dir1/deps/dep1/.git"),);
        });

        let original_scan_id = tree.read_with(cx, |tree, _cx| {
            let tree = tree.as_local().unwrap();
            tree.repo_for("dir1/src/b.txt".as_ref()).unwrap().scan_id
        });

        // A change inside `.git` must bump the repository's scan id.
        std::fs::write(root.path().join("dir1/.git/random_new_file"), "hello").unwrap();
        tree.flush_fs_events(cx).await;

        tree.read_with(cx, |tree, _cx| {
            let tree = tree.as_local().unwrap();
            let new_scan_id = tree.repo_for("dir1/src/b.txt".as_ref()).unwrap().scan_id;
            assert_ne!(
                original_scan_id, new_scan_id,
                "original {original_scan_id}, new {new_scan_id}"
            );
        });

        // Deleting `.git` must remove the repository mapping.
        std::fs::remove_dir_all(root.path().join("dir1/.git")).unwrap();
        tree.flush_fs_events(cx).await;

        tree.read_with(cx, |tree, _cx| {
            let tree = tree.as_local().unwrap();

            assert!(tree.repo_for("dir1/src/b.txt".as_ref()).is_none());
        });
    }
3302
    // Verifies that `changed_repos` reports deletions, updates (scan-id
    // bumps), and additions, while omitting unchanged repositories.
    #[test]
    fn test_changed_repos() {
        fn fake_entry(git_dir_path: impl AsRef<Path>, scan_id: usize) -> GitRepositoryEntry {
            GitRepositoryEntry {
                repo: Arc::new(Mutex::new(FakeGitRepository::default())),
                scan_id,
                content_path: git_dir_path.as_ref().parent().unwrap().into(),
                git_dir_path: git_dir_path.as_ref().into(),
            }
        }

        let prev_repos: Vec<GitRepositoryEntry> = vec![
            fake_entry("/.git", 0),
            fake_entry("/a/.git", 0),
            fake_entry("/a/b/.git", 0),
        ];

        let new_repos: Vec<GitRepositoryEntry> = vec![
            fake_entry("/a/.git", 1),
            fake_entry("/a/b/.git", 0),
            fake_entry("/a/c/.git", 0),
        ];

        let res = LocalWorktree::changed_repos(&prev_repos, &new_repos);

        // Deletion retained
        assert!(res
            .iter()
            .find(|repo| repo.git_dir_path.as_ref() == Path::new("/.git") && repo.scan_id == 0)
            .is_some());

        // Update retained
        assert!(res
            .iter()
            .find(|repo| repo.git_dir_path.as_ref() == Path::new("/a/.git") && repo.scan_id == 1)
            .is_some());

        // Addition retained
        assert!(res
            .iter()
            .find(|repo| repo.git_dir_path.as_ref() == Path::new("/a/c/.git") && repo.scan_id == 0)
            .is_some());

        // Nochange, not retained
        assert!(res
            .iter()
            .find(|repo| repo.git_dir_path.as_ref() == Path::new("/a/b/.git") && repo.scan_id == 0)
            .is_none());
    }
3352
    // Verifies that files written through the worktree show up with correct
    // ignore statuses, including inside a gitignored directory.
    #[gpui::test]
    async fn test_write_file(cx: &mut TestAppContext) {
        let dir = temp_tree(json!({
            ".git": {},
            ".gitignore": "ignored-dir\n",
            "tracked-dir": {},
            "ignored-dir": {}
        }));

        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));

        let tree = Worktree::local(
            client,
            dir.path(),
            true,
            Arc::new(RealFs),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;
        tree.flush_fs_events(cx).await;

        tree.update(cx, |tree, cx| {
            tree.as_local().unwrap().write_file(
                Path::new("tracked-dir/file.txt"),
                "hello".into(),
                Default::default(),
                cx,
            )
        })
        .await
        .unwrap();
        tree.update(cx, |tree, cx| {
            tree.as_local().unwrap().write_file(
                Path::new("ignored-dir/file.txt"),
                "world".into(),
                Default::default(),
                cx,
            )
        })
        .await
        .unwrap();

        tree.read_with(cx, |tree, _| {
            let tracked = tree.entry_for_path("tracked-dir/file.txt").unwrap();
            let ignored = tree.entry_for_path("ignored-dir/file.txt").unwrap();
            assert!(!tracked.is_ignored);
            assert!(ignored.is_ignored);
        });
    }
3406
    // Verifies that `create_entry` with `is_dir = true` creates a directory
    // entry that is visible once background work settles.
    #[gpui::test(iterations = 30)]
    async fn test_create_directory(cx: &mut TestAppContext) {
        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));

        let fs = FakeFs::new(cx.background());
        fs.insert_tree(
            "/a",
            json!({
                "b": {},
                "c": {},
                "d": {},
            }),
        )
        .await;

        let tree = Worktree::local(
            client,
            "/a".as_ref(),
            true,
            fs,
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();

        let entry = tree
            .update(cx, |tree, cx| {
                tree.as_local_mut()
                    .unwrap()
                    .create_entry("a/e".as_ref(), true, cx)
            })
            .await
            .unwrap();
        assert!(entry.is_dir());

        // Let the scanner observe the new directory before asserting.
        cx.foreground().run_until_parked();
        tree.read_with(cx, |tree, _| {
            assert_eq!(tree.entry_for_path("a/e").unwrap().kind, EntryKind::Dir);
        });
    }
3448
    #[gpui::test(iterations = 100)]
    fn test_random(mut rng: StdRng) {
        // Number of random mutations to apply; overridable via env vars so the
        // test can be run as a longer stress test locally.
        let operations = env::var("OPERATIONS")
            .map(|o| o.parse().unwrap())
            .unwrap_or(40);
        let initial_entries = env::var("INITIAL_ENTRIES")
            .map(|o| o.parse().unwrap())
            .unwrap_or(20);

        // Seed a real on-disk directory with random entries. Probability 1.0
        // forces `randomly_mutate_tree` down its insertion branch, so setup
        // only ever creates files/dirs.
        let root_dir = tempdir::TempDir::new("worktree-test").unwrap();
        for _ in 0..initial_entries {
            randomly_mutate_tree(root_dir.path(), 1.0, &mut rng).unwrap();
        }
        log::info!("Generated initial tree");

        let (notify_tx, _notify_rx) = mpsc::unbounded();
        let fs = Arc::new(RealFs);
        let next_entry_id = Arc::new(AtomicUsize::new(0));
        // Build an empty snapshot rooted at the temp dir; the scanner
        // populates it from the real filesystem below. Kept for re-scanning
        // from scratch later, hence the clone when constructing the scanner.
        let mut initial_snapshot = LocalSnapshot {
            removed_entry_ids: Default::default(),
            ignores_by_parent_abs_path: Default::default(),
            git_repositories: Default::default(),
            next_entry_id: next_entry_id.clone(),
            snapshot: Snapshot {
                id: WorktreeId::from_usize(0),
                entries_by_path: Default::default(),
                entries_by_id: Default::default(),
                abs_path: root_dir.path().into(),
                root_name: Default::default(),
                root_char_bag: Default::default(),
                scan_id: 0,
                is_complete: true,
            },
            extension_counts: Default::default(),
        };
        initial_snapshot.insert_entry(
            Entry::new(
                Path::new("").into(),
                &smol::block_on(fs.metadata(root_dir.path()))
                    .unwrap()
                    .unwrap(),
                &next_entry_id,
                Default::default(),
            ),
            fs.as_ref(),
        );
        let mut scanner = BackgroundScanner::new(
            Arc::new(Mutex::new(initial_snapshot.clone())),
            notify_tx,
            fs.clone(),
            Arc::new(gpui::executor::Background::new()),
        );
        smol::block_on(scanner.scan_dirs()).unwrap();
        scanner.snapshot().check_invariants();

        // Interleave filesystem mutations with batched, delayed event
        // delivery, so the scanner observes events out of lockstep with the
        // mutations that produced them.
        let mut events = Vec::new();
        let mut snapshots = Vec::new();
        let mut mutations_len = operations;
        while mutations_len > 1 {
            if !events.is_empty() && rng.gen_bool(0.4) {
                // Deliver a random-length prefix of the pending events and
                // verify the snapshot stays internally consistent.
                let len = rng.gen_range(0..=events.len());
                let to_deliver = events.drain(0..len).collect::<Vec<_>>();
                log::info!("Delivering events: {:#?}", to_deliver);
                smol::block_on(scanner.process_events(to_deliver));
                scanner.snapshot().check_invariants();
            } else {
                events.extend(randomly_mutate_tree(root_dir.path(), 0.6, &mut rng).unwrap());
                mutations_len -= 1;
            }

            // Occasionally capture an intermediate snapshot; each one is later
            // verified to converge via an incremental remote update.
            if rng.gen_bool(0.2) {
                snapshots.push(scanner.snapshot());
            }
        }
        // Flush any remaining undelivered events so the scanner settles.
        log::info!("Quiescing: {:#?}", events);
        smol::block_on(scanner.process_events(events));
        scanner.snapshot().check_invariants();

        // A fresh scan of the final on-disk state must agree entry-for-entry
        // with the incrementally maintained snapshot.
        let (notify_tx, _notify_rx) = mpsc::unbounded();
        let mut new_scanner = BackgroundScanner::new(
            Arc::new(Mutex::new(initial_snapshot)),
            notify_tx,
            scanner.fs.clone(),
            scanner.executor.clone(),
        );
        smol::block_on(new_scanner.scan_dirs()).unwrap();
        assert_eq!(
            scanner.snapshot().to_vec(true),
            new_scanner.snapshot().to_vec(true)
        );

        // Every captured intermediate snapshot, patched with a remote-style
        // update built from the final snapshot, must converge to that state.
        for mut prev_snapshot in snapshots {
            let include_ignored = rng.gen::<bool>();
            if !include_ignored {
                // An update built without ignored entries cannot be expected
                // to remove them, so strip ignored entries from the base
                // snapshot before applying the update.
                let mut entries_by_path_edits = Vec::new();
                let mut entries_by_id_edits = Vec::new();
                for entry in prev_snapshot
                    .entries_by_id
                    .cursor::<()>()
                    .filter(|e| e.is_ignored)
                {
                    entries_by_path_edits.push(Edit::Remove(PathKey(entry.path.clone())));
                    entries_by_id_edits.push(Edit::Remove(entry.id));
                }

                prev_snapshot
                    .entries_by_path
                    .edit(entries_by_path_edits, &());
                prev_snapshot.entries_by_id.edit(entries_by_id_edits, &());
            }

            let update = scanner
                .snapshot()
                .build_update(&prev_snapshot, 0, 0, include_ignored);
            prev_snapshot.apply_remote_update(update).unwrap();
            assert_eq!(
                prev_snapshot.to_vec(true),
                scanner.snapshot().to_vec(include_ignored)
            );
        }
    }
3570
    /// Applies one random filesystem mutation under `root_path` and returns
    /// the `fsevent::Event`s a file watcher would have emitted for it.
    ///
    /// `insertion_probability` biases the choice toward creating a new entry;
    /// the remaining probability mass is split between writing a `.gitignore`
    /// (5%) and renaming or deleting an existing entry.
    fn randomly_mutate_tree(
        root_path: &Path,
        insertion_probability: f64,
        rng: &mut impl Rng,
    ) -> Result<Vec<fsevent::Event>> {
        let root_path = root_path.canonicalize().unwrap();
        let (dirs, files) = read_dir_recursive(root_path.clone());

        let mut events = Vec::new();
        let mut record_event = |path: PathBuf| {
            events.push(fsevent::Event {
                // Wall-clock seconds stand in for a real fsevent id; the test
                // consumer only cares about the path, not the id or flags.
                event_id: SystemTime::now()
                    .duration_since(UNIX_EPOCH)
                    .unwrap()
                    .as_secs(),
                flags: fsevent::StreamFlags::empty(),
                path,
            });
        };

        // Force an insertion when the tree is effectively empty (only the
        // root dir, no files) — otherwise there is nothing to rename/delete.
        if (files.is_empty() && dirs.len() == 1) || rng.gen_bool(insertion_probability) {
            let path = dirs.choose(rng).unwrap();
            let new_path = path.join(gen_name(rng));

            if rng.gen() {
                log::info!("Creating dir {:?}", new_path.strip_prefix(root_path)?);
                std::fs::create_dir(&new_path)?;
            } else {
                log::info!("Creating file {:?}", new_path.strip_prefix(root_path)?);
                std::fs::write(&new_path, "")?;
            }
            record_event(new_path);
        } else if rng.gen_bool(0.05) {
            // Write a `.gitignore` into a random dir, ignoring a random subset
            // of the files (possibly all) and dirs (strictly fewer than all,
            // note the exclusive range) found beneath it.
            let ignore_dir_path = dirs.choose(rng).unwrap();
            let ignore_path = ignore_dir_path.join(&*GITIGNORE);

            let (subdirs, subfiles) = read_dir_recursive(ignore_dir_path.clone());
            let files_to_ignore = {
                let len = rng.gen_range(0..=subfiles.len());
                subfiles.choose_multiple(rng, len)
            };
            let dirs_to_ignore = {
                let len = rng.gen_range(0..subdirs.len());
                subdirs.choose_multiple(rng, len)
            };

            let mut ignore_contents = String::new();
            for path_to_ignore in files_to_ignore.chain(dirs_to_ignore) {
                writeln!(
                    ignore_contents,
                    "{}",
                    path_to_ignore
                        .strip_prefix(&ignore_dir_path)?
                        .to_str()
                        .unwrap()
                )
                .unwrap();
            }
            log::info!(
                "Creating {:?} with contents:\n{}",
                ignore_path.strip_prefix(&root_path)?,
                ignore_contents
            );
            std::fs::write(&ignore_path, ignore_contents).unwrap();
            record_event(ignore_path);
        } else {
            // Pick any existing file, or any dir other than the root itself
            // (dirs[0] is always `root_path`, which must never be moved or
            // removed).
            let old_path = {
                let file_path = files.choose(rng);
                let dir_path = dirs[1..].choose(rng);
                file_path.into_iter().chain(dir_path).choose(rng).unwrap()
            };

            let is_rename = rng.gen();
            if is_rename {
                // Never rename a dir into its own subtree.
                let new_path_parent = dirs
                    .iter()
                    .filter(|d| !d.starts_with(old_path))
                    .choose(rng)
                    .unwrap();

                // Occasionally replace an existing directory wholesale rather
                // than creating a fresh name under the chosen parent.
                let overwrite_existing_dir =
                    !old_path.starts_with(&new_path_parent) && rng.gen_bool(0.3);
                let new_path = if overwrite_existing_dir {
                    std::fs::remove_dir_all(&new_path_parent).ok();
                    new_path_parent.to_path_buf()
                } else {
                    new_path_parent.join(gen_name(rng))
                };

                log::info!(
                    "Renaming {:?} to {}{:?}",
                    old_path.strip_prefix(&root_path)?,
                    if overwrite_existing_dir {
                        "overwrite "
                    } else {
                        ""
                    },
                    new_path.strip_prefix(&root_path)?
                );
                std::fs::rename(&old_path, &new_path)?;
                record_event(old_path.clone());
                record_event(new_path);
            } else if old_path.is_dir() {
                // Deleting a dir emits one event per contained path, mirroring
                // what a recursive removal looks like to a file watcher.
                let (dirs, files) = read_dir_recursive(old_path.clone());

                log::info!("Deleting dir {:?}", old_path.strip_prefix(&root_path)?);
                std::fs::remove_dir_all(&old_path).unwrap();
                for file in files {
                    record_event(file);
                }
                for dir in dirs {
                    record_event(dir);
                }
            } else {
                log::info!("Deleting file {:?}", old_path.strip_prefix(&root_path)?);
                std::fs::remove_file(old_path).unwrap();
                record_event(old_path.clone());
            }
        }

        Ok(events)
    }
3693
3694 fn read_dir_recursive(path: PathBuf) -> (Vec<PathBuf>, Vec<PathBuf>) {
3695 let child_entries = std::fs::read_dir(&path).unwrap();
3696 let mut dirs = vec![path];
3697 let mut files = Vec::new();
3698 for child_entry in child_entries {
3699 let child_path = child_entry.unwrap().path();
3700 if child_path.is_dir() {
3701 let (child_dirs, child_files) = read_dir_recursive(child_path);
3702 dirs.extend(child_dirs);
3703 files.extend(child_files);
3704 } else {
3705 files.push(child_path);
3706 }
3707 }
3708 (dirs, files)
3709 }
3710
3711 fn gen_name(rng: &mut impl Rng) -> String {
3712 (0..6)
3713 .map(|_| rng.sample(rand::distributions::Alphanumeric))
3714 .map(char::from)
3715 .collect()
3716 }
3717
    impl LocalSnapshot {
        /// Asserts the snapshot's internal consistency; panics on any
        /// violation. Test-only sanity check run after every scan/event batch.
        fn check_invariants(&self) {
            // The `files` iterators must yield exactly the file entries of
            // `entries_by_path`, in the same order (and `visible_files` the
            // non-ignored subset thereof).
            let mut files = self.files(true, 0);
            let mut visible_files = self.files(false, 0);
            for entry in self.entries_by_path.cursor::<()>() {
                if entry.is_file() {
                    assert_eq!(files.next().unwrap().inode, entry.inode);
                    if !entry.is_ignored {
                        assert_eq!(visible_files.next().unwrap().inode, entry.inode);
                    }
                }
            }
            assert!(files.next().is_none());
            assert!(visible_files.next().is_none());

            // Walk the tree via `child_entries`. NOTE(review): despite the
            // name, this produces a depth-first preorder — children are
            // inserted at a fixed index `ix`, so popping from the stack yields
            // them left-to-right — which is why it can be compared against the
            // cursor's path order below.
            let mut bfs_paths = Vec::new();
            let mut stack = vec![Path::new("")];
            while let Some(path) = stack.pop() {
                bfs_paths.push(path);
                let ix = stack.len();
                for child_entry in self.child_entries(path) {
                    stack.insert(ix, &child_entry.path);
                }
            }

            let dfs_paths_via_iter = self
                .entries_by_path
                .cursor::<()>()
                .map(|e| e.path.as_ref())
                .collect::<Vec<_>>();
            assert_eq!(bfs_paths, dfs_paths_via_iter);

            // The higher-level traversal API must agree with the raw cursor.
            let dfs_paths_via_traversal = self
                .entries(true)
                .map(|e| e.path.as_ref())
                .collect::<Vec<_>>();
            assert_eq!(dfs_paths_via_traversal, dfs_paths_via_iter);

            // Every tracked `.gitignore` must still correspond to entries in
            // the snapshot: both its containing dir and the ignore file.
            for ignore_parent_abs_path in self.ignores_by_parent_abs_path.keys() {
                let ignore_parent_path =
                    ignore_parent_abs_path.strip_prefix(&self.abs_path).unwrap();
                assert!(self.entry_for_path(&ignore_parent_path).is_some());
                assert!(self
                    .entry_for_path(ignore_parent_path.join(&*GITIGNORE))
                    .is_some());
            }

            // Ensure extension counts are correct. Counts are recomputed from
            // the non-ignored entries and compared against the cached field.
            let mut expected_extension_counts = HashMap::default();
            for extension in self.entries(false).filter_map(|e| e.path.extension()) {
                *expected_extension_counts
                    .entry(extension.into())
                    .or_insert(0) += 1;
            }
            assert_eq!(self.extension_counts, expected_extension_counts);
        }

        /// Flattens the snapshot into `(path, inode, is_ignored)` tuples,
        /// sorted by path, optionally excluding ignored entries. Used by tests
        /// to compare snapshots for equality.
        fn to_vec(&self, include_ignored: bool) -> Vec<(&Path, u64, bool)> {
            let mut paths = Vec::new();
            for entry in self.entries_by_path.cursor::<()>() {
                if include_ignored || !entry.is_ignored {
                    paths.push((entry.path.as_ref(), entry.inode, entry.is_ignored));
                }
            }
            paths.sort_by(|a, b| a.0.cmp(b.0));
            paths
        }
    }
3786}