1use super::{ignore::IgnoreStack, DiagnosticSummary};
2use crate::{copy_recursive, ProjectEntryId, RemoveOptions};
3use ::ignore::gitignore::{Gitignore, GitignoreBuilder};
4use anyhow::{anyhow, Context, Result};
5use client::{proto, Client};
6use clock::ReplicaId;
7use collections::{HashMap, VecDeque};
8use fs::{repository::GitRepository, Fs};
9use fs::{HomeDir, LineEnding};
10use futures::{
11 channel::{
12 mpsc::{self, UnboundedSender},
13 oneshot,
14 },
15 Stream, StreamExt,
16};
17use fuzzy::CharBag;
18use git::{DOT_GIT, GITIGNORE};
19use gpui::{
20 executor, AppContext, AsyncAppContext, Entity, ModelContext, ModelHandle, MutableAppContext,
21 Task,
22};
23use language::Unclipped;
24use language::{
25 proto::{deserialize_version, serialize_line_ending, serialize_version},
26 Buffer, DiagnosticEntry, PointUtf16, Rope,
27};
28use parking_lot::Mutex;
29use postage::{
30 prelude::{Sink as _, Stream as _},
31 watch,
32};
33
34use smol::channel::{self, Sender};
35use std::{
36 any::Any,
37 cmp::{self, Ordering},
38 convert::TryFrom,
39 ffi::{OsStr, OsString},
40 fmt,
41 future::Future,
42 mem,
43 ops::{Deref, DerefMut},
44 os::unix::prelude::{OsStrExt, OsStringExt},
45 path::{Path, PathBuf},
46 sync::{atomic::AtomicUsize, Arc},
47 task::Poll,
48 time::{Duration, SystemTime},
49};
50use sum_tree::{Bias, Edit, SeekTarget, SumTree, TreeMap, TreeSet};
51use util::{ResultExt, TryFutureExt};
52
/// Identifies a worktree within a project. Wraps the `usize` id of the
/// worktree's underlying model (see `WorktreeId::from_usize(cx.model_id())`
/// in `LocalWorktree::create`).
#[derive(Copy, Clone, PartialEq, Eq, Debug, Hash, PartialOrd, Ord)]
pub struct WorktreeId(usize);
55
/// A worktree is either backed by the local filesystem (`Local`) or mirrors a
/// remote collaborator's worktree via `proto::UpdateWorktree` messages
/// (`Remote`).
#[allow(clippy::large_enum_variant)]
pub enum Worktree {
    Local(LocalWorktree),
    Remote(RemoteWorktree),
}
61
/// A worktree rooted in the local filesystem. A background scanner task keeps
/// `background_snapshot` up to date; `poll_snapshot` periodically copies it
/// into the foreground `snapshot`.
pub struct LocalWorktree {
    /// Foreground copy of the worktree state, refreshed by `poll_snapshot`.
    snapshot: LocalSnapshot,
    /// Snapshot mutated by the background scanner; the source of truth.
    background_snapshot: Arc<Mutex<LocalSnapshot>>,
    /// Most recent state reported by the background scanner.
    last_scan_state_rx: watch::Receiver<ScanState>,
    /// Keeps the background scanner alive; `None` until `Worktree::local` starts it.
    _background_scanner_task: Option<Task<()>>,
    /// Pending re-poll scheduled while the scanner is still initializing.
    poll_task: Option<Task<()>>,
    /// Present while this worktree is shared with collaborators.
    share: Option<ShareState>,
    /// Full diagnostics per worktree-relative path.
    diagnostics: HashMap<Arc<Path>, Vec<DiagnosticEntry<Unclipped<PointUtf16>>>>,
    /// Per-path error/warning counts, kept in sync with `diagnostics`.
    diagnostic_summaries: TreeMap<PathKey, DiagnosticSummary>,
    client: Arc<Client>,
    fs: Arc<dyn Fs>,
    /// Whether this worktree is shown in the UI (see `Worktree::is_visible`).
    visible: bool,
}
75
/// A worktree mirroring a remote collaborator's worktree. Updates arrive via
/// `update_from_remote`, are applied to `background_snapshot` on a background
/// task, and then copied into the foreground `snapshot`.
pub struct RemoteWorktree {
    /// Foreground copy of the mirrored state.
    pub snapshot: Snapshot,
    /// Snapshot that incoming remote updates are applied to.
    pub(crate) background_snapshot: Arc<Mutex<Snapshot>>,
    project_id: u64,
    client: Arc<Client>,
    /// Channel feeding the update-applying task; `None` once disconnected.
    updates_tx: Option<UnboundedSender<proto::UpdateWorktree>>,
    /// Waiters resolved once a given scan id has been observed, kept sorted
    /// by scan id (see `wait_for_snapshot`).
    snapshot_subscriptions: VecDeque<(usize, oneshot::Sender<()>)>,
    replica_id: ReplicaId,
    diagnostic_summaries: TreeMap<PathKey, DiagnosticSummary>,
    visible: bool,
}
87
/// The worktree state shared by both local and remote worktrees: the entry
/// trees plus identifying metadata.
#[derive(Clone)]
pub struct Snapshot {
    id: WorktreeId,
    /// Absolute path of the worktree root on the host's filesystem.
    abs_path: Arc<Path>,
    root_name: String,
    /// Lower-cased character bag of `root_name`, used for fuzzy matching.
    root_char_bag: CharBag,
    /// Entries ordered by path.
    entries_by_path: SumTree<Entry>,
    /// The same entries, indexed by id for O(log n) id lookups.
    entries_by_id: SumTree<PathEntry>,
    /// Incremented on each filesystem rescan / mutation batch.
    scan_id: usize,
    /// Whether the snapshot reflects a completed scan (remote snapshots start incomplete).
    is_complete: bool,
}
99
/// A git repository discovered inside the worktree during scanning.
#[derive(Clone)]
pub struct GitRepositoryEntry {
    /// Handle to the underlying repository implementation.
    pub(crate) repo: Arc<Mutex<dyn GitRepository>>,

    /// Scan id at which this repository was last (re)scanned; used by
    /// `LocalWorktree::changed_repos` to detect updates.
    pub(crate) scan_id: usize,
    // Path to folder containing the .git file or directory
    pub(crate) content_path: Arc<Path>,
    // Path to the actual .git folder.
    // Note: if .git is a file, this points to the folder indicated by the .git file
    pub(crate) git_dir_path: Arc<Path>,
}
111
112impl std::fmt::Debug for GitRepositoryEntry {
113 fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
114 f.debug_struct("GitRepositoryEntry")
115 .field("content_path", &self.content_path)
116 .field("git_dir_path", &self.git_dir_path)
117 .field("libgit_repository", &"LibGitRepository")
118 .finish()
119 }
120}
121
/// `Snapshot` plus the extra bookkeeping only a local worktree needs.
pub struct LocalSnapshot {
    /// Parsed `.gitignore` per parent directory, with the scan id at which it
    /// was last loaded.
    ignores_by_parent_abs_path: HashMap<Arc<Path>, (Arc<Gitignore>, usize)>,
    /// Git repositories discovered under the root.
    git_repositories: Vec<GitRepositoryEntry>,
    /// Maps removed inode-like ids back to their entry ids — presumably to
    /// preserve entry identity across moves; confirmed elsewhere in the file.
    removed_entry_ids: HashMap<u64, ProjectEntryId>,
    /// Shared counter for allocating new entry ids.
    next_entry_id: Arc<AtomicUsize>,
    /// The shared snapshot this type derefs to.
    snapshot: Snapshot,
    /// Count of files per extension (see `send_extension_counts`).
    extension_counts: HashMap<OsString, usize>,
}
130
131impl Clone for LocalSnapshot {
132 fn clone(&self) -> Self {
133 Self {
134 ignores_by_parent_abs_path: self.ignores_by_parent_abs_path.clone(),
135 git_repositories: self.git_repositories.iter().cloned().collect(),
136 removed_entry_ids: self.removed_entry_ids.clone(),
137 next_entry_id: self.next_entry_id.clone(),
138 snapshot: self.snapshot.clone(),
139 extension_counts: self.extension_counts.clone(),
140 }
141 }
142}
143
// Allow `LocalSnapshot` to be used wherever a `&Snapshot` is expected.
impl Deref for LocalSnapshot {
    type Target = Snapshot;

    fn deref(&self) -> &Self::Target {
        &self.snapshot
    }
}
151
// Mutable counterpart of the `Deref` impl above.
impl DerefMut for LocalSnapshot {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.snapshot
    }
}
157
/// State reported by the background scanner, forwarded to the foreground via
/// the `last_scan_state_rx` watch channel (wired up in `LocalWorktree::create`).
#[derive(Clone, Debug)]
enum ScanState {
    /// The scanner is caught up with the filesystem.
    Idle,
    /// The worktree is performing its initial scan of the filesystem.
    Initializing,
    /// The worktree is updating in response to filesystem events.
    Updating,
    /// Scanning failed; the error is wrapped in `Arc` so the state stays `Clone`.
    Err(Arc<anyhow::Error>),
}
167
/// State held while a local worktree is shared with collaborators.
struct ShareState {
    project_id: u64,
    /// Publishes each new foreground snapshot to the maintenance task.
    snapshots_tx: watch::Sender<LocalSnapshot>,
    /// Task that streams snapshot updates over RPC; dropping it (on unshare)
    /// stops the stream.
    _maintain_remote_snapshot: Option<Task<Option<()>>>,
}
173
/// Events emitted by a `Worktree` model.
pub enum Event {
    /// The set of entries changed (emitted whenever a fresh snapshot is adopted).
    UpdatedEntries,
    /// The listed git repositories were added, removed, or rescanned
    /// (computed by `LocalWorktree::changed_repos`).
    UpdatedGitRepositories(Vec<GitRepositoryEntry>),
}
178
// Registers `Worktree` as a gpui model entity that emits `Event`s.
impl Entity for Worktree {
    type Event = Event;
}
182
impl Worktree {
    /// Opens a worktree rooted at `path` on the local filesystem and spawns
    /// the background scanner task, which watches the root for filesystem
    /// events (debounced to 100ms) and applies them to the background
    /// snapshot.
    pub async fn local(
        client: Arc<Client>,
        path: impl Into<Arc<Path>>,
        visible: bool,
        fs: Arc<dyn Fs>,
        next_entry_id: Arc<AtomicUsize>,
        cx: &mut AsyncAppContext,
    ) -> Result<ModelHandle<Self>> {
        let (tree, scan_states_tx) =
            LocalWorktree::create(client, path, visible, fs.clone(), next_entry_id, cx).await?;
        tree.update(cx, |tree, cx| {
            let tree = tree.as_local_mut().unwrap();
            let abs_path = tree.abs_path().clone();
            let background_snapshot = tree.background_snapshot.clone();
            let background = cx.background().clone();
            // The scanner owns the background snapshot and reports its state
            // through `scan_states_tx` (consumed in `LocalWorktree::create`).
            tree._background_scanner_task = Some(cx.background().spawn(async move {
                let events = fs.watch(&abs_path, Duration::from_millis(100)).await;
                let scanner =
                    BackgroundScanner::new(background_snapshot, scan_states_tx, fs, background);
                scanner.run(events).await;
            }));
        });
        Ok(tree)
    }

    /// Creates a worktree mirroring a remote collaborator's worktree.
    ///
    /// Spawns two tasks: a background task that applies incoming
    /// `proto::UpdateWorktree` messages to the background snapshot, and a
    /// foreground task that, after each applied update, refreshes the
    /// foreground snapshot and resolves subscriptions waiting for a scan id
    /// that has now been observed.
    pub fn remote(
        project_remote_id: u64,
        replica_id: ReplicaId,
        worktree: proto::WorktreeMetadata,
        client: Arc<Client>,
        cx: &mut MutableAppContext,
    ) -> ModelHandle<Self> {
        let remote_id = worktree.id;
        // Lower-cased character bag of the root name, used for fuzzy matching.
        let root_char_bag: CharBag = worktree
            .root_name
            .chars()
            .map(|c| c.to_ascii_lowercase())
            .collect();
        let root_name = worktree.root_name.clone();
        let visible = worktree.visible;

        let abs_path = PathBuf::from(OsString::from_vec(worktree.abs_path));
        // Starts empty and incomplete; entries arrive via remote updates.
        let snapshot = Snapshot {
            id: WorktreeId(remote_id as usize),
            abs_path: Arc::from(abs_path.deref()),
            root_name,
            root_char_bag,
            entries_by_path: Default::default(),
            entries_by_id: Default::default(),
            scan_id: 0,
            is_complete: false,
        };

        let (updates_tx, mut updates_rx) = mpsc::unbounded();
        let background_snapshot = Arc::new(Mutex::new(snapshot.clone()));
        let (mut snapshot_updated_tx, mut snapshot_updated_rx) = watch::channel();
        let worktree_handle = cx.add_model(|_: &mut ModelContext<Worktree>| {
            Worktree::Remote(RemoteWorktree {
                project_id: project_remote_id,
                replica_id,
                snapshot: snapshot.clone(),
                background_snapshot: background_snapshot.clone(),
                updates_tx: Some(updates_tx),
                snapshot_subscriptions: Default::default(),
                client: client.clone(),
                diagnostic_summaries: Default::default(),
                visible,
            })
        });

        // Apply remote updates to the background snapshot off the main
        // thread; errors are logged but do not stop the stream.
        cx.background()
            .spawn(async move {
                while let Some(update) = updates_rx.next().await {
                    if let Err(error) = background_snapshot.lock().apply_remote_update(update) {
                        log::error!("error applying worktree update: {}", error);
                    }
                    snapshot_updated_tx.send(()).await.ok();
                }
            })
            .detach();

        // After each applied update, sync the foreground snapshot and wake
        // any waiters whose scan id has been reached. Subscriptions are kept
        // sorted, so we only pop from the front while satisfied.
        cx.spawn(|mut cx| {
            let this = worktree_handle.downgrade();
            async move {
                while (snapshot_updated_rx.recv().await).is_some() {
                    if let Some(this) = this.upgrade(&cx) {
                        this.update(&mut cx, |this, cx| {
                            this.poll_snapshot(cx);
                            let this = this.as_remote_mut().unwrap();
                            while let Some((scan_id, _)) = this.snapshot_subscriptions.front() {
                                if this.observed_snapshot(*scan_id) {
                                    let (_, tx) = this.snapshot_subscriptions.pop_front().unwrap();
                                    let _ = tx.send(());
                                } else {
                                    break;
                                }
                            }
                        });
                    } else {
                        // Worktree model was dropped; stop listening.
                        break;
                    }
                }
            }
        })
        .detach();

        worktree_handle
    }

    /// Returns the local variant, or `None` if this worktree is remote.
    pub fn as_local(&self) -> Option<&LocalWorktree> {
        if let Worktree::Local(worktree) = self {
            Some(worktree)
        } else {
            None
        }
    }

    /// Returns the remote variant, or `None` if this worktree is local.
    pub fn as_remote(&self) -> Option<&RemoteWorktree> {
        if let Worktree::Remote(worktree) = self {
            Some(worktree)
        } else {
            None
        }
    }

    /// Mutable counterpart of [`Self::as_local`].
    pub fn as_local_mut(&mut self) -> Option<&mut LocalWorktree> {
        if let Worktree::Local(worktree) = self {
            Some(worktree)
        } else {
            None
        }
    }

    /// Mutable counterpart of [`Self::as_remote`].
    pub fn as_remote_mut(&mut self) -> Option<&mut RemoteWorktree> {
        if let Worktree::Remote(worktree) = self {
            Some(worktree)
        } else {
            None
        }
    }

    /// Whether this worktree is backed by the local filesystem.
    pub fn is_local(&self) -> bool {
        matches!(self, Worktree::Local(_))
    }

    /// Whether this worktree mirrors a remote collaborator's worktree.
    pub fn is_remote(&self) -> bool {
        !self.is_local()
    }

    /// Returns a clone of the current foreground snapshot.
    pub fn snapshot(&self) -> Snapshot {
        match self {
            Worktree::Local(worktree) => worktree.snapshot().snapshot,
            Worktree::Remote(worktree) => worktree.snapshot(),
        }
    }

    /// The scan id of the current foreground snapshot.
    pub fn scan_id(&self) -> usize {
        match self {
            Worktree::Local(worktree) => worktree.snapshot.scan_id,
            Worktree::Remote(worktree) => worktree.snapshot.scan_id,
        }
    }

    /// Whether this worktree should be shown in the UI.
    pub fn is_visible(&self) -> bool {
        match self {
            Worktree::Local(worktree) => worktree.visible,
            Worktree::Remote(worktree) => worktree.visible,
        }
    }

    /// The replica id for collaboration; the local host is always replica 0.
    pub fn replica_id(&self) -> ReplicaId {
        match self {
            Worktree::Local(_) => 0,
            Worktree::Remote(worktree) => worktree.replica_id,
        }
    }

    /// Iterates over `(path, summary)` pairs of diagnostic summaries for all
    /// paths that currently have diagnostics.
    pub fn diagnostic_summaries(
        &self,
    ) -> impl Iterator<Item = (Arc<Path>, DiagnosticSummary)> + '_ {
        match self {
            Worktree::Local(worktree) => &worktree.diagnostic_summaries,
            Worktree::Remote(worktree) => &worktree.diagnostic_summaries,
        }
        .iter()
        .map(|(path, summary)| (path.0.clone(), *summary))
    }

    // Refresh the foreground snapshot from the background snapshot.
    fn poll_snapshot(&mut self, cx: &mut ModelContext<Self>) {
        match self {
            Self::Local(worktree) => worktree.poll_snapshot(false, cx),
            Self::Remote(worktree) => worktree.poll_snapshot(cx),
        };
    }

    /// Absolute path of the worktree root.
    pub fn abs_path(&self) -> Arc<Path> {
        match self {
            Worktree::Local(worktree) => worktree.abs_path.clone(),
            Worktree::Remote(worktree) => worktree.abs_path.clone(),
        }
    }
}
386
impl LocalWorktree {
    /// Builds the worktree model and its initial snapshot (containing at most
    /// the root entry), and spawns a task that forwards scan states from the
    /// background scanner into `last_scan_state_rx`, polling the snapshot on
    /// each state change. Returns the model plus the sender the scanner
    /// should report its states on.
    async fn create(
        client: Arc<Client>,
        path: impl Into<Arc<Path>>,
        visible: bool,
        fs: Arc<dyn Fs>,
        next_entry_id: Arc<AtomicUsize>,
        cx: &mut AsyncAppContext,
    ) -> Result<(ModelHandle<Worktree>, UnboundedSender<ScanState>)> {
        let abs_path = path.into();
        // The root entry's worktree-relative path is the empty path.
        let path: Arc<Path> = Arc::from(Path::new(""));

        // After determining whether the root entry is a file or a directory, populate the
        // snapshot's "root name", which will be used for the purpose of fuzzy matching.
        let root_name = abs_path
            .file_name()
            .map_or(String::new(), |f| f.to_string_lossy().to_string());
        let root_char_bag = root_name.chars().map(|c| c.to_ascii_lowercase()).collect();
        let metadata = fs
            .metadata(&abs_path)
            .await
            .context("failed to stat worktree path")?;

        let (scan_states_tx, mut scan_states_rx) = mpsc::unbounded();
        let (mut last_scan_state_tx, last_scan_state_rx) =
            watch::channel_with(ScanState::Initializing);
        let tree = cx.add_model(move |cx: &mut ModelContext<Worktree>| {
            let mut snapshot = LocalSnapshot {
                ignores_by_parent_abs_path: Default::default(),
                git_repositories: Default::default(),
                removed_entry_ids: Default::default(),
                next_entry_id,
                snapshot: Snapshot {
                    id: WorktreeId::from_usize(cx.model_id()),
                    abs_path,
                    root_name: root_name.clone(),
                    root_char_bag,
                    entries_by_path: Default::default(),
                    entries_by_id: Default::default(),
                    scan_id: 0,
                    is_complete: true,
                },
                extension_counts: Default::default(),
            };
            // Seed the snapshot with the root entry if the path exists.
            if let Some(metadata) = metadata {
                let entry = Entry::new(
                    path,
                    &metadata,
                    &snapshot.next_entry_id,
                    snapshot.root_char_bag,
                );
                snapshot.insert_entry(entry, fs.as_ref());
            }

            let tree = Self {
                snapshot: snapshot.clone(),
                background_snapshot: Arc::new(Mutex::new(snapshot)),
                last_scan_state_rx,
                _background_scanner_task: None,
                share: None,
                poll_task: None,
                diagnostics: Default::default(),
                diagnostic_summaries: Default::default(),
                client,
                fs,
                visible,
            };

            // Forward each scan state to the watch channel and re-poll the
            // snapshot; stop once the worktree model is dropped.
            cx.spawn_weak(|this, mut cx| async move {
                while let Some(scan_state) = scan_states_rx.next().await {
                    if let Some(this) = this.upgrade(&cx) {
                        last_scan_state_tx.blocking_send(scan_state).ok();
                        this.update(&mut cx, |this, cx| this.poll_snapshot(cx));
                    } else {
                        break;
                    }
                }
            })
            .detach();

            Worktree::Local(tree)
        });

        Ok((tree, scan_states_tx))
    }

    /// Whether the given absolute path lies under this worktree's root.
    pub fn contains_abs_path(&self, path: &Path) -> bool {
        path.starts_with(&self.abs_path)
    }

    // Convert a worktree-relative path to an absolute one; the empty path
    // (no file name) denotes the root itself.
    fn absolutize(&self, path: &Path) -> PathBuf {
        if path.file_name().is_some() {
            self.abs_path.join(path)
        } else {
            self.abs_path.to_path_buf()
        }
    }

    /// Loads the file at `path` into a new `Buffer` model, including its git
    /// diff base, and kicks off an initial diff recalculation.
    pub(crate) fn load_buffer(
        &mut self,
        path: &Path,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<ModelHandle<Buffer>>> {
        let path = Arc::from(path);
        cx.spawn(move |this, mut cx| async move {
            let (file, contents, diff_base) = this
                .update(&mut cx, |t, cx| t.as_local().unwrap().load(&path, cx))
                .await?;
            Ok(cx.add_model(|cx| {
                let mut buffer = Buffer::from_file(0, contents, diff_base, Arc::new(file), cx);
                buffer.git_diff_recalc(cx);
                buffer
            }))
        })
    }

    /// Returns the stored diagnostics for `path`, if any.
    pub fn diagnostics_for_path(
        &self,
        path: &Path,
    ) -> Option<Vec<DiagnosticEntry<Unclipped<PointUtf16>>>> {
        self.diagnostics.get(path).cloned()
    }

    /// Replaces the diagnostics for `worktree_path`. Returns `Ok(true)` when
    /// the summary changed (either the old or the new one is non-empty), in
    /// which case the new summary is also pushed to collaborators if the
    /// worktree is currently shared.
    pub fn update_diagnostics(
        &mut self,
        language_server_id: usize,
        worktree_path: Arc<Path>,
        diagnostics: Vec<DiagnosticEntry<Unclipped<PointUtf16>>>,
        _: &mut ModelContext<Worktree>,
    ) -> Result<bool> {
        self.diagnostics.remove(&worktree_path);
        let old_summary = self
            .diagnostic_summaries
            .remove(&PathKey(worktree_path.clone()))
            .unwrap_or_default();
        let new_summary = DiagnosticSummary::new(language_server_id, &diagnostics);
        // Only store non-empty diagnostics; empty sets just clear the entry.
        if !new_summary.is_empty() {
            self.diagnostic_summaries
                .insert(PathKey(worktree_path.clone()), new_summary);
            self.diagnostics.insert(worktree_path.clone(), diagnostics);
        }

        let updated = !old_summary.is_empty() || !new_summary.is_empty();
        if updated {
            if let Some(share) = self.share.as_ref() {
                self.client
                    .send(proto::UpdateDiagnosticSummary {
                        project_id: share.project_id,
                        worktree_id: self.id().to_proto(),
                        summary: Some(proto::DiagnosticSummary {
                            path: worktree_path.to_string_lossy().to_string(),
                            language_server_id: language_server_id as u64,
                            error_count: new_summary.error_count as u32,
                            warning_count: new_summary.warning_count as u32,
                        }),
                    })
                    .log_err();
            }
        }

        Ok(updated)
    }

    // Copy the background snapshot into the foreground one, depending on the
    // scanner's state:
    // - Idle: adopt the snapshot, publish it to collaborators, and emit events.
    // - Initializing: adopt it and schedule another poll ~100ms later (or
    //   after a simulated delay on the fake fs in tests).
    // - otherwise: only adopt the snapshot when `force` is set.
    fn poll_snapshot(&mut self, force: bool, cx: &mut ModelContext<Worktree>) {
        // Cancel any previously scheduled poll.
        self.poll_task.take();

        match self.scan_state() {
            ScanState::Idle => {
                let new_snapshot = self.background_snapshot.lock().clone();
                let updated_repos = Self::changed_repos(
                    &self.snapshot.git_repositories,
                    &new_snapshot.git_repositories,
                );
                self.snapshot = new_snapshot;

                if let Some(share) = self.share.as_mut() {
                    *share.snapshots_tx.borrow_mut() = self.snapshot.clone();
                }

                cx.emit(Event::UpdatedEntries);

                if !updated_repos.is_empty() {
                    cx.emit(Event::UpdatedGitRepositories(updated_repos));
                }
            }

            ScanState::Initializing => {
                let is_fake_fs = self.fs.is_fake();

                let new_snapshot = self.background_snapshot.lock().clone();
                let updated_repos = Self::changed_repos(
                    &self.snapshot.git_repositories,
                    &new_snapshot.git_repositories,
                );
                self.snapshot = new_snapshot;

                // Keep polling while the initial scan is in progress.
                self.poll_task = Some(cx.spawn_weak(|this, mut cx| async move {
                    if is_fake_fs {
                        #[cfg(any(test, feature = "test-support"))]
                        cx.background().simulate_random_delay().await;
                    } else {
                        smol::Timer::after(Duration::from_millis(100)).await;
                    }
                    if let Some(this) = this.upgrade(&cx) {
                        this.update(&mut cx, |this, cx| this.poll_snapshot(cx));
                    }
                }));

                cx.emit(Event::UpdatedEntries);

                if !updated_repos.is_empty() {
                    cx.emit(Event::UpdatedGitRepositories(updated_repos));
                }
            }

            _ => {
                if force {
                    self.snapshot = self.background_snapshot.lock().clone();
                }
            }
        }

        cx.notify();
    }

    // Symmetric difference of two repository lists: a repository counts as
    // changed when no entry on the other side has both the same git_dir_path
    // and the same scan_id. The map is keyed by git_dir_path, so each
    // repository appears at most once in the result.
    fn changed_repos(
        old_repos: &[GitRepositoryEntry],
        new_repos: &[GitRepositoryEntry],
    ) -> Vec<GitRepositoryEntry> {
        fn diff<'a>(
            a: &'a [GitRepositoryEntry],
            b: &'a [GitRepositoryEntry],
            updated: &mut HashMap<&'a Path, GitRepositoryEntry>,
        ) {
            for a_repo in a {
                let matched = b.iter().find(|b_repo| {
                    a_repo.git_dir_path == b_repo.git_dir_path && a_repo.scan_id == b_repo.scan_id
                });

                if matched.is_none() {
                    updated.insert(a_repo.git_dir_path.as_ref(), a_repo.clone());
                }
            }
        }

        let mut updated = HashMap::<&Path, GitRepositoryEntry>::default();

        // Run the diff both ways to catch removals as well as additions.
        diff(old_repos, new_repos, &mut updated);
        diff(new_repos, old_repos, &mut updated);

        updated.into_values().collect()
    }

    /// Returns a future that resolves once the scanner is no longer
    /// initializing or updating (i.e. it reached `Idle` or `Err`).
    pub fn scan_complete(&self) -> impl Future<Output = ()> {
        let mut scan_state_rx = self.last_scan_state_rx.clone();
        async move {
            let mut scan_state = Some(scan_state_rx.borrow().clone());
            while let Some(ScanState::Initializing | ScanState::Updating) = scan_state {
                scan_state = scan_state_rx.recv().await;
            }
        }
    }

    // The most recently reported scanner state.
    fn scan_state(&self) -> ScanState {
        self.last_scan_state_rx.borrow().clone()
    }

    /// Returns a clone of the current foreground snapshot.
    pub fn snapshot(&self) -> LocalSnapshot {
        self.snapshot.clone()
    }

    /// Builds the protobuf metadata describing this worktree.
    pub fn metadata_proto(&self) -> proto::WorktreeMetadata {
        proto::WorktreeMetadata {
            id: self.id().to_proto(),
            root_name: self.root_name().to_string(),
            visible: self.visible,
            abs_path: self.abs_path().as_os_str().as_bytes().to_vec(),
        }
    }

    // Load a file's text and git diff base, then refresh its entry in the
    // snapshot so callers immediately see up-to-date metadata.
    fn load(
        &self,
        path: &Path,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<(File, String, Option<String>)>> {
        let handle = cx.handle();
        let path = Arc::from(path);
        let abs_path = self.absolutize(&path);
        let fs = self.fs.clone();
        let snapshot = self.snapshot();

        cx.spawn(|this, mut cx| async move {
            let text = fs.load(&abs_path).await?;

            // Read the staged (index) version of the file from the containing
            // git repository, if any, to serve as the diff base.
            let diff_base = if let Some(repo) = snapshot.repo_for(&path) {
                if let Ok(repo_relative) = path.strip_prefix(repo.content_path) {
                    let repo_relative = repo_relative.to_owned();
                    cx.background()
                        .spawn(async move { repo.repo.lock().load_index_text(&repo_relative) })
                        .await
                } else {
                    None
                }
            } else {
                None
            };

            // Eagerly populate the snapshot with an updated entry for the loaded file
            let entry = this
                .update(&mut cx, |this, cx| {
                    this.as_local()
                        .unwrap()
                        .refresh_entry(path, abs_path, None, cx)
                })
                .await?;

            Ok((
                File {
                    entry_id: entry.id,
                    worktree: handle,
                    path: entry.path,
                    mtime: entry.mtime,
                    is_local: true,
                    is_deleted: false,
                },
                text,
                diff_base,
            ))
        })
    }

    /// Writes the buffer's contents to `path` and notifies the buffer of the
    /// save (with the resulting file's mtime and a fresh `File` handle).
    pub fn save_buffer_as(
        &self,
        buffer_handle: ModelHandle<Buffer>,
        path: impl Into<Arc<Path>>,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<()>> {
        let buffer = buffer_handle.read(cx);
        // Capture the version/fingerprint before the async save so `did_save`
        // reports exactly what was written.
        let text = buffer.as_rope().clone();
        let fingerprint = text.fingerprint();
        let version = buffer.version();
        let save = self.write_file(path, text, buffer.line_ending(), cx);
        let handle = cx.handle();
        cx.as_mut().spawn(|mut cx| async move {
            let entry = save.await?;
            let file = File {
                entry_id: entry.id,
                worktree: handle,
                path: entry.path,
                mtime: entry.mtime,
                is_local: true,
                is_deleted: false,
            };

            buffer_handle.update(&mut cx, |buffer, cx| {
                buffer.did_save(version, fingerprint, file.mtime, Some(Arc::new(file)), cx);
            });

            Ok(())
        })
    }

    /// Creates a new file (with empty contents) or directory at `path`.
    pub fn create_entry(
        &self,
        path: impl Into<Arc<Path>>,
        is_dir: bool,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<Entry>> {
        self.write_entry_internal(
            path,
            if is_dir {
                None
            } else {
                Some(Default::default())
            },
            cx,
        )
    }

    /// Writes `text` to the file at `path`, creating it if necessary, and
    /// returns the refreshed entry.
    pub fn write_file(
        &self,
        path: impl Into<Arc<Path>>,
        text: Rope,
        line_ending: LineEnding,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<Entry>> {
        self.write_entry_internal(path, Some((text, line_ending)), cx)
    }

    /// Deletes the entry (file or directory, recursively) from disk and from
    /// the snapshot. Returns `None` if no entry with that id exists.
    pub fn delete_entry(
        &self,
        entry_id: ProjectEntryId,
        cx: &mut ModelContext<Worktree>,
    ) -> Option<Task<Result<()>>> {
        let entry = self.entry_for_id(entry_id)?.clone();
        let abs_path = self.absolutize(&entry.path);
        let delete = cx.background().spawn({
            let fs = self.fs.clone();
            let abs_path = abs_path;
            async move {
                if entry.is_file() {
                    fs.remove_file(&abs_path, Default::default()).await
                } else {
                    fs.remove_dir(
                        &abs_path,
                        RemoveOptions {
                            recursive: true,
                            ignore_if_not_exists: false,
                        },
                    )
                    .await
                }
            }
        });

        Some(cx.spawn(|this, mut cx| async move {
            delete.await?;
            this.update(&mut cx, |this, cx| {
                let this = this.as_local_mut().unwrap();
                // Scope the lock so it is released before polling.
                {
                    let mut snapshot = this.background_snapshot.lock();
                    snapshot.delete_entry(entry_id);
                }
                this.poll_snapshot(true, cx);
            });
            Ok(())
        }))
    }

    /// Renames the entry on disk, then refreshes it in the snapshot under its
    /// new path (removing the old path). Returns `None` if the id is unknown.
    pub fn rename_entry(
        &self,
        entry_id: ProjectEntryId,
        new_path: impl Into<Arc<Path>>,
        cx: &mut ModelContext<Worktree>,
    ) -> Option<Task<Result<Entry>>> {
        let old_path = self.entry_for_id(entry_id)?.path.clone();
        let new_path = new_path.into();
        let abs_old_path = self.absolutize(&old_path);
        let abs_new_path = self.absolutize(&new_path);
        let rename = cx.background().spawn({
            let fs = self.fs.clone();
            let abs_new_path = abs_new_path.clone();
            async move {
                fs.rename(&abs_old_path, &abs_new_path, Default::default())
                    .await
            }
        });

        Some(cx.spawn(|this, mut cx| async move {
            rename.await?;
            let entry = this
                .update(&mut cx, |this, cx| {
                    this.as_local_mut().unwrap().refresh_entry(
                        new_path.clone(),
                        abs_new_path,
                        Some(old_path),
                        cx,
                    )
                })
                .await?;
            Ok(entry)
        }))
    }

    /// Recursively copies the entry on disk to `new_path`, then refreshes the
    /// destination entry in the snapshot. Returns `None` if the id is unknown.
    pub fn copy_entry(
        &self,
        entry_id: ProjectEntryId,
        new_path: impl Into<Arc<Path>>,
        cx: &mut ModelContext<Worktree>,
    ) -> Option<Task<Result<Entry>>> {
        let old_path = self.entry_for_id(entry_id)?.path.clone();
        let new_path = new_path.into();
        let abs_old_path = self.absolutize(&old_path);
        let abs_new_path = self.absolutize(&new_path);
        let copy = cx.background().spawn({
            let fs = self.fs.clone();
            let abs_new_path = abs_new_path.clone();
            async move {
                copy_recursive(
                    fs.as_ref(),
                    &abs_old_path,
                    &abs_new_path,
                    Default::default(),
                )
                .await
            }
        });

        Some(cx.spawn(|this, mut cx| async move {
            copy.await?;
            let entry = this
                .update(&mut cx, |this, cx| {
                    this.as_local_mut().unwrap().refresh_entry(
                        new_path.clone(),
                        abs_new_path,
                        None,
                        cx,
                    )
                })
                .await?;
            Ok(entry)
        }))
    }

    // Write a file (`Some((text, line_ending))`) or create a directory
    // (`None`) at `path`, then refresh the corresponding snapshot entry.
    fn write_entry_internal(
        &self,
        path: impl Into<Arc<Path>>,
        text_if_file: Option<(Rope, LineEnding)>,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<Entry>> {
        let path = path.into();
        let abs_path = self.absolutize(&path);
        let write = cx.background().spawn({
            let fs = self.fs.clone();
            let abs_path = abs_path.clone();
            async move {
                if let Some((text, line_ending)) = text_if_file {
                    fs.save(&abs_path, &text, line_ending).await
                } else {
                    fs.create_dir(&abs_path).await
                }
            }
        });

        cx.spawn(|this, mut cx| async move {
            write.await?;
            let entry = this
                .update(&mut cx, |this, cx| {
                    this.as_local_mut()
                        .unwrap()
                        .refresh_entry(path, abs_path, None, cx)
                })
                .await?;
            Ok(entry)
        })
    }

    // Re-stat `abs_path` and upsert its entry into the background snapshot,
    // recomputing its ignored status. `old_path`, when given, is removed
    // first (used by rename). Bumps the scan id and force-polls so the
    // foreground snapshot picks up the change immediately.
    fn refresh_entry(
        &self,
        path: Arc<Path>,
        abs_path: PathBuf,
        old_path: Option<Arc<Path>>,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<Entry>> {
        let fs = self.fs.clone();
        let root_char_bag;
        let next_entry_id;
        {
            let snapshot = self.background_snapshot.lock();
            root_char_bag = snapshot.root_char_bag;
            next_entry_id = snapshot.next_entry_id.clone();
        }
        cx.spawn_weak(|this, mut cx| async move {
            let metadata = fs
                .metadata(&abs_path)
                .await?
                .ok_or_else(|| anyhow!("could not read saved file metadata"))?;
            let this = this
                .upgrade(&cx)
                .ok_or_else(|| anyhow!("worktree was dropped"))?;
            this.update(&mut cx, |this, cx| {
                let this = this.as_local_mut().unwrap();
                let inserted_entry;
                // Scope the lock so it is released before polling.
                {
                    let mut snapshot = this.background_snapshot.lock();
                    let mut entry = Entry::new(path, &metadata, &next_entry_id, root_char_bag);
                    entry.is_ignored = snapshot
                        .ignore_stack_for_abs_path(&abs_path, entry.is_dir())
                        .is_abs_path_ignored(&abs_path, entry.is_dir());
                    if let Some(old_path) = old_path {
                        snapshot.remove_path(&old_path);
                    }
                    inserted_entry = snapshot.insert_entry(entry, fs.as_ref());
                    snapshot.scan_id += 1;
                }
                this.poll_snapshot(true, cx);
                Ok(inserted_entry)
            })
        })
    }

    /// Starts sharing this worktree with collaborators in `project_id`.
    ///
    /// Sends all current diagnostic summaries, then spawns a task that sends
    /// a full initial `UpdateWorktree` followed by incremental diffs for each
    /// subsequent snapshot. The returned task resolves once the initial
    /// update has been sent (or immediately if already shared).
    pub fn share(&mut self, project_id: u64, cx: &mut ModelContext<Worktree>) -> Task<Result<()>> {
        let (share_tx, share_rx) = oneshot::channel();

        if self.share.is_some() {
            // Already shared; resolve immediately.
            let _ = share_tx.send(Ok(()));
        } else {
            let (snapshots_tx, mut snapshots_rx) = watch::channel_with(self.snapshot());
            let rpc = self.client.clone();
            let worktree_id = cx.model_id() as u64;

            for (path, summary) in self.diagnostic_summaries.iter() {
                if let Err(e) = rpc.send(proto::UpdateDiagnosticSummary {
                    project_id,
                    worktree_id,
                    summary: Some(summary.to_proto(&path.0)),
                }) {
                    return Task::ready(Err(e));
                }
            }

            let maintain_remote_snapshot = cx.background().spawn({
                let rpc = rpc;

                async move {
                    // The first received snapshot becomes the full initial update.
                    let mut prev_snapshot = match snapshots_rx.recv().await {
                        Some(snapshot) => {
                            let update = proto::UpdateWorktree {
                                project_id,
                                worktree_id,
                                abs_path: snapshot.abs_path().as_os_str().as_bytes().to_vec(),
                                root_name: snapshot.root_name().to_string(),
                                updated_entries: snapshot
                                    .entries_by_path
                                    .iter()
                                    .map(Into::into)
                                    .collect(),
                                removed_entries: Default::default(),
                                scan_id: snapshot.scan_id as u64,
                                is_last_update: true,
                            };
                            if let Err(error) = send_worktree_update(&rpc, update).await {
                                let _ = share_tx.send(Err(error));
                                return Err(anyhow!("failed to send initial update worktree"));
                            } else {
                                let _ = share_tx.send(Ok(()));
                                snapshot
                            }
                        }
                        None => {
                            share_tx
                                .send(Err(anyhow!("worktree dropped before share completed")))
                                .ok();
                            return Err(anyhow!("failed to send initial update worktree"));
                        }
                    };

                    // Send diffs against the previously sent snapshot.
                    while let Some(snapshot) = snapshots_rx.recv().await {
                        send_worktree_update(
                            &rpc,
                            snapshot.build_update(&prev_snapshot, project_id, worktree_id, true),
                        )
                        .await?;
                        prev_snapshot = snapshot;
                    }

                    Ok::<_, anyhow::Error>(())
                }
                .log_err()
            });
            self.share = Some(ShareState {
                project_id,
                snapshots_tx,
                _maintain_remote_snapshot: Some(maintain_remote_snapshot),
            });
        }

        cx.foreground().spawn(async move {
            share_rx
                .await
                .unwrap_or_else(|_| Err(anyhow!("share ended")))
        })
    }

    /// Stops sharing; dropping the share state cancels the maintenance task.
    pub fn unshare(&mut self) {
        self.share.take();
    }

    /// Whether this worktree is currently shared with collaborators.
    pub fn is_shared(&self) -> bool {
        self.share.is_some()
    }

    /// Sends the per-extension file counts for this worktree over RPC.
    pub fn send_extension_counts(&self, project_id: u64) {
        let mut extensions = Vec::new();
        let mut counts = Vec::new();

        for (extension, count) in self.extension_counts() {
            extensions.push(extension.to_string_lossy().to_string());
            counts.push(*count as u32);
        }

        self.client
            .send(proto::UpdateWorktreeExtensions {
                project_id,
                worktree_id: self.id().to_proto(),
                extensions,
                counts,
            })
            .log_err();
    }
}
1078
impl RemoteWorktree {
    // Clone of the current foreground snapshot.
    fn snapshot(&self) -> Snapshot {
        self.snapshot.clone()
    }

    // Adopt the background snapshot (which the update-applying task mutates)
    // as the foreground snapshot and notify observers.
    fn poll_snapshot(&mut self, cx: &mut ModelContext<Worktree>) {
        self.snapshot = self.background_snapshot.lock().clone();
        cx.emit(Event::UpdatedEntries);
        cx.notify();
    }

    /// Marks the host connection as lost: no further updates will be
    /// accepted, and all pending snapshot subscriptions are dropped
    /// (their receivers resolve with a closed-channel error).
    pub fn disconnected_from_host(&mut self) {
        self.updates_tx.take();
        self.snapshot_subscriptions.clear();
    }

    /// Enqueues an update from the host for application to the background
    /// snapshot. Ignored after `disconnected_from_host`.
    pub fn update_from_remote(&mut self, update: proto::UpdateWorktree) {
        if let Some(updates_tx) = &self.updates_tx {
            updates_tx
                .unbounded_send(update)
                .expect("consumer runs to completion");
        }
    }

    // Whether the foreground snapshot has caught up to `scan_id`: either a
    // later scan, or the same scan once it is complete.
    fn observed_snapshot(&self, scan_id: usize) -> bool {
        self.scan_id > scan_id || (self.scan_id == scan_id && self.is_complete)
    }

    // Future that resolves once `scan_id` has been observed. Subscriptions
    // are inserted in scan-id order so the update loop in `Worktree::remote`
    // can resolve them by popping from the front.
    fn wait_for_snapshot(&mut self, scan_id: usize) -> impl Future<Output = ()> {
        let (tx, rx) = oneshot::channel();
        if self.observed_snapshot(scan_id) {
            // Already observed; resolve immediately.
            let _ = tx.send(());
        } else {
            match self
                .snapshot_subscriptions
                .binary_search_by_key(&scan_id, |probe| probe.0)
            {
                // Either position keeps the queue sorted by scan id.
                Ok(ix) | Err(ix) => self.snapshot_subscriptions.insert(ix, (scan_id, tx)),
            }
        }

        async move {
            // A dropped sender (e.g. on disconnect) also resolves the future.
            let _ = rx.await;
        }
    }

    /// Replaces the diagnostic summary for `path`; an empty summary clears it.
    pub fn update_diagnostic_summary(
        &mut self,
        path: Arc<Path>,
        summary: &proto::DiagnosticSummary,
    ) {
        let summary = DiagnosticSummary {
            language_server_id: summary.language_server_id as usize,
            error_count: summary.error_count as usize,
            warning_count: summary.warning_count as usize,
        };
        if summary.is_empty() {
            self.diagnostic_summaries.remove(&PathKey(path));
        } else {
            self.diagnostic_summaries.insert(PathKey(path), summary);
        }
    }

    /// Inserts an entry received from the host, after first waiting until the
    /// snapshot has caught up to `scan_id`.
    pub fn insert_entry(
        &mut self,
        entry: proto::Entry,
        scan_id: usize,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<Entry>> {
        let wait_for_snapshot = self.wait_for_snapshot(scan_id);
        cx.spawn(|this, mut cx| async move {
            wait_for_snapshot.await;
            this.update(&mut cx, |worktree, _| {
                let worktree = worktree.as_remote_mut().unwrap();
                let mut snapshot = worktree.background_snapshot.lock();
                let entry = snapshot.insert_entry(entry);
                // Keep the foreground snapshot in sync immediately.
                worktree.snapshot = snapshot.clone();
                entry
            })
        })
    }

    /// Deletes an entry on the host's instruction, after first waiting until
    /// the snapshot has caught up to `scan_id`.
    pub(crate) fn delete_entry(
        &mut self,
        id: ProjectEntryId,
        scan_id: usize,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<()>> {
        let wait_for_snapshot = self.wait_for_snapshot(scan_id);
        cx.spawn(|this, mut cx| async move {
            wait_for_snapshot.await;
            this.update(&mut cx, |worktree, _| {
                let worktree = worktree.as_remote_mut().unwrap();
                let mut snapshot = worktree.background_snapshot.lock();
                snapshot.delete_entry(id);
                // Keep the foreground snapshot in sync immediately.
                worktree.snapshot = snapshot.clone();
            });
            Ok(())
        })
    }
}
1180
impl Snapshot {
    /// Stable identifier of the worktree this snapshot belongs to.
    pub fn id(&self) -> WorktreeId {
        self.id
    }

    /// Absolute path of the worktree root on disk.
    pub fn abs_path(&self) -> &Arc<Path> {
        &self.abs_path
    }

    /// Whether an entry with the given project-wide id exists in this snapshot.
    pub fn contains_entry(&self, entry_id: ProjectEntryId) -> bool {
        self.entries_by_id.get(&entry_id, &()).is_some()
    }

    /// Insert (or replace) a single entry received over the wire, keeping the
    /// path-ordered and id-ordered trees in sync. If the id previously mapped
    /// to a different path, the stale path entry is removed first.
    pub(crate) fn insert_entry(&mut self, entry: proto::Entry) -> Result<Entry> {
        let entry = Entry::try_from((&self.root_char_bag, entry))?;
        let old_entry = self.entries_by_id.insert_or_replace(
            PathEntry {
                id: entry.id,
                path: entry.path.clone(),
                is_ignored: entry.is_ignored,
                // Per-entry scan ids are not carried by the proto message.
                scan_id: 0,
            },
            &(),
        );
        if let Some(old_entry) = old_entry {
            self.entries_by_path.remove(&PathKey(old_entry.path), &());
        }
        self.entries_by_path.insert_or_replace(entry.clone(), &());
        Ok(entry)
    }

    /// Remove the entry with the given id, along with all of its descendants.
    /// Returns `false` if the id was not present.
    fn delete_entry(&mut self, entry_id: ProjectEntryId) -> bool {
        if let Some(removed_entry) = self.entries_by_id.remove(&entry_id, &()) {
            self.entries_by_path = {
                let mut cursor = self.entries_by_path.cursor();
                // Keep everything strictly before the removed path...
                let mut new_entries_by_path =
                    cursor.slice(&TraversalTarget::Path(&removed_entry.path), Bias::Left, &());
                // ...skip the removed entry and every descendant (path-prefixed)
                // entry, removing each from the id tree as we go...
                while let Some(entry) = cursor.item() {
                    if entry.path.starts_with(&removed_entry.path) {
                        self.entries_by_id.remove(&entry.id, &());
                        cursor.next(&());
                    } else {
                        break;
                    }
                }
                // ...then re-attach the remainder of the tree.
                new_entries_by_path.push_tree(cursor.suffix(&()), &());
                new_entries_by_path
            };

            true
        } else {
            false
        }
    }

    /// Apply a batched update from the host: removals first, then an
    /// insert-or-replace for each updated entry.
    pub(crate) fn apply_remote_update(&mut self, update: proto::UpdateWorktree) -> Result<()> {
        let mut entries_by_path_edits = Vec::new();
        let mut entries_by_id_edits = Vec::new();
        for entry_id in update.removed_entries {
            let entry = self
                .entry_for_id(ProjectEntryId::from_proto(entry_id))
                .ok_or_else(|| anyhow!("unknown entry {}", entry_id))?;
            entries_by_path_edits.push(Edit::Remove(PathKey(entry.path.clone())));
            entries_by_id_edits.push(Edit::Remove(entry.id));
        }

        for entry in update.updated_entries {
            let entry = Entry::try_from((&self.root_char_bag, entry))?;
            // If the entry moved, drop its old path key before inserting the new one.
            if let Some(PathEntry { path, .. }) = self.entries_by_id.get(&entry.id, &()) {
                entries_by_path_edits.push(Edit::Remove(PathKey(path.clone())));
            }
            entries_by_id_edits.push(Edit::Insert(PathEntry {
                id: entry.id,
                path: entry.path.clone(),
                is_ignored: entry.is_ignored,
                scan_id: 0,
            }));
            entries_by_path_edits.push(Edit::Insert(entry));
        }

        self.entries_by_path.edit(entries_by_path_edits, &());
        self.entries_by_id.edit(entries_by_id_edits, &());
        self.scan_id = update.scan_id as usize;
        self.is_complete = update.is_last_update;

        Ok(())
    }

    /// Total number of file entries, including ignored ones.
    pub fn file_count(&self) -> usize {
        self.entries_by_path.summary().file_count
    }

    /// Number of non-ignored file entries.
    pub fn visible_file_count(&self) -> usize {
        self.entries_by_path.summary().visible_file_count
    }

    /// Begin a traversal at the given offset, counting only the entry kinds
    /// selected by the two `include_*` flags.
    fn traverse_from_offset(
        &self,
        include_dirs: bool,
        include_ignored: bool,
        start_offset: usize,
    ) -> Traversal {
        let mut cursor = self.entries_by_path.cursor();
        cursor.seek(
            &TraversalTarget::Count {
                count: start_offset,
                include_dirs,
                include_ignored,
            },
            Bias::Right,
            &(),
        );
        Traversal {
            cursor,
            include_dirs,
            include_ignored,
        }
    }

    /// Begin a traversal at the entry for `path` (or the first entry after it).
    fn traverse_from_path(
        &self,
        include_dirs: bool,
        include_ignored: bool,
        path: &Path,
    ) -> Traversal {
        let mut cursor = self.entries_by_path.cursor();
        cursor.seek(&TraversalTarget::Path(path), Bias::Left, &());
        Traversal {
            cursor,
            include_dirs,
            include_ignored,
        }
    }

    /// Iterate over file entries, starting at file offset `start`.
    pub fn files(&self, include_ignored: bool, start: usize) -> Traversal {
        self.traverse_from_offset(false, include_ignored, start)
    }

    /// Iterate over all entries, files and directories alike.
    pub fn entries(&self, include_ignored: bool) -> Traversal {
        self.traverse_from_offset(true, include_ignored, 0)
    }

    /// Iterate over the paths of all entries, skipping the root ("") entry.
    pub fn paths(&self) -> impl Iterator<Item = &Arc<Path>> {
        let empty_path = Path::new("");
        self.entries_by_path
            .cursor::<()>()
            .filter(move |entry| entry.path.as_ref() != empty_path)
            .map(|entry| &entry.path)
    }

    /// Iterate over the immediate children of `parent_path`.
    fn child_entries<'a>(&'a self, parent_path: &'a Path) -> ChildEntriesIter<'a> {
        let mut cursor = self.entries_by_path.cursor();
        // Bias::Right positions the cursor just past the parent entry itself.
        cursor.seek(&TraversalTarget::Path(parent_path), Bias::Right, &());
        let traversal = Traversal {
            cursor,
            include_dirs: true,
            include_ignored: true,
        };
        ChildEntriesIter {
            traversal,
            parent_path,
        }
    }

    /// The entry for the worktree root, if present.
    pub fn root_entry(&self) -> Option<&Entry> {
        self.entry_for_path("")
    }

    /// The file name of the worktree root.
    pub fn root_name(&self) -> &str {
        &self.root_name
    }

    /// The id of the scan that produced this snapshot.
    pub fn scan_id(&self) -> usize {
        self.scan_id
    }

    /// Look up the entry at exactly `path`, if any.
    pub fn entry_for_path(&self, path: impl AsRef<Path>) -> Option<&Entry> {
        let path = path.as_ref();
        self.traverse_from_path(true, true, path)
            .entry()
            .and_then(|entry| {
                // The traversal may land on a later entry; require an exact match.
                if entry.path.as_ref() == path {
                    Some(entry)
                } else {
                    None
                }
            })
    }

    /// Look up an entry by its project-wide id.
    pub fn entry_for_id(&self, id: ProjectEntryId) -> Option<&Entry> {
        let entry = self.entries_by_id.get(&id, &())?;
        self.entry_for_path(&entry.path)
    }

    /// The inode of the entry at `path`, if any.
    pub fn inode_for_path(&self, path: impl AsRef<Path>) -> Option<u64> {
        self.entry_for_path(path.as_ref()).map(|e| e.inode)
    }
}
1379
impl LocalSnapshot {
    /// Counts of file extensions among non-ignored entries.
    pub fn extension_counts(&self) -> &HashMap<OsString, usize> {
        &self.extension_counts
    }

    // Gives the most specific git repository for a given path
    pub(crate) fn repo_for(&self, path: &Path) -> Option<GitRepositoryEntry> {
        self.git_repositories
            .iter()
            .rev() //git_repositories is ordered lexicographically, so the last
            // match is the deepest (most specific) repository.
            .find(|repo| repo.manages(path))
            .cloned()
    }

    /// Find the repository whose `.git` directory contains `path`, if any.
    pub(crate) fn in_dot_git(&mut self, path: &Path) -> Option<&mut GitRepositoryEntry> {
        // Git repositories cannot be nested, so we don't need to reverse the order
        self.git_repositories
            .iter_mut()
            .find(|repo| repo.in_dot_git(path))
    }

    /// Build a proto message that transmits this entire snapshot in one update.
    #[cfg(test)]
    pub(crate) fn build_initial_update(&self, project_id: u64) -> proto::UpdateWorktree {
        let root_name = self.root_name.clone();
        proto::UpdateWorktree {
            project_id,
            worktree_id: self.id().to_proto(),
            abs_path: self.abs_path().as_os_str().as_bytes().to_vec(),
            root_name,
            updated_entries: self.entries_by_path.iter().map(Into::into).collect(),
            removed_entries: Default::default(),
            scan_id: self.scan_id as u64,
            is_last_update: true,
        }
    }

    /// Diff this snapshot against an older one (`other`), producing the
    /// proto update that transforms `other` into `self`.
    ///
    /// Both id-ordered entry lists are walked in lockstep: ids present only
    /// in `self` are additions, ids present only in `other` are removals,
    /// and ids in both are re-sent only when their scan ids differ.
    pub(crate) fn build_update(
        &self,
        other: &Self,
        project_id: u64,
        worktree_id: u64,
        include_ignored: bool,
    ) -> proto::UpdateWorktree {
        let mut updated_entries = Vec::new();
        let mut removed_entries = Vec::new();
        let mut self_entries = self
            .entries_by_id
            .cursor::<()>()
            .filter(|e| include_ignored || !e.is_ignored)
            .peekable();
        let mut other_entries = other
            .entries_by_id
            .cursor::<()>()
            .filter(|e| include_ignored || !e.is_ignored)
            .peekable();
        loop {
            match (self_entries.peek(), other_entries.peek()) {
                (Some(self_entry), Some(other_entry)) => {
                    match Ord::cmp(&self_entry.id, &other_entry.id) {
                        Ordering::Less => {
                            // Present only in self: newly added entry.
                            let entry = self.entry_for_id(self_entry.id).unwrap().into();
                            updated_entries.push(entry);
                            self_entries.next();
                        }
                        Ordering::Equal => {
                            // Present in both: re-send only if it changed
                            // (i.e. was touched by a newer scan).
                            if self_entry.scan_id != other_entry.scan_id {
                                let entry = self.entry_for_id(self_entry.id).unwrap().into();
                                updated_entries.push(entry);
                            }

                            self_entries.next();
                            other_entries.next();
                        }
                        Ordering::Greater => {
                            // Present only in other: removed entry.
                            removed_entries.push(other_entry.id.to_proto());
                            other_entries.next();
                        }
                    }
                }
                (Some(self_entry), None) => {
                    let entry = self.entry_for_id(self_entry.id).unwrap().into();
                    updated_entries.push(entry);
                    self_entries.next();
                }
                (None, Some(other_entry)) => {
                    removed_entries.push(other_entry.id.to_proto());
                    other_entries.next();
                }
                (None, None) => break,
            }
        }

        proto::UpdateWorktree {
            project_id,
            worktree_id,
            abs_path: self.abs_path().as_os_str().as_bytes().to_vec(),
            root_name: self.root_name().to_string(),
            updated_entries,
            removed_entries,
            scan_id: self.scan_id as u64,
            is_last_update: true,
        }
    }

    /// Insert (or replace) a locally scanned entry, keeping the gitignore
    /// cache, both entry trees, and the extension counts in sync.
    fn insert_entry(&mut self, mut entry: Entry, fs: &dyn Fs) -> Entry {
        // A new/changed .gitignore file is reparsed immediately so ignore
        // state stays current.
        if entry.is_file() && entry.path.file_name() == Some(&GITIGNORE) {
            let abs_path = self.abs_path.join(&entry.path);
            match smol::block_on(build_gitignore(&abs_path, fs)) {
                Ok(ignore) => {
                    self.ignores_by_parent_abs_path.insert(
                        abs_path.parent().unwrap().into(),
                        (Arc::new(ignore), self.scan_id),
                    );
                }
                Err(error) => {
                    log::error!(
                        "error loading .gitignore file {:?} - {:?}",
                        &entry.path,
                        error
                    );
                }
            }
        }

        self.reuse_entry_id(&mut entry);

        // Don't demote an already-scanned directory back to PendingDir.
        if entry.kind == EntryKind::PendingDir {
            if let Some(existing_entry) =
                self.entries_by_path.get(&PathKey(entry.path.clone()), &())
            {
                entry.kind = existing_entry.kind;
            }
        }

        self.entries_by_path.insert_or_replace(entry.clone(), &());
        let scan_id = self.scan_id;
        let removed_entry = self.entries_by_id.insert_or_replace(
            PathEntry {
                id: entry.id,
                path: entry.path.clone(),
                is_ignored: entry.is_ignored,
                scan_id,
            },
            &(),
        );

        // Keep the extension histogram balanced across the replacement.
        if let Some(removed_entry) = removed_entry {
            self.dec_extension_count(&removed_entry.path, removed_entry.is_ignored);
        }
        self.inc_extension_count(&entry.path, entry.is_ignored);

        entry
    }

    /// Record the scanned children of `parent_path`, promoting the parent
    /// from `PendingDir` to `Dir` and registering any gitignore or git
    /// repository discovered in it.
    fn populate_dir(
        &mut self,
        parent_path: Arc<Path>,
        entries: impl IntoIterator<Item = Entry>,
        ignore: Option<Arc<Gitignore>>,
        fs: &dyn Fs,
    ) {
        let mut parent_entry = if let Some(parent_entry) =
            self.entries_by_path.get(&PathKey(parent_path.clone()), &())
        {
            parent_entry.clone()
        } else {
            log::warn!(
                "populating a directory {:?} that has been removed",
                parent_path
            );
            return;
        };

        if let Some(ignore) = ignore {
            self.ignores_by_parent_abs_path.insert(
                self.abs_path.join(&parent_path).into(),
                (ignore, self.scan_id),
            );
        }
        // Only a pending directory may be populated; anything else indicates
        // a scanner bug.
        if matches!(parent_entry.kind, EntryKind::PendingDir) {
            parent_entry.kind = EntryKind::Dir;
        } else {
            unreachable!();
        }

        // A populated `.git` directory registers its repository, keyed by the
        // repository's content path (the `.git` dir's parent).
        if parent_path.file_name() == Some(&DOT_GIT) {
            let abs_path = self.abs_path.join(&parent_path);
            let content_path: Arc<Path> = parent_path.parent().unwrap().into();
            if let Err(ix) = self
                .git_repositories
                .binary_search_by_key(&&content_path, |repo| &repo.content_path)
            {
                if let Some(repo) = fs.open_repo(abs_path.as_path()) {
                    self.git_repositories.insert(
                        ix,
                        GitRepositoryEntry {
                            repo,
                            scan_id: 0,
                            content_path,
                            git_dir_path: parent_path,
                        },
                    );
                }
            }
        }

        let mut entries_by_path_edits = vec![Edit::Insert(parent_entry)];
        let mut entries_by_id_edits = Vec::new();

        for mut entry in entries {
            self.reuse_entry_id(&mut entry);
            self.inc_extension_count(&entry.path, entry.is_ignored);
            entries_by_id_edits.push(Edit::Insert(PathEntry {
                id: entry.id,
                path: entry.path.clone(),
                is_ignored: entry.is_ignored,
                scan_id: self.scan_id,
            }));
            entries_by_path_edits.push(Edit::Insert(entry));
        }

        self.entries_by_path.edit(entries_by_path_edits, &());
        let removed_entries = self.entries_by_id.edit(entries_by_id_edits, &());

        // Entries replaced by the edit above no longer contribute extensions.
        for removed_entry in removed_entries {
            self.dec_extension_count(&removed_entry.path, removed_entry.is_ignored);
        }
    }

    /// Increment the extension count for `path` (ignored entries aren't counted).
    /// The get_mut-then-insert shape avoids allocating an `OsString` key on
    /// the common already-present path.
    fn inc_extension_count(&mut self, path: &Path, ignored: bool) {
        if !ignored {
            if let Some(extension) = path.extension() {
                if let Some(count) = self.extension_counts.get_mut(extension) {
                    *count += 1;
                } else {
                    self.extension_counts.insert(extension.into(), 1);
                }
            }
        }
    }

    /// Decrement the extension count for `path` (ignored entries aren't counted).
    fn dec_extension_count(&mut self, path: &Path, ignored: bool) {
        if !ignored {
            if let Some(extension) = path.extension() {
                if let Some(count) = self.extension_counts.get_mut(extension) {
                    *count -= 1;
                }
            }
        }
    }

    /// Preserve a stable id for `entry`: reuse the id of a recently removed
    /// entry with the same inode (a rename/recreate), or of an existing entry
    /// at the same path.
    fn reuse_entry_id(&mut self, entry: &mut Entry) {
        if let Some(removed_entry_id) = self.removed_entry_ids.remove(&entry.inode) {
            entry.id = removed_entry_id;
        } else if let Some(existing_entry) = self.entry_for_path(&entry.path) {
            entry.id = existing_entry.id;
        }
    }

    /// Remove `path` and all of its descendants from the snapshot, and mark
    /// any gitignore or git repository rooted there as changed in this scan.
    fn remove_path(&mut self, path: &Path) {
        let mut new_entries;
        let removed_entries;
        {
            let mut cursor = self.entries_by_path.cursor::<TraversalProgress>();
            // Everything from `path` up to (but excluding) its successor is
            // the removed subtree.
            new_entries = cursor.slice(&TraversalTarget::Path(path), Bias::Left, &());
            removed_entries = cursor.slice(&TraversalTarget::PathSuccessor(path), Bias::Left, &());
            new_entries.push_tree(cursor.suffix(&()), &());
        }
        self.entries_by_path = new_entries;

        let mut entries_by_id_edits = Vec::new();
        for entry in removed_entries.cursor::<()>() {
            // Remember the highest id seen for this inode so a recreated
            // entry can reuse it (see `reuse_entry_id`).
            let removed_entry_id = self
                .removed_entry_ids
                .entry(entry.inode)
                .or_insert(entry.id);
            *removed_entry_id = cmp::max(*removed_entry_id, entry.id);
            entries_by_id_edits.push(Edit::Remove(entry.id));
            self.dec_extension_count(&entry.path, entry.is_ignored);
        }
        self.entries_by_id.edit(entries_by_id_edits, &());

        if path.file_name() == Some(&GITIGNORE) {
            // Mark the cached ignore for the parent directory as stale.
            let abs_parent_path = self.abs_path.join(path.parent().unwrap());
            if let Some((_, scan_id)) = self
                .ignores_by_parent_abs_path
                .get_mut(abs_parent_path.as_path())
            {
                *scan_id = self.snapshot.scan_id;
            }
        } else if path.file_name() == Some(&DOT_GIT) {
            // Mark the repository whose `.git` dir was removed as changed.
            let parent_path = path.parent().unwrap();
            if let Ok(ix) = self
                .git_repositories
                .binary_search_by_key(&parent_path, |repo| repo.git_dir_path.as_ref())
            {
                self.git_repositories[ix].scan_id = self.snapshot.scan_id;
            }
        }
    }

    /// Collect the inodes of all known ancestors of `path` (used to detect
    /// symlink cycles during scanning — TODO confirm against callers).
    fn ancestor_inodes_for_path(&self, path: &Path) -> TreeSet<u64> {
        let mut inodes = TreeSet::default();
        for ancestor in path.ancestors().skip(1) {
            if let Some(entry) = self.entry_for_path(ancestor) {
                inodes.insert(entry.inode);
            }
        }
        inodes
    }

    /// Build the stack of gitignores that applies at `abs_path`, walking from
    /// the outermost ancestor inward and short-circuiting to `IgnoreStack::all`
    /// as soon as any ancestor (or the path itself) is ignored.
    fn ignore_stack_for_abs_path(&self, abs_path: &Path, is_dir: bool) -> Arc<IgnoreStack> {
        let mut new_ignores = Vec::new();
        for ancestor in abs_path.ancestors().skip(1) {
            if let Some((ignore, _)) = self.ignores_by_parent_abs_path.get(ancestor) {
                new_ignores.push((ancestor, Some(ignore.clone())));
            } else {
                new_ignores.push((ancestor, None));
            }
        }

        let mut ignore_stack = IgnoreStack::none();
        for (parent_abs_path, ignore) in new_ignores.into_iter().rev() {
            if ignore_stack.is_abs_path_ignored(parent_abs_path, true) {
                ignore_stack = IgnoreStack::all();
                break;
            } else if let Some(ignore) = ignore {
                ignore_stack = ignore_stack.append(parent_abs_path.into(), ignore);
            }
        }

        if ignore_stack.is_abs_path_ignored(abs_path, is_dir) {
            ignore_stack = IgnoreStack::all();
        }

        ignore_stack
    }

    /// All git repositories found in this worktree, in lexicographic order
    /// of their content paths.
    pub fn git_repo_entries(&self) -> &[GitRepositoryEntry] {
        &self.git_repositories
    }
}
1722
impl GitRepositoryEntry {
    /// Whether `path` lies within this repository's working tree.
    /// Note that this path should be relative to the worktree root.
    pub(crate) fn manages(&self, path: &Path) -> bool {
        path.starts_with(self.content_path.as_ref())
    }

    /// Whether `path` lies within this repository's `.git` directory.
    /// Note that this path should be relative to the worktree root.
    pub(crate) fn in_dot_git(&self, path: &Path) -> bool {
        path.starts_with(self.git_dir_path.as_ref())
    }
}
1734
1735async fn build_gitignore(abs_path: &Path, fs: &dyn Fs) -> Result<Gitignore> {
1736 let contents = fs.load(abs_path).await?;
1737 let parent = abs_path.parent().unwrap_or_else(|| Path::new("/"));
1738 let mut builder = GitignoreBuilder::new(parent);
1739 for line in contents.lines() {
1740 builder.add_line(Some(abs_path.into()), line)?;
1741 }
1742 Ok(builder.build()?)
1743}
1744
1745impl WorktreeId {
1746 pub fn from_usize(handle_id: usize) -> Self {
1747 Self(handle_id)
1748 }
1749
1750 pub(crate) fn from_proto(id: u64) -> Self {
1751 Self(id as usize)
1752 }
1753
1754 pub fn to_proto(&self) -> u64 {
1755 self.0 as u64
1756 }
1757
1758 pub fn to_usize(&self) -> usize {
1759 self.0
1760 }
1761}
1762
1763impl fmt::Display for WorktreeId {
1764 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1765 self.0.fmt(f)
1766 }
1767}
1768
1769impl Deref for Worktree {
1770 type Target = Snapshot;
1771
1772 fn deref(&self) -> &Self::Target {
1773 match self {
1774 Worktree::Local(worktree) => &worktree.snapshot,
1775 Worktree::Remote(worktree) => &worktree.snapshot,
1776 }
1777 }
1778}
1779
1780impl Deref for LocalWorktree {
1781 type Target = LocalSnapshot;
1782
1783 fn deref(&self) -> &Self::Target {
1784 &self.snapshot
1785 }
1786}
1787
1788impl Deref for RemoteWorktree {
1789 type Target = Snapshot;
1790
1791 fn deref(&self) -> &Self::Target {
1792 &self.snapshot
1793 }
1794}
1795
1796impl fmt::Debug for LocalWorktree {
1797 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1798 self.snapshot.fmt(f)
1799 }
1800}
1801
1802impl fmt::Debug for Snapshot {
1803 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1804 struct EntriesById<'a>(&'a SumTree<PathEntry>);
1805 struct EntriesByPath<'a>(&'a SumTree<Entry>);
1806
1807 impl<'a> fmt::Debug for EntriesByPath<'a> {
1808 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1809 f.debug_map()
1810 .entries(self.0.iter().map(|entry| (&entry.path, entry.id)))
1811 .finish()
1812 }
1813 }
1814
1815 impl<'a> fmt::Debug for EntriesById<'a> {
1816 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1817 f.debug_list().entries(self.0.iter()).finish()
1818 }
1819 }
1820
1821 f.debug_struct("Snapshot")
1822 .field("id", &self.id)
1823 .field("root_name", &self.root_name)
1824 .field("entries_by_path", &EntriesByPath(&self.entries_by_path))
1825 .field("entries_by_id", &EntriesById(&self.entries_by_id))
1826 .finish()
1827 }
1828}
1829
/// A handle to a file within a worktree, shared between local and remote
/// projects.
#[derive(Clone, PartialEq)]
pub struct File {
    /// The worktree containing this file.
    pub worktree: ModelHandle<Worktree>,
    /// Path of the file, relative to the worktree root.
    pub path: Arc<Path>,
    /// Last known modification time of the file on disk.
    pub mtime: SystemTime,
    /// Id of the worktree entry backing this file.
    pub(crate) entry_id: ProjectEntryId,
    /// Whether the containing worktree is local to this host.
    pub(crate) is_local: bool,
    /// Whether the file has been deleted from disk.
    pub(crate) is_deleted: bool,
}
1839
impl language::File for File {
    /// Downcast to a local file when the containing worktree is local.
    fn as_local(&self) -> Option<&dyn language::LocalFile> {
        if self.is_local {
            Some(self)
        } else {
            None
        }
    }

    fn mtime(&self) -> SystemTime {
        self.mtime
    }

    /// Path of the file, relative to the worktree root.
    fn path(&self) -> &Arc<Path> {
        &self.path
    }

    /// Display path: the worktree's root name (or, for invisible worktrees,
    /// its absolute path abbreviated with "~") joined with the relative path.
    fn full_path(&self, cx: &AppContext) -> PathBuf {
        let mut full_path = PathBuf::new();
        let worktree = self.worktree.read(cx);

        if worktree.is_visible() {
            full_path.push(worktree.root_name());
        } else {
            let path = worktree.abs_path();

            // Abbreviate paths under the user's home directory with "~".
            if worktree.is_local() && path.starts_with(cx.global::<HomeDir>().as_path()) {
                full_path.push("~");
                full_path.push(path.strip_prefix(cx.global::<HomeDir>().as_path()).unwrap());
            } else {
                full_path.push(path)
            }
        }

        // An empty relative path denotes the worktree root itself.
        if self.path.components().next().is_some() {
            full_path.push(&self.path);
        }

        full_path
    }

    /// Returns the last component of this handle's absolute path. If this handle refers to the root
    /// of its worktree, then this method will return the name of the worktree itself.
    fn file_name<'a>(&'a self, cx: &'a AppContext) -> &'a OsStr {
        self.path
            .file_name()
            .unwrap_or_else(|| OsStr::new(&self.worktree.read(cx).root_name))
    }

    fn is_deleted(&self) -> bool {
        self.is_deleted
    }

    /// Save `text` to this file.
    ///
    /// On a local worktree the write happens on this host and, when the
    /// project is shared, peers are notified with `BufferSaved`. On a remote
    /// worktree the save is requested from the host over RPC.
    fn save(
        &self,
        buffer_id: u64,
        text: Rope,
        version: clock::Global,
        line_ending: LineEnding,
        cx: &mut MutableAppContext,
    ) -> Task<Result<(clock::Global, String, SystemTime)>> {
        self.worktree.update(cx, |worktree, cx| match worktree {
            Worktree::Local(worktree) => {
                let rpc = worktree.client.clone();
                let project_id = worktree.share.as_ref().map(|share| share.project_id);
                let fingerprint = text.fingerprint();
                let save = worktree.write_file(self.path.clone(), text, line_ending, cx);
                cx.background().spawn(async move {
                    let entry = save.await?;
                    // Only notify peers when the project is shared.
                    if let Some(project_id) = project_id {
                        rpc.send(proto::BufferSaved {
                            project_id,
                            buffer_id,
                            version: serialize_version(&version),
                            mtime: Some(entry.mtime.into()),
                            fingerprint: fingerprint.clone(),
                        })?;
                    }
                    Ok((version, fingerprint, entry.mtime))
                })
            }
            Worktree::Remote(worktree) => {
                let rpc = worktree.client.clone();
                let project_id = worktree.project_id;
                cx.foreground().spawn(async move {
                    let response = rpc
                        .request(proto::SaveBuffer {
                            project_id,
                            buffer_id,
                            version: serialize_version(&version),
                        })
                        .await?;
                    let version = deserialize_version(response.version);
                    let mtime = response
                        .mtime
                        .ok_or_else(|| anyhow!("missing mtime"))?
                        .into();
                    Ok((version, response.fingerprint, mtime))
                })
            }
        })
    }

    fn as_any(&self) -> &dyn Any {
        self
    }

    /// Serialize this handle for transmission to peers.
    fn to_proto(&self) -> rpc::proto::File {
        rpc::proto::File {
            worktree_id: self.worktree.id() as u64,
            entry_id: self.entry_id.to_proto(),
            path: self.path.to_string_lossy().into(),
            mtime: Some(self.mtime.into()),
            is_deleted: self.is_deleted,
        }
    }
}
1957
impl language::LocalFile for File {
    /// Absolute path of this file on disk.
    fn abs_path(&self, cx: &AppContext) -> PathBuf {
        self.worktree
            .read(cx)
            .as_local()
            .unwrap()
            .abs_path
            .join(&self.path)
    }

    /// Read the file's current contents from disk on a background thread.
    fn load(&self, cx: &AppContext) -> Task<Result<String>> {
        let worktree = self.worktree.read(cx).as_local().unwrap();
        let abs_path = worktree.absolutize(&self.path);
        let fs = worktree.fs.clone();
        cx.background()
            .spawn(async move { fs.load(&abs_path).await })
    }

    /// Notify peers that the buffer backed by this file was reloaded from
    /// disk — a no-op unless the project is currently shared.
    fn buffer_reloaded(
        &self,
        buffer_id: u64,
        version: &clock::Global,
        fingerprint: String,
        line_ending: LineEnding,
        mtime: SystemTime,
        cx: &mut MutableAppContext,
    ) {
        let worktree = self.worktree.read(cx).as_local().unwrap();
        if let Some(project_id) = worktree.share.as_ref().map(|share| share.project_id) {
            worktree
                .client
                .send(proto::BufferReloaded {
                    project_id,
                    buffer_id,
                    version: serialize_version(version),
                    mtime: Some(mtime.into()),
                    fingerprint,
                    line_ending: serialize_line_ending(line_ending) as i32,
                })
                .log_err();
        }
    }
}
2001
2002impl File {
2003 pub fn from_proto(
2004 proto: rpc::proto::File,
2005 worktree: ModelHandle<Worktree>,
2006 cx: &AppContext,
2007 ) -> Result<Self> {
2008 let worktree_id = worktree
2009 .read(cx)
2010 .as_remote()
2011 .ok_or_else(|| anyhow!("not remote"))?
2012 .id();
2013
2014 if worktree_id.to_proto() != proto.worktree_id {
2015 return Err(anyhow!("worktree id does not match file"));
2016 }
2017
2018 Ok(Self {
2019 worktree,
2020 path: Path::new(&proto.path).into(),
2021 mtime: proto.mtime.ok_or_else(|| anyhow!("no timestamp"))?.into(),
2022 entry_id: ProjectEntryId::from_proto(proto.entry_id),
2023 is_local: false,
2024 is_deleted: proto.is_deleted,
2025 })
2026 }
2027
2028 pub fn from_dyn(file: Option<&dyn language::File>) -> Option<&Self> {
2029 file.and_then(|f| f.as_any().downcast_ref())
2030 }
2031
2032 pub fn worktree_id(&self, cx: &AppContext) -> WorktreeId {
2033 self.worktree.read(cx).id()
2034 }
2035
2036 pub fn project_entry_id(&self, _: &AppContext) -> Option<ProjectEntryId> {
2037 if self.is_deleted {
2038 None
2039 } else {
2040 Some(self.entry_id)
2041 }
2042 }
2043}
2044
/// A single file or directory within a worktree.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct Entry {
    /// Project-wide stable id of this entry.
    pub id: ProjectEntryId,
    /// Whether this is a file or a (possibly not-yet-scanned) directory.
    pub kind: EntryKind,
    /// Path relative to the worktree root.
    pub path: Arc<Path>,
    /// Filesystem inode of the entry.
    pub inode: u64,
    /// Last known modification time.
    pub mtime: SystemTime,
    pub is_symlink: bool,
    /// Whether the entry is excluded by the applicable gitignore stack.
    pub is_ignored: bool,
}
2055
/// The kind of a worktree entry.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum EntryKind {
    /// A directory whose children have not been scanned yet.
    PendingDir,
    /// A fully scanned directory.
    Dir,
    /// A file, carrying the character bag used for fuzzy matching.
    File(CharBag),
}
2062
2063impl Entry {
2064 fn new(
2065 path: Arc<Path>,
2066 metadata: &fs::Metadata,
2067 next_entry_id: &AtomicUsize,
2068 root_char_bag: CharBag,
2069 ) -> Self {
2070 Self {
2071 id: ProjectEntryId::new(next_entry_id),
2072 kind: if metadata.is_dir {
2073 EntryKind::PendingDir
2074 } else {
2075 EntryKind::File(char_bag_for_path(root_char_bag, &path))
2076 },
2077 path,
2078 inode: metadata.inode,
2079 mtime: metadata.mtime,
2080 is_symlink: metadata.is_symlink,
2081 is_ignored: false,
2082 }
2083 }
2084
2085 pub fn is_dir(&self) -> bool {
2086 matches!(self.kind, EntryKind::Dir | EntryKind::PendingDir)
2087 }
2088
2089 pub fn is_file(&self) -> bool {
2090 matches!(self.kind, EntryKind::File(_))
2091 }
2092}
2093
2094impl sum_tree::Item for Entry {
2095 type Summary = EntrySummary;
2096
2097 fn summary(&self) -> Self::Summary {
2098 let visible_count = if self.is_ignored { 0 } else { 1 };
2099 let file_count;
2100 let visible_file_count;
2101 if self.is_file() {
2102 file_count = 1;
2103 visible_file_count = visible_count;
2104 } else {
2105 file_count = 0;
2106 visible_file_count = 0;
2107 }
2108
2109 EntrySummary {
2110 max_path: self.path.clone(),
2111 count: 1,
2112 visible_count,
2113 file_count,
2114 visible_file_count,
2115 }
2116 }
2117}
2118
2119impl sum_tree::KeyedItem for Entry {
2120 type Key = PathKey;
2121
2122 fn key(&self) -> Self::Key {
2123 PathKey(self.path.clone())
2124 }
2125}
2126
/// Aggregated statistics over a subtree of `Entry` items.
#[derive(Clone, Debug)]
pub struct EntrySummary {
    /// The greatest (last, in path order) path in the subtree.
    max_path: Arc<Path>,
    /// Total number of entries.
    count: usize,
    /// Number of non-ignored entries.
    visible_count: usize,
    /// Number of file entries.
    file_count: usize,
    /// Number of non-ignored file entries.
    visible_file_count: usize,
}
2135
2136impl Default for EntrySummary {
2137 fn default() -> Self {
2138 Self {
2139 max_path: Arc::from(Path::new("")),
2140 count: 0,
2141 visible_count: 0,
2142 file_count: 0,
2143 visible_file_count: 0,
2144 }
2145 }
2146}
2147
2148impl sum_tree::Summary for EntrySummary {
2149 type Context = ();
2150
2151 fn add_summary(&mut self, rhs: &Self, _: &()) {
2152 self.max_path = rhs.max_path.clone();
2153 self.count += rhs.count;
2154 self.visible_count += rhs.visible_count;
2155 self.file_count += rhs.file_count;
2156 self.visible_file_count += rhs.visible_file_count;
2157 }
2158}
2159
/// An entry record ordered by id, used for id-based lookups and for diffing
/// snapshots (see `LocalSnapshot::build_update`).
#[derive(Clone, Debug)]
struct PathEntry {
    id: ProjectEntryId,
    /// Path relative to the worktree root.
    path: Arc<Path>,
    is_ignored: bool,
    /// Id of the scan that last touched this entry.
    scan_id: usize,
}
2167
2168impl sum_tree::Item for PathEntry {
2169 type Summary = PathEntrySummary;
2170
2171 fn summary(&self) -> Self::Summary {
2172 PathEntrySummary { max_id: self.id }
2173 }
2174}
2175
2176impl sum_tree::KeyedItem for PathEntry {
2177 type Key = ProjectEntryId;
2178
2179 fn key(&self) -> Self::Key {
2180 self.id
2181 }
2182}
2183
/// Summary for id-ordered `PathEntry` trees: the greatest id in the subtree.
#[derive(Clone, Debug, Default)]
struct PathEntrySummary {
    max_id: ProjectEntryId,
}
2188
2189impl sum_tree::Summary for PathEntrySummary {
2190 type Context = ();
2191
2192 fn add_summary(&mut self, summary: &Self, _: &Self::Context) {
2193 self.max_id = summary.max_id;
2194 }
2195}
2196
2197impl<'a> sum_tree::Dimension<'a, PathEntrySummary> for ProjectEntryId {
2198 fn add_summary(&mut self, summary: &'a PathEntrySummary, _: &()) {
2199 *self = summary.max_id;
2200 }
2201}
2202
/// Key ordering worktree entries lexicographically by path.
#[derive(Clone, Debug, Eq, PartialEq, Ord, PartialOrd)]
pub struct PathKey(Arc<Path>);
2205
2206impl Default for PathKey {
2207 fn default() -> Self {
2208 Self(Path::new("").into())
2209 }
2210}
2211
2212impl<'a> sum_tree::Dimension<'a, EntrySummary> for PathKey {
2213 fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
2214 self.0 = summary.max_path.clone();
2215 }
2216}
2217
/// Scans a worktree's directory tree on background threads, keeping the
/// shared snapshot up to date and reporting progress over `notify`.
struct BackgroundScanner {
    fs: Arc<dyn Fs>,
    // Shared with the foreground worktree (see `LocalWorktree::background_snapshot`).
    snapshot: Arc<Mutex<LocalSnapshot>>,
    // Reports scan state transitions (initializing / updating / idle / error).
    notify: UnboundedSender<ScanState>,
    executor: Arc<executor::Background>,
}
2224
2225impl BackgroundScanner {
2226 fn new(
2227 snapshot: Arc<Mutex<LocalSnapshot>>,
2228 notify: UnboundedSender<ScanState>,
2229 fs: Arc<dyn Fs>,
2230 executor: Arc<executor::Background>,
2231 ) -> Self {
2232 Self {
2233 fs,
2234 snapshot,
2235 notify,
2236 executor,
2237 }
2238 }
2239
2240 fn abs_path(&self) -> Arc<Path> {
2241 self.snapshot.lock().abs_path.clone()
2242 }
2243
2244 fn snapshot(&self) -> LocalSnapshot {
2245 self.snapshot.lock().clone()
2246 }
2247
    /// Main loop of the scanner: perform the initial scan, then apply batches
    /// of filesystem events until the receiving end of `notify` is dropped.
    async fn run(mut self, events_rx: impl Stream<Item = Vec<fsevent::Event>>) {
        if self.notify.unbounded_send(ScanState::Initializing).is_err() {
            return;
        }

        // NOTE(review): after reporting a scan error we still fall through to
        // Idle and keep processing events — confirm this is intentional.
        if let Err(err) = self.scan_dirs().await {
            if self
                .notify
                .unbounded_send(ScanState::Err(Arc::new(err)))
                .is_err()
            {
                return;
            }
        }

        if self.notify.unbounded_send(ScanState::Idle).is_err() {
            return;
        }

        futures::pin_mut!(events_rx);

        while let Some(mut events) = events_rx.next().await {
            // Coalesce every event batch that is already ready, so one
            // Updating/Idle cycle covers them all.
            while let Poll::Ready(Some(additional_events)) = futures::poll!(events_rx.next()) {
                events.extend(additional_events);
            }

            if self.notify.unbounded_send(ScanState::Updating).is_err() {
                break;
            }

            if !self.process_events(events).await {
                break;
            }

            if self.notify.unbounded_send(ScanState::Idle).is_err() {
                break;
            }
        }
    }
2287
    /// Perform the initial recursive scan of the worktree root.
    ///
    /// Seeds gitignores from directories above the root, computes the root's
    /// ignore stack, then fans directory scanning out over one worker per
    /// CPU, fed through an unbounded channel of `ScanJob`s.
    async fn scan_dirs(&mut self) -> Result<()> {
        let root_char_bag;
        let root_abs_path;
        let root_inode;
        let is_dir;
        let next_entry_id;
        {
            // Capture the root metadata under the lock, releasing it before
            // doing any IO below.
            let snapshot = self.snapshot.lock();
            root_char_bag = snapshot.root_char_bag;
            root_abs_path = snapshot.abs_path.clone();
            root_inode = snapshot.root_entry().map(|e| e.inode);
            is_dir = snapshot.root_entry().map_or(false, |e| e.is_dir());
            next_entry_id = snapshot.next_entry_id.clone();
        };

        // Populate ignores above the root.
        for ancestor in root_abs_path.ancestors().skip(1) {
            if let Ok(ignore) = build_gitignore(&ancestor.join(&*GITIGNORE), self.fs.as_ref()).await
            {
                self.snapshot
                    .lock()
                    .ignores_by_parent_abs_path
                    .insert(ancestor.into(), (ignore.into(), 0));
            }
        }

        let ignore_stack = {
            let mut snapshot = self.snapshot.lock();
            let ignore_stack = snapshot.ignore_stack_for_abs_path(&root_abs_path, true);
            // If the root itself is ignored, record that on its entry.
            if ignore_stack.is_all() {
                if let Some(mut root_entry) = snapshot.root_entry().cloned() {
                    root_entry.is_ignored = true;
                    snapshot.insert_entry(root_entry, self.fs.as_ref());
                }
            }
            ignore_stack
        };

        if is_dir {
            let path: Arc<Path> = Arc::from(Path::new(""));
            let mut ancestor_inodes = TreeSet::default();
            if let Some(root_inode) = root_inode {
                ancestor_inodes.insert(root_inode);
            }

            // Workers push newly discovered subdirectories back onto this
            // same channel; dropping our sender lets the queue drain to
            // completion once all jobs finish.
            let (tx, rx) = channel::unbounded();
            self.executor
                .block(tx.send(ScanJob {
                    abs_path: root_abs_path.to_path_buf(),
                    path,
                    ignore_stack,
                    ancestor_inodes,
                    scan_queue: tx.clone(),
                }))
                .unwrap();
            drop(tx);

            self.executor
                .scoped(|scope| {
                    for _ in 0..self.executor.num_cpus() {
                        scope.spawn(async {
                            while let Ok(job) = rx.recv().await {
                                if let Err(err) = self
                                    .scan_dir(root_char_bag, next_entry_id.clone(), &job)
                                    .await
                                {
                                    log::error!("error scanning {:?}: {}", job.abs_path, err);
                                }
                            }
                        });
                    }
                })
                .await;
        }

        Ok(())
    }
2365
    /// Scans a single directory (one `ScanJob`), recording its children in
    /// the snapshot and enqueueing a new job for each subdirectory found.
    async fn scan_dir(
        &self,
        root_char_bag: CharBag,
        next_entry_id: Arc<AtomicUsize>,
        job: &ScanJob,
    ) -> Result<()> {
        let mut new_entries: Vec<Entry> = Vec::new();
        let mut new_jobs: Vec<ScanJob> = Vec::new();
        let mut ignore_stack = job.ignore_stack.clone();
        // The `.gitignore` found in this directory (if any); passed to
        // `populate_dir` so it is recorded alongside the entries.
        let mut new_ignore = None;

        let mut child_paths = self.fs.read_dir(&job.abs_path).await?;
        while let Some(child_abs_path) = child_paths.next().await {
            let child_abs_path = match child_abs_path {
                Ok(child_abs_path) => child_abs_path,
                Err(error) => {
                    log::error!("error processing entry {:?}", error);
                    continue;
                }
            };
            let child_name = child_abs_path.file_name().unwrap();
            let child_path: Arc<Path> = job.path.join(child_name).into();
            let child_metadata = match self.fs.metadata(&child_abs_path).await {
                Ok(Some(metadata)) => metadata,
                // The child disappeared between listing and stat; skip it.
                Ok(None) => continue,
                Err(err) => {
                    log::error!("error processing {:?}: {:?}", child_abs_path, err);
                    continue;
                }
            };

            // If we find a .gitignore, add it to the stack of ignores used to determine which paths are ignored
            if child_name == *GITIGNORE {
                match build_gitignore(&child_abs_path, self.fs.as_ref()).await {
                    Ok(ignore) => {
                        let ignore = Arc::new(ignore);
                        ignore_stack =
                            ignore_stack.append(job.abs_path.as_path().into(), ignore.clone());
                        new_ignore = Some(ignore);
                    }
                    Err(error) => {
                        log::error!(
                            "error loading .gitignore file {:?} - {:?}",
                            child_name,
                            error
                        );
                    }
                }

                // Update the ignore status of any child entries we've already
                // processed to reflect the ignore file in the current
                // directory. Because `.gitignore` starts with a `.`, it
                // typically appears early in the listing, so there should
                // rarely be many such entries. Update the ignore stack
                // associated with any queued jobs as well.
                let mut new_jobs = new_jobs.iter_mut();
                for entry in &mut new_entries {
                    let entry_abs_path = self.abs_path().join(&entry.path);
                    entry.is_ignored =
                        ignore_stack.is_abs_path_ignored(&entry_abs_path, entry.is_dir());
                    // NOTE(review): this pairing assumes every directory
                    // entry processed so far has a corresponding job in
                    // `new_jobs`; a directory skipped by the symlink-cycle
                    // check below would misalign it — worth confirming.
                    if entry.is_dir() {
                        new_jobs.next().unwrap().ignore_stack = if entry.is_ignored {
                            IgnoreStack::all()
                        } else {
                            ignore_stack.clone()
                        };
                    }
                }
            }

            let mut child_entry = Entry::new(
                child_path.clone(),
                &child_metadata,
                &next_entry_id,
                root_char_bag,
            );

            if child_entry.is_dir() {
                let is_ignored = ignore_stack.is_abs_path_ignored(&child_abs_path, true);
                child_entry.is_ignored = is_ignored;

                // Skip directories whose inode already appears on the
                // ancestor chain; this breaks symlink cycles.
                if !job.ancestor_inodes.contains(&child_entry.inode) {
                    let mut ancestor_inodes = job.ancestor_inodes.clone();
                    ancestor_inodes.insert(child_entry.inode);
                    new_jobs.push(ScanJob {
                        abs_path: child_abs_path,
                        path: child_path,
                        ignore_stack: if is_ignored {
                            IgnoreStack::all()
                        } else {
                            ignore_stack.clone()
                        },
                        ancestor_inodes,
                        scan_queue: job.scan_queue.clone(),
                    });
                }
            } else {
                child_entry.is_ignored = ignore_stack.is_abs_path_ignored(&child_abs_path, false);
            }

            new_entries.push(child_entry);
        }

        // Record all children in one snapshot update, then enqueue the
        // subdirectory jobs.
        self.snapshot.lock().populate_dir(
            job.path.clone(),
            new_entries,
            new_ignore,
            self.fs.as_ref(),
        );
        for new_job in new_jobs {
            job.scan_queue.send(new_job).await.unwrap();
        }

        Ok(())
    }
2479
    /// Processes a batch of filesystem events, updating the snapshot to
    /// reflect what is now on disk. Returns `false` if the worktree root can
    /// no longer be canonicalized (e.g. it was deleted), signalling the
    /// caller to stop.
    async fn process_events(&mut self, mut events: Vec<fsevent::Event>) -> bool {
        // Sort events by path and drop any event whose path lies inside a
        // preceding event's path: rescanning the ancestor covers it.
        events.sort_unstable_by(|a, b| a.path.cmp(&b.path));
        events.dedup_by(|a, b| a.path.starts_with(&b.path));

        let root_char_bag;
        let root_abs_path;
        let next_entry_id;
        {
            let snapshot = self.snapshot.lock();
            root_char_bag = snapshot.root_char_bag;
            root_abs_path = snapshot.abs_path.clone();
            next_entry_id = snapshot.next_entry_id.clone();
        }

        let root_canonical_path = if let Ok(path) = self.fs.canonicalize(&root_abs_path).await {
            path
        } else {
            return false;
        };
        // Fetch metadata for all event paths concurrently; results stay
        // index-aligned with `events`.
        let metadata = futures::future::join_all(
            events
                .iter()
                .map(|event| self.fs.metadata(&event.path))
                .collect::<Vec<_>>(),
        )
        .await;

        // Hold the snapshot lock while clearing and re-inserting the root entries
        // for each event. This way, the snapshot is not observable to the foreground
        // thread while this operation is in-progress.
        let (scan_queue_tx, scan_queue_rx) = channel::unbounded();
        {
            let mut snapshot = self.snapshot.lock();
            snapshot.scan_id += 1;
            // First remove every affected path, then re-insert the ones that
            // still exist on disk.
            for event in &events {
                if let Ok(path) = event.path.strip_prefix(&root_canonical_path) {
                    snapshot.remove_path(path);
                }
            }

            for (event, metadata) in events.into_iter().zip(metadata.into_iter()) {
                let path: Arc<Path> = match event.path.strip_prefix(&root_canonical_path) {
                    Ok(path) => Arc::from(path.to_path_buf()),
                    Err(_) => {
                        log::error!(
                            "unexpected event {:?} for root path {:?}",
                            event.path,
                            root_canonical_path
                        );
                        continue;
                    }
                };
                let abs_path = root_abs_path.join(&path);

                match metadata {
                    // The path still exists on disk: re-insert its entry.
                    Ok(Some(metadata)) => {
                        let ignore_stack =
                            snapshot.ignore_stack_for_abs_path(&abs_path, metadata.is_dir);
                        let mut fs_entry = Entry::new(
                            path.clone(),
                            &metadata,
                            snapshot.next_entry_id.as_ref(),
                            snapshot.root_char_bag,
                        );
                        fs_entry.is_ignored = ignore_stack.is_all();
                        snapshot.insert_entry(fs_entry, self.fs.as_ref());

                        // A change inside a `.git` directory: reload that
                        // repository's index and bump its scan id.
                        let scan_id = snapshot.scan_id;
                        if let Some(repo) = snapshot.in_dot_git(&path) {
                            repo.repo.lock().reload_index();
                            repo.scan_id = scan_id;
                        }

                        // Directories need a full rescan; queue a job unless
                        // that would revisit an ancestor inode (symlink
                        // cycle).
                        let mut ancestor_inodes = snapshot.ancestor_inodes_for_path(&path);
                        if metadata.is_dir && !ancestor_inodes.contains(&metadata.inode) {
                            ancestor_inodes.insert(metadata.inode);
                            self.executor
                                .block(scan_queue_tx.send(ScanJob {
                                    abs_path,
                                    path,
                                    ignore_stack,
                                    ancestor_inodes,
                                    scan_queue: scan_queue_tx.clone(),
                                }))
                                .unwrap();
                        }
                    }
                    // The path is gone; its entry was already removed above.
                    Ok(None) => {}
                    Err(err) => {
                        // TODO - create a special 'error' entry in the entries tree to mark this
                        log::error!("error reading file on event {:?}", err);
                    }
                }
            }
            // Drop the original sender so the queue closes once all queued
            // jobs (holding clones) have finished.
            drop(scan_queue_tx);
        }

        // Scan any directories that were created as part of this event batch.
        self.executor
            .scoped(|scope| {
                for _ in 0..self.executor.num_cpus() {
                    scope.spawn(async {
                        while let Ok(job) = scan_queue_rx.recv().await {
                            if let Err(err) = self
                                .scan_dir(root_char_bag, next_entry_id.clone(), &job)
                                .await
                            {
                                log::error!("error scanning {:?}: {}", job.abs_path, err);
                            }
                        }
                    });
                }
            })
            .await;

        // Attempt to detect renames only over a single batch of file-system events.
        self.snapshot.lock().removed_entry_ids.clear();

        self.update_ignore_statuses().await;
        self.update_git_repositories();
        true
    }
2602
    /// After a batch of events, recomputes `is_ignored` for entries affected
    /// by added, changed, or deleted `.gitignore` files.
    async fn update_ignore_statuses(&self) {
        // Work against a local copy obtained from `self.snapshot()`; edits
        // are written back to the shared snapshot as they are computed.
        let mut snapshot = self.snapshot();

        let mut ignores_to_update = Vec::new();
        let mut ignores_to_delete = Vec::new();
        for (parent_abs_path, (_, scan_id)) in &snapshot.ignores_by_parent_abs_path {
            if let Ok(parent_path) = parent_abs_path.strip_prefix(&snapshot.abs_path) {
                // An ignore file touched in the current scan means its
                // directory's subtree needs its statuses recomputed.
                if *scan_id == snapshot.scan_id && snapshot.entry_for_path(parent_path).is_some() {
                    ignores_to_update.push(parent_abs_path.clone());
                }

                // If the `.gitignore` file itself no longer has an entry,
                // forget its parsed rules.
                let ignore_path = parent_path.join(&*GITIGNORE);
                if snapshot.entry_for_path(ignore_path).is_none() {
                    ignores_to_delete.push(parent_abs_path.clone());
                }
            }
        }

        for parent_abs_path in ignores_to_delete {
            snapshot.ignores_by_parent_abs_path.remove(&parent_abs_path);
            self.snapshot
                .lock()
                .ignores_by_parent_abs_path
                .remove(&parent_abs_path);
        }

        let (ignore_queue_tx, ignore_queue_rx) = channel::unbounded();
        ignores_to_update.sort_unstable();
        let mut ignores_to_update = ignores_to_update.into_iter().peekable();
        while let Some(parent_abs_path) = ignores_to_update.next() {
            // Skip paths beneath the one just queued: updating a directory
            // already recurses into its descendants.
            while ignores_to_update
                .peek()
                .map_or(false, |p| p.starts_with(&parent_abs_path))
            {
                ignores_to_update.next().unwrap();
            }

            let ignore_stack = snapshot.ignore_stack_for_abs_path(&parent_abs_path, true);
            ignore_queue_tx
                .send(UpdateIgnoreStatusJob {
                    abs_path: parent_abs_path,
                    ignore_stack,
                    ignore_queue: ignore_queue_tx.clone(),
                })
                .await
                .unwrap();
        }
        // Close the queue once all queued jobs (holding clones) finish.
        drop(ignore_queue_tx);

        // Drain the queue with one worker per CPU; jobs enqueue child jobs.
        self.executor
            .scoped(|scope| {
                for _ in 0..self.executor.num_cpus() {
                    scope.spawn(async {
                        while let Ok(job) = ignore_queue_rx.recv().await {
                            self.update_ignore_status(job, &snapshot).await;
                        }
                    });
                }
            })
            .await;
    }
2664
2665 fn update_git_repositories(&self) {
2666 let mut snapshot = self.snapshot.lock();
2667 let mut git_repositories = mem::take(&mut snapshot.git_repositories);
2668 git_repositories.retain(|repo| snapshot.entry_for_path(&repo.git_dir_path).is_some());
2669 snapshot.git_repositories = git_repositories;
2670 }
2671
    /// Recomputes `is_ignored` for the direct children of `job.abs_path`,
    /// writing any changes back to the shared snapshot and enqueueing a
    /// follow-up job for each child directory.
    async fn update_ignore_status(&self, job: UpdateIgnoreStatusJob, snapshot: &LocalSnapshot) {
        let mut ignore_stack = job.ignore_stack;
        // If this directory has its own `.gitignore`, it applies to the
        // children examined below.
        if let Some((ignore, _)) = snapshot.ignores_by_parent_abs_path.get(&job.abs_path) {
            ignore_stack = ignore_stack.append(job.abs_path.clone(), ignore.clone());
        }

        let mut entries_by_id_edits = Vec::new();
        let mut entries_by_path_edits = Vec::new();
        let path = job.abs_path.strip_prefix(&snapshot.abs_path).unwrap();
        for mut entry in snapshot.child_entries(path).cloned() {
            let was_ignored = entry.is_ignored;
            let abs_path = self.abs_path().join(&entry.path);
            entry.is_ignored = ignore_stack.is_abs_path_ignored(&abs_path, entry.is_dir());
            if entry.is_dir() {
                let child_ignore_stack = if entry.is_ignored {
                    IgnoreStack::all()
                } else {
                    ignore_stack.clone()
                };
                // Recurse into the subdirectory via the shared queue.
                job.ignore_queue
                    .send(UpdateIgnoreStatusJob {
                        abs_path: abs_path.into(),
                        ignore_stack: child_ignore_stack,
                        ignore_queue: job.ignore_queue.clone(),
                    })
                    .await
                    .unwrap();
            }

            // Only write back entries whose ignore status actually changed.
            if entry.is_ignored != was_ignored {
                let mut path_entry = snapshot.entries_by_id.get(&entry.id, &()).unwrap().clone();
                path_entry.scan_id = snapshot.scan_id;
                path_entry.is_ignored = entry.is_ignored;
                entries_by_id_edits.push(Edit::Insert(path_entry));
                entries_by_path_edits.push(Edit::Insert(entry));
            }
        }

        let mut snapshot = self.snapshot.lock();
        snapshot.entries_by_path.edit(entries_by_path_edits, &());
        snapshot.entries_by_id.edit(entries_by_id_edits, &());
    }
2714}
2715
2716fn char_bag_for_path(root_char_bag: CharBag, path: &Path) -> CharBag {
2717 let mut result = root_char_bag;
2718 result.extend(
2719 path.to_string_lossy()
2720 .chars()
2721 .map(|c| c.to_ascii_lowercase()),
2722 );
2723 result
2724}
2725
/// A unit of work for the background scanner: scan one directory's children.
struct ScanJob {
    /// Absolute path of the directory to scan.
    abs_path: PathBuf,
    /// The directory's path relative to the worktree root.
    path: Arc<Path>,
    /// Ignore rules in effect for this directory's children.
    ignore_stack: Arc<IgnoreStack>,
    /// Queue on which jobs for discovered subdirectories are enqueued.
    scan_queue: Sender<ScanJob>,
    /// Inodes of this directory's ancestors, used to detect symlink cycles.
    ancestor_inodes: TreeSet<u64>,
}
2733
/// A unit of work for recomputing ignore statuses beneath one directory.
struct UpdateIgnoreStatusJob {
    /// Absolute path of the directory whose children are re-evaluated.
    abs_path: Arc<Path>,
    /// Ignore rules in effect at this directory.
    ignore_stack: Arc<IgnoreStack>,
    /// Queue on which jobs for child directories are enqueued.
    ignore_queue: Sender<UpdateIgnoreStatusJob>,
}
2739
/// Test-support extensions for worktree model handles.
pub trait WorktreeHandle {
    /// Waits until all pending file-system events have been processed by the
    /// worktree's background scanner.
    #[cfg(any(test, feature = "test-support"))]
    fn flush_fs_events<'a>(
        &self,
        cx: &'a gpui::TestAppContext,
    ) -> futures::future::LocalBoxFuture<'a, ()>;
}
2747
impl WorktreeHandle for ModelHandle<Worktree> {
    // The worktree's FS event stream sometimes delivers "redundant" events for FS changes that
    // occurred before the worktree was constructed. These events can cause the worktree to perform
    // extra directory scans, and emit extra scan-state notifications.
    //
    // This function mutates the worktree's directory and waits for those mutations to be picked up,
    // to ensure that all redundant FS events have already been processed.
    #[cfg(any(test, feature = "test-support"))]
    fn flush_fs_events<'a>(
        &self,
        cx: &'a gpui::TestAppContext,
    ) -> futures::future::LocalBoxFuture<'a, ()> {
        use smol::future::FutureExt;

        // Create and then delete a sentinel file, waiting for each change to
        // become visible in the worktree's snapshot.
        let filename = "fs-event-sentinel";
        let tree = self.clone();
        let (fs, root_path) = self.read_with(cx, |tree, _| {
            let tree = tree.as_local().unwrap();
            (tree.fs.clone(), tree.abs_path().clone())
        });

        async move {
            fs.create_file(&root_path.join(filename), Default::default())
                .await
                .unwrap();
            tree.condition(cx, |tree, _| tree.entry_for_path(filename).is_some())
                .await;

            fs.remove_file(&root_path.join(filename), Default::default())
                .await
                .unwrap();
            tree.condition(cx, |tree, _| tree.entry_for_path(filename).is_none())
                .await;

            // By the time the sentinel changes are visible, earlier events
            // have been processed; finally wait for the scan to settle.
            cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
                .await;
        }
        .boxed_local()
    }
}
2788
/// A `sum_tree` dimension tracking how many entries (of various kinds)
/// precede a position in the entry tree.
#[derive(Clone, Debug)]
struct TraversalProgress<'a> {
    /// The greatest entry path seen so far.
    max_path: &'a Path,
    /// Total number of entries.
    count: usize,
    /// Entries that are not ignored.
    visible_count: usize,
    /// Entries that are files.
    file_count: usize,
    /// File entries that are not ignored.
    visible_file_count: usize,
}
2797
2798impl<'a> TraversalProgress<'a> {
2799 fn count(&self, include_dirs: bool, include_ignored: bool) -> usize {
2800 match (include_ignored, include_dirs) {
2801 (true, true) => self.count,
2802 (true, false) => self.file_count,
2803 (false, true) => self.visible_count,
2804 (false, false) => self.visible_file_count,
2805 }
2806 }
2807}
2808
impl<'a> sum_tree::Dimension<'a, EntrySummary> for TraversalProgress<'a> {
    /// Accumulates a summary: counts add up, and `max_path` advances to the
    /// summary's maximum path.
    fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
        self.max_path = summary.max_path.as_ref();
        self.count += summary.count;
        self.visible_count += summary.visible_count;
        self.file_count += summary.file_count;
        self.visible_file_count += summary.visible_file_count;
    }
}
2818
impl<'a> Default for TraversalProgress<'a> {
    /// Progress at the very start of the tree: empty path, zero counts.
    fn default() -> Self {
        Self {
            max_path: Path::new(""),
            count: 0,
            visible_count: 0,
            file_count: 0,
            visible_file_count: 0,
        }
    }
}
2830
/// A cursor over the worktree's entries, optionally filtering out
/// directories and/or ignored entries.
pub struct Traversal<'a> {
    cursor: sum_tree::Cursor<'a, Entry, TraversalProgress<'a>>,
    // Whether ignored entries are yielded.
    include_ignored: bool,
    // Whether directory entries are yielded.
    include_dirs: bool,
}
2836
impl<'a> Traversal<'a> {
    /// Steps forward past the current entry. Returns the result of the
    /// underlying seek.
    pub fn advance(&mut self) -> bool {
        self.advance_to_offset(self.offset() + 1)
    }

    /// Seeks forward to the entry at `offset`, counting only entries that
    /// match this traversal's filters.
    pub fn advance_to_offset(&mut self, offset: usize) -> bool {
        self.cursor.seek_forward(
            &TraversalTarget::Count {
                count: offset,
                include_dirs: self.include_dirs,
                include_ignored: self.include_ignored,
            },
            Bias::Right,
            &(),
        )
    }

    /// Skips the current entry's descendants, stopping at the next entry
    /// that is not beneath it and matches the filters. Returns whether such
    /// an entry was found.
    pub fn advance_to_sibling(&mut self) -> bool {
        while let Some(entry) = self.cursor.item() {
            self.cursor.seek_forward(
                &TraversalTarget::PathSuccessor(&entry.path),
                Bias::Left,
                &(),
            );
            if let Some(entry) = self.cursor.item() {
                if (self.include_dirs || !entry.is_dir())
                    && (self.include_ignored || !entry.is_ignored)
                {
                    return true;
                }
            }
        }
        false
    }

    /// The entry currently under the cursor, if any.
    pub fn entry(&self) -> Option<&'a Entry> {
        self.cursor.item()
    }

    /// The current position, counted in entries matching the filters.
    pub fn offset(&self) -> usize {
        self.cursor
            .start()
            .count(self.include_dirs, self.include_ignored)
    }
}
2882
2883impl<'a> Iterator for Traversal<'a> {
2884 type Item = &'a Entry;
2885
2886 fn next(&mut self) -> Option<Self::Item> {
2887 if let Some(item) = self.entry() {
2888 self.advance();
2889 Some(item)
2890 } else {
2891 None
2892 }
2893 }
2894}
2895
/// A seek target within the entry tree, used by `Traversal`.
#[derive(Debug)]
enum TraversalTarget<'a> {
    /// Seek to the given path.
    Path(&'a Path),
    /// Seek to the first entry that is not beneath the given path.
    PathSuccessor(&'a Path),
    /// Seek to the nth entry, counting with the given filters.
    Count {
        count: usize,
        include_ignored: bool,
        include_dirs: bool,
    },
}
2906
2907impl<'a, 'b> SeekTarget<'a, EntrySummary, TraversalProgress<'a>> for TraversalTarget<'b> {
2908 fn cmp(&self, cursor_location: &TraversalProgress<'a>, _: &()) -> Ordering {
2909 match self {
2910 TraversalTarget::Path(path) => path.cmp(&cursor_location.max_path),
2911 TraversalTarget::PathSuccessor(path) => {
2912 if !cursor_location.max_path.starts_with(path) {
2913 Ordering::Equal
2914 } else {
2915 Ordering::Greater
2916 }
2917 }
2918 TraversalTarget::Count {
2919 count,
2920 include_dirs,
2921 include_ignored,
2922 } => Ord::cmp(
2923 count,
2924 &cursor_location.count(*include_dirs, *include_ignored),
2925 ),
2926 }
2927 }
2928}
2929
/// Iterates over the immediate children of `parent_path`, skipping their
/// descendants.
struct ChildEntriesIter<'a> {
    parent_path: &'a Path,
    traversal: Traversal<'a>,
}
2934
2935impl<'a> Iterator for ChildEntriesIter<'a> {
2936 type Item = &'a Entry;
2937
2938 fn next(&mut self) -> Option<Self::Item> {
2939 if let Some(item) = self.traversal.entry() {
2940 if item.path.starts_with(&self.parent_path) {
2941 self.traversal.advance_to_sibling();
2942 return Some(item);
2943 }
2944 }
2945 None
2946 }
2947}
2948
impl<'a> From<&'a Entry> for proto::Entry {
    /// Serializes an entry for transmission to collaborators. The path is
    /// sent as its raw OS-string bytes (a Unix-only representation).
    fn from(entry: &'a Entry) -> Self {
        Self {
            id: entry.id.to_proto(),
            is_dir: entry.is_dir(),
            path: entry.path.as_os_str().as_bytes().to_vec(),
            inode: entry.inode,
            mtime: Some(entry.mtime.into()),
            is_symlink: entry.is_symlink,
            is_ignored: entry.is_ignored,
        }
    }
}
2962
2963impl<'a> TryFrom<(&'a CharBag, proto::Entry)> for Entry {
2964 type Error = anyhow::Error;
2965
2966 fn try_from((root_char_bag, entry): (&'a CharBag, proto::Entry)) -> Result<Self> {
2967 if let Some(mtime) = entry.mtime {
2968 let kind = if entry.is_dir {
2969 EntryKind::Dir
2970 } else {
2971 let mut char_bag = *root_char_bag;
2972 char_bag.extend(
2973 String::from_utf8_lossy(&entry.path)
2974 .chars()
2975 .map(|c| c.to_ascii_lowercase()),
2976 );
2977 EntryKind::File(char_bag)
2978 };
2979 let path: Arc<Path> = PathBuf::from(OsString::from_vec(entry.path)).into();
2980 Ok(Entry {
2981 id: ProjectEntryId::from_proto(entry.id),
2982 kind,
2983 path,
2984 inode: entry.inode,
2985 mtime: mtime.into(),
2986 is_symlink: entry.is_symlink,
2987 is_ignored: entry.is_ignored,
2988 })
2989 } else {
2990 Err(anyhow!(
2991 "missing mtime in remote worktree entry {:?}",
2992 entry.path
2993 ))
2994 }
2995 }
2996}
2997
/// Sends a worktree update to the server, splitting it into chunks of
/// bounded size so no single message grows too large.
async fn send_worktree_update(client: &Arc<Client>, update: proto::UpdateWorktree) -> Result<()> {
    // Use a tiny chunk size in tests so the chunking logic is exercised.
    #[cfg(any(test, feature = "test-support"))]
    const MAX_CHUNK_SIZE: usize = 2;
    #[cfg(not(any(test, feature = "test-support")))]
    const MAX_CHUNK_SIZE: usize = 256;

    for update in proto::split_worktree_update(update, MAX_CHUNK_SIZE) {
        client.request(update).await?;
    }

    Ok(())
}
3010
3011#[cfg(test)]
3012mod tests {
3013 use super::*;
3014 use anyhow::Result;
3015 use client::test::FakeHttpClient;
3016 use fs::repository::FakeGitRepository;
3017 use fs::{FakeFs, RealFs};
3018 use gpui::{executor::Deterministic, TestAppContext};
3019 use rand::prelude::*;
3020 use serde_json::json;
3021 use std::{
3022 env,
3023 fmt::Write,
3024 time::{SystemTime, UNIX_EPOCH},
3025 };
3026
3027 use util::test::temp_tree;
3028
    // Verifies that `entries(false)` omits gitignored entries while
    // `entries(true)` includes them.
    #[gpui::test]
    async fn test_traversal(cx: &mut TestAppContext) {
        let fs = FakeFs::new(cx.background());
        fs.insert_tree(
            "/root",
            json!({
                ".gitignore": "a/b\n",
                "a": {
                    "b": "",
                    "c": "",
                }
            }),
        )
        .await;

        let http_client = FakeHttpClient::with_404_response();
        let client = cx.read(|cx| Client::new(http_client, cx));

        let tree = Worktree::local(
            client,
            Arc::from(Path::new("/root")),
            true,
            fs,
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;

        tree.read_with(cx, |tree, _| {
            assert_eq!(
                tree.entries(false)
                    .map(|entry| entry.path.as_ref())
                    .collect::<Vec<_>>(),
                vec![
                    Path::new(""),
                    Path::new(".gitignore"),
                    Path::new("a"),
                    Path::new("a/c"),
                ]
            );
            assert_eq!(
                tree.entries(true)
                    .map(|entry| entry.path.as_ref())
                    .collect::<Vec<_>>(),
                vec![
                    Path::new(""),
                    Path::new(".gitignore"),
                    Path::new("a"),
                    Path::new("a/b"),
                    Path::new("a/c"),
                ]
            );
        })
    }
3086
    // Verifies that symlink cycles don't cause endless scanning (cyclic
    // links appear as entries but aren't descended into), and that renaming
    // a cyclic link is picked up by the rescan.
    #[gpui::test(iterations = 10)]
    async fn test_circular_symlinks(executor: Arc<Deterministic>, cx: &mut TestAppContext) {
        let fs = FakeFs::new(cx.background());
        fs.insert_tree(
            "/root",
            json!({
                "lib": {
                    "a": {
                        "a.txt": ""
                    },
                    "b": {
                        "b.txt": ""
                    }
                }
            }),
        )
        .await;
        fs.insert_symlink("/root/lib/a/lib", "..".into()).await;
        fs.insert_symlink("/root/lib/b/lib", "..".into()).await;

        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
        let tree = Worktree::local(
            client,
            Arc::from(Path::new("/root")),
            true,
            fs.clone(),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();

        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;

        tree.read_with(cx, |tree, _| {
            assert_eq!(
                tree.entries(false)
                    .map(|entry| entry.path.as_ref())
                    .collect::<Vec<_>>(),
                vec![
                    Path::new(""),
                    Path::new("lib"),
                    Path::new("lib/a"),
                    Path::new("lib/a/a.txt"),
                    Path::new("lib/a/lib"),
                    Path::new("lib/b"),
                    Path::new("lib/b/b.txt"),
                    Path::new("lib/b/lib"),
                ]
            );
        });

        fs.rename(
            Path::new("/root/lib/a/lib"),
            Path::new("/root/lib/a/lib-2"),
            Default::default(),
        )
        .await
        .unwrap();
        executor.run_until_parked();
        tree.read_with(cx, |tree, _| {
            assert_eq!(
                tree.entries(false)
                    .map(|entry| entry.path.as_ref())
                    .collect::<Vec<_>>(),
                vec![
                    Path::new(""),
                    Path::new("lib"),
                    Path::new("lib/a"),
                    Path::new("lib/a/a.txt"),
                    Path::new("lib/a/lib-2"),
                    Path::new("lib/b"),
                    Path::new("lib/b/b.txt"),
                    Path::new("lib/b/lib"),
                ]
            );
        });
    }
3166
    // Verifies that ignore statuses (from both the worktree's own
    // `.gitignore` and an ancestor's) are applied to preexisting entries and
    // to files created after the initial scan.
    #[gpui::test]
    async fn test_rescan_with_gitignore(cx: &mut TestAppContext) {
        let parent_dir = temp_tree(json!({
            ".gitignore": "ancestor-ignored-file1\nancestor-ignored-file2\n",
            "tree": {
                ".git": {},
                ".gitignore": "ignored-dir\n",
                "tracked-dir": {
                    "tracked-file1": "",
                    "ancestor-ignored-file1": "",
                },
                "ignored-dir": {
                    "ignored-file1": ""
                }
            }
        }));
        let dir = parent_dir.path().join("tree");

        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));

        let tree = Worktree::local(
            client,
            dir.as_path(),
            true,
            Arc::new(RealFs),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;
        tree.flush_fs_events(cx).await;
        cx.read(|cx| {
            let tree = tree.read(cx);
            assert!(
                !tree
                    .entry_for_path("tracked-dir/tracked-file1")
                    .unwrap()
                    .is_ignored
            );
            assert!(
                tree.entry_for_path("tracked-dir/ancestor-ignored-file1")
                    .unwrap()
                    .is_ignored
            );
            assert!(
                tree.entry_for_path("ignored-dir/ignored-file1")
                    .unwrap()
                    .is_ignored
            );
        });

        // Files created after the initial scan should get the same statuses.
        std::fs::write(dir.join("tracked-dir/tracked-file2"), "").unwrap();
        std::fs::write(dir.join("tracked-dir/ancestor-ignored-file2"), "").unwrap();
        std::fs::write(dir.join("ignored-dir/ignored-file2"), "").unwrap();
        tree.flush_fs_events(cx).await;
        cx.read(|cx| {
            let tree = tree.read(cx);
            assert!(
                !tree
                    .entry_for_path("tracked-dir/tracked-file2")
                    .unwrap()
                    .is_ignored
            );
            assert!(
                tree.entry_for_path("tracked-dir/ancestor-ignored-file2")
                    .unwrap()
                    .is_ignored
            );
            assert!(
                tree.entry_for_path("ignored-dir/ignored-file2")
                    .unwrap()
                    .is_ignored
            );
            assert!(tree.entry_for_path(".git").unwrap().is_ignored);
        });
    }
3245
    // Verifies that paths resolve to the nearest enclosing git repository,
    // that changes inside `.git` bump the repository's scan id, and that
    // removing `.git` drops the repository.
    #[gpui::test]
    async fn test_git_repository_for_path(cx: &mut TestAppContext) {
        let root = temp_tree(json!({
            "dir1": {
                ".git": {},
                "deps": {
                    "dep1": {
                        ".git": {},
                        "src": {
                            "a.txt": ""
                        }
                    }
                },
                "src": {
                    "b.txt": ""
                }
            },
            "c.txt": "",
        }));

        let http_client = FakeHttpClient::with_404_response();
        let client = cx.read(|cx| Client::new(http_client, cx));
        let tree = Worktree::local(
            client,
            root.path(),
            true,
            Arc::new(RealFs),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();

        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;
        tree.flush_fs_events(cx).await;

        tree.read_with(cx, |tree, _cx| {
            let tree = tree.as_local().unwrap();

            assert!(tree.repo_for("c.txt".as_ref()).is_none());

            let repo = tree.repo_for("dir1/src/b.txt".as_ref()).unwrap();
            assert_eq!(repo.content_path.as_ref(), Path::new("dir1"));
            assert_eq!(repo.git_dir_path.as_ref(), Path::new("dir1/.git"));

            let repo = tree.repo_for("dir1/deps/dep1/src/a.txt".as_ref()).unwrap();
            assert_eq!(repo.content_path.as_ref(), Path::new("dir1/deps/dep1"));
            assert_eq!(repo.git_dir_path.as_ref(), Path::new("dir1/deps/dep1/.git"),);
        });

        let original_scan_id = tree.read_with(cx, |tree, _cx| {
            let tree = tree.as_local().unwrap();
            tree.repo_for("dir1/src/b.txt".as_ref()).unwrap().scan_id
        });

        std::fs::write(root.path().join("dir1/.git/random_new_file"), "hello").unwrap();
        tree.flush_fs_events(cx).await;

        tree.read_with(cx, |tree, _cx| {
            let tree = tree.as_local().unwrap();
            let new_scan_id = tree.repo_for("dir1/src/b.txt".as_ref()).unwrap().scan_id;
            assert_ne!(
                original_scan_id, new_scan_id,
                "original {original_scan_id}, new {new_scan_id}"
            );
        });

        std::fs::remove_dir_all(root.path().join("dir1/.git")).unwrap();
        tree.flush_fs_events(cx).await;

        tree.read_with(cx, |tree, _cx| {
            let tree = tree.as_local().unwrap();

            assert!(tree.repo_for("dir1/src/b.txt".as_ref()).is_none());
        });
    }
3323
    // Verifies that `changed_repos` reports deletions, scan-id updates, and
    // additions, but omits repositories that didn't change.
    #[test]
    fn test_changed_repos() {
        fn fake_entry(git_dir_path: impl AsRef<Path>, scan_id: usize) -> GitRepositoryEntry {
            GitRepositoryEntry {
                repo: Arc::new(Mutex::new(FakeGitRepository::default())),
                scan_id,
                content_path: git_dir_path.as_ref().parent().unwrap().into(),
                git_dir_path: git_dir_path.as_ref().into(),
            }
        }

        let prev_repos: Vec<GitRepositoryEntry> = vec![
            fake_entry("/.git", 0),
            fake_entry("/a/.git", 0),
            fake_entry("/a/b/.git", 0),
        ];

        let new_repos: Vec<GitRepositoryEntry> = vec![
            fake_entry("/a/.git", 1),
            fake_entry("/a/b/.git", 0),
            fake_entry("/a/c/.git", 0),
        ];

        let res = LocalWorktree::changed_repos(&prev_repos, &new_repos);

        // Deletion retained
        assert!(res
            .iter()
            .find(|repo| repo.git_dir_path.as_ref() == Path::new("/.git") && repo.scan_id == 0)
            .is_some());

        // Update retained
        assert!(res
            .iter()
            .find(|repo| repo.git_dir_path.as_ref() == Path::new("/a/.git") && repo.scan_id == 1)
            .is_some());

        // Addition retained
        assert!(res
            .iter()
            .find(|repo| repo.git_dir_path.as_ref() == Path::new("/a/c/.git") && repo.scan_id == 0)
            .is_some());

        // Nochange, not retained
        assert!(res
            .iter()
            .find(|repo| repo.git_dir_path.as_ref() == Path::new("/a/b/.git") && repo.scan_id == 0)
            .is_none());
    }
3373
    // Verifies that files written through the worktree get entries with the
    // correct ignore status.
    #[gpui::test]
    async fn test_write_file(cx: &mut TestAppContext) {
        let dir = temp_tree(json!({
            ".git": {},
            ".gitignore": "ignored-dir\n",
            "tracked-dir": {},
            "ignored-dir": {}
        }));

        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));

        let tree = Worktree::local(
            client,
            dir.path(),
            true,
            Arc::new(RealFs),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;
        tree.flush_fs_events(cx).await;

        tree.update(cx, |tree, cx| {
            tree.as_local().unwrap().write_file(
                Path::new("tracked-dir/file.txt"),
                "hello".into(),
                Default::default(),
                cx,
            )
        })
        .await
        .unwrap();
        tree.update(cx, |tree, cx| {
            tree.as_local().unwrap().write_file(
                Path::new("ignored-dir/file.txt"),
                "world".into(),
                Default::default(),
                cx,
            )
        })
        .await
        .unwrap();

        tree.read_with(cx, |tree, _| {
            let tracked = tree.entry_for_path("tracked-dir/file.txt").unwrap();
            let ignored = tree.entry_for_path("ignored-dir/file.txt").unwrap();
            assert!(!tracked.is_ignored);
            assert!(ignored.is_ignored);
        });
    }
3427
    // Verifies that `create_entry` with `is_dir == true` produces a
    // directory entry visible in the worktree.
    #[gpui::test(iterations = 30)]
    async fn test_create_directory(cx: &mut TestAppContext) {
        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));

        let fs = FakeFs::new(cx.background());
        fs.insert_tree(
            "/a",
            json!({
                "b": {},
                "c": {},
                "d": {},
            }),
        )
        .await;

        let tree = Worktree::local(
            client,
            "/a".as_ref(),
            true,
            fs,
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();

        let entry = tree
            .update(cx, |tree, cx| {
                tree.as_local_mut()
                    .unwrap()
                    .create_entry("a/e".as_ref(), true, cx)
            })
            .await
            .unwrap();
        assert!(entry.is_dir());

        cx.foreground().run_until_parked();
        tree.read_with(cx, |tree, _| {
            assert_eq!(tree.entry_for_path("a/e").unwrap().kind, EntryKind::Dir);
        });
    }
3469
    #[gpui::test(iterations = 100)]
    fn test_random(mut rng: StdRng) {
        // Fuzz test for `BackgroundScanner`: randomly mutate a real temporary
        // directory, feed the resulting fs events to a scanner, and verify that
        // (a) the snapshot's internal invariants hold after every batch,
        // (b) incremental scanning converges to the same state as a fresh scan,
        // and (c) remote-update deltas replay correctly onto older snapshots.
        // NOTE: statement order matters here — every `rng` draw advances the
        // seeded random stream, so reordering calls changes the whole run.
        //
        // Workload size is tunable via the OPERATIONS / INITIAL_ENTRIES env vars.
        let operations = env::var("OPERATIONS")
            .map(|o| o.parse().unwrap())
            .unwrap_or(40);
        let initial_entries = env::var("INITIAL_ENTRIES")
            .map(|o| o.parse().unwrap())
            .unwrap_or(20);

        // Seed the on-disk tree. `insertion_probability == 1.0` forces every
        // mutation to be a creation, so this only grows the tree.
        let root_dir = tempdir::TempDir::new("worktree-test").unwrap();
        for _ in 0..initial_entries {
            randomly_mutate_tree(root_dir.path(), 1.0, &mut rng).unwrap();
        }
        log::info!("Generated initial tree");

        // An empty snapshot rooted at the temp dir; the scanner will fill it in.
        let (notify_tx, _notify_rx) = mpsc::unbounded();
        let fs = Arc::new(RealFs);
        let next_entry_id = Arc::new(AtomicUsize::new(0));
        let mut initial_snapshot = LocalSnapshot {
            removed_entry_ids: Default::default(),
            ignores_by_parent_abs_path: Default::default(),
            git_repositories: Default::default(),
            next_entry_id: next_entry_id.clone(),
            snapshot: Snapshot {
                id: WorktreeId::from_usize(0),
                entries_by_path: Default::default(),
                entries_by_id: Default::default(),
                abs_path: root_dir.path().into(),
                root_name: Default::default(),
                root_char_bag: Default::default(),
                scan_id: 0,
                is_complete: true,
            },
            extension_counts: Default::default(),
        };
        // The root entry itself must exist before scanning.
        initial_snapshot.insert_entry(
            Entry::new(
                Path::new("").into(),
                &smol::block_on(fs.metadata(root_dir.path()))
                    .unwrap()
                    .unwrap(),
                &next_entry_id,
                Default::default(),
            ),
            fs.as_ref(),
        );
        let mut scanner = BackgroundScanner::new(
            Arc::new(Mutex::new(initial_snapshot.clone())),
            notify_tx,
            fs.clone(),
            Arc::new(gpui::executor::Background::new()),
        );
        smol::block_on(scanner.scan_dirs()).unwrap();
        scanner.snapshot().check_invariants();

        // Interleave on-disk mutations with (possibly partial) event delivery,
        // checking invariants after every delivered batch. Occasionally save a
        // snapshot so the remote-update path can be exercised afterwards.
        let mut events = Vec::new();
        let mut snapshots = Vec::new();
        let mut mutations_len = operations;
        while mutations_len > 1 {
            if !events.is_empty() && rng.gen_bool(0.4) {
                // Deliver a random prefix of the buffered events.
                let len = rng.gen_range(0..=events.len());
                let to_deliver = events.drain(0..len).collect::<Vec<_>>();
                log::info!("Delivering events: {:#?}", to_deliver);
                smol::block_on(scanner.process_events(to_deliver));
                scanner.snapshot().check_invariants();
            } else {
                events.extend(randomly_mutate_tree(root_dir.path(), 0.6, &mut rng).unwrap());
                mutations_len -= 1;
            }

            if rng.gen_bool(0.2) {
                snapshots.push(scanner.snapshot());
            }
        }
        // Flush whatever events are still buffered.
        log::info!("Quiescing: {:#?}", events);
        smol::block_on(scanner.process_events(events));
        scanner.snapshot().check_invariants();

        // A brand-new scanner over the final on-disk state must reach exactly
        // the same snapshot as the incrementally-updated one.
        let (notify_tx, _notify_rx) = mpsc::unbounded();
        let mut new_scanner = BackgroundScanner::new(
            Arc::new(Mutex::new(initial_snapshot)),
            notify_tx,
            scanner.fs.clone(),
            scanner.executor.clone(),
        );
        smol::block_on(new_scanner.scan_dirs()).unwrap();
        assert_eq!(
            scanner.snapshot().to_vec(true),
            new_scanner.snapshot().to_vec(true)
        );

        // Replay: applying `build_update` deltas to each saved snapshot must
        // reproduce the scanner's current state.
        for mut prev_snapshot in snapshots {
            let include_ignored = rng.gen::<bool>();
            if !include_ignored {
                // Strip ignored entries from the old snapshot, mimicking a
                // client that never received them.
                let mut entries_by_path_edits = Vec::new();
                let mut entries_by_id_edits = Vec::new();
                for entry in prev_snapshot
                    .entries_by_id
                    .cursor::<()>()
                    .filter(|e| e.is_ignored)
                {
                    entries_by_path_edits.push(Edit::Remove(PathKey(entry.path.clone())));
                    entries_by_id_edits.push(Edit::Remove(entry.id));
                }

                prev_snapshot
                    .entries_by_path
                    .edit(entries_by_path_edits, &());
                prev_snapshot.entries_by_id.edit(entries_by_id_edits, &());
            }

            let update = scanner
                .snapshot()
                .build_update(&prev_snapshot, 0, 0, include_ignored);
            prev_snapshot.apply_remote_update(update).unwrap();
            assert_eq!(
                prev_snapshot.to_vec(true),
                scanner.snapshot().to_vec(include_ignored)
            );
        }
    }
3591
    /// Applies one random mutation (create / write .gitignore / rename / delete)
    /// under `root_path` and returns synthetic fs events describing what changed.
    ///
    /// `insertion_probability` biases the choice toward creation — at `1.0`
    /// every call creates a file or directory, which is how the fuzz test seeds
    /// its initial tree. The RNG draw order is part of the test's reproducible
    /// behavior; do not reorder `rng` calls.
    fn randomly_mutate_tree(
        root_path: &Path,
        insertion_probability: f64,
        rng: &mut impl Rng,
    ) -> Result<Vec<fsevent::Event>> {
        let root_path = root_path.canonicalize().unwrap();
        let (dirs, files) = read_dir_recursive(root_path.clone());

        let mut events = Vec::new();
        let mut record_event = |path: PathBuf| {
            // event_id only needs to be monotonic-ish; seconds since the epoch
            // is good enough for a synthetic event.
            events.push(fsevent::Event {
                event_id: SystemTime::now()
                    .duration_since(UNIX_EPOCH)
                    .unwrap()
                    .as_secs(),
                flags: fsevent::StreamFlags::empty(),
                path,
            });
        };

        // Creation branch — forced when the tree is effectively empty (only the
        // root dir and no files), otherwise taken with `insertion_probability`.
        if (files.is_empty() && dirs.len() == 1) || rng.gen_bool(insertion_probability) {
            let path = dirs.choose(rng).unwrap();
            let new_path = path.join(gen_name(rng));

            if rng.gen() {
                log::info!("Creating dir {:?}", new_path.strip_prefix(root_path)?);
                std::fs::create_dir(&new_path)?;
            } else {
                log::info!("Creating file {:?}", new_path.strip_prefix(root_path)?);
                std::fs::write(&new_path, "")?;
            }
            record_event(new_path);
        } else if rng.gen_bool(0.05) {
            // Rarely, (re)write a .gitignore in some directory, ignoring a
            // random subset of its descendant files and dirs.
            let ignore_dir_path = dirs.choose(rng).unwrap();
            let ignore_path = ignore_dir_path.join(&*GITIGNORE);

            let (subdirs, subfiles) = read_dir_recursive(ignore_dir_path.clone());
            let files_to_ignore = {
                let len = rng.gen_range(0..=subfiles.len());
                subfiles.choose_multiple(rng, len)
            };
            let dirs_to_ignore = {
                let len = rng.gen_range(0..subdirs.len());
                subdirs.choose_multiple(rng, len)
            };

            // Each ignored path is written relative to the .gitignore's dir.
            let mut ignore_contents = String::new();
            for path_to_ignore in files_to_ignore.chain(dirs_to_ignore) {
                writeln!(
                    ignore_contents,
                    "{}",
                    path_to_ignore
                        .strip_prefix(&ignore_dir_path)?
                        .to_str()
                        .unwrap()
                )
                .unwrap();
            }
            log::info!(
                "Creating {:?} with contents:\n{}",
                ignore_path.strip_prefix(&root_path)?,
                ignore_contents
            );
            std::fs::write(&ignore_path, ignore_contents).unwrap();
            record_event(ignore_path);
        } else {
            // Pick an existing file or non-root directory to rename or delete.
            // `dirs[1..]` skips the root itself, which must never be removed.
            let old_path = {
                let file_path = files.choose(rng);
                let dir_path = dirs[1..].choose(rng);
                file_path.into_iter().chain(dir_path).choose(rng).unwrap()
            };

            let is_rename = rng.gen();
            if is_rename {
                // Rename into a directory that is not inside the moved path.
                let new_path_parent = dirs
                    .iter()
                    .filter(|d| !d.starts_with(old_path))
                    .choose(rng)
                    .unwrap();

                // Sometimes clobber the destination dir entirely and take its
                // place instead of creating a fresh child name.
                let overwrite_existing_dir =
                    !old_path.starts_with(&new_path_parent) && rng.gen_bool(0.3);
                let new_path = if overwrite_existing_dir {
                    std::fs::remove_dir_all(&new_path_parent).ok();
                    new_path_parent.to_path_buf()
                } else {
                    new_path_parent.join(gen_name(rng))
                };

                log::info!(
                    "Renaming {:?} to {}{:?}",
                    old_path.strip_prefix(&root_path)?,
                    if overwrite_existing_dir {
                        "overwrite "
                    } else {
                        ""
                    },
                    new_path.strip_prefix(&root_path)?
                );
                std::fs::rename(&old_path, &new_path)?;
                record_event(old_path.clone());
                record_event(new_path);
            } else if old_path.is_dir() {
                // Deleting a directory emits events for every descendant,
                // captured before the tree is removed.
                let (dirs, files) = read_dir_recursive(old_path.clone());

                log::info!("Deleting dir {:?}", old_path.strip_prefix(&root_path)?);
                std::fs::remove_dir_all(&old_path).unwrap();
                for file in files {
                    record_event(file);
                }
                for dir in dirs {
                    record_event(dir);
                }
            } else {
                log::info!("Deleting file {:?}", old_path.strip_prefix(&root_path)?);
                std::fs::remove_file(old_path).unwrap();
                record_event(old_path.clone());
            }
        }

        Ok(events)
    }
3714
3715 fn read_dir_recursive(path: PathBuf) -> (Vec<PathBuf>, Vec<PathBuf>) {
3716 let child_entries = std::fs::read_dir(&path).unwrap();
3717 let mut dirs = vec![path];
3718 let mut files = Vec::new();
3719 for child_entry in child_entries {
3720 let child_path = child_entry.unwrap().path();
3721 if child_path.is_dir() {
3722 let (child_dirs, child_files) = read_dir_recursive(child_path);
3723 dirs.extend(child_dirs);
3724 files.extend(child_files);
3725 } else {
3726 files.push(child_path);
3727 }
3728 }
3729 (dirs, files)
3730 }
3731
3732 fn gen_name(rng: &mut impl Rng) -> String {
3733 (0..6)
3734 .map(|_| rng.sample(rand::distributions::Alphanumeric))
3735 .map(char::from)
3736 .collect()
3737 }
3738
    impl LocalSnapshot {
        /// Asserts the snapshot's internal consistency: the `files` iterators
        /// agree with the path-ordered entry tree, every traversal strategy
        /// yields the same entry order, each tracked .gitignore still has a
        /// corresponding entry, and the extension counts match the visible
        /// entries. Panics on any violation (test-only helper).
        fn check_invariants(&self) {
            // The file iterators (with and without ignored entries) must visit
            // exactly the file entries of `entries_by_path`, in the same order.
            let mut files = self.files(true, 0);
            let mut visible_files = self.files(false, 0);
            for entry in self.entries_by_path.cursor::<()>() {
                if entry.is_file() {
                    assert_eq!(files.next().unwrap().inode, entry.inode);
                    if !entry.is_ignored {
                        assert_eq!(visible_files.next().unwrap().inode, entry.inode);
                    }
                }
            }
            assert!(files.next().is_none());
            assert!(visible_files.next().is_none());

            // Walk the tree via `child_entries` starting from the root.
            // NOTE(review): despite the name, this stack walk (children
            // inserted at a fixed index, popped from the end) visits entries in
            // depth-first preorder — which is why it can equal the DFS orders
            // asserted below.
            let mut bfs_paths = Vec::new();
            let mut stack = vec![Path::new("")];
            while let Some(path) = stack.pop() {
                bfs_paths.push(path);
                let ix = stack.len();
                for child_entry in self.child_entries(path) {
                    stack.insert(ix, &child_entry.path);
                }
            }

            // Raw cursor iteration over the path-keyed tree.
            let dfs_paths_via_iter = self
                .entries_by_path
                .cursor::<()>()
                .map(|e| e.path.as_ref())
                .collect::<Vec<_>>();
            assert_eq!(bfs_paths, dfs_paths_via_iter);

            // The public `entries` traversal must agree with both.
            let dfs_paths_via_traversal = self
                .entries(true)
                .map(|e| e.path.as_ref())
                .collect::<Vec<_>>();
            assert_eq!(dfs_paths_via_traversal, dfs_paths_via_iter);

            // Every tracked ignore file's parent dir and the .gitignore entry
            // itself must still exist in the snapshot.
            for ignore_parent_abs_path in self.ignores_by_parent_abs_path.keys() {
                let ignore_parent_path =
                    ignore_parent_abs_path.strip_prefix(&self.abs_path).unwrap();
                assert!(self.entry_for_path(&ignore_parent_path).is_some());
                assert!(self
                    .entry_for_path(ignore_parent_path.join(&*GITIGNORE))
                    .is_some());
            }

            // Ensure extension counts are correct.
            let mut expected_extension_counts = HashMap::default();
            for extension in self.entries(false).filter_map(|e| e.path.extension()) {
                *expected_extension_counts
                    .entry(extension.into())
                    .or_insert(0) += 1;
            }
            assert_eq!(self.extension_counts, expected_extension_counts);
        }

        /// Flattens the snapshot into sorted `(path, inode, is_ignored)`
        /// tuples for order-insensitive equality checks in tests. Ignored
        /// entries are skipped unless `include_ignored` is set.
        fn to_vec(&self, include_ignored: bool) -> Vec<(&Path, u64, bool)> {
            let mut paths = Vec::new();
            for entry in self.entries_by_path.cursor::<()>() {
                if include_ignored || !entry.is_ignored {
                    paths.push((entry.path.as_ref(), entry.inode, entry.is_ignored));
                }
            }
            paths.sort_by(|a, b| a.0.cmp(b.0));
            paths
        }
    }
3807}