1use super::{ignore::IgnoreStack, DiagnosticSummary};
2use crate::{copy_recursive, ProjectEntryId, RemoveOptions};
3use ::ignore::gitignore::{Gitignore, GitignoreBuilder};
4use anyhow::{anyhow, Context, Result};
5use client::{proto, Client};
6use clock::ReplicaId;
7use collections::{HashMap, VecDeque};
8use fs::{repository::GitRepository, Fs};
9use fs::{HomeDir, LineEnding};
10use futures::{
11 channel::{
12 mpsc::{self, UnboundedSender},
13 oneshot,
14 },
15 Stream, StreamExt,
16};
17use fuzzy::CharBag;
18use git::{DOT_GIT, GITIGNORE};
19use gpui::{
20 executor, AppContext, AsyncAppContext, Entity, ModelContext, ModelHandle, MutableAppContext,
21 Task,
22};
23use language::{
24 proto::{
25 deserialize_fingerprint, deserialize_version, serialize_fingerprint, serialize_line_ending,
26 serialize_version,
27 },
28 Buffer, DiagnosticEntry, PointUtf16, Rope, RopeFingerprint, Unclipped,
29};
30use parking_lot::Mutex;
31use postage::{
32 prelude::{Sink as _, Stream as _},
33 watch,
34};
35
36use smol::channel::{self, Sender};
37use std::{
38 any::Any,
39 cmp::{self, Ordering},
40 convert::TryFrom,
41 ffi::OsStr,
42 fmt,
43 future::Future,
44 mem,
45 ops::{Deref, DerefMut},
46 path::{Path, PathBuf},
47 sync::{atomic::AtomicUsize, Arc},
48 task::Poll,
49 time::{Duration, SystemTime},
50};
51use sum_tree::{Bias, Edit, SeekTarget, SumTree, TreeMap, TreeSet};
52use util::{ResultExt, TryFutureExt};
53
54#[derive(Copy, Clone, PartialEq, Eq, Debug, Hash, PartialOrd, Ord)]
55pub struct WorktreeId(usize);
56
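/// A tree of files and directories rooted at a path on disk, either owned
/// locally or mirrored from a remote collaborator.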
57#[allow(clippy::large_enum_variant)]
58pub enum Worktree {
59 Local(LocalWorktree),
60 Remote(RemoteWorktree),
61}
62
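/// A worktree backed by the local filesystem. A background scanner keeps
/// `background_snapshot` up to date, and `poll_snapshot` publishes it to the
/// foreground `snapshot`.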
63pub struct LocalWorktree {
64 snapshot: LocalSnapshot,
65 background_snapshot: Arc<Mutex<LocalSnapshot>>,
66 last_scan_state_rx: watch::Receiver<ScanState>,
67 _background_scanner_task: Option<Task<()>>,
68 poll_task: Option<Task<()>>,
69 share: Option<ShareState>,
70 diagnostics: HashMap<Arc<Path>, Vec<DiagnosticEntry<Unclipped<PointUtf16>>>>,
71 diagnostic_summaries: TreeMap<PathKey, DiagnosticSummary>,
72 client: Arc<Client>,
73 fs: Arc<dyn Fs>,
74 visible: bool,
75}
76
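/// A read-only replica of a worktree hosted by another collaborator, kept up
/// to date by applying `proto::UpdateWorktree` messages to `background_snapshot`.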
77pub struct RemoteWorktree {
78 pub snapshot: Snapshot,
79 pub(crate) background_snapshot: Arc<Mutex<Snapshot>>,
80 project_id: u64,
81 client: Arc<Client>,
82 updates_tx: Option<UnboundedSender<proto::UpdateWorktree>>,
83 snapshot_subscriptions: VecDeque<(usize, oneshot::Sender<()>)>,
84 replica_id: ReplicaId,
85 diagnostic_summaries: TreeMap<PathKey, DiagnosticSummary>,
86 visible: bool,
87 disconnected: bool,
88}
89
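/// A worktree's entries at a point in time, indexed both by path and by entry id.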
90#[derive(Clone)]
91pub struct Snapshot {
92 id: WorktreeId,
93 abs_path: Arc<Path>,
94 root_name: String,
95 root_char_bag: CharBag,
96 entries_by_path: SumTree<Entry>,
97 entries_by_id: SumTree<PathEntry>,
98 scan_id: usize,
99 completed_scan_id: usize,
100}
101
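/// A git repository discovered within the worktree, along with the scan in
/// which it was last updated.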
102#[derive(Clone)]
103pub struct GitRepositoryEntry {
104 pub(crate) repo: Arc<Mutex<dyn GitRepository>>,
105
106 pub(crate) scan_id: usize,
    // Path to the folder containing the .git file or directory.
    pub(crate) content_path: Arc<Path>,
    // Path to the actual .git folder.
    // Note: if .git is a file, this points to the folder indicated by that file.
    pub(crate) git_dir_path: Arc<Path>,
112}
113
114impl std::fmt::Debug for GitRepositoryEntry {
115 fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
116 f.debug_struct("GitRepositoryEntry")
117 .field("content_path", &self.content_path)
118 .field("git_dir_path", &self.git_dir_path)
119 .field("libgit_repository", &"LibGitRepository")
120 .finish()
121 }
122}
123
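/// A `Snapshot` extended with the local-only state needed to maintain it:
/// loaded gitignore files, discovered git repositories, and bookkeeping for
/// reusing the ids of removed entries.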
124pub struct LocalSnapshot {
125 ignores_by_parent_abs_path: HashMap<Arc<Path>, (Arc<Gitignore>, usize)>,
126 git_repositories: Vec<GitRepositoryEntry>,
127 removed_entry_ids: HashMap<u64, ProjectEntryId>,
128 next_entry_id: Arc<AtomicUsize>,
129 snapshot: Snapshot,
130}
131
132impl Clone for LocalSnapshot {
133 fn clone(&self) -> Self {
134 Self {
135 ignores_by_parent_abs_path: self.ignores_by_parent_abs_path.clone(),
136 git_repositories: self.git_repositories.iter().cloned().collect(),
137 removed_entry_ids: self.removed_entry_ids.clone(),
138 next_entry_id: self.next_entry_id.clone(),
139 snapshot: self.snapshot.clone(),
140 }
141 }
142}
143
144impl Deref for LocalSnapshot {
145 type Target = Snapshot;
146
147 fn deref(&self) -> &Self::Target {
148 &self.snapshot
149 }
150}
151
152impl DerefMut for LocalSnapshot {
153 fn deref_mut(&mut self) -> &mut Self::Target {
154 &mut self.snapshot
155 }
156}
157
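/// Progress of the background scanner, reported to the foreground worktree.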
158#[derive(Clone, Debug)]
159enum ScanState {
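    /// The initial scan is complete and no filesystem events are currently being processed.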
160 Idle,
161 /// The worktree is performing its initial scan of the filesystem.
162 Initializing,
163 /// The worktree is updating in response to filesystem events.
164 Updating,
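    /// Scanning failed with the given error.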
165 Err(Arc<anyhow::Error>),
166}
167
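/// State maintained while a local worktree is shared with remote collaborators.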
168struct ShareState {
169 project_id: u64,
170 snapshots_tx: watch::Sender<LocalSnapshot>,
171 resume_updates: watch::Sender<()>,
172 _maintain_remote_snapshot: Task<Option<()>>,
173}
174
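/// Events emitted when a worktree's entries or git repositories change.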
175pub enum Event {
176 UpdatedEntries,
177 UpdatedGitRepositories(Vec<GitRepositoryEntry>),
178}
179
180impl Entity for Worktree {
181 type Event = Event;
182}
183
184impl Worktree {
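    // Opens a worktree rooted at `path` on the local filesystem and spawns the
    // background scanner that keeps it up to date. A minimal usage sketch
    // (hypothetical names; assumes a `Client`, an `Fs` implementation, and an
    // `AsyncAppContext` are already in hand):
    //
    //     let worktree = Worktree::local(
    //         client.clone(),
    //         Path::new("/path/to/project"),
    //         true,
    //         fs.clone(),
    //         Default::default(),
    //         &mut cx,
    //     )
    //     .await?;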
185 pub async fn local(
186 client: Arc<Client>,
187 path: impl Into<Arc<Path>>,
188 visible: bool,
189 fs: Arc<dyn Fs>,
190 next_entry_id: Arc<AtomicUsize>,
191 cx: &mut AsyncAppContext,
192 ) -> Result<ModelHandle<Self>> {
193 let (tree, scan_states_tx) =
194 LocalWorktree::create(client, path, visible, fs.clone(), next_entry_id, cx).await?;
195 tree.update(cx, |tree, cx| {
196 let tree = tree.as_local_mut().unwrap();
197 let abs_path = tree.abs_path().clone();
198 let background_snapshot = tree.background_snapshot.clone();
199 let background = cx.background().clone();
200 tree._background_scanner_task = Some(cx.background().spawn(async move {
201 let events = fs.watch(&abs_path, Duration::from_millis(100)).await;
202 let scanner =
203 BackgroundScanner::new(background_snapshot, scan_states_tx, fs, background);
204 scanner.run(events).await;
205 }));
206 });
207 Ok(tree)
208 }
209
210 pub fn remote(
211 project_remote_id: u64,
212 replica_id: ReplicaId,
213 worktree: proto::WorktreeMetadata,
214 client: Arc<Client>,
215 cx: &mut MutableAppContext,
216 ) -> ModelHandle<Self> {
217 let remote_id = worktree.id;
218 let root_char_bag: CharBag = worktree
219 .root_name
220 .chars()
221 .map(|c| c.to_ascii_lowercase())
222 .collect();
223 let root_name = worktree.root_name.clone();
224 let visible = worktree.visible;
225
226 let abs_path = PathBuf::from(worktree.abs_path);
227 let snapshot = Snapshot {
228 id: WorktreeId(remote_id as usize),
229 abs_path: Arc::from(abs_path.deref()),
230 root_name,
231 root_char_bag,
232 entries_by_path: Default::default(),
233 entries_by_id: Default::default(),
234 scan_id: 0,
235 completed_scan_id: 0,
236 };
237
238 let (updates_tx, mut updates_rx) = mpsc::unbounded();
239 let background_snapshot = Arc::new(Mutex::new(snapshot.clone()));
240 let (mut snapshot_updated_tx, mut snapshot_updated_rx) = watch::channel();
241 let worktree_handle = cx.add_model(|_: &mut ModelContext<Worktree>| {
242 Worktree::Remote(RemoteWorktree {
243 project_id: project_remote_id,
244 replica_id,
245 snapshot: snapshot.clone(),
246 background_snapshot: background_snapshot.clone(),
247 updates_tx: Some(updates_tx),
248 snapshot_subscriptions: Default::default(),
249 client: client.clone(),
250 diagnostic_summaries: Default::default(),
251 visible,
252 disconnected: false,
253 })
254 });
255
256 cx.background()
257 .spawn(async move {
258 while let Some(update) = updates_rx.next().await {
259 if let Err(error) = background_snapshot.lock().apply_remote_update(update) {
260 log::error!("error applying worktree update: {}", error);
261 }
262 snapshot_updated_tx.send(()).await.ok();
263 }
264 })
265 .detach();
266
267 cx.spawn(|mut cx| {
268 let this = worktree_handle.downgrade();
269 async move {
270 while (snapshot_updated_rx.recv().await).is_some() {
271 if let Some(this) = this.upgrade(&cx) {
272 this.update(&mut cx, |this, cx| {
273 this.poll_snapshot(cx);
274 let this = this.as_remote_mut().unwrap();
275 while let Some((scan_id, _)) = this.snapshot_subscriptions.front() {
276 if this.observed_snapshot(*scan_id) {
277 let (_, tx) = this.snapshot_subscriptions.pop_front().unwrap();
278 let _ = tx.send(());
279 } else {
280 break;
281 }
282 }
283 });
284 } else {
285 break;
286 }
287 }
288 }
289 })
290 .detach();
291
292 worktree_handle
293 }
294
295 pub fn as_local(&self) -> Option<&LocalWorktree> {
296 if let Worktree::Local(worktree) = self {
297 Some(worktree)
298 } else {
299 None
300 }
301 }
302
303 pub fn as_remote(&self) -> Option<&RemoteWorktree> {
304 if let Worktree::Remote(worktree) = self {
305 Some(worktree)
306 } else {
307 None
308 }
309 }
310
311 pub fn as_local_mut(&mut self) -> Option<&mut LocalWorktree> {
312 if let Worktree::Local(worktree) = self {
313 Some(worktree)
314 } else {
315 None
316 }
317 }
318
319 pub fn as_remote_mut(&mut self) -> Option<&mut RemoteWorktree> {
320 if let Worktree::Remote(worktree) = self {
321 Some(worktree)
322 } else {
323 None
324 }
325 }
326
327 pub fn is_local(&self) -> bool {
328 matches!(self, Worktree::Local(_))
329 }
330
331 pub fn is_remote(&self) -> bool {
332 !self.is_local()
333 }
334
335 pub fn snapshot(&self) -> Snapshot {
336 match self {
337 Worktree::Local(worktree) => worktree.snapshot().snapshot,
338 Worktree::Remote(worktree) => worktree.snapshot(),
339 }
340 }
341
342 pub fn scan_id(&self) -> usize {
343 match self {
344 Worktree::Local(worktree) => worktree.snapshot.scan_id,
345 Worktree::Remote(worktree) => worktree.snapshot.scan_id,
346 }
347 }
348
349 pub fn completed_scan_id(&self) -> usize {
350 match self {
351 Worktree::Local(worktree) => worktree.snapshot.completed_scan_id,
352 Worktree::Remote(worktree) => worktree.snapshot.completed_scan_id,
353 }
354 }
355
356 pub fn is_visible(&self) -> bool {
357 match self {
358 Worktree::Local(worktree) => worktree.visible,
359 Worktree::Remote(worktree) => worktree.visible,
360 }
361 }
362
363 pub fn replica_id(&self) -> ReplicaId {
364 match self {
365 Worktree::Local(_) => 0,
366 Worktree::Remote(worktree) => worktree.replica_id,
367 }
368 }
369
370 pub fn diagnostic_summaries(
371 &self,
372 ) -> impl Iterator<Item = (Arc<Path>, DiagnosticSummary)> + '_ {
373 match self {
374 Worktree::Local(worktree) => &worktree.diagnostic_summaries,
375 Worktree::Remote(worktree) => &worktree.diagnostic_summaries,
376 }
377 .iter()
378 .map(|(path, summary)| (path.0.clone(), *summary))
379 }
380
381 fn poll_snapshot(&mut self, cx: &mut ModelContext<Self>) {
382 match self {
383 Self::Local(worktree) => worktree.poll_snapshot(false, cx),
384 Self::Remote(worktree) => worktree.poll_snapshot(cx),
385 };
386 }
387
388 pub fn abs_path(&self) -> Arc<Path> {
389 match self {
390 Worktree::Local(worktree) => worktree.abs_path.clone(),
391 Worktree::Remote(worktree) => worktree.abs_path.clone(),
392 }
393 }
394}
395
396impl LocalWorktree {
397 async fn create(
398 client: Arc<Client>,
399 path: impl Into<Arc<Path>>,
400 visible: bool,
401 fs: Arc<dyn Fs>,
402 next_entry_id: Arc<AtomicUsize>,
403 cx: &mut AsyncAppContext,
404 ) -> Result<(ModelHandle<Worktree>, UnboundedSender<ScanState>)> {
405 let abs_path = path.into();
406 let path: Arc<Path> = Arc::from(Path::new(""));
407
        // After determining whether the root entry is a file or a directory, populate the
        // snapshot's "root name", which will be used for fuzzy matching.
410 let root_name = abs_path
411 .file_name()
412 .map_or(String::new(), |f| f.to_string_lossy().to_string());
413 let root_char_bag = root_name.chars().map(|c| c.to_ascii_lowercase()).collect();
414 let metadata = fs
415 .metadata(&abs_path)
416 .await
417 .context("failed to stat worktree path")?;
418
419 let (scan_states_tx, mut scan_states_rx) = mpsc::unbounded();
420 let (mut last_scan_state_tx, last_scan_state_rx) =
421 watch::channel_with(ScanState::Initializing);
422 let tree = cx.add_model(move |cx: &mut ModelContext<Worktree>| {
423 let mut snapshot = LocalSnapshot {
424 ignores_by_parent_abs_path: Default::default(),
425 git_repositories: Default::default(),
426 removed_entry_ids: Default::default(),
427 next_entry_id,
428 snapshot: Snapshot {
429 id: WorktreeId::from_usize(cx.model_id()),
430 abs_path,
431 root_name: root_name.clone(),
432 root_char_bag,
433 entries_by_path: Default::default(),
434 entries_by_id: Default::default(),
435 scan_id: 0,
436 completed_scan_id: 0,
437 },
438 };
439 if let Some(metadata) = metadata {
440 let entry = Entry::new(
441 path,
442 &metadata,
443 &snapshot.next_entry_id,
444 snapshot.root_char_bag,
445 );
446 snapshot.insert_entry(entry, fs.as_ref());
447 }
448
449 let tree = Self {
450 snapshot: snapshot.clone(),
451 background_snapshot: Arc::new(Mutex::new(snapshot)),
452 last_scan_state_rx,
453 _background_scanner_task: None,
454 share: None,
455 poll_task: None,
456 diagnostics: Default::default(),
457 diagnostic_summaries: Default::default(),
458 client,
459 fs,
460 visible,
461 };
462
463 cx.spawn_weak(|this, mut cx| async move {
464 while let Some(scan_state) = scan_states_rx.next().await {
465 if let Some(this) = this.upgrade(&cx) {
466 last_scan_state_tx.blocking_send(scan_state).ok();
467 this.update(&mut cx, |this, cx| this.poll_snapshot(cx));
468 } else {
469 break;
470 }
471 }
472 })
473 .detach();
474
475 Worktree::Local(tree)
476 });
477
478 Ok((tree, scan_states_tx))
479 }
480
481 pub fn contains_abs_path(&self, path: &Path) -> bool {
482 path.starts_with(&self.abs_path)
483 }
484
485 fn absolutize(&self, path: &Path) -> PathBuf {
486 if path.file_name().is_some() {
487 self.abs_path.join(path)
488 } else {
489 self.abs_path.to_path_buf()
490 }
491 }
492
493 pub(crate) fn load_buffer(
494 &mut self,
495 path: &Path,
496 cx: &mut ModelContext<Worktree>,
497 ) -> Task<Result<ModelHandle<Buffer>>> {
498 let path = Arc::from(path);
499 cx.spawn(move |this, mut cx| async move {
500 let (file, contents, diff_base) = this
501 .update(&mut cx, |t, cx| t.as_local().unwrap().load(&path, cx))
502 .await?;
503 Ok(cx.add_model(|cx| {
504 let mut buffer = Buffer::from_file(0, contents, diff_base, Arc::new(file), cx);
505 buffer.git_diff_recalc(cx);
506 buffer
507 }))
508 })
509 }
510
511 pub fn diagnostics_for_path(
512 &self,
513 path: &Path,
514 ) -> Option<Vec<DiagnosticEntry<Unclipped<PointUtf16>>>> {
515 self.diagnostics.get(path).cloned()
516 }
517
518 pub fn update_diagnostics(
519 &mut self,
520 language_server_id: usize,
521 worktree_path: Arc<Path>,
522 diagnostics: Vec<DiagnosticEntry<Unclipped<PointUtf16>>>,
523 _: &mut ModelContext<Worktree>,
524 ) -> Result<bool> {
525 self.diagnostics.remove(&worktree_path);
526 let old_summary = self
527 .diagnostic_summaries
528 .remove(&PathKey(worktree_path.clone()))
529 .unwrap_or_default();
530 let new_summary = DiagnosticSummary::new(language_server_id, &diagnostics);
531 if !new_summary.is_empty() {
532 self.diagnostic_summaries
533 .insert(PathKey(worktree_path.clone()), new_summary);
534 self.diagnostics.insert(worktree_path.clone(), diagnostics);
535 }
536
537 let updated = !old_summary.is_empty() || !new_summary.is_empty();
538 if updated {
539 if let Some(share) = self.share.as_ref() {
540 self.client
541 .send(proto::UpdateDiagnosticSummary {
542 project_id: share.project_id,
543 worktree_id: self.id().to_proto(),
544 summary: Some(proto::DiagnosticSummary {
545 path: worktree_path.to_string_lossy().to_string(),
546 language_server_id: language_server_id as u64,
547 error_count: new_summary.error_count as u32,
548 warning_count: new_summary.warning_count as u32,
549 }),
550 })
551 .log_err();
552 }
553 }
554
555 Ok(updated)
556 }
557
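    // Publish the latest background snapshot to the foreground, emitting update
    // events. While the initial scan is still running, schedule another poll
    // after a short delay.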
558 fn poll_snapshot(&mut self, force: bool, cx: &mut ModelContext<Worktree>) {
559 self.poll_task.take();
560
561 match self.scan_state() {
562 ScanState::Idle => {
563 let new_snapshot = self.background_snapshot.lock().clone();
564 let updated_repos = Self::changed_repos(
565 &self.snapshot.git_repositories,
566 &new_snapshot.git_repositories,
567 );
568 self.snapshot = new_snapshot;
569
570 if let Some(share) = self.share.as_mut() {
571 *share.snapshots_tx.borrow_mut() = self.snapshot.clone();
572 }
573
574 cx.emit(Event::UpdatedEntries);
575
576 if !updated_repos.is_empty() {
577 cx.emit(Event::UpdatedGitRepositories(updated_repos));
578 }
579 }
580
581 ScanState::Initializing => {
582 let is_fake_fs = self.fs.is_fake();
583
584 let new_snapshot = self.background_snapshot.lock().clone();
585 let updated_repos = Self::changed_repos(
586 &self.snapshot.git_repositories,
587 &new_snapshot.git_repositories,
588 );
589 self.snapshot = new_snapshot;
590
591 self.poll_task = Some(cx.spawn_weak(|this, mut cx| async move {
592 if is_fake_fs {
593 #[cfg(any(test, feature = "test-support"))]
594 cx.background().simulate_random_delay().await;
595 } else {
596 smol::Timer::after(Duration::from_millis(100)).await;
597 }
598 if let Some(this) = this.upgrade(&cx) {
599 this.update(&mut cx, |this, cx| this.poll_snapshot(cx));
600 }
601 }));
602
603 cx.emit(Event::UpdatedEntries);
604
605 if !updated_repos.is_empty() {
606 cx.emit(Event::UpdatedGitRepositories(updated_repos));
607 }
608 }
609
610 _ => {
611 if force {
612 self.snapshot = self.background_snapshot.lock().clone();
613 }
614 }
615 }
616
617 cx.notify();
618 }
619
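    // Returns the repositories that differ between the two sets: an entry counts
    // as changed if no entry in the other set has the same git dir path and scan id.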
620 fn changed_repos(
621 old_repos: &[GitRepositoryEntry],
622 new_repos: &[GitRepositoryEntry],
623 ) -> Vec<GitRepositoryEntry> {
624 fn diff<'a>(
625 a: &'a [GitRepositoryEntry],
626 b: &'a [GitRepositoryEntry],
627 updated: &mut HashMap<&'a Path, GitRepositoryEntry>,
628 ) {
629 for a_repo in a {
630 let matched = b.iter().find(|b_repo| {
631 a_repo.git_dir_path == b_repo.git_dir_path && a_repo.scan_id == b_repo.scan_id
632 });
633
634 if matched.is_none() {
635 updated.insert(a_repo.git_dir_path.as_ref(), a_repo.clone());
636 }
637 }
638 }
639
640 let mut updated = HashMap::<&Path, GitRepositoryEntry>::default();
641
642 diff(old_repos, new_repos, &mut updated);
643 diff(new_repos, old_repos, &mut updated);
644
645 updated.into_values().collect()
646 }
647
648 pub fn scan_complete(&self) -> impl Future<Output = ()> {
649 let mut scan_state_rx = self.last_scan_state_rx.clone();
650 async move {
651 let mut scan_state = Some(scan_state_rx.borrow().clone());
652 while let Some(ScanState::Initializing | ScanState::Updating) = scan_state {
653 scan_state = scan_state_rx.recv().await;
654 }
655 }
656 }
657
658 fn scan_state(&self) -> ScanState {
659 self.last_scan_state_rx.borrow().clone()
660 }
661
662 pub fn snapshot(&self) -> LocalSnapshot {
663 self.snapshot.clone()
664 }
665
666 pub fn metadata_proto(&self) -> proto::WorktreeMetadata {
667 proto::WorktreeMetadata {
668 id: self.id().to_proto(),
669 root_name: self.root_name().to_string(),
670 visible: self.visible,
671 abs_path: self.abs_path().as_os_str().to_string_lossy().into(),
672 }
673 }
674
675 fn load(
676 &self,
677 path: &Path,
678 cx: &mut ModelContext<Worktree>,
679 ) -> Task<Result<(File, String, Option<String>)>> {
680 let handle = cx.handle();
681 let path = Arc::from(path);
682 let abs_path = self.absolutize(&path);
683 let fs = self.fs.clone();
684 let snapshot = self.snapshot();
685
686 cx.spawn(|this, mut cx| async move {
687 let text = fs.load(&abs_path).await?;
688
689 let diff_base = if let Some(repo) = snapshot.repo_for(&path) {
690 if let Ok(repo_relative) = path.strip_prefix(repo.content_path) {
691 let repo_relative = repo_relative.to_owned();
692 cx.background()
693 .spawn(async move { repo.repo.lock().load_index_text(&repo_relative) })
694 .await
695 } else {
696 None
697 }
698 } else {
699 None
700 };
701
702 // Eagerly populate the snapshot with an updated entry for the loaded file
703 let entry = this
704 .update(&mut cx, |this, cx| {
705 this.as_local()
706 .unwrap()
707 .refresh_entry(path, abs_path, None, cx)
708 })
709 .await?;
710
711 Ok((
712 File {
713 entry_id: entry.id,
714 worktree: handle,
715 path: entry.path,
716 mtime: entry.mtime,
717 is_local: true,
718 is_deleted: false,
719 },
720 text,
721 diff_base,
722 ))
723 })
724 }
725
726 pub fn save_buffer_as(
727 &self,
728 buffer_handle: ModelHandle<Buffer>,
729 path: impl Into<Arc<Path>>,
730 cx: &mut ModelContext<Worktree>,
731 ) -> Task<Result<()>> {
732 let buffer = buffer_handle.read(cx);
733 let text = buffer.as_rope().clone();
734 let fingerprint = text.fingerprint();
735 let version = buffer.version();
736 let save = self.write_file(path, text, buffer.line_ending(), cx);
737 let handle = cx.handle();
738 cx.as_mut().spawn(|mut cx| async move {
739 let entry = save.await?;
740 let file = File {
741 entry_id: entry.id,
742 worktree: handle,
743 path: entry.path,
744 mtime: entry.mtime,
745 is_local: true,
746 is_deleted: false,
747 };
748
749 buffer_handle.update(&mut cx, |buffer, cx| {
750 buffer.did_save(version, fingerprint, file.mtime, Some(Arc::new(file)), cx);
751 });
752
753 Ok(())
754 })
755 }
756
757 pub fn create_entry(
758 &self,
759 path: impl Into<Arc<Path>>,
760 is_dir: bool,
761 cx: &mut ModelContext<Worktree>,
762 ) -> Task<Result<Entry>> {
763 self.write_entry_internal(
764 path,
765 if is_dir {
766 None
767 } else {
768 Some(Default::default())
769 },
770 cx,
771 )
772 }
773
774 pub fn write_file(
775 &self,
776 path: impl Into<Arc<Path>>,
777 text: Rope,
778 line_ending: LineEnding,
779 cx: &mut ModelContext<Worktree>,
780 ) -> Task<Result<Entry>> {
781 self.write_entry_internal(path, Some((text, line_ending)), cx)
782 }
783
784 pub fn delete_entry(
785 &self,
786 entry_id: ProjectEntryId,
787 cx: &mut ModelContext<Worktree>,
788 ) -> Option<Task<Result<()>>> {
789 let entry = self.entry_for_id(entry_id)?.clone();
790 let abs_path = self.absolutize(&entry.path);
791 let delete = cx.background().spawn({
792 let fs = self.fs.clone();
793 let abs_path = abs_path;
794 async move {
795 if entry.is_file() {
796 fs.remove_file(&abs_path, Default::default()).await
797 } else {
798 fs.remove_dir(
799 &abs_path,
800 RemoveOptions {
801 recursive: true,
802 ignore_if_not_exists: false,
803 },
804 )
805 .await
806 }
807 }
808 });
809
810 Some(cx.spawn(|this, mut cx| async move {
811 delete.await?;
812 this.update(&mut cx, |this, cx| {
813 let this = this.as_local_mut().unwrap();
814 {
815 let mut snapshot = this.background_snapshot.lock();
816 snapshot.delete_entry(entry_id);
817 }
818 this.poll_snapshot(true, cx);
819 });
820 Ok(())
821 }))
822 }
823
824 pub fn rename_entry(
825 &self,
826 entry_id: ProjectEntryId,
827 new_path: impl Into<Arc<Path>>,
828 cx: &mut ModelContext<Worktree>,
829 ) -> Option<Task<Result<Entry>>> {
830 let old_path = self.entry_for_id(entry_id)?.path.clone();
831 let new_path = new_path.into();
832 let abs_old_path = self.absolutize(&old_path);
833 let abs_new_path = self.absolutize(&new_path);
834 let rename = cx.background().spawn({
835 let fs = self.fs.clone();
836 let abs_new_path = abs_new_path.clone();
837 async move {
838 fs.rename(&abs_old_path, &abs_new_path, Default::default())
839 .await
840 }
841 });
842
843 Some(cx.spawn(|this, mut cx| async move {
844 rename.await?;
845 let entry = this
846 .update(&mut cx, |this, cx| {
847 this.as_local_mut().unwrap().refresh_entry(
848 new_path.clone(),
849 abs_new_path,
850 Some(old_path),
851 cx,
852 )
853 })
854 .await?;
855 Ok(entry)
856 }))
857 }
858
859 pub fn copy_entry(
860 &self,
861 entry_id: ProjectEntryId,
862 new_path: impl Into<Arc<Path>>,
863 cx: &mut ModelContext<Worktree>,
864 ) -> Option<Task<Result<Entry>>> {
865 let old_path = self.entry_for_id(entry_id)?.path.clone();
866 let new_path = new_path.into();
867 let abs_old_path = self.absolutize(&old_path);
868 let abs_new_path = self.absolutize(&new_path);
869 let copy = cx.background().spawn({
870 let fs = self.fs.clone();
871 let abs_new_path = abs_new_path.clone();
872 async move {
873 copy_recursive(
874 fs.as_ref(),
875 &abs_old_path,
876 &abs_new_path,
877 Default::default(),
878 )
879 .await
880 }
881 });
882
883 Some(cx.spawn(|this, mut cx| async move {
884 copy.await?;
885 let entry = this
886 .update(&mut cx, |this, cx| {
887 this.as_local_mut().unwrap().refresh_entry(
888 new_path.clone(),
889 abs_new_path,
890 None,
891 cx,
892 )
893 })
894 .await?;
895 Ok(entry)
896 }))
897 }
898
899 fn write_entry_internal(
900 &self,
901 path: impl Into<Arc<Path>>,
902 text_if_file: Option<(Rope, LineEnding)>,
903 cx: &mut ModelContext<Worktree>,
904 ) -> Task<Result<Entry>> {
905 let path = path.into();
906 let abs_path = self.absolutize(&path);
907 let write = cx.background().spawn({
908 let fs = self.fs.clone();
909 let abs_path = abs_path.clone();
910 async move {
911 if let Some((text, line_ending)) = text_if_file {
912 fs.save(&abs_path, &text, line_ending).await
913 } else {
914 fs.create_dir(&abs_path).await
915 }
916 }
917 });
918
919 cx.spawn(|this, mut cx| async move {
920 write.await?;
921 let entry = this
922 .update(&mut cx, |this, cx| {
923 this.as_local_mut()
924 .unwrap()
925 .refresh_entry(path, abs_path, None, cx)
926 })
927 .await?;
928 Ok(entry)
929 })
930 }
931
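    // Re-stat `abs_path` and insert an up-to-date entry for `path` into the
    // background snapshot, removing `old_path` when this refresh corresponds to a rename.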
932 fn refresh_entry(
933 &self,
934 path: Arc<Path>,
935 abs_path: PathBuf,
936 old_path: Option<Arc<Path>>,
937 cx: &mut ModelContext<Worktree>,
938 ) -> Task<Result<Entry>> {
939 let fs = self.fs.clone();
940 let root_char_bag;
941 let next_entry_id;
942 {
943 let snapshot = self.background_snapshot.lock();
944 root_char_bag = snapshot.root_char_bag;
945 next_entry_id = snapshot.next_entry_id.clone();
946 }
947 cx.spawn_weak(|this, mut cx| async move {
948 let metadata = fs
949 .metadata(&abs_path)
950 .await?
951 .ok_or_else(|| anyhow!("could not read saved file metadata"))?;
952 let this = this
953 .upgrade(&cx)
954 .ok_or_else(|| anyhow!("worktree was dropped"))?;
955 this.update(&mut cx, |this, cx| {
956 let this = this.as_local_mut().unwrap();
957 let inserted_entry;
958 {
959 let mut snapshot = this.background_snapshot.lock();
960 let mut entry = Entry::new(path, &metadata, &next_entry_id, root_char_bag);
961 entry.is_ignored = snapshot
962 .ignore_stack_for_abs_path(&abs_path, entry.is_dir())
963 .is_abs_path_ignored(&abs_path, entry.is_dir());
964 if let Some(old_path) = old_path {
965 snapshot.remove_path(&old_path);
966 }
967 snapshot.scan_started();
968 inserted_entry = snapshot.insert_entry(entry, fs.as_ref());
969 snapshot.scan_completed();
970 }
971 this.poll_snapshot(true, cx);
972 Ok(inserted_entry)
973 })
974 })
975 }
976
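    // Start streaming snapshot updates for this worktree to the server under
    // `project_id`, or resume a previously paused stream if it is already shared.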
977 pub fn share(&mut self, project_id: u64, cx: &mut ModelContext<Worktree>) -> Task<Result<()>> {
978 let (share_tx, share_rx) = oneshot::channel();
979
980 if let Some(share) = self.share.as_mut() {
981 let _ = share_tx.send(());
982 *share.resume_updates.borrow_mut() = ();
983 } else {
984 let (snapshots_tx, mut snapshots_rx) = watch::channel_with(self.snapshot());
985 let (resume_updates_tx, mut resume_updates_rx) = watch::channel();
986 let worktree_id = cx.model_id() as u64;
987
988 for (path, summary) in self.diagnostic_summaries.iter() {
989 if let Err(e) = self.client.send(proto::UpdateDiagnosticSummary {
990 project_id,
991 worktree_id,
992 summary: Some(summary.to_proto(&path.0)),
993 }) {
994 return Task::ready(Err(e));
995 }
996 }
997
998 let _maintain_remote_snapshot = cx.background().spawn({
999 let client = self.client.clone();
1000 async move {
1001 let mut share_tx = Some(share_tx);
1002 let mut prev_snapshot = LocalSnapshot {
1003 ignores_by_parent_abs_path: Default::default(),
1004 git_repositories: Default::default(),
1005 removed_entry_ids: Default::default(),
1006 next_entry_id: Default::default(),
1007 snapshot: Snapshot {
1008 id: WorktreeId(worktree_id as usize),
1009 abs_path: Path::new("").into(),
1010 root_name: Default::default(),
1011 root_char_bag: Default::default(),
1012 entries_by_path: Default::default(),
1013 entries_by_id: Default::default(),
1014 scan_id: 0,
1015 completed_scan_id: 0,
1016 },
1017 };
1018 while let Some(snapshot) = snapshots_rx.recv().await {
1019 #[cfg(any(test, feature = "test-support"))]
1020 const MAX_CHUNK_SIZE: usize = 2;
1021 #[cfg(not(any(test, feature = "test-support")))]
1022 const MAX_CHUNK_SIZE: usize = 256;
1023
1024 let update =
1025 snapshot.build_update(&prev_snapshot, project_id, worktree_id, true);
1026 for update in proto::split_worktree_update(update, MAX_CHUNK_SIZE) {
1027 let _ = resume_updates_rx.try_recv();
1028 while let Err(error) = client.request(update.clone()).await {
1029 log::error!("failed to send worktree update: {}", error);
1030 log::info!("waiting to resume updates");
1031 if resume_updates_rx.next().await.is_none() {
1032 return Ok(());
1033 }
1034 }
1035 }
1036
1037 if let Some(share_tx) = share_tx.take() {
1038 let _ = share_tx.send(());
1039 }
1040
1041 prev_snapshot = snapshot;
1042 }
1043
1044 Ok::<_, anyhow::Error>(())
1045 }
1046 .log_err()
1047 });
1048
1049 self.share = Some(ShareState {
1050 project_id,
1051 snapshots_tx,
1052 resume_updates: resume_updates_tx,
1053 _maintain_remote_snapshot,
1054 });
1055 }
1056
1057 cx.foreground()
1058 .spawn(async move { share_rx.await.map_err(|_| anyhow!("share ended")) })
1059 }
1060
1061 pub fn unshare(&mut self) {
1062 self.share.take();
1063 }
1064
1065 pub fn is_shared(&self) -> bool {
1066 self.share.is_some()
1067 }
1068}
1069
1070impl RemoteWorktree {
1071 fn snapshot(&self) -> Snapshot {
1072 self.snapshot.clone()
1073 }
1074
1075 fn poll_snapshot(&mut self, cx: &mut ModelContext<Worktree>) {
1076 self.snapshot = self.background_snapshot.lock().clone();
1077 cx.emit(Event::UpdatedEntries);
1078 cx.notify();
1079 }
1080
1081 pub fn disconnected_from_host(&mut self) {
1082 self.updates_tx.take();
1083 self.snapshot_subscriptions.clear();
1084 self.disconnected = true;
1085 }
1086
1087 pub fn update_from_remote(&mut self, update: proto::UpdateWorktree) {
1088 if let Some(updates_tx) = &self.updates_tx {
1089 updates_tx
1090 .unbounded_send(update)
1091 .expect("consumer runs to completion");
1092 }
1093 }
1094
1095 fn observed_snapshot(&self, scan_id: usize) -> bool {
1096 self.completed_scan_id >= scan_id
1097 }
1098
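    // Returns a future that resolves once a snapshot with at least the given
    // scan id has been observed, or with an error if the host disconnects first.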
1099 fn wait_for_snapshot(&mut self, scan_id: usize) -> impl Future<Output = Result<()>> {
1100 let (tx, rx) = oneshot::channel();
1101 if self.observed_snapshot(scan_id) {
1102 let _ = tx.send(());
1103 } else if self.disconnected {
1104 drop(tx);
1105 } else {
1106 match self
1107 .snapshot_subscriptions
1108 .binary_search_by_key(&scan_id, |probe| probe.0)
1109 {
1110 Ok(ix) | Err(ix) => self.snapshot_subscriptions.insert(ix, (scan_id, tx)),
1111 }
1112 }
1113
1114 async move {
1115 rx.await?;
1116 Ok(())
1117 }
1118 }
1119
1120 pub fn update_diagnostic_summary(
1121 &mut self,
1122 path: Arc<Path>,
1123 summary: &proto::DiagnosticSummary,
1124 ) {
1125 let summary = DiagnosticSummary {
1126 language_server_id: summary.language_server_id as usize,
1127 error_count: summary.error_count as usize,
1128 warning_count: summary.warning_count as usize,
1129 };
1130 if summary.is_empty() {
1131 self.diagnostic_summaries.remove(&PathKey(path));
1132 } else {
1133 self.diagnostic_summaries.insert(PathKey(path), summary);
1134 }
1135 }
1136
1137 pub fn insert_entry(
1138 &mut self,
1139 entry: proto::Entry,
1140 scan_id: usize,
1141 cx: &mut ModelContext<Worktree>,
1142 ) -> Task<Result<Entry>> {
1143 let wait_for_snapshot = self.wait_for_snapshot(scan_id);
1144 cx.spawn(|this, mut cx| async move {
1145 wait_for_snapshot.await?;
1146 this.update(&mut cx, |worktree, _| {
1147 let worktree = worktree.as_remote_mut().unwrap();
1148 let mut snapshot = worktree.background_snapshot.lock();
1149 let entry = snapshot.insert_entry(entry);
1150 worktree.snapshot = snapshot.clone();
1151 entry
1152 })
1153 })
1154 }
1155
1156 pub(crate) fn delete_entry(
1157 &mut self,
1158 id: ProjectEntryId,
1159 scan_id: usize,
1160 cx: &mut ModelContext<Worktree>,
1161 ) -> Task<Result<()>> {
1162 let wait_for_snapshot = self.wait_for_snapshot(scan_id);
1163 cx.spawn(|this, mut cx| async move {
1164 wait_for_snapshot.await?;
1165 this.update(&mut cx, |worktree, _| {
1166 let worktree = worktree.as_remote_mut().unwrap();
1167 let mut snapshot = worktree.background_snapshot.lock();
1168 snapshot.delete_entry(id);
1169 worktree.snapshot = snapshot.clone();
1170 });
1171 Ok(())
1172 })
1173 }
1174}
1175
1176impl Snapshot {
1177 pub fn id(&self) -> WorktreeId {
1178 self.id
1179 }
1180
1181 pub fn abs_path(&self) -> &Arc<Path> {
1182 &self.abs_path
1183 }
1184
1185 pub fn contains_entry(&self, entry_id: ProjectEntryId) -> bool {
1186 self.entries_by_id.get(&entry_id, &()).is_some()
1187 }
1188
1189 pub(crate) fn insert_entry(&mut self, entry: proto::Entry) -> Result<Entry> {
1190 let entry = Entry::try_from((&self.root_char_bag, entry))?;
1191 let old_entry = self.entries_by_id.insert_or_replace(
1192 PathEntry {
1193 id: entry.id,
1194 path: entry.path.clone(),
1195 is_ignored: entry.is_ignored,
1196 scan_id: 0,
1197 },
1198 &(),
1199 );
1200 if let Some(old_entry) = old_entry {
1201 self.entries_by_path.remove(&PathKey(old_entry.path), &());
1202 }
1203 self.entries_by_path.insert_or_replace(entry.clone(), &());
1204 Ok(entry)
1205 }
1206
1207 fn delete_entry(&mut self, entry_id: ProjectEntryId) -> bool {
1208 if let Some(removed_entry) = self.entries_by_id.remove(&entry_id, &()) {
1209 self.entries_by_path = {
1210 let mut cursor = self.entries_by_path.cursor();
1211 let mut new_entries_by_path =
1212 cursor.slice(&TraversalTarget::Path(&removed_entry.path), Bias::Left, &());
1213 while let Some(entry) = cursor.item() {
1214 if entry.path.starts_with(&removed_entry.path) {
1215 self.entries_by_id.remove(&entry.id, &());
1216 cursor.next(&());
1217 } else {
1218 break;
1219 }
1220 }
1221 new_entries_by_path.push_tree(cursor.suffix(&()), &());
1222 new_entries_by_path
1223 };
1224
1225 true
1226 } else {
1227 false
1228 }
1229 }
1230
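    // Apply a batch of entry insertions and removals received from the host,
    // updating the scan ids accordingly.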
1231 pub(crate) fn apply_remote_update(&mut self, update: proto::UpdateWorktree) -> Result<()> {
1232 let mut entries_by_path_edits = Vec::new();
1233 let mut entries_by_id_edits = Vec::new();
1234 for entry_id in update.removed_entries {
1235 let entry = self
1236 .entry_for_id(ProjectEntryId::from_proto(entry_id))
1237 .ok_or_else(|| anyhow!("unknown entry {}", entry_id))?;
1238 entries_by_path_edits.push(Edit::Remove(PathKey(entry.path.clone())));
1239 entries_by_id_edits.push(Edit::Remove(entry.id));
1240 }
1241
1242 for entry in update.updated_entries {
1243 let entry = Entry::try_from((&self.root_char_bag, entry))?;
1244 if let Some(PathEntry { path, .. }) = self.entries_by_id.get(&entry.id, &()) {
1245 entries_by_path_edits.push(Edit::Remove(PathKey(path.clone())));
1246 }
1247 entries_by_id_edits.push(Edit::Insert(PathEntry {
1248 id: entry.id,
1249 path: entry.path.clone(),
1250 is_ignored: entry.is_ignored,
1251 scan_id: 0,
1252 }));
1253 entries_by_path_edits.push(Edit::Insert(entry));
1254 }
1255
1256 self.entries_by_path.edit(entries_by_path_edits, &());
1257 self.entries_by_id.edit(entries_by_id_edits, &());
1258 self.scan_id = update.scan_id as usize;
1259 if update.is_last_update {
1260 self.completed_scan_id = update.scan_id as usize;
1261 }
1262
1263 Ok(())
1264 }
1265
1266 pub fn file_count(&self) -> usize {
1267 self.entries_by_path.summary().file_count
1268 }
1269
1270 pub fn visible_file_count(&self) -> usize {
1271 self.entries_by_path.summary().visible_file_count
1272 }
1273
1274 fn traverse_from_offset(
1275 &self,
1276 include_dirs: bool,
1277 include_ignored: bool,
1278 start_offset: usize,
1279 ) -> Traversal {
1280 let mut cursor = self.entries_by_path.cursor();
1281 cursor.seek(
1282 &TraversalTarget::Count {
1283 count: start_offset,
1284 include_dirs,
1285 include_ignored,
1286 },
1287 Bias::Right,
1288 &(),
1289 );
1290 Traversal {
1291 cursor,
1292 include_dirs,
1293 include_ignored,
1294 }
1295 }
1296
1297 fn traverse_from_path(
1298 &self,
1299 include_dirs: bool,
1300 include_ignored: bool,
1301 path: &Path,
1302 ) -> Traversal {
1303 let mut cursor = self.entries_by_path.cursor();
1304 cursor.seek(&TraversalTarget::Path(path), Bias::Left, &());
1305 Traversal {
1306 cursor,
1307 include_dirs,
1308 include_ignored,
1309 }
1310 }
1311
1312 pub fn files(&self, include_ignored: bool, start: usize) -> Traversal {
1313 self.traverse_from_offset(false, include_ignored, start)
1314 }
1315
1316 pub fn entries(&self, include_ignored: bool) -> Traversal {
1317 self.traverse_from_offset(true, include_ignored, 0)
1318 }
1319
1320 pub fn paths(&self) -> impl Iterator<Item = &Arc<Path>> {
1321 let empty_path = Path::new("");
1322 self.entries_by_path
1323 .cursor::<()>()
1324 .filter(move |entry| entry.path.as_ref() != empty_path)
1325 .map(|entry| &entry.path)
1326 }
1327
1328 fn child_entries<'a>(&'a self, parent_path: &'a Path) -> ChildEntriesIter<'a> {
1329 let mut cursor = self.entries_by_path.cursor();
1330 cursor.seek(&TraversalTarget::Path(parent_path), Bias::Right, &());
1331 let traversal = Traversal {
1332 cursor,
1333 include_dirs: true,
1334 include_ignored: true,
1335 };
1336 ChildEntriesIter {
1337 traversal,
1338 parent_path,
1339 }
1340 }
1341
1342 pub fn root_entry(&self) -> Option<&Entry> {
1343 self.entry_for_path("")
1344 }
1345
1346 pub fn root_name(&self) -> &str {
1347 &self.root_name
1348 }
1349
1350 pub fn scan_started(&mut self) {
1351 self.scan_id += 1;
1352 }
1353
1354 pub fn scan_completed(&mut self) {
1355 self.completed_scan_id = self.scan_id;
1356 }
1357
1358 pub fn scan_id(&self) -> usize {
1359 self.scan_id
1360 }
1361
1362 pub fn entry_for_path(&self, path: impl AsRef<Path>) -> Option<&Entry> {
1363 let path = path.as_ref();
1364 self.traverse_from_path(true, true, path)
1365 .entry()
1366 .and_then(|entry| {
1367 if entry.path.as_ref() == path {
1368 Some(entry)
1369 } else {
1370 None
1371 }
1372 })
1373 }
1374
1375 pub fn entry_for_id(&self, id: ProjectEntryId) -> Option<&Entry> {
1376 let entry = self.entries_by_id.get(&id, &())?;
1377 self.entry_for_path(&entry.path)
1378 }
1379
1380 pub fn inode_for_path(&self, path: impl AsRef<Path>) -> Option<u64> {
1381 self.entry_for_path(path.as_ref()).map(|e| e.inode)
1382 }
1383}
1384
1385impl LocalSnapshot {
1386 // Gives the most specific git repository for a given path
1387 pub(crate) fn repo_for(&self, path: &Path) -> Option<GitRepositoryEntry> {
1388 self.git_repositories
1389 .iter()
            .rev() // `git_repositories` is sorted lexicographically, so the most deeply nested (most specific) repository comes last
1391 .find(|repo| repo.manages(path))
1392 .cloned()
1393 }
1394
1395 pub(crate) fn in_dot_git(&mut self, path: &Path) -> Option<&mut GitRepositoryEntry> {
1396 // Git repositories cannot be nested, so we don't need to reverse the order
1397 self.git_repositories
1398 .iter_mut()
1399 .find(|repo| repo.in_dot_git(path))
1400 }
1401
1402 #[cfg(test)]
1403 pub(crate) fn build_initial_update(&self, project_id: u64) -> proto::UpdateWorktree {
1404 let root_name = self.root_name.clone();
1405 proto::UpdateWorktree {
1406 project_id,
1407 worktree_id: self.id().to_proto(),
1408 abs_path: self.abs_path().to_string_lossy().into(),
1409 root_name,
1410 updated_entries: self.entries_by_path.iter().map(Into::into).collect(),
1411 removed_entries: Default::default(),
1412 scan_id: self.scan_id as u64,
1413 is_last_update: true,
1414 }
1415 }
1416
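    // Compute the entries that were added, changed, or removed since `other`
    // was sent, by walking both id-ordered entry sets in lockstep.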
1417 pub(crate) fn build_update(
1418 &self,
1419 other: &Self,
1420 project_id: u64,
1421 worktree_id: u64,
1422 include_ignored: bool,
1423 ) -> proto::UpdateWorktree {
1424 let mut updated_entries = Vec::new();
1425 let mut removed_entries = Vec::new();
1426 let mut self_entries = self
1427 .entries_by_id
1428 .cursor::<()>()
1429 .filter(|e| include_ignored || !e.is_ignored)
1430 .peekable();
1431 let mut other_entries = other
1432 .entries_by_id
1433 .cursor::<()>()
1434 .filter(|e| include_ignored || !e.is_ignored)
1435 .peekable();
1436 loop {
1437 match (self_entries.peek(), other_entries.peek()) {
1438 (Some(self_entry), Some(other_entry)) => {
1439 match Ord::cmp(&self_entry.id, &other_entry.id) {
1440 Ordering::Less => {
1441 let entry = self.entry_for_id(self_entry.id).unwrap().into();
1442 updated_entries.push(entry);
1443 self_entries.next();
1444 }
1445 Ordering::Equal => {
1446 if self_entry.scan_id != other_entry.scan_id {
1447 let entry = self.entry_for_id(self_entry.id).unwrap().into();
1448 updated_entries.push(entry);
1449 }
1450
1451 self_entries.next();
1452 other_entries.next();
1453 }
1454 Ordering::Greater => {
1455 removed_entries.push(other_entry.id.to_proto());
1456 other_entries.next();
1457 }
1458 }
1459 }
1460 (Some(self_entry), None) => {
1461 let entry = self.entry_for_id(self_entry.id).unwrap().into();
1462 updated_entries.push(entry);
1463 self_entries.next();
1464 }
1465 (None, Some(other_entry)) => {
1466 removed_entries.push(other_entry.id.to_proto());
1467 other_entries.next();
1468 }
1469 (None, None) => break,
1470 }
1471 }
1472
1473 proto::UpdateWorktree {
1474 project_id,
1475 worktree_id,
1476 abs_path: self.abs_path().to_string_lossy().into(),
1477 root_name: self.root_name().to_string(),
1478 updated_entries,
1479 removed_entries,
1480 scan_id: self.scan_id as u64,
1481 is_last_update: self.completed_scan_id == self.scan_id,
1482 }
1483 }
1484
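    // Insert or replace a single entry. If the entry is a `.gitignore` file, its
    // ignore rules are reloaded; ids of previously removed entries with the same
    // inode are reused.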
1485 fn insert_entry(&mut self, mut entry: Entry, fs: &dyn Fs) -> Entry {
1486 if entry.is_file() && entry.path.file_name() == Some(&GITIGNORE) {
1487 let abs_path = self.abs_path.join(&entry.path);
1488 match smol::block_on(build_gitignore(&abs_path, fs)) {
1489 Ok(ignore) => {
1490 self.ignores_by_parent_abs_path.insert(
1491 abs_path.parent().unwrap().into(),
1492 (Arc::new(ignore), self.scan_id),
1493 );
1494 }
1495 Err(error) => {
1496 log::error!(
1497 "error loading .gitignore file {:?} - {:?}",
1498 &entry.path,
1499 error
1500 );
1501 }
1502 }
1503 }
1504
1505 self.reuse_entry_id(&mut entry);
1506
1507 if entry.kind == EntryKind::PendingDir {
1508 if let Some(existing_entry) =
1509 self.entries_by_path.get(&PathKey(entry.path.clone()), &())
1510 {
1511 entry.kind = existing_entry.kind;
1512 }
1513 }
1514
1515 let scan_id = self.scan_id;
1516 self.entries_by_path.insert_or_replace(entry.clone(), &());
1517 self.entries_by_id.insert_or_replace(
1518 PathEntry {
1519 id: entry.id,
1520 path: entry.path.clone(),
1521 is_ignored: entry.is_ignored,
1522 scan_id,
1523 },
1524 &(),
1525 );
1526
1527 entry
1528 }
1529
1530 fn populate_dir(
1531 &mut self,
1532 parent_path: Arc<Path>,
1533 entries: impl IntoIterator<Item = Entry>,
1534 ignore: Option<Arc<Gitignore>>,
1535 fs: &dyn Fs,
1536 ) {
1537 let mut parent_entry = if let Some(parent_entry) =
1538 self.entries_by_path.get(&PathKey(parent_path.clone()), &())
1539 {
1540 parent_entry.clone()
1541 } else {
1542 log::warn!(
1543 "populating a directory {:?} that has been removed",
1544 parent_path
1545 );
1546 return;
1547 };
1548
1549 if let Some(ignore) = ignore {
1550 self.ignores_by_parent_abs_path.insert(
1551 self.abs_path.join(&parent_path).into(),
1552 (ignore, self.scan_id),
1553 );
1554 }
1555 if matches!(parent_entry.kind, EntryKind::PendingDir) {
1556 parent_entry.kind = EntryKind::Dir;
1557 } else {
1558 unreachable!();
1559 }
1560
1561 if parent_path.file_name() == Some(&DOT_GIT) {
1562 let abs_path = self.abs_path.join(&parent_path);
1563 let content_path: Arc<Path> = parent_path.parent().unwrap().into();
1564 if let Err(ix) = self
1565 .git_repositories
1566 .binary_search_by_key(&&content_path, |repo| &repo.content_path)
1567 {
1568 if let Some(repo) = fs.open_repo(abs_path.as_path()) {
1569 self.git_repositories.insert(
1570 ix,
1571 GitRepositoryEntry {
1572 repo,
1573 scan_id: 0,
1574 content_path,
1575 git_dir_path: parent_path,
1576 },
1577 );
1578 }
1579 }
1580 }
1581
1582 let mut entries_by_path_edits = vec![Edit::Insert(parent_entry)];
1583 let mut entries_by_id_edits = Vec::new();
1584
1585 for mut entry in entries {
1586 self.reuse_entry_id(&mut entry);
1587 entries_by_id_edits.push(Edit::Insert(PathEntry {
1588 id: entry.id,
1589 path: entry.path.clone(),
1590 is_ignored: entry.is_ignored,
1591 scan_id: self.scan_id,
1592 }));
1593 entries_by_path_edits.push(Edit::Insert(entry));
1594 }
1595
1596 self.entries_by_path.edit(entries_by_path_edits, &());
1597 self.entries_by_id.edit(entries_by_id_edits, &());
1598 }
1599
1600 fn reuse_entry_id(&mut self, entry: &mut Entry) {
1601 if let Some(removed_entry_id) = self.removed_entry_ids.remove(&entry.inode) {
1602 entry.id = removed_entry_id;
1603 } else if let Some(existing_entry) = self.entry_for_path(&entry.path) {
1604 entry.id = existing_entry.id;
1605 }
1606 }
1607
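    // Remove the entry at `path` and all of its descendants, remembering removed
    // entry ids by inode so they can be reused if the files reappear.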
1608 fn remove_path(&mut self, path: &Path) {
1609 let mut new_entries;
1610 let removed_entries;
1611 {
1612 let mut cursor = self.entries_by_path.cursor::<TraversalProgress>();
1613 new_entries = cursor.slice(&TraversalTarget::Path(path), Bias::Left, &());
1614 removed_entries = cursor.slice(&TraversalTarget::PathSuccessor(path), Bias::Left, &());
1615 new_entries.push_tree(cursor.suffix(&()), &());
1616 }
1617 self.entries_by_path = new_entries;
1618
1619 let mut entries_by_id_edits = Vec::new();
1620 for entry in removed_entries.cursor::<()>() {
1621 let removed_entry_id = self
1622 .removed_entry_ids
1623 .entry(entry.inode)
1624 .or_insert(entry.id);
1625 *removed_entry_id = cmp::max(*removed_entry_id, entry.id);
1626 entries_by_id_edits.push(Edit::Remove(entry.id));
1627 }
1628 self.entries_by_id.edit(entries_by_id_edits, &());
1629
1630 if path.file_name() == Some(&GITIGNORE) {
1631 let abs_parent_path = self.abs_path.join(path.parent().unwrap());
1632 if let Some((_, scan_id)) = self
1633 .ignores_by_parent_abs_path
1634 .get_mut(abs_parent_path.as_path())
1635 {
1636 *scan_id = self.snapshot.scan_id;
1637 }
1638 } else if path.file_name() == Some(&DOT_GIT) {
1639 let parent_path = path.parent().unwrap();
1640 if let Ok(ix) = self
1641 .git_repositories
1642 .binary_search_by_key(&parent_path, |repo| repo.git_dir_path.as_ref())
1643 {
1644 self.git_repositories[ix].scan_id = self.snapshot.scan_id;
1645 }
1646 }
1647 }
1648
1649 fn ancestor_inodes_for_path(&self, path: &Path) -> TreeSet<u64> {
1650 let mut inodes = TreeSet::default();
1651 for ancestor in path.ancestors().skip(1) {
1652 if let Some(entry) = self.entry_for_path(ancestor) {
1653 inodes.insert(entry.inode);
1654 }
1655 }
1656 inodes
1657 }
1658
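    // Build the stack of gitignore rules that apply to `abs_path` by walking its
    // ancestors from the outermost directory downwards.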
1659 fn ignore_stack_for_abs_path(&self, abs_path: &Path, is_dir: bool) -> Arc<IgnoreStack> {
1660 let mut new_ignores = Vec::new();
1661 for ancestor in abs_path.ancestors().skip(1) {
1662 if let Some((ignore, _)) = self.ignores_by_parent_abs_path.get(ancestor) {
1663 new_ignores.push((ancestor, Some(ignore.clone())));
1664 } else {
1665 new_ignores.push((ancestor, None));
1666 }
1667 }
1668
1669 let mut ignore_stack = IgnoreStack::none();
1670 for (parent_abs_path, ignore) in new_ignores.into_iter().rev() {
1671 if ignore_stack.is_abs_path_ignored(parent_abs_path, true) {
1672 ignore_stack = IgnoreStack::all();
1673 break;
1674 } else if let Some(ignore) = ignore {
1675 ignore_stack = ignore_stack.append(parent_abs_path.into(), ignore);
1676 }
1677 }
1678
1679 if ignore_stack.is_abs_path_ignored(abs_path, is_dir) {
1680 ignore_stack = IgnoreStack::all();
1681 }
1682
1683 ignore_stack
1684 }
1685
1686 pub fn git_repo_entries(&self) -> &[GitRepositoryEntry] {
1687 &self.git_repositories
1688 }
1689}
1690
1691impl GitRepositoryEntry {
1692 // Note that these paths should be relative to the worktree root.
1693 pub(crate) fn manages(&self, path: &Path) -> bool {
1694 path.starts_with(self.content_path.as_ref())
1695 }
1696
    // Note that this path should be relative to the worktree root.
1698 pub(crate) fn in_dot_git(&self, path: &Path) -> bool {
1699 path.starts_with(self.git_dir_path.as_ref())
1700 }
1701}
1702
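// Load and parse a `.gitignore` file into a matcher rooted at its parent directory.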
1703async fn build_gitignore(abs_path: &Path, fs: &dyn Fs) -> Result<Gitignore> {
1704 let contents = fs.load(abs_path).await?;
1705 let parent = abs_path.parent().unwrap_or_else(|| Path::new("/"));
1706 let mut builder = GitignoreBuilder::new(parent);
1707 for line in contents.lines() {
1708 builder.add_line(Some(abs_path.into()), line)?;
1709 }
1710 Ok(builder.build()?)
1711}
1712
1713impl WorktreeId {
1714 pub fn from_usize(handle_id: usize) -> Self {
1715 Self(handle_id)
1716 }
1717
1718 pub(crate) fn from_proto(id: u64) -> Self {
1719 Self(id as usize)
1720 }
1721
1722 pub fn to_proto(&self) -> u64 {
1723 self.0 as u64
1724 }
1725
1726 pub fn to_usize(&self) -> usize {
1727 self.0
1728 }
1729}
1730
1731impl fmt::Display for WorktreeId {
1732 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1733 self.0.fmt(f)
1734 }
1735}
1736
1737impl Deref for Worktree {
1738 type Target = Snapshot;
1739
1740 fn deref(&self) -> &Self::Target {
1741 match self {
1742 Worktree::Local(worktree) => &worktree.snapshot,
1743 Worktree::Remote(worktree) => &worktree.snapshot,
1744 }
1745 }
1746}
1747
1748impl Deref for LocalWorktree {
1749 type Target = LocalSnapshot;
1750
1751 fn deref(&self) -> &Self::Target {
1752 &self.snapshot
1753 }
1754}
1755
1756impl Deref for RemoteWorktree {
1757 type Target = Snapshot;
1758
1759 fn deref(&self) -> &Self::Target {
1760 &self.snapshot
1761 }
1762}
1763
1764impl fmt::Debug for LocalWorktree {
1765 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1766 self.snapshot.fmt(f)
1767 }
1768}
1769
1770impl fmt::Debug for Snapshot {
1771 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1772 struct EntriesById<'a>(&'a SumTree<PathEntry>);
1773 struct EntriesByPath<'a>(&'a SumTree<Entry>);
1774
1775 impl<'a> fmt::Debug for EntriesByPath<'a> {
1776 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1777 f.debug_map()
1778 .entries(self.0.iter().map(|entry| (&entry.path, entry.id)))
1779 .finish()
1780 }
1781 }
1782
1783 impl<'a> fmt::Debug for EntriesById<'a> {
1784 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1785 f.debug_list().entries(self.0.iter()).finish()
1786 }
1787 }
1788
1789 f.debug_struct("Snapshot")
1790 .field("id", &self.id)
1791 .field("root_name", &self.root_name)
1792 .field("entries_by_path", &EntriesByPath(&self.entries_by_path))
1793 .field("entries_by_id", &EntriesById(&self.entries_by_id))
1794 .finish()
1795 }
1796}
1797
1798#[derive(Clone, PartialEq)]
1799pub struct File {
1800 pub worktree: ModelHandle<Worktree>,
1801 pub path: Arc<Path>,
1802 pub mtime: SystemTime,
1803 pub(crate) entry_id: ProjectEntryId,
1804 pub(crate) is_local: bool,
1805 pub(crate) is_deleted: bool,
1806}
1807
1808impl language::File for File {
1809 fn as_local(&self) -> Option<&dyn language::LocalFile> {
1810 if self.is_local {
1811 Some(self)
1812 } else {
1813 None
1814 }
1815 }
1816
1817 fn mtime(&self) -> SystemTime {
1818 self.mtime
1819 }
1820
1821 fn path(&self) -> &Arc<Path> {
1822 &self.path
1823 }
1824
1825 fn full_path(&self, cx: &AppContext) -> PathBuf {
1826 let mut full_path = PathBuf::new();
1827 let worktree = self.worktree.read(cx);
1828
1829 if worktree.is_visible() {
1830 full_path.push(worktree.root_name());
1831 } else {
1832 let path = worktree.abs_path();
1833
1834 if worktree.is_local() && path.starts_with(cx.global::<HomeDir>().as_path()) {
1835 full_path.push("~");
1836 full_path.push(path.strip_prefix(cx.global::<HomeDir>().as_path()).unwrap());
1837 } else {
1838 full_path.push(path)
1839 }
1840 }
1841
1842 if self.path.components().next().is_some() {
1843 full_path.push(&self.path);
1844 }
1845
1846 full_path
1847 }
1848
1849 /// Returns the last component of this handle's absolute path. If this handle refers to the root
1850 /// of its worktree, then this method will return the name of the worktree itself.
1851 fn file_name<'a>(&'a self, cx: &'a AppContext) -> &'a OsStr {
1852 self.path
1853 .file_name()
1854 .unwrap_or_else(|| OsStr::new(&self.worktree.read(cx).root_name))
1855 }
1856
1857 fn is_deleted(&self) -> bool {
1858 self.is_deleted
1859 }
1860
1861 fn save(
1862 &self,
1863 buffer_id: u64,
1864 text: Rope,
1865 version: clock::Global,
1866 line_ending: LineEnding,
1867 cx: &mut MutableAppContext,
1868 ) -> Task<Result<(clock::Global, RopeFingerprint, SystemTime)>> {
1869 self.worktree.update(cx, |worktree, cx| match worktree {
1870 Worktree::Local(worktree) => {
1871 let rpc = worktree.client.clone();
1872 let project_id = worktree.share.as_ref().map(|share| share.project_id);
1873 let fingerprint = text.fingerprint();
1874 let save = worktree.write_file(self.path.clone(), text, line_ending, cx);
1875 cx.background().spawn(async move {
1876 let entry = save.await?;
1877 if let Some(project_id) = project_id {
1878 rpc.send(proto::BufferSaved {
1879 project_id,
1880 buffer_id,
1881 version: serialize_version(&version),
1882 mtime: Some(entry.mtime.into()),
1883 fingerprint: serialize_fingerprint(fingerprint),
1884 })?;
1885 }
1886 Ok((version, fingerprint, entry.mtime))
1887 })
1888 }
1889 Worktree::Remote(worktree) => {
1890 let rpc = worktree.client.clone();
1891 let project_id = worktree.project_id;
1892 cx.foreground().spawn(async move {
1893 let response = rpc
1894 .request(proto::SaveBuffer {
1895 project_id,
1896 buffer_id,
1897 version: serialize_version(&version),
1898 })
1899 .await?;
1900 let version = deserialize_version(response.version);
1901 let fingerprint = deserialize_fingerprint(&response.fingerprint)?;
1902 let mtime = response
1903 .mtime
1904 .ok_or_else(|| anyhow!("missing mtime"))?
1905 .into();
1906 Ok((version, fingerprint, mtime))
1907 })
1908 }
1909 })
1910 }
1911
1912 fn as_any(&self) -> &dyn Any {
1913 self
1914 }
1915
1916 fn to_proto(&self) -> rpc::proto::File {
1917 rpc::proto::File {
1918 worktree_id: self.worktree.id() as u64,
1919 entry_id: self.entry_id.to_proto(),
1920 path: self.path.to_string_lossy().into(),
1921 mtime: Some(self.mtime.into()),
1922 is_deleted: self.is_deleted,
1923 }
1924 }
1925}
1926
1927impl language::LocalFile for File {
1928 fn abs_path(&self, cx: &AppContext) -> PathBuf {
1929 self.worktree
1930 .read(cx)
1931 .as_local()
1932 .unwrap()
1933 .abs_path
1934 .join(&self.path)
1935 }
1936
1937 fn load(&self, cx: &AppContext) -> Task<Result<String>> {
1938 let worktree = self.worktree.read(cx).as_local().unwrap();
1939 let abs_path = worktree.absolutize(&self.path);
1940 let fs = worktree.fs.clone();
1941 cx.background()
1942 .spawn(async move { fs.load(&abs_path).await })
1943 }
1944
1945 fn buffer_reloaded(
1946 &self,
1947 buffer_id: u64,
1948 version: &clock::Global,
1949 fingerprint: RopeFingerprint,
1950 line_ending: LineEnding,
1951 mtime: SystemTime,
1952 cx: &mut MutableAppContext,
1953 ) {
1954 let worktree = self.worktree.read(cx).as_local().unwrap();
1955 if let Some(project_id) = worktree.share.as_ref().map(|share| share.project_id) {
1956 worktree
1957 .client
1958 .send(proto::BufferReloaded {
1959 project_id,
1960 buffer_id,
1961 version: serialize_version(version),
1962 mtime: Some(mtime.into()),
1963 fingerprint: serialize_fingerprint(fingerprint),
1964 line_ending: serialize_line_ending(line_ending) as i32,
1965 })
1966 .log_err();
1967 }
1968 }
1969}
1970
1971impl File {
1972 pub fn from_proto(
1973 proto: rpc::proto::File,
1974 worktree: ModelHandle<Worktree>,
1975 cx: &AppContext,
1976 ) -> Result<Self> {
1977 let worktree_id = worktree
1978 .read(cx)
1979 .as_remote()
1980 .ok_or_else(|| anyhow!("not remote"))?
1981 .id();
1982
1983 if worktree_id.to_proto() != proto.worktree_id {
1984 return Err(anyhow!("worktree id does not match file"));
1985 }
1986
1987 Ok(Self {
1988 worktree,
1989 path: Path::new(&proto.path).into(),
1990 mtime: proto.mtime.ok_or_else(|| anyhow!("no timestamp"))?.into(),
1991 entry_id: ProjectEntryId::from_proto(proto.entry_id),
1992 is_local: false,
1993 is_deleted: proto.is_deleted,
1994 })
1995 }
1996
1997 pub fn from_dyn(file: Option<&Arc<dyn language::File>>) -> Option<&Self> {
1998 file.and_then(|f| f.as_any().downcast_ref())
1999 }
2000
2001 pub fn worktree_id(&self, cx: &AppContext) -> WorktreeId {
2002 self.worktree.read(cx).id()
2003 }
2004
2005 pub fn project_entry_id(&self, _: &AppContext) -> Option<ProjectEntryId> {
2006 if self.is_deleted {
2007 None
2008 } else {
2009 Some(self.entry_id)
2010 }
2011 }
2012}
2013
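/// A single file or directory within a worktree.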
2014#[derive(Clone, Debug, PartialEq, Eq)]
2015pub struct Entry {
2016 pub id: ProjectEntryId,
2017 pub kind: EntryKind,
2018 pub path: Arc<Path>,
2019 pub inode: u64,
2020 pub mtime: SystemTime,
2021 pub is_symlink: bool,
2022 pub is_ignored: bool,
2023}
2024
2025#[derive(Clone, Copy, Debug, PartialEq, Eq)]
2026pub enum EntryKind {
2027 PendingDir,
2028 Dir,
2029 File(CharBag),
2030}
2031
2032impl Entry {
2033 fn new(
2034 path: Arc<Path>,
2035 metadata: &fs::Metadata,
2036 next_entry_id: &AtomicUsize,
2037 root_char_bag: CharBag,
2038 ) -> Self {
2039 Self {
2040 id: ProjectEntryId::new(next_entry_id),
2041 kind: if metadata.is_dir {
2042 EntryKind::PendingDir
2043 } else {
2044 EntryKind::File(char_bag_for_path(root_char_bag, &path))
2045 },
2046 path,
2047 inode: metadata.inode,
2048 mtime: metadata.mtime,
2049 is_symlink: metadata.is_symlink,
2050 is_ignored: false,
2051 }
2052 }
2053
2054 pub fn is_dir(&self) -> bool {
2055 matches!(self.kind, EntryKind::Dir | EntryKind::PendingDir)
2056 }
2057
2058 pub fn is_file(&self) -> bool {
2059 matches!(self.kind, EntryKind::File(_))
2060 }
2061}
2062
2063impl sum_tree::Item for Entry {
2064 type Summary = EntrySummary;
2065
2066 fn summary(&self) -> Self::Summary {
2067 let visible_count = if self.is_ignored { 0 } else { 1 };
2068 let file_count;
2069 let visible_file_count;
2070 if self.is_file() {
2071 file_count = 1;
2072 visible_file_count = visible_count;
2073 } else {
2074 file_count = 0;
2075 visible_file_count = 0;
2076 }
2077
2078 EntrySummary {
2079 max_path: self.path.clone(),
2080 count: 1,
2081 visible_count,
2082 file_count,
2083 visible_file_count,
2084 }
2085 }
2086}
2087
2088impl sum_tree::KeyedItem for Entry {
2089 type Key = PathKey;
2090
2091 fn key(&self) -> Self::Key {
2092 PathKey(self.path.clone())
2093 }
2094}
2095
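/// Aggregate data for a subtree of entries: the greatest path it contains, plus counts of
/// all entries, non-ignored entries, files, and non-ignored files. Used as the sum-tree
/// summary for `Entry`, which lets traversals seek either by path or by offset.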
2096#[derive(Clone, Debug)]
2097pub struct EntrySummary {
2098 max_path: Arc<Path>,
2099 count: usize,
2100 visible_count: usize,
2101 file_count: usize,
2102 visible_file_count: usize,
2103}
2104
2105impl Default for EntrySummary {
2106 fn default() -> Self {
2107 Self {
2108 max_path: Arc::from(Path::new("")),
2109 count: 0,
2110 visible_count: 0,
2111 file_count: 0,
2112 visible_file_count: 0,
2113 }
2114 }
2115}
2116
2117impl sum_tree::Summary for EntrySummary {
2118 type Context = ();
2119
2120 fn add_summary(&mut self, rhs: &Self, _: &()) {
2121 self.max_path = rhs.max_path.clone();
2122 self.count += rhs.count;
2123 self.visible_count += rhs.visible_count;
2124 self.file_count += rhs.file_count;
2125 self.visible_file_count += rhs.visible_file_count;
2126 }
2127}
2128
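/// A secondary index record keyed by `ProjectEntryId`, used to find an entry's current
/// path and ignore status without scanning `entries_by_path`.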
2129#[derive(Clone, Debug)]
2130struct PathEntry {
2131 id: ProjectEntryId,
2132 path: Arc<Path>,
2133 is_ignored: bool,
2134 scan_id: usize,
2135}
2136
2137impl sum_tree::Item for PathEntry {
2138 type Summary = PathEntrySummary;
2139
2140 fn summary(&self) -> Self::Summary {
2141 PathEntrySummary { max_id: self.id }
2142 }
2143}
2144
2145impl sum_tree::KeyedItem for PathEntry {
2146 type Key = ProjectEntryId;
2147
2148 fn key(&self) -> Self::Key {
2149 self.id
2150 }
2151}
2152
2153#[derive(Clone, Debug, Default)]
2154struct PathEntrySummary {
2155 max_id: ProjectEntryId,
2156}
2157
2158impl sum_tree::Summary for PathEntrySummary {
2159 type Context = ();
2160
2161 fn add_summary(&mut self, summary: &Self, _: &Self::Context) {
2162 self.max_id = summary.max_id;
2163 }
2164}
2165
2166impl<'a> sum_tree::Dimension<'a, PathEntrySummary> for ProjectEntryId {
2167 fn add_summary(&mut self, summary: &'a PathEntrySummary, _: &()) {
2168 *self = summary.max_id;
2169 }
2170}
2171
2172#[derive(Clone, Debug, Eq, PartialEq, Ord, PartialOrd)]
2173pub struct PathKey(Arc<Path>);
2174
2175impl Default for PathKey {
2176 fn default() -> Self {
2177 Self(Path::new("").into())
2178 }
2179}
2180
2181impl<'a> sum_tree::Dimension<'a, EntrySummary> for PathKey {
2182 fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
2183 self.0 = summary.max_path.clone();
2184 }
2185}
2186
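/// Scans a local worktree on the background executor, keeping the shared `LocalSnapshot`
/// up to date and reporting progress to the worktree through `notify` as `ScanState`s.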
2187struct BackgroundScanner {
2188 fs: Arc<dyn Fs>,
2189 snapshot: Arc<Mutex<LocalSnapshot>>,
2190 notify: UnboundedSender<ScanState>,
2191 executor: Arc<executor::Background>,
2192}
2193
2194impl BackgroundScanner {
2195 fn new(
2196 snapshot: Arc<Mutex<LocalSnapshot>>,
2197 notify: UnboundedSender<ScanState>,
2198 fs: Arc<dyn Fs>,
2199 executor: Arc<executor::Background>,
2200 ) -> Self {
2201 Self {
2202 fs,
2203 snapshot,
2204 notify,
2205 executor,
2206 }
2207 }
2208
2209 fn abs_path(&self) -> Arc<Path> {
2210 self.snapshot.lock().abs_path.clone()
2211 }
2212
2213 fn snapshot(&self) -> LocalSnapshot {
2214 self.snapshot.lock().clone()
2215 }
2216
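    /// Drive the scanner: report the initial scan, then process incoming file-system
    /// events in batches, emitting a `ScanState` before and after each batch.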
2217 async fn run(mut self, events_rx: impl Stream<Item = Vec<fsevent::Event>>) {
2218 if self.notify.unbounded_send(ScanState::Initializing).is_err() {
2219 return;
2220 }
2221
2222 if let Err(err) = self.scan_dirs().await {
2223 if self
2224 .notify
2225 .unbounded_send(ScanState::Err(Arc::new(err)))
2226 .is_err()
2227 {
2228 return;
2229 }
2230 }
2231
2232 if self.notify.unbounded_send(ScanState::Idle).is_err() {
2233 return;
2234 }
2235
2236 futures::pin_mut!(events_rx);
2237
2238 while let Some(mut events) = events_rx.next().await {
2239 while let Poll::Ready(Some(additional_events)) = futures::poll!(events_rx.next()) {
2240 events.extend(additional_events);
2241 }
2242
2243 if self.notify.unbounded_send(ScanState::Updating).is_err() {
2244 break;
2245 }
2246
2247 if !self.process_events(events).await {
2248 break;
2249 }
2250
2251 if self.notify.unbounded_send(ScanState::Idle).is_err() {
2252 break;
2253 }
2254 }
2255 }
2256
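    /// Perform the initial recursive scan of the worktree root, seeding the ignore stack
    /// from any `.gitignore` files in ancestor directories and fanning directory scans out
    /// across the executor's threads.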
2257 async fn scan_dirs(&mut self) -> Result<()> {
2258 let root_char_bag;
2259 let root_abs_path;
2260 let root_inode;
2261 let is_dir;
2262 let next_entry_id;
2263 {
2264 let mut snapshot = self.snapshot.lock();
2265 snapshot.scan_started();
2266 root_char_bag = snapshot.root_char_bag;
2267 root_abs_path = snapshot.abs_path.clone();
2268 root_inode = snapshot.root_entry().map(|e| e.inode);
2269 is_dir = snapshot.root_entry().map_or(false, |e| e.is_dir());
2270 next_entry_id = snapshot.next_entry_id.clone();
2271 };
2272
2273 // Populate ignores above the root.
2274 for ancestor in root_abs_path.ancestors().skip(1) {
2275 if let Ok(ignore) = build_gitignore(&ancestor.join(&*GITIGNORE), self.fs.as_ref()).await
2276 {
2277 self.snapshot
2278 .lock()
2279 .ignores_by_parent_abs_path
2280 .insert(ancestor.into(), (ignore.into(), 0));
2281 }
2282 }
2283
2284 let ignore_stack = {
2285 let mut snapshot = self.snapshot.lock();
2286 let ignore_stack = snapshot.ignore_stack_for_abs_path(&root_abs_path, true);
2287 if ignore_stack.is_all() {
2288 if let Some(mut root_entry) = snapshot.root_entry().cloned() {
2289 root_entry.is_ignored = true;
2290 snapshot.insert_entry(root_entry, self.fs.as_ref());
2291 }
2292 }
2293 ignore_stack
2294 };
2295
2296 if is_dir {
2297 let path: Arc<Path> = Arc::from(Path::new(""));
2298 let mut ancestor_inodes = TreeSet::default();
2299 if let Some(root_inode) = root_inode {
2300 ancestor_inodes.insert(root_inode);
2301 }
2302
2303 let (tx, rx) = channel::unbounded();
2304 self.executor
2305 .block(tx.send(ScanJob {
2306 abs_path: root_abs_path.to_path_buf(),
2307 path,
2308 ignore_stack,
2309 ancestor_inodes,
2310 scan_queue: tx.clone(),
2311 }))
2312 .unwrap();
2313 drop(tx);
2314
2315 self.executor
2316 .scoped(|scope| {
2317 for _ in 0..self.executor.num_cpus() {
2318 scope.spawn(async {
2319 while let Ok(job) = rx.recv().await {
2320 if let Err(err) = self
2321 .scan_dir(root_char_bag, next_entry_id.clone(), &job)
2322 .await
2323 {
2324 log::error!("error scanning {:?}: {}", job.abs_path, err);
2325 }
2326 }
2327 });
2328 }
2329 })
2330 .await;
2331
2332 self.snapshot.lock().scan_completed();
2333 }
2334
2335 Ok(())
2336 }
2337
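    /// Read a single directory: create entries for its children, push scan jobs for child
    /// directories, and apply any `.gitignore` found in the directory itself.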
2338 async fn scan_dir(
2339 &self,
2340 root_char_bag: CharBag,
2341 next_entry_id: Arc<AtomicUsize>,
2342 job: &ScanJob,
2343 ) -> Result<()> {
2344 let mut new_entries: Vec<Entry> = Vec::new();
2345 let mut new_jobs: Vec<ScanJob> = Vec::new();
2346 let mut ignore_stack = job.ignore_stack.clone();
2347 let mut new_ignore = None;
2348
2349 let mut child_paths = self.fs.read_dir(&job.abs_path).await?;
2350 while let Some(child_abs_path) = child_paths.next().await {
2351 let child_abs_path = match child_abs_path {
2352 Ok(child_abs_path) => child_abs_path,
2353 Err(error) => {
2354 log::error!("error processing entry {:?}", error);
2355 continue;
2356 }
2357 };
2358 let child_name = child_abs_path.file_name().unwrap();
2359 let child_path: Arc<Path> = job.path.join(child_name).into();
2360 let child_metadata = match self.fs.metadata(&child_abs_path).await {
2361 Ok(Some(metadata)) => metadata,
2362 Ok(None) => continue,
2363 Err(err) => {
2364 log::error!("error processing {:?}: {:?}", child_abs_path, err);
2365 continue;
2366 }
2367 };
2368
2369 // If we find a .gitignore, add it to the stack of ignores used to determine which paths are ignored
2370 if child_name == *GITIGNORE {
2371 match build_gitignore(&child_abs_path, self.fs.as_ref()).await {
2372 Ok(ignore) => {
2373 let ignore = Arc::new(ignore);
2374 ignore_stack =
2375 ignore_stack.append(job.abs_path.as_path().into(), ignore.clone());
2376 new_ignore = Some(ignore);
2377 }
2378 Err(error) => {
2379 log::error!(
2380 "error loading .gitignore file {:?} - {:?}",
2381 child_name,
2382 error
2383 );
2384 }
2385 }
2386
2387 // Update ignore status of any child entries we've already processed to reflect the
2388 // ignore file in the current directory. Because `.gitignore` starts with a `.`,
2389 // there should rarely be too many entries to update. Update the ignore stack
2390 // associated with any new jobs as well.
2391 let mut new_jobs = new_jobs.iter_mut();
2392 for entry in &mut new_entries {
2393 let entry_abs_path = self.abs_path().join(&entry.path);
2394 entry.is_ignored =
2395 ignore_stack.is_abs_path_ignored(&entry_abs_path, entry.is_dir());
2396 if entry.is_dir() {
2397 new_jobs.next().unwrap().ignore_stack = if entry.is_ignored {
2398 IgnoreStack::all()
2399 } else {
2400 ignore_stack.clone()
2401 };
2402 }
2403 }
2404 }
2405
2406 let mut child_entry = Entry::new(
2407 child_path.clone(),
2408 &child_metadata,
2409 &next_entry_id,
2410 root_char_bag,
2411 );
2412
2413 if child_entry.is_dir() {
2414 let is_ignored = ignore_stack.is_abs_path_ignored(&child_abs_path, true);
2415 child_entry.is_ignored = is_ignored;
2416
2417 if !job.ancestor_inodes.contains(&child_entry.inode) {
2418 let mut ancestor_inodes = job.ancestor_inodes.clone();
2419 ancestor_inodes.insert(child_entry.inode);
2420 new_jobs.push(ScanJob {
2421 abs_path: child_abs_path,
2422 path: child_path,
2423 ignore_stack: if is_ignored {
2424 IgnoreStack::all()
2425 } else {
2426 ignore_stack.clone()
2427 },
2428 ancestor_inodes,
2429 scan_queue: job.scan_queue.clone(),
2430 });
2431 }
2432 } else {
2433 child_entry.is_ignored = ignore_stack.is_abs_path_ignored(&child_abs_path, false);
2434 }
2435
2436 new_entries.push(child_entry);
2437 }
2438
2439 self.snapshot.lock().populate_dir(
2440 job.path.clone(),
2441 new_entries,
2442 new_ignore,
2443 self.fs.as_ref(),
2444 );
2445 for new_job in new_jobs {
2446 job.scan_queue.send(new_job).await.unwrap();
2447 }
2448
2449 Ok(())
2450 }
2451
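    /// Apply a batch of file-system events to the snapshot: remove the affected paths,
    /// re-insert entries that still exist on disk, rescan newly created directories, and
    /// refresh ignore statuses and git repositories. Returns `false` if the worktree root
    /// can no longer be canonicalized.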
2452 async fn process_events(&mut self, mut events: Vec<fsevent::Event>) -> bool {
2453 events.sort_unstable_by(|a, b| a.path.cmp(&b.path));
2454 events.dedup_by(|a, b| a.path.starts_with(&b.path));
2455
2456 let root_char_bag;
2457 let root_abs_path;
2458 let next_entry_id;
2459 {
2460 let mut snapshot = self.snapshot.lock();
2461 snapshot.scan_started();
2462 root_char_bag = snapshot.root_char_bag;
2463 root_abs_path = snapshot.abs_path.clone();
2464 next_entry_id = snapshot.next_entry_id.clone();
2465 }
2466
2467 let root_canonical_path = if let Ok(path) = self.fs.canonicalize(&root_abs_path).await {
2468 path
2469 } else {
2470 return false;
2471 };
2472 let metadata = futures::future::join_all(
2473 events
2474 .iter()
2475 .map(|event| self.fs.metadata(&event.path))
2476 .collect::<Vec<_>>(),
2477 )
2478 .await;
2479
2480 // Hold the snapshot lock while clearing and re-inserting the root entries
2481 // for each event. This way, the snapshot is not observable to the foreground
2482 // thread while this operation is in-progress.
2483 let (scan_queue_tx, scan_queue_rx) = channel::unbounded();
2484 {
2485 let mut snapshot = self.snapshot.lock();
2486 for event in &events {
2487 if let Ok(path) = event.path.strip_prefix(&root_canonical_path) {
2488 snapshot.remove_path(path);
2489 }
2490 }
2491
2492 for (event, metadata) in events.into_iter().zip(metadata.into_iter()) {
2493 let path: Arc<Path> = match event.path.strip_prefix(&root_canonical_path) {
2494 Ok(path) => Arc::from(path.to_path_buf()),
2495 Err(_) => {
2496 log::error!(
2497 "unexpected event {:?} for root path {:?}",
2498 event.path,
2499 root_canonical_path
2500 );
2501 continue;
2502 }
2503 };
2504 let abs_path = root_abs_path.join(&path);
2505
2506 match metadata {
2507 Ok(Some(metadata)) => {
2508 let ignore_stack =
2509 snapshot.ignore_stack_for_abs_path(&abs_path, metadata.is_dir);
2510 let mut fs_entry = Entry::new(
2511 path.clone(),
2512 &metadata,
2513 snapshot.next_entry_id.as_ref(),
2514 snapshot.root_char_bag,
2515 );
2516 fs_entry.is_ignored = ignore_stack.is_all();
2517 snapshot.insert_entry(fs_entry, self.fs.as_ref());
2518
2519 let scan_id = snapshot.scan_id;
2520 if let Some(repo) = snapshot.in_dot_git(&path) {
2521 repo.repo.lock().reload_index();
2522 repo.scan_id = scan_id;
2523 }
2524
2525 let mut ancestor_inodes = snapshot.ancestor_inodes_for_path(&path);
2526 if metadata.is_dir && !ancestor_inodes.contains(&metadata.inode) {
2527 ancestor_inodes.insert(metadata.inode);
2528 self.executor
2529 .block(scan_queue_tx.send(ScanJob {
2530 abs_path,
2531 path,
2532 ignore_stack,
2533 ancestor_inodes,
2534 scan_queue: scan_queue_tx.clone(),
2535 }))
2536 .unwrap();
2537 }
2538 }
2539 Ok(None) => {}
2540 Err(err) => {
2541 // TODO - create a special 'error' entry in the entries tree to mark this
2542 log::error!("error reading file on event {:?}", err);
2543 }
2544 }
2545 }
2546 drop(scan_queue_tx);
2547 }
2548
2549 // Scan any directories that were created as part of this event batch.
2550 self.executor
2551 .scoped(|scope| {
2552 for _ in 0..self.executor.num_cpus() {
2553 scope.spawn(async {
2554 while let Ok(job) = scan_queue_rx.recv().await {
2555 if let Err(err) = self
2556 .scan_dir(root_char_bag, next_entry_id.clone(), &job)
2557 .await
2558 {
2559 log::error!("error scanning {:?}: {}", job.abs_path, err);
2560 }
2561 }
2562 });
2563 }
2564 })
2565 .await;
2566
2567 // Attempt to detect renames only over a single batch of file-system events.
2568 self.snapshot.lock().removed_entry_ids.clear();
2569
2570 self.update_ignore_statuses().await;
2571 self.update_git_repositories();
2572 self.snapshot.lock().scan_completed();
2573 true
2574 }
2575
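    /// Recompute ignore statuses beneath any directories whose `.gitignore` changed during
    /// this scan, and forget `.gitignore` files that no longer exist on disk.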
2576 async fn update_ignore_statuses(&self) {
2577 let mut snapshot = self.snapshot();
2578
2579 let mut ignores_to_update = Vec::new();
2580 let mut ignores_to_delete = Vec::new();
2581 for (parent_abs_path, (_, scan_id)) in &snapshot.ignores_by_parent_abs_path {
2582 if let Ok(parent_path) = parent_abs_path.strip_prefix(&snapshot.abs_path) {
2583 if *scan_id == snapshot.scan_id && snapshot.entry_for_path(parent_path).is_some() {
2584 ignores_to_update.push(parent_abs_path.clone());
2585 }
2586
2587 let ignore_path = parent_path.join(&*GITIGNORE);
2588 if snapshot.entry_for_path(ignore_path).is_none() {
2589 ignores_to_delete.push(parent_abs_path.clone());
2590 }
2591 }
2592 }
2593
2594 for parent_abs_path in ignores_to_delete {
2595 snapshot.ignores_by_parent_abs_path.remove(&parent_abs_path);
2596 self.snapshot
2597 .lock()
2598 .ignores_by_parent_abs_path
2599 .remove(&parent_abs_path);
2600 }
2601
2602 let (ignore_queue_tx, ignore_queue_rx) = channel::unbounded();
2603 ignores_to_update.sort_unstable();
2604 let mut ignores_to_update = ignores_to_update.into_iter().peekable();
2605 while let Some(parent_abs_path) = ignores_to_update.next() {
2606 while ignores_to_update
2607 .peek()
2608 .map_or(false, |p| p.starts_with(&parent_abs_path))
2609 {
2610 ignores_to_update.next().unwrap();
2611 }
2612
2613 let ignore_stack = snapshot.ignore_stack_for_abs_path(&parent_abs_path, true);
2614 ignore_queue_tx
2615 .send(UpdateIgnoreStatusJob {
2616 abs_path: parent_abs_path,
2617 ignore_stack,
2618 ignore_queue: ignore_queue_tx.clone(),
2619 })
2620 .await
2621 .unwrap();
2622 }
2623 drop(ignore_queue_tx);
2624
2625 self.executor
2626 .scoped(|scope| {
2627 for _ in 0..self.executor.num_cpus() {
2628 scope.spawn(async {
2629 while let Ok(job) = ignore_queue_rx.recv().await {
2630 self.update_ignore_status(job, &snapshot).await;
2631 }
2632 });
2633 }
2634 })
2635 .await;
2636 }
2637
2638 fn update_git_repositories(&self) {
2639 let mut snapshot = self.snapshot.lock();
2640 let mut git_repositories = mem::take(&mut snapshot.git_repositories);
2641 git_repositories.retain(|repo| snapshot.entry_for_path(&repo.git_dir_path).is_some());
2642 snapshot.git_repositories = git_repositories;
2643 }
2644
2645 async fn update_ignore_status(&self, job: UpdateIgnoreStatusJob, snapshot: &LocalSnapshot) {
2646 let mut ignore_stack = job.ignore_stack;
2647 if let Some((ignore, _)) = snapshot.ignores_by_parent_abs_path.get(&job.abs_path) {
2648 ignore_stack = ignore_stack.append(job.abs_path.clone(), ignore.clone());
2649 }
2650
2651 let mut entries_by_id_edits = Vec::new();
2652 let mut entries_by_path_edits = Vec::new();
2653 let path = job.abs_path.strip_prefix(&snapshot.abs_path).unwrap();
2654 for mut entry in snapshot.child_entries(path).cloned() {
2655 let was_ignored = entry.is_ignored;
2656 let abs_path = self.abs_path().join(&entry.path);
2657 entry.is_ignored = ignore_stack.is_abs_path_ignored(&abs_path, entry.is_dir());
2658 if entry.is_dir() {
2659 let child_ignore_stack = if entry.is_ignored {
2660 IgnoreStack::all()
2661 } else {
2662 ignore_stack.clone()
2663 };
2664 job.ignore_queue
2665 .send(UpdateIgnoreStatusJob {
2666 abs_path: abs_path.into(),
2667 ignore_stack: child_ignore_stack,
2668 ignore_queue: job.ignore_queue.clone(),
2669 })
2670 .await
2671 .unwrap();
2672 }
2673
2674 if entry.is_ignored != was_ignored {
2675 let mut path_entry = snapshot.entries_by_id.get(&entry.id, &()).unwrap().clone();
2676 path_entry.scan_id = snapshot.scan_id;
2677 path_entry.is_ignored = entry.is_ignored;
2678 entries_by_id_edits.push(Edit::Insert(path_entry));
2679 entries_by_path_edits.push(Edit::Insert(entry));
2680 }
2681 }
2682
2683 let mut snapshot = self.snapshot.lock();
2684 snapshot.entries_by_path.edit(entries_by_path_edits, &());
2685 snapshot.entries_by_id.edit(entries_by_id_edits, &());
2686 }
2687}
2688
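/// Build the fuzzy-matching character bag for an entry by extending the root's bag with
/// the lowercased characters of the entry's path.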
2689fn char_bag_for_path(root_char_bag: CharBag, path: &Path) -> CharBag {
2690 let mut result = root_char_bag;
2691 result.extend(
2692 path.to_string_lossy()
2693 .chars()
2694 .map(|c| c.to_ascii_lowercase()),
2695 );
2696 result
2697}
2698
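/// A directory queued for scanning, along with the ignore stack and ancestor inodes in
/// effect there. The ancestor inodes guard against cycles introduced by symlinks.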
2699struct ScanJob {
2700 abs_path: PathBuf,
2701 path: Arc<Path>,
2702 ignore_stack: Arc<IgnoreStack>,
2703 scan_queue: Sender<ScanJob>,
2704 ancestor_inodes: TreeSet<u64>,
2705}
2706
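/// A directory queued for re-evaluating ignore statuses after a `.gitignore` change.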
2707struct UpdateIgnoreStatusJob {
2708 abs_path: Arc<Path>,
2709 ignore_stack: Arc<IgnoreStack>,
2710 ignore_queue: Sender<UpdateIgnoreStatusJob>,
2711}
2712
2713pub trait WorktreeHandle {
2714 #[cfg(any(test, feature = "test-support"))]
2715 fn flush_fs_events<'a>(
2716 &self,
2717 cx: &'a gpui::TestAppContext,
2718 ) -> futures::future::LocalBoxFuture<'a, ()>;
2719}
2720
2721impl WorktreeHandle for ModelHandle<Worktree> {
2722 // The worktree's FS event stream sometimes delivers "redundant" events for FS changes that
2723 // occurred before the worktree was constructed. These events can cause the worktree to perform
2724 // extra directory scans, and emit extra scan-state notifications.
2725 //
2726 // This function mutates the worktree's directory and waits for those mutations to be picked up,
2727 // to ensure that all redundant FS events have already been processed.
2728 #[cfg(any(test, feature = "test-support"))]
2729 fn flush_fs_events<'a>(
2730 &self,
2731 cx: &'a gpui::TestAppContext,
2732 ) -> futures::future::LocalBoxFuture<'a, ()> {
2733 use smol::future::FutureExt;
2734
2735 let filename = "fs-event-sentinel";
2736 let tree = self.clone();
2737 let (fs, root_path) = self.read_with(cx, |tree, _| {
2738 let tree = tree.as_local().unwrap();
2739 (tree.fs.clone(), tree.abs_path().clone())
2740 });
2741
2742 async move {
2743 fs.create_file(&root_path.join(filename), Default::default())
2744 .await
2745 .unwrap();
2746 tree.condition(cx, |tree, _| tree.entry_for_path(filename).is_some())
2747 .await;
2748
2749 fs.remove_file(&root_path.join(filename), Default::default())
2750 .await
2751 .unwrap();
2752 tree.condition(cx, |tree, _| tree.entry_for_path(filename).is_none())
2753 .await;
2754
2755 cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
2756 .await;
2757 }
2758 .boxed_local()
2759 }
2760}
2761
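/// A cursor dimension recording how many entries of each kind (all, visible, files,
/// visible files) precede the current position, along with the greatest path seen so far.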
2762#[derive(Clone, Debug)]
2763struct TraversalProgress<'a> {
2764 max_path: &'a Path,
2765 count: usize,
2766 visible_count: usize,
2767 file_count: usize,
2768 visible_file_count: usize,
2769}
2770
2771impl<'a> TraversalProgress<'a> {
2772 fn count(&self, include_dirs: bool, include_ignored: bool) -> usize {
2773 match (include_ignored, include_dirs) {
2774 (true, true) => self.count,
2775 (true, false) => self.file_count,
2776 (false, true) => self.visible_count,
2777 (false, false) => self.visible_file_count,
2778 }
2779 }
2780}
2781
2782impl<'a> sum_tree::Dimension<'a, EntrySummary> for TraversalProgress<'a> {
2783 fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
2784 self.max_path = summary.max_path.as_ref();
2785 self.count += summary.count;
2786 self.visible_count += summary.visible_count;
2787 self.file_count += summary.file_count;
2788 self.visible_file_count += summary.visible_file_count;
2789 }
2790}
2791
2792impl<'a> Default for TraversalProgress<'a> {
2793 fn default() -> Self {
2794 Self {
2795 max_path: Path::new(""),
2796 count: 0,
2797 visible_count: 0,
2798 file_count: 0,
2799 visible_file_count: 0,
2800 }
2801 }
2802}
2803
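/// An iterator over a snapshot's entries in path order, optionally skipping directories
/// and ignored entries.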
2804pub struct Traversal<'a> {
2805 cursor: sum_tree::Cursor<'a, Entry, TraversalProgress<'a>>,
2806 include_ignored: bool,
2807 include_dirs: bool,
2808}
2809
2810impl<'a> Traversal<'a> {
2811 pub fn advance(&mut self) -> bool {
2812 self.advance_to_offset(self.offset() + 1)
2813 }
2814
2815 pub fn advance_to_offset(&mut self, offset: usize) -> bool {
2816 self.cursor.seek_forward(
2817 &TraversalTarget::Count {
2818 count: offset,
2819 include_dirs: self.include_dirs,
2820 include_ignored: self.include_ignored,
2821 },
2822 Bias::Right,
2823 &(),
2824 )
2825 }
2826
2827 pub fn advance_to_sibling(&mut self) -> bool {
2828 while let Some(entry) = self.cursor.item() {
2829 self.cursor.seek_forward(
2830 &TraversalTarget::PathSuccessor(&entry.path),
2831 Bias::Left,
2832 &(),
2833 );
2834 if let Some(entry) = self.cursor.item() {
2835 if (self.include_dirs || !entry.is_dir())
2836 && (self.include_ignored || !entry.is_ignored)
2837 {
2838 return true;
2839 }
2840 }
2841 }
2842 false
2843 }
2844
2845 pub fn entry(&self) -> Option<&'a Entry> {
2846 self.cursor.item()
2847 }
2848
2849 pub fn offset(&self) -> usize {
2850 self.cursor
2851 .start()
2852 .count(self.include_dirs, self.include_ignored)
2853 }
2854}
2855
2856impl<'a> Iterator for Traversal<'a> {
2857 type Item = &'a Entry;
2858
2859 fn next(&mut self) -> Option<Self::Item> {
2860 if let Some(item) = self.entry() {
2861 self.advance();
2862 Some(item)
2863 } else {
2864 None
2865 }
2866 }
2867}
2868
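/// Seek targets for a `Traversal`: a specific path, the first path that is not a
/// descendant of the given path, or an absolute offset within the filtered ordering.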
2869#[derive(Debug)]
2870enum TraversalTarget<'a> {
2871 Path(&'a Path),
2872 PathSuccessor(&'a Path),
2873 Count {
2874 count: usize,
2875 include_ignored: bool,
2876 include_dirs: bool,
2877 },
2878}
2879
2880impl<'a, 'b> SeekTarget<'a, EntrySummary, TraversalProgress<'a>> for TraversalTarget<'b> {
2881 fn cmp(&self, cursor_location: &TraversalProgress<'a>, _: &()) -> Ordering {
2882 match self {
2883 TraversalTarget::Path(path) => path.cmp(&cursor_location.max_path),
2884 TraversalTarget::PathSuccessor(path) => {
2885 if !cursor_location.max_path.starts_with(path) {
2886 Ordering::Equal
2887 } else {
2888 Ordering::Greater
2889 }
2890 }
2891 TraversalTarget::Count {
2892 count,
2893 include_dirs,
2894 include_ignored,
2895 } => Ord::cmp(
2896 count,
2897 &cursor_location.count(*include_dirs, *include_ignored),
2898 ),
2899 }
2900 }
2901}
2902
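/// Iterates over the direct children of `parent_path` by repeatedly advancing a
/// `Traversal` to the next sibling.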
2903struct ChildEntriesIter<'a> {
2904 parent_path: &'a Path,
2905 traversal: Traversal<'a>,
2906}
2907
2908impl<'a> Iterator for ChildEntriesIter<'a> {
2909 type Item = &'a Entry;
2910
2911 fn next(&mut self) -> Option<Self::Item> {
2912 if let Some(item) = self.traversal.entry() {
2913 if item.path.starts_with(&self.parent_path) {
2914 self.traversal.advance_to_sibling();
2915 return Some(item);
2916 }
2917 }
2918 None
2919 }
2920}
2921
2922impl<'a> From<&'a Entry> for proto::Entry {
2923 fn from(entry: &'a Entry) -> Self {
2924 Self {
2925 id: entry.id.to_proto(),
2926 is_dir: entry.is_dir(),
2927 path: entry.path.to_string_lossy().into(),
2928 inode: entry.inode,
2929 mtime: Some(entry.mtime.into()),
2930 is_symlink: entry.is_symlink,
2931 is_ignored: entry.is_ignored,
2932 }
2933 }
2934}
2935
2936impl<'a> TryFrom<(&'a CharBag, proto::Entry)> for Entry {
2937 type Error = anyhow::Error;
2938
2939 fn try_from((root_char_bag, entry): (&'a CharBag, proto::Entry)) -> Result<Self> {
2940 if let Some(mtime) = entry.mtime {
2941 let kind = if entry.is_dir {
2942 EntryKind::Dir
2943 } else {
2944 let mut char_bag = *root_char_bag;
2945 char_bag.extend(entry.path.chars().map(|c| c.to_ascii_lowercase()));
2946 EntryKind::File(char_bag)
2947 };
2948 let path: Arc<Path> = PathBuf::from(entry.path).into();
2949 Ok(Entry {
2950 id: ProjectEntryId::from_proto(entry.id),
2951 kind,
2952 path,
2953 inode: entry.inode,
2954 mtime: mtime.into(),
2955 is_symlink: entry.is_symlink,
2956 is_ignored: entry.is_ignored,
2957 })
2958 } else {
2959 Err(anyhow!(
2960 "missing mtime in remote worktree entry {:?}",
2961 entry.path
2962 ))
2963 }
2964 }
2965}
2966
2967#[cfg(test)]
2968mod tests {
2969 use super::*;
2970 use anyhow::Result;
2971 use client::test::FakeHttpClient;
2972 use fs::repository::FakeGitRepository;
2973 use fs::{FakeFs, RealFs};
2974 use gpui::{executor::Deterministic, TestAppContext};
2975 use rand::prelude::*;
2976 use serde_json::json;
2977 use std::{
2978 env,
2979 fmt::Write,
2980 time::{SystemTime, UNIX_EPOCH},
2981 };
2982
2983 use util::test::temp_tree;
2984
2985 #[gpui::test]
2986 async fn test_traversal(cx: &mut TestAppContext) {
2987 let fs = FakeFs::new(cx.background());
2988 fs.insert_tree(
2989 "/root",
2990 json!({
2991 ".gitignore": "a/b\n",
2992 "a": {
2993 "b": "",
2994 "c": "",
2995 }
2996 }),
2997 )
2998 .await;
2999
3000 let http_client = FakeHttpClient::with_404_response();
3001 let client = cx.read(|cx| Client::new(http_client, cx));
3002
3003 let tree = Worktree::local(
3004 client,
3005 Arc::from(Path::new("/root")),
3006 true,
3007 fs,
3008 Default::default(),
3009 &mut cx.to_async(),
3010 )
3011 .await
3012 .unwrap();
3013 cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
3014 .await;
3015
3016 tree.read_with(cx, |tree, _| {
3017 assert_eq!(
3018 tree.entries(false)
3019 .map(|entry| entry.path.as_ref())
3020 .collect::<Vec<_>>(),
3021 vec![
3022 Path::new(""),
3023 Path::new(".gitignore"),
3024 Path::new("a"),
3025 Path::new("a/c"),
3026 ]
3027 );
3028 assert_eq!(
3029 tree.entries(true)
3030 .map(|entry| entry.path.as_ref())
3031 .collect::<Vec<_>>(),
3032 vec![
3033 Path::new(""),
3034 Path::new(".gitignore"),
3035 Path::new("a"),
3036 Path::new("a/b"),
3037 Path::new("a/c"),
3038 ]
3039 );
3040 })
3041 }
3042
3043 #[gpui::test(iterations = 10)]
3044 async fn test_circular_symlinks(executor: Arc<Deterministic>, cx: &mut TestAppContext) {
3045 let fs = FakeFs::new(cx.background());
3046 fs.insert_tree(
3047 "/root",
3048 json!({
3049 "lib": {
3050 "a": {
3051 "a.txt": ""
3052 },
3053 "b": {
3054 "b.txt": ""
3055 }
3056 }
3057 }),
3058 )
3059 .await;
3060 fs.insert_symlink("/root/lib/a/lib", "..".into()).await;
3061 fs.insert_symlink("/root/lib/b/lib", "..".into()).await;
3062
3063 let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
3064 let tree = Worktree::local(
3065 client,
3066 Arc::from(Path::new("/root")),
3067 true,
3068 fs.clone(),
3069 Default::default(),
3070 &mut cx.to_async(),
3071 )
3072 .await
3073 .unwrap();
3074
3075 cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
3076 .await;
3077
3078 tree.read_with(cx, |tree, _| {
3079 assert_eq!(
3080 tree.entries(false)
3081 .map(|entry| entry.path.as_ref())
3082 .collect::<Vec<_>>(),
3083 vec![
3084 Path::new(""),
3085 Path::new("lib"),
3086 Path::new("lib/a"),
3087 Path::new("lib/a/a.txt"),
3088 Path::new("lib/a/lib"),
3089 Path::new("lib/b"),
3090 Path::new("lib/b/b.txt"),
3091 Path::new("lib/b/lib"),
3092 ]
3093 );
3094 });
3095
3096 fs.rename(
3097 Path::new("/root/lib/a/lib"),
3098 Path::new("/root/lib/a/lib-2"),
3099 Default::default(),
3100 )
3101 .await
3102 .unwrap();
3103 executor.run_until_parked();
3104 tree.read_with(cx, |tree, _| {
3105 assert_eq!(
3106 tree.entries(false)
3107 .map(|entry| entry.path.as_ref())
3108 .collect::<Vec<_>>(),
3109 vec![
3110 Path::new(""),
3111 Path::new("lib"),
3112 Path::new("lib/a"),
3113 Path::new("lib/a/a.txt"),
3114 Path::new("lib/a/lib-2"),
3115 Path::new("lib/b"),
3116 Path::new("lib/b/b.txt"),
3117 Path::new("lib/b/lib"),
3118 ]
3119 );
3120 });
3121 }
3122
3123 #[gpui::test]
3124 async fn test_rescan_with_gitignore(cx: &mut TestAppContext) {
3125 let parent_dir = temp_tree(json!({
3126 ".gitignore": "ancestor-ignored-file1\nancestor-ignored-file2\n",
3127 "tree": {
3128 ".git": {},
3129 ".gitignore": "ignored-dir\n",
3130 "tracked-dir": {
3131 "tracked-file1": "",
3132 "ancestor-ignored-file1": "",
3133 },
3134 "ignored-dir": {
3135 "ignored-file1": ""
3136 }
3137 }
3138 }));
3139 let dir = parent_dir.path().join("tree");
3140
3141 let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
3142
3143 let tree = Worktree::local(
3144 client,
3145 dir.as_path(),
3146 true,
3147 Arc::new(RealFs),
3148 Default::default(),
3149 &mut cx.to_async(),
3150 )
3151 .await
3152 .unwrap();
3153 cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
3154 .await;
3155 tree.flush_fs_events(cx).await;
3156 cx.read(|cx| {
3157 let tree = tree.read(cx);
3158 assert!(
3159 !tree
3160 .entry_for_path("tracked-dir/tracked-file1")
3161 .unwrap()
3162 .is_ignored
3163 );
3164 assert!(
3165 tree.entry_for_path("tracked-dir/ancestor-ignored-file1")
3166 .unwrap()
3167 .is_ignored
3168 );
3169 assert!(
3170 tree.entry_for_path("ignored-dir/ignored-file1")
3171 .unwrap()
3172 .is_ignored
3173 );
3174 });
3175
3176 std::fs::write(dir.join("tracked-dir/tracked-file2"), "").unwrap();
3177 std::fs::write(dir.join("tracked-dir/ancestor-ignored-file2"), "").unwrap();
3178 std::fs::write(dir.join("ignored-dir/ignored-file2"), "").unwrap();
3179 tree.flush_fs_events(cx).await;
3180 cx.read(|cx| {
3181 let tree = tree.read(cx);
3182 assert!(
3183 !tree
3184 .entry_for_path("tracked-dir/tracked-file2")
3185 .unwrap()
3186 .is_ignored
3187 );
3188 assert!(
3189 tree.entry_for_path("tracked-dir/ancestor-ignored-file2")
3190 .unwrap()
3191 .is_ignored
3192 );
3193 assert!(
3194 tree.entry_for_path("ignored-dir/ignored-file2")
3195 .unwrap()
3196 .is_ignored
3197 );
3198 assert!(tree.entry_for_path(".git").unwrap().is_ignored);
3199 });
3200 }
3201
3202 #[gpui::test]
3203 async fn test_git_repository_for_path(cx: &mut TestAppContext) {
3204 let root = temp_tree(json!({
3205 "dir1": {
3206 ".git": {},
3207 "deps": {
3208 "dep1": {
3209 ".git": {},
3210 "src": {
3211 "a.txt": ""
3212 }
3213 }
3214 },
3215 "src": {
3216 "b.txt": ""
3217 }
3218 },
3219 "c.txt": "",
3220 }));
3221
3222 let http_client = FakeHttpClient::with_404_response();
3223 let client = cx.read(|cx| Client::new(http_client, cx));
3224 let tree = Worktree::local(
3225 client,
3226 root.path(),
3227 true,
3228 Arc::new(RealFs),
3229 Default::default(),
3230 &mut cx.to_async(),
3231 )
3232 .await
3233 .unwrap();
3234
3235 cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
3236 .await;
3237 tree.flush_fs_events(cx).await;
3238
3239 tree.read_with(cx, |tree, _cx| {
3240 let tree = tree.as_local().unwrap();
3241
3242 assert!(tree.repo_for("c.txt".as_ref()).is_none());
3243
3244 let repo = tree.repo_for("dir1/src/b.txt".as_ref()).unwrap();
3245 assert_eq!(repo.content_path.as_ref(), Path::new("dir1"));
3246 assert_eq!(repo.git_dir_path.as_ref(), Path::new("dir1/.git"));
3247
3248 let repo = tree.repo_for("dir1/deps/dep1/src/a.txt".as_ref()).unwrap();
3249 assert_eq!(repo.content_path.as_ref(), Path::new("dir1/deps/dep1"));
3250 assert_eq!(repo.git_dir_path.as_ref(), Path::new("dir1/deps/dep1/.git"),);
3251 });
3252
3253 let original_scan_id = tree.read_with(cx, |tree, _cx| {
3254 let tree = tree.as_local().unwrap();
3255 tree.repo_for("dir1/src/b.txt".as_ref()).unwrap().scan_id
3256 });
3257
3258 std::fs::write(root.path().join("dir1/.git/random_new_file"), "hello").unwrap();
3259 tree.flush_fs_events(cx).await;
3260
3261 tree.read_with(cx, |tree, _cx| {
3262 let tree = tree.as_local().unwrap();
3263 let new_scan_id = tree.repo_for("dir1/src/b.txt".as_ref()).unwrap().scan_id;
3264 assert_ne!(
3265 original_scan_id, new_scan_id,
3266 "original {original_scan_id}, new {new_scan_id}"
3267 );
3268 });
3269
3270 std::fs::remove_dir_all(root.path().join("dir1/.git")).unwrap();
3271 tree.flush_fs_events(cx).await;
3272
3273 tree.read_with(cx, |tree, _cx| {
3274 let tree = tree.as_local().unwrap();
3275
3276 assert!(tree.repo_for("dir1/src/b.txt".as_ref()).is_none());
3277 });
3278 }
3279
3280 #[test]
3281 fn test_changed_repos() {
3282 fn fake_entry(git_dir_path: impl AsRef<Path>, scan_id: usize) -> GitRepositoryEntry {
3283 GitRepositoryEntry {
3284 repo: Arc::new(Mutex::new(FakeGitRepository::default())),
3285 scan_id,
3286 content_path: git_dir_path.as_ref().parent().unwrap().into(),
3287 git_dir_path: git_dir_path.as_ref().into(),
3288 }
3289 }
3290
3291 let prev_repos: Vec<GitRepositoryEntry> = vec![
3292 fake_entry("/.git", 0),
3293 fake_entry("/a/.git", 0),
3294 fake_entry("/a/b/.git", 0),
3295 ];
3296
3297 let new_repos: Vec<GitRepositoryEntry> = vec![
3298 fake_entry("/a/.git", 1),
3299 fake_entry("/a/b/.git", 0),
3300 fake_entry("/a/c/.git", 0),
3301 ];
3302
3303 let res = LocalWorktree::changed_repos(&prev_repos, &new_repos);
3304
3305 // Deletion retained
3306 assert!(res
3307 .iter()
3308 .find(|repo| repo.git_dir_path.as_ref() == Path::new("/.git") && repo.scan_id == 0)
3309 .is_some());
3310
3311 // Update retained
3312 assert!(res
3313 .iter()
3314 .find(|repo| repo.git_dir_path.as_ref() == Path::new("/a/.git") && repo.scan_id == 1)
3315 .is_some());
3316
3317 // Addition retained
3318 assert!(res
3319 .iter()
3320 .find(|repo| repo.git_dir_path.as_ref() == Path::new("/a/c/.git") && repo.scan_id == 0)
3321 .is_some());
3322
3323 // No change, not retained
3324 assert!(res
3325 .iter()
3326 .find(|repo| repo.git_dir_path.as_ref() == Path::new("/a/b/.git") && repo.scan_id == 0)
3327 .is_none());
3328 }
3329
3330 #[gpui::test]
3331 async fn test_write_file(cx: &mut TestAppContext) {
3332 let dir = temp_tree(json!({
3333 ".git": {},
3334 ".gitignore": "ignored-dir\n",
3335 "tracked-dir": {},
3336 "ignored-dir": {}
3337 }));
3338
3339 let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
3340
3341 let tree = Worktree::local(
3342 client,
3343 dir.path(),
3344 true,
3345 Arc::new(RealFs),
3346 Default::default(),
3347 &mut cx.to_async(),
3348 )
3349 .await
3350 .unwrap();
3351 cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
3352 .await;
3353 tree.flush_fs_events(cx).await;
3354
3355 tree.update(cx, |tree, cx| {
3356 tree.as_local().unwrap().write_file(
3357 Path::new("tracked-dir/file.txt"),
3358 "hello".into(),
3359 Default::default(),
3360 cx,
3361 )
3362 })
3363 .await
3364 .unwrap();
3365 tree.update(cx, |tree, cx| {
3366 tree.as_local().unwrap().write_file(
3367 Path::new("ignored-dir/file.txt"),
3368 "world".into(),
3369 Default::default(),
3370 cx,
3371 )
3372 })
3373 .await
3374 .unwrap();
3375
3376 tree.read_with(cx, |tree, _| {
3377 let tracked = tree.entry_for_path("tracked-dir/file.txt").unwrap();
3378 let ignored = tree.entry_for_path("ignored-dir/file.txt").unwrap();
3379 assert!(!tracked.is_ignored);
3380 assert!(ignored.is_ignored);
3381 });
3382 }
3383
3384 #[gpui::test(iterations = 30)]
3385 async fn test_create_directory(cx: &mut TestAppContext) {
3386 let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
3387
3388 let fs = FakeFs::new(cx.background());
3389 fs.insert_tree(
3390 "/a",
3391 json!({
3392 "b": {},
3393 "c": {},
3394 "d": {},
3395 }),
3396 )
3397 .await;
3398
3399 let tree = Worktree::local(
3400 client,
3401 "/a".as_ref(),
3402 true,
3403 fs,
3404 Default::default(),
3405 &mut cx.to_async(),
3406 )
3407 .await
3408 .unwrap();
3409
3410 let entry = tree
3411 .update(cx, |tree, cx| {
3412 tree.as_local_mut()
3413 .unwrap()
3414 .create_entry("a/e".as_ref(), true, cx)
3415 })
3416 .await
3417 .unwrap();
3418 assert!(entry.is_dir());
3419
3420 cx.foreground().run_until_parked();
3421 tree.read_with(cx, |tree, _| {
3422 assert_eq!(tree.entry_for_path("a/e").unwrap().kind, EntryKind::Dir);
3423 });
3424 }
3425
3426 #[gpui::test(iterations = 100)]
3427 fn test_random(mut rng: StdRng) {
3428 let operations = env::var("OPERATIONS")
3429 .map(|o| o.parse().unwrap())
3430 .unwrap_or(40);
3431 let initial_entries = env::var("INITIAL_ENTRIES")
3432 .map(|o| o.parse().unwrap())
3433 .unwrap_or(20);
3434
3435 let root_dir = tempdir::TempDir::new("worktree-test").unwrap();
3436 for _ in 0..initial_entries {
3437 randomly_mutate_tree(root_dir.path(), 1.0, &mut rng).unwrap();
3438 }
3439 log::info!("Generated initial tree");
3440
3441 let (notify_tx, _notify_rx) = mpsc::unbounded();
3442 let fs = Arc::new(RealFs);
3443 let next_entry_id = Arc::new(AtomicUsize::new(0));
3444 let mut initial_snapshot = LocalSnapshot {
3445 removed_entry_ids: Default::default(),
3446 ignores_by_parent_abs_path: Default::default(),
3447 git_repositories: Default::default(),
3448 next_entry_id: next_entry_id.clone(),
3449 snapshot: Snapshot {
3450 id: WorktreeId::from_usize(0),
3451 entries_by_path: Default::default(),
3452 entries_by_id: Default::default(),
3453 abs_path: root_dir.path().into(),
3454 root_name: Default::default(),
3455 root_char_bag: Default::default(),
3456 scan_id: 0,
3457 completed_scan_id: 0,
3458 },
3459 };
3460 initial_snapshot.insert_entry(
3461 Entry::new(
3462 Path::new("").into(),
3463 &smol::block_on(fs.metadata(root_dir.path()))
3464 .unwrap()
3465 .unwrap(),
3466 &next_entry_id,
3467 Default::default(),
3468 ),
3469 fs.as_ref(),
3470 );
3471 let mut scanner = BackgroundScanner::new(
3472 Arc::new(Mutex::new(initial_snapshot.clone())),
3473 notify_tx,
3474 fs.clone(),
3475 Arc::new(gpui::executor::Background::new()),
3476 );
3477 smol::block_on(scanner.scan_dirs()).unwrap();
3478 scanner.snapshot().check_invariants();
3479
3480 let mut events = Vec::new();
3481 let mut snapshots = Vec::new();
3482 let mut mutations_len = operations;
3483 while mutations_len > 1 {
3484 if !events.is_empty() && rng.gen_bool(0.4) {
3485 let len = rng.gen_range(0..=events.len());
3486 let to_deliver = events.drain(0..len).collect::<Vec<_>>();
3487 log::info!("Delivering events: {:#?}", to_deliver);
3488 smol::block_on(scanner.process_events(to_deliver));
3489 scanner.snapshot().check_invariants();
3490 } else {
3491 events.extend(randomly_mutate_tree(root_dir.path(), 0.6, &mut rng).unwrap());
3492 mutations_len -= 1;
3493 }
3494
3495 if rng.gen_bool(0.2) {
3496 snapshots.push(scanner.snapshot());
3497 }
3498 }
3499 log::info!("Quiescing: {:#?}", events);
3500 smol::block_on(scanner.process_events(events));
3501 scanner.snapshot().check_invariants();
3502
3503 let (notify_tx, _notify_rx) = mpsc::unbounded();
3504 let mut new_scanner = BackgroundScanner::new(
3505 Arc::new(Mutex::new(initial_snapshot)),
3506 notify_tx,
3507 scanner.fs.clone(),
3508 scanner.executor.clone(),
3509 );
3510 smol::block_on(new_scanner.scan_dirs()).unwrap();
3511 assert_eq!(
3512 scanner.snapshot().to_vec(true),
3513 new_scanner.snapshot().to_vec(true)
3514 );
3515
3516 for mut prev_snapshot in snapshots {
3517 let include_ignored = rng.gen::<bool>();
3518 if !include_ignored {
3519 let mut entries_by_path_edits = Vec::new();
3520 let mut entries_by_id_edits = Vec::new();
3521 for entry in prev_snapshot
3522 .entries_by_id
3523 .cursor::<()>()
3524 .filter(|e| e.is_ignored)
3525 {
3526 entries_by_path_edits.push(Edit::Remove(PathKey(entry.path.clone())));
3527 entries_by_id_edits.push(Edit::Remove(entry.id));
3528 }
3529
3530 prev_snapshot
3531 .entries_by_path
3532 .edit(entries_by_path_edits, &());
3533 prev_snapshot.entries_by_id.edit(entries_by_id_edits, &());
3534 }
3535
3536 let update = scanner
3537 .snapshot()
3538 .build_update(&prev_snapshot, 0, 0, include_ignored);
3539 prev_snapshot.apply_remote_update(update).unwrap();
3540 assert_eq!(
3541 prev_snapshot.to_vec(true),
3542 scanner.snapshot().to_vec(include_ignored)
3543 );
3544 }
3545 }
3546
3547 fn randomly_mutate_tree(
3548 root_path: &Path,
3549 insertion_probability: f64,
3550 rng: &mut impl Rng,
3551 ) -> Result<Vec<fsevent::Event>> {
3552 let root_path = root_path.canonicalize().unwrap();
3553 let (dirs, files) = read_dir_recursive(root_path.clone());
3554
3555 let mut events = Vec::new();
3556 let mut record_event = |path: PathBuf| {
3557 events.push(fsevent::Event {
3558 event_id: SystemTime::now()
3559 .duration_since(UNIX_EPOCH)
3560 .unwrap()
3561 .as_secs(),
3562 flags: fsevent::StreamFlags::empty(),
3563 path,
3564 });
3565 };
3566
3567 if (files.is_empty() && dirs.len() == 1) || rng.gen_bool(insertion_probability) {
3568 let path = dirs.choose(rng).unwrap();
3569 let new_path = path.join(gen_name(rng));
3570
3571 if rng.gen() {
3572 log::info!("Creating dir {:?}", new_path.strip_prefix(root_path)?);
3573 std::fs::create_dir(&new_path)?;
3574 } else {
3575 log::info!("Creating file {:?}", new_path.strip_prefix(root_path)?);
3576 std::fs::write(&new_path, "")?;
3577 }
3578 record_event(new_path);
3579 } else if rng.gen_bool(0.05) {
3580 let ignore_dir_path = dirs.choose(rng).unwrap();
3581 let ignore_path = ignore_dir_path.join(&*GITIGNORE);
3582
3583 let (subdirs, subfiles) = read_dir_recursive(ignore_dir_path.clone());
3584 let files_to_ignore = {
3585 let len = rng.gen_range(0..=subfiles.len());
3586 subfiles.choose_multiple(rng, len)
3587 };
3588 let dirs_to_ignore = {
3589 let len = rng.gen_range(0..subdirs.len());
3590 subdirs.choose_multiple(rng, len)
3591 };
3592
3593 let mut ignore_contents = String::new();
3594 for path_to_ignore in files_to_ignore.chain(dirs_to_ignore) {
3595 writeln!(
3596 ignore_contents,
3597 "{}",
3598 path_to_ignore
3599 .strip_prefix(&ignore_dir_path)?
3600 .to_str()
3601 .unwrap()
3602 )
3603 .unwrap();
3604 }
3605 log::info!(
3606 "Creating {:?} with contents:\n{}",
3607 ignore_path.strip_prefix(&root_path)?,
3608 ignore_contents
3609 );
3610 std::fs::write(&ignore_path, ignore_contents).unwrap();
3611 record_event(ignore_path);
3612 } else {
3613 let old_path = {
3614 let file_path = files.choose(rng);
3615 let dir_path = dirs[1..].choose(rng);
3616 file_path.into_iter().chain(dir_path).choose(rng).unwrap()
3617 };
3618
3619 let is_rename = rng.gen();
3620 if is_rename {
3621 let new_path_parent = dirs
3622 .iter()
3623 .filter(|d| !d.starts_with(old_path))
3624 .choose(rng)
3625 .unwrap();
3626
3627 let overwrite_existing_dir =
3628 !old_path.starts_with(&new_path_parent) && rng.gen_bool(0.3);
3629 let new_path = if overwrite_existing_dir {
3630 std::fs::remove_dir_all(&new_path_parent).ok();
3631 new_path_parent.to_path_buf()
3632 } else {
3633 new_path_parent.join(gen_name(rng))
3634 };
3635
3636 log::info!(
3637 "Renaming {:?} to {}{:?}",
3638 old_path.strip_prefix(&root_path)?,
3639 if overwrite_existing_dir {
3640 "overwrite "
3641 } else {
3642 ""
3643 },
3644 new_path.strip_prefix(&root_path)?
3645 );
3646 std::fs::rename(&old_path, &new_path)?;
3647 record_event(old_path.clone());
3648 record_event(new_path);
3649 } else if old_path.is_dir() {
3650 let (dirs, files) = read_dir_recursive(old_path.clone());
3651
3652 log::info!("Deleting dir {:?}", old_path.strip_prefix(&root_path)?);
3653 std::fs::remove_dir_all(&old_path).unwrap();
3654 for file in files {
3655 record_event(file);
3656 }
3657 for dir in dirs {
3658 record_event(dir);
3659 }
3660 } else {
3661 log::info!("Deleting file {:?}", old_path.strip_prefix(&root_path)?);
3662 std::fs::remove_file(old_path).unwrap();
3663 record_event(old_path.clone());
3664 }
3665 }
3666
3667 Ok(events)
3668 }
3669
3670 fn read_dir_recursive(path: PathBuf) -> (Vec<PathBuf>, Vec<PathBuf>) {
3671 let child_entries = std::fs::read_dir(&path).unwrap();
3672 let mut dirs = vec![path];
3673 let mut files = Vec::new();
3674 for child_entry in child_entries {
3675 let child_path = child_entry.unwrap().path();
3676 if child_path.is_dir() {
3677 let (child_dirs, child_files) = read_dir_recursive(child_path);
3678 dirs.extend(child_dirs);
3679 files.extend(child_files);
3680 } else {
3681 files.push(child_path);
3682 }
3683 }
3684 (dirs, files)
3685 }
3686
3687 fn gen_name(rng: &mut impl Rng) -> String {
3688 (0..6)
3689 .map(|_| rng.sample(rand::distributions::Alphanumeric))
3690 .map(char::from)
3691 .collect()
3692 }
3693
3694 impl LocalSnapshot {
3695 fn check_invariants(&self) {
3696 let mut files = self.files(true, 0);
3697 let mut visible_files = self.files(false, 0);
3698 for entry in self.entries_by_path.cursor::<()>() {
3699 if entry.is_file() {
3700 assert_eq!(files.next().unwrap().inode, entry.inode);
3701 if !entry.is_ignored {
3702 assert_eq!(visible_files.next().unwrap().inode, entry.inode);
3703 }
3704 }
3705 }
3706 assert!(files.next().is_none());
3707 assert!(visible_files.next().is_none());
3708
3709 let mut bfs_paths = Vec::new();
3710 let mut stack = vec![Path::new("")];
3711 while let Some(path) = stack.pop() {
3712 bfs_paths.push(path);
3713 let ix = stack.len();
3714 for child_entry in self.child_entries(path) {
3715 stack.insert(ix, &child_entry.path);
3716 }
3717 }
3718
3719 let dfs_paths_via_iter = self
3720 .entries_by_path
3721 .cursor::<()>()
3722 .map(|e| e.path.as_ref())
3723 .collect::<Vec<_>>();
3724 assert_eq!(bfs_paths, dfs_paths_via_iter);
3725
3726 let dfs_paths_via_traversal = self
3727 .entries(true)
3728 .map(|e| e.path.as_ref())
3729 .collect::<Vec<_>>();
3730 assert_eq!(dfs_paths_via_traversal, dfs_paths_via_iter);
3731
3732 for ignore_parent_abs_path in self.ignores_by_parent_abs_path.keys() {
3733 let ignore_parent_path =
3734 ignore_parent_abs_path.strip_prefix(&self.abs_path).unwrap();
3735 assert!(self.entry_for_path(&ignore_parent_path).is_some());
3736 assert!(self
3737 .entry_for_path(ignore_parent_path.join(&*GITIGNORE))
3738 .is_some());
3739 }
3740 }
3741
3742 fn to_vec(&self, include_ignored: bool) -> Vec<(&Path, u64, bool)> {
3743 let mut paths = Vec::new();
3744 for entry in self.entries_by_path.cursor::<()>() {
3745 if include_ignored || !entry.is_ignored {
3746 paths.push((entry.path.as_ref(), entry.inode, entry.is_ignored));
3747 }
3748 }
3749 paths.sort_by(|a, b| a.0.cmp(b.0));
3750 paths
3751 }
3752 }
3753}