1use super::{ignore::IgnoreStack, DiagnosticSummary};
2use crate::{copy_recursive, ProjectEntryId, RemoveOptions};
3use ::ignore::gitignore::{Gitignore, GitignoreBuilder};
4use anyhow::{anyhow, Context, Result};
5use client::{proto, Client};
6use clock::ReplicaId;
7use collections::{HashMap, VecDeque};
8use fs::{repository::GitRepository, Fs};
9use fs::{HomeDir, LineEnding};
10use futures::{
11 channel::{
12 mpsc::{self, UnboundedSender},
13 oneshot,
14 },
15 Stream, StreamExt,
16};
17use fuzzy::CharBag;
18use git::{DOT_GIT, GITIGNORE};
19use gpui::{
20 executor, AppContext, AsyncAppContext, Entity, ModelContext, ModelHandle, MutableAppContext,
21 Task,
22};
23use language::{
24 proto::{deserialize_version, serialize_line_ending, serialize_version},
25 Buffer, DiagnosticEntry, PointUtf16, Rope,
26};
27use parking_lot::Mutex;
28use postage::{
29 prelude::{Sink as _, Stream as _},
30 watch,
31};
32
33use smol::channel::{self, Sender};
34use std::{
35 any::Any,
36 cmp::{self, Ordering},
37 convert::TryFrom,
38 ffi::{OsStr, OsString},
39 fmt,
40 future::Future,
41 mem,
42 ops::{Deref, DerefMut},
43 os::unix::prelude::{OsStrExt, OsStringExt},
44 path::{Path, PathBuf},
45 sync::{atomic::AtomicUsize, Arc},
46 task::Poll,
47 time::{Duration, SystemTime},
48};
49use sum_tree::{Bias, Edit, SeekTarget, SumTree, TreeMap, TreeSet};
50use util::{ResultExt, TryFutureExt};
51
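/// Identifies a worktree within a project. Wraps a `usize` locally and converts
/// to and from the `u64` id used in the wire protocol.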
52#[derive(Copy, Clone, PartialEq, Eq, Debug, Hash, PartialOrd, Ord)]
53pub struct WorktreeId(usize);
54
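/// A worktree is either local, backed by the filesystem and kept up to date by a
/// background scanner, or remote, mirroring a collaborator's worktree via RPC updates.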
55#[allow(clippy::large_enum_variant)]
56pub enum Worktree {
57 Local(LocalWorktree),
58 Remote(RemoteWorktree),
59}
60
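/// A worktree rooted at a path on the local filesystem. Holds the most recently
/// polled snapshot alongside the background snapshot that the scanner mutates, plus
/// per-path diagnostics and optional share state used when collaborating.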
61pub struct LocalWorktree {
62 snapshot: LocalSnapshot,
63 background_snapshot: Arc<Mutex<LocalSnapshot>>,
64 last_scan_state_rx: watch::Receiver<ScanState>,
65 _background_scanner_task: Option<Task<()>>,
66 poll_task: Option<Task<()>>,
67 share: Option<ShareState>,
68 diagnostics: HashMap<Arc<Path>, Vec<DiagnosticEntry<PointUtf16>>>,
69 diagnostic_summaries: TreeMap<PathKey, DiagnosticSummary>,
70 client: Arc<Client>,
71 fs: Arc<dyn Fs>,
72 visible: bool,
73}
74
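/// A read-only replica of a worktree hosted by another collaborator. Updates arrive
/// through `updates_tx`, are applied to `background_snapshot`, and are then polled
/// into `snapshot`.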
75pub struct RemoteWorktree {
76 pub snapshot: Snapshot,
77 pub(crate) background_snapshot: Arc<Mutex<Snapshot>>,
78 project_id: u64,
79 client: Arc<Client>,
80 updates_tx: Option<UnboundedSender<proto::UpdateWorktree>>,
81 snapshot_subscriptions: VecDeque<(usize, oneshot::Sender<()>)>,
82 replica_id: ReplicaId,
83 diagnostic_summaries: TreeMap<PathKey, DiagnosticSummary>,
84 visible: bool,
85}
86
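/// An immutable view of a worktree's entries at a particular scan, indexed both by
/// path and by entry id.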
87#[derive(Clone)]
88pub struct Snapshot {
89 id: WorktreeId,
90 abs_path: Arc<Path>,
91 root_name: String,
92 root_char_bag: CharBag,
93 entries_by_path: SumTree<Entry>,
94 entries_by_id: SumTree<PathEntry>,
95 scan_id: usize,
96 is_complete: bool,
97}
98
99#[derive(Clone)]
100pub struct GitRepositoryEntry {
101 pub(crate) repo: Arc<Mutex<dyn GitRepository>>,
102
103 pub(crate) scan_id: usize,
    // Path to the folder that contains the .git file or directory.
105 pub(crate) content_path: Arc<Path>,
106 // Path to the actual .git folder.
107 // Note: if .git is a file, this points to the folder indicated by the .git file
108 pub(crate) git_dir_path: Arc<Path>,
109}
110
111impl std::fmt::Debug for GitRepositoryEntry {
112 fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
113 f.debug_struct("GitRepositoryEntry")
114 .field("content_path", &self.content_path)
115 .field("git_dir_path", &self.git_dir_path)
116 .field("libgit_repository", &"LibGitRepository")
117 .finish()
118 }
119}
120
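/// A `Snapshot` augmented with state that only exists for local worktrees: parsed
/// `.gitignore` files, discovered git repositories, and bookkeeping used to keep
/// entry ids stable and to track file-extension counts.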
121pub struct LocalSnapshot {
122 ignores_by_parent_abs_path: HashMap<Arc<Path>, (Arc<Gitignore>, usize)>,
123 git_repositories: Vec<GitRepositoryEntry>,
124 removed_entry_ids: HashMap<u64, ProjectEntryId>,
125 next_entry_id: Arc<AtomicUsize>,
126 snapshot: Snapshot,
127 extension_counts: HashMap<OsString, usize>,
128}
129
130impl Clone for LocalSnapshot {
131 fn clone(&self) -> Self {
132 Self {
133 ignores_by_parent_abs_path: self.ignores_by_parent_abs_path.clone(),
134 git_repositories: self.git_repositories.iter().cloned().collect(),
135 removed_entry_ids: self.removed_entry_ids.clone(),
136 next_entry_id: self.next_entry_id.clone(),
137 snapshot: self.snapshot.clone(),
138 extension_counts: self.extension_counts.clone(),
139 }
140 }
141}
142
143impl Deref for LocalSnapshot {
144 type Target = Snapshot;
145
146 fn deref(&self) -> &Self::Target {
147 &self.snapshot
148 }
149}
150
151impl DerefMut for LocalSnapshot {
152 fn deref_mut(&mut self) -> &mut Self::Target {
153 &mut self.snapshot
154 }
155}
156
157#[derive(Clone, Debug)]
158enum ScanState {
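    /// The worktree has finished scanning and is waiting for filesystem events.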
159 Idle,
160 /// The worktree is performing its initial scan of the filesystem.
161 Initializing,
162 /// The worktree is updating in response to filesystem events.
163 Updating,
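    /// The background scan failed with an error.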
164 Err(Arc<anyhow::Error>),
165}
166
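/// State that exists while a local worktree is shared with collaborators: the remote
/// project id, a channel of snapshots to publish, and the task that streams worktree
/// updates to the server.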
167struct ShareState {
168 project_id: u64,
169 snapshots_tx: watch::Sender<LocalSnapshot>,
170 _maintain_remote_snapshot: Option<Task<Option<()>>>,
171}
172
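/// Events emitted by a `Worktree` model when its entries or git repositories change.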
173pub enum Event {
174 UpdatedEntries,
175 UpdatedGitRepositories(Vec<GitRepositoryEntry>),
176}
177
178impl Entity for Worktree {
179 type Event = Event;
180}
181
182impl Worktree {
183 pub async fn local(
184 client: Arc<Client>,
185 path: impl Into<Arc<Path>>,
186 visible: bool,
187 fs: Arc<dyn Fs>,
188 next_entry_id: Arc<AtomicUsize>,
189 cx: &mut AsyncAppContext,
190 ) -> Result<ModelHandle<Self>> {
191 let (tree, scan_states_tx) =
192 LocalWorktree::create(client, path, visible, fs.clone(), next_entry_id, cx).await?;
193 tree.update(cx, |tree, cx| {
194 let tree = tree.as_local_mut().unwrap();
195 let abs_path = tree.abs_path().clone();
196 let background_snapshot = tree.background_snapshot.clone();
197 let background = cx.background().clone();
198 tree._background_scanner_task = Some(cx.background().spawn(async move {
199 let events = fs.watch(&abs_path, Duration::from_millis(100)).await;
200 let scanner =
201 BackgroundScanner::new(background_snapshot, scan_states_tx, fs, background);
202 scanner.run(events).await;
203 }));
204 });
205 Ok(tree)
206 }
207
208 pub fn remote(
209 project_remote_id: u64,
210 replica_id: ReplicaId,
211 worktree: proto::WorktreeMetadata,
212 client: Arc<Client>,
213 cx: &mut MutableAppContext,
214 ) -> ModelHandle<Self> {
215 let remote_id = worktree.id;
216 let root_char_bag: CharBag = worktree
217 .root_name
218 .chars()
219 .map(|c| c.to_ascii_lowercase())
220 .collect();
221 let root_name = worktree.root_name.clone();
222 let visible = worktree.visible;
223
224 let abs_path = PathBuf::from(OsString::from_vec(worktree.abs_path));
225 let snapshot = Snapshot {
226 id: WorktreeId(remote_id as usize),
227 abs_path: Arc::from(abs_path.deref()),
228 root_name,
229 root_char_bag,
230 entries_by_path: Default::default(),
231 entries_by_id: Default::default(),
232 scan_id: 0,
233 is_complete: false,
234 };
235
236 let (updates_tx, mut updates_rx) = mpsc::unbounded();
237 let background_snapshot = Arc::new(Mutex::new(snapshot.clone()));
238 let (mut snapshot_updated_tx, mut snapshot_updated_rx) = watch::channel();
239 let worktree_handle = cx.add_model(|_: &mut ModelContext<Worktree>| {
240 Worktree::Remote(RemoteWorktree {
241 project_id: project_remote_id,
242 replica_id,
243 snapshot: snapshot.clone(),
244 background_snapshot: background_snapshot.clone(),
245 updates_tx: Some(updates_tx),
246 snapshot_subscriptions: Default::default(),
247 client: client.clone(),
248 diagnostic_summaries: Default::default(),
249 visible,
250 })
251 });
252
253 cx.background()
254 .spawn(async move {
255 while let Some(update) = updates_rx.next().await {
256 if let Err(error) = background_snapshot.lock().apply_remote_update(update) {
257 log::error!("error applying worktree update: {}", error);
258 }
259 snapshot_updated_tx.send(()).await.ok();
260 }
261 })
262 .detach();
263
264 cx.spawn(|mut cx| {
265 let this = worktree_handle.downgrade();
266 async move {
267 while (snapshot_updated_rx.recv().await).is_some() {
268 if let Some(this) = this.upgrade(&cx) {
269 this.update(&mut cx, |this, cx| {
270 this.poll_snapshot(cx);
271 let this = this.as_remote_mut().unwrap();
272 while let Some((scan_id, _)) = this.snapshot_subscriptions.front() {
273 if this.observed_snapshot(*scan_id) {
274 let (_, tx) = this.snapshot_subscriptions.pop_front().unwrap();
275 let _ = tx.send(());
276 } else {
277 break;
278 }
279 }
280 });
281 } else {
282 break;
283 }
284 }
285 }
286 })
287 .detach();
288
289 worktree_handle
290 }
291
292 pub fn as_local(&self) -> Option<&LocalWorktree> {
293 if let Worktree::Local(worktree) = self {
294 Some(worktree)
295 } else {
296 None
297 }
298 }
299
300 pub fn as_remote(&self) -> Option<&RemoteWorktree> {
301 if let Worktree::Remote(worktree) = self {
302 Some(worktree)
303 } else {
304 None
305 }
306 }
307
308 pub fn as_local_mut(&mut self) -> Option<&mut LocalWorktree> {
309 if let Worktree::Local(worktree) = self {
310 Some(worktree)
311 } else {
312 None
313 }
314 }
315
316 pub fn as_remote_mut(&mut self) -> Option<&mut RemoteWorktree> {
317 if let Worktree::Remote(worktree) = self {
318 Some(worktree)
319 } else {
320 None
321 }
322 }
323
324 pub fn is_local(&self) -> bool {
325 matches!(self, Worktree::Local(_))
326 }
327
328 pub fn is_remote(&self) -> bool {
329 !self.is_local()
330 }
331
332 pub fn snapshot(&self) -> Snapshot {
333 match self {
334 Worktree::Local(worktree) => worktree.snapshot().snapshot,
335 Worktree::Remote(worktree) => worktree.snapshot(),
336 }
337 }
338
339 pub fn scan_id(&self) -> usize {
340 match self {
341 Worktree::Local(worktree) => worktree.snapshot.scan_id,
342 Worktree::Remote(worktree) => worktree.snapshot.scan_id,
343 }
344 }
345
346 pub fn is_visible(&self) -> bool {
347 match self {
348 Worktree::Local(worktree) => worktree.visible,
349 Worktree::Remote(worktree) => worktree.visible,
350 }
351 }
352
353 pub fn replica_id(&self) -> ReplicaId {
354 match self {
355 Worktree::Local(_) => 0,
356 Worktree::Remote(worktree) => worktree.replica_id,
357 }
358 }
359
360 pub fn diagnostic_summaries(
361 &self,
362 ) -> impl Iterator<Item = (Arc<Path>, DiagnosticSummary)> + '_ {
363 match self {
364 Worktree::Local(worktree) => &worktree.diagnostic_summaries,
365 Worktree::Remote(worktree) => &worktree.diagnostic_summaries,
366 }
367 .iter()
368 .map(|(path, summary)| (path.0.clone(), *summary))
369 }
370
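    /// Pulls the latest snapshot produced by the background scanner (or by remote
    /// updates) into the foreground state and notifies observers.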
371 fn poll_snapshot(&mut self, cx: &mut ModelContext<Self>) {
372 match self {
373 Self::Local(worktree) => worktree.poll_snapshot(false, cx),
374 Self::Remote(worktree) => worktree.poll_snapshot(cx),
375 };
376 }
377
378 pub fn abs_path(&self) -> Arc<Path> {
379 match self {
380 Worktree::Local(worktree) => worktree.abs_path.clone(),
381 Worktree::Remote(worktree) => worktree.abs_path.clone(),
382 }
383 }
384}
385
386impl LocalWorktree {
387 async fn create(
388 client: Arc<Client>,
389 path: impl Into<Arc<Path>>,
390 visible: bool,
391 fs: Arc<dyn Fs>,
392 next_entry_id: Arc<AtomicUsize>,
393 cx: &mut AsyncAppContext,
394 ) -> Result<(ModelHandle<Worktree>, UnboundedSender<ScanState>)> {
395 let abs_path = path.into();
396 let path: Arc<Path> = Arc::from(Path::new(""));
397
        // After determining whether the root entry is a file or a directory, populate the
        // snapshot's "root name", which will be used for fuzzy matching.
400 let root_name = abs_path
401 .file_name()
402 .map_or(String::new(), |f| f.to_string_lossy().to_string());
403 let root_char_bag = root_name.chars().map(|c| c.to_ascii_lowercase()).collect();
404 let metadata = fs
405 .metadata(&abs_path)
406 .await
407 .context("failed to stat worktree path")?;
408
409 let (scan_states_tx, mut scan_states_rx) = mpsc::unbounded();
410 let (mut last_scan_state_tx, last_scan_state_rx) =
411 watch::channel_with(ScanState::Initializing);
412 let tree = cx.add_model(move |cx: &mut ModelContext<Worktree>| {
413 let mut snapshot = LocalSnapshot {
414 ignores_by_parent_abs_path: Default::default(),
415 git_repositories: Default::default(),
416 removed_entry_ids: Default::default(),
417 next_entry_id,
418 snapshot: Snapshot {
419 id: WorktreeId::from_usize(cx.model_id()),
420 abs_path,
421 root_name: root_name.clone(),
422 root_char_bag,
423 entries_by_path: Default::default(),
424 entries_by_id: Default::default(),
425 scan_id: 0,
426 is_complete: true,
427 },
428 extension_counts: Default::default(),
429 };
430 if let Some(metadata) = metadata {
431 let entry = Entry::new(
432 path,
433 &metadata,
434 &snapshot.next_entry_id,
435 snapshot.root_char_bag,
436 );
437 snapshot.insert_entry(entry, fs.as_ref());
438 }
439
440 let tree = Self {
441 snapshot: snapshot.clone(),
442 background_snapshot: Arc::new(Mutex::new(snapshot)),
443 last_scan_state_rx,
444 _background_scanner_task: None,
445 share: None,
446 poll_task: None,
447 diagnostics: Default::default(),
448 diagnostic_summaries: Default::default(),
449 client,
450 fs,
451 visible,
452 };
453
454 cx.spawn_weak(|this, mut cx| async move {
455 while let Some(scan_state) = scan_states_rx.next().await {
456 if let Some(this) = this.upgrade(&cx) {
457 last_scan_state_tx.blocking_send(scan_state).ok();
458 this.update(&mut cx, |this, cx| this.poll_snapshot(cx));
459 } else {
460 break;
461 }
462 }
463 })
464 .detach();
465
466 Worktree::Local(tree)
467 });
468
469 Ok((tree, scan_states_tx))
470 }
471
472 pub fn contains_abs_path(&self, path: &Path) -> bool {
473 path.starts_with(&self.abs_path)
474 }
475
476 fn absolutize(&self, path: &Path) -> PathBuf {
477 if path.file_name().is_some() {
478 self.abs_path.join(path)
479 } else {
480 self.abs_path.to_path_buf()
481 }
482 }
483
484 pub(crate) fn load_buffer(
485 &mut self,
486 path: &Path,
487 cx: &mut ModelContext<Worktree>,
488 ) -> Task<Result<ModelHandle<Buffer>>> {
489 let path = Arc::from(path);
490 cx.spawn(move |this, mut cx| async move {
491 let (file, contents, diff_base) = this
492 .update(&mut cx, |t, cx| t.as_local().unwrap().load(&path, cx))
493 .await?;
494 Ok(cx.add_model(|cx| {
495 let mut buffer = Buffer::from_file(0, contents, diff_base, Arc::new(file), cx);
496 buffer.git_diff_recalc(cx);
497 buffer
498 }))
499 })
500 }
501
502 pub fn diagnostics_for_path(&self, path: &Path) -> Option<Vec<DiagnosticEntry<PointUtf16>>> {
503 self.diagnostics.get(path).cloned()
504 }
505
506 pub fn update_diagnostics(
507 &mut self,
508 language_server_id: usize,
509 worktree_path: Arc<Path>,
510 diagnostics: Vec<DiagnosticEntry<PointUtf16>>,
511 _: &mut ModelContext<Worktree>,
512 ) -> Result<bool> {
513 self.diagnostics.remove(&worktree_path);
514 let old_summary = self
515 .diagnostic_summaries
516 .remove(&PathKey(worktree_path.clone()))
517 .unwrap_or_default();
518 let new_summary = DiagnosticSummary::new(language_server_id, &diagnostics);
519 if !new_summary.is_empty() {
520 self.diagnostic_summaries
521 .insert(PathKey(worktree_path.clone()), new_summary);
522 self.diagnostics.insert(worktree_path.clone(), diagnostics);
523 }
524
525 let updated = !old_summary.is_empty() || !new_summary.is_empty();
526 if updated {
527 if let Some(share) = self.share.as_ref() {
528 self.client
529 .send(proto::UpdateDiagnosticSummary {
530 project_id: share.project_id,
531 worktree_id: self.id().to_proto(),
532 summary: Some(proto::DiagnosticSummary {
533 path: worktree_path.to_string_lossy().to_string(),
534 language_server_id: language_server_id as u64,
535 error_count: new_summary.error_count as u32,
536 warning_count: new_summary.warning_count as u32,
537 }),
538 })
539 .log_err();
540 }
541 }
542
543 Ok(updated)
544 }
545
546 fn poll_snapshot(&mut self, force: bool, cx: &mut ModelContext<Worktree>) {
547 self.poll_task.take();
548
549 match self.scan_state() {
550 ScanState::Idle => {
551 let new_snapshot = self.background_snapshot.lock().clone();
552 let updated_repos = Self::changed_repos(
553 &self.snapshot.git_repositories,
554 &new_snapshot.git_repositories,
555 );
556 self.snapshot = new_snapshot;
557
558 if let Some(share) = self.share.as_mut() {
559 *share.snapshots_tx.borrow_mut() = self.snapshot.clone();
560 }
561
562 cx.emit(Event::UpdatedEntries);
563
564 if !updated_repos.is_empty() {
565 cx.emit(Event::UpdatedGitRepositories(updated_repos));
566 }
567 }
568
569 ScanState::Initializing => {
570 let is_fake_fs = self.fs.is_fake();
571
572 let new_snapshot = self.background_snapshot.lock().clone();
573 let updated_repos = Self::changed_repos(
574 &self.snapshot.git_repositories,
575 &new_snapshot.git_repositories,
576 );
577 self.snapshot = new_snapshot;
578
579 self.poll_task = Some(cx.spawn_weak(|this, mut cx| async move {
580 if is_fake_fs {
581 #[cfg(any(test, feature = "test-support"))]
582 cx.background().simulate_random_delay().await;
583 } else {
584 smol::Timer::after(Duration::from_millis(100)).await;
585 }
586 if let Some(this) = this.upgrade(&cx) {
587 this.update(&mut cx, |this, cx| this.poll_snapshot(cx));
588 }
589 }));
590
591 cx.emit(Event::UpdatedEntries);
592
593 if !updated_repos.is_empty() {
594 cx.emit(Event::UpdatedGitRepositories(updated_repos));
595 }
596 }
597
598 _ => {
599 if force {
600 self.snapshot = self.background_snapshot.lock().clone();
601 }
602 }
603 }
604
605 cx.notify();
606 }
607
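    /// Returns the git repositories that differ between the two snapshots: any entry
    /// whose `(git_dir_path, scan_id)` pair is present in one list but not the other.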
608 fn changed_repos(
609 old_repos: &[GitRepositoryEntry],
610 new_repos: &[GitRepositoryEntry],
611 ) -> Vec<GitRepositoryEntry> {
612 fn diff<'a>(
613 a: &'a [GitRepositoryEntry],
614 b: &'a [GitRepositoryEntry],
615 updated: &mut HashMap<&'a Path, GitRepositoryEntry>,
616 ) {
617 for a_repo in a {
618 let matched = b.iter().find(|b_repo| {
619 a_repo.git_dir_path == b_repo.git_dir_path && a_repo.scan_id == b_repo.scan_id
620 });
621
622 if matched.is_none() {
623 updated.insert(a_repo.git_dir_path.as_ref(), a_repo.clone());
624 }
625 }
626 }
627
628 let mut updated = HashMap::<&Path, GitRepositoryEntry>::default();
629
630 diff(old_repos, new_repos, &mut updated);
631 diff(new_repos, old_repos, &mut updated);
632
633 updated.into_values().collect()
634 }
635
636 pub fn scan_complete(&self) -> impl Future<Output = ()> {
637 let mut scan_state_rx = self.last_scan_state_rx.clone();
638 async move {
639 let mut scan_state = Some(scan_state_rx.borrow().clone());
640 while let Some(ScanState::Initializing | ScanState::Updating) = scan_state {
641 scan_state = scan_state_rx.recv().await;
642 }
643 }
644 }
645
646 fn scan_state(&self) -> ScanState {
647 self.last_scan_state_rx.borrow().clone()
648 }
649
650 pub fn snapshot(&self) -> LocalSnapshot {
651 self.snapshot.clone()
652 }
653
654 pub fn metadata_proto(&self) -> proto::WorktreeMetadata {
655 proto::WorktreeMetadata {
656 id: self.id().to_proto(),
657 root_name: self.root_name().to_string(),
658 visible: self.visible,
659 abs_path: self.abs_path().as_os_str().as_bytes().to_vec(),
660 }
661 }
662
663 fn load(
664 &self,
665 path: &Path,
666 cx: &mut ModelContext<Worktree>,
667 ) -> Task<Result<(File, String, Option<String>)>> {
668 let handle = cx.handle();
669 let path = Arc::from(path);
670 let abs_path = self.absolutize(&path);
671 let fs = self.fs.clone();
672 let snapshot = self.snapshot();
673
674 cx.spawn(|this, mut cx| async move {
675 let text = fs.load(&abs_path).await?;
676
677 let diff_base = if let Some(repo) = snapshot.repo_for(&path) {
678 if let Ok(repo_relative) = path.strip_prefix(repo.content_path) {
679 let repo_relative = repo_relative.to_owned();
680 cx.background()
681 .spawn(async move { repo.repo.lock().load_index_text(&repo_relative) })
682 .await
683 } else {
684 None
685 }
686 } else {
687 None
688 };
689
690 // Eagerly populate the snapshot with an updated entry for the loaded file
691 let entry = this
692 .update(&mut cx, |this, cx| {
693 this.as_local()
694 .unwrap()
695 .refresh_entry(path, abs_path, None, cx)
696 })
697 .await?;
698
699 Ok((
700 File {
701 entry_id: entry.id,
702 worktree: handle,
703 path: entry.path,
704 mtime: entry.mtime,
705 is_local: true,
706 is_deleted: false,
707 },
708 text,
709 diff_base,
710 ))
711 })
712 }
713
714 pub fn save_buffer_as(
715 &self,
716 buffer_handle: ModelHandle<Buffer>,
717 path: impl Into<Arc<Path>>,
718 cx: &mut ModelContext<Worktree>,
719 ) -> Task<Result<()>> {
720 let buffer = buffer_handle.read(cx);
721 let text = buffer.as_rope().clone();
722 let fingerprint = text.fingerprint();
723 let version = buffer.version();
724 let save = self.write_file(path, text, buffer.line_ending(), cx);
725 let handle = cx.handle();
726 cx.as_mut().spawn(|mut cx| async move {
727 let entry = save.await?;
728 let file = File {
729 entry_id: entry.id,
730 worktree: handle,
731 path: entry.path,
732 mtime: entry.mtime,
733 is_local: true,
734 is_deleted: false,
735 };
736
737 buffer_handle.update(&mut cx, |buffer, cx| {
738 buffer.did_save(version, fingerprint, file.mtime, Some(Arc::new(file)), cx);
739 });
740
741 Ok(())
742 })
743 }
744
745 pub fn create_entry(
746 &self,
747 path: impl Into<Arc<Path>>,
748 is_dir: bool,
749 cx: &mut ModelContext<Worktree>,
750 ) -> Task<Result<Entry>> {
751 self.write_entry_internal(
752 path,
753 if is_dir {
754 None
755 } else {
756 Some(Default::default())
757 },
758 cx,
759 )
760 }
761
762 pub fn write_file(
763 &self,
764 path: impl Into<Arc<Path>>,
765 text: Rope,
766 line_ending: LineEnding,
767 cx: &mut ModelContext<Worktree>,
768 ) -> Task<Result<Entry>> {
769 self.write_entry_internal(path, Some((text, line_ending)), cx)
770 }
771
772 pub fn delete_entry(
773 &self,
774 entry_id: ProjectEntryId,
775 cx: &mut ModelContext<Worktree>,
776 ) -> Option<Task<Result<()>>> {
777 let entry = self.entry_for_id(entry_id)?.clone();
778 let abs_path = self.absolutize(&entry.path);
779 let delete = cx.background().spawn({
780 let fs = self.fs.clone();
781 let abs_path = abs_path;
782 async move {
783 if entry.is_file() {
784 fs.remove_file(&abs_path, Default::default()).await
785 } else {
786 fs.remove_dir(
787 &abs_path,
788 RemoveOptions {
789 recursive: true,
790 ignore_if_not_exists: false,
791 },
792 )
793 .await
794 }
795 }
796 });
797
798 Some(cx.spawn(|this, mut cx| async move {
799 delete.await?;
800 this.update(&mut cx, |this, cx| {
801 let this = this.as_local_mut().unwrap();
802 {
803 let mut snapshot = this.background_snapshot.lock();
804 snapshot.delete_entry(entry_id);
805 }
806 this.poll_snapshot(true, cx);
807 });
808 Ok(())
809 }))
810 }
811
812 pub fn rename_entry(
813 &self,
814 entry_id: ProjectEntryId,
815 new_path: impl Into<Arc<Path>>,
816 cx: &mut ModelContext<Worktree>,
817 ) -> Option<Task<Result<Entry>>> {
818 let old_path = self.entry_for_id(entry_id)?.path.clone();
819 let new_path = new_path.into();
820 let abs_old_path = self.absolutize(&old_path);
821 let abs_new_path = self.absolutize(&new_path);
822 let rename = cx.background().spawn({
823 let fs = self.fs.clone();
824 let abs_new_path = abs_new_path.clone();
825 async move {
826 fs.rename(&abs_old_path, &abs_new_path, Default::default())
827 .await
828 }
829 });
830
831 Some(cx.spawn(|this, mut cx| async move {
832 rename.await?;
833 let entry = this
834 .update(&mut cx, |this, cx| {
835 this.as_local_mut().unwrap().refresh_entry(
836 new_path.clone(),
837 abs_new_path,
838 Some(old_path),
839 cx,
840 )
841 })
842 .await?;
843 Ok(entry)
844 }))
845 }
846
847 pub fn copy_entry(
848 &self,
849 entry_id: ProjectEntryId,
850 new_path: impl Into<Arc<Path>>,
851 cx: &mut ModelContext<Worktree>,
852 ) -> Option<Task<Result<Entry>>> {
853 let old_path = self.entry_for_id(entry_id)?.path.clone();
854 let new_path = new_path.into();
855 let abs_old_path = self.absolutize(&old_path);
856 let abs_new_path = self.absolutize(&new_path);
857 let copy = cx.background().spawn({
858 let fs = self.fs.clone();
859 let abs_new_path = abs_new_path.clone();
860 async move {
861 copy_recursive(
862 fs.as_ref(),
863 &abs_old_path,
864 &abs_new_path,
865 Default::default(),
866 )
867 .await
868 }
869 });
870
871 Some(cx.spawn(|this, mut cx| async move {
872 copy.await?;
873 let entry = this
874 .update(&mut cx, |this, cx| {
875 this.as_local_mut().unwrap().refresh_entry(
876 new_path.clone(),
877 abs_new_path,
878 None,
879 cx,
880 )
881 })
882 .await?;
883 Ok(entry)
884 }))
885 }
886
887 fn write_entry_internal(
888 &self,
889 path: impl Into<Arc<Path>>,
890 text_if_file: Option<(Rope, LineEnding)>,
891 cx: &mut ModelContext<Worktree>,
892 ) -> Task<Result<Entry>> {
893 let path = path.into();
894 let abs_path = self.absolutize(&path);
895 let write = cx.background().spawn({
896 let fs = self.fs.clone();
897 let abs_path = abs_path.clone();
898 async move {
899 if let Some((text, line_ending)) = text_if_file {
900 fs.save(&abs_path, &text, line_ending).await
901 } else {
902 fs.create_dir(&abs_path).await
903 }
904 }
905 });
906
907 cx.spawn(|this, mut cx| async move {
908 write.await?;
909 let entry = this
910 .update(&mut cx, |this, cx| {
911 this.as_local_mut()
912 .unwrap()
913 .refresh_entry(path, abs_path, None, cx)
914 })
915 .await?;
916 Ok(entry)
917 })
918 }
919
920 fn refresh_entry(
921 &self,
922 path: Arc<Path>,
923 abs_path: PathBuf,
924 old_path: Option<Arc<Path>>,
925 cx: &mut ModelContext<Worktree>,
926 ) -> Task<Result<Entry>> {
927 let fs = self.fs.clone();
928 let root_char_bag;
929 let next_entry_id;
930 {
931 let snapshot = self.background_snapshot.lock();
932 root_char_bag = snapshot.root_char_bag;
933 next_entry_id = snapshot.next_entry_id.clone();
934 }
935 cx.spawn_weak(|this, mut cx| async move {
936 let metadata = fs
937 .metadata(&abs_path)
938 .await?
939 .ok_or_else(|| anyhow!("could not read saved file metadata"))?;
940 let this = this
941 .upgrade(&cx)
942 .ok_or_else(|| anyhow!("worktree was dropped"))?;
943 this.update(&mut cx, |this, cx| {
944 let this = this.as_local_mut().unwrap();
945 let inserted_entry;
946 {
947 let mut snapshot = this.background_snapshot.lock();
948 let mut entry = Entry::new(path, &metadata, &next_entry_id, root_char_bag);
949 entry.is_ignored = snapshot
950 .ignore_stack_for_abs_path(&abs_path, entry.is_dir())
951 .is_abs_path_ignored(&abs_path, entry.is_dir());
952 if let Some(old_path) = old_path {
953 snapshot.remove_path(&old_path);
954 }
955 inserted_entry = snapshot.insert_entry(entry, fs.as_ref());
956 snapshot.scan_id += 1;
957 }
958 this.poll_snapshot(true, cx);
959 Ok(inserted_entry)
960 })
961 })
962 }
963
964 pub fn share(&mut self, project_id: u64, cx: &mut ModelContext<Worktree>) -> Task<Result<()>> {
965 let (share_tx, share_rx) = oneshot::channel();
966
967 if self.share.is_some() {
968 let _ = share_tx.send(Ok(()));
969 } else {
970 let (snapshots_tx, mut snapshots_rx) = watch::channel_with(self.snapshot());
971 let rpc = self.client.clone();
972 let worktree_id = cx.model_id() as u64;
973
974 for (path, summary) in self.diagnostic_summaries.iter() {
975 if let Err(e) = rpc.send(proto::UpdateDiagnosticSummary {
976 project_id,
977 worktree_id,
978 summary: Some(summary.to_proto(&path.0)),
979 }) {
980 return Task::ready(Err(e));
981 }
982 }
983
984 let maintain_remote_snapshot = cx.background().spawn({
985 let rpc = rpc;
986
987 async move {
988 let mut prev_snapshot = match snapshots_rx.recv().await {
989 Some(snapshot) => {
990 let update = proto::UpdateWorktree {
991 project_id,
992 worktree_id,
993 abs_path: snapshot.abs_path().as_os_str().as_bytes().to_vec(),
994 root_name: snapshot.root_name().to_string(),
995 updated_entries: snapshot
996 .entries_by_path
997 .iter()
998 .map(Into::into)
999 .collect(),
1000 removed_entries: Default::default(),
1001 scan_id: snapshot.scan_id as u64,
1002 is_last_update: true,
1003 };
1004 if let Err(error) = send_worktree_update(&rpc, update).await {
1005 let _ = share_tx.send(Err(error));
1006 return Err(anyhow!("failed to send initial update worktree"));
1007 } else {
1008 let _ = share_tx.send(Ok(()));
1009 snapshot
1010 }
1011 }
1012 None => {
1013 share_tx
1014 .send(Err(anyhow!("worktree dropped before share completed")))
1015 .ok();
1016 return Err(anyhow!("failed to send initial update worktree"));
1017 }
1018 };
1019
1020 while let Some(snapshot) = snapshots_rx.recv().await {
1021 send_worktree_update(
1022 &rpc,
1023 snapshot.build_update(&prev_snapshot, project_id, worktree_id, true),
1024 )
1025 .await?;
1026 prev_snapshot = snapshot;
1027 }
1028
1029 Ok::<_, anyhow::Error>(())
1030 }
1031 .log_err()
1032 });
1033 self.share = Some(ShareState {
1034 project_id,
1035 snapshots_tx,
1036 _maintain_remote_snapshot: Some(maintain_remote_snapshot),
1037 });
1038 }
1039
1040 cx.foreground().spawn(async move {
1041 share_rx
1042 .await
1043 .unwrap_or_else(|_| Err(anyhow!("share ended")))
1044 })
1045 }
1046
1047 pub fn unshare(&mut self) {
1048 self.share.take();
1049 }
1050
1051 pub fn is_shared(&self) -> bool {
1052 self.share.is_some()
1053 }
1054}
1055
1056impl RemoteWorktree {
1057 fn snapshot(&self) -> Snapshot {
1058 self.snapshot.clone()
1059 }
1060
1061 fn poll_snapshot(&mut self, cx: &mut ModelContext<Worktree>) {
1062 self.snapshot = self.background_snapshot.lock().clone();
1063 cx.emit(Event::UpdatedEntries);
1064 cx.notify();
1065 }
1066
1067 pub fn disconnected_from_host(&mut self) {
1068 self.updates_tx.take();
1069 self.snapshot_subscriptions.clear();
1070 }
1071
1072 pub fn update_from_remote(&mut self, update: proto::UpdateWorktree) {
1073 if let Some(updates_tx) = &self.updates_tx {
1074 updates_tx
1075 .unbounded_send(update)
1076 .expect("consumer runs to completion");
1077 }
1078 }
1079
1080 fn observed_snapshot(&self, scan_id: usize) -> bool {
1081 self.scan_id > scan_id || (self.scan_id == scan_id && self.is_complete)
1082 }
1083
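    /// Returns a future that resolves once this worktree has caught up to the given
    /// `scan_id`.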
1084 fn wait_for_snapshot(&mut self, scan_id: usize) -> impl Future<Output = ()> {
1085 let (tx, rx) = oneshot::channel();
1086 if self.observed_snapshot(scan_id) {
1087 let _ = tx.send(());
1088 } else {
1089 match self
1090 .snapshot_subscriptions
1091 .binary_search_by_key(&scan_id, |probe| probe.0)
1092 {
1093 Ok(ix) | Err(ix) => self.snapshot_subscriptions.insert(ix, (scan_id, tx)),
1094 }
1095 }
1096
1097 async move {
1098 let _ = rx.await;
1099 }
1100 }
1101
1102 pub fn update_diagnostic_summary(
1103 &mut self,
1104 path: Arc<Path>,
1105 summary: &proto::DiagnosticSummary,
1106 ) {
1107 let summary = DiagnosticSummary {
1108 language_server_id: summary.language_server_id as usize,
1109 error_count: summary.error_count as usize,
1110 warning_count: summary.warning_count as usize,
1111 };
1112 if summary.is_empty() {
1113 self.diagnostic_summaries.remove(&PathKey(path));
1114 } else {
1115 self.diagnostic_summaries.insert(PathKey(path), summary);
1116 }
1117 }
1118
1119 pub fn insert_entry(
1120 &mut self,
1121 entry: proto::Entry,
1122 scan_id: usize,
1123 cx: &mut ModelContext<Worktree>,
1124 ) -> Task<Result<Entry>> {
1125 let wait_for_snapshot = self.wait_for_snapshot(scan_id);
1126 cx.spawn(|this, mut cx| async move {
1127 wait_for_snapshot.await;
1128 this.update(&mut cx, |worktree, _| {
1129 let worktree = worktree.as_remote_mut().unwrap();
1130 let mut snapshot = worktree.background_snapshot.lock();
1131 let entry = snapshot.insert_entry(entry);
1132 worktree.snapshot = snapshot.clone();
1133 entry
1134 })
1135 })
1136 }
1137
1138 pub(crate) fn delete_entry(
1139 &mut self,
1140 id: ProjectEntryId,
1141 scan_id: usize,
1142 cx: &mut ModelContext<Worktree>,
1143 ) -> Task<Result<()>> {
1144 let wait_for_snapshot = self.wait_for_snapshot(scan_id);
1145 cx.spawn(|this, mut cx| async move {
1146 wait_for_snapshot.await;
1147 this.update(&mut cx, |worktree, _| {
1148 let worktree = worktree.as_remote_mut().unwrap();
1149 let mut snapshot = worktree.background_snapshot.lock();
1150 snapshot.delete_entry(id);
1151 worktree.snapshot = snapshot.clone();
1152 });
1153 Ok(())
1154 })
1155 }
1156}
1157
1158impl Snapshot {
1159 pub fn id(&self) -> WorktreeId {
1160 self.id
1161 }
1162
1163 pub fn contains_entry(&self, entry_id: ProjectEntryId) -> bool {
1164 self.entries_by_id.get(&entry_id, &()).is_some()
1165 }
1166
1167 pub(crate) fn insert_entry(&mut self, entry: proto::Entry) -> Result<Entry> {
1168 let entry = Entry::try_from((&self.root_char_bag, entry))?;
1169 let old_entry = self.entries_by_id.insert_or_replace(
1170 PathEntry {
1171 id: entry.id,
1172 path: entry.path.clone(),
1173 is_ignored: entry.is_ignored,
1174 scan_id: 0,
1175 },
1176 &(),
1177 );
1178 if let Some(old_entry) = old_entry {
1179 self.entries_by_path.remove(&PathKey(old_entry.path), &());
1180 }
1181 self.entries_by_path.insert_or_replace(entry.clone(), &());
1182 Ok(entry)
1183 }
1184
1185 fn delete_entry(&mut self, entry_id: ProjectEntryId) -> bool {
1186 if let Some(removed_entry) = self.entries_by_id.remove(&entry_id, &()) {
1187 self.entries_by_path = {
1188 let mut cursor = self.entries_by_path.cursor();
1189 let mut new_entries_by_path =
1190 cursor.slice(&TraversalTarget::Path(&removed_entry.path), Bias::Left, &());
1191 while let Some(entry) = cursor.item() {
1192 if entry.path.starts_with(&removed_entry.path) {
1193 self.entries_by_id.remove(&entry.id, &());
1194 cursor.next(&());
1195 } else {
1196 break;
1197 }
1198 }
1199 new_entries_by_path.push_tree(cursor.suffix(&()), &());
1200 new_entries_by_path
1201 };
1202
1203 true
1204 } else {
1205 false
1206 }
1207 }
1208
1209 pub(crate) fn apply_remote_update(&mut self, update: proto::UpdateWorktree) -> Result<()> {
1210 let mut entries_by_path_edits = Vec::new();
1211 let mut entries_by_id_edits = Vec::new();
1212 for entry_id in update.removed_entries {
1213 let entry = self
1214 .entry_for_id(ProjectEntryId::from_proto(entry_id))
1215 .ok_or_else(|| anyhow!("unknown entry {}", entry_id))?;
1216 entries_by_path_edits.push(Edit::Remove(PathKey(entry.path.clone())));
1217 entries_by_id_edits.push(Edit::Remove(entry.id));
1218 }
1219
1220 for entry in update.updated_entries {
1221 let entry = Entry::try_from((&self.root_char_bag, entry))?;
1222 if let Some(PathEntry { path, .. }) = self.entries_by_id.get(&entry.id, &()) {
1223 entries_by_path_edits.push(Edit::Remove(PathKey(path.clone())));
1224 }
1225 entries_by_id_edits.push(Edit::Insert(PathEntry {
1226 id: entry.id,
1227 path: entry.path.clone(),
1228 is_ignored: entry.is_ignored,
1229 scan_id: 0,
1230 }));
1231 entries_by_path_edits.push(Edit::Insert(entry));
1232 }
1233
1234 self.entries_by_path.edit(entries_by_path_edits, &());
1235 self.entries_by_id.edit(entries_by_id_edits, &());
1236 self.scan_id = update.scan_id as usize;
1237 self.is_complete = update.is_last_update;
1238
1239 Ok(())
1240 }
1241
1242 pub fn file_count(&self) -> usize {
1243 self.entries_by_path.summary().file_count
1244 }
1245
1246 pub fn visible_file_count(&self) -> usize {
1247 self.entries_by_path.summary().visible_file_count
1248 }
1249
1250 fn traverse_from_offset(
1251 &self,
1252 include_dirs: bool,
1253 include_ignored: bool,
1254 start_offset: usize,
1255 ) -> Traversal {
1256 let mut cursor = self.entries_by_path.cursor();
1257 cursor.seek(
1258 &TraversalTarget::Count {
1259 count: start_offset,
1260 include_dirs,
1261 include_ignored,
1262 },
1263 Bias::Right,
1264 &(),
1265 );
1266 Traversal {
1267 cursor,
1268 include_dirs,
1269 include_ignored,
1270 }
1271 }
1272
1273 fn traverse_from_path(
1274 &self,
1275 include_dirs: bool,
1276 include_ignored: bool,
1277 path: &Path,
1278 ) -> Traversal {
1279 let mut cursor = self.entries_by_path.cursor();
1280 cursor.seek(&TraversalTarget::Path(path), Bias::Left, &());
1281 Traversal {
1282 cursor,
1283 include_dirs,
1284 include_ignored,
1285 }
1286 }
1287
1288 pub fn files(&self, include_ignored: bool, start: usize) -> Traversal {
1289 self.traverse_from_offset(false, include_ignored, start)
1290 }
1291
1292 pub fn entries(&self, include_ignored: bool) -> Traversal {
1293 self.traverse_from_offset(true, include_ignored, 0)
1294 }
1295
1296 pub fn paths(&self) -> impl Iterator<Item = &Arc<Path>> {
1297 let empty_path = Path::new("");
1298 self.entries_by_path
1299 .cursor::<()>()
1300 .filter(move |entry| entry.path.as_ref() != empty_path)
1301 .map(|entry| &entry.path)
1302 }
1303
1304 fn child_entries<'a>(&'a self, parent_path: &'a Path) -> ChildEntriesIter<'a> {
1305 let mut cursor = self.entries_by_path.cursor();
1306 cursor.seek(&TraversalTarget::Path(parent_path), Bias::Right, &());
1307 let traversal = Traversal {
1308 cursor,
1309 include_dirs: true,
1310 include_ignored: true,
1311 };
1312 ChildEntriesIter {
1313 traversal,
1314 parent_path,
1315 }
1316 }
1317
1318 pub fn root_entry(&self) -> Option<&Entry> {
1319 self.entry_for_path("")
1320 }
1321
1322 pub fn root_name(&self) -> &str {
1323 &self.root_name
1324 }
1325
1326 pub fn scan_id(&self) -> usize {
1327 self.scan_id
1328 }
1329
1330 pub fn entry_for_path(&self, path: impl AsRef<Path>) -> Option<&Entry> {
1331 let path = path.as_ref();
1332 self.traverse_from_path(true, true, path)
1333 .entry()
1334 .and_then(|entry| {
1335 if entry.path.as_ref() == path {
1336 Some(entry)
1337 } else {
1338 None
1339 }
1340 })
1341 }
1342
1343 pub fn entry_for_id(&self, id: ProjectEntryId) -> Option<&Entry> {
1344 let entry = self.entries_by_id.get(&id, &())?;
1345 self.entry_for_path(&entry.path)
1346 }
1347
1348 pub fn inode_for_path(&self, path: impl AsRef<Path>) -> Option<u64> {
1349 self.entry_for_path(path.as_ref()).map(|e| e.inode)
1350 }
1351}
1352
1353impl LocalSnapshot {
1354 pub fn abs_path(&self) -> &Arc<Path> {
1355 &self.abs_path
1356 }
1357
1358 pub fn extension_counts(&self) -> &HashMap<OsString, usize> {
1359 &self.extension_counts
1360 }
1361
1362 // Gives the most specific git repository for a given path
1363 pub(crate) fn repo_for(&self, path: &Path) -> Option<GitRepositoryEntry> {
1364 self.git_repositories
1365 .iter()
            .rev() // git_repositories is ordered lexicographically
1367 .find(|repo| repo.manages(path))
1368 .cloned()
1369 }
1370
1371 pub(crate) fn in_dot_git(&mut self, path: &Path) -> Option<&mut GitRepositoryEntry> {
1372 // Git repositories cannot be nested, so we don't need to reverse the order
1373 self.git_repositories
1374 .iter_mut()
1375 .find(|repo| repo.in_dot_git(path))
1376 }
1377
1378 #[cfg(test)]
1379 pub(crate) fn build_initial_update(&self, project_id: u64) -> proto::UpdateWorktree {
1380 let root_name = self.root_name.clone();
1381 proto::UpdateWorktree {
1382 project_id,
1383 worktree_id: self.id().to_proto(),
1384 abs_path: self.abs_path().as_os_str().as_bytes().to_vec(),
1385 root_name,
1386 updated_entries: self.entries_by_path.iter().map(Into::into).collect(),
1387 removed_entries: Default::default(),
1388 scan_id: self.scan_id as u64,
1389 is_last_update: true,
1390 }
1391 }
1392
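    /// Builds an `UpdateWorktree` message describing how `self` differs from `other`,
    /// walking both id-ordered entry lists in lockstep to collect updated and removed
    /// entries.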
1393 pub(crate) fn build_update(
1394 &self,
1395 other: &Self,
1396 project_id: u64,
1397 worktree_id: u64,
1398 include_ignored: bool,
1399 ) -> proto::UpdateWorktree {
1400 let mut updated_entries = Vec::new();
1401 let mut removed_entries = Vec::new();
1402 let mut self_entries = self
1403 .entries_by_id
1404 .cursor::<()>()
1405 .filter(|e| include_ignored || !e.is_ignored)
1406 .peekable();
1407 let mut other_entries = other
1408 .entries_by_id
1409 .cursor::<()>()
1410 .filter(|e| include_ignored || !e.is_ignored)
1411 .peekable();
1412 loop {
1413 match (self_entries.peek(), other_entries.peek()) {
1414 (Some(self_entry), Some(other_entry)) => {
1415 match Ord::cmp(&self_entry.id, &other_entry.id) {
1416 Ordering::Less => {
1417 let entry = self.entry_for_id(self_entry.id).unwrap().into();
1418 updated_entries.push(entry);
1419 self_entries.next();
1420 }
1421 Ordering::Equal => {
1422 if self_entry.scan_id != other_entry.scan_id {
1423 let entry = self.entry_for_id(self_entry.id).unwrap().into();
1424 updated_entries.push(entry);
1425 }
1426
1427 self_entries.next();
1428 other_entries.next();
1429 }
1430 Ordering::Greater => {
1431 removed_entries.push(other_entry.id.to_proto());
1432 other_entries.next();
1433 }
1434 }
1435 }
1436 (Some(self_entry), None) => {
1437 let entry = self.entry_for_id(self_entry.id).unwrap().into();
1438 updated_entries.push(entry);
1439 self_entries.next();
1440 }
1441 (None, Some(other_entry)) => {
1442 removed_entries.push(other_entry.id.to_proto());
1443 other_entries.next();
1444 }
1445 (None, None) => break,
1446 }
1447 }
1448
1449 proto::UpdateWorktree {
1450 project_id,
1451 worktree_id,
1452 abs_path: self.abs_path().as_os_str().as_bytes().to_vec(),
1453 root_name: self.root_name().to_string(),
1454 updated_entries,
1455 removed_entries,
1456 scan_id: self.scan_id as u64,
1457 is_last_update: true,
1458 }
1459 }
1460
1461 fn insert_entry(&mut self, mut entry: Entry, fs: &dyn Fs) -> Entry {
1462 if entry.is_file() && entry.path.file_name() == Some(&GITIGNORE) {
1463 let abs_path = self.abs_path.join(&entry.path);
1464 match smol::block_on(build_gitignore(&abs_path, fs)) {
1465 Ok(ignore) => {
1466 self.ignores_by_parent_abs_path.insert(
1467 abs_path.parent().unwrap().into(),
1468 (Arc::new(ignore), self.scan_id),
1469 );
1470 }
1471 Err(error) => {
1472 log::error!(
1473 "error loading .gitignore file {:?} - {:?}",
1474 &entry.path,
1475 error
1476 );
1477 }
1478 }
1479 }
1480
1481 self.reuse_entry_id(&mut entry);
1482
1483 if entry.kind == EntryKind::PendingDir {
1484 if let Some(existing_entry) =
1485 self.entries_by_path.get(&PathKey(entry.path.clone()), &())
1486 {
1487 entry.kind = existing_entry.kind;
1488 }
1489 }
1490
1491 self.entries_by_path.insert_or_replace(entry.clone(), &());
1492 let scan_id = self.scan_id;
1493 let removed_entry = self.entries_by_id.insert_or_replace(
1494 PathEntry {
1495 id: entry.id,
1496 path: entry.path.clone(),
1497 is_ignored: entry.is_ignored,
1498 scan_id,
1499 },
1500 &(),
1501 );
1502
1503 if let Some(removed_entry) = removed_entry {
1504 self.dec_extension_count(&removed_entry.path, removed_entry.is_ignored);
1505 }
1506 self.inc_extension_count(&entry.path, entry.is_ignored);
1507
1508 entry
1509 }
1510
1511 fn populate_dir(
1512 &mut self,
1513 parent_path: Arc<Path>,
1514 entries: impl IntoIterator<Item = Entry>,
1515 ignore: Option<Arc<Gitignore>>,
1516 fs: &dyn Fs,
1517 ) {
1518 let mut parent_entry = if let Some(parent_entry) =
1519 self.entries_by_path.get(&PathKey(parent_path.clone()), &())
1520 {
1521 parent_entry.clone()
1522 } else {
1523 log::warn!(
1524 "populating a directory {:?} that has been removed",
1525 parent_path
1526 );
1527 return;
1528 };
1529
1530 if let Some(ignore) = ignore {
1531 self.ignores_by_parent_abs_path.insert(
1532 self.abs_path.join(&parent_path).into(),
1533 (ignore, self.scan_id),
1534 );
1535 }
1536 if matches!(parent_entry.kind, EntryKind::PendingDir) {
1537 parent_entry.kind = EntryKind::Dir;
1538 } else {
1539 unreachable!();
1540 }
1541
1542 if parent_path.file_name() == Some(&DOT_GIT) {
1543 let abs_path = self.abs_path.join(&parent_path);
1544 let content_path: Arc<Path> = parent_path.parent().unwrap().into();
1545 if let Err(ix) = self
1546 .git_repositories
1547 .binary_search_by_key(&&content_path, |repo| &repo.content_path)
1548 {
1549 if let Some(repo) = fs.open_repo(abs_path.as_path()) {
1550 self.git_repositories.insert(
1551 ix,
1552 GitRepositoryEntry {
1553 repo,
1554 scan_id: 0,
1555 content_path,
1556 git_dir_path: parent_path,
1557 },
1558 );
1559 }
1560 }
1561 }
1562
1563 let mut entries_by_path_edits = vec![Edit::Insert(parent_entry)];
1564 let mut entries_by_id_edits = Vec::new();
1565
1566 for mut entry in entries {
1567 self.reuse_entry_id(&mut entry);
1568 self.inc_extension_count(&entry.path, entry.is_ignored);
1569 entries_by_id_edits.push(Edit::Insert(PathEntry {
1570 id: entry.id,
1571 path: entry.path.clone(),
1572 is_ignored: entry.is_ignored,
1573 scan_id: self.scan_id,
1574 }));
1575 entries_by_path_edits.push(Edit::Insert(entry));
1576 }
1577
1578 self.entries_by_path.edit(entries_by_path_edits, &());
1579 let removed_entries = self.entries_by_id.edit(entries_by_id_edits, &());
1580
1581 for removed_entry in removed_entries {
1582 self.dec_extension_count(&removed_entry.path, removed_entry.is_ignored);
1583 }
1584 }
1585
1586 fn inc_extension_count(&mut self, path: &Path, ignored: bool) {
1587 if !ignored {
1588 if let Some(extension) = path.extension() {
1589 if let Some(count) = self.extension_counts.get_mut(extension) {
1590 *count += 1;
1591 } else {
1592 self.extension_counts.insert(extension.into(), 1);
1593 }
1594 }
1595 }
1596 }
1597
1598 fn dec_extension_count(&mut self, path: &Path, ignored: bool) {
1599 if !ignored {
1600 if let Some(extension) = path.extension() {
1601 if let Some(count) = self.extension_counts.get_mut(extension) {
1602 *count -= 1;
1603 }
1604 }
1605 }
1606 }
1607
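    /// Keeps entry ids stable across rescans by reusing the id previously assigned to
    /// this inode (if it was just removed) or to the existing entry at the same path.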
1608 fn reuse_entry_id(&mut self, entry: &mut Entry) {
1609 if let Some(removed_entry_id) = self.removed_entry_ids.remove(&entry.inode) {
1610 entry.id = removed_entry_id;
1611 } else if let Some(existing_entry) = self.entry_for_path(&entry.path) {
1612 entry.id = existing_entry.id;
1613 }
1614 }
1615
1616 fn remove_path(&mut self, path: &Path) {
1617 let mut new_entries;
1618 let removed_entries;
1619 {
1620 let mut cursor = self.entries_by_path.cursor::<TraversalProgress>();
1621 new_entries = cursor.slice(&TraversalTarget::Path(path), Bias::Left, &());
1622 removed_entries = cursor.slice(&TraversalTarget::PathSuccessor(path), Bias::Left, &());
1623 new_entries.push_tree(cursor.suffix(&()), &());
1624 }
1625 self.entries_by_path = new_entries;
1626
1627 let mut entries_by_id_edits = Vec::new();
1628 for entry in removed_entries.cursor::<()>() {
1629 let removed_entry_id = self
1630 .removed_entry_ids
1631 .entry(entry.inode)
1632 .or_insert(entry.id);
1633 *removed_entry_id = cmp::max(*removed_entry_id, entry.id);
1634 entries_by_id_edits.push(Edit::Remove(entry.id));
1635 self.dec_extension_count(&entry.path, entry.is_ignored);
1636 }
1637 self.entries_by_id.edit(entries_by_id_edits, &());
1638
1639 if path.file_name() == Some(&GITIGNORE) {
1640 let abs_parent_path = self.abs_path.join(path.parent().unwrap());
1641 if let Some((_, scan_id)) = self
1642 .ignores_by_parent_abs_path
1643 .get_mut(abs_parent_path.as_path())
1644 {
1645 *scan_id = self.snapshot.scan_id;
1646 }
1647 } else if path.file_name() == Some(&DOT_GIT) {
1648 let parent_path = path.parent().unwrap();
1649 if let Ok(ix) = self
1650 .git_repositories
1651 .binary_search_by_key(&parent_path, |repo| repo.git_dir_path.as_ref())
1652 {
1653 self.git_repositories[ix].scan_id = self.snapshot.scan_id;
1654 }
1655 }
1656 }
1657
1658 fn ancestor_inodes_for_path(&self, path: &Path) -> TreeSet<u64> {
1659 let mut inodes = TreeSet::default();
1660 for ancestor in path.ancestors().skip(1) {
1661 if let Some(entry) = self.entry_for_path(ancestor) {
1662 inodes.insert(entry.inode);
1663 }
1664 }
1665 inodes
1666 }
1667
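    /// Builds the stack of `.gitignore` files that apply to `abs_path` by walking its
    /// ancestors from the root downward, collapsing to `IgnoreStack::all()` as soon as
    /// an ancestor (or the path itself) is ignored.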
1668 fn ignore_stack_for_abs_path(&self, abs_path: &Path, is_dir: bool) -> Arc<IgnoreStack> {
1669 let mut new_ignores = Vec::new();
1670 for ancestor in abs_path.ancestors().skip(1) {
1671 if let Some((ignore, _)) = self.ignores_by_parent_abs_path.get(ancestor) {
1672 new_ignores.push((ancestor, Some(ignore.clone())));
1673 } else {
1674 new_ignores.push((ancestor, None));
1675 }
1676 }
1677
1678 let mut ignore_stack = IgnoreStack::none();
1679 for (parent_abs_path, ignore) in new_ignores.into_iter().rev() {
1680 if ignore_stack.is_abs_path_ignored(parent_abs_path, true) {
1681 ignore_stack = IgnoreStack::all();
1682 break;
1683 } else if let Some(ignore) = ignore {
1684 ignore_stack = ignore_stack.append(parent_abs_path.into(), ignore);
1685 }
1686 }
1687
1688 if ignore_stack.is_abs_path_ignored(abs_path, is_dir) {
1689 ignore_stack = IgnoreStack::all();
1690 }
1691
1692 ignore_stack
1693 }
1694
1695 pub fn git_repo_entries(&self) -> &[GitRepositoryEntry] {
1696 &self.git_repositories
1697 }
1698}
1699
1700impl GitRepositoryEntry {
    // Note that this path should be relative to the worktree root.
1702 pub(crate) fn manages(&self, path: &Path) -> bool {
1703 path.starts_with(self.content_path.as_ref())
1704 }
1705
    // Note that this path should be relative to the worktree root.
1707 pub(crate) fn in_dot_git(&self, path: &Path) -> bool {
1708 path.starts_with(self.git_dir_path.as_ref())
1709 }
1710}
1711
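/// Loads and parses a `.gitignore` file at `abs_path`, rooting its patterns at the
/// file's parent directory.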
1712async fn build_gitignore(abs_path: &Path, fs: &dyn Fs) -> Result<Gitignore> {
1713 let contents = fs.load(abs_path).await?;
1714 let parent = abs_path.parent().unwrap_or_else(|| Path::new("/"));
1715 let mut builder = GitignoreBuilder::new(parent);
1716 for line in contents.lines() {
1717 builder.add_line(Some(abs_path.into()), line)?;
1718 }
1719 Ok(builder.build()?)
1720}
1721
1722impl WorktreeId {
1723 pub fn from_usize(handle_id: usize) -> Self {
1724 Self(handle_id)
1725 }
1726
1727 pub(crate) fn from_proto(id: u64) -> Self {
1728 Self(id as usize)
1729 }
1730
1731 pub fn to_proto(&self) -> u64 {
1732 self.0 as u64
1733 }
1734
1735 pub fn to_usize(&self) -> usize {
1736 self.0
1737 }
1738}
1739
1740impl fmt::Display for WorktreeId {
1741 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1742 self.0.fmt(f)
1743 }
1744}
1745
1746impl Deref for Worktree {
1747 type Target = Snapshot;
1748
1749 fn deref(&self) -> &Self::Target {
1750 match self {
1751 Worktree::Local(worktree) => &worktree.snapshot,
1752 Worktree::Remote(worktree) => &worktree.snapshot,
1753 }
1754 }
1755}
1756
1757impl Deref for LocalWorktree {
1758 type Target = LocalSnapshot;
1759
1760 fn deref(&self) -> &Self::Target {
1761 &self.snapshot
1762 }
1763}
1764
1765impl Deref for RemoteWorktree {
1766 type Target = Snapshot;
1767
1768 fn deref(&self) -> &Self::Target {
1769 &self.snapshot
1770 }
1771}
1772
1773impl fmt::Debug for LocalWorktree {
1774 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1775 self.snapshot.fmt(f)
1776 }
1777}
1778
1779impl fmt::Debug for Snapshot {
1780 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1781 struct EntriesById<'a>(&'a SumTree<PathEntry>);
1782 struct EntriesByPath<'a>(&'a SumTree<Entry>);
1783
1784 impl<'a> fmt::Debug for EntriesByPath<'a> {
1785 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1786 f.debug_map()
1787 .entries(self.0.iter().map(|entry| (&entry.path, entry.id)))
1788 .finish()
1789 }
1790 }
1791
1792 impl<'a> fmt::Debug for EntriesById<'a> {
1793 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1794 f.debug_list().entries(self.0.iter()).finish()
1795 }
1796 }
1797
1798 f.debug_struct("Snapshot")
1799 .field("id", &self.id)
1800 .field("root_name", &self.root_name)
1801 .field("entries_by_path", &EntriesByPath(&self.entries_by_path))
1802 .field("entries_by_id", &EntriesById(&self.entries_by_id))
1803 .finish()
1804 }
1805}
1806
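/// A handle to a file within a worktree. Implements `language::File` (and
/// `language::LocalFile` for local worktrees) so buffers can be loaded, saved, and
/// synchronized with collaborators.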
1807#[derive(Clone, PartialEq)]
1808pub struct File {
1809 pub worktree: ModelHandle<Worktree>,
1810 pub path: Arc<Path>,
1811 pub mtime: SystemTime,
1812 pub(crate) entry_id: ProjectEntryId,
1813 pub(crate) is_local: bool,
1814 pub(crate) is_deleted: bool,
1815}
1816
1817impl language::File for File {
1818 fn as_local(&self) -> Option<&dyn language::LocalFile> {
1819 if self.is_local {
1820 Some(self)
1821 } else {
1822 None
1823 }
1824 }
1825
1826 fn mtime(&self) -> SystemTime {
1827 self.mtime
1828 }
1829
1830 fn path(&self) -> &Arc<Path> {
1831 &self.path
1832 }
1833
1834 fn full_path(&self, cx: &AppContext) -> PathBuf {
1835 let mut full_path = PathBuf::new();
1836 let worktree = self.worktree.read(cx);
1837
1838 if worktree.is_visible() {
1839 full_path.push(worktree.root_name());
1840 } else {
1841 let path = worktree.abs_path();
1842
1843 if worktree.is_local() && path.starts_with(cx.global::<HomeDir>().as_path()) {
1844 full_path.push("~");
1845 full_path.push(path.strip_prefix(cx.global::<HomeDir>().as_path()).unwrap());
1846 } else {
1847 full_path.push(path)
1848 }
1849 }
1850
1851 if self.path.components().next().is_some() {
1852 full_path.push(&self.path);
1853 }
1854
1855 full_path
1856 }
1857
1858 /// Returns the last component of this handle's absolute path. If this handle refers to the root
1859 /// of its worktree, then this method will return the name of the worktree itself.
1860 fn file_name<'a>(&'a self, cx: &'a AppContext) -> &'a OsStr {
1861 self.path
1862 .file_name()
1863 .unwrap_or_else(|| OsStr::new(&self.worktree.read(cx).root_name))
1864 }
1865
1866 fn is_deleted(&self) -> bool {
1867 self.is_deleted
1868 }
1869
1870 fn save(
1871 &self,
1872 buffer_id: u64,
1873 text: Rope,
1874 version: clock::Global,
1875 line_ending: LineEnding,
1876 cx: &mut MutableAppContext,
1877 ) -> Task<Result<(clock::Global, String, SystemTime)>> {
1878 self.worktree.update(cx, |worktree, cx| match worktree {
1879 Worktree::Local(worktree) => {
1880 let rpc = worktree.client.clone();
1881 let project_id = worktree.share.as_ref().map(|share| share.project_id);
1882 let fingerprint = text.fingerprint();
1883 let save = worktree.write_file(self.path.clone(), text, line_ending, cx);
1884 cx.background().spawn(async move {
1885 let entry = save.await?;
1886 if let Some(project_id) = project_id {
1887 rpc.send(proto::BufferSaved {
1888 project_id,
1889 buffer_id,
1890 version: serialize_version(&version),
1891 mtime: Some(entry.mtime.into()),
1892 fingerprint: fingerprint.clone(),
1893 })?;
1894 }
1895 Ok((version, fingerprint, entry.mtime))
1896 })
1897 }
1898 Worktree::Remote(worktree) => {
1899 let rpc = worktree.client.clone();
1900 let project_id = worktree.project_id;
1901 cx.foreground().spawn(async move {
1902 let response = rpc
1903 .request(proto::SaveBuffer {
1904 project_id,
1905 buffer_id,
1906 version: serialize_version(&version),
1907 })
1908 .await?;
1909 let version = deserialize_version(response.version);
1910 let mtime = response
1911 .mtime
1912 .ok_or_else(|| anyhow!("missing mtime"))?
1913 .into();
1914 Ok((version, response.fingerprint, mtime))
1915 })
1916 }
1917 })
1918 }
1919
1920 fn as_any(&self) -> &dyn Any {
1921 self
1922 }
1923
1924 fn to_proto(&self) -> rpc::proto::File {
1925 rpc::proto::File {
1926 worktree_id: self.worktree.id() as u64,
1927 entry_id: self.entry_id.to_proto(),
1928 path: self.path.to_string_lossy().into(),
1929 mtime: Some(self.mtime.into()),
1930 is_deleted: self.is_deleted,
1931 }
1932 }
1933}
1934
1935impl language::LocalFile for File {
1936 fn abs_path(&self, cx: &AppContext) -> PathBuf {
1937 self.worktree
1938 .read(cx)
1939 .as_local()
1940 .unwrap()
1941 .abs_path
1942 .join(&self.path)
1943 }
1944
1945 fn load(&self, cx: &AppContext) -> Task<Result<String>> {
1946 let worktree = self.worktree.read(cx).as_local().unwrap();
1947 let abs_path = worktree.absolutize(&self.path);
1948 let fs = worktree.fs.clone();
1949 cx.background()
1950 .spawn(async move { fs.load(&abs_path).await })
1951 }
1952
1953 fn buffer_reloaded(
1954 &self,
1955 buffer_id: u64,
1956 version: &clock::Global,
1957 fingerprint: String,
1958 line_ending: LineEnding,
1959 mtime: SystemTime,
1960 cx: &mut MutableAppContext,
1961 ) {
1962 let worktree = self.worktree.read(cx).as_local().unwrap();
1963 if let Some(project_id) = worktree.share.as_ref().map(|share| share.project_id) {
1964 worktree
1965 .client
1966 .send(proto::BufferReloaded {
1967 project_id,
1968 buffer_id,
1969 version: serialize_version(version),
1970 mtime: Some(mtime.into()),
1971 fingerprint,
1972 line_ending: serialize_line_ending(line_ending) as i32,
1973 })
1974 .log_err();
1975 }
1976 }
1977}
1978
1979impl File {
1980 pub fn from_proto(
1981 proto: rpc::proto::File,
1982 worktree: ModelHandle<Worktree>,
1983 cx: &AppContext,
1984 ) -> Result<Self> {
1985 let worktree_id = worktree
1986 .read(cx)
1987 .as_remote()
1988 .ok_or_else(|| anyhow!("not remote"))?
1989 .id();
1990
1991 if worktree_id.to_proto() != proto.worktree_id {
1992 return Err(anyhow!("worktree id does not match file"));
1993 }
1994
1995 Ok(Self {
1996 worktree,
1997 path: Path::new(&proto.path).into(),
1998 mtime: proto.mtime.ok_or_else(|| anyhow!("no timestamp"))?.into(),
1999 entry_id: ProjectEntryId::from_proto(proto.entry_id),
2000 is_local: false,
2001 is_deleted: proto.is_deleted,
2002 })
2003 }
2004
2005 pub fn from_dyn(file: Option<&dyn language::File>) -> Option<&Self> {
2006 file.and_then(|f| f.as_any().downcast_ref())
2007 }
2008
2009 pub fn worktree_id(&self, cx: &AppContext) -> WorktreeId {
2010 self.worktree.read(cx).id()
2011 }
2012
2013 pub fn project_entry_id(&self, _: &AppContext) -> Option<ProjectEntryId> {
2014 if self.is_deleted {
2015 None
2016 } else {
2017 Some(self.entry_id)
2018 }
2019 }
2020}
2021
2022#[derive(Clone, Debug, PartialEq, Eq)]
2023pub struct Entry {
2024 pub id: ProjectEntryId,
2025 pub kind: EntryKind,
2026 pub path: Arc<Path>,
2027 pub inode: u64,
2028 pub mtime: SystemTime,
2029 pub is_symlink: bool,
2030 pub is_ignored: bool,
2031}
2032
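/// The kind of a worktree entry: a file (carrying the `CharBag` used for fuzzy
/// matching), a fully scanned directory, or a directory whose children have not been
/// scanned yet.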
2033#[derive(Clone, Copy, Debug, PartialEq, Eq)]
2034pub enum EntryKind {
2035 PendingDir,
2036 Dir,
2037 File(CharBag),
2038}
2039
2040impl Entry {
2041 fn new(
2042 path: Arc<Path>,
2043 metadata: &fs::Metadata,
2044 next_entry_id: &AtomicUsize,
2045 root_char_bag: CharBag,
2046 ) -> Self {
2047 Self {
2048 id: ProjectEntryId::new(next_entry_id),
2049 kind: if metadata.is_dir {
2050 EntryKind::PendingDir
2051 } else {
2052 EntryKind::File(char_bag_for_path(root_char_bag, &path))
2053 },
2054 path,
2055 inode: metadata.inode,
2056 mtime: metadata.mtime,
2057 is_symlink: metadata.is_symlink,
2058 is_ignored: false,
2059 }
2060 }
2061
2062 pub fn is_dir(&self) -> bool {
2063 matches!(self.kind, EntryKind::Dir | EntryKind::PendingDir)
2064 }
2065
2066 pub fn is_file(&self) -> bool {
2067 matches!(self.kind, EntryKind::File(_))
2068 }
2069}
2070
2071impl sum_tree::Item for Entry {
2072 type Summary = EntrySummary;
2073
2074 fn summary(&self) -> Self::Summary {
2075 let visible_count = if self.is_ignored { 0 } else { 1 };
2076 let file_count;
2077 let visible_file_count;
2078 if self.is_file() {
2079 file_count = 1;
2080 visible_file_count = visible_count;
2081 } else {
2082 file_count = 0;
2083 visible_file_count = 0;
2084 }
2085
2086 EntrySummary {
2087 max_path: self.path.clone(),
2088 count: 1,
2089 visible_count,
2090 file_count,
2091 visible_file_count,
2092 }
2093 }
2094}
2095
2096impl sum_tree::KeyedItem for Entry {
2097 type Key = PathKey;
2098
2099 fn key(&self) -> Self::Key {
2100 PathKey(self.path.clone())
2101 }
2102}
2103
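/// Sum-tree summary for `Entry`: the maximum path in a subtree plus running totals of
/// entries and files (overall and excluding ignored ones), which back the offset
/// arithmetic in `Traversal` below.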
2104#[derive(Clone, Debug)]
2105pub struct EntrySummary {
2106 max_path: Arc<Path>,
2107 count: usize,
2108 visible_count: usize,
2109 file_count: usize,
2110 visible_file_count: usize,
2111}
2112
2113impl Default for EntrySummary {
2114 fn default() -> Self {
2115 Self {
2116 max_path: Arc::from(Path::new("")),
2117 count: 0,
2118 visible_count: 0,
2119 file_count: 0,
2120 visible_file_count: 0,
2121 }
2122 }
2123}
2124
2125impl sum_tree::Summary for EntrySummary {
2126 type Context = ();
2127
2128 fn add_summary(&mut self, rhs: &Self, _: &()) {
2129 self.max_path = rhs.max_path.clone();
2130 self.count += rhs.count;
2131 self.visible_count += rhs.visible_count;
2132 self.file_count += rhs.file_count;
2133 self.visible_file_count += rhs.visible_file_count;
2134 }
2135}
2136
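/// A secondary record of an entry, keyed by `ProjectEntryId` and kept in its own sum
/// tree so that entries can be looked up by id as well as by path.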
2137#[derive(Clone, Debug)]
2138struct PathEntry {
2139 id: ProjectEntryId,
2140 path: Arc<Path>,
2141 is_ignored: bool,
2142 scan_id: usize,
2143}
2144
2145impl sum_tree::Item for PathEntry {
2146 type Summary = PathEntrySummary;
2147
2148 fn summary(&self) -> Self::Summary {
2149 PathEntrySummary { max_id: self.id }
2150 }
2151}
2152
2153impl sum_tree::KeyedItem for PathEntry {
2154 type Key = ProjectEntryId;
2155
2156 fn key(&self) -> Self::Key {
2157 self.id
2158 }
2159}
2160
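/// Sum-tree summary for `PathEntry`, tracking the maximum id in a subtree.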
2161#[derive(Clone, Debug, Default)]
2162struct PathEntrySummary {
2163 max_id: ProjectEntryId,
2164}
2165
2166impl sum_tree::Summary for PathEntrySummary {
2167 type Context = ();
2168
2169 fn add_summary(&mut self, summary: &Self, _: &Self::Context) {
2170 self.max_id = summary.max_id;
2171 }
2172}
2173
2174impl<'a> sum_tree::Dimension<'a, PathEntrySummary> for ProjectEntryId {
2175 fn add_summary(&mut self, summary: &'a PathEntrySummary, _: &()) {
2176 *self = summary.max_id;
2177 }
2178}
2179
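/// A path used as the ordering key for entries in the path-ordered sum tree.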
2180#[derive(Clone, Debug, Eq, PartialEq, Ord, PartialOrd)]
2181pub struct PathKey(Arc<Path>);
2182
2183impl Default for PathKey {
2184 fn default() -> Self {
2185 Self(Path::new("").into())
2186 }
2187}
2188
2189impl<'a> sum_tree::Dimension<'a, EntrySummary> for PathKey {
2190 fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
2191 self.0 = summary.max_path.clone();
2192 }
2193}
2194
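/// Scans a local worktree on the background executor, keeping the shared `LocalSnapshot`
/// up to date during the initial scan and as file-system events arrive, and reporting
/// `ScanState` transitions to the foreground through `notify`.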
2195struct BackgroundScanner {
2196 fs: Arc<dyn Fs>,
2197 snapshot: Arc<Mutex<LocalSnapshot>>,
2198 notify: UnboundedSender<ScanState>,
2199 executor: Arc<executor::Background>,
2200}
2201
2202impl BackgroundScanner {
2203 fn new(
2204 snapshot: Arc<Mutex<LocalSnapshot>>,
2205 notify: UnboundedSender<ScanState>,
2206 fs: Arc<dyn Fs>,
2207 executor: Arc<executor::Background>,
2208 ) -> Self {
2209 Self {
2210 fs,
2211 snapshot,
2212 notify,
2213 executor,
2214 }
2215 }
2216
2217 fn abs_path(&self) -> Arc<Path> {
2218 self.snapshot.lock().abs_path.clone()
2219 }
2220
2221 fn snapshot(&self) -> LocalSnapshot {
2222 self.snapshot.lock().clone()
2223 }
2224
2225 async fn run(mut self, events_rx: impl Stream<Item = Vec<fsevent::Event>>) {
2226 if self.notify.unbounded_send(ScanState::Initializing).is_err() {
2227 return;
2228 }
2229
2230 if let Err(err) = self.scan_dirs().await {
2231 if self
2232 .notify
2233 .unbounded_send(ScanState::Err(Arc::new(err)))
2234 .is_err()
2235 {
2236 return;
2237 }
2238 }
2239
2240 if self.notify.unbounded_send(ScanState::Idle).is_err() {
2241 return;
2242 }
2243
2244 futures::pin_mut!(events_rx);
2245
2246 while let Some(mut events) = events_rx.next().await {
2247 while let Poll::Ready(Some(additional_events)) = futures::poll!(events_rx.next()) {
2248 events.extend(additional_events);
2249 }
2250
2251 if self.notify.unbounded_send(ScanState::Updating).is_err() {
2252 break;
2253 }
2254
2255 if !self.process_events(events).await {
2256 break;
2257 }
2258
2259 if self.notify.unbounded_send(ScanState::Idle).is_err() {
2260 break;
2261 }
2262 }
2263 }
2264
2265 async fn scan_dirs(&mut self) -> Result<()> {
2266 let root_char_bag;
2267 let root_abs_path;
2268 let root_inode;
2269 let is_dir;
2270 let next_entry_id;
2271 {
2272 let snapshot = self.snapshot.lock();
2273 root_char_bag = snapshot.root_char_bag;
2274 root_abs_path = snapshot.abs_path.clone();
2275 root_inode = snapshot.root_entry().map(|e| e.inode);
2276 is_dir = snapshot.root_entry().map_or(false, |e| e.is_dir());
2277 next_entry_id = snapshot.next_entry_id.clone();
2278 };
2279
2280 // Populate ignores above the root.
2281 for ancestor in root_abs_path.ancestors().skip(1) {
2282 if let Ok(ignore) = build_gitignore(&ancestor.join(&*GITIGNORE), self.fs.as_ref()).await
2283 {
2284 self.snapshot
2285 .lock()
2286 .ignores_by_parent_abs_path
2287 .insert(ancestor.into(), (ignore.into(), 0));
2288 }
2289 }
2290
2291 let ignore_stack = {
2292 let mut snapshot = self.snapshot.lock();
2293 let ignore_stack = snapshot.ignore_stack_for_abs_path(&root_abs_path, true);
2294 if ignore_stack.is_all() {
2295 if let Some(mut root_entry) = snapshot.root_entry().cloned() {
2296 root_entry.is_ignored = true;
2297 snapshot.insert_entry(root_entry, self.fs.as_ref());
2298 }
2299 }
2300 ignore_stack
2301 };
2302
2303 if is_dir {
2304 let path: Arc<Path> = Arc::from(Path::new(""));
2305 let mut ancestor_inodes = TreeSet::default();
2306 if let Some(root_inode) = root_inode {
2307 ancestor_inodes.insert(root_inode);
2308 }
2309
2310 let (tx, rx) = channel::unbounded();
2311 self.executor
2312 .block(tx.send(ScanJob {
2313 abs_path: root_abs_path.to_path_buf(),
2314 path,
2315 ignore_stack,
2316 ancestor_inodes,
2317 scan_queue: tx.clone(),
2318 }))
2319 .unwrap();
2320 drop(tx);
2321
2322 self.executor
2323 .scoped(|scope| {
2324 for _ in 0..self.executor.num_cpus() {
2325 scope.spawn(async {
2326 while let Ok(job) = rx.recv().await {
2327 if let Err(err) = self
2328 .scan_dir(root_char_bag, next_entry_id.clone(), &job)
2329 .await
2330 {
2331 log::error!("error scanning {:?}: {}", job.abs_path, err);
2332 }
2333 }
2334 });
2335 }
2336 })
2337 .await;
2338 }
2339
2340 Ok(())
2341 }
2342
2343 async fn scan_dir(
2344 &self,
2345 root_char_bag: CharBag,
2346 next_entry_id: Arc<AtomicUsize>,
2347 job: &ScanJob,
2348 ) -> Result<()> {
2349 let mut new_entries: Vec<Entry> = Vec::new();
2350 let mut new_jobs: Vec<ScanJob> = Vec::new();
2351 let mut ignore_stack = job.ignore_stack.clone();
2352 let mut new_ignore = None;
2353
2354 let mut child_paths = self.fs.read_dir(&job.abs_path).await?;
2355 while let Some(child_abs_path) = child_paths.next().await {
2356 let child_abs_path = match child_abs_path {
2357 Ok(child_abs_path) => child_abs_path,
2358 Err(error) => {
2359 log::error!("error processing entry {:?}", error);
2360 continue;
2361 }
2362 };
2363 let child_name = child_abs_path.file_name().unwrap();
2364 let child_path: Arc<Path> = job.path.join(child_name).into();
2365 let child_metadata = match self.fs.metadata(&child_abs_path).await {
2366 Ok(Some(metadata)) => metadata,
2367 Ok(None) => continue,
2368 Err(err) => {
2369 log::error!("error processing {:?}: {:?}", child_abs_path, err);
2370 continue;
2371 }
2372 };
2373
2374 // If we find a .gitignore, add it to the stack of ignores used to determine which paths are ignored
2375 if child_name == *GITIGNORE {
2376 match build_gitignore(&child_abs_path, self.fs.as_ref()).await {
2377 Ok(ignore) => {
2378 let ignore = Arc::new(ignore);
2379 ignore_stack =
2380 ignore_stack.append(job.abs_path.as_path().into(), ignore.clone());
2381 new_ignore = Some(ignore);
2382 }
2383 Err(error) => {
2384 log::error!(
2385 "error loading .gitignore file {:?} - {:?}",
2386 child_name,
2387 error
2388 );
2389 }
2390 }
2391
                // Update the ignore statuses of any child entries we've already processed
                // to reflect the ignore file in the current directory. Because `.gitignore`
                // starts with a `.`, such entries should rarely be numerous. Also update the
                // ignore stacks of any jobs we've already queued for child directories.
2396 let mut new_jobs = new_jobs.iter_mut();
2397 for entry in &mut new_entries {
2398 let entry_abs_path = self.abs_path().join(&entry.path);
2399 entry.is_ignored =
2400 ignore_stack.is_abs_path_ignored(&entry_abs_path, entry.is_dir());
2401 if entry.is_dir() {
2402 new_jobs.next().unwrap().ignore_stack = if entry.is_ignored {
2403 IgnoreStack::all()
2404 } else {
2405 ignore_stack.clone()
2406 };
2407 }
2408 }
2409 }
2410
2411 let mut child_entry = Entry::new(
2412 child_path.clone(),
2413 &child_metadata,
2414 &next_entry_id,
2415 root_char_bag,
2416 );
2417
2418 if child_entry.is_dir() {
2419 let is_ignored = ignore_stack.is_abs_path_ignored(&child_abs_path, true);
2420 child_entry.is_ignored = is_ignored;
2421
2422 if !job.ancestor_inodes.contains(&child_entry.inode) {
2423 let mut ancestor_inodes = job.ancestor_inodes.clone();
2424 ancestor_inodes.insert(child_entry.inode);
2425 new_jobs.push(ScanJob {
2426 abs_path: child_abs_path,
2427 path: child_path,
2428 ignore_stack: if is_ignored {
2429 IgnoreStack::all()
2430 } else {
2431 ignore_stack.clone()
2432 },
2433 ancestor_inodes,
2434 scan_queue: job.scan_queue.clone(),
2435 });
2436 }
2437 } else {
2438 child_entry.is_ignored = ignore_stack.is_abs_path_ignored(&child_abs_path, false);
2439 }
2440
2441 new_entries.push(child_entry);
2442 }
2443
2444 self.snapshot.lock().populate_dir(
2445 job.path.clone(),
2446 new_entries,
2447 new_ignore,
2448 self.fs.as_ref(),
2449 );
2450 for new_job in new_jobs {
2451 job.scan_queue.send(new_job).await.unwrap();
2452 }
2453
2454 Ok(())
2455 }
2456
2457 async fn process_events(&mut self, mut events: Vec<fsevent::Event>) -> bool {
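        // Coalesce the event batch: after sorting by path, any event whose path lies
        // inside an earlier event's path is dropped, since rescanning the ancestor
        // directory already covers it.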
2458 events.sort_unstable_by(|a, b| a.path.cmp(&b.path));
2459 events.dedup_by(|a, b| a.path.starts_with(&b.path));
2460
2461 let root_char_bag;
2462 let root_abs_path;
2463 let next_entry_id;
2464 {
2465 let snapshot = self.snapshot.lock();
2466 root_char_bag = snapshot.root_char_bag;
2467 root_abs_path = snapshot.abs_path.clone();
2468 next_entry_id = snapshot.next_entry_id.clone();
2469 }
2470
2471 let root_canonical_path = if let Ok(path) = self.fs.canonicalize(&root_abs_path).await {
2472 path
2473 } else {
2474 return false;
2475 };
2476 let metadata = futures::future::join_all(
2477 events
2478 .iter()
2479 .map(|event| self.fs.metadata(&event.path))
2480 .collect::<Vec<_>>(),
2481 )
2482 .await;
2483
        // Hold the snapshot lock while clearing and re-inserting the root entries
        // for each event. This way, the snapshot is not observable to the foreground
        // thread while this operation is in progress.
2487 let (scan_queue_tx, scan_queue_rx) = channel::unbounded();
2488 {
2489 let mut snapshot = self.snapshot.lock();
2490 snapshot.scan_id += 1;
2491 for event in &events {
2492 if let Ok(path) = event.path.strip_prefix(&root_canonical_path) {
2493 snapshot.remove_path(path);
2494 }
2495 }
2496
2497 for (event, metadata) in events.into_iter().zip(metadata.into_iter()) {
2498 let path: Arc<Path> = match event.path.strip_prefix(&root_canonical_path) {
2499 Ok(path) => Arc::from(path.to_path_buf()),
2500 Err(_) => {
2501 log::error!(
2502 "unexpected event {:?} for root path {:?}",
2503 event.path,
2504 root_canonical_path
2505 );
2506 continue;
2507 }
2508 };
2509 let abs_path = root_abs_path.join(&path);
2510
2511 match metadata {
2512 Ok(Some(metadata)) => {
2513 let ignore_stack =
2514 snapshot.ignore_stack_for_abs_path(&abs_path, metadata.is_dir);
2515 let mut fs_entry = Entry::new(
2516 path.clone(),
2517 &metadata,
2518 snapshot.next_entry_id.as_ref(),
2519 snapshot.root_char_bag,
2520 );
2521 fs_entry.is_ignored = ignore_stack.is_all();
2522 snapshot.insert_entry(fs_entry, self.fs.as_ref());
2523
2524 let scan_id = snapshot.scan_id;
2525 if let Some(repo) = snapshot.in_dot_git(&path) {
2526 repo.repo.lock().reload_index();
2527 repo.scan_id = scan_id;
2528 }
2529
2530 let mut ancestor_inodes = snapshot.ancestor_inodes_for_path(&path);
2531 if metadata.is_dir && !ancestor_inodes.contains(&metadata.inode) {
2532 ancestor_inodes.insert(metadata.inode);
2533 self.executor
2534 .block(scan_queue_tx.send(ScanJob {
2535 abs_path,
2536 path,
2537 ignore_stack,
2538 ancestor_inodes,
2539 scan_queue: scan_queue_tx.clone(),
2540 }))
2541 .unwrap();
2542 }
2543 }
2544 Ok(None) => {}
2545 Err(err) => {
2546 // TODO - create a special 'error' entry in the entries tree to mark this
2547 log::error!("error reading file on event {:?}", err);
2548 }
2549 }
2550 }
2551 drop(scan_queue_tx);
2552 }
2553
2554 // Scan any directories that were created as part of this event batch.
2555 self.executor
2556 .scoped(|scope| {
2557 for _ in 0..self.executor.num_cpus() {
2558 scope.spawn(async {
2559 while let Ok(job) = scan_queue_rx.recv().await {
2560 if let Err(err) = self
2561 .scan_dir(root_char_bag, next_entry_id.clone(), &job)
2562 .await
2563 {
2564 log::error!("error scanning {:?}: {}", job.abs_path, err);
2565 }
2566 }
2567 });
2568 }
2569 })
2570 .await;
2571
2572 // Attempt to detect renames only over a single batch of file-system events.
2573 self.snapshot.lock().removed_entry_ids.clear();
2574
2575 self.update_ignore_statuses().await;
2576 self.update_git_repositories();
2577 true
2578 }
2579
2580 async fn update_ignore_statuses(&self) {
2581 let mut snapshot = self.snapshot();
2582
2583 let mut ignores_to_update = Vec::new();
2584 let mut ignores_to_delete = Vec::new();
2585 for (parent_abs_path, (_, scan_id)) in &snapshot.ignores_by_parent_abs_path {
2586 if let Ok(parent_path) = parent_abs_path.strip_prefix(&snapshot.abs_path) {
2587 if *scan_id == snapshot.scan_id && snapshot.entry_for_path(parent_path).is_some() {
2588 ignores_to_update.push(parent_abs_path.clone());
2589 }
2590
2591 let ignore_path = parent_path.join(&*GITIGNORE);
2592 if snapshot.entry_for_path(ignore_path).is_none() {
2593 ignores_to_delete.push(parent_abs_path.clone());
2594 }
2595 }
2596 }
2597
2598 for parent_abs_path in ignores_to_delete {
2599 snapshot.ignores_by_parent_abs_path.remove(&parent_abs_path);
2600 self.snapshot
2601 .lock()
2602 .ignores_by_parent_abs_path
2603 .remove(&parent_abs_path);
2604 }
2605
2606 let (ignore_queue_tx, ignore_queue_rx) = channel::unbounded();
2607 ignores_to_update.sort_unstable();
2608 let mut ignores_to_update = ignores_to_update.into_iter().peekable();
2609 while let Some(parent_abs_path) = ignores_to_update.next() {
2610 while ignores_to_update
2611 .peek()
2612 .map_or(false, |p| p.starts_with(&parent_abs_path))
2613 {
2614 ignores_to_update.next().unwrap();
2615 }
2616
2617 let ignore_stack = snapshot.ignore_stack_for_abs_path(&parent_abs_path, true);
2618 ignore_queue_tx
2619 .send(UpdateIgnoreStatusJob {
2620 abs_path: parent_abs_path,
2621 ignore_stack,
2622 ignore_queue: ignore_queue_tx.clone(),
2623 })
2624 .await
2625 .unwrap();
2626 }
2627 drop(ignore_queue_tx);
2628
2629 self.executor
2630 .scoped(|scope| {
2631 for _ in 0..self.executor.num_cpus() {
2632 scope.spawn(async {
2633 while let Ok(job) = ignore_queue_rx.recv().await {
2634 self.update_ignore_status(job, &snapshot).await;
2635 }
2636 });
2637 }
2638 })
2639 .await;
2640 }
2641
2642 fn update_git_repositories(&self) {
2643 let mut snapshot = self.snapshot.lock();
2644 let mut git_repositories = mem::take(&mut snapshot.git_repositories);
2645 git_repositories.retain(|repo| snapshot.entry_for_path(&repo.git_dir_path).is_some());
2646 snapshot.git_repositories = git_repositories;
2647 }
2648
2649 async fn update_ignore_status(&self, job: UpdateIgnoreStatusJob, snapshot: &LocalSnapshot) {
2650 let mut ignore_stack = job.ignore_stack;
2651 if let Some((ignore, _)) = snapshot.ignores_by_parent_abs_path.get(&job.abs_path) {
2652 ignore_stack = ignore_stack.append(job.abs_path.clone(), ignore.clone());
2653 }
2654
2655 let mut entries_by_id_edits = Vec::new();
2656 let mut entries_by_path_edits = Vec::new();
2657 let path = job.abs_path.strip_prefix(&snapshot.abs_path).unwrap();
2658 for mut entry in snapshot.child_entries(path).cloned() {
2659 let was_ignored = entry.is_ignored;
2660 let abs_path = self.abs_path().join(&entry.path);
2661 entry.is_ignored = ignore_stack.is_abs_path_ignored(&abs_path, entry.is_dir());
2662 if entry.is_dir() {
2663 let child_ignore_stack = if entry.is_ignored {
2664 IgnoreStack::all()
2665 } else {
2666 ignore_stack.clone()
2667 };
2668 job.ignore_queue
2669 .send(UpdateIgnoreStatusJob {
2670 abs_path: abs_path.into(),
2671 ignore_stack: child_ignore_stack,
2672 ignore_queue: job.ignore_queue.clone(),
2673 })
2674 .await
2675 .unwrap();
2676 }
2677
2678 if entry.is_ignored != was_ignored {
2679 let mut path_entry = snapshot.entries_by_id.get(&entry.id, &()).unwrap().clone();
2680 path_entry.scan_id = snapshot.scan_id;
2681 path_entry.is_ignored = entry.is_ignored;
2682 entries_by_id_edits.push(Edit::Insert(path_entry));
2683 entries_by_path_edits.push(Edit::Insert(entry));
2684 }
2685 }
2686
2687 let mut snapshot = self.snapshot.lock();
2688 snapshot.entries_by_path.edit(entries_by_path_edits, &());
2689 snapshot.entries_by_id.edit(entries_by_id_edits, &());
2690 }
2691}
2692
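/// Extends the worktree root's `CharBag` with the lowercased characters of `path`,
/// producing the bag stored on file entries for fuzzy matching.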
2693fn char_bag_for_path(root_char_bag: CharBag, path: &Path) -> CharBag {
2694 let mut result = root_char_bag;
2695 result.extend(
2696 path.to_string_lossy()
2697 .chars()
2698 .map(|c| c.to_ascii_lowercase()),
2699 );
2700 result
2701}
2702
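/// A unit of work for the background scanner: scan the directory at `abs_path` and push
/// jobs for its subdirectories onto `scan_queue`. `ancestor_inodes` records the inodes
/// already visited along this path so circular symlinks are not followed forever.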
2703struct ScanJob {
2704 abs_path: PathBuf,
2705 path: Arc<Path>,
2706 ignore_stack: Arc<IgnoreStack>,
2707 scan_queue: Sender<ScanJob>,
2708 ancestor_inodes: TreeSet<u64>,
2709}
2710
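/// A unit of work for recomputing ignore statuses beneath `abs_path` after a `.gitignore`
/// change; jobs for child directories are enqueued recursively via `ignore_queue`.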
2711struct UpdateIgnoreStatusJob {
2712 abs_path: Arc<Path>,
2713 ignore_stack: Arc<IgnoreStack>,
2714 ignore_queue: Sender<UpdateIgnoreStatusJob>,
2715}
2716
2717pub trait WorktreeHandle {
2718 #[cfg(any(test, feature = "test-support"))]
2719 fn flush_fs_events<'a>(
2720 &self,
2721 cx: &'a gpui::TestAppContext,
2722 ) -> futures::future::LocalBoxFuture<'a, ()>;
2723}
2724
2725impl WorktreeHandle for ModelHandle<Worktree> {
    // The worktree's FS event stream sometimes delivers "redundant" events for FS changes that
    // occurred before the worktree was constructed. These events can cause the worktree to perform
    // extra directory scans and emit extra scan-state notifications.
2729 //
2730 // This function mutates the worktree's directory and waits for those mutations to be picked up,
2731 // to ensure that all redundant FS events have already been processed.
2732 #[cfg(any(test, feature = "test-support"))]
2733 fn flush_fs_events<'a>(
2734 &self,
2735 cx: &'a gpui::TestAppContext,
2736 ) -> futures::future::LocalBoxFuture<'a, ()> {
2737 use smol::future::FutureExt;
2738
2739 let filename = "fs-event-sentinel";
2740 let tree = self.clone();
2741 let (fs, root_path) = self.read_with(cx, |tree, _| {
2742 let tree = tree.as_local().unwrap();
2743 (tree.fs.clone(), tree.abs_path().clone())
2744 });
2745
2746 async move {
2747 fs.create_file(&root_path.join(filename), Default::default())
2748 .await
2749 .unwrap();
2750 tree.condition(cx, |tree, _| tree.entry_for_path(filename).is_some())
2751 .await;
2752
2753 fs.remove_file(&root_path.join(filename), Default::default())
2754 .await
2755 .unwrap();
2756 tree.condition(cx, |tree, _| tree.entry_for_path(filename).is_none())
2757 .await;
2758
2759 cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
2760 .await;
2761 }
2762 .boxed_local()
2763 }
2764}
2765
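/// The dimension accumulated while seeking through the entry tree: the maximum path seen
/// so far plus the entry and file counts needed to position a `Traversal` by offset.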
2766#[derive(Clone, Debug)]
2767struct TraversalProgress<'a> {
2768 max_path: &'a Path,
2769 count: usize,
2770 visible_count: usize,
2771 file_count: usize,
2772 visible_file_count: usize,
2773}
2774
2775impl<'a> TraversalProgress<'a> {
2776 fn count(&self, include_dirs: bool, include_ignored: bool) -> usize {
2777 match (include_ignored, include_dirs) {
2778 (true, true) => self.count,
2779 (true, false) => self.file_count,
2780 (false, true) => self.visible_count,
2781 (false, false) => self.visible_file_count,
2782 }
2783 }
2784}
2785
2786impl<'a> sum_tree::Dimension<'a, EntrySummary> for TraversalProgress<'a> {
2787 fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
2788 self.max_path = summary.max_path.as_ref();
2789 self.count += summary.count;
2790 self.visible_count += summary.visible_count;
2791 self.file_count += summary.file_count;
2792 self.visible_file_count += summary.visible_file_count;
2793 }
2794}
2795
2796impl<'a> Default for TraversalProgress<'a> {
2797 fn default() -> Self {
2798 Self {
2799 max_path: Path::new(""),
2800 count: 0,
2801 visible_count: 0,
2802 file_count: 0,
2803 visible_file_count: 0,
2804 }
2805 }
2806}
2807
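/// A cursor over the entries of a snapshot, in path order, optionally skipping
/// directories and/or ignored entries.
///
/// A minimal usage sketch, mirroring the tests at the bottom of this file (the
/// `entries` constructor is defined on the snapshot types elsewhere in this module):
///
/// ```ignore
/// // Collect the paths of all non-ignored entries, in traversal order.
/// let paths: Vec<&Path> = snapshot
///     .entries(false)
///     .map(|entry| entry.path.as_ref())
///     .collect();
/// ```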
2808pub struct Traversal<'a> {
2809 cursor: sum_tree::Cursor<'a, Entry, TraversalProgress<'a>>,
2810 include_ignored: bool,
2811 include_dirs: bool,
2812}
2813
2814impl<'a> Traversal<'a> {
2815 pub fn advance(&mut self) -> bool {
2816 self.advance_to_offset(self.offset() + 1)
2817 }
2818
2819 pub fn advance_to_offset(&mut self, offset: usize) -> bool {
2820 self.cursor.seek_forward(
2821 &TraversalTarget::Count {
2822 count: offset,
2823 include_dirs: self.include_dirs,
2824 include_ignored: self.include_ignored,
2825 },
2826 Bias::Right,
2827 &(),
2828 )
2829 }
2830
2831 pub fn advance_to_sibling(&mut self) -> bool {
2832 while let Some(entry) = self.cursor.item() {
2833 self.cursor.seek_forward(
2834 &TraversalTarget::PathSuccessor(&entry.path),
2835 Bias::Left,
2836 &(),
2837 );
2838 if let Some(entry) = self.cursor.item() {
2839 if (self.include_dirs || !entry.is_dir())
2840 && (self.include_ignored || !entry.is_ignored)
2841 {
2842 return true;
2843 }
2844 }
2845 }
2846 false
2847 }
2848
2849 pub fn entry(&self) -> Option<&'a Entry> {
2850 self.cursor.item()
2851 }
2852
2853 pub fn offset(&self) -> usize {
2854 self.cursor
2855 .start()
2856 .count(self.include_dirs, self.include_ignored)
2857 }
2858}
2859
2860impl<'a> Iterator for Traversal<'a> {
2861 type Item = &'a Entry;
2862
2863 fn next(&mut self) -> Option<Self::Item> {
2864 if let Some(item) = self.entry() {
2865 self.advance();
2866 Some(item)
2867 } else {
2868 None
2869 }
2870 }
2871}
2872
2873#[derive(Debug)]
2874enum TraversalTarget<'a> {
2875 Path(&'a Path),
2876 PathSuccessor(&'a Path),
2877 Count {
2878 count: usize,
2879 include_ignored: bool,
2880 include_dirs: bool,
2881 },
2882}
2883
2884impl<'a, 'b> SeekTarget<'a, EntrySummary, TraversalProgress<'a>> for TraversalTarget<'b> {
2885 fn cmp(&self, cursor_location: &TraversalProgress<'a>, _: &()) -> Ordering {
2886 match self {
2887 TraversalTarget::Path(path) => path.cmp(&cursor_location.max_path),
2888 TraversalTarget::PathSuccessor(path) => {
2889 if !cursor_location.max_path.starts_with(path) {
2890 Ordering::Equal
2891 } else {
2892 Ordering::Greater
2893 }
2894 }
2895 TraversalTarget::Count {
2896 count,
2897 include_dirs,
2898 include_ignored,
2899 } => Ord::cmp(
2900 count,
2901 &cursor_location.count(*include_dirs, *include_ignored),
2902 ),
2903 }
2904 }
2905}
2906
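/// Iterates over the immediate children of `parent_path`, advancing the underlying
/// traversal to the next sibling after yielding each child so that the child's own
/// descendants are skipped.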
2907struct ChildEntriesIter<'a> {
2908 parent_path: &'a Path,
2909 traversal: Traversal<'a>,
2910}
2911
2912impl<'a> Iterator for ChildEntriesIter<'a> {
2913 type Item = &'a Entry;
2914
2915 fn next(&mut self) -> Option<Self::Item> {
2916 if let Some(item) = self.traversal.entry() {
2917 if item.path.starts_with(&self.parent_path) {
2918 self.traversal.advance_to_sibling();
2919 return Some(item);
2920 }
2921 }
2922 None
2923 }
2924}
2925
2926impl<'a> From<&'a Entry> for proto::Entry {
2927 fn from(entry: &'a Entry) -> Self {
2928 Self {
2929 id: entry.id.to_proto(),
2930 is_dir: entry.is_dir(),
2931 path: entry.path.as_os_str().as_bytes().to_vec(),
2932 inode: entry.inode,
2933 mtime: Some(entry.mtime.into()),
2934 is_symlink: entry.is_symlink,
2935 is_ignored: entry.is_ignored,
2936 }
2937 }
2938}
2939
2940impl<'a> TryFrom<(&'a CharBag, proto::Entry)> for Entry {
2941 type Error = anyhow::Error;
2942
2943 fn try_from((root_char_bag, entry): (&'a CharBag, proto::Entry)) -> Result<Self> {
2944 if let Some(mtime) = entry.mtime {
2945 let kind = if entry.is_dir {
2946 EntryKind::Dir
2947 } else {
2948 let mut char_bag = *root_char_bag;
2949 char_bag.extend(
2950 String::from_utf8_lossy(&entry.path)
2951 .chars()
2952 .map(|c| c.to_ascii_lowercase()),
2953 );
2954 EntryKind::File(char_bag)
2955 };
2956 let path: Arc<Path> = PathBuf::from(OsString::from_vec(entry.path)).into();
2957 Ok(Entry {
2958 id: ProjectEntryId::from_proto(entry.id),
2959 kind,
2960 path,
2961 inode: entry.inode,
2962 mtime: mtime.into(),
2963 is_symlink: entry.is_symlink,
2964 is_ignored: entry.is_ignored,
2965 })
2966 } else {
2967 Err(anyhow!(
2968 "missing mtime in remote worktree entry {:?}",
2969 entry.path
2970 ))
2971 }
2972 }
2973}
2974
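/// Sends a worktree update to the server, splitting it into chunks of at most
/// `MAX_CHUNK_SIZE` (2 under test, 256 otherwise) and awaiting a response to each
/// chunk before sending the next.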
2975async fn send_worktree_update(client: &Arc<Client>, update: proto::UpdateWorktree) -> Result<()> {
2976 #[cfg(any(test, feature = "test-support"))]
2977 const MAX_CHUNK_SIZE: usize = 2;
2978 #[cfg(not(any(test, feature = "test-support")))]
2979 const MAX_CHUNK_SIZE: usize = 256;
2980
2981 for update in proto::split_worktree_update(update, MAX_CHUNK_SIZE) {
2982 client.request(update).await?;
2983 }
2984
2985 Ok(())
2986}
2987
2988#[cfg(test)]
2989mod tests {
2990 use super::*;
2991 use anyhow::Result;
2992 use client::test::FakeHttpClient;
2993 use fs::repository::FakeGitRepository;
2994 use fs::{FakeFs, RealFs};
2995 use gpui::{executor::Deterministic, TestAppContext};
2996 use rand::prelude::*;
2997 use serde_json::json;
2998 use std::{
2999 env,
3000 fmt::Write,
3001 time::{SystemTime, UNIX_EPOCH},
3002 };
3003
3004 use util::test::temp_tree;
3005
3006 #[gpui::test]
3007 async fn test_traversal(cx: &mut TestAppContext) {
3008 let fs = FakeFs::new(cx.background());
3009 fs.insert_tree(
3010 "/root",
3011 json!({
3012 ".gitignore": "a/b\n",
3013 "a": {
3014 "b": "",
3015 "c": "",
3016 }
3017 }),
3018 )
3019 .await;
3020
3021 let http_client = FakeHttpClient::with_404_response();
3022 let client = cx.read(|cx| Client::new(http_client, cx));
3023
3024 let tree = Worktree::local(
3025 client,
3026 Arc::from(Path::new("/root")),
3027 true,
3028 fs,
3029 Default::default(),
3030 &mut cx.to_async(),
3031 )
3032 .await
3033 .unwrap();
3034 cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
3035 .await;
3036
3037 tree.read_with(cx, |tree, _| {
3038 assert_eq!(
3039 tree.entries(false)
3040 .map(|entry| entry.path.as_ref())
3041 .collect::<Vec<_>>(),
3042 vec![
3043 Path::new(""),
3044 Path::new(".gitignore"),
3045 Path::new("a"),
3046 Path::new("a/c"),
3047 ]
3048 );
3049 assert_eq!(
3050 tree.entries(true)
3051 .map(|entry| entry.path.as_ref())
3052 .collect::<Vec<_>>(),
3053 vec![
3054 Path::new(""),
3055 Path::new(".gitignore"),
3056 Path::new("a"),
3057 Path::new("a/b"),
3058 Path::new("a/c"),
3059 ]
3060 );
3061 })
3062 }
3063
3064 #[gpui::test(iterations = 10)]
3065 async fn test_circular_symlinks(executor: Arc<Deterministic>, cx: &mut TestAppContext) {
3066 let fs = FakeFs::new(cx.background());
3067 fs.insert_tree(
3068 "/root",
3069 json!({
3070 "lib": {
3071 "a": {
3072 "a.txt": ""
3073 },
3074 "b": {
3075 "b.txt": ""
3076 }
3077 }
3078 }),
3079 )
3080 .await;
3081 fs.insert_symlink("/root/lib/a/lib", "..".into()).await;
3082 fs.insert_symlink("/root/lib/b/lib", "..".into()).await;
3083
3084 let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
3085 let tree = Worktree::local(
3086 client,
3087 Arc::from(Path::new("/root")),
3088 true,
3089 fs.clone(),
3090 Default::default(),
3091 &mut cx.to_async(),
3092 )
3093 .await
3094 .unwrap();
3095
3096 cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
3097 .await;
3098
3099 tree.read_with(cx, |tree, _| {
3100 assert_eq!(
3101 tree.entries(false)
3102 .map(|entry| entry.path.as_ref())
3103 .collect::<Vec<_>>(),
3104 vec![
3105 Path::new(""),
3106 Path::new("lib"),
3107 Path::new("lib/a"),
3108 Path::new("lib/a/a.txt"),
3109 Path::new("lib/a/lib"),
3110 Path::new("lib/b"),
3111 Path::new("lib/b/b.txt"),
3112 Path::new("lib/b/lib"),
3113 ]
3114 );
3115 });
3116
3117 fs.rename(
3118 Path::new("/root/lib/a/lib"),
3119 Path::new("/root/lib/a/lib-2"),
3120 Default::default(),
3121 )
3122 .await
3123 .unwrap();
3124 executor.run_until_parked();
3125 tree.read_with(cx, |tree, _| {
3126 assert_eq!(
3127 tree.entries(false)
3128 .map(|entry| entry.path.as_ref())
3129 .collect::<Vec<_>>(),
3130 vec![
3131 Path::new(""),
3132 Path::new("lib"),
3133 Path::new("lib/a"),
3134 Path::new("lib/a/a.txt"),
3135 Path::new("lib/a/lib-2"),
3136 Path::new("lib/b"),
3137 Path::new("lib/b/b.txt"),
3138 Path::new("lib/b/lib"),
3139 ]
3140 );
3141 });
3142 }
3143
3144 #[gpui::test]
3145 async fn test_rescan_with_gitignore(cx: &mut TestAppContext) {
3146 let parent_dir = temp_tree(json!({
3147 ".gitignore": "ancestor-ignored-file1\nancestor-ignored-file2\n",
3148 "tree": {
3149 ".git": {},
3150 ".gitignore": "ignored-dir\n",
3151 "tracked-dir": {
3152 "tracked-file1": "",
3153 "ancestor-ignored-file1": "",
3154 },
3155 "ignored-dir": {
3156 "ignored-file1": ""
3157 }
3158 }
3159 }));
3160 let dir = parent_dir.path().join("tree");
3161
3162 let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
3163
3164 let tree = Worktree::local(
3165 client,
3166 dir.as_path(),
3167 true,
3168 Arc::new(RealFs),
3169 Default::default(),
3170 &mut cx.to_async(),
3171 )
3172 .await
3173 .unwrap();
3174 cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
3175 .await;
3176 tree.flush_fs_events(cx).await;
3177 cx.read(|cx| {
3178 let tree = tree.read(cx);
3179 assert!(
3180 !tree
3181 .entry_for_path("tracked-dir/tracked-file1")
3182 .unwrap()
3183 .is_ignored
3184 );
3185 assert!(
3186 tree.entry_for_path("tracked-dir/ancestor-ignored-file1")
3187 .unwrap()
3188 .is_ignored
3189 );
3190 assert!(
3191 tree.entry_for_path("ignored-dir/ignored-file1")
3192 .unwrap()
3193 .is_ignored
3194 );
3195 });
3196
3197 std::fs::write(dir.join("tracked-dir/tracked-file2"), "").unwrap();
3198 std::fs::write(dir.join("tracked-dir/ancestor-ignored-file2"), "").unwrap();
3199 std::fs::write(dir.join("ignored-dir/ignored-file2"), "").unwrap();
3200 tree.flush_fs_events(cx).await;
3201 cx.read(|cx| {
3202 let tree = tree.read(cx);
3203 assert!(
3204 !tree
3205 .entry_for_path("tracked-dir/tracked-file2")
3206 .unwrap()
3207 .is_ignored
3208 );
3209 assert!(
3210 tree.entry_for_path("tracked-dir/ancestor-ignored-file2")
3211 .unwrap()
3212 .is_ignored
3213 );
3214 assert!(
3215 tree.entry_for_path("ignored-dir/ignored-file2")
3216 .unwrap()
3217 .is_ignored
3218 );
3219 assert!(tree.entry_for_path(".git").unwrap().is_ignored);
3220 });
3221 }
3222
3223 #[gpui::test]
3224 async fn test_git_repository_for_path(cx: &mut TestAppContext) {
3225 let root = temp_tree(json!({
3226 "dir1": {
3227 ".git": {},
3228 "deps": {
3229 "dep1": {
3230 ".git": {},
3231 "src": {
3232 "a.txt": ""
3233 }
3234 }
3235 },
3236 "src": {
3237 "b.txt": ""
3238 }
3239 },
3240 "c.txt": "",
3241 }));
3242
3243 let http_client = FakeHttpClient::with_404_response();
3244 let client = cx.read(|cx| Client::new(http_client, cx));
3245 let tree = Worktree::local(
3246 client,
3247 root.path(),
3248 true,
3249 Arc::new(RealFs),
3250 Default::default(),
3251 &mut cx.to_async(),
3252 )
3253 .await
3254 .unwrap();
3255
3256 cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
3257 .await;
3258 tree.flush_fs_events(cx).await;
3259
3260 tree.read_with(cx, |tree, _cx| {
3261 let tree = tree.as_local().unwrap();
3262
3263 assert!(tree.repo_for("c.txt".as_ref()).is_none());
3264
3265 let repo = tree.repo_for("dir1/src/b.txt".as_ref()).unwrap();
3266 assert_eq!(repo.content_path.as_ref(), Path::new("dir1"));
3267 assert_eq!(repo.git_dir_path.as_ref(), Path::new("dir1/.git"));
3268
3269 let repo = tree.repo_for("dir1/deps/dep1/src/a.txt".as_ref()).unwrap();
3270 assert_eq!(repo.content_path.as_ref(), Path::new("dir1/deps/dep1"));
3271 assert_eq!(repo.git_dir_path.as_ref(), Path::new("dir1/deps/dep1/.git"),);
3272 });
3273
3274 let original_scan_id = tree.read_with(cx, |tree, _cx| {
3275 let tree = tree.as_local().unwrap();
3276 tree.repo_for("dir1/src/b.txt".as_ref()).unwrap().scan_id
3277 });
3278
3279 std::fs::write(root.path().join("dir1/.git/random_new_file"), "hello").unwrap();
3280 tree.flush_fs_events(cx).await;
3281
3282 tree.read_with(cx, |tree, _cx| {
3283 let tree = tree.as_local().unwrap();
3284 let new_scan_id = tree.repo_for("dir1/src/b.txt".as_ref()).unwrap().scan_id;
3285 assert_ne!(
3286 original_scan_id, new_scan_id,
3287 "original {original_scan_id}, new {new_scan_id}"
3288 );
3289 });
3290
3291 std::fs::remove_dir_all(root.path().join("dir1/.git")).unwrap();
3292 tree.flush_fs_events(cx).await;
3293
3294 tree.read_with(cx, |tree, _cx| {
3295 let tree = tree.as_local().unwrap();
3296
3297 assert!(tree.repo_for("dir1/src/b.txt".as_ref()).is_none());
3298 });
3299 }
3300
3301 #[test]
3302 fn test_changed_repos() {
3303 fn fake_entry(git_dir_path: impl AsRef<Path>, scan_id: usize) -> GitRepositoryEntry {
3304 GitRepositoryEntry {
3305 repo: Arc::new(Mutex::new(FakeGitRepository::default())),
3306 scan_id,
3307 content_path: git_dir_path.as_ref().parent().unwrap().into(),
3308 git_dir_path: git_dir_path.as_ref().into(),
3309 }
3310 }
3311
3312 let prev_repos: Vec<GitRepositoryEntry> = vec![
3313 fake_entry("/.git", 0),
3314 fake_entry("/a/.git", 0),
3315 fake_entry("/a/b/.git", 0),
3316 ];
3317
3318 let new_repos: Vec<GitRepositoryEntry> = vec![
3319 fake_entry("/a/.git", 1),
3320 fake_entry("/a/b/.git", 0),
3321 fake_entry("/a/c/.git", 0),
3322 ];
3323
3324 let res = LocalWorktree::changed_repos(&prev_repos, &new_repos);
3325
3326 // Deletion retained
3327 assert!(res
3328 .iter()
3329 .find(|repo| repo.git_dir_path.as_ref() == Path::new("/.git") && repo.scan_id == 0)
3330 .is_some());
3331
3332 // Update retained
3333 assert!(res
3334 .iter()
3335 .find(|repo| repo.git_dir_path.as_ref() == Path::new("/a/.git") && repo.scan_id == 1)
3336 .is_some());
3337
3338 // Addition retained
3339 assert!(res
3340 .iter()
3341 .find(|repo| repo.git_dir_path.as_ref() == Path::new("/a/c/.git") && repo.scan_id == 0)
3342 .is_some());
3343
        // No change, not retained
3345 assert!(res
3346 .iter()
3347 .find(|repo| repo.git_dir_path.as_ref() == Path::new("/a/b/.git") && repo.scan_id == 0)
3348 .is_none());
3349 }
3350
3351 #[gpui::test]
3352 async fn test_write_file(cx: &mut TestAppContext) {
3353 let dir = temp_tree(json!({
3354 ".git": {},
3355 ".gitignore": "ignored-dir\n",
3356 "tracked-dir": {},
3357 "ignored-dir": {}
3358 }));
3359
3360 let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
3361
3362 let tree = Worktree::local(
3363 client,
3364 dir.path(),
3365 true,
3366 Arc::new(RealFs),
3367 Default::default(),
3368 &mut cx.to_async(),
3369 )
3370 .await
3371 .unwrap();
3372 cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
3373 .await;
3374 tree.flush_fs_events(cx).await;
3375
3376 tree.update(cx, |tree, cx| {
3377 tree.as_local().unwrap().write_file(
3378 Path::new("tracked-dir/file.txt"),
3379 "hello".into(),
3380 Default::default(),
3381 cx,
3382 )
3383 })
3384 .await
3385 .unwrap();
3386 tree.update(cx, |tree, cx| {
3387 tree.as_local().unwrap().write_file(
3388 Path::new("ignored-dir/file.txt"),
3389 "world".into(),
3390 Default::default(),
3391 cx,
3392 )
3393 })
3394 .await
3395 .unwrap();
3396
3397 tree.read_with(cx, |tree, _| {
3398 let tracked = tree.entry_for_path("tracked-dir/file.txt").unwrap();
3399 let ignored = tree.entry_for_path("ignored-dir/file.txt").unwrap();
3400 assert!(!tracked.is_ignored);
3401 assert!(ignored.is_ignored);
3402 });
3403 }
3404
3405 #[gpui::test(iterations = 30)]
3406 async fn test_create_directory(cx: &mut TestAppContext) {
3407 let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
3408
3409 let fs = FakeFs::new(cx.background());
3410 fs.insert_tree(
3411 "/a",
3412 json!({
3413 "b": {},
3414 "c": {},
3415 "d": {},
3416 }),
3417 )
3418 .await;
3419
3420 let tree = Worktree::local(
3421 client,
3422 "/a".as_ref(),
3423 true,
3424 fs,
3425 Default::default(),
3426 &mut cx.to_async(),
3427 )
3428 .await
3429 .unwrap();
3430
3431 let entry = tree
3432 .update(cx, |tree, cx| {
3433 tree.as_local_mut()
3434 .unwrap()
3435 .create_entry("a/e".as_ref(), true, cx)
3436 })
3437 .await
3438 .unwrap();
3439 assert!(entry.is_dir());
3440
3441 cx.foreground().run_until_parked();
3442 tree.read_with(cx, |tree, _| {
3443 assert_eq!(tree.entry_for_path("a/e").unwrap().kind, EntryKind::Dir);
3444 });
3445 }
3446
3447 #[gpui::test(iterations = 100)]
3448 fn test_random(mut rng: StdRng) {
3449 let operations = env::var("OPERATIONS")
3450 .map(|o| o.parse().unwrap())
3451 .unwrap_or(40);
3452 let initial_entries = env::var("INITIAL_ENTRIES")
3453 .map(|o| o.parse().unwrap())
3454 .unwrap_or(20);
3455
3456 let root_dir = tempdir::TempDir::new("worktree-test").unwrap();
3457 for _ in 0..initial_entries {
3458 randomly_mutate_tree(root_dir.path(), 1.0, &mut rng).unwrap();
3459 }
3460 log::info!("Generated initial tree");
3461
3462 let (notify_tx, _notify_rx) = mpsc::unbounded();
3463 let fs = Arc::new(RealFs);
3464 let next_entry_id = Arc::new(AtomicUsize::new(0));
3465 let mut initial_snapshot = LocalSnapshot {
3466 removed_entry_ids: Default::default(),
3467 ignores_by_parent_abs_path: Default::default(),
3468 git_repositories: Default::default(),
3469 next_entry_id: next_entry_id.clone(),
3470 snapshot: Snapshot {
3471 id: WorktreeId::from_usize(0),
3472 entries_by_path: Default::default(),
3473 entries_by_id: Default::default(),
3474 abs_path: root_dir.path().into(),
3475 root_name: Default::default(),
3476 root_char_bag: Default::default(),
3477 scan_id: 0,
3478 is_complete: true,
3479 },
3480 extension_counts: Default::default(),
3481 };
3482 initial_snapshot.insert_entry(
3483 Entry::new(
3484 Path::new("").into(),
3485 &smol::block_on(fs.metadata(root_dir.path()))
3486 .unwrap()
3487 .unwrap(),
3488 &next_entry_id,
3489 Default::default(),
3490 ),
3491 fs.as_ref(),
3492 );
3493 let mut scanner = BackgroundScanner::new(
3494 Arc::new(Mutex::new(initial_snapshot.clone())),
3495 notify_tx,
3496 fs.clone(),
3497 Arc::new(gpui::executor::Background::new()),
3498 );
3499 smol::block_on(scanner.scan_dirs()).unwrap();
3500 scanner.snapshot().check_invariants();
3501
3502 let mut events = Vec::new();
3503 let mut snapshots = Vec::new();
3504 let mut mutations_len = operations;
3505 while mutations_len > 1 {
3506 if !events.is_empty() && rng.gen_bool(0.4) {
3507 let len = rng.gen_range(0..=events.len());
3508 let to_deliver = events.drain(0..len).collect::<Vec<_>>();
3509 log::info!("Delivering events: {:#?}", to_deliver);
3510 smol::block_on(scanner.process_events(to_deliver));
3511 scanner.snapshot().check_invariants();
3512 } else {
3513 events.extend(randomly_mutate_tree(root_dir.path(), 0.6, &mut rng).unwrap());
3514 mutations_len -= 1;
3515 }
3516
3517 if rng.gen_bool(0.2) {
3518 snapshots.push(scanner.snapshot());
3519 }
3520 }
3521 log::info!("Quiescing: {:#?}", events);
3522 smol::block_on(scanner.process_events(events));
3523 scanner.snapshot().check_invariants();
3524
3525 let (notify_tx, _notify_rx) = mpsc::unbounded();
3526 let mut new_scanner = BackgroundScanner::new(
3527 Arc::new(Mutex::new(initial_snapshot)),
3528 notify_tx,
3529 scanner.fs.clone(),
3530 scanner.executor.clone(),
3531 );
3532 smol::block_on(new_scanner.scan_dirs()).unwrap();
3533 assert_eq!(
3534 scanner.snapshot().to_vec(true),
3535 new_scanner.snapshot().to_vec(true)
3536 );
3537
3538 for mut prev_snapshot in snapshots {
3539 let include_ignored = rng.gen::<bool>();
3540 if !include_ignored {
3541 let mut entries_by_path_edits = Vec::new();
3542 let mut entries_by_id_edits = Vec::new();
3543 for entry in prev_snapshot
3544 .entries_by_id
3545 .cursor::<()>()
3546 .filter(|e| e.is_ignored)
3547 {
3548 entries_by_path_edits.push(Edit::Remove(PathKey(entry.path.clone())));
3549 entries_by_id_edits.push(Edit::Remove(entry.id));
3550 }
3551
3552 prev_snapshot
3553 .entries_by_path
3554 .edit(entries_by_path_edits, &());
3555 prev_snapshot.entries_by_id.edit(entries_by_id_edits, &());
3556 }
3557
3558 let update = scanner
3559 .snapshot()
3560 .build_update(&prev_snapshot, 0, 0, include_ignored);
3561 prev_snapshot.apply_remote_update(update).unwrap();
3562 assert_eq!(
3563 prev_snapshot.to_vec(true),
3564 scanner.snapshot().to_vec(include_ignored)
3565 );
3566 }
3567 }
3568
3569 fn randomly_mutate_tree(
3570 root_path: &Path,
3571 insertion_probability: f64,
3572 rng: &mut impl Rng,
3573 ) -> Result<Vec<fsevent::Event>> {
3574 let root_path = root_path.canonicalize().unwrap();
3575 let (dirs, files) = read_dir_recursive(root_path.clone());
3576
3577 let mut events = Vec::new();
3578 let mut record_event = |path: PathBuf| {
3579 events.push(fsevent::Event {
3580 event_id: SystemTime::now()
3581 .duration_since(UNIX_EPOCH)
3582 .unwrap()
3583 .as_secs(),
3584 flags: fsevent::StreamFlags::empty(),
3585 path,
3586 });
3587 };
3588
3589 if (files.is_empty() && dirs.len() == 1) || rng.gen_bool(insertion_probability) {
3590 let path = dirs.choose(rng).unwrap();
3591 let new_path = path.join(gen_name(rng));
3592
3593 if rng.gen() {
3594 log::info!("Creating dir {:?}", new_path.strip_prefix(root_path)?);
3595 std::fs::create_dir(&new_path)?;
3596 } else {
3597 log::info!("Creating file {:?}", new_path.strip_prefix(root_path)?);
3598 std::fs::write(&new_path, "")?;
3599 }
3600 record_event(new_path);
3601 } else if rng.gen_bool(0.05) {
3602 let ignore_dir_path = dirs.choose(rng).unwrap();
3603 let ignore_path = ignore_dir_path.join(&*GITIGNORE);
3604
3605 let (subdirs, subfiles) = read_dir_recursive(ignore_dir_path.clone());
3606 let files_to_ignore = {
3607 let len = rng.gen_range(0..=subfiles.len());
3608 subfiles.choose_multiple(rng, len)
3609 };
3610 let dirs_to_ignore = {
3611 let len = rng.gen_range(0..subdirs.len());
3612 subdirs.choose_multiple(rng, len)
3613 };
3614
3615 let mut ignore_contents = String::new();
3616 for path_to_ignore in files_to_ignore.chain(dirs_to_ignore) {
3617 writeln!(
3618 ignore_contents,
3619 "{}",
3620 path_to_ignore
3621 .strip_prefix(&ignore_dir_path)?
3622 .to_str()
3623 .unwrap()
3624 )
3625 .unwrap();
3626 }
3627 log::info!(
3628 "Creating {:?} with contents:\n{}",
3629 ignore_path.strip_prefix(&root_path)?,
3630 ignore_contents
3631 );
3632 std::fs::write(&ignore_path, ignore_contents).unwrap();
3633 record_event(ignore_path);
3634 } else {
3635 let old_path = {
3636 let file_path = files.choose(rng);
3637 let dir_path = dirs[1..].choose(rng);
3638 file_path.into_iter().chain(dir_path).choose(rng).unwrap()
3639 };
3640
3641 let is_rename = rng.gen();
3642 if is_rename {
3643 let new_path_parent = dirs
3644 .iter()
3645 .filter(|d| !d.starts_with(old_path))
3646 .choose(rng)
3647 .unwrap();
3648
3649 let overwrite_existing_dir =
3650 !old_path.starts_with(&new_path_parent) && rng.gen_bool(0.3);
3651 let new_path = if overwrite_existing_dir {
3652 std::fs::remove_dir_all(&new_path_parent).ok();
3653 new_path_parent.to_path_buf()
3654 } else {
3655 new_path_parent.join(gen_name(rng))
3656 };
3657
3658 log::info!(
3659 "Renaming {:?} to {}{:?}",
3660 old_path.strip_prefix(&root_path)?,
3661 if overwrite_existing_dir {
3662 "overwrite "
3663 } else {
3664 ""
3665 },
3666 new_path.strip_prefix(&root_path)?
3667 );
3668 std::fs::rename(&old_path, &new_path)?;
3669 record_event(old_path.clone());
3670 record_event(new_path);
3671 } else if old_path.is_dir() {
3672 let (dirs, files) = read_dir_recursive(old_path.clone());
3673
3674 log::info!("Deleting dir {:?}", old_path.strip_prefix(&root_path)?);
3675 std::fs::remove_dir_all(&old_path).unwrap();
3676 for file in files {
3677 record_event(file);
3678 }
3679 for dir in dirs {
3680 record_event(dir);
3681 }
3682 } else {
3683 log::info!("Deleting file {:?}", old_path.strip_prefix(&root_path)?);
3684 std::fs::remove_file(old_path).unwrap();
3685 record_event(old_path.clone());
3686 }
3687 }
3688
3689 Ok(events)
3690 }
3691
3692 fn read_dir_recursive(path: PathBuf) -> (Vec<PathBuf>, Vec<PathBuf>) {
3693 let child_entries = std::fs::read_dir(&path).unwrap();
3694 let mut dirs = vec![path];
3695 let mut files = Vec::new();
3696 for child_entry in child_entries {
3697 let child_path = child_entry.unwrap().path();
3698 if child_path.is_dir() {
3699 let (child_dirs, child_files) = read_dir_recursive(child_path);
3700 dirs.extend(child_dirs);
3701 files.extend(child_files);
3702 } else {
3703 files.push(child_path);
3704 }
3705 }
3706 (dirs, files)
3707 }
3708
3709 fn gen_name(rng: &mut impl Rng) -> String {
3710 (0..6)
3711 .map(|_| rng.sample(rand::distributions::Alphanumeric))
3712 .map(char::from)
3713 .collect()
3714 }
3715
3716 impl LocalSnapshot {
3717 fn check_invariants(&self) {
3718 let mut files = self.files(true, 0);
3719 let mut visible_files = self.files(false, 0);
3720 for entry in self.entries_by_path.cursor::<()>() {
3721 if entry.is_file() {
3722 assert_eq!(files.next().unwrap().inode, entry.inode);
3723 if !entry.is_ignored {
3724 assert_eq!(visible_files.next().unwrap().inode, entry.inode);
3725 }
3726 }
3727 }
3728 assert!(files.next().is_none());
3729 assert!(visible_files.next().is_none());
3730
3731 let mut bfs_paths = Vec::new();
3732 let mut stack = vec![Path::new("")];
3733 while let Some(path) = stack.pop() {
3734 bfs_paths.push(path);
3735 let ix = stack.len();
3736 for child_entry in self.child_entries(path) {
3737 stack.insert(ix, &child_entry.path);
3738 }
3739 }
3740
3741 let dfs_paths_via_iter = self
3742 .entries_by_path
3743 .cursor::<()>()
3744 .map(|e| e.path.as_ref())
3745 .collect::<Vec<_>>();
3746 assert_eq!(bfs_paths, dfs_paths_via_iter);
3747
3748 let dfs_paths_via_traversal = self
3749 .entries(true)
3750 .map(|e| e.path.as_ref())
3751 .collect::<Vec<_>>();
3752 assert_eq!(dfs_paths_via_traversal, dfs_paths_via_iter);
3753
3754 for ignore_parent_abs_path in self.ignores_by_parent_abs_path.keys() {
3755 let ignore_parent_path =
3756 ignore_parent_abs_path.strip_prefix(&self.abs_path).unwrap();
3757 assert!(self.entry_for_path(&ignore_parent_path).is_some());
3758 assert!(self
3759 .entry_for_path(ignore_parent_path.join(&*GITIGNORE))
3760 .is_some());
3761 }
3762
3763 // Ensure extension counts are correct.
3764 let mut expected_extension_counts = HashMap::default();
3765 for extension in self.entries(false).filter_map(|e| e.path.extension()) {
3766 *expected_extension_counts
3767 .entry(extension.into())
3768 .or_insert(0) += 1;
3769 }
3770 assert_eq!(self.extension_counts, expected_extension_counts);
3771 }
3772
3773 fn to_vec(&self, include_ignored: bool) -> Vec<(&Path, u64, bool)> {
3774 let mut paths = Vec::new();
3775 for entry in self.entries_by_path.cursor::<()>() {
3776 if include_ignored || !entry.is_ignored {
3777 paths.push((entry.path.as_ref(), entry.inode, entry.is_ignored));
3778 }
3779 }
3780 paths.sort_by(|a, b| a.0.cmp(b.0));
3781 paths
3782 }
3783 }
3784}