1use crate::{
2 copy_recursive, ignore::IgnoreStack, DiagnosticSummary, ProjectEntryId, RemoveOptions,
3};
4use ::ignore::gitignore::{Gitignore, GitignoreBuilder};
5use anyhow::{anyhow, Context, Result};
6use client::{proto, Client};
7use clock::ReplicaId;
8use collections::{HashMap, VecDeque};
9use fs::{repository::GitRepository, Fs, LineEnding};
10use futures::{
11 channel::{
12 mpsc::{self, UnboundedSender},
13 oneshot,
14 },
15 select_biased,
16 task::Poll,
17 Stream, StreamExt,
18};
19use fuzzy::CharBag;
20use git::{DOT_GIT, GITIGNORE};
21use gpui::{executor, AppContext, AsyncAppContext, Entity, ModelContext, ModelHandle, Task};
22use language::{
23 proto::{
24 deserialize_fingerprint, deserialize_version, serialize_fingerprint, serialize_line_ending,
25 serialize_version,
26 },
27 Buffer, DiagnosticEntry, File as _, PointUtf16, Rope, RopeFingerprint, Unclipped,
28};
29use parking_lot::Mutex;
30use postage::{
31 barrier,
32 prelude::{Sink as _, Stream as _},
33 watch,
34};
35use smol::channel::{self, Sender};
36use std::{
37 any::Any,
38 cmp::{self, Ordering},
39 convert::TryFrom,
40 ffi::OsStr,
41 fmt,
42 future::Future,
43 mem,
44 ops::{Deref, DerefMut},
45 path::{Path, PathBuf},
46 pin::Pin,
47 sync::{
48 atomic::{AtomicUsize, Ordering::SeqCst},
49 Arc,
50 },
51 time::{Duration, SystemTime},
52};
53use sum_tree::{Bias, Edit, SeekTarget, SumTree, TreeMap, TreeSet};
54use util::{paths::HOME, ResultExt, TryFutureExt};
55
/// Identifies a worktree within a project. For local worktrees this wraps the
/// id of the gpui model that owns it (see `WorktreeId::from_usize(cx.model_id())`).
#[derive(Copy, Clone, PartialEq, Eq, Debug, Hash, PartialOrd, Ord)]
pub struct WorktreeId(usize);
58
/// A tree of files tracked by a project: either scanned from the local
/// filesystem, or a read-only replica mirrored from a remote collaborator.
pub enum Worktree {
    Local(LocalWorktree),
    Remote(RemoteWorktree),
}
63
/// A worktree backed by the local filesystem, kept up to date by a background
/// scanner task.
pub struct LocalWorktree {
    snapshot: LocalSnapshot,
    // Requests the scanner to rescan specific absolute paths; the paired
    // barrier sender is dropped by the scanner once the rescan is applied.
    path_changes_tx: channel::Sender<(Vec<PathBuf>, barrier::Sender)>,
    // Watch pair tracking whether a scan is currently in progress.
    is_scanning: (watch::Sender<bool>, watch::Receiver<bool>),
    _background_scanner_task: Task<()>,
    // `Some` while this worktree is shared with remote collaborators.
    share: Option<ShareState>,
    diagnostics: HashMap<Arc<Path>, Vec<DiagnosticEntry<Unclipped<PointUtf16>>>>,
    diagnostic_summaries: TreeMap<PathKey, DiagnosticSummary>,
    client: Arc<Client>,
    fs: Arc<dyn Fs>,
    visible: bool,
}
76
/// A replica of a worktree hosted by a remote collaborator.
pub struct RemoteWorktree {
    // Foreground copy of the snapshot, synced from `background_snapshot`.
    snapshot: Snapshot,
    // Updated on a background task as updates arrive from the host.
    background_snapshot: Arc<Mutex<Snapshot>>,
    project_id: u64,
    client: Arc<Client>,
    // `None` once the host has disconnected; further updates are then dropped.
    updates_tx: Option<UnboundedSender<proto::UpdateWorktree>>,
    // Waiters keyed by scan id, sorted, resolved once that scan is observed.
    snapshot_subscriptions: VecDeque<(usize, oneshot::Sender<()>)>,
    replica_id: ReplicaId,
    diagnostic_summaries: TreeMap<PathKey, DiagnosticSummary>,
    visible: bool,
    disconnected: bool,
}
89
/// An immutable view of a worktree's entries at a point in time.
#[derive(Clone)]
pub struct Snapshot {
    id: WorktreeId,
    abs_path: Arc<Path>,
    root_name: String,
    // Lowercased characters of `root_name`, used for fuzzy matching.
    root_char_bag: CharBag,
    entries_by_path: SumTree<Entry>,
    entries_by_id: SumTree<PathEntry>,
    // Id of the scan currently in progress (or most recently started).
    scan_id: usize,
    // Id of the most recent scan that ran to completion.
    completed_scan_id: usize,
}
101
/// A git repository discovered inside the worktree.
#[derive(Clone)]
pub struct GitRepositoryEntry {
    pub(crate) repo: Arc<Mutex<dyn GitRepository>>,

    // Scan id recorded when this entry was last refreshed; compared in
    // `LocalWorktree::changed_repos` to detect changes between snapshots.
    pub(crate) scan_id: usize,
    // Path to folder containing the .git file or directory
    pub(crate) content_path: Arc<Path>,
    // Path to the actual .git folder.
    // Note: if .git is a file, this points to the folder indicated by the .git file
    pub(crate) git_dir_path: Arc<Path>,
}
113
// Manual `Debug` impl: `repo` is a `dyn GitRepository` without `Debug`, so only
// the two path fields are printed.
impl std::fmt::Debug for GitRepositoryEntry {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("GitRepositoryEntry")
            .field("content_path", &self.content_path)
            .field("git_dir_path", &self.git_dir_path)
            .finish()
    }
}
122
/// A `Snapshot` plus the local-only state the background scanner maintains.
#[derive(Debug)]
pub struct LocalSnapshot {
    // Parsed `.gitignore` per parent directory, with the scan id it was read at.
    ignores_by_parent_abs_path: HashMap<Arc<Path>, (Arc<Gitignore>, usize)>,
    git_repositories: Vec<GitRepositoryEntry>,
    removed_entry_ids: HashMap<u64, ProjectEntryId>,
    next_entry_id: Arc<AtomicUsize>,
    snapshot: Snapshot,
}
131
132impl Clone for LocalSnapshot {
133 fn clone(&self) -> Self {
134 Self {
135 ignores_by_parent_abs_path: self.ignores_by_parent_abs_path.clone(),
136 git_repositories: self.git_repositories.iter().cloned().collect(),
137 removed_entry_ids: self.removed_entry_ids.clone(),
138 next_entry_id: self.next_entry_id.clone(),
139 snapshot: self.snapshot.clone(),
140 }
141 }
142}
143
// Lets a `LocalSnapshot` be used anywhere a `&Snapshot` is expected.
impl Deref for LocalSnapshot {
    type Target = Snapshot;

    fn deref(&self) -> &Self::Target {
        &self.snapshot
    }
}
151
// Mutable counterpart of the `Deref` impl above.
impl DerefMut for LocalSnapshot {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.snapshot
    }
}
157
/// Messages sent from the background scanner to the `LocalWorktree` model.
enum ScanState {
    /// A scan has begun.
    Started,
    /// The scanner produced a new snapshot.
    Updated {
        snapshot: LocalSnapshot,
        changes: HashMap<Arc<Path>, PathChange>,
        // Dropped by the receiver to unblock whoever requested the rescan.
        barrier: Option<barrier::Sender>,
        // Whether the scanner is still working after this update.
        scanning: bool,
    },
}
167
/// State kept while a local worktree is shared with remote collaborators.
struct ShareState {
    project_id: u64,
    // Publishes each new snapshot to the task relaying updates to the server.
    snapshots_tx: watch::Sender<LocalSnapshot>,
    // Signals the relay task to retry after a failed update request.
    resume_updates: watch::Sender<()>,
    _maintain_remote_snapshot: Task<Option<()>>,
}
174
/// Events emitted by `Worktree` models.
pub enum Event {
    UpdatedEntries(HashMap<Arc<Path>, PathChange>),
    UpdatedGitRepositories(Vec<GitRepositoryEntry>),
}
179
// Registers `Worktree` as a gpui model that emits `Event`s.
impl Entity for Worktree {
    type Event = Event;
}
183
184impl Worktree {
    /// Creates a local worktree rooted at `path`, backed by `fs`.
    ///
    /// Stats the root path first (to learn whether it is a file or a
    /// directory), then spawns a background scanner that keeps the snapshot up
    /// to date and a foreground task that applies the scanner's updates to the
    /// model.
    pub async fn local(
        client: Arc<Client>,
        path: impl Into<Arc<Path>>,
        visible: bool,
        fs: Arc<dyn Fs>,
        next_entry_id: Arc<AtomicUsize>,
        cx: &mut AsyncAppContext,
    ) -> Result<ModelHandle<Self>> {
        // After determining whether the root entry is a file or a directory, populate the
        // snapshot's "root name", which will be used for the purpose of fuzzy matching.
        let abs_path = path.into();
        let metadata = fs
            .metadata(&abs_path)
            .await
            .context("failed to stat worktree path")?;

        Ok(cx.add_model(move |cx: &mut ModelContext<Worktree>| {
            let root_name = abs_path
                .file_name()
                .map_or(String::new(), |f| f.to_string_lossy().to_string());

            let mut snapshot = LocalSnapshot {
                ignores_by_parent_abs_path: Default::default(),
                git_repositories: Default::default(),
                removed_entry_ids: Default::default(),
                next_entry_id,
                snapshot: Snapshot {
                    id: WorktreeId::from_usize(cx.model_id()),
                    abs_path: abs_path.clone(),
                    root_name: root_name.clone(),
                    root_char_bag: root_name.chars().map(|c| c.to_ascii_lowercase()).collect(),
                    entries_by_path: Default::default(),
                    entries_by_id: Default::default(),
                    // scan_id starts ahead of completed_scan_id: a scan is pending.
                    scan_id: 1,
                    completed_scan_id: 0,
                },
            };

            // If the root exists, seed the snapshot with an entry for it
            // (the empty relative path denotes the root).
            if let Some(metadata) = metadata {
                snapshot.insert_entry(
                    Entry::new(
                        Arc::from(Path::new("")),
                        &metadata,
                        &snapshot.next_entry_id,
                        snapshot.root_char_bag,
                    ),
                    fs.as_ref(),
                );
            }

            let (path_changes_tx, path_changes_rx) = channel::unbounded();
            let (scan_states_tx, mut scan_states_rx) = mpsc::unbounded();

            // Forward scan-state messages from the scanner into this model:
            // update `is_scanning`, install new snapshots, and emit events.
            cx.spawn_weak(|this, mut cx| async move {
                while let Some((state, this)) = scan_states_rx.next().await.zip(this.upgrade(&cx)) {
                    this.update(&mut cx, |this, cx| {
                        let this = this.as_local_mut().unwrap();
                        match state {
                            ScanState::Started => {
                                *this.is_scanning.0.borrow_mut() = true;
                            }
                            ScanState::Updated {
                                snapshot,
                                changes,
                                barrier,
                                scanning,
                            } => {
                                *this.is_scanning.0.borrow_mut() = scanning;
                                this.set_snapshot(snapshot, cx);
                                cx.emit(Event::UpdatedEntries(changes));
                                // Dropping the barrier unblocks whoever
                                // requested this rescan.
                                drop(barrier);
                            }
                        }
                        cx.notify();
                    });
                }
            })
            .detach();

            // Run the scanner itself on the background executor, driven by
            // filesystem events (debounced to 100ms).
            let background_scanner_task = cx.background().spawn({
                let fs = fs.clone();
                let snapshot = snapshot.clone();
                let background = cx.background().clone();
                async move {
                    let events = fs.watch(&abs_path, Duration::from_millis(100)).await;
                    BackgroundScanner::new(
                        snapshot,
                        fs,
                        scan_states_tx,
                        background,
                        path_changes_rx,
                    )
                    .run(events)
                    .await;
                }
            });

            Worktree::Local(LocalWorktree {
                snapshot,
                is_scanning: watch::channel_with(true),
                share: None,
                path_changes_tx,
                _background_scanner_task: background_scanner_task,
                diagnostics: Default::default(),
                diagnostic_summaries: Default::default(),
                client,
                fs,
                visible,
            })
        }))
    }
296
    /// Creates a remote (replica) worktree from metadata sent by the host.
    ///
    /// Spawns a background task that applies incoming protobuf updates to a
    /// shared snapshot, and a foreground task that mirrors that snapshot into
    /// the model and resolves scan-id subscriptions.
    pub fn remote(
        project_remote_id: u64,
        replica_id: ReplicaId,
        worktree: proto::WorktreeMetadata,
        client: Arc<Client>,
        cx: &mut AppContext,
    ) -> ModelHandle<Self> {
        cx.add_model(|cx: &mut ModelContext<Self>| {
            let snapshot = Snapshot {
                id: WorktreeId(worktree.id as usize),
                abs_path: Arc::from(PathBuf::from(worktree.abs_path)),
                root_name: worktree.root_name.clone(),
                root_char_bag: worktree
                    .root_name
                    .chars()
                    .map(|c| c.to_ascii_lowercase())
                    .collect(),
                entries_by_path: Default::default(),
                entries_by_id: Default::default(),
                scan_id: 1,
                completed_scan_id: 0,
            };

            let (updates_tx, mut updates_rx) = mpsc::unbounded();
            let background_snapshot = Arc::new(Mutex::new(snapshot.clone()));
            let (mut snapshot_updated_tx, mut snapshot_updated_rx) = watch::channel();

            // Apply updates off the main thread, then signal that the
            // background snapshot changed.
            cx.background()
                .spawn({
                    let background_snapshot = background_snapshot.clone();
                    async move {
                        while let Some(update) = updates_rx.next().await {
                            if let Err(error) =
                                background_snapshot.lock().apply_remote_update(update)
                            {
                                log::error!("error applying worktree update: {}", error);
                            }
                            snapshot_updated_tx.send(()).await.ok();
                        }
                    }
                })
                .detach();

            // Mirror the background snapshot into the model and wake any
            // subscribers whose scan id has now been observed.
            cx.spawn_weak(|this, mut cx| async move {
                while (snapshot_updated_rx.recv().await).is_some() {
                    if let Some(this) = this.upgrade(&cx) {
                        this.update(&mut cx, |this, cx| {
                            let this = this.as_remote_mut().unwrap();
                            this.snapshot = this.background_snapshot.lock().clone();
                            cx.emit(Event::UpdatedEntries(Default::default()));
                            cx.notify();
                            // Subscriptions are kept sorted by scan id, so stop
                            // at the first one that hasn't been reached yet.
                            while let Some((scan_id, _)) = this.snapshot_subscriptions.front() {
                                if this.observed_snapshot(*scan_id) {
                                    let (_, tx) = this.snapshot_subscriptions.pop_front().unwrap();
                                    let _ = tx.send(());
                                } else {
                                    break;
                                }
                            }
                        });
                    } else {
                        // The model was dropped; stop mirroring.
                        break;
                    }
                }
            })
            .detach();

            Worktree::Remote(RemoteWorktree {
                project_id: project_remote_id,
                replica_id,
                snapshot: snapshot.clone(),
                background_snapshot,
                updates_tx: Some(updates_tx),
                snapshot_subscriptions: Default::default(),
                client: client.clone(),
                diagnostic_summaries: Default::default(),
                visible: worktree.visible,
                disconnected: false,
            })
        })
    }
378
379 pub fn as_local(&self) -> Option<&LocalWorktree> {
380 if let Worktree::Local(worktree) = self {
381 Some(worktree)
382 } else {
383 None
384 }
385 }
386
387 pub fn as_remote(&self) -> Option<&RemoteWorktree> {
388 if let Worktree::Remote(worktree) = self {
389 Some(worktree)
390 } else {
391 None
392 }
393 }
394
395 pub fn as_local_mut(&mut self) -> Option<&mut LocalWorktree> {
396 if let Worktree::Local(worktree) = self {
397 Some(worktree)
398 } else {
399 None
400 }
401 }
402
403 pub fn as_remote_mut(&mut self) -> Option<&mut RemoteWorktree> {
404 if let Worktree::Remote(worktree) = self {
405 Some(worktree)
406 } else {
407 None
408 }
409 }
410
411 pub fn is_local(&self) -> bool {
412 matches!(self, Worktree::Local(_))
413 }
414
415 pub fn is_remote(&self) -> bool {
416 !self.is_local()
417 }
418
    /// Returns a plain `Snapshot` regardless of the worktree flavor.
    pub fn snapshot(&self) -> Snapshot {
        match self {
            Worktree::Local(worktree) => worktree.snapshot().snapshot,
            Worktree::Remote(worktree) => worktree.snapshot(),
        }
    }
425
    /// Id of the scan currently in progress (or most recently started).
    pub fn scan_id(&self) -> usize {
        match self {
            Worktree::Local(worktree) => worktree.snapshot.scan_id,
            Worktree::Remote(worktree) => worktree.snapshot.scan_id,
        }
    }
432
    /// Id of the most recent scan that ran to completion.
    pub fn completed_scan_id(&self) -> usize {
        match self {
            Worktree::Local(worktree) => worktree.snapshot.completed_scan_id,
            Worktree::Remote(worktree) => worktree.snapshot.completed_scan_id,
        }
    }
439
    /// Whether this worktree should be shown in the UI.
    pub fn is_visible(&self) -> bool {
        match self {
            Worktree::Local(worktree) => worktree.visible,
            Worktree::Remote(worktree) => worktree.visible,
        }
    }
446
    /// The replica id of this worktree; the local (host) side is always 0.
    pub fn replica_id(&self) -> ReplicaId {
        match self {
            Worktree::Local(_) => 0,
            Worktree::Remote(worktree) => worktree.replica_id,
        }
    }
453
    /// Iterates over `(path, summary)` pairs for all files that currently have
    /// diagnostics.
    pub fn diagnostic_summaries(
        &self,
    ) -> impl Iterator<Item = (Arc<Path>, DiagnosticSummary)> + '_ {
        match self {
            Worktree::Local(worktree) => &worktree.diagnostic_summaries,
            Worktree::Remote(worktree) => &worktree.diagnostic_summaries,
        }
        .iter()
        .map(|(path, summary)| (path.0.clone(), *summary))
    }
464
    /// The absolute path of the worktree root.
    pub fn abs_path(&self) -> Arc<Path> {
        match self {
            Worktree::Local(worktree) => worktree.abs_path.clone(),
            Worktree::Remote(worktree) => worktree.abs_path.clone(),
        }
    }
471}
472
473impl LocalWorktree {
474 pub fn contains_abs_path(&self, path: &Path) -> bool {
475 path.starts_with(&self.abs_path)
476 }
477
478 fn absolutize(&self, path: &Path) -> PathBuf {
479 if path.file_name().is_some() {
480 self.abs_path.join(path)
481 } else {
482 self.abs_path.to_path_buf()
483 }
484 }
485
    /// Opens `path` as a `Buffer` model: loads its contents and git diff base,
    /// then kicks off an initial git-diff computation.
    pub(crate) fn load_buffer(
        &mut self,
        path: &Path,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<ModelHandle<Buffer>>> {
        let path = Arc::from(path);
        cx.spawn(move |this, mut cx| async move {
            let (file, contents, diff_base) = this
                .update(&mut cx, |t, cx| t.as_local().unwrap().load(&path, cx))
                .await?;
            Ok(cx.add_model(|cx| {
                // Replica 0: the local side is the host for its own buffers.
                let mut buffer = Buffer::from_file(0, contents, diff_base, Arc::new(file), cx);
                buffer.git_diff_recalc(cx);
                buffer
            }))
        })
    }
503
    /// Returns a copy of the stored diagnostics for `path`, if any.
    pub fn diagnostics_for_path(
        &self,
        path: &Path,
    ) -> Option<Vec<DiagnosticEntry<Unclipped<PointUtf16>>>> {
        self.diagnostics.get(path).cloned()
    }
510
    /// Replaces the stored diagnostics for `worktree_path` and recomputes its
    /// summary.
    ///
    /// Returns `Ok(true)` when either the old or the new summary is non-empty
    /// (i.e. something observable changed); in that case, the new summary is
    /// also forwarded to collaborators if the worktree is shared.
    pub fn update_diagnostics(
        &mut self,
        language_server_id: usize,
        worktree_path: Arc<Path>,
        diagnostics: Vec<DiagnosticEntry<Unclipped<PointUtf16>>>,
        _: &mut ModelContext<Worktree>,
    ) -> Result<bool> {
        self.diagnostics.remove(&worktree_path);
        let old_summary = self
            .diagnostic_summaries
            .remove(&PathKey(worktree_path.clone()))
            .unwrap_or_default();
        let new_summary = DiagnosticSummary::new(language_server_id, &diagnostics);
        // Only keep entries for paths that actually have diagnostics.
        if !new_summary.is_empty() {
            self.diagnostic_summaries
                .insert(PathKey(worktree_path.clone()), new_summary);
            self.diagnostics.insert(worktree_path.clone(), diagnostics);
        }

        let updated = !old_summary.is_empty() || !new_summary.is_empty();
        if updated {
            if let Some(share) = self.share.as_ref() {
                // Best-effort send; failures are logged, not propagated.
                self.client
                    .send(proto::UpdateDiagnosticSummary {
                        project_id: share.project_id,
                        worktree_id: self.id().to_proto(),
                        summary: Some(proto::DiagnosticSummary {
                            path: worktree_path.to_string_lossy().to_string(),
                            language_server_id: language_server_id as u64,
                            error_count: new_summary.error_count as u32,
                            warning_count: new_summary.warning_count as u32,
                        }),
                    })
                    .log_err();
            }
        }

        Ok(updated)
    }
550
    /// Installs a new snapshot produced by the background scanner.
    ///
    /// Publishes it to collaborators when shared, and emits
    /// `UpdatedGitRepositories` if any repository entries changed.
    fn set_snapshot(&mut self, new_snapshot: LocalSnapshot, cx: &mut ModelContext<Worktree>) {
        let updated_repos = Self::changed_repos(
            &self.snapshot.git_repositories,
            &new_snapshot.git_repositories,
        );
        self.snapshot = new_snapshot;

        if let Some(share) = self.share.as_mut() {
            *share.snapshots_tx.borrow_mut() = self.snapshot.clone();
        }

        if !updated_repos.is_empty() {
            cx.emit(Event::UpdatedGitRepositories(updated_repos));
        }
    }
566
567 fn changed_repos(
568 old_repos: &[GitRepositoryEntry],
569 new_repos: &[GitRepositoryEntry],
570 ) -> Vec<GitRepositoryEntry> {
571 fn diff<'a>(
572 a: &'a [GitRepositoryEntry],
573 b: &'a [GitRepositoryEntry],
574 updated: &mut HashMap<&'a Path, GitRepositoryEntry>,
575 ) {
576 for a_repo in a {
577 let matched = b.iter().find(|b_repo| {
578 a_repo.git_dir_path == b_repo.git_dir_path && a_repo.scan_id == b_repo.scan_id
579 });
580
581 if matched.is_none() {
582 updated.insert(a_repo.git_dir_path.as_ref(), a_repo.clone());
583 }
584 }
585 }
586
587 let mut updated = HashMap::<&Path, GitRepositoryEntry>::default();
588
589 diff(old_repos, new_repos, &mut updated);
590 diff(new_repos, old_repos, &mut updated);
591
592 updated.into_values().collect()
593 }
594
595 pub fn scan_complete(&self) -> impl Future<Output = ()> {
596 let mut is_scanning_rx = self.is_scanning.1.clone();
597 async move {
598 let mut is_scanning = is_scanning_rx.borrow().clone();
599 while is_scanning {
600 if let Some(value) = is_scanning_rx.recv().await {
601 is_scanning = value;
602 } else {
603 break;
604 }
605 }
606 }
607 }
608
    /// Returns a clone of the current local snapshot.
    pub fn snapshot(&self) -> LocalSnapshot {
        self.snapshot.clone()
    }
612
    /// Serializes this worktree's identity and root location for the wire.
    pub fn metadata_proto(&self) -> proto::WorktreeMetadata {
        proto::WorktreeMetadata {
            id: self.id().to_proto(),
            root_name: self.root_name().to_string(),
            visible: self.visible,
            abs_path: self.abs_path().as_os_str().to_string_lossy().into(),
        }
    }
621
    /// Loads a file's text from disk, along with its git index text (diff base,
    /// when the file is inside a known repository) and a fresh `File` handle.
    fn load(
        &self,
        path: &Path,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<(File, String, Option<String>)>> {
        let handle = cx.handle();
        let path = Arc::from(path);
        let abs_path = self.absolutize(&path);
        let fs = self.fs.clone();
        let snapshot = self.snapshot();

        cx.spawn(|this, mut cx| async move {
            let text = fs.load(&abs_path).await?;

            // If the file lives inside a known git repository, read the
            // index version of the file to use as the diff base.
            let diff_base = if let Some(repo) = snapshot.repo_for(&path) {
                if let Ok(repo_relative) = path.strip_prefix(repo.content_path) {
                    let repo_relative = repo_relative.to_owned();
                    cx.background()
                        .spawn(async move { repo.repo.lock().load_index_text(&repo_relative) })
                        .await
                } else {
                    None
                }
            } else {
                None
            };

            // Eagerly populate the snapshot with an updated entry for the loaded file
            let entry = this
                .update(&mut cx, |this, cx| {
                    this.as_local().unwrap().refresh_entry(path, None, cx)
                })
                .await?;

            Ok((
                File {
                    entry_id: entry.id,
                    worktree: handle,
                    path: entry.path,
                    mtime: entry.mtime,
                    is_local: true,
                    is_deleted: false,
                },
                text,
                diff_base,
            ))
        })
    }
670
671 pub fn save_buffer(
672 &self,
673 buffer_handle: ModelHandle<Buffer>,
674 path: Arc<Path>,
675 has_changed_file: bool,
676 cx: &mut ModelContext<Worktree>,
677 ) -> Task<Result<(clock::Global, RopeFingerprint, SystemTime)>> {
678 let handle = cx.handle();
679 let buffer = buffer_handle.read(cx);
680
681 let rpc = self.client.clone();
682 let buffer_id = buffer.remote_id();
683 let project_id = self.share.as_ref().map(|share| share.project_id);
684
685 let text = buffer.as_rope().clone();
686 let fingerprint = text.fingerprint();
687 let version = buffer.version();
688 let save = self.write_file(path, text, buffer.line_ending(), cx);
689
690 cx.as_mut().spawn(|mut cx| async move {
691 let entry = save.await?;
692
693 if has_changed_file {
694 let new_file = Arc::new(File {
695 entry_id: entry.id,
696 worktree: handle,
697 path: entry.path,
698 mtime: entry.mtime,
699 is_local: true,
700 is_deleted: false,
701 });
702
703 if let Some(project_id) = project_id {
704 rpc.send(proto::UpdateBufferFile {
705 project_id,
706 buffer_id,
707 file: Some(new_file.to_proto()),
708 })
709 .log_err();
710 }
711
712 buffer_handle.update(&mut cx, |buffer, cx| {
713 if has_changed_file {
714 buffer.file_updated(new_file, cx).detach();
715 }
716 });
717 }
718
719 if let Some(project_id) = project_id {
720 rpc.send(proto::BufferSaved {
721 project_id,
722 buffer_id,
723 version: serialize_version(&version),
724 mtime: Some(entry.mtime.into()),
725 fingerprint: serialize_fingerprint(fingerprint),
726 })?;
727 }
728
729 buffer_handle.update(&mut cx, |buffer, cx| {
730 buffer.did_save(version.clone(), fingerprint, entry.mtime, cx);
731 });
732
733 Ok((version, fingerprint, entry.mtime))
734 })
735 }
736
    /// Creates a file or directory at `path` (relative to the worktree root)
    /// and returns its refreshed entry.
    pub fn create_entry(
        &self,
        path: impl Into<Arc<Path>>,
        is_dir: bool,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<Entry>> {
        let path = path.into();
        let abs_path = self.absolutize(&path);
        let fs = self.fs.clone();
        let write = cx.background().spawn(async move {
            if is_dir {
                fs.create_dir(&abs_path).await
            } else {
                // Create an empty file with default content and line endings.
                fs.save(&abs_path, &Default::default(), Default::default())
                    .await
            }
        });

        cx.spawn(|this, mut cx| async move {
            write.await?;
            this.update(&mut cx, |this, cx| {
                this.as_local_mut().unwrap().refresh_entry(path, None, cx)
            })
            .await
        })
    }
763
    /// Saves `text` to `path` (relative to the worktree root) with the given
    /// line ending, then returns the refreshed entry.
    pub fn write_file(
        &self,
        path: impl Into<Arc<Path>>,
        text: Rope,
        line_ending: LineEnding,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<Entry>> {
        let path = path.into();
        let abs_path = self.absolutize(&path);
        let fs = self.fs.clone();
        let write = cx
            .background()
            .spawn(async move { fs.save(&abs_path, &text, line_ending).await });

        cx.spawn(|this, mut cx| async move {
            write.await?;
            this.update(&mut cx, |this, cx| {
                this.as_local_mut().unwrap().refresh_entry(path, None, cx)
            })
            .await
        })
    }
786
    /// Deletes the entry with the given id from disk, then waits for the
    /// scanner to observe the deletion. Returns `None` if the entry is unknown.
    pub fn delete_entry(
        &self,
        entry_id: ProjectEntryId,
        cx: &mut ModelContext<Worktree>,
    ) -> Option<Task<Result<()>>> {
        let entry = self.entry_for_id(entry_id)?.clone();
        let abs_path = self.abs_path.clone();
        let fs = self.fs.clone();

        let delete = cx.background().spawn(async move {
            // Canonicalize the root so the path matches what the scanner sees.
            let mut abs_path = fs.canonicalize(&abs_path).await?;
            if entry.path.file_name().is_some() {
                abs_path = abs_path.join(&entry.path);
            }
            if entry.is_file() {
                fs.remove_file(&abs_path, Default::default()).await?;
            } else {
                fs.remove_dir(
                    &abs_path,
                    RemoveOptions {
                        recursive: true,
                        ignore_if_not_exists: false,
                    },
                )
                .await?;
            }
            anyhow::Ok(abs_path)
        });

        Some(cx.spawn(|this, mut cx| async move {
            let abs_path = delete.await?;
            // Ask the scanner to rescan the deleted path, and wait for the
            // barrier to resolve before reporting completion.
            let (tx, mut rx) = barrier::channel();
            this.update(&mut cx, |this, _| {
                this.as_local_mut()
                    .unwrap()
                    .path_changes_tx
                    .try_send((vec![abs_path], tx))
            })?;
            rx.recv().await;
            Ok(())
        }))
    }
829
    /// Renames the entry with `entry_id` to `new_path` on disk, then returns
    /// the refreshed entry. Returns `None` if the entry is unknown.
    pub fn rename_entry(
        &self,
        entry_id: ProjectEntryId,
        new_path: impl Into<Arc<Path>>,
        cx: &mut ModelContext<Worktree>,
    ) -> Option<Task<Result<Entry>>> {
        let old_path = self.entry_for_id(entry_id)?.path.clone();
        let new_path = new_path.into();
        let abs_old_path = self.absolutize(&old_path);
        let abs_new_path = self.absolutize(&new_path);
        let fs = self.fs.clone();
        let rename = cx.background().spawn(async move {
            fs.rename(&abs_old_path, &abs_new_path, Default::default())
                .await
        });

        Some(cx.spawn(|this, mut cx| async move {
            rename.await?;
            // Rescan both the new and the old location.
            this.update(&mut cx, |this, cx| {
                this.as_local_mut()
                    .unwrap()
                    .refresh_entry(new_path.clone(), Some(old_path), cx)
            })
            .await
        }))
    }
856
    /// Recursively copies the entry with `entry_id` to `new_path` on disk, then
    /// returns the refreshed entry. Returns `None` if the entry is unknown.
    pub fn copy_entry(
        &self,
        entry_id: ProjectEntryId,
        new_path: impl Into<Arc<Path>>,
        cx: &mut ModelContext<Worktree>,
    ) -> Option<Task<Result<Entry>>> {
        let old_path = self.entry_for_id(entry_id)?.path.clone();
        let new_path = new_path.into();
        let abs_old_path = self.absolutize(&old_path);
        let abs_new_path = self.absolutize(&new_path);
        let fs = self.fs.clone();
        let copy = cx.background().spawn(async move {
            copy_recursive(
                fs.as_ref(),
                &abs_old_path,
                &abs_new_path,
                Default::default(),
            )
            .await
        });

        Some(cx.spawn(|this, mut cx| async move {
            copy.await?;
            this.update(&mut cx, |this, cx| {
                this.as_local_mut()
                    .unwrap()
                    .refresh_entry(new_path.clone(), None, cx)
            })
            .await
        }))
    }
888
    /// Asks the background scanner to rescan `path` (and `old_path`, for
    /// renames), then returns the refreshed entry from the updated snapshot.
    fn refresh_entry(
        &self,
        path: Arc<Path>,
        old_path: Option<Arc<Path>>,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<Entry>> {
        let fs = self.fs.clone();
        let abs_root_path = self.abs_path.clone();
        let path_changes_tx = self.path_changes_tx.clone();
        cx.spawn_weak(move |this, mut cx| async move {
            let abs_path = fs.canonicalize(&abs_root_path).await?;
            let mut paths = Vec::with_capacity(2);
            // An empty relative path (no file name) denotes the root itself.
            paths.push(if path.file_name().is_some() {
                abs_path.join(&path)
            } else {
                abs_path.clone()
            });
            if let Some(old_path) = old_path {
                paths.push(if old_path.file_name().is_some() {
                    abs_path.join(&old_path)
                } else {
                    abs_path.clone()
                });
            }

            // Block until the scanner has processed these paths.
            let (tx, mut rx) = barrier::channel();
            path_changes_tx.try_send((paths, tx))?;
            rx.recv().await;
            this.upgrade(&cx)
                .ok_or_else(|| anyhow!("worktree was dropped"))?
                .update(&mut cx, |this, _| {
                    this.entry_for_path(path)
                        .cloned()
                        .ok_or_else(|| anyhow!("failed to read path after update"))
                })
        })
    }
926
    /// Starts (or resumes) sharing this worktree with collaborators under
    /// `project_id`.
    ///
    /// On first share, sends all diagnostic summaries and spawns a task that
    /// streams snapshot updates to the server in chunks, retrying after
    /// failures when signaled via `resume_updates`. The returned task resolves
    /// once the initial snapshot has been sent.
    pub fn share(&mut self, project_id: u64, cx: &mut ModelContext<Worktree>) -> Task<Result<()>> {
        let (share_tx, share_rx) = oneshot::channel();

        if let Some(share) = self.share.as_mut() {
            // Already shared: resolve immediately and nudge the update task.
            let _ = share_tx.send(());
            *share.resume_updates.borrow_mut() = ();
        } else {
            let (snapshots_tx, mut snapshots_rx) = watch::channel_with(self.snapshot());
            let (resume_updates_tx, mut resume_updates_rx) = watch::channel();
            let worktree_id = cx.model_id() as u64;

            for (path, summary) in self.diagnostic_summaries.iter() {
                if let Err(e) = self.client.send(proto::UpdateDiagnosticSummary {
                    project_id,
                    worktree_id,
                    summary: Some(summary.to_proto(&path.0)),
                }) {
                    return Task::ready(Err(e));
                }
            }

            let _maintain_remote_snapshot = cx.background().spawn({
                let client = self.client.clone();
                async move {
                    let mut share_tx = Some(share_tx);
                    // Diff against an empty snapshot first, so the initial
                    // update transmits everything.
                    let mut prev_snapshot = LocalSnapshot {
                        ignores_by_parent_abs_path: Default::default(),
                        git_repositories: Default::default(),
                        removed_entry_ids: Default::default(),
                        next_entry_id: Default::default(),
                        snapshot: Snapshot {
                            id: WorktreeId(worktree_id as usize),
                            abs_path: Path::new("").into(),
                            root_name: Default::default(),
                            root_char_bag: Default::default(),
                            entries_by_path: Default::default(),
                            entries_by_id: Default::default(),
                            scan_id: 0,
                            completed_scan_id: 0,
                        },
                    };
                    while let Some(snapshot) = snapshots_rx.recv().await {
                        // Tiny chunks in tests exercise the chunking logic.
                        #[cfg(any(test, feature = "test-support"))]
                        const MAX_CHUNK_SIZE: usize = 2;
                        #[cfg(not(any(test, feature = "test-support")))]
                        const MAX_CHUNK_SIZE: usize = 256;

                        let update =
                            snapshot.build_update(&prev_snapshot, project_id, worktree_id, true);
                        for update in proto::split_worktree_update(update, MAX_CHUNK_SIZE) {
                            // Clear any stale resume signal before retrying.
                            let _ = resume_updates_rx.try_recv();
                            while let Err(error) = client.request(update.clone()).await {
                                log::error!("failed to send worktree update: {}", error);
                                log::info!("waiting to resume updates");
                                if resume_updates_rx.next().await.is_none() {
                                    return Ok(());
                                }
                            }
                        }

                        // Signal the initial share as complete after the first
                        // full snapshot has gone out.
                        if let Some(share_tx) = share_tx.take() {
                            let _ = share_tx.send(());
                        }

                        prev_snapshot = snapshot;
                    }

                    Ok::<_, anyhow::Error>(())
                }
                .log_err()
            });

            self.share = Some(ShareState {
                project_id,
                snapshots_tx,
                resume_updates: resume_updates_tx,
                _maintain_remote_snapshot,
            });
        }

        cx.foreground()
            .spawn(async move { share_rx.await.map_err(|_| anyhow!("share ended")) })
    }
1010
1011 pub fn unshare(&mut self) {
1012 self.share.take();
1013 }
1014
1015 pub fn is_shared(&self) -> bool {
1016 self.share.is_some()
1017 }
1018}
1019
1020impl RemoteWorktree {
    /// Returns a clone of the foreground snapshot.
    fn snapshot(&self) -> Snapshot {
        self.snapshot.clone()
    }
1024
    /// Marks the host as gone: stops accepting updates and drops all pending
    /// snapshot subscriptions (their receivers then resolve with an error).
    pub fn disconnected_from_host(&mut self) {
        self.updates_tx.take();
        self.snapshot_subscriptions.clear();
        self.disconnected = true;
    }
1030
    /// Asks the host to save the buffer, then applies the returned version,
    /// fingerprint, and mtime to the local buffer.
    pub fn save_buffer(
        &self,
        buffer_handle: ModelHandle<Buffer>,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<(clock::Global, RopeFingerprint, SystemTime)>> {
        let buffer = buffer_handle.read(cx);
        let buffer_id = buffer.remote_id();
        let version = buffer.version();
        let rpc = self.client.clone();
        let project_id = self.project_id;
        cx.as_mut().spawn(|mut cx| async move {
            let response = rpc
                .request(proto::SaveBuffer {
                    project_id,
                    buffer_id,
                    version: serialize_version(&version),
                })
                .await?;
            let version = deserialize_version(&response.version);
            let fingerprint = deserialize_fingerprint(&response.fingerprint)?;
            let mtime = response
                .mtime
                .ok_or_else(|| anyhow!("missing mtime"))?
                .into();

            buffer_handle.update(&mut cx, |buffer, cx| {
                buffer.did_save(version.clone(), fingerprint, mtime, cx);
            });

            Ok((version, fingerprint, mtime))
        })
    }
1063
1064 pub fn update_from_remote(&mut self, update: proto::UpdateWorktree) {
1065 if let Some(updates_tx) = &self.updates_tx {
1066 updates_tx
1067 .unbounded_send(update)
1068 .expect("consumer runs to completion");
1069 }
1070 }
1071
1072 fn observed_snapshot(&self, scan_id: usize) -> bool {
1073 self.completed_scan_id >= scan_id
1074 }
1075
    /// Returns a future resolving once a snapshot with at least `scan_id` has
    /// been applied. Fails immediately if the worktree has disconnected.
    fn wait_for_snapshot(&mut self, scan_id: usize) -> impl Future<Output = Result<()>> {
        let (tx, rx) = oneshot::channel();
        if self.observed_snapshot(scan_id) {
            let _ = tx.send(());
        } else if self.disconnected {
            // Dropping `tx` makes `rx.await` below return an error.
            drop(tx);
        } else {
            // Keep subscriptions sorted by scan id so they can be resolved in
            // order as snapshots arrive.
            match self
                .snapshot_subscriptions
                .binary_search_by_key(&scan_id, |probe| probe.0)
            {
                Ok(ix) | Err(ix) => self.snapshot_subscriptions.insert(ix, (scan_id, tx)),
            }
        }

        async move {
            rx.await?;
            Ok(())
        }
    }
1096
1097 pub fn update_diagnostic_summary(
1098 &mut self,
1099 path: Arc<Path>,
1100 summary: &proto::DiagnosticSummary,
1101 ) {
1102 let summary = DiagnosticSummary {
1103 language_server_id: summary.language_server_id as usize,
1104 error_count: summary.error_count as usize,
1105 warning_count: summary.warning_count as usize,
1106 };
1107 if summary.is_empty() {
1108 self.diagnostic_summaries.remove(&PathKey(path));
1109 } else {
1110 self.diagnostic_summaries.insert(PathKey(path), summary);
1111 }
1112 }
1113
    /// Inserts an entry sent by the host once the snapshot for `scan_id` has
    /// been observed, keeping the foreground snapshot in sync.
    pub fn insert_entry(
        &mut self,
        entry: proto::Entry,
        scan_id: usize,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<Entry>> {
        let wait_for_snapshot = self.wait_for_snapshot(scan_id);
        cx.spawn(|this, mut cx| async move {
            wait_for_snapshot.await?;
            this.update(&mut cx, |worktree, _| {
                let worktree = worktree.as_remote_mut().unwrap();
                let mut snapshot = worktree.background_snapshot.lock();
                let entry = snapshot.insert_entry(entry);
                worktree.snapshot = snapshot.clone();
                entry
            })
        })
    }
1132
    /// Deletes an entry by id once the snapshot for `scan_id` has been
    /// observed, keeping the foreground snapshot in sync.
    pub(crate) fn delete_entry(
        &mut self,
        id: ProjectEntryId,
        scan_id: usize,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<()>> {
        let wait_for_snapshot = self.wait_for_snapshot(scan_id);
        cx.spawn(|this, mut cx| async move {
            wait_for_snapshot.await?;
            this.update(&mut cx, |worktree, _| {
                let worktree = worktree.as_remote_mut().unwrap();
                let mut snapshot = worktree.background_snapshot.lock();
                snapshot.delete_entry(id);
                worktree.snapshot = snapshot.clone();
            });
            Ok(())
        })
    }
1151}
1152
1153impl Snapshot {
    /// The id of the worktree this snapshot belongs to.
    pub fn id(&self) -> WorktreeId {
        self.id
    }
1157
    /// The absolute path of the worktree root.
    pub fn abs_path(&self) -> &Arc<Path> {
        &self.abs_path
    }
1161
    /// Whether an entry with the given id exists in this snapshot.
    pub fn contains_entry(&self, entry_id: ProjectEntryId) -> bool {
        self.entries_by_id.get(&entry_id, &()).is_some()
    }
1165
    /// Inserts (or replaces) an entry deserialized from a protobuf message,
    /// keeping the by-path and by-id trees consistent.
    pub(crate) fn insert_entry(&mut self, entry: proto::Entry) -> Result<Entry> {
        let entry = Entry::try_from((&self.root_char_bag, entry))?;
        let old_entry = self.entries_by_id.insert_or_replace(
            PathEntry {
                id: entry.id,
                path: entry.path.clone(),
                is_ignored: entry.is_ignored,
                scan_id: 0,
            },
            &(),
        );
        // If this id was already present under another path, drop the stale
        // by-path record before inserting the new one.
        if let Some(old_entry) = old_entry {
            self.entries_by_path.remove(&PathKey(old_entry.path), &());
        }
        self.entries_by_path.insert_or_replace(entry.clone(), &());
        Ok(entry)
    }
1183
1184 fn delete_entry(&mut self, entry_id: ProjectEntryId) -> Option<Arc<Path>> {
1185 let removed_entry = self.entries_by_id.remove(&entry_id, &())?;
1186 self.entries_by_path = {
1187 let mut cursor = self.entries_by_path.cursor();
1188 let mut new_entries_by_path =
1189 cursor.slice(&TraversalTarget::Path(&removed_entry.path), Bias::Left, &());
1190 while let Some(entry) = cursor.item() {
1191 if entry.path.starts_with(&removed_entry.path) {
1192 self.entries_by_id.remove(&entry.id, &());
1193 cursor.next(&());
1194 } else {
1195 break;
1196 }
1197 }
1198 new_entries_by_path.push_tree(cursor.suffix(&()), &());
1199 new_entries_by_path
1200 };
1201
1202 Some(removed_entry.path)
1203 }
1204
1205 pub(crate) fn apply_remote_update(&mut self, update: proto::UpdateWorktree) -> Result<()> {
1206 let mut entries_by_path_edits = Vec::new();
1207 let mut entries_by_id_edits = Vec::new();
1208 for entry_id in update.removed_entries {
1209 if let Some(entry) = self.entry_for_id(ProjectEntryId::from_proto(entry_id)) {
1210 entries_by_path_edits.push(Edit::Remove(PathKey(entry.path.clone())));
1211 entries_by_id_edits.push(Edit::Remove(entry.id));
1212 }
1213 }
1214
1215 for entry in update.updated_entries {
1216 let entry = Entry::try_from((&self.root_char_bag, entry))?;
1217 if let Some(PathEntry { path, .. }) = self.entries_by_id.get(&entry.id, &()) {
1218 entries_by_path_edits.push(Edit::Remove(PathKey(path.clone())));
1219 }
1220 entries_by_id_edits.push(Edit::Insert(PathEntry {
1221 id: entry.id,
1222 path: entry.path.clone(),
1223 is_ignored: entry.is_ignored,
1224 scan_id: 0,
1225 }));
1226 entries_by_path_edits.push(Edit::Insert(entry));
1227 }
1228
1229 self.entries_by_path.edit(entries_by_path_edits, &());
1230 self.entries_by_id.edit(entries_by_id_edits, &());
1231 self.scan_id = update.scan_id as usize;
1232 if update.is_last_update {
1233 self.completed_scan_id = update.scan_id as usize;
1234 }
1235
1236 Ok(())
1237 }
1238
1239 pub fn file_count(&self) -> usize {
1240 self.entries_by_path.summary().file_count
1241 }
1242
1243 pub fn visible_file_count(&self) -> usize {
1244 self.entries_by_path.summary().visible_file_count
1245 }
1246
1247 fn traverse_from_offset(
1248 &self,
1249 include_dirs: bool,
1250 include_ignored: bool,
1251 start_offset: usize,
1252 ) -> Traversal {
1253 let mut cursor = self.entries_by_path.cursor();
1254 cursor.seek(
1255 &TraversalTarget::Count {
1256 count: start_offset,
1257 include_dirs,
1258 include_ignored,
1259 },
1260 Bias::Right,
1261 &(),
1262 );
1263 Traversal {
1264 cursor,
1265 include_dirs,
1266 include_ignored,
1267 }
1268 }
1269
1270 fn traverse_from_path(
1271 &self,
1272 include_dirs: bool,
1273 include_ignored: bool,
1274 path: &Path,
1275 ) -> Traversal {
1276 let mut cursor = self.entries_by_path.cursor();
1277 cursor.seek(&TraversalTarget::Path(path), Bias::Left, &());
1278 Traversal {
1279 cursor,
1280 include_dirs,
1281 include_ignored,
1282 }
1283 }
1284
1285 pub fn files(&self, include_ignored: bool, start: usize) -> Traversal {
1286 self.traverse_from_offset(false, include_ignored, start)
1287 }
1288
1289 pub fn entries(&self, include_ignored: bool) -> Traversal {
1290 self.traverse_from_offset(true, include_ignored, 0)
1291 }
1292
1293 pub fn paths(&self) -> impl Iterator<Item = &Arc<Path>> {
1294 let empty_path = Path::new("");
1295 self.entries_by_path
1296 .cursor::<()>()
1297 .filter(move |entry| entry.path.as_ref() != empty_path)
1298 .map(|entry| &entry.path)
1299 }
1300
1301 fn child_entries<'a>(&'a self, parent_path: &'a Path) -> ChildEntriesIter<'a> {
1302 let mut cursor = self.entries_by_path.cursor();
1303 cursor.seek(&TraversalTarget::Path(parent_path), Bias::Right, &());
1304 let traversal = Traversal {
1305 cursor,
1306 include_dirs: true,
1307 include_ignored: true,
1308 };
1309 ChildEntriesIter {
1310 traversal,
1311 parent_path,
1312 }
1313 }
1314
1315 pub fn root_entry(&self) -> Option<&Entry> {
1316 self.entry_for_path("")
1317 }
1318
1319 pub fn root_name(&self) -> &str {
1320 &self.root_name
1321 }
1322
1323 pub fn scan_id(&self) -> usize {
1324 self.scan_id
1325 }
1326
1327 pub fn entry_for_path(&self, path: impl AsRef<Path>) -> Option<&Entry> {
1328 let path = path.as_ref();
1329 self.traverse_from_path(true, true, path)
1330 .entry()
1331 .and_then(|entry| {
1332 if entry.path.as_ref() == path {
1333 Some(entry)
1334 } else {
1335 None
1336 }
1337 })
1338 }
1339
1340 pub fn entry_for_id(&self, id: ProjectEntryId) -> Option<&Entry> {
1341 let entry = self.entries_by_id.get(&id, &())?;
1342 self.entry_for_path(&entry.path)
1343 }
1344
1345 pub fn inode_for_path(&self, path: impl AsRef<Path>) -> Option<u64> {
1346 self.entry_for_path(path.as_ref()).map(|e| e.inode)
1347 }
1348}
1349
impl LocalSnapshot {
    /// Returns the most specific git repository containing `path`
    /// (worktree-relative), if any.
    pub(crate) fn repo_for(&self, path: &Path) -> Option<GitRepositoryEntry> {
        self.git_repositories
            .iter()
            // `git_repositories` is ordered lexicographically, so iterating in
            // reverse finds the deepest (most specific) repository first.
            .rev()
            .find(|repo| repo.manages(path))
            .cloned()
    }

    /// Returns the repository whose `.git` directory contains `path`
    /// (worktree-relative), if any.
    pub(crate) fn repo_with_dot_git_containing(
        &mut self,
        path: &Path,
    ) -> Option<&mut GitRepositoryEntry> {
        // Git repositories cannot be nested, so we don't need to reverse the order
        self.git_repositories
            .iter_mut()
            .find(|repo| repo.in_dot_git(path))
    }

    /// Builds an update message containing every entry in this snapshot, as
    /// would be sent when sharing begins.
    #[cfg(test)]
    pub(crate) fn build_initial_update(&self, project_id: u64) -> proto::UpdateWorktree {
        let root_name = self.root_name.clone();
        proto::UpdateWorktree {
            project_id,
            worktree_id: self.id().to_proto(),
            abs_path: self.abs_path().to_string_lossy().into(),
            root_name,
            updated_entries: self.entries_by_path.iter().map(Into::into).collect(),
            removed_entries: Default::default(),
            scan_id: self.scan_id as u64,
            is_last_update: true,
        }
    }

    /// Diffs this snapshot against an older snapshot `other`, producing the
    /// updated and removed entries to send to a remote collaborator.
    pub(crate) fn build_update(
        &self,
        other: &Self,
        project_id: u64,
        worktree_id: u64,
        include_ignored: bool,
    ) -> proto::UpdateWorktree {
        let mut updated_entries = Vec::new();
        let mut removed_entries = Vec::new();
        let mut self_entries = self
            .entries_by_id
            .cursor::<()>()
            .filter(|e| include_ignored || !e.is_ignored)
            .peekable();
        let mut other_entries = other
            .entries_by_id
            .cursor::<()>()
            .filter(|e| include_ignored || !e.is_ignored)
            .peekable();
        // Merge-join the two id-ordered streams: ids only in `self` were
        // added, ids only in `other` were removed, and ids present in both
        // are reported as updated when their scan ids differ.
        loop {
            match (self_entries.peek(), other_entries.peek()) {
                (Some(self_entry), Some(other_entry)) => {
                    match Ord::cmp(&self_entry.id, &other_entry.id) {
                        Ordering::Less => {
                            let entry = self.entry_for_id(self_entry.id).unwrap().into();
                            updated_entries.push(entry);
                            self_entries.next();
                        }
                        Ordering::Equal => {
                            // A differing scan id means the entry was touched
                            // since `other` was taken.
                            if self_entry.scan_id != other_entry.scan_id {
                                let entry = self.entry_for_id(self_entry.id).unwrap().into();
                                updated_entries.push(entry);
                            }

                            self_entries.next();
                            other_entries.next();
                        }
                        Ordering::Greater => {
                            removed_entries.push(other_entry.id.to_proto());
                            other_entries.next();
                        }
                    }
                }
                (Some(self_entry), None) => {
                    let entry = self.entry_for_id(self_entry.id).unwrap().into();
                    updated_entries.push(entry);
                    self_entries.next();
                }
                (None, Some(other_entry)) => {
                    removed_entries.push(other_entry.id.to_proto());
                    other_entries.next();
                }
                (None, None) => break,
            }
        }

        proto::UpdateWorktree {
            project_id,
            worktree_id,
            abs_path: self.abs_path().to_string_lossy().into(),
            root_name: self.root_name().to_string(),
            updated_entries,
            removed_entries,
            scan_id: self.scan_id as u64,
            is_last_update: self.completed_scan_id == self.scan_id,
        }
    }

    /// Inserts a locally-scanned entry, refreshing the cached gitignore when
    /// the entry is a `.gitignore` file. Returns the entry, whose id may have
    /// been replaced by `reuse_entry_id`.
    fn insert_entry(&mut self, mut entry: Entry, fs: &dyn Fs) -> Entry {
        if entry.is_file() && entry.path.file_name() == Some(&GITIGNORE) {
            let abs_path = self.abs_path.join(&entry.path);
            match smol::block_on(build_gitignore(&abs_path, fs)) {
                Ok(ignore) => {
                    self.ignores_by_parent_abs_path.insert(
                        abs_path.parent().unwrap().into(),
                        (Arc::new(ignore), self.scan_id),
                    );
                }
                Err(error) => {
                    log::error!(
                        "error loading .gitignore file {:?} - {:?}",
                        &entry.path,
                        error
                    );
                }
            }
        }

        self.reuse_entry_id(&mut entry);

        // Don't demote a directory that was already fully scanned back to the
        // pending state.
        if entry.kind == EntryKind::PendingDir {
            if let Some(existing_entry) =
                self.entries_by_path.get(&PathKey(entry.path.clone()), &())
            {
                entry.kind = existing_entry.kind;
            }
        }

        let scan_id = self.scan_id;
        self.entries_by_path.insert_or_replace(entry.clone(), &());
        self.entries_by_id.insert_or_replace(
            PathEntry {
                id: entry.id,
                path: entry.path.clone(),
                is_ignored: entry.is_ignored,
                scan_id,
            },
            &(),
        );

        entry
    }

    /// Records the children of `parent_path` discovered by a directory scan,
    /// marking the parent as fully scanned. Also registers the directory's
    /// gitignore (if one was found) and, for `.git` directories, the
    /// enclosing git repository.
    fn populate_dir(
        &mut self,
        parent_path: Arc<Path>,
        entries: impl IntoIterator<Item = Entry>,
        ignore: Option<Arc<Gitignore>>,
        fs: &dyn Fs,
    ) {
        let mut parent_entry = if let Some(parent_entry) =
            self.entries_by_path.get(&PathKey(parent_path.clone()), &())
        {
            parent_entry.clone()
        } else {
            log::warn!(
                "populating a directory {:?} that has been removed",
                parent_path
            );
            return;
        };

        match parent_entry.kind {
            EntryKind::PendingDir => {
                parent_entry.kind = EntryKind::Dir;
            }
            EntryKind::Dir => {}
            // The entry at this path is no longer a directory; discard the scan.
            _ => return,
        }

        if let Some(ignore) = ignore {
            self.ignores_by_parent_abs_path.insert(
                self.abs_path.join(&parent_path).into(),
                (ignore, self.scan_id),
            );
        }

        if parent_path.file_name() == Some(&DOT_GIT) {
            let abs_path = self.abs_path.join(&parent_path);
            let content_path: Arc<Path> = parent_path.parent().unwrap().into();
            // Register the repository only if it isn't already known,
            // inserting it at its sorted position.
            if let Err(ix) = self
                .git_repositories
                .binary_search_by_key(&&content_path, |repo| &repo.content_path)
            {
                if let Some(repo) = fs.open_repo(abs_path.as_path()) {
                    self.git_repositories.insert(
                        ix,
                        GitRepositoryEntry {
                            repo,
                            scan_id: 0,
                            content_path,
                            git_dir_path: parent_path,
                        },
                    );
                }
            }
        }

        let mut entries_by_path_edits = vec![Edit::Insert(parent_entry)];
        let mut entries_by_id_edits = Vec::new();

        for mut entry in entries {
            self.reuse_entry_id(&mut entry);
            entries_by_id_edits.push(Edit::Insert(PathEntry {
                id: entry.id,
                path: entry.path.clone(),
                is_ignored: entry.is_ignored,
                scan_id: self.scan_id,
            }));
            entries_by_path_edits.push(Edit::Insert(entry));
        }

        self.entries_by_path.edit(entries_by_path_edits, &());
        self.entries_by_id.edit(entries_by_id_edits, &());
    }

    /// Preserves entry-id stability: an entry re-created with the same inode,
    /// or rediscovered at the same path, keeps its previous id.
    fn reuse_entry_id(&mut self, entry: &mut Entry) {
        if let Some(removed_entry_id) = self.removed_entry_ids.remove(&entry.inode) {
            entry.id = removed_entry_id;
        } else if let Some(existing_entry) = self.entry_for_path(&entry.path) {
            entry.id = existing_entry.id;
        }
    }

    /// Removes the entry at `path` and everything beneath it, remembering the
    /// removed ids by inode so they can be reused if the files reappear.
    fn remove_path(&mut self, path: &Path) {
        let mut new_entries;
        let removed_entries;
        {
            // Split the tree into [before `path`], [`path` and descendants],
            // and [after], then stitch the outer two back together.
            let mut cursor = self.entries_by_path.cursor::<TraversalProgress>();
            new_entries = cursor.slice(&TraversalTarget::Path(path), Bias::Left, &());
            removed_entries = cursor.slice(&TraversalTarget::PathSuccessor(path), Bias::Left, &());
            new_entries.push_tree(cursor.suffix(&()), &());
        }
        self.entries_by_path = new_entries;

        let mut entries_by_id_edits = Vec::new();
        for entry in removed_entries.cursor::<()>() {
            // Remember the highest removed id per inode for later reuse.
            let removed_entry_id = self
                .removed_entry_ids
                .entry(entry.inode)
                .or_insert(entry.id);
            *removed_entry_id = cmp::max(*removed_entry_id, entry.id);
            entries_by_id_edits.push(Edit::Remove(entry.id));
        }
        self.entries_by_id.edit(entries_by_id_edits, &());

        if path.file_name() == Some(&GITIGNORE) {
            // A .gitignore was removed: mark its cached matcher as modified in
            // this scan by bumping its scan id.
            let abs_parent_path = self.abs_path.join(path.parent().unwrap());
            if let Some((_, scan_id)) = self
                .ignores_by_parent_abs_path
                .get_mut(abs_parent_path.as_path())
            {
                *scan_id = self.snapshot.scan_id;
            }
        } else if path.file_name() == Some(&DOT_GIT) {
            // A .git directory was removed: mark its repository entry as
            // modified in this scan.
            let parent_path = path.parent().unwrap();
            if let Ok(ix) = self
                .git_repositories
                .binary_search_by_key(&parent_path, |repo| repo.git_dir_path.as_ref())
            {
                self.git_repositories[ix].scan_id = self.snapshot.scan_id;
            }
        }
    }

    /// Collects the inodes of the ancestors of `path` that have entries in
    /// this snapshot.
    fn ancestor_inodes_for_path(&self, path: &Path) -> TreeSet<u64> {
        let mut inodes = TreeSet::default();
        for ancestor in path.ancestors().skip(1) {
            if let Some(entry) = self.entry_for_path(ancestor) {
                inodes.insert(entry.inode);
            }
        }
        inodes
    }

    /// Computes the stack of gitignores applying at `abs_path` by walking its
    /// ancestors from the outermost inward.
    fn ignore_stack_for_abs_path(&self, abs_path: &Path, is_dir: bool) -> Arc<IgnoreStack> {
        let mut new_ignores = Vec::new();
        for ancestor in abs_path.ancestors().skip(1) {
            if let Some((ignore, _)) = self.ignores_by_parent_abs_path.get(ancestor) {
                new_ignores.push((ancestor, Some(ignore.clone())));
            } else {
                new_ignores.push((ancestor, None));
            }
        }

        let mut ignore_stack = IgnoreStack::none();
        // Apply ignores top-down; once an ancestor directory is itself
        // ignored, everything beneath it is ignored too.
        for (parent_abs_path, ignore) in new_ignores.into_iter().rev() {
            if ignore_stack.is_abs_path_ignored(parent_abs_path, true) {
                ignore_stack = IgnoreStack::all();
                break;
            } else if let Some(ignore) = ignore {
                ignore_stack = ignore_stack.append(parent_abs_path.into(), ignore);
            }
        }

        if ignore_stack.is_abs_path_ignored(abs_path, is_dir) {
            ignore_stack = IgnoreStack::all();
        }

        ignore_stack
    }

    /// All git repositories known to this snapshot, ordered lexicographically
    /// by content path.
    pub fn git_repo_entries(&self) -> &[GitRepositoryEntry] {
        &self.git_repositories
    }
}
1661
1662impl GitRepositoryEntry {
1663 // Note that these paths should be relative to the worktree root.
1664 pub(crate) fn manages(&self, path: &Path) -> bool {
1665 path.starts_with(self.content_path.as_ref())
1666 }
1667
1668 // Note that this path should be relative to the worktree root.
1669 pub(crate) fn in_dot_git(&self, path: &Path) -> bool {
1670 path.starts_with(self.git_dir_path.as_ref())
1671 }
1672}
1673
1674async fn build_gitignore(abs_path: &Path, fs: &dyn Fs) -> Result<Gitignore> {
1675 let contents = fs.load(abs_path).await?;
1676 let parent = abs_path.parent().unwrap_or_else(|| Path::new("/"));
1677 let mut builder = GitignoreBuilder::new(parent);
1678 for line in contents.lines() {
1679 builder.add_line(Some(abs_path.into()), line)?;
1680 }
1681 Ok(builder.build()?)
1682}
1683
1684impl WorktreeId {
1685 pub fn from_usize(handle_id: usize) -> Self {
1686 Self(handle_id)
1687 }
1688
1689 pub(crate) fn from_proto(id: u64) -> Self {
1690 Self(id as usize)
1691 }
1692
1693 pub fn to_proto(&self) -> u64 {
1694 self.0 as u64
1695 }
1696
1697 pub fn to_usize(&self) -> usize {
1698 self.0
1699 }
1700}
1701
1702impl fmt::Display for WorktreeId {
1703 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1704 self.0.fmt(f)
1705 }
1706}
1707
1708impl Deref for Worktree {
1709 type Target = Snapshot;
1710
1711 fn deref(&self) -> &Self::Target {
1712 match self {
1713 Worktree::Local(worktree) => &worktree.snapshot,
1714 Worktree::Remote(worktree) => &worktree.snapshot,
1715 }
1716 }
1717}
1718
1719impl Deref for LocalWorktree {
1720 type Target = LocalSnapshot;
1721
1722 fn deref(&self) -> &Self::Target {
1723 &self.snapshot
1724 }
1725}
1726
1727impl Deref for RemoteWorktree {
1728 type Target = Snapshot;
1729
1730 fn deref(&self) -> &Self::Target {
1731 &self.snapshot
1732 }
1733}
1734
impl fmt::Debug for LocalWorktree {
    /// Debug-prints the worktree as its snapshot.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        self.snapshot.fmt(f)
    }
}
1740
1741impl fmt::Debug for Snapshot {
1742 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1743 struct EntriesById<'a>(&'a SumTree<PathEntry>);
1744 struct EntriesByPath<'a>(&'a SumTree<Entry>);
1745
1746 impl<'a> fmt::Debug for EntriesByPath<'a> {
1747 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1748 f.debug_map()
1749 .entries(self.0.iter().map(|entry| (&entry.path, entry.id)))
1750 .finish()
1751 }
1752 }
1753
1754 impl<'a> fmt::Debug for EntriesById<'a> {
1755 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1756 f.debug_list().entries(self.0.iter()).finish()
1757 }
1758 }
1759
1760 f.debug_struct("Snapshot")
1761 .field("id", &self.id)
1762 .field("root_name", &self.root_name)
1763 .field("entries_by_path", &EntriesByPath(&self.entries_by_path))
1764 .field("entries_by_id", &EntriesById(&self.entries_by_id))
1765 .finish()
1766 }
1767}
1768
/// A handle to a file within a worktree, as exposed to the language layer.
#[derive(Clone, PartialEq)]
pub struct File {
    /// The worktree containing this file.
    pub worktree: ModelHandle<Worktree>,
    /// Path of the file, relative to the worktree root.
    pub path: Arc<Path>,
    /// Last-known modification time of the file.
    pub mtime: SystemTime,
    /// Id of the worktree entry backing this file.
    pub(crate) entry_id: ProjectEntryId,
    /// Whether this file belongs to a local (vs. remote) worktree.
    pub(crate) is_local: bool,
    /// Whether the backing entry has been deleted.
    pub(crate) is_deleted: bool,
}
1778
1779impl language::File for File {
1780 fn as_local(&self) -> Option<&dyn language::LocalFile> {
1781 if self.is_local {
1782 Some(self)
1783 } else {
1784 None
1785 }
1786 }
1787
1788 fn mtime(&self) -> SystemTime {
1789 self.mtime
1790 }
1791
1792 fn path(&self) -> &Arc<Path> {
1793 &self.path
1794 }
1795
1796 fn full_path(&self, cx: &AppContext) -> PathBuf {
1797 let mut full_path = PathBuf::new();
1798 let worktree = self.worktree.read(cx);
1799
1800 if worktree.is_visible() {
1801 full_path.push(worktree.root_name());
1802 } else {
1803 let path = worktree.abs_path();
1804
1805 if worktree.is_local() && path.starts_with(HOME.as_path()) {
1806 full_path.push("~");
1807 full_path.push(path.strip_prefix(HOME.as_path()).unwrap());
1808 } else {
1809 full_path.push(path)
1810 }
1811 }
1812
1813 if self.path.components().next().is_some() {
1814 full_path.push(&self.path);
1815 }
1816
1817 full_path
1818 }
1819
1820 /// Returns the last component of this handle's absolute path. If this handle refers to the root
1821 /// of its worktree, then this method will return the name of the worktree itself.
1822 fn file_name<'a>(&'a self, cx: &'a AppContext) -> &'a OsStr {
1823 self.path
1824 .file_name()
1825 .unwrap_or_else(|| OsStr::new(&self.worktree.read(cx).root_name))
1826 }
1827
1828 fn is_deleted(&self) -> bool {
1829 self.is_deleted
1830 }
1831
1832 fn as_any(&self) -> &dyn Any {
1833 self
1834 }
1835
1836 fn to_proto(&self) -> rpc::proto::File {
1837 rpc::proto::File {
1838 worktree_id: self.worktree.id() as u64,
1839 entry_id: self.entry_id.to_proto(),
1840 path: self.path.to_string_lossy().into(),
1841 mtime: Some(self.mtime.into()),
1842 is_deleted: self.is_deleted,
1843 }
1844 }
1845}
1846
1847impl language::LocalFile for File {
1848 fn abs_path(&self, cx: &AppContext) -> PathBuf {
1849 self.worktree
1850 .read(cx)
1851 .as_local()
1852 .unwrap()
1853 .abs_path
1854 .join(&self.path)
1855 }
1856
1857 fn load(&self, cx: &AppContext) -> Task<Result<String>> {
1858 let worktree = self.worktree.read(cx).as_local().unwrap();
1859 let abs_path = worktree.absolutize(&self.path);
1860 let fs = worktree.fs.clone();
1861 cx.background()
1862 .spawn(async move { fs.load(&abs_path).await })
1863 }
1864
1865 fn buffer_reloaded(
1866 &self,
1867 buffer_id: u64,
1868 version: &clock::Global,
1869 fingerprint: RopeFingerprint,
1870 line_ending: LineEnding,
1871 mtime: SystemTime,
1872 cx: &mut AppContext,
1873 ) {
1874 let worktree = self.worktree.read(cx).as_local().unwrap();
1875 if let Some(project_id) = worktree.share.as_ref().map(|share| share.project_id) {
1876 worktree
1877 .client
1878 .send(proto::BufferReloaded {
1879 project_id,
1880 buffer_id,
1881 version: serialize_version(version),
1882 mtime: Some(mtime.into()),
1883 fingerprint: serialize_fingerprint(fingerprint),
1884 line_ending: serialize_line_ending(line_ending) as i32,
1885 })
1886 .log_err();
1887 }
1888 }
1889}
1890
1891impl File {
1892 pub fn from_proto(
1893 proto: rpc::proto::File,
1894 worktree: ModelHandle<Worktree>,
1895 cx: &AppContext,
1896 ) -> Result<Self> {
1897 let worktree_id = worktree
1898 .read(cx)
1899 .as_remote()
1900 .ok_or_else(|| anyhow!("not remote"))?
1901 .id();
1902
1903 if worktree_id.to_proto() != proto.worktree_id {
1904 return Err(anyhow!("worktree id does not match file"));
1905 }
1906
1907 Ok(Self {
1908 worktree,
1909 path: Path::new(&proto.path).into(),
1910 mtime: proto.mtime.ok_or_else(|| anyhow!("no timestamp"))?.into(),
1911 entry_id: ProjectEntryId::from_proto(proto.entry_id),
1912 is_local: false,
1913 is_deleted: proto.is_deleted,
1914 })
1915 }
1916
1917 pub fn from_dyn(file: Option<&Arc<dyn language::File>>) -> Option<&Self> {
1918 file.and_then(|f| f.as_any().downcast_ref())
1919 }
1920
1921 pub fn worktree_id(&self, cx: &AppContext) -> WorktreeId {
1922 self.worktree.read(cx).id()
1923 }
1924
1925 pub fn project_entry_id(&self, _: &AppContext) -> Option<ProjectEntryId> {
1926 if self.is_deleted {
1927 None
1928 } else {
1929 Some(self.entry_id)
1930 }
1931 }
1932}
1933
/// A single file or directory within a worktree.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct Entry {
    pub id: ProjectEntryId,
    pub kind: EntryKind,
    /// Path relative to the worktree root.
    pub path: Arc<Path>,
    pub inode: u64,
    /// Modification time reported by the filesystem.
    pub mtime: SystemTime,
    pub is_symlink: bool,
    /// Whether the entry is matched by a gitignore.
    pub is_ignored: bool,
}
1944
/// Distinguishes files from directories; a directory is "pending" until its
/// children have been scanned.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum EntryKind {
    /// A directory whose contents haven't been scanned yet.
    PendingDir,
    /// A fully-scanned directory.
    Dir,
    /// A file, carrying the character bag of its path (used by the `fuzzy`
    /// crate for matching).
    File(CharBag),
}
1951
/// The kind of change that happened to a path between two snapshots.
#[derive(Clone, Copy, Debug)]
pub enum PathChange {
    /// The path was created.
    Added,
    /// The path was removed.
    Removed,
    /// The path's entry was modified.
    Updated,
    /// The path was either created or modified; which one isn't known.
    AddedOrUpdated,
}
1959
1960impl Entry {
1961 fn new(
1962 path: Arc<Path>,
1963 metadata: &fs::Metadata,
1964 next_entry_id: &AtomicUsize,
1965 root_char_bag: CharBag,
1966 ) -> Self {
1967 Self {
1968 id: ProjectEntryId::new(next_entry_id),
1969 kind: if metadata.is_dir {
1970 EntryKind::PendingDir
1971 } else {
1972 EntryKind::File(char_bag_for_path(root_char_bag, &path))
1973 },
1974 path,
1975 inode: metadata.inode,
1976 mtime: metadata.mtime,
1977 is_symlink: metadata.is_symlink,
1978 is_ignored: false,
1979 }
1980 }
1981
1982 pub fn is_dir(&self) -> bool {
1983 matches!(self.kind, EntryKind::Dir | EntryKind::PendingDir)
1984 }
1985
1986 pub fn is_file(&self) -> bool {
1987 matches!(self.kind, EntryKind::File(_))
1988 }
1989}
1990
1991impl sum_tree::Item for Entry {
1992 type Summary = EntrySummary;
1993
1994 fn summary(&self) -> Self::Summary {
1995 let visible_count = if self.is_ignored { 0 } else { 1 };
1996 let file_count;
1997 let visible_file_count;
1998 if self.is_file() {
1999 file_count = 1;
2000 visible_file_count = visible_count;
2001 } else {
2002 file_count = 0;
2003 visible_file_count = 0;
2004 }
2005
2006 EntrySummary {
2007 max_path: self.path.clone(),
2008 count: 1,
2009 visible_count,
2010 file_count,
2011 visible_file_count,
2012 }
2013 }
2014}
2015
2016impl sum_tree::KeyedItem for Entry {
2017 type Key = PathKey;
2018
2019 fn key(&self) -> Self::Key {
2020 PathKey(self.path.clone())
2021 }
2022}
2023
/// Sum-tree summary for `Entry`: tracks the greatest path in a subtree along
/// with entry/file counts, enabling seeks by path or by (visible) count.
#[derive(Clone, Debug)]
pub struct EntrySummary {
    /// Greatest path in the summarized subtree (entries are ordered by path).
    max_path: Arc<Path>,
    /// Total number of entries.
    count: usize,
    /// Entries that are not gitignored.
    visible_count: usize,
    /// Entries that are files.
    file_count: usize,
    /// File entries that are not gitignored.
    visible_file_count: usize,
}
2032
2033impl Default for EntrySummary {
2034 fn default() -> Self {
2035 Self {
2036 max_path: Arc::from(Path::new("")),
2037 count: 0,
2038 visible_count: 0,
2039 file_count: 0,
2040 visible_file_count: 0,
2041 }
2042 }
2043}
2044
2045impl sum_tree::Summary for EntrySummary {
2046 type Context = ();
2047
2048 fn add_summary(&mut self, rhs: &Self, _: &()) {
2049 self.max_path = rhs.max_path.clone();
2050 self.count += rhs.count;
2051 self.visible_count += rhs.visible_count;
2052 self.file_count += rhs.file_count;
2053 self.visible_file_count += rhs.visible_file_count;
2054 }
2055}
2056
/// An item in the id-keyed tree, mapping a stable entry id back to its
/// current path.
#[derive(Clone, Debug)]
struct PathEntry {
    id: ProjectEntryId,
    /// Current worktree-relative path of the entry.
    path: Arc<Path>,
    is_ignored: bool,
    /// Scan id at which the entry was last written.
    scan_id: usize,
}
2064
2065impl sum_tree::Item for PathEntry {
2066 type Summary = PathEntrySummary;
2067
2068 fn summary(&self) -> Self::Summary {
2069 PathEntrySummary { max_id: self.id }
2070 }
2071}
2072
2073impl sum_tree::KeyedItem for PathEntry {
2074 type Key = ProjectEntryId;
2075
2076 fn key(&self) -> Self::Key {
2077 self.id
2078 }
2079}
2080
/// Sum-tree summary for `PathEntry`: tracks the greatest entry id in a
/// subtree, enabling lookups by id.
#[derive(Clone, Debug, Default)]
struct PathEntrySummary {
    max_id: ProjectEntryId,
}
2085
2086impl sum_tree::Summary for PathEntrySummary {
2087 type Context = ();
2088
2089 fn add_summary(&mut self, summary: &Self, _: &Self::Context) {
2090 self.max_id = summary.max_id;
2091 }
2092}
2093
2094impl<'a> sum_tree::Dimension<'a, PathEntrySummary> for ProjectEntryId {
2095 fn add_summary(&mut self, summary: &'a PathEntrySummary, _: &()) {
2096 *self = summary.max_id;
2097 }
2098}
2099
/// Ordering key for entries in the path-keyed tree: the entry's
/// worktree-relative path.
#[derive(Clone, Debug, Eq, PartialEq, Ord, PartialOrd)]
pub struct PathKey(Arc<Path>);
2102
2103impl Default for PathKey {
2104 fn default() -> Self {
2105 Self(Path::new("").into())
2106 }
2107}
2108
2109impl<'a> sum_tree::Dimension<'a, EntrySummary> for PathKey {
2110 fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
2111 self.0 = summary.max_path.clone();
2112 }
2113}
2114
/// Maintains a local worktree's snapshot by scanning the filesystem and
/// processing change events on background threads.
struct BackgroundScanner {
    /// The snapshot being built and kept up to date by the scanner.
    snapshot: Mutex<LocalSnapshot>,
    fs: Arc<dyn Fs>,
    /// Channel over which scan progress and updated snapshots are published.
    status_updates_tx: UnboundedSender<ScanState>,
    executor: Arc<executor::Background>,
    /// Requests from the worktree to re-scan specific paths, each paired with
    /// a barrier to signal completion.
    refresh_requests_rx: channel::Receiver<(Vec<PathBuf>, barrier::Sender)>,
    /// The snapshot as of the last status update, plus the paths changed
    /// since then; used to compute change sets.
    prev_state: Mutex<(Snapshot, Vec<Arc<Path>>)>,
    finished_initial_scan: bool,
}
2124
2125impl BackgroundScanner {
2126 fn new(
2127 snapshot: LocalSnapshot,
2128 fs: Arc<dyn Fs>,
2129 status_updates_tx: UnboundedSender<ScanState>,
2130 executor: Arc<executor::Background>,
2131 refresh_requests_rx: channel::Receiver<(Vec<PathBuf>, barrier::Sender)>,
2132 ) -> Self {
2133 Self {
2134 fs,
2135 status_updates_tx,
2136 executor,
2137 refresh_requests_rx,
2138 prev_state: Mutex::new((snapshot.snapshot.clone(), Vec::new())),
2139 snapshot: Mutex::new(snapshot),
2140 finished_initial_scan: false,
2141 }
2142 }
2143
    /// Top-level loop of the scanner: performs the initial recursive scan of
    /// the worktree, then keeps processing refresh requests and filesystem
    /// events until both channels close.
    async fn run(
        &mut self,
        mut events_rx: Pin<Box<dyn Send + Stream<Item = Vec<fsevent::Event>>>>,
    ) {
        use futures::FutureExt as _;

        let (root_abs_path, root_inode) = {
            let snapshot = self.snapshot.lock();
            (
                snapshot.abs_path.clone(),
                snapshot.root_entry().map(|e| e.inode),
            )
        };

        // Populate ignores above the root.
        let ignore_stack;
        for ancestor in root_abs_path.ancestors().skip(1) {
            if let Ok(ignore) = build_gitignore(&ancestor.join(&*GITIGNORE), self.fs.as_ref()).await
            {
                self.snapshot
                    .lock()
                    .ignores_by_parent_abs_path
                    .insert(ancestor.into(), (ignore.into(), 0));
            }
        }
        {
            let mut snapshot = self.snapshot.lock();
            ignore_stack = snapshot.ignore_stack_for_abs_path(&root_abs_path, true);
            // If an ancestor gitignore matches the root itself, record that
            // on the root entry.
            if ignore_stack.is_all() {
                if let Some(mut root_entry) = snapshot.root_entry().cloned() {
                    root_entry.is_ignored = true;
                    snapshot.insert_entry(root_entry, self.fs.as_ref());
                }
            }
        };

        // Perform an initial scan of the directory.
        let (scan_job_tx, scan_job_rx) = channel::unbounded();
        smol::block_on(scan_job_tx.send(ScanJob {
            abs_path: root_abs_path,
            path: Arc::from(Path::new("")),
            ignore_stack,
            ancestor_inodes: TreeSet::from_ordered_entries(root_inode),
            // Jobs carry a clone of the sender so they can enqueue
            // sub-directory jobs of their own.
            scan_queue: scan_job_tx.clone(),
        }))
        .unwrap();
        // Dropping our sender lets `scan_dirs` terminate once all queued jobs
        // (including those enqueued by the jobs themselves) are finished.
        drop(scan_job_tx);
        self.scan_dirs(true, scan_job_rx).await;

        // Process any FS events that occurred while performing the initial scan.
        // For these events, update events cannot be as precise, because we didn't
        // have the previous state loaded yet.
        if let Poll::Ready(Some(events)) = futures::poll!(events_rx.next()) {
            let mut paths = events.into_iter().map(|e| e.path).collect::<Vec<_>>();
            // Coalesce any further event batches that are already ready.
            while let Poll::Ready(Some(more_events)) = futures::poll!(events_rx.next()) {
                paths.extend(more_events.into_iter().map(|e| e.path));
            }
            self.process_events(paths).await;
        }

        self.finished_initial_scan = true;

        // Continue processing events until the worktree is dropped.
        loop {
            select_biased! {
                // Process any path refresh requests from the worktree. Prioritize
                // these before handling changes reported by the filesystem.
                request = self.refresh_requests_rx.recv().fuse() => {
                    let Ok((paths, barrier)) = request else { break };
                    self.reload_entries_for_paths(paths, None).await;
                    if !self.send_status_update(false, Some(barrier)) {
                        break;
                    }
                }

                events = events_rx.next().fuse() => {
                    let Some(events) = events else { break };
                    let mut paths = events.into_iter().map(|e| e.path).collect::<Vec<_>>();
                    // Coalesce any further event batches that are already ready.
                    while let Poll::Ready(Some(more_events)) = futures::poll!(events_rx.next()) {
                        paths.extend(more_events.into_iter().map(|e| e.path));
                    }
                    self.process_events(paths).await;
                }
            }
        }
    }
2230
2231 async fn process_events(&mut self, paths: Vec<PathBuf>) {
2232 let (scan_job_tx, scan_job_rx) = channel::unbounded();
2233 if let Some(mut paths) = self
2234 .reload_entries_for_paths(paths, Some(scan_job_tx.clone()))
2235 .await
2236 {
2237 paths.sort_unstable();
2238 util::extend_sorted(&mut self.prev_state.lock().1, paths, usize::MAX, Ord::cmp);
2239 }
2240 drop(scan_job_tx);
2241 self.scan_dirs(false, scan_job_rx).await;
2242 }
2243
    /// Drains `scan_jobs_rx` using one worker per CPU, then finalizes the
    /// scan: ignore statuses are recomputed, git repositories whose `.git`
    /// directory disappeared are dropped, and a status update is sent.
    async fn scan_dirs(
        &self,
        enable_progress_updates: bool,
        scan_jobs_rx: channel::Receiver<ScanJob>,
    ) {
        use futures::FutureExt as _;

        self.snapshot.lock().scan_id += 1;
        // If the receiving side is gone, the worktree was dropped; stop.
        if self
            .status_updates_tx
            .unbounded_send(ScanState::Started)
            .is_err()
        {
            return;
        }

        let progress_update_count = AtomicUsize::new(0);
        self.executor
            .scoped(|scope| {
                for _ in 0..self.executor.num_cpus() {
                    scope.spawn(async {
                        let mut last_progress_update_count = 0;
                        let progress_update_timer = self.progress_timer(enable_progress_updates).fuse();
                        futures::pin_mut!(progress_update_timer);

                        loop {
                            select_biased! {
                                // Process any path refresh requests before moving on to process
                                // the scan queue, so that user operations are prioritized.
                                request = self.refresh_requests_rx.recv().fuse() => {
                                    let Ok((paths, barrier)) = request else { break };
                                    self.reload_entries_for_paths(paths, None).await;
                                    if !self.send_status_update(false, Some(barrier)) {
                                        return;
                                    }
                                }

                                // Send periodic progress updates to the worktree. Use an atomic counter
                                // to ensure that only one of the workers sends a progress update after
                                // the update interval elapses.
                                _ = progress_update_timer => {
                                    match progress_update_count.compare_exchange(
                                        last_progress_update_count,
                                        last_progress_update_count + 1,
                                        SeqCst,
                                        SeqCst
                                    ) {
                                        Ok(_) => {
                                            // This worker won the race; send the update.
                                            last_progress_update_count += 1;
                                            self.send_status_update(true, None);
                                        }
                                        Err(count) => {
                                            // Another worker already sent one this interval.
                                            last_progress_update_count = count;
                                        }
                                    }
                                    progress_update_timer.set(self.progress_timer(enable_progress_updates).fuse());
                                }

                                // Recursively load directories from the file system.
                                job = scan_jobs_rx.recv().fuse() => {
                                    let Ok(job) = job else { break };
                                    if let Err(err) = self.scan_dir(&job).await {
                                        // NOTE(review): errors on the root path are deliberately
                                        // not logged here — confirm where they are surfaced.
                                        if job.path.as_ref() != Path::new("") {
                                            log::error!("error scanning directory {:?}: {}", job.abs_path, err);
                                        }
                                    }
                                }
                            }
                        }
                    })
                }
            })
            .await;

        self.update_ignore_statuses().await;

        let mut snapshot = self.snapshot.lock();
        // Drop repositories whose .git directory no longer has an entry.
        let mut git_repositories = mem::take(&mut snapshot.git_repositories);
        git_repositories.retain(|repo| snapshot.entry_for_path(&repo.git_dir_path).is_some());
        snapshot.git_repositories = git_repositories;
        // The scan is complete: removed ids can no longer be reused.
        snapshot.removed_entry_ids.clear();
        snapshot.completed_scan_id = snapshot.scan_id;
        drop(snapshot);

        self.send_status_update(false, None);
    }
2330
    /// Builds a change set between the previously-reported snapshot and the
    /// current one, and sends it to the worktree over `status_updates_tx`.
    /// Returns `false` if the receiving end of the channel has been dropped.
    fn send_status_update(&self, scanning: bool, barrier: Option<barrier::Sender>) -> bool {
        // Lock order: `prev_state` first, then `snapshot` (whose guard is
        // released immediately after cloning).
        let mut prev_state = self.prev_state.lock();
        let snapshot = self.snapshot.lock().clone();
        let mut old_snapshot = snapshot.snapshot.clone();
        // After the swap, `prev_state.0` holds the current snapshot as the
        // baseline for the next update.
        mem::swap(&mut old_snapshot, &mut prev_state.0);
        let changed_paths = mem::take(&mut prev_state.1);
        let changes = self.build_change_set(&old_snapshot, &snapshot.snapshot, changed_paths);
        self.status_updates_tx
            .unbounded_send(ScanState::Updated {
                snapshot,
                changes,
                scanning,
                barrier,
            })
            .is_ok()
    }
2347
    /// Reads the directory described by `job`, builds `Entry` values for its
    /// children, records them in the snapshot, and enqueues recursive
    /// `ScanJob`s for subdirectories.
    ///
    /// Returns an error only if the directory itself cannot be read; errors on
    /// individual children are logged and skipped.
    async fn scan_dir(&self, job: &ScanJob) -> Result<()> {
        let mut new_entries: Vec<Entry> = Vec::new();
        // Runs parallel to the *directory* entries pushed into `new_entries`.
        // A `None` slot marks a subdirectory that must not be recursed into
        // (recursive symlink detected via `ancestor_inodes`).
        let mut new_jobs: Vec<Option<ScanJob>> = Vec::new();
        let mut ignore_stack = job.ignore_stack.clone();
        let mut new_ignore = None;
        // Copy what we need out of the snapshot so the lock is released before
        // any file-system I/O below.
        let (root_abs_path, root_char_bag, next_entry_id) = {
            let snapshot = self.snapshot.lock();
            (
                snapshot.abs_path().clone(),
                snapshot.root_char_bag,
                snapshot.next_entry_id.clone(),
            )
        };
        let mut child_paths = self.fs.read_dir(&job.abs_path).await?;
        while let Some(child_abs_path) = child_paths.next().await {
            let child_abs_path: Arc<Path> = match child_abs_path {
                Ok(child_abs_path) => child_abs_path.into(),
                Err(error) => {
                    log::error!("error processing entry {:?}", error);
                    continue;
                }
            };

            let child_name = child_abs_path.file_name().unwrap();
            let child_path: Arc<Path> = job.path.join(child_name).into();
            let child_metadata = match self.fs.metadata(&child_abs_path).await {
                Ok(Some(metadata)) => metadata,
                // The child disappeared between listing and stat; skip it.
                Ok(None) => continue,
                Err(err) => {
                    log::error!("error processing {:?}: {:?}", child_abs_path, err);
                    continue;
                }
            };

            // If we find a .gitignore, add it to the stack of ignores used to determine which paths are ignored
            if child_name == *GITIGNORE {
                match build_gitignore(&child_abs_path, self.fs.as_ref()).await {
                    Ok(ignore) => {
                        let ignore = Arc::new(ignore);
                        ignore_stack = ignore_stack.append(job.abs_path.clone(), ignore.clone());
                        new_ignore = Some(ignore);
                    }
                    Err(error) => {
                        log::error!(
                            "error loading .gitignore file {:?} - {:?}",
                            child_name,
                            error
                        );
                    }
                }

                // Update the ignore status of any child entries we've already processed to
                // reflect the ignore file in the current directory. Because `.gitignore`
                // starts with a `.`, it is usually encountered early in the listing, so
                // this list should rarely be long. Update the ignore stack associated with
                // any new jobs as well.
                let mut new_jobs = new_jobs.iter_mut();
                for entry in &mut new_entries {
                    let entry_abs_path = root_abs_path.join(&entry.path);
                    entry.is_ignored =
                        ignore_stack.is_abs_path_ignored(&entry_abs_path, entry.is_dir());

                    if entry.is_dir() {
                        // Each directory entry has a corresponding slot in `new_jobs`.
                        if let Some(job) = new_jobs.next().expect("Missing scan job for entry") {
                            job.ignore_stack = if entry.is_ignored {
                                IgnoreStack::all()
                            } else {
                                ignore_stack.clone()
                            };
                        }
                    }
                }
            }

            let mut child_entry = Entry::new(
                child_path.clone(),
                &child_metadata,
                &next_entry_id,
                root_char_bag,
            );

            if child_entry.is_dir() {
                let is_ignored = ignore_stack.is_abs_path_ignored(&child_abs_path, true);
                child_entry.is_ignored = is_ignored;

                // Avoid recursing until crash in the case of a recursive symlink
                if !job.ancestor_inodes.contains(&child_entry.inode) {
                    let mut ancestor_inodes = job.ancestor_inodes.clone();
                    ancestor_inodes.insert(child_entry.inode);

                    new_jobs.push(Some(ScanJob {
                        abs_path: child_abs_path,
                        path: child_path,
                        ignore_stack: if is_ignored {
                            IgnoreStack::all()
                        } else {
                            ignore_stack.clone()
                        },
                        ancestor_inodes,
                        scan_queue: job.scan_queue.clone(),
                    }));
                } else {
                    new_jobs.push(None);
                }
            } else {
                child_entry.is_ignored = ignore_stack.is_abs_path_ignored(&child_abs_path, false);
            }

            new_entries.push(child_entry);
        }

        // Commit the scanned entries to the snapshot before enqueuing the
        // child jobs that depend on them.
        self.snapshot.lock().populate_dir(
            job.path.clone(),
            new_entries,
            new_ignore,
            self.fs.as_ref(),
        );

        for new_job in new_jobs {
            if let Some(new_job) = new_job {
                job.scan_queue.send(new_job).await.unwrap();
            }
        }

        Ok(())
    }
2473
    /// Re-reads metadata for the given absolute paths and updates the snapshot
    /// to match: entries whose files are gone are removed, and the rest are
    /// (re)inserted.
    ///
    /// When `scan_queue_tx` is provided, affected paths are removed first and
    /// directories are re-enqueued for a full recursive rescan. Returns the
    /// worktree-relative paths of the affected entries, or `None` if the
    /// worktree root can no longer be canonicalized.
    async fn reload_entries_for_paths(
        &self,
        mut abs_paths: Vec<PathBuf>,
        scan_queue_tx: Option<Sender<ScanJob>>,
    ) -> Option<Vec<Arc<Path>>> {
        abs_paths.sort_unstable();
        // After sorting, a path nested inside its predecessor is redundant;
        // `dedup_by` drops the later (nested) path.
        abs_paths.dedup_by(|a, b| a.starts_with(&b));

        let root_abs_path = self.snapshot.lock().abs_path.clone();
        let root_canonical_path = self.fs.canonicalize(&root_abs_path).await.log_err()?;
        // Stat all paths concurrently before taking the snapshot lock.
        let metadata = futures::future::join_all(
            abs_paths
                .iter()
                .map(|abs_path| self.fs.metadata(&abs_path))
                .collect::<Vec<_>>(),
        )
        .await;

        let mut snapshot = self.snapshot.lock();
        let doing_recursive_update = scan_queue_tx.is_some();

        // Remove any entries for paths that no longer exist or are being recursively
        // refreshed. Do this before adding any new entries, so that renames can be
        // detected regardless of the order of the paths.
        let mut event_paths = Vec::<Arc<Path>>::with_capacity(abs_paths.len());
        for (abs_path, metadata) in abs_paths.iter().zip(metadata.iter()) {
            if let Ok(path) = abs_path.strip_prefix(&root_canonical_path) {
                if matches!(metadata, Ok(None)) || doing_recursive_update {
                    snapshot.remove_path(path);
                }
                event_paths.push(path.into());
            } else {
                log::error!(
                    "unexpected event {:?} for root path {:?}",
                    abs_path,
                    root_canonical_path
                );
            }
        }

        for (path, metadata) in event_paths.iter().cloned().zip(metadata.into_iter()) {
            let abs_path: Arc<Path> = root_abs_path.join(&path).into();

            match metadata {
                Ok(Some(metadata)) => {
                    let ignore_stack =
                        snapshot.ignore_stack_for_abs_path(&abs_path, metadata.is_dir);
                    let mut fs_entry = Entry::new(
                        path.clone(),
                        &metadata,
                        snapshot.next_entry_id.as_ref(),
                        snapshot.root_char_bag,
                    );
                    fs_entry.is_ignored = ignore_stack.is_all();
                    snapshot.insert_entry(fs_entry, self.fs.as_ref());

                    let scan_id = snapshot.scan_id;
                    // A change inside a `.git` directory invalidates that
                    // repository's cached index state.
                    if let Some(repo) = snapshot.repo_with_dot_git_containing(&path) {
                        repo.repo.lock().reload_index();
                        repo.scan_id = scan_id;
                    }

                    if let Some(scan_queue_tx) = &scan_queue_tx {
                        let mut ancestor_inodes = snapshot.ancestor_inodes_for_path(&path);
                        // Guard against recursive symlinks, mirroring `scan_dir`.
                        if metadata.is_dir && !ancestor_inodes.contains(&metadata.inode) {
                            ancestor_inodes.insert(metadata.inode);
                            smol::block_on(scan_queue_tx.send(ScanJob {
                                abs_path,
                                path,
                                ignore_stack,
                                ancestor_inodes,
                                scan_queue: scan_queue_tx.clone(),
                            }))
                            .unwrap();
                        }
                    }
                }
                Ok(None) => {}
                Err(err) => {
                    // TODO - create a special 'error' entry in the entries tree to mark this
                    log::error!("error reading file on event {:?}", err);
                }
            }
        }

        Some(event_paths)
    }
2561
    /// Recomputes entries' ignore statuses after `.gitignore` files changed:
    /// drops ignore data for deleted `.gitignore` files, then walks the
    /// subtree below each modified one on a pool of workers.
    async fn update_ignore_statuses(&self) {
        // Work from a clone of the snapshot so worker tasks can read it
        // without holding the snapshot lock.
        let mut snapshot = self.snapshot.lock().clone();
        let mut ignores_to_update = Vec::new();
        let mut ignores_to_delete = Vec::new();
        for (parent_abs_path, (_, scan_id)) in &snapshot.ignores_by_parent_abs_path {
            if let Ok(parent_path) = parent_abs_path.strip_prefix(&snapshot.abs_path) {
                // Only re-walk directories whose `.gitignore` was (re)loaded
                // during the current scan.
                if *scan_id == snapshot.scan_id && snapshot.entry_for_path(parent_path).is_some() {
                    ignores_to_update.push(parent_abs_path.clone());
                }

                let ignore_path = parent_path.join(&*GITIGNORE);
                if snapshot.entry_for_path(ignore_path).is_none() {
                    ignores_to_delete.push(parent_abs_path.clone());
                }
            }
        }

        for parent_abs_path in ignores_to_delete {
            // Remove from both the local clone and the shared snapshot.
            snapshot.ignores_by_parent_abs_path.remove(&parent_abs_path);
            self.snapshot
                .lock()
                .ignores_by_parent_abs_path
                .remove(&parent_abs_path);
        }

        let (ignore_queue_tx, ignore_queue_rx) = channel::unbounded();
        ignores_to_update.sort_unstable();
        let mut ignores_to_update = ignores_to_update.into_iter().peekable();
        while let Some(parent_abs_path) = ignores_to_update.next() {
            // Skip any paths nested inside `parent_abs_path`; the enqueued job
            // covers the whole subtree.
            while ignores_to_update
                .peek()
                .map_or(false, |p| p.starts_with(&parent_abs_path))
            {
                ignores_to_update.next().unwrap();
            }

            let ignore_stack = snapshot.ignore_stack_for_abs_path(&parent_abs_path, true);
            ignore_queue_tx
                .send(UpdateIgnoreStatusJob {
                    abs_path: parent_abs_path,
                    ignore_stack,
                    ignore_queue: ignore_queue_tx.clone(),
                })
                .await
                .unwrap();
        }
        // Jobs hold their own clones of the sender, so dropping ours lets the
        // queue close once all recursively-spawned jobs complete.
        drop(ignore_queue_tx);

        self.executor
            .scoped(|scope| {
                for _ in 0..self.executor.num_cpus() {
                    scope.spawn(async {
                        while let Ok(job) = ignore_queue_rx.recv().await {
                            self.update_ignore_status(job, &snapshot).await;
                        }
                    });
                }
            })
            .await;
    }
2622
    /// Applies `job`'s ignore stack to the immediate children of its
    /// directory, recording edits for entries whose status changed and
    /// enqueuing a follow-up job for each child directory.
    async fn update_ignore_status(&self, job: UpdateIgnoreStatusJob, snapshot: &LocalSnapshot) {
        let mut ignore_stack = job.ignore_stack;
        // Include this directory's own `.gitignore`, if it has one.
        if let Some((ignore, _)) = snapshot.ignores_by_parent_abs_path.get(&job.abs_path) {
            ignore_stack = ignore_stack.append(job.abs_path.clone(), ignore.clone());
        }

        let mut entries_by_id_edits = Vec::new();
        let mut entries_by_path_edits = Vec::new();
        let path = job.abs_path.strip_prefix(&snapshot.abs_path).unwrap();
        for mut entry in snapshot.child_entries(path).cloned() {
            let was_ignored = entry.is_ignored;
            let abs_path = snapshot.abs_path().join(&entry.path);
            entry.is_ignored = ignore_stack.is_abs_path_ignored(&abs_path, entry.is_dir());
            if entry.is_dir() {
                let child_ignore_stack = if entry.is_ignored {
                    IgnoreStack::all()
                } else {
                    ignore_stack.clone()
                };
                job.ignore_queue
                    .send(UpdateIgnoreStatusJob {
                        abs_path: abs_path.into(),
                        ignore_stack: child_ignore_stack,
                        ignore_queue: job.ignore_queue.clone(),
                    })
                    .await
                    .unwrap();
            }

            // Only record edits for entries whose ignore status actually flipped.
            if entry.is_ignored != was_ignored {
                let mut path_entry = snapshot.entries_by_id.get(&entry.id, &()).unwrap().clone();
                path_entry.scan_id = snapshot.scan_id;
                path_entry.is_ignored = entry.is_ignored;
                entries_by_id_edits.push(Edit::Insert(path_entry));
                entries_by_path_edits.push(Edit::Insert(entry));
            }
        }

        // Apply both edit sets to the shared snapshot under a single lock.
        let mut snapshot = self.snapshot.lock();
        snapshot.entries_by_path.edit(entries_by_path_edits, &());
        snapshot.entries_by_id.edit(entries_by_id_edits, &());
    }
2665
    /// Diffs the old and new snapshots around each event path, classifying
    /// every affected entry (the path itself plus its subtree) as added,
    /// removed, or updated.
    fn build_change_set(
        &self,
        old_snapshot: &Snapshot,
        new_snapshot: &Snapshot,
        event_paths: Vec<Arc<Path>>,
    ) -> HashMap<Arc<Path>, PathChange> {
        use PathChange::{Added, AddedOrUpdated, Removed, Updated};

        let mut changes = HashMap::default();
        let mut old_paths = old_snapshot.entries_by_path.cursor::<PathKey>();
        let mut new_paths = new_snapshot.entries_by_path.cursor::<PathKey>();
        let received_before_initialized = !self.finished_initial_scan;

        for path in event_paths {
            let path = PathKey(path);
            old_paths.seek(&path, Bias::Left, &());
            new_paths.seek(&path, Bias::Left, &());

            // Advance both cursors in lockstep (a merge over two sorted
            // sequences), stopping once both have left the event path's subtree.
            loop {
                match (old_paths.item(), new_paths.item()) {
                    (Some(old_entry), Some(new_entry)) => {
                        // Both cursors are past the event path and outside its
                        // subtree: done with this event path.
                        if old_entry.path > path.0
                            && new_entry.path > path.0
                            && !old_entry.path.starts_with(&path.0)
                            && !new_entry.path.starts_with(&path.0)
                        {
                            break;
                        }

                        match Ord::cmp(&old_entry.path, &new_entry.path) {
                            // Present only in the old snapshot: removed.
                            Ordering::Less => {
                                changes.insert(old_entry.path.clone(), Removed);
                                old_paths.next(&());
                            }
                            Ordering::Equal => {
                                if received_before_initialized {
                                    // If the worktree was not fully initialized when this event was generated,
                                    // we can't know whether this entry was added during the scan or whether
                                    // it was merely updated.
                                    changes.insert(new_entry.path.clone(), AddedOrUpdated);
                                } else if old_entry.mtime != new_entry.mtime {
                                    changes.insert(new_entry.path.clone(), Updated);
                                }
                                old_paths.next(&());
                                new_paths.next(&());
                            }
                            // Present only in the new snapshot: added.
                            Ordering::Greater => {
                                changes.insert(new_entry.path.clone(), Added);
                                new_paths.next(&());
                            }
                        }
                    }
                    (Some(old_entry), None) => {
                        changes.insert(old_entry.path.clone(), Removed);
                        old_paths.next(&());
                    }
                    (None, Some(new_entry)) => {
                        changes.insert(new_entry.path.clone(), Added);
                        new_paths.next(&());
                    }
                    (None, None) => break,
                }
            }
        }
        changes
    }
2732
    /// Waits for one progress-update interval. When `running` is false, pends
    /// forever so the timer arm of the scanner's `select_biased!` never fires.
    async fn progress_timer(&self, running: bool) {
        if !running {
            return futures::future::pending().await;
        }

        // Under a fake FS (tests), use a simulated random delay so the
        // deterministic executor can explore different interleavings.
        #[cfg(any(test, feature = "test-support"))]
        if self.fs.is_fake() {
            return self.executor.simulate_random_delay().await;
        }

        smol::Timer::after(Duration::from_millis(100)).await;
    }
2745}
2746
2747fn char_bag_for_path(root_char_bag: CharBag, path: &Path) -> CharBag {
2748 let mut result = root_char_bag;
2749 result.extend(
2750 path.to_string_lossy()
2751 .chars()
2752 .map(|c| c.to_ascii_lowercase()),
2753 );
2754 result
2755}
2756
/// A unit of work for the background scanner: one directory to scan.
struct ScanJob {
    /// Absolute path of the directory to scan.
    abs_path: Arc<Path>,
    /// Worktree-relative path of the same directory.
    path: Arc<Path>,
    /// Gitignore rules in effect for this directory's children.
    ignore_stack: Arc<IgnoreStack>,
    /// Queue onto which jobs for subdirectories are pushed.
    scan_queue: Sender<ScanJob>,
    /// Inodes of ancestor directories, used to detect recursive symlinks.
    ancestor_inodes: TreeSet<u64>,
}
2764
/// A unit of work for recomputing ignore statuses: one directory whose
/// children should be re-evaluated against the given ignore stack.
struct UpdateIgnoreStatusJob {
    /// Absolute path of the directory to process.
    abs_path: Arc<Path>,
    /// Ignore rules inherited from ancestor directories.
    ignore_stack: Arc<IgnoreStack>,
    /// Queue onto which jobs for child directories are pushed.
    ignore_queue: Sender<UpdateIgnoreStatusJob>,
}
2770
/// Test-support extension methods for worktree model handles.
pub trait WorktreeHandle {
    /// Waits until all pending FS events for this worktree have been
    /// processed. Only available in test builds.
    #[cfg(any(test, feature = "test-support"))]
    fn flush_fs_events<'a>(
        &self,
        cx: &'a gpui::TestAppContext,
    ) -> futures::future::LocalBoxFuture<'a, ()>;
}
2778
impl WorktreeHandle for ModelHandle<Worktree> {
    // The worktree's FS event stream sometimes delivers "redundant" events for FS changes that
    // occurred before the worktree was constructed. These events can cause the worktree to perform
    // extra directory scans, and emit extra scan-state notifications.
    //
    // This function mutates the worktree's directory and waits for those mutations to be picked up,
    // to ensure that all redundant FS events have already been processed.
    #[cfg(any(test, feature = "test-support"))]
    fn flush_fs_events<'a>(
        &self,
        cx: &'a gpui::TestAppContext,
    ) -> futures::future::LocalBoxFuture<'a, ()> {
        use smol::future::FutureExt;

        let filename = "fs-event-sentinel";
        let tree = self.clone();
        let (fs, root_path) = self.read_with(cx, |tree, _| {
            let tree = tree.as_local().unwrap();
            (tree.fs.clone(), tree.abs_path().clone())
        });

        async move {
            // Create and then delete a sentinel file, waiting for each change
            // to appear in the snapshot.
            fs.create_file(&root_path.join(filename), Default::default())
                .await
                .unwrap();
            tree.condition(cx, |tree, _| tree.entry_for_path(filename).is_some())
                .await;

            fs.remove_file(&root_path.join(filename), Default::default())
                .await
                .unwrap();
            tree.condition(cx, |tree, _| tree.entry_for_path(filename).is_none())
                .await;

            cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
                .await;
        }
        .boxed_local()
    }
}
2819
/// Accumulated entry totals for a prefix of the entry tree, used as a seek
/// dimension when traversing `entries_by_path`.
#[derive(Clone, Debug)]
struct TraversalProgress<'a> {
    // Greatest entry path summarized so far.
    max_path: &'a Path,
    // Total entries.
    count: usize,
    // Entries that are not ignored.
    visible_count: usize,
    // File (non-directory) entries.
    file_count: usize,
    // Non-ignored file entries.
    visible_file_count: usize,
}
2828
2829impl<'a> TraversalProgress<'a> {
2830 fn count(&self, include_dirs: bool, include_ignored: bool) -> usize {
2831 match (include_ignored, include_dirs) {
2832 (true, true) => self.count,
2833 (true, false) => self.file_count,
2834 (false, true) => self.visible_count,
2835 (false, false) => self.visible_file_count,
2836 }
2837 }
2838}
2839
impl<'a> sum_tree::Dimension<'a, EntrySummary> for TraversalProgress<'a> {
    // Accumulates another subtree's summary: counts add up, and `max_path`
    // advances to the summarized subtree's greatest path.
    fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
        self.max_path = summary.max_path.as_ref();
        self.count += summary.count;
        self.visible_count += summary.visible_count;
        self.file_count += summary.file_count;
        self.visible_file_count += summary.visible_file_count;
    }
}
2849
impl<'a> Default for TraversalProgress<'a> {
    // Zero progress: nothing counted, positioned before the empty path.
    fn default() -> Self {
        Self {
            max_path: Path::new(""),
            count: 0,
            visible_count: 0,
            file_count: 0,
            visible_file_count: 0,
        }
    }
}
2861
/// A cursor over worktree entries in path order, optionally filtering out
/// directories and/or ignored entries.
pub struct Traversal<'a> {
    cursor: sum_tree::Cursor<'a, Entry, TraversalProgress<'a>>,
    include_ignored: bool,
    include_dirs: bool,
}
2867
impl<'a> Traversal<'a> {
    /// Advances past the current entry to the next one matching the
    /// traversal's filters.
    pub fn advance(&mut self) -> bool {
        self.advance_to_offset(self.offset() + 1)
    }

    /// Seeks forward until `offset` filtered entries have been passed.
    pub fn advance_to_offset(&mut self, offset: usize) -> bool {
        self.cursor.seek_forward(
            &TraversalTarget::Count {
                count: offset,
                include_dirs: self.include_dirs,
                include_ignored: self.include_ignored,
            },
            Bias::Right,
            &(),
        )
    }

    /// Skips the current entry's descendants, repeatedly seeking past each
    /// subtree until an entry matching the traversal's filters is reached.
    /// Returns `false` if no such entry remains.
    pub fn advance_to_sibling(&mut self) -> bool {
        while let Some(entry) = self.cursor.item() {
            self.cursor.seek_forward(
                &TraversalTarget::PathSuccessor(&entry.path),
                Bias::Left,
                &(),
            );
            if let Some(entry) = self.cursor.item() {
                if (self.include_dirs || !entry.is_dir())
                    && (self.include_ignored || !entry.is_ignored)
                {
                    return true;
                }
            }
        }
        false
    }

    /// The entry the traversal is currently positioned on, if any.
    pub fn entry(&self) -> Option<&'a Entry> {
        self.cursor.item()
    }

    /// Number of filtered entries preceding the current position.
    pub fn offset(&self) -> usize {
        self.cursor
            .start()
            .count(self.include_dirs, self.include_ignored)
    }
}
2913
2914impl<'a> Iterator for Traversal<'a> {
2915 type Item = &'a Entry;
2916
2917 fn next(&mut self) -> Option<Self::Item> {
2918 if let Some(item) = self.entry() {
2919 self.advance();
2920 Some(item)
2921 } else {
2922 None
2923 }
2924 }
2925}
2926
/// Seek targets for a `Traversal`'s cursor.
#[derive(Debug)]
enum TraversalTarget<'a> {
    /// Seek to a specific entry path.
    Path(&'a Path),
    /// Seek to the first position that is neither this path nor inside it.
    PathSuccessor(&'a Path),
    /// Seek to the position after `count` entries (under the given filters)
    /// have been passed.
    Count {
        count: usize,
        include_ignored: bool,
        include_dirs: bool,
    },
}
2937
impl<'a, 'b> SeekTarget<'a, EntrySummary, TraversalProgress<'a>> for TraversalTarget<'b> {
    fn cmp(&self, cursor_location: &TraversalProgress<'a>, _: &()) -> Ordering {
        match self {
            TraversalTarget::Path(path) => path.cmp(&cursor_location.max_path),
            TraversalTarget::PathSuccessor(path) => {
                // While the cursor is still inside `path`'s subtree the target
                // compares greater (keep seeking); the first position outside
                // the subtree compares equal (stop).
                if !cursor_location.max_path.starts_with(path) {
                    Ordering::Equal
                } else {
                    Ordering::Greater
                }
            }
            TraversalTarget::Count {
                count,
                include_dirs,
                include_ignored,
            } => Ord::cmp(
                count,
                &cursor_location.count(*include_dirs, *include_ignored),
            ),
        }
    }
}
2960
/// Iterator over the direct children of the entry at `parent_path`.
struct ChildEntriesIter<'a> {
    parent_path: &'a Path,
    traversal: Traversal<'a>,
}
2965
2966impl<'a> Iterator for ChildEntriesIter<'a> {
2967 type Item = &'a Entry;
2968
2969 fn next(&mut self) -> Option<Self::Item> {
2970 if let Some(item) = self.traversal.entry() {
2971 if item.path.starts_with(&self.parent_path) {
2972 self.traversal.advance_to_sibling();
2973 return Some(item);
2974 }
2975 }
2976 None
2977 }
2978}
2979
impl<'a> From<&'a Entry> for proto::Entry {
    /// Serializes a worktree entry into its wire representation.
    fn from(entry: &'a Entry) -> Self {
        Self {
            id: entry.id.to_proto(),
            is_dir: entry.is_dir(),
            // Paths are transmitted as (lossy) UTF-8 strings.
            path: entry.path.to_string_lossy().into(),
            inode: entry.inode,
            mtime: Some(entry.mtime.into()),
            is_symlink: entry.is_symlink,
            is_ignored: entry.is_ignored,
        }
    }
}
2993
2994impl<'a> TryFrom<(&'a CharBag, proto::Entry)> for Entry {
2995 type Error = anyhow::Error;
2996
2997 fn try_from((root_char_bag, entry): (&'a CharBag, proto::Entry)) -> Result<Self> {
2998 if let Some(mtime) = entry.mtime {
2999 let kind = if entry.is_dir {
3000 EntryKind::Dir
3001 } else {
3002 let mut char_bag = *root_char_bag;
3003 char_bag.extend(entry.path.chars().map(|c| c.to_ascii_lowercase()));
3004 EntryKind::File(char_bag)
3005 };
3006 let path: Arc<Path> = PathBuf::from(entry.path).into();
3007 Ok(Entry {
3008 id: ProjectEntryId::from_proto(entry.id),
3009 kind,
3010 path,
3011 inode: entry.inode,
3012 mtime: mtime.into(),
3013 is_symlink: entry.is_symlink,
3014 is_ignored: entry.is_ignored,
3015 })
3016 } else {
3017 Err(anyhow!(
3018 "missing mtime in remote worktree entry {:?}",
3019 entry.path
3020 ))
3021 }
3022 }
3023}
3024
3025#[cfg(test)]
3026mod tests {
3027 use super::*;
3028 use fs::repository::FakeGitRepository;
3029 use fs::{FakeFs, RealFs};
3030 use gpui::{executor::Deterministic, TestAppContext};
3031 use rand::prelude::*;
3032 use serde_json::json;
3033 use std::{env, fmt::Write};
3034 use util::http::FakeHttpClient;
3035
3036 use util::test::temp_tree;
3037
    // `entries(false)` should skip gitignored entries, while `entries(true)`
    // should include them.
    #[gpui::test]
    async fn test_traversal(cx: &mut TestAppContext) {
        let fs = FakeFs::new(cx.background());
        fs.insert_tree(
            "/root",
            json!({
                ".gitignore": "a/b\n",
                "a": {
                    "b": "",
                    "c": "",
                }
            }),
        )
        .await;

        let http_client = FakeHttpClient::with_404_response();
        let client = cx.read(|cx| Client::new(http_client, cx));

        let tree = Worktree::local(
            client,
            Path::new("/root"),
            true,
            fs,
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;

        tree.read_with(cx, |tree, _| {
            // "a/b" is ignored, so it only appears when ignored entries are requested.
            assert_eq!(
                tree.entries(false)
                    .map(|entry| entry.path.as_ref())
                    .collect::<Vec<_>>(),
                vec![
                    Path::new(""),
                    Path::new(".gitignore"),
                    Path::new("a"),
                    Path::new("a/c"),
                ]
            );
            assert_eq!(
                tree.entries(true)
                    .map(|entry| entry.path.as_ref())
                    .collect::<Vec<_>>(),
                vec![
                    Path::new(""),
                    Path::new(".gitignore"),
                    Path::new("a"),
                    Path::new("a/b"),
                    Path::new("a/c"),
                ]
            );
        })
    }
3095
    // The scanner should list circular symlinks without recursing into them
    // forever, and should pick up renames of such links.
    #[gpui::test(iterations = 10)]
    async fn test_circular_symlinks(executor: Arc<Deterministic>, cx: &mut TestAppContext) {
        let fs = FakeFs::new(cx.background());
        fs.insert_tree(
            "/root",
            json!({
                "lib": {
                    "a": {
                        "a.txt": ""
                    },
                    "b": {
                        "b.txt": ""
                    }
                }
            }),
        )
        .await;
        // Each symlink points back at its parent's parent, forming a cycle.
        fs.insert_symlink("/root/lib/a/lib", "..".into()).await;
        fs.insert_symlink("/root/lib/b/lib", "..".into()).await;

        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
        let tree = Worktree::local(
            client,
            Path::new("/root"),
            true,
            fs.clone(),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();

        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;

        tree.read_with(cx, |tree, _| {
            assert_eq!(
                tree.entries(false)
                    .map(|entry| entry.path.as_ref())
                    .collect::<Vec<_>>(),
                vec![
                    Path::new(""),
                    Path::new("lib"),
                    Path::new("lib/a"),
                    Path::new("lib/a/a.txt"),
                    Path::new("lib/a/lib"),
                    Path::new("lib/b"),
                    Path::new("lib/b/b.txt"),
                    Path::new("lib/b/lib"),
                ]
            );
        });

        // Renaming one of the links should be reflected after the FS events settle.
        fs.rename(
            Path::new("/root/lib/a/lib"),
            Path::new("/root/lib/a/lib-2"),
            Default::default(),
        )
        .await
        .unwrap();
        executor.run_until_parked();
        tree.read_with(cx, |tree, _| {
            assert_eq!(
                tree.entries(false)
                    .map(|entry| entry.path.as_ref())
                    .collect::<Vec<_>>(),
                vec![
                    Path::new(""),
                    Path::new("lib"),
                    Path::new("lib/a"),
                    Path::new("lib/a/a.txt"),
                    Path::new("lib/a/lib-2"),
                    Path::new("lib/b"),
                    Path::new("lib/b/b.txt"),
                    Path::new("lib/b/lib"),
                ]
            );
        });
    }
3175
    // Ignore statuses should respect both an ancestor `.gitignore` (outside
    // the worktree root) and the worktree's own `.gitignore`, including for
    // files created after the initial scan.
    #[gpui::test]
    async fn test_rescan_with_gitignore(cx: &mut TestAppContext) {
        let parent_dir = temp_tree(json!({
            ".gitignore": "ancestor-ignored-file1\nancestor-ignored-file2\n",
            "tree": {
                ".git": {},
                ".gitignore": "ignored-dir\n",
                "tracked-dir": {
                    "tracked-file1": "",
                    "ancestor-ignored-file1": "",
                },
                "ignored-dir": {
                    "ignored-file1": ""
                }
            }
        }));
        let dir = parent_dir.path().join("tree");

        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));

        let tree = Worktree::local(
            client,
            dir.as_path(),
            true,
            Arc::new(RealFs),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;
        tree.flush_fs_events(cx).await;
        cx.read(|cx| {
            let tree = tree.read(cx);
            assert!(
                !tree
                    .entry_for_path("tracked-dir/tracked-file1")
                    .unwrap()
                    .is_ignored
            );
            assert!(
                tree.entry_for_path("tracked-dir/ancestor-ignored-file1")
                    .unwrap()
                    .is_ignored
            );
            assert!(
                tree.entry_for_path("ignored-dir/ignored-file1")
                    .unwrap()
                    .is_ignored
            );
        });

        // Files created after the initial scan should get the same treatment.
        std::fs::write(dir.join("tracked-dir/tracked-file2"), "").unwrap();
        std::fs::write(dir.join("tracked-dir/ancestor-ignored-file2"), "").unwrap();
        std::fs::write(dir.join("ignored-dir/ignored-file2"), "").unwrap();
        tree.flush_fs_events(cx).await;
        cx.read(|cx| {
            let tree = tree.read(cx);
            assert!(
                !tree
                    .entry_for_path("tracked-dir/tracked-file2")
                    .unwrap()
                    .is_ignored
            );
            assert!(
                tree.entry_for_path("tracked-dir/ancestor-ignored-file2")
                    .unwrap()
                    .is_ignored
            );
            assert!(
                tree.entry_for_path("ignored-dir/ignored-file2")
                    .unwrap()
                    .is_ignored
            );
            assert!(tree.entry_for_path(".git").unwrap().is_ignored);
        });
    }
3254
    // Entries should map to their innermost containing git repository; a
    // repository's scan id should change when its `.git` contents change, and
    // deleting `.git` should remove the repository.
    #[gpui::test]
    async fn test_git_repository_for_path(cx: &mut TestAppContext) {
        let root = temp_tree(json!({
            "dir1": {
                ".git": {},
                "deps": {
                    "dep1": {
                        ".git": {},
                        "src": {
                            "a.txt": ""
                        }
                    }
                },
                "src": {
                    "b.txt": ""
                }
            },
            "c.txt": "",
        }));

        let http_client = FakeHttpClient::with_404_response();
        let client = cx.read(|cx| Client::new(http_client, cx));
        let tree = Worktree::local(
            client,
            root.path(),
            true,
            Arc::new(RealFs),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();

        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;
        tree.flush_fs_events(cx).await;

        tree.read_with(cx, |tree, _cx| {
            let tree = tree.as_local().unwrap();

            // "c.txt" lives outside any repository.
            assert!(tree.repo_for("c.txt".as_ref()).is_none());

            let repo = tree.repo_for("dir1/src/b.txt".as_ref()).unwrap();
            assert_eq!(repo.content_path.as_ref(), Path::new("dir1"));
            assert_eq!(repo.git_dir_path.as_ref(), Path::new("dir1/.git"));

            // Nested dependency repos take precedence over the outer repo.
            let repo = tree.repo_for("dir1/deps/dep1/src/a.txt".as_ref()).unwrap();
            assert_eq!(repo.content_path.as_ref(), Path::new("dir1/deps/dep1"));
            assert_eq!(repo.git_dir_path.as_ref(), Path::new("dir1/deps/dep1/.git"),);
        });

        let original_scan_id = tree.read_with(cx, |tree, _cx| {
            let tree = tree.as_local().unwrap();
            tree.repo_for("dir1/src/b.txt".as_ref()).unwrap().scan_id
        });

        // Touching the `.git` directory should bump the repo's scan id.
        std::fs::write(root.path().join("dir1/.git/random_new_file"), "hello").unwrap();
        tree.flush_fs_events(cx).await;

        tree.read_with(cx, |tree, _cx| {
            let tree = tree.as_local().unwrap();
            let new_scan_id = tree.repo_for("dir1/src/b.txt".as_ref()).unwrap().scan_id;
            assert_ne!(
                original_scan_id, new_scan_id,
                "original {original_scan_id}, new {new_scan_id}"
            );
        });

        std::fs::remove_dir_all(root.path().join("dir1/.git")).unwrap();
        tree.flush_fs_events(cx).await;

        tree.read_with(cx, |tree, _cx| {
            let tree = tree.as_local().unwrap();

            assert!(tree.repo_for("dir1/src/b.txt".as_ref()).is_none());
        });
    }
3332
3333 #[test]
3334 fn test_changed_repos() {
3335 fn fake_entry(git_dir_path: impl AsRef<Path>, scan_id: usize) -> GitRepositoryEntry {
3336 GitRepositoryEntry {
3337 repo: Arc::new(Mutex::new(FakeGitRepository::default())),
3338 scan_id,
3339 content_path: git_dir_path.as_ref().parent().unwrap().into(),
3340 git_dir_path: git_dir_path.as_ref().into(),
3341 }
3342 }
3343
3344 let prev_repos: Vec<GitRepositoryEntry> = vec![
3345 fake_entry("/.git", 0),
3346 fake_entry("/a/.git", 0),
3347 fake_entry("/a/b/.git", 0),
3348 ];
3349
3350 let new_repos: Vec<GitRepositoryEntry> = vec![
3351 fake_entry("/a/.git", 1),
3352 fake_entry("/a/b/.git", 0),
3353 fake_entry("/a/c/.git", 0),
3354 ];
3355
3356 let res = LocalWorktree::changed_repos(&prev_repos, &new_repos);
3357
3358 // Deletion retained
3359 assert!(res
3360 .iter()
3361 .find(|repo| repo.git_dir_path.as_ref() == Path::new("/.git") && repo.scan_id == 0)
3362 .is_some());
3363
3364 // Update retained
3365 assert!(res
3366 .iter()
3367 .find(|repo| repo.git_dir_path.as_ref() == Path::new("/a/.git") && repo.scan_id == 1)
3368 .is_some());
3369
3370 // Addition retained
3371 assert!(res
3372 .iter()
3373 .find(|repo| repo.git_dir_path.as_ref() == Path::new("/a/c/.git") && repo.scan_id == 0)
3374 .is_some());
3375
3376 // Nochange, not retained
3377 assert!(res
3378 .iter()
3379 .find(|repo| repo.git_dir_path.as_ref() == Path::new("/a/b/.git") && repo.scan_id == 0)
3380 .is_none());
3381 }
3382
    // Files written through the worktree should get entries with the correct
    // ignore status.
    #[gpui::test]
    async fn test_write_file(cx: &mut TestAppContext) {
        let dir = temp_tree(json!({
            ".git": {},
            ".gitignore": "ignored-dir\n",
            "tracked-dir": {},
            "ignored-dir": {}
        }));

        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));

        let tree = Worktree::local(
            client,
            dir.path(),
            true,
            Arc::new(RealFs),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;
        tree.flush_fs_events(cx).await;

        tree.update(cx, |tree, cx| {
            tree.as_local().unwrap().write_file(
                Path::new("tracked-dir/file.txt"),
                "hello".into(),
                Default::default(),
                cx,
            )
        })
        .await
        .unwrap();
        tree.update(cx, |tree, cx| {
            tree.as_local().unwrap().write_file(
                Path::new("ignored-dir/file.txt"),
                "world".into(),
                Default::default(),
                cx,
            )
        })
        .await
        .unwrap();

        tree.read_with(cx, |tree, _| {
            let tracked = tree.entry_for_path("tracked-dir/file.txt").unwrap();
            let ignored = tree.entry_for_path("ignored-dir/file.txt").unwrap();
            assert!(!tracked.is_ignored);
            assert!(ignored.is_ignored);
        });
    }
3436
    // Creating an entry at a path whose parent directory does not yet exist
    // should still produce a directory entry at that path.
    #[gpui::test(iterations = 30)]
    async fn test_create_directory(cx: &mut TestAppContext) {
        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));

        let fs = FakeFs::new(cx.background());
        fs.insert_tree(
            "/root",
            json!({
                "b": {},
                "c": {},
                "d": {},
            }),
        )
        .await;

        let tree = Worktree::local(
            client,
            "/root".as_ref(),
            true,
            fs,
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();

        // "a" does not exist yet; create "a/e" as a directory.
        let entry = tree
            .update(cx, |tree, cx| {
                tree.as_local_mut()
                    .unwrap()
                    .create_entry("a/e".as_ref(), true, cx)
            })
            .await
            .unwrap();
        assert!(entry.is_dir());

        cx.foreground().run_until_parked();

        tree.read_with(cx, |tree, _| {
            assert_eq!(tree.entry_for_path("a/e").unwrap().kind, EntryKind::Dir);
        });
    }
3479
    #[gpui::test(iterations = 100)]
    async fn test_random_worktree_changes(cx: &mut TestAppContext, mut rng: StdRng) {
        // Both knobs can be overridden from the environment when
        // reproducing a failing seed.
        let operations = env::var("OPERATIONS")
            .map(|o| o.parse().unwrap())
            .unwrap_or(40);
        let initial_entries = env::var("INITIAL_ENTRIES")
            .map(|o| o.parse().unwrap())
            .unwrap_or(20);

        // Build a random initial tree in the fake filesystem. A probability
        // of 1.0 makes every mutation an insertion.
        let root_dir = Path::new("/test");
        let fs = FakeFs::new(cx.background()) as Arc<dyn Fs>;
        fs.as_fake().insert_tree(root_dir, json!({})).await;
        for _ in 0..initial_entries {
            randomly_mutate_tree(&fs, root_dir, 1.0, &mut rng).await;
        }
        log::info!("generated initial tree");

        let next_entry_id = Arc::new(AtomicUsize::default());
        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
        let worktree = Worktree::local(
            client.clone(),
            root_dir,
            true,
            fs.clone(),
            next_entry_id.clone(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();

        worktree
            .update(cx, |tree, _| tree.as_local_mut().unwrap().scan_complete())
            .await;

        // After the initial scan is complete, the `UpdatedEntries` event can
        // be used to follow along with all changes to the worktree's snapshot.
        worktree.update(cx, |tree, cx| {
            // Maintain a mirror of the worktree's sorted path list and check
            // that applying each reported change keeps the mirror in sync.
            let mut paths = tree
                .as_local()
                .unwrap()
                .paths()
                .cloned()
                .collect::<Vec<_>>();

            cx.subscribe(&worktree, move |tree, _, event, _| {
                if let Event::UpdatedEntries(changes) = event {
                    for (path, change_type) in changes.iter() {
                        let path = path.clone();
                        // Either way, `ix` is the position the path occupies
                        // (or would occupy) in sorted order.
                        let ix = match paths.binary_search(&path) {
                            Ok(ix) | Err(ix) => ix,
                        };
                        match change_type {
                            PathChange::Added => {
                                // An added path must not already be present.
                                assert_ne!(paths.get(ix), Some(&path));
                                paths.insert(ix, path);
                            }
                            PathChange::Removed => {
                                // A removed path must have been present.
                                assert_eq!(paths.get(ix), Some(&path));
                                paths.remove(ix);
                            }
                            PathChange::Updated => {
                                // An updated path must already be present.
                                assert_eq!(paths.get(ix), Some(&path));
                            }
                            PathChange::AddedOrUpdated => {
                                // Insert only if the path is not yet present.
                                if paths[ix] != path {
                                    paths.insert(ix, path);
                                }
                            }
                        }
                    }
                    // The mirrored list must now match the tree exactly.
                    let new_paths = tree.paths().cloned().collect::<Vec<_>>();
                    assert_eq!(paths, new_paths, "incorrect changes: {:?}", changes);
                }
            })
            .detach();
        });

        // Interleave further mutations with partial flushes of buffered fs
        // events, occasionally capturing snapshots to diff against later.
        let mut snapshots = Vec::new();
        let mut mutations_len = operations;
        while mutations_len > 1 {
            randomly_mutate_tree(&fs, root_dir, 1.0, &mut rng).await;
            let buffered_event_count = fs.as_fake().buffered_event_count().await;
            if buffered_event_count > 0 && rng.gen_bool(0.3) {
                // Deliver only a random prefix of the buffered events, so the
                // scanner observes the filesystem in an intermediate state.
                let len = rng.gen_range(0..=buffered_event_count);
                log::info!("flushing {} events", len);
                fs.as_fake().flush_events(len).await;
            } else {
                randomly_mutate_tree(&fs, root_dir, 0.6, &mut rng).await;
                mutations_len -= 1;
            }

            cx.foreground().run_until_parked();
            if rng.gen_bool(0.2) {
                log::info!("storing snapshot {}", snapshots.len());
                let snapshot =
                    worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
                snapshots.push(snapshot);
            }
        }

        // Flush everything, let the scanner catch up, and validate the final
        // snapshot's internal consistency.
        log::info!("quiescing");
        fs.as_fake().flush_events(usize::MAX).await;
        cx.foreground().run_until_parked();
        let snapshot = worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
        snapshot.check_invariants();

        // A freshly-scanned worktree over the same filesystem must produce an
        // identical snapshot.
        {
            let new_worktree = Worktree::local(
                client.clone(),
                root_dir,
                true,
                fs.clone(),
                next_entry_id,
                &mut cx.to_async(),
            )
            .await
            .unwrap();
            new_worktree
                .update(cx, |tree, _| tree.as_local_mut().unwrap().scan_complete())
                .await;
            let new_snapshot =
                new_worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
            assert_eq!(snapshot.to_vec(true), new_snapshot.to_vec(true));
        }

        // Each stored snapshot, patched via the remote-update protocol, must
        // converge to the final snapshot.
        for (i, mut prev_snapshot) in snapshots.into_iter().enumerate() {
            let include_ignored = rng.gen::<bool>();
            if !include_ignored {
                // Strip ignored entries from the older snapshot, since the
                // update built below will not include them either.
                let mut entries_by_path_edits = Vec::new();
                let mut entries_by_id_edits = Vec::new();
                for entry in prev_snapshot
                    .entries_by_id
                    .cursor::<()>()
                    .filter(|e| e.is_ignored)
                {
                    entries_by_path_edits.push(Edit::Remove(PathKey(entry.path.clone())));
                    entries_by_id_edits.push(Edit::Remove(entry.id));
                }

                prev_snapshot
                    .entries_by_path
                    .edit(entries_by_path_edits, &());
                prev_snapshot.entries_by_id.edit(entries_by_id_edits, &());
            }

            let update = snapshot.build_update(&prev_snapshot, 0, 0, include_ignored);
            prev_snapshot.apply_remote_update(update.clone()).unwrap();
            assert_eq!(
                prev_snapshot.to_vec(include_ignored),
                snapshot.to_vec(include_ignored),
                "wrong update for snapshot {i}. update: {:?}",
                update
            );
        }
    }
3635
    /// Applies one random mutation to the fake filesystem under `root_path`:
    /// with probability `insertion_probability` a new file or directory is
    /// created; otherwise an existing entry is renamed or deleted, or
    /// (rarely) a `.gitignore` file is written.
    async fn randomly_mutate_tree(
        fs: &Arc<dyn Fs>,
        root_path: &Path,
        insertion_probability: f64,
        rng: &mut impl Rng,
    ) {
        // Partition the current tree contents into files and directories.
        // `dirs` always includes `root_path` itself.
        let mut files = Vec::new();
        let mut dirs = Vec::new();
        for path in fs.as_fake().paths() {
            if path.starts_with(root_path) {
                if fs.is_file(&path).await {
                    files.push(path);
                } else {
                    dirs.push(path);
                }
            }
        }

        // Force an insertion when the tree is empty (only the root exists),
        // since the other branches need something to operate on.
        if (files.is_empty() && dirs.len() == 1) || rng.gen_bool(insertion_probability) {
            let path = dirs.choose(rng).unwrap();
            let new_path = path.join(gen_name(rng));

            if rng.gen() {
                log::info!(
                    "creating dir {:?}",
                    new_path.strip_prefix(root_path).unwrap()
                );
                fs.create_dir(&new_path).await.unwrap();
            } else {
                log::info!(
                    "creating file {:?}",
                    new_path.strip_prefix(root_path).unwrap()
                );
                fs.create_file(&new_path, Default::default()).await.unwrap();
            }
        } else if rng.gen_bool(0.05) {
            // Write a .gitignore into a random directory, ignoring a random
            // subset of the files and directories beneath it.
            let ignore_dir_path = dirs.choose(rng).unwrap();
            let ignore_path = ignore_dir_path.join(&*GITIGNORE);

            let subdirs = dirs
                .iter()
                .filter(|d| d.starts_with(&ignore_dir_path))
                .cloned()
                .collect::<Vec<_>>();
            let subfiles = files
                .iter()
                .filter(|d| d.starts_with(&ignore_dir_path))
                .cloned()
                .collect::<Vec<_>>();
            let files_to_ignore = {
                let len = rng.gen_range(0..=subfiles.len());
                subfiles.choose_multiple(rng, len)
            };
            let dirs_to_ignore = {
                // NOTE(review): exclusive range here (vs. inclusive for files
                // above) — presumably so not every subdir (which includes
                // `ignore_dir_path` itself) gets ignored; confirm intent.
                let len = rng.gen_range(0..subdirs.len());
                subdirs.choose_multiple(rng, len)
            };

            // Patterns are written relative to the .gitignore's own
            // directory, one per line.
            let mut ignore_contents = String::new();
            for path_to_ignore in files_to_ignore.chain(dirs_to_ignore) {
                writeln!(
                    ignore_contents,
                    "{}",
                    path_to_ignore
                        .strip_prefix(&ignore_dir_path)
                        .unwrap()
                        .to_str()
                        .unwrap()
                )
                .unwrap();
            }
            log::info!(
                "creating gitignore {:?} with contents:\n{}",
                ignore_path.strip_prefix(&root_path).unwrap(),
                ignore_contents
            );
            fs.save(
                &ignore_path,
                &ignore_contents.as_str().into(),
                Default::default(),
            )
            .await
            .unwrap();
        } else {
            // Pick an existing entry to rename or delete. `dirs[1..]` skips
            // the root directory so it is never renamed or removed.
            let old_path = {
                let file_path = files.choose(rng);
                let dir_path = dirs[1..].choose(rng);
                file_path.into_iter().chain(dir_path).choose(rng).unwrap()
            };

            let is_rename = rng.gen();
            if is_rename {
                // Choose a destination parent outside the entry being moved,
                // so a directory is never moved into itself.
                let new_path_parent = dirs
                    .iter()
                    .filter(|d| !d.starts_with(old_path))
                    .choose(rng)
                    .unwrap();

                // Occasionally replace an existing directory wholesale:
                // remove it first, then rename the old entry onto its path.
                let overwrite_existing_dir =
                    !old_path.starts_with(&new_path_parent) && rng.gen_bool(0.3);
                let new_path = if overwrite_existing_dir {
                    fs.remove_dir(
                        &new_path_parent,
                        RemoveOptions {
                            recursive: true,
                            ignore_if_not_exists: true,
                        },
                    )
                    .await
                    .unwrap();
                    new_path_parent.to_path_buf()
                } else {
                    new_path_parent.join(gen_name(rng))
                };

                log::info!(
                    "renaming {:?} to {}{:?}",
                    old_path.strip_prefix(&root_path).unwrap(),
                    if overwrite_existing_dir {
                        "overwrite "
                    } else {
                        ""
                    },
                    new_path.strip_prefix(&root_path).unwrap()
                );
                fs.rename(
                    &old_path,
                    &new_path,
                    fs::RenameOptions {
                        overwrite: true,
                        ignore_if_exists: true,
                    },
                )
                .await
                .unwrap();
            } else if fs.is_file(&old_path).await {
                log::info!(
                    "deleting file {:?}",
                    old_path.strip_prefix(&root_path).unwrap()
                );
                fs.remove_file(old_path, Default::default()).await.unwrap();
            } else {
                log::info!(
                    "deleting dir {:?}",
                    old_path.strip_prefix(&root_path).unwrap()
                );
                fs.remove_dir(
                    &old_path,
                    RemoveOptions {
                        recursive: true,
                        ignore_if_not_exists: true,
                    },
                )
                .await
                .unwrap();
            }
        }
    }
3794
3795 fn gen_name(rng: &mut impl Rng) -> String {
3796 (0..6)
3797 .map(|_| rng.sample(rand::distributions::Alphanumeric))
3798 .map(char::from)
3799 .collect()
3800 }
3801
    impl LocalSnapshot {
        /// Test-only consistency check: verifies that the snapshot's file
        /// iterators, entry tree, and traversal API all agree, and that every
        /// tracked gitignore has corresponding entries.
        fn check_invariants(&self) {
            // `files(true, ..)` yields all files; `files(false, ..)` skips
            // ignored ones. Both must match the entries tree in order and
            // count.
            let mut files = self.files(true, 0);
            let mut visible_files = self.files(false, 0);
            for entry in self.entries_by_path.cursor::<()>() {
                if entry.is_file() {
                    assert_eq!(files.next().unwrap().inode, entry.inode);
                    if !entry.is_ignored {
                        assert_eq!(visible_files.next().unwrap().inode, entry.inode);
                    }
                }
            }
            assert!(files.next().is_none());
            assert!(visible_files.next().is_none());

            // Walk the tree via `child_entries`, inserting children at a
            // fixed stack position so the visit order matches the sorted
            // order of `entries_by_path`.
            let mut bfs_paths = Vec::new();
            let mut stack = vec![Path::new("")];
            while let Some(path) = stack.pop() {
                bfs_paths.push(path);
                let ix = stack.len();
                for child_entry in self.child_entries(path) {
                    stack.insert(ix, &child_entry.path);
                }
            }

            let dfs_paths_via_iter = self
                .entries_by_path
                .cursor::<()>()
                .map(|e| e.path.as_ref())
                .collect::<Vec<_>>();
            assert_eq!(bfs_paths, dfs_paths_via_iter);

            // The public traversal API must produce the same ordering as the
            // raw cursor over the entries tree.
            let dfs_paths_via_traversal = self
                .entries(true)
                .map(|e| e.path.as_ref())
                .collect::<Vec<_>>();
            assert_eq!(dfs_paths_via_traversal, dfs_paths_via_iter);

            // Every directory with a tracked .gitignore must itself be an
            // entry, and so must the .gitignore file within it.
            for ignore_parent_abs_path in self.ignores_by_parent_abs_path.keys() {
                let ignore_parent_path =
                    ignore_parent_abs_path.strip_prefix(&self.abs_path).unwrap();
                assert!(self.entry_for_path(&ignore_parent_path).is_some());
                assert!(self
                    .entry_for_path(ignore_parent_path.join(&*GITIGNORE))
                    .is_some());
            }
        }

        /// Flattens the snapshot into sorted `(path, inode, is_ignored)`
        /// tuples — optionally excluding ignored entries — for easy
        /// comparison between snapshots in tests.
        fn to_vec(&self, include_ignored: bool) -> Vec<(&Path, u64, bool)> {
            let mut paths = Vec::new();
            for entry in self.entries_by_path.cursor::<()>() {
                if include_ignored || !entry.is_ignored {
                    paths.push((entry.path.as_ref(), entry.inode, entry.is_ignored));
                }
            }
            paths.sort_by(|a, b| a.0.cmp(b.0));
            paths
        }
    }
3861}