use crate::{
    copy_recursive, ignore::IgnoreStack, DiagnosticSummary, ProjectEntryId, RemoveOptions,
};
use ::ignore::gitignore::{Gitignore, GitignoreBuilder};
use anyhow::{anyhow, Context, Result};
use client::{proto, Client};
use clock::ReplicaId;
use collections::{HashMap, VecDeque};
use fs::{repository::GitRepository, Fs, LineEnding};
use futures::{
    channel::{
        mpsc::{self, UnboundedSender},
        oneshot,
    },
    select_biased, Stream, StreamExt,
};
use fuzzy::CharBag;
use git::{DOT_GIT, GITIGNORE};
use gpui::{
    executor, AppContext, AsyncAppContext, Entity, ModelContext, ModelHandle, MutableAppContext,
    Task,
};
use language::{
    proto::{
        deserialize_fingerprint, deserialize_version, serialize_fingerprint, serialize_line_ending,
        serialize_version,
    },
    Buffer, DiagnosticEntry, File as _, PointUtf16, Rope, RopeFingerprint, Unclipped,
};
use parking_lot::Mutex;
use postage::{
    barrier,
    prelude::{Sink as _, Stream as _},
    watch,
};
use smol::channel::{self, Sender};
use std::{
    any::Any,
    cmp::{self, Ordering},
    convert::TryFrom,
    ffi::OsStr,
    fmt,
    future::Future,
    mem,
    ops::{Deref, DerefMut},
    path::{Path, PathBuf},
    sync::{
        atomic::{AtomicUsize, Ordering::SeqCst},
        Arc,
    },
    task::Poll,
    time::{Duration, SystemTime},
};
use sum_tree::{Bias, Edit, SeekTarget, SumTree, TreeMap, TreeSet};
use util::{paths::HOME, ResultExt, TryFutureExt};

#[derive(Copy, Clone, PartialEq, Eq, Debug, Hash, PartialOrd, Ord)]
pub struct WorktreeId(usize);

pub enum Worktree {
    Local(LocalWorktree),
    Remote(RemoteWorktree),
}
64
65pub struct LocalWorktree {
66 snapshot: LocalSnapshot,
67 path_changes_tx: channel::Sender<(Vec<PathBuf>, barrier::Sender)>,
68 is_scanning: (watch::Sender<bool>, watch::Receiver<bool>),
69 _background_scanner_task: Task<()>,
70 share: Option<ShareState>,
71 diagnostics: HashMap<Arc<Path>, Vec<DiagnosticEntry<Unclipped<PointUtf16>>>>,
72 diagnostic_summaries: TreeMap<PathKey, DiagnosticSummary>,
73 client: Arc<Client>,
74 fs: Arc<dyn Fs>,
75 visible: bool,
76}
77
78pub struct RemoteWorktree {
79 snapshot: Snapshot,
80 background_snapshot: Arc<Mutex<Snapshot>>,
81 project_id: u64,
82 client: Arc<Client>,
83 updates_tx: Option<UnboundedSender<proto::UpdateWorktree>>,
84 snapshot_subscriptions: VecDeque<(usize, oneshot::Sender<()>)>,
85 replica_id: ReplicaId,
86 diagnostic_summaries: TreeMap<PathKey, DiagnosticSummary>,
87 visible: bool,
88 disconnected: bool,
89}
90
91#[derive(Clone)]
92pub struct Snapshot {
93 id: WorktreeId,
94 abs_path: Arc<Path>,
95 root_name: String,
96 root_char_bag: CharBag,
97 entries_by_path: SumTree<Entry>,
98 entries_by_id: SumTree<PathEntry>,
99 scan_id: usize,
100 completed_scan_id: usize,
101}
102
#[derive(Clone)]
pub struct GitRepositoryEntry {
    pub(crate) repo: Arc<Mutex<dyn GitRepository>>,

    pub(crate) scan_id: usize,
    /// Path to the folder containing the .git file or directory.
    pub(crate) content_path: Arc<Path>,
    /// Path to the actual .git folder.
    /// Note: if .git is a file, this points to the folder indicated by the .git file.
    pub(crate) git_dir_path: Arc<Path>,
}
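
// Example (sketch): for a repository checked out at `crates/foo` within this
// worktree, `content_path` would be `crates/foo` and `git_dir_path` would be
// `crates/foo/.git`; both are worktree-relative paths. Illustrative values,
// not taken from a real project:
//
//     GitRepositoryEntry {
//         content_path: Path::new("crates/foo").into(),
//         git_dir_path: Path::new("crates/foo/.git").into(),
//         ..
//     }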
114
115impl std::fmt::Debug for GitRepositoryEntry {
116 fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
117 f.debug_struct("GitRepositoryEntry")
118 .field("content_path", &self.content_path)
119 .field("git_dir_path", &self.git_dir_path)
120 .finish()
121 }
122}
123
124#[derive(Debug)]
125pub struct LocalSnapshot {
126 ignores_by_parent_abs_path: HashMap<Arc<Path>, (Arc<Gitignore>, usize)>,
127 git_repositories: Vec<GitRepositoryEntry>,
128 removed_entry_ids: HashMap<u64, ProjectEntryId>,
129 next_entry_id: Arc<AtomicUsize>,
130 snapshot: Snapshot,
131}
132
133impl Clone for LocalSnapshot {
134 fn clone(&self) -> Self {
135 Self {
136 ignores_by_parent_abs_path: self.ignores_by_parent_abs_path.clone(),
137 git_repositories: self.git_repositories.iter().cloned().collect(),
138 removed_entry_ids: self.removed_entry_ids.clone(),
139 next_entry_id: self.next_entry_id.clone(),
140 snapshot: self.snapshot.clone(),
141 }
142 }
143}
144
145impl Deref for LocalSnapshot {
146 type Target = Snapshot;
147
148 fn deref(&self) -> &Self::Target {
149 &self.snapshot
150 }
151}
152
153impl DerefMut for LocalSnapshot {
154 fn deref_mut(&mut self) -> &mut Self::Target {
155 &mut self.snapshot
156 }
157}
158
enum ScanState {
    /// The worktree is performing its initial scan of the filesystem.
    Initializing {
        snapshot: LocalSnapshot,
        barrier: Option<barrier::Sender>,
    },
    /// The initial filesystem scan has completed.
    Initialized {
        snapshot: LocalSnapshot,
    },
    /// The worktree is updating in response to filesystem events.
    Updating,
    /// An update pass triggered by filesystem events has completed.
    Updated {
        snapshot: LocalSnapshot,
        changes: HashMap<Arc<Path>, PathChange>,
        barrier: Option<barrier::Sender>,
    },
}

struct ShareState {
    project_id: u64,
    snapshots_tx: watch::Sender<LocalSnapshot>,
    resume_updates: watch::Sender<()>,
    _maintain_remote_snapshot: Task<Option<()>>,
}

pub enum Event {
    UpdatedEntries(HashMap<Arc<Path>, PathChange>),
    UpdatedGitRepositories(Vec<GitRepositoryEntry>),
}
188
189impl Entity for Worktree {
190 type Event = Event;
191}
192
193impl Worktree {
194 pub async fn local(
195 client: Arc<Client>,
196 path: impl Into<Arc<Path>>,
197 visible: bool,
198 fs: Arc<dyn Fs>,
199 next_entry_id: Arc<AtomicUsize>,
200 cx: &mut AsyncAppContext,
201 ) -> Result<ModelHandle<Self>> {
        // After determining whether the root entry is a file or a directory, populate the
        // snapshot's "root name", which is used for fuzzy matching.
204 let abs_path = path.into();
205 let metadata = fs
206 .metadata(&abs_path)
207 .await
208 .context("failed to stat worktree path")?;
209
210 Ok(cx.add_model(move |cx: &mut ModelContext<Worktree>| {
211 let root_name = abs_path
212 .file_name()
213 .map_or(String::new(), |f| f.to_string_lossy().to_string());
214
215 let mut snapshot = LocalSnapshot {
216 ignores_by_parent_abs_path: Default::default(),
217 git_repositories: Default::default(),
218 removed_entry_ids: Default::default(),
219 next_entry_id,
220 snapshot: Snapshot {
221 id: WorktreeId::from_usize(cx.model_id()),
222 abs_path: abs_path.clone(),
223 root_name: root_name.clone(),
224 root_char_bag: root_name.chars().map(|c| c.to_ascii_lowercase()).collect(),
225 entries_by_path: Default::default(),
226 entries_by_id: Default::default(),
227 scan_id: 0,
228 completed_scan_id: 0,
229 },
230 };
231
232 if let Some(metadata) = metadata {
233 snapshot.insert_entry(
234 Entry::new(
235 Arc::from(Path::new("")),
236 &metadata,
237 &snapshot.next_entry_id,
238 snapshot.root_char_bag,
239 ),
240 fs.as_ref(),
241 );
242 }
243
244 let (path_changes_tx, path_changes_rx) = channel::unbounded();
245 let (scan_states_tx, mut scan_states_rx) = mpsc::unbounded();
246
247 cx.spawn_weak(|this, mut cx| async move {
248 while let Some((state, this)) = scan_states_rx.next().await.zip(this.upgrade(&cx)) {
249 this.update(&mut cx, |this, cx| {
250 this.as_local_mut()
251 .unwrap()
252 .background_scanner_updated(state, cx);
253 });
254 }
255 })
256 .detach();
257
258 let background_scanner_task = cx.background().spawn({
259 let fs = fs.clone();
260 let snapshot = snapshot.clone();
261 let background = cx.background().clone();
262 async move {
263 let events = fs.watch(&abs_path, Duration::from_millis(100)).await;
264 BackgroundScanner::new(snapshot, scan_states_tx, fs, background)
265 .run(events, path_changes_rx)
266 .await;
267 }
268 });
269
270 Worktree::Local(LocalWorktree {
271 snapshot,
272 is_scanning: watch::channel_with(true),
273 share: None,
274 path_changes_tx,
275 _background_scanner_task: background_scanner_task,
276 diagnostics: Default::default(),
277 diagnostic_summaries: Default::default(),
278 client,
279 fs,
280 visible,
281 })
282 }))
283 }
284
285 pub fn remote(
286 project_remote_id: u64,
287 replica_id: ReplicaId,
288 worktree: proto::WorktreeMetadata,
289 client: Arc<Client>,
290 cx: &mut MutableAppContext,
291 ) -> ModelHandle<Self> {
292 cx.add_model(|cx: &mut ModelContext<Self>| {
293 let snapshot = Snapshot {
294 id: WorktreeId(worktree.id as usize),
295 abs_path: Arc::from(PathBuf::from(worktree.abs_path)),
296 root_name: worktree.root_name.clone(),
297 root_char_bag: worktree
298 .root_name
299 .chars()
300 .map(|c| c.to_ascii_lowercase())
301 .collect(),
302 entries_by_path: Default::default(),
303 entries_by_id: Default::default(),
304 scan_id: 0,
305 completed_scan_id: 0,
306 };
307
308 let (updates_tx, mut updates_rx) = mpsc::unbounded();
309 let background_snapshot = Arc::new(Mutex::new(snapshot.clone()));
310 let (mut snapshot_updated_tx, mut snapshot_updated_rx) = watch::channel();
311
312 cx.background()
313 .spawn({
314 let background_snapshot = background_snapshot.clone();
315 async move {
316 while let Some(update) = updates_rx.next().await {
317 if let Err(error) =
318 background_snapshot.lock().apply_remote_update(update)
319 {
320 log::error!("error applying worktree update: {}", error);
321 }
322 snapshot_updated_tx.send(()).await.ok();
323 }
324 }
325 })
326 .detach();
327
328 cx.spawn_weak(|this, mut cx| async move {
329 while (snapshot_updated_rx.recv().await).is_some() {
330 if let Some(this) = this.upgrade(&cx) {
331 this.update(&mut cx, |this, cx| {
332 let this = this.as_remote_mut().unwrap();
333 this.snapshot = this.background_snapshot.lock().clone();
334 cx.emit(Event::UpdatedEntries(Default::default()));
335 cx.notify();
336 while let Some((scan_id, _)) = this.snapshot_subscriptions.front() {
337 if this.observed_snapshot(*scan_id) {
338 let (_, tx) = this.snapshot_subscriptions.pop_front().unwrap();
339 let _ = tx.send(());
340 } else {
341 break;
342 }
343 }
344 });
345 } else {
346 break;
347 }
348 }
349 })
350 .detach();
351
352 Worktree::Remote(RemoteWorktree {
353 project_id: project_remote_id,
354 replica_id,
355 snapshot: snapshot.clone(),
356 background_snapshot,
357 updates_tx: Some(updates_tx),
358 snapshot_subscriptions: Default::default(),
359 client: client.clone(),
360 diagnostic_summaries: Default::default(),
361 visible: worktree.visible,
362 disconnected: false,
363 })
364 })
365 }
366
367 pub fn as_local(&self) -> Option<&LocalWorktree> {
368 if let Worktree::Local(worktree) = self {
369 Some(worktree)
370 } else {
371 None
372 }
373 }
374
375 pub fn as_remote(&self) -> Option<&RemoteWorktree> {
376 if let Worktree::Remote(worktree) = self {
377 Some(worktree)
378 } else {
379 None
380 }
381 }
382
383 pub fn as_local_mut(&mut self) -> Option<&mut LocalWorktree> {
384 if let Worktree::Local(worktree) = self {
385 Some(worktree)
386 } else {
387 None
388 }
389 }
390
391 pub fn as_remote_mut(&mut self) -> Option<&mut RemoteWorktree> {
392 if let Worktree::Remote(worktree) = self {
393 Some(worktree)
394 } else {
395 None
396 }
397 }
398
399 pub fn is_local(&self) -> bool {
400 matches!(self, Worktree::Local(_))
401 }
402
403 pub fn is_remote(&self) -> bool {
404 !self.is_local()
405 }
406
407 pub fn snapshot(&self) -> Snapshot {
408 match self {
409 Worktree::Local(worktree) => worktree.snapshot().snapshot,
410 Worktree::Remote(worktree) => worktree.snapshot(),
411 }
412 }
413
414 pub fn scan_id(&self) -> usize {
415 match self {
416 Worktree::Local(worktree) => worktree.snapshot.scan_id,
417 Worktree::Remote(worktree) => worktree.snapshot.scan_id,
418 }
419 }
420
421 pub fn completed_scan_id(&self) -> usize {
422 match self {
423 Worktree::Local(worktree) => worktree.snapshot.completed_scan_id,
424 Worktree::Remote(worktree) => worktree.snapshot.completed_scan_id,
425 }
426 }
427
428 pub fn is_visible(&self) -> bool {
429 match self {
430 Worktree::Local(worktree) => worktree.visible,
431 Worktree::Remote(worktree) => worktree.visible,
432 }
433 }
434
435 pub fn replica_id(&self) -> ReplicaId {
436 match self {
437 Worktree::Local(_) => 0,
438 Worktree::Remote(worktree) => worktree.replica_id,
439 }
440 }
441
    pub fn diagnostic_summaries(
        &self,
    ) -> impl Iterator<Item = (Arc<Path>, DiagnosticSummary)> + '_ {
        match self {
            Worktree::Local(worktree) => &worktree.diagnostic_summaries,
            Worktree::Remote(worktree) => &worktree.diagnostic_summaries,
        }
        .iter()
        .map(|(path, summary)| (path.0.clone(), *summary))
    }

    pub fn abs_path(&self) -> Arc<Path> {
        match self {
            Worktree::Local(worktree) => worktree.abs_path.clone(),
            Worktree::Remote(worktree) => worktree.abs_path.clone(),
        }
    }
}
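
// Usage sketch: given a `ModelHandle<Worktree>` and an `AppContext`, the
// per-path summaries can be folded into project-wide totals. The `worktree`
// and `cx` bindings are assumed to come from surrounding gpui code.
//
//     let total_errors: usize = worktree
//         .read(cx)
//         .diagnostic_summaries()
//         .map(|(_path, summary)| summary.error_count)
//         .sum();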

impl LocalWorktree {
    pub fn contains_abs_path(&self, path: &Path) -> bool {
        path.starts_with(&self.abs_path)
    }

    fn absolutize(&self, path: &Path) -> PathBuf {
        if path.file_name().is_some() {
            self.abs_path.join(path)
        } else {
            self.abs_path.to_path_buf()
        }
    }
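
    // Example (sketch): with `abs_path` = `/Users/someone/project`,
    // `absolutize(Path::new("src/lib.rs"))` yields
    // `/Users/someone/project/src/lib.rs`, while the empty relative path
    // (the worktree root itself) yields `/Users/someone/project`.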
473
474 pub(crate) fn load_buffer(
475 &mut self,
476 path: &Path,
477 cx: &mut ModelContext<Worktree>,
478 ) -> Task<Result<ModelHandle<Buffer>>> {
479 let path = Arc::from(path);
480 cx.spawn(move |this, mut cx| async move {
481 let (file, contents, diff_base) = this
482 .update(&mut cx, |t, cx| t.as_local().unwrap().load(&path, cx))
483 .await?;
484 Ok(cx.add_model(|cx| {
485 let mut buffer = Buffer::from_file(0, contents, diff_base, Arc::new(file), cx);
486 buffer.git_diff_recalc(cx);
487 buffer
488 }))
489 })
490 }
491
492 pub fn diagnostics_for_path(
493 &self,
494 path: &Path,
495 ) -> Option<Vec<DiagnosticEntry<Unclipped<PointUtf16>>>> {
496 self.diagnostics.get(path).cloned()
497 }
498
499 pub fn update_diagnostics(
500 &mut self,
501 language_server_id: usize,
502 worktree_path: Arc<Path>,
503 diagnostics: Vec<DiagnosticEntry<Unclipped<PointUtf16>>>,
504 _: &mut ModelContext<Worktree>,
505 ) -> Result<bool> {
506 self.diagnostics.remove(&worktree_path);
507 let old_summary = self
508 .diagnostic_summaries
509 .remove(&PathKey(worktree_path.clone()))
510 .unwrap_or_default();
511 let new_summary = DiagnosticSummary::new(language_server_id, &diagnostics);
512 if !new_summary.is_empty() {
513 self.diagnostic_summaries
514 .insert(PathKey(worktree_path.clone()), new_summary);
515 self.diagnostics.insert(worktree_path.clone(), diagnostics);
516 }
517
518 let updated = !old_summary.is_empty() || !new_summary.is_empty();
519 if updated {
520 if let Some(share) = self.share.as_ref() {
521 self.client
522 .send(proto::UpdateDiagnosticSummary {
523 project_id: share.project_id,
524 worktree_id: self.id().to_proto(),
525 summary: Some(proto::DiagnosticSummary {
526 path: worktree_path.to_string_lossy().to_string(),
527 language_server_id: language_server_id as u64,
528 error_count: new_summary.error_count as u32,
529 warning_count: new_summary.warning_count as u32,
530 }),
531 })
532 .log_err();
533 }
534 }
535
536 Ok(updated)
537 }
538
539 fn background_scanner_updated(
540 &mut self,
541 scan_state: ScanState,
542 cx: &mut ModelContext<Worktree>,
543 ) {
544 match scan_state {
545 ScanState::Initializing { snapshot, barrier } => {
546 *self.is_scanning.0.borrow_mut() = true;
547 self.set_snapshot(snapshot, cx);
548 drop(barrier);
549 }
550 ScanState::Initialized { snapshot } => {
551 *self.is_scanning.0.borrow_mut() = false;
552 self.set_snapshot(snapshot, cx);
553 }
554 ScanState::Updating => {
555 *self.is_scanning.0.borrow_mut() = true;
556 }
557 ScanState::Updated {
558 snapshot,
559 changes,
560 barrier,
561 } => {
562 *self.is_scanning.0.borrow_mut() = false;
563 cx.emit(Event::UpdatedEntries(changes));
564 self.set_snapshot(snapshot, cx);
565 drop(barrier);
566 }
567 }
568 cx.notify();
569 }
570
571 fn set_snapshot(&mut self, new_snapshot: LocalSnapshot, cx: &mut ModelContext<Worktree>) {
572 let updated_repos = Self::changed_repos(
573 &self.snapshot.git_repositories,
574 &new_snapshot.git_repositories,
575 );
576 self.snapshot = new_snapshot;
577
578 if let Some(share) = self.share.as_mut() {
579 *share.snapshots_tx.borrow_mut() = self.snapshot.clone();
580 }
581
582 if !updated_repos.is_empty() {
583 cx.emit(Event::UpdatedGitRepositories(updated_repos));
584 }
585 }
586
    fn changed_repos(
        old_repos: &[GitRepositoryEntry],
        new_repos: &[GitRepositoryEntry],
    ) -> Vec<GitRepositoryEntry> {
        fn diff<'a>(
            a: &'a [GitRepositoryEntry],
            b: &'a [GitRepositoryEntry],
            updated: &mut HashMap<&'a Path, GitRepositoryEntry>,
        ) {
            for a_repo in a {
                let matched = b.iter().find(|b_repo| {
                    a_repo.git_dir_path == b_repo.git_dir_path && a_repo.scan_id == b_repo.scan_id
                });

                if matched.is_none() {
                    updated.insert(a_repo.git_dir_path.as_ref(), a_repo.clone());
                }
            }
        }

        let mut updated = HashMap::<&Path, GitRepositoryEntry>::default();

        diff(old_repos, new_repos, &mut updated);
        diff(new_repos, old_repos, &mut updated);

        updated.into_values().collect()
    }
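
    // Worked example (sketch): running `diff` in both directions makes this a
    // symmetric difference keyed by `git_dir_path`. If `old_repos` has a repo
    // for `a/.git` at scan_id 1 and `new_repos` has that same repo at
    // scan_id 2 plus a new repo for `b/.git`, both `a/.git` (scan id changed)
    // and `b/.git` (added) are reported; a repository present only in
    // `old_repos` (i.e. removed) is reported the same way.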

    pub fn scan_complete(&self) -> impl Future<Output = ()> {
        let mut is_scanning_rx = self.is_scanning.1.clone();
        async move {
            let mut is_scanning = *is_scanning_rx.borrow();
            while is_scanning {
                if let Some(value) = is_scanning_rx.recv().await {
                    is_scanning = value;
                } else {
                    break;
                }
            }
        }
    }
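
    // Usage sketch (assuming an async context that owns the worktree handle,
    // e.g. a gpui test):
    //
    //     worktree
    //         .read_with(cx, |worktree, _| {
    //             worktree.as_local().unwrap().scan_complete()
    //         })
    //         .await;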
628
629 pub fn snapshot(&self) -> LocalSnapshot {
630 self.snapshot.clone()
631 }
632
633 pub fn metadata_proto(&self) -> proto::WorktreeMetadata {
634 proto::WorktreeMetadata {
635 id: self.id().to_proto(),
636 root_name: self.root_name().to_string(),
637 visible: self.visible,
638 abs_path: self.abs_path().as_os_str().to_string_lossy().into(),
639 }
640 }
641
642 fn load(
643 &self,
644 path: &Path,
645 cx: &mut ModelContext<Worktree>,
646 ) -> Task<Result<(File, String, Option<String>)>> {
647 let handle = cx.handle();
648 let path = Arc::from(path);
649 let abs_path = self.absolutize(&path);
650 let fs = self.fs.clone();
651 let snapshot = self.snapshot();
652
653 cx.spawn(|this, mut cx| async move {
654 let text = fs.load(&abs_path).await?;
655
656 let diff_base = if let Some(repo) = snapshot.repo_for(&path) {
657 if let Ok(repo_relative) = path.strip_prefix(repo.content_path) {
658 let repo_relative = repo_relative.to_owned();
659 cx.background()
660 .spawn(async move { repo.repo.lock().load_index_text(&repo_relative) })
661 .await
662 } else {
663 None
664 }
665 } else {
666 None
667 };
668
669 // Eagerly populate the snapshot with an updated entry for the loaded file
670 let entry = this
671 .update(&mut cx, |this, cx| {
672 this.as_local().unwrap().refresh_entry(path, None, cx)
673 })
674 .await?;
675
676 Ok((
677 File {
678 entry_id: entry.id,
679 worktree: handle,
680 path: entry.path,
681 mtime: entry.mtime,
682 is_local: true,
683 is_deleted: false,
684 },
685 text,
686 diff_base,
687 ))
688 })
689 }
690
691 pub fn save_buffer(
692 &self,
693 buffer_handle: ModelHandle<Buffer>,
694 path: Arc<Path>,
695 has_changed_file: bool,
696 cx: &mut ModelContext<Worktree>,
697 ) -> Task<Result<(clock::Global, RopeFingerprint, SystemTime)>> {
698 let handle = cx.handle();
699 let buffer = buffer_handle.read(cx);
700
701 let rpc = self.client.clone();
702 let buffer_id = buffer.remote_id();
703 let project_id = self.share.as_ref().map(|share| share.project_id);
704
705 let text = buffer.as_rope().clone();
706 let fingerprint = text.fingerprint();
707 let version = buffer.version();
708 let save = self.write_file(path, text, buffer.line_ending(), cx);
709
710 cx.as_mut().spawn(|mut cx| async move {
711 let entry = save.await?;
712
713 if has_changed_file {
714 let new_file = Arc::new(File {
715 entry_id: entry.id,
716 worktree: handle,
717 path: entry.path,
718 mtime: entry.mtime,
719 is_local: true,
720 is_deleted: false,
721 });
722
723 if let Some(project_id) = project_id {
724 rpc.send(proto::UpdateBufferFile {
725 project_id,
726 buffer_id,
727 file: Some(new_file.to_proto()),
728 })
729 .log_err();
730 }
731
732 buffer_handle.update(&mut cx, |buffer, cx| {
733 if has_changed_file {
734 buffer.file_updated(new_file, cx).detach();
735 }
736 });
737 }
738
739 if let Some(project_id) = project_id {
740 rpc.send(proto::BufferSaved {
741 project_id,
742 buffer_id,
743 version: serialize_version(&version),
744 mtime: Some(entry.mtime.into()),
745 fingerprint: serialize_fingerprint(fingerprint),
746 })?;
747 }
748
749 buffer_handle.update(&mut cx, |buffer, cx| {
750 buffer.did_save(version.clone(), fingerprint, entry.mtime, cx);
751 });
752
753 Ok((version, fingerprint, entry.mtime))
754 })
755 }
756
757 pub fn create_entry(
758 &self,
759 path: impl Into<Arc<Path>>,
760 is_dir: bool,
761 cx: &mut ModelContext<Worktree>,
762 ) -> Task<Result<Entry>> {
763 let path = path.into();
764 let abs_path = self.absolutize(&path);
765 let fs = self.fs.clone();
766 let write = cx.background().spawn(async move {
767 if is_dir {
768 fs.create_dir(&abs_path).await
769 } else {
770 fs.save(&abs_path, &Default::default(), Default::default())
771 .await
772 }
773 });
774
775 cx.spawn(|this, mut cx| async move {
776 write.await?;
777 this.update(&mut cx, |this, cx| {
778 this.as_local_mut().unwrap().refresh_entry(path, None, cx)
779 })
780 .await
781 })
782 }
783
784 pub fn write_file(
785 &self,
786 path: impl Into<Arc<Path>>,
787 text: Rope,
788 line_ending: LineEnding,
789 cx: &mut ModelContext<Worktree>,
790 ) -> Task<Result<Entry>> {
791 let path = path.into();
792 let abs_path = self.absolutize(&path);
793 let fs = self.fs.clone();
794 let write = cx
795 .background()
796 .spawn(async move { fs.save(&abs_path, &text, line_ending).await });
797
798 cx.spawn(|this, mut cx| async move {
799 write.await?;
800 this.update(&mut cx, |this, cx| {
801 this.as_local_mut().unwrap().refresh_entry(path, None, cx)
802 })
803 .await
804 })
805 }
806
807 pub fn delete_entry(
808 &self,
809 entry_id: ProjectEntryId,
810 cx: &mut ModelContext<Worktree>,
811 ) -> Option<Task<Result<()>>> {
812 let entry = self.entry_for_id(entry_id)?.clone();
813 let abs_path = self.abs_path.clone();
814 let fs = self.fs.clone();
815
816 let delete = cx.background().spawn(async move {
817 let mut abs_path = fs.canonicalize(&abs_path).await?;
818 if entry.path.file_name().is_some() {
819 abs_path = abs_path.join(&entry.path);
820 }
821 if entry.is_file() {
822 fs.remove_file(&abs_path, Default::default()).await?;
823 } else {
824 fs.remove_dir(
825 &abs_path,
826 RemoveOptions {
827 recursive: true,
828 ignore_if_not_exists: false,
829 },
830 )
831 .await?;
832 }
833 anyhow::Ok(abs_path)
834 });
835
836 Some(cx.spawn(|this, mut cx| async move {
837 let abs_path = delete.await?;
838 let (tx, mut rx) = barrier::channel();
839 this.update(&mut cx, |this, _| {
840 this.as_local_mut()
841 .unwrap()
842 .path_changes_tx
843 .try_send((vec![abs_path], tx))
844 .unwrap();
845 });
846 rx.recv().await;
847 Ok(())
848 }))
849 }
850
851 pub fn rename_entry(
852 &self,
853 entry_id: ProjectEntryId,
854 new_path: impl Into<Arc<Path>>,
855 cx: &mut ModelContext<Worktree>,
856 ) -> Option<Task<Result<Entry>>> {
857 let old_path = self.entry_for_id(entry_id)?.path.clone();
858 let new_path = new_path.into();
859 let abs_old_path = self.absolutize(&old_path);
860 let abs_new_path = self.absolutize(&new_path);
861 let fs = self.fs.clone();
862 let rename = cx.background().spawn(async move {
863 fs.rename(&abs_old_path, &abs_new_path, Default::default())
864 .await
865 });
866
867 Some(cx.spawn(|this, mut cx| async move {
868 rename.await?;
869 this.update(&mut cx, |this, cx| {
870 this.as_local_mut()
871 .unwrap()
872 .refresh_entry(new_path.clone(), Some(old_path), cx)
873 })
874 .await
875 }))
876 }
877
878 pub fn copy_entry(
879 &self,
880 entry_id: ProjectEntryId,
881 new_path: impl Into<Arc<Path>>,
882 cx: &mut ModelContext<Worktree>,
883 ) -> Option<Task<Result<Entry>>> {
884 let old_path = self.entry_for_id(entry_id)?.path.clone();
885 let new_path = new_path.into();
886 let abs_old_path = self.absolutize(&old_path);
887 let abs_new_path = self.absolutize(&new_path);
888 let fs = self.fs.clone();
889 let copy = cx.background().spawn(async move {
890 copy_recursive(
891 fs.as_ref(),
892 &abs_old_path,
893 &abs_new_path,
894 Default::default(),
895 )
896 .await
897 });
898
899 Some(cx.spawn(|this, mut cx| async move {
900 copy.await?;
901 this.update(&mut cx, |this, cx| {
902 this.as_local_mut()
903 .unwrap()
904 .refresh_entry(new_path.clone(), None, cx)
905 })
906 .await
907 }))
908 }

    fn refresh_entry(
        &self,
        path: Arc<Path>,
        old_path: Option<Arc<Path>>,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<Entry>> {
        let fs = self.fs.clone();
        let abs_root_path = self.abs_path.clone();
        let path_changes_tx = self.path_changes_tx.clone();
        cx.spawn_weak(move |this, mut cx| async move {
            let abs_path = fs.canonicalize(&abs_root_path).await?;
            let mut paths = Vec::with_capacity(2);
            paths.push(if path.file_name().is_some() {
                abs_path.join(&path)
            } else {
                abs_path.clone()
            });
            if let Some(old_path) = old_path {
                paths.push(if old_path.file_name().is_some() {
                    abs_path.join(&old_path)
                } else {
                    abs_path.clone()
                });
            }

            let (tx, mut rx) = barrier::channel();
            path_changes_tx.try_send((paths, tx)).unwrap();
            rx.recv().await;
            this.upgrade(&cx)
                .ok_or_else(|| anyhow!("worktree was dropped"))?
                .update(&mut cx, |this, _| {
                    this.entry_for_path(path)
                        .cloned()
                        .ok_or_else(|| anyhow!("failed to read path after update"))
                })
        })
    }
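
    // How the handshake above works (sketch): the affected absolute paths are
    // sent to the background scanner over `path_changes_tx` together with a
    // `barrier::Sender`. Once the scanner has rescanned those paths it drops
    // the sender, which wakes `rx.recv().await` here, so the snapshot already
    // reflects the refreshed entry (or its absence) before we look it up.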
947
948 pub fn share(&mut self, project_id: u64, cx: &mut ModelContext<Worktree>) -> Task<Result<()>> {
949 let (share_tx, share_rx) = oneshot::channel();
950
951 if let Some(share) = self.share.as_mut() {
952 let _ = share_tx.send(());
953 *share.resume_updates.borrow_mut() = ();
954 } else {
955 let (snapshots_tx, mut snapshots_rx) = watch::channel_with(self.snapshot());
956 let (resume_updates_tx, mut resume_updates_rx) = watch::channel();
957 let worktree_id = cx.model_id() as u64;
958
959 for (path, summary) in self.diagnostic_summaries.iter() {
960 if let Err(e) = self.client.send(proto::UpdateDiagnosticSummary {
961 project_id,
962 worktree_id,
963 summary: Some(summary.to_proto(&path.0)),
964 }) {
965 return Task::ready(Err(e));
966 }
967 }
968
969 let _maintain_remote_snapshot = cx.background().spawn({
970 let client = self.client.clone();
971 async move {
972 let mut share_tx = Some(share_tx);
973 let mut prev_snapshot = LocalSnapshot {
974 ignores_by_parent_abs_path: Default::default(),
975 git_repositories: Default::default(),
976 removed_entry_ids: Default::default(),
977 next_entry_id: Default::default(),
978 snapshot: Snapshot {
979 id: WorktreeId(worktree_id as usize),
980 abs_path: Path::new("").into(),
981 root_name: Default::default(),
982 root_char_bag: Default::default(),
983 entries_by_path: Default::default(),
984 entries_by_id: Default::default(),
985 scan_id: 0,
986 completed_scan_id: 0,
987 },
988 };
989 while let Some(snapshot) = snapshots_rx.recv().await {
990 #[cfg(any(test, feature = "test-support"))]
991 const MAX_CHUNK_SIZE: usize = 2;
992 #[cfg(not(any(test, feature = "test-support")))]
993 const MAX_CHUNK_SIZE: usize = 256;
994
995 let update =
996 snapshot.build_update(&prev_snapshot, project_id, worktree_id, true);
997 for update in proto::split_worktree_update(update, MAX_CHUNK_SIZE) {
998 let _ = resume_updates_rx.try_recv();
999 while let Err(error) = client.request(update.clone()).await {
1000 log::error!("failed to send worktree update: {}", error);
1001 log::info!("waiting to resume updates");
1002 if resume_updates_rx.next().await.is_none() {
1003 return Ok(());
1004 }
1005 }
1006 }
1007
1008 if let Some(share_tx) = share_tx.take() {
1009 let _ = share_tx.send(());
1010 }
1011
1012 prev_snapshot = snapshot;
1013 }
1014
1015 Ok::<_, anyhow::Error>(())
1016 }
1017 .log_err()
1018 });
1019
1020 self.share = Some(ShareState {
1021 project_id,
1022 snapshots_tx,
1023 resume_updates: resume_updates_tx,
1024 _maintain_remote_snapshot,
1025 });
1026 }
1027
1028 cx.foreground()
1029 .spawn(async move { share_rx.await.map_err(|_| anyhow!("share ended")) })
1030 }
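
    // Sketch of the update loop above: each new `LocalSnapshot` is diffed
    // against the previously sent one, and the resulting `UpdateWorktree` is
    // split into chunks of at most `MAX_CHUNK_SIZE` entries (2 in tests, 256
    // otherwise). Every chunk is retried until the request succeeds or the
    // `resume_updates` channel closes, so a flaky connection stalls worktree
    // updates rather than dropping them.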
1031
1032 pub fn unshare(&mut self) {
1033 self.share.take();
1034 }
1035
1036 pub fn is_shared(&self) -> bool {
1037 self.share.is_some()
1038 }
1039}
1040
1041impl RemoteWorktree {
1042 fn snapshot(&self) -> Snapshot {
1043 self.snapshot.clone()
1044 }
1045
1046 pub fn disconnected_from_host(&mut self) {
1047 self.updates_tx.take();
1048 self.snapshot_subscriptions.clear();
1049 self.disconnected = true;
1050 }
1051
1052 pub fn save_buffer(
1053 &self,
1054 buffer_handle: ModelHandle<Buffer>,
1055 cx: &mut ModelContext<Worktree>,
1056 ) -> Task<Result<(clock::Global, RopeFingerprint, SystemTime)>> {
1057 let buffer = buffer_handle.read(cx);
1058 let buffer_id = buffer.remote_id();
1059 let version = buffer.version();
1060 let rpc = self.client.clone();
1061 let project_id = self.project_id;
1062 cx.as_mut().spawn(|mut cx| async move {
1063 let response = rpc
1064 .request(proto::SaveBuffer {
1065 project_id,
1066 buffer_id,
1067 version: serialize_version(&version),
1068 })
1069 .await?;
1070 let version = deserialize_version(response.version);
1071 let fingerprint = deserialize_fingerprint(&response.fingerprint)?;
1072 let mtime = response
1073 .mtime
1074 .ok_or_else(|| anyhow!("missing mtime"))?
1075 .into();
1076
1077 buffer_handle.update(&mut cx, |buffer, cx| {
1078 buffer.did_save(version.clone(), fingerprint, mtime, cx);
1079 });
1080
1081 Ok((version, fingerprint, mtime))
1082 })
1083 }
1084
1085 pub fn update_from_remote(&mut self, update: proto::UpdateWorktree) {
1086 if let Some(updates_tx) = &self.updates_tx {
1087 updates_tx
1088 .unbounded_send(update)
1089 .expect("consumer runs to completion");
1090 }
1091 }

    fn observed_snapshot(&self, scan_id: usize) -> bool {
        self.completed_scan_id >= scan_id
    }

    fn wait_for_snapshot(&mut self, scan_id: usize) -> impl Future<Output = Result<()>> {
        let (tx, rx) = oneshot::channel();
        if self.observed_snapshot(scan_id) {
            let _ = tx.send(());
        } else if self.disconnected {
            drop(tx);
        } else {
            match self
                .snapshot_subscriptions
                .binary_search_by_key(&scan_id, |probe| probe.0)
            {
                Ok(ix) | Err(ix) => self.snapshot_subscriptions.insert(ix, (scan_id, tx)),
            }
        }

        async move {
            rx.await?;
            Ok(())
        }
    }
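
    // Sketch of the waiting protocol: subscriptions are kept sorted by scan id
    // (hence the `binary_search_by_key` above), and the snapshot-update loop in
    // `Worktree::remote` pops them from the front as soon as
    // `completed_scan_id` catches up. If the host disconnects, the sender is
    // dropped and the returned future resolves with an error.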
1117
1118 pub fn update_diagnostic_summary(
1119 &mut self,
1120 path: Arc<Path>,
1121 summary: &proto::DiagnosticSummary,
1122 ) {
1123 let summary = DiagnosticSummary {
1124 language_server_id: summary.language_server_id as usize,
1125 error_count: summary.error_count as usize,
1126 warning_count: summary.warning_count as usize,
1127 };
1128 if summary.is_empty() {
1129 self.diagnostic_summaries.remove(&PathKey(path));
1130 } else {
1131 self.diagnostic_summaries.insert(PathKey(path), summary);
1132 }
1133 }
1134
1135 pub fn insert_entry(
1136 &mut self,
1137 entry: proto::Entry,
1138 scan_id: usize,
1139 cx: &mut ModelContext<Worktree>,
1140 ) -> Task<Result<Entry>> {
1141 let wait_for_snapshot = self.wait_for_snapshot(scan_id);
1142 cx.spawn(|this, mut cx| async move {
1143 wait_for_snapshot.await?;
1144 this.update(&mut cx, |worktree, _| {
1145 let worktree = worktree.as_remote_mut().unwrap();
1146 let mut snapshot = worktree.background_snapshot.lock();
1147 let entry = snapshot.insert_entry(entry);
1148 worktree.snapshot = snapshot.clone();
1149 entry
1150 })
1151 })
1152 }
1153
1154 pub(crate) fn delete_entry(
1155 &mut self,
1156 id: ProjectEntryId,
1157 scan_id: usize,
1158 cx: &mut ModelContext<Worktree>,
1159 ) -> Task<Result<()>> {
1160 let wait_for_snapshot = self.wait_for_snapshot(scan_id);
1161 cx.spawn(|this, mut cx| async move {
1162 wait_for_snapshot.await?;
1163 this.update(&mut cx, |worktree, _| {
1164 let worktree = worktree.as_remote_mut().unwrap();
1165 let mut snapshot = worktree.background_snapshot.lock();
1166 snapshot.delete_entry(id);
1167 worktree.snapshot = snapshot.clone();
1168 });
1169 Ok(())
1170 })
1171 }
1172}
1173
1174impl Snapshot {
1175 pub fn id(&self) -> WorktreeId {
1176 self.id
1177 }
1178
1179 pub fn abs_path(&self) -> &Arc<Path> {
1180 &self.abs_path
1181 }
1182
1183 pub fn contains_entry(&self, entry_id: ProjectEntryId) -> bool {
1184 self.entries_by_id.get(&entry_id, &()).is_some()
1185 }
1186
1187 pub(crate) fn insert_entry(&mut self, entry: proto::Entry) -> Result<Entry> {
1188 let entry = Entry::try_from((&self.root_char_bag, entry))?;
1189 let old_entry = self.entries_by_id.insert_or_replace(
1190 PathEntry {
1191 id: entry.id,
1192 path: entry.path.clone(),
1193 is_ignored: entry.is_ignored,
1194 scan_id: 0,
1195 },
1196 &(),
1197 );
1198 if let Some(old_entry) = old_entry {
1199 self.entries_by_path.remove(&PathKey(old_entry.path), &());
1200 }
1201 self.entries_by_path.insert_or_replace(entry.clone(), &());
1202 Ok(entry)
1203 }

    fn delete_entry(&mut self, entry_id: ProjectEntryId) -> Option<Arc<Path>> {
        let removed_entry = self.entries_by_id.remove(&entry_id, &())?;
        self.entries_by_path = {
            let mut cursor = self.entries_by_path.cursor();
            let mut new_entries_by_path =
                cursor.slice(&TraversalTarget::Path(&removed_entry.path), Bias::Left, &());
            while let Some(entry) = cursor.item() {
                if entry.path.starts_with(&removed_entry.path) {
                    self.entries_by_id.remove(&entry.id, &());
                    cursor.next(&());
                } else {
                    break;
                }
            }
            new_entries_by_path.push_tree(cursor.suffix(&()), &());
            new_entries_by_path
        };

        Some(removed_entry.path)
    }
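
    // Worked example (sketch): because entries are ordered by path, deleting
    // the entry for `a` also sweeps up its descendants. With entries `a`,
    // `a/b`, `a/c.txt`, and `d`, removing `a` leaves only `d`: the cursor is
    // positioned at `a`, every following entry whose path starts with `a` is
    // dropped from both trees, and the remaining suffix (`d`) is appended.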
1225
1226 pub(crate) fn apply_remote_update(&mut self, update: proto::UpdateWorktree) -> Result<()> {
1227 let mut entries_by_path_edits = Vec::new();
1228 let mut entries_by_id_edits = Vec::new();
1229 for entry_id in update.removed_entries {
1230 let entry = self
1231 .entry_for_id(ProjectEntryId::from_proto(entry_id))
1232 .ok_or_else(|| anyhow!("unknown entry {}", entry_id))?;
1233 entries_by_path_edits.push(Edit::Remove(PathKey(entry.path.clone())));
1234 entries_by_id_edits.push(Edit::Remove(entry.id));
1235 }
1236
1237 for entry in update.updated_entries {
1238 let entry = Entry::try_from((&self.root_char_bag, entry))?;
1239 if let Some(PathEntry { path, .. }) = self.entries_by_id.get(&entry.id, &()) {
1240 entries_by_path_edits.push(Edit::Remove(PathKey(path.clone())));
1241 }
1242 entries_by_id_edits.push(Edit::Insert(PathEntry {
1243 id: entry.id,
1244 path: entry.path.clone(),
1245 is_ignored: entry.is_ignored,
1246 scan_id: 0,
1247 }));
1248 entries_by_path_edits.push(Edit::Insert(entry));
1249 }
1250
1251 self.entries_by_path.edit(entries_by_path_edits, &());
1252 self.entries_by_id.edit(entries_by_id_edits, &());
1253 self.scan_id = update.scan_id as usize;
1254 if update.is_last_update {
1255 self.completed_scan_id = update.scan_id as usize;
1256 }
1257
1258 Ok(())
1259 }
1260
1261 pub fn file_count(&self) -> usize {
1262 self.entries_by_path.summary().file_count
1263 }
1264
1265 pub fn visible_file_count(&self) -> usize {
1266 self.entries_by_path.summary().visible_file_count
1267 }
1268
1269 fn traverse_from_offset(
1270 &self,
1271 include_dirs: bool,
1272 include_ignored: bool,
1273 start_offset: usize,
1274 ) -> Traversal {
1275 let mut cursor = self.entries_by_path.cursor();
1276 cursor.seek(
1277 &TraversalTarget::Count {
1278 count: start_offset,
1279 include_dirs,
1280 include_ignored,
1281 },
1282 Bias::Right,
1283 &(),
1284 );
1285 Traversal {
1286 cursor,
1287 include_dirs,
1288 include_ignored,
1289 }
1290 }
1291
1292 fn traverse_from_path(
1293 &self,
1294 include_dirs: bool,
1295 include_ignored: bool,
1296 path: &Path,
1297 ) -> Traversal {
1298 let mut cursor = self.entries_by_path.cursor();
1299 cursor.seek(&TraversalTarget::Path(path), Bias::Left, &());
1300 Traversal {
1301 cursor,
1302 include_dirs,
1303 include_ignored,
1304 }
1305 }
1306
1307 pub fn files(&self, include_ignored: bool, start: usize) -> Traversal {
1308 self.traverse_from_offset(false, include_ignored, start)
1309 }
1310
1311 pub fn entries(&self, include_ignored: bool) -> Traversal {
1312 self.traverse_from_offset(true, include_ignored, 0)
1313 }
1314
1315 pub fn paths(&self) -> impl Iterator<Item = &Arc<Path>> {
1316 let empty_path = Path::new("");
1317 self.entries_by_path
1318 .cursor::<()>()
1319 .filter(move |entry| entry.path.as_ref() != empty_path)
1320 .map(|entry| &entry.path)
1321 }
1322
1323 fn child_entries<'a>(&'a self, parent_path: &'a Path) -> ChildEntriesIter<'a> {
1324 let mut cursor = self.entries_by_path.cursor();
1325 cursor.seek(&TraversalTarget::Path(parent_path), Bias::Right, &());
1326 let traversal = Traversal {
1327 cursor,
1328 include_dirs: true,
1329 include_ignored: true,
1330 };
1331 ChildEntriesIter {
1332 traversal,
1333 parent_path,
1334 }
1335 }
1336
1337 pub fn root_entry(&self) -> Option<&Entry> {
1338 self.entry_for_path("")
1339 }
1340
1341 pub fn root_name(&self) -> &str {
1342 &self.root_name
1343 }
1344
1345 pub fn scan_started(&mut self) {
1346 self.scan_id += 1;
1347 }
1348
1349 pub fn scan_completed(&mut self) {
1350 self.completed_scan_id = self.scan_id;
1351 }
1352
1353 pub fn scan_id(&self) -> usize {
1354 self.scan_id
1355 }
1356
1357 pub fn entry_for_path(&self, path: impl AsRef<Path>) -> Option<&Entry> {
1358 let path = path.as_ref();
1359 self.traverse_from_path(true, true, path)
1360 .entry()
1361 .and_then(|entry| {
1362 if entry.path.as_ref() == path {
1363 Some(entry)
1364 } else {
1365 None
1366 }
1367 })
1368 }
1369
1370 pub fn entry_for_id(&self, id: ProjectEntryId) -> Option<&Entry> {
1371 let entry = self.entries_by_id.get(&id, &())?;
1372 self.entry_for_path(&entry.path)
1373 }
1374
1375 pub fn inode_for_path(&self, path: impl AsRef<Path>) -> Option<u64> {
1376 self.entry_for_path(path.as_ref()).map(|e| e.inode)
1377 }
1378}
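
// Traversal usage sketch: `files(false, 0)` walks non-ignored files from the
// beginning, while `entries(true)` includes directories and ignored entries.
// Assuming a `snapshot: &Snapshot` in scope and that the returned `Traversal`
// can be iterated:
//
//     for entry in snapshot.files(false, 0) {
//         println!("{}", entry.path.display());
//     }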
1379
impl LocalSnapshot {
    /// Gives the most specific git repository for the given path.
    pub(crate) fn repo_for(&self, path: &Path) -> Option<GitRepositoryEntry> {
        self.git_repositories
            .iter()
            .rev() // `git_repositories` is ordered lexicographically by content path
            .find(|repo| repo.manages(path))
            .cloned()
    }

    pub(crate) fn repo_with_dot_git_containing(
        &mut self,
        path: &Path,
    ) -> Option<&mut GitRepositoryEntry> {
        // Git repositories cannot be nested, so we don't need to reverse the order
        self.git_repositories
            .iter_mut()
            .find(|repo| repo.in_dot_git(path))
    }
1399
1400 #[cfg(test)]
1401 pub(crate) fn build_initial_update(&self, project_id: u64) -> proto::UpdateWorktree {
1402 let root_name = self.root_name.clone();
1403 proto::UpdateWorktree {
1404 project_id,
1405 worktree_id: self.id().to_proto(),
1406 abs_path: self.abs_path().to_string_lossy().into(),
1407 root_name,
1408 updated_entries: self.entries_by_path.iter().map(Into::into).collect(),
1409 removed_entries: Default::default(),
1410 scan_id: self.scan_id as u64,
1411 is_last_update: true,
1412 }
1413 }
1414
1415 pub(crate) fn build_update(
1416 &self,
1417 other: &Self,
1418 project_id: u64,
1419 worktree_id: u64,
1420 include_ignored: bool,
1421 ) -> proto::UpdateWorktree {
1422 let mut updated_entries = Vec::new();
1423 let mut removed_entries = Vec::new();
1424 let mut self_entries = self
1425 .entries_by_id
1426 .cursor::<()>()
1427 .filter(|e| include_ignored || !e.is_ignored)
1428 .peekable();
1429 let mut other_entries = other
1430 .entries_by_id
1431 .cursor::<()>()
1432 .filter(|e| include_ignored || !e.is_ignored)
1433 .peekable();
1434 loop {
1435 match (self_entries.peek(), other_entries.peek()) {
1436 (Some(self_entry), Some(other_entry)) => {
1437 match Ord::cmp(&self_entry.id, &other_entry.id) {
1438 Ordering::Less => {
1439 let entry = self.entry_for_id(self_entry.id).unwrap().into();
1440 updated_entries.push(entry);
1441 self_entries.next();
1442 }
1443 Ordering::Equal => {
1444 if self_entry.scan_id != other_entry.scan_id {
1445 let entry = self.entry_for_id(self_entry.id).unwrap().into();
1446 updated_entries.push(entry);
1447 }
1448
1449 self_entries.next();
1450 other_entries.next();
1451 }
1452 Ordering::Greater => {
1453 removed_entries.push(other_entry.id.to_proto());
1454 other_entries.next();
1455 }
1456 }
1457 }
1458 (Some(self_entry), None) => {
1459 let entry = self.entry_for_id(self_entry.id).unwrap().into();
1460 updated_entries.push(entry);
1461 self_entries.next();
1462 }
1463 (None, Some(other_entry)) => {
1464 removed_entries.push(other_entry.id.to_proto());
1465 other_entries.next();
1466 }
1467 (None, None) => break,
1468 }
1469 }
1470
1471 proto::UpdateWorktree {
1472 project_id,
1473 worktree_id,
1474 abs_path: self.abs_path().to_string_lossy().into(),
1475 root_name: self.root_name().to_string(),
1476 updated_entries,
1477 removed_entries,
1478 scan_id: self.scan_id as u64,
1479 is_last_update: self.completed_scan_id == self.scan_id,
1480 }
1481 }
1482
1483 fn insert_entry(&mut self, mut entry: Entry, fs: &dyn Fs) -> Entry {
1484 if entry.is_file() && entry.path.file_name() == Some(&GITIGNORE) {
1485 let abs_path = self.abs_path.join(&entry.path);
1486 match smol::block_on(build_gitignore(&abs_path, fs)) {
1487 Ok(ignore) => {
1488 self.ignores_by_parent_abs_path.insert(
1489 abs_path.parent().unwrap().into(),
1490 (Arc::new(ignore), self.scan_id),
1491 );
1492 }
1493 Err(error) => {
1494 log::error!(
1495 "error loading .gitignore file {:?} - {:?}",
1496 &entry.path,
1497 error
1498 );
1499 }
1500 }
1501 }
1502
1503 self.reuse_entry_id(&mut entry);
1504
1505 if entry.kind == EntryKind::PendingDir {
1506 if let Some(existing_entry) =
1507 self.entries_by_path.get(&PathKey(entry.path.clone()), &())
1508 {
1509 entry.kind = existing_entry.kind;
1510 }
1511 }
1512
1513 let scan_id = self.scan_id;
1514 self.entries_by_path.insert_or_replace(entry.clone(), &());
1515 self.entries_by_id.insert_or_replace(
1516 PathEntry {
1517 id: entry.id,
1518 path: entry.path.clone(),
1519 is_ignored: entry.is_ignored,
1520 scan_id,
1521 },
1522 &(),
1523 );
1524
1525 entry
1526 }
1527
1528 fn populate_dir(
1529 &mut self,
1530 parent_path: Arc<Path>,
1531 entries: impl IntoIterator<Item = Entry>,
1532 ignore: Option<Arc<Gitignore>>,
1533 fs: &dyn Fs,
1534 ) {
1535 let mut parent_entry = if let Some(parent_entry) =
1536 self.entries_by_path.get(&PathKey(parent_path.clone()), &())
1537 {
1538 parent_entry.clone()
1539 } else {
1540 log::warn!(
1541 "populating a directory {:?} that has been removed",
1542 parent_path
1543 );
1544 return;
1545 };
1546
1547 if let Some(ignore) = ignore {
1548 self.ignores_by_parent_abs_path.insert(
1549 self.abs_path.join(&parent_path).into(),
1550 (ignore, self.scan_id),
1551 );
1552 }
1553 if matches!(parent_entry.kind, EntryKind::PendingDir) {
1554 parent_entry.kind = EntryKind::Dir;
1555 } else {
1556 unreachable!();
1557 }
1558
1559 if parent_path.file_name() == Some(&DOT_GIT) {
1560 let abs_path = self.abs_path.join(&parent_path);
1561 let content_path: Arc<Path> = parent_path.parent().unwrap().into();
1562 if let Err(ix) = self
1563 .git_repositories
1564 .binary_search_by_key(&&content_path, |repo| &repo.content_path)
1565 {
1566 if let Some(repo) = fs.open_repo(abs_path.as_path()) {
1567 self.git_repositories.insert(
1568 ix,
1569 GitRepositoryEntry {
1570 repo,
1571 scan_id: 0,
1572 content_path,
1573 git_dir_path: parent_path,
1574 },
1575 );
1576 }
1577 }
1578 }
1579
1580 let mut entries_by_path_edits = vec![Edit::Insert(parent_entry)];
1581 let mut entries_by_id_edits = Vec::new();
1582
1583 for mut entry in entries {
1584 self.reuse_entry_id(&mut entry);
1585 entries_by_id_edits.push(Edit::Insert(PathEntry {
1586 id: entry.id,
1587 path: entry.path.clone(),
1588 is_ignored: entry.is_ignored,
1589 scan_id: self.scan_id,
1590 }));
1591 entries_by_path_edits.push(Edit::Insert(entry));
1592 }
1593
1594 self.entries_by_path.edit(entries_by_path_edits, &());
1595 self.entries_by_id.edit(entries_by_id_edits, &());
1596 }
1597
1598 fn reuse_entry_id(&mut self, entry: &mut Entry) {
1599 if let Some(removed_entry_id) = self.removed_entry_ids.remove(&entry.inode) {
1600 entry.id = removed_entry_id;
1601 } else if let Some(existing_entry) = self.entry_for_path(&entry.path) {
1602 entry.id = existing_entry.id;
1603 }
1604 }
1605
1606 fn remove_path(&mut self, path: &Path) {
1607 let mut new_entries;
1608 let removed_entries;
1609 {
1610 let mut cursor = self.entries_by_path.cursor::<TraversalProgress>();
1611 new_entries = cursor.slice(&TraversalTarget::Path(path), Bias::Left, &());
1612 removed_entries = cursor.slice(&TraversalTarget::PathSuccessor(path), Bias::Left, &());
1613 new_entries.push_tree(cursor.suffix(&()), &());
1614 }
1615 self.entries_by_path = new_entries;
1616
1617 let mut entries_by_id_edits = Vec::new();
1618 for entry in removed_entries.cursor::<()>() {
1619 let removed_entry_id = self
1620 .removed_entry_ids
1621 .entry(entry.inode)
1622 .or_insert(entry.id);
1623 *removed_entry_id = cmp::max(*removed_entry_id, entry.id);
1624 entries_by_id_edits.push(Edit::Remove(entry.id));
1625 }
1626 self.entries_by_id.edit(entries_by_id_edits, &());
1627
1628 if path.file_name() == Some(&GITIGNORE) {
1629 let abs_parent_path = self.abs_path.join(path.parent().unwrap());
1630 if let Some((_, scan_id)) = self
1631 .ignores_by_parent_abs_path
1632 .get_mut(abs_parent_path.as_path())
1633 {
1634 *scan_id = self.snapshot.scan_id;
1635 }
1636 } else if path.file_name() == Some(&DOT_GIT) {
1637 let parent_path = path.parent().unwrap();
1638 if let Ok(ix) = self
1639 .git_repositories
1640 .binary_search_by_key(&parent_path, |repo| repo.git_dir_path.as_ref())
1641 {
1642 self.git_repositories[ix].scan_id = self.snapshot.scan_id;
1643 }
1644 }
1645 }
1646
1647 fn ancestor_inodes_for_path(&self, path: &Path) -> TreeSet<u64> {
1648 let mut inodes = TreeSet::default();
1649 for ancestor in path.ancestors().skip(1) {
1650 if let Some(entry) = self.entry_for_path(ancestor) {
1651 inodes.insert(entry.inode);
1652 }
1653 }
1654 inodes
1655 }

    fn ignore_stack_for_abs_path(&self, abs_path: &Path, is_dir: bool) -> Arc<IgnoreStack> {
        let mut new_ignores = Vec::new();
        for ancestor in abs_path.ancestors().skip(1) {
            if let Some((ignore, _)) = self.ignores_by_parent_abs_path.get(ancestor) {
                new_ignores.push((ancestor, Some(ignore.clone())));
            } else {
                new_ignores.push((ancestor, None));
            }
        }

        let mut ignore_stack = IgnoreStack::none();
        for (parent_abs_path, ignore) in new_ignores.into_iter().rev() {
            if ignore_stack.is_abs_path_ignored(parent_abs_path, true) {
                ignore_stack = IgnoreStack::all();
                break;
            } else if let Some(ignore) = ignore {
                ignore_stack = ignore_stack.append(parent_abs_path.into(), ignore);
            }
        }

        if ignore_stack.is_abs_path_ignored(abs_path, is_dir) {
            ignore_stack = IgnoreStack::all();
        }

        ignore_stack
    }
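
    // Worked example (sketch): for `abs_path` = `/root/a/b/file.txt`, the
    // ancestors `/root/a/b`, `/root/a`, `/root`, ... are checked for a loaded
    // `.gitignore`, outermost first. If some ancestor directory is itself
    // ignored by an outer ignore file, the whole stack collapses to
    // `IgnoreStack::all()`; otherwise each ignore file is appended so that the
    // innermost `.gitignore` takes precedence for the final check on
    // `abs_path` itself.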
1683
1684 pub fn git_repo_entries(&self) -> &[GitRepositoryEntry] {
1685 &self.git_repositories
1686 }
1687}
1688
impl GitRepositoryEntry {
    // Note that the given path should be relative to the worktree root.
    pub(crate) fn manages(&self, path: &Path) -> bool {
        path.starts_with(self.content_path.as_ref())
    }

    // Note that the given path should be relative to the worktree root.
    pub(crate) fn in_dot_git(&self, path: &Path) -> bool {
        path.starts_with(self.git_dir_path.as_ref())
    }
}
1700
1701async fn build_gitignore(abs_path: &Path, fs: &dyn Fs) -> Result<Gitignore> {
1702 let contents = fs.load(abs_path).await?;
1703 let parent = abs_path.parent().unwrap_or_else(|| Path::new("/"));
1704 let mut builder = GitignoreBuilder::new(parent);
1705 for line in contents.lines() {
1706 builder.add_line(Some(abs_path.into()), line)?;
1707 }
1708 Ok(builder.build()?)
1709}
1710
1711impl WorktreeId {
1712 pub fn from_usize(handle_id: usize) -> Self {
1713 Self(handle_id)
1714 }
1715
1716 pub(crate) fn from_proto(id: u64) -> Self {
1717 Self(id as usize)
1718 }
1719
1720 pub fn to_proto(&self) -> u64 {
1721 self.0 as u64
1722 }
1723
1724 pub fn to_usize(&self) -> usize {
1725 self.0
1726 }
1727}
1728
1729impl fmt::Display for WorktreeId {
1730 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1731 self.0.fmt(f)
1732 }
1733}
1734
1735impl Deref for Worktree {
1736 type Target = Snapshot;
1737
1738 fn deref(&self) -> &Self::Target {
1739 match self {
1740 Worktree::Local(worktree) => &worktree.snapshot,
1741 Worktree::Remote(worktree) => &worktree.snapshot,
1742 }
1743 }
1744}
1745
1746impl Deref for LocalWorktree {
1747 type Target = LocalSnapshot;
1748
1749 fn deref(&self) -> &Self::Target {
1750 &self.snapshot
1751 }
1752}
1753
1754impl Deref for RemoteWorktree {
1755 type Target = Snapshot;
1756
1757 fn deref(&self) -> &Self::Target {
1758 &self.snapshot
1759 }
1760}
1761
1762impl fmt::Debug for LocalWorktree {
1763 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1764 self.snapshot.fmt(f)
1765 }
1766}
1767
1768impl fmt::Debug for Snapshot {
1769 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1770 struct EntriesById<'a>(&'a SumTree<PathEntry>);
1771 struct EntriesByPath<'a>(&'a SumTree<Entry>);
1772
1773 impl<'a> fmt::Debug for EntriesByPath<'a> {
1774 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1775 f.debug_map()
1776 .entries(self.0.iter().map(|entry| (&entry.path, entry.id)))
1777 .finish()
1778 }
1779 }
1780
1781 impl<'a> fmt::Debug for EntriesById<'a> {
1782 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1783 f.debug_list().entries(self.0.iter()).finish()
1784 }
1785 }
1786
1787 f.debug_struct("Snapshot")
1788 .field("id", &self.id)
1789 .field("root_name", &self.root_name)
1790 .field("entries_by_path", &EntriesByPath(&self.entries_by_path))
1791 .field("entries_by_id", &EntriesById(&self.entries_by_id))
1792 .finish()
1793 }
1794}
1795
1796#[derive(Clone, PartialEq)]
1797pub struct File {
1798 pub worktree: ModelHandle<Worktree>,
1799 pub path: Arc<Path>,
1800 pub mtime: SystemTime,
1801 pub(crate) entry_id: ProjectEntryId,
1802 pub(crate) is_local: bool,
1803 pub(crate) is_deleted: bool,
1804}
1805
1806impl language::File for File {
1807 fn as_local(&self) -> Option<&dyn language::LocalFile> {
1808 if self.is_local {
1809 Some(self)
1810 } else {
1811 None
1812 }
1813 }
1814
1815 fn mtime(&self) -> SystemTime {
1816 self.mtime
1817 }
1818
1819 fn path(&self) -> &Arc<Path> {
1820 &self.path
1821 }

    fn full_path(&self, cx: &AppContext) -> PathBuf {
        let mut full_path = PathBuf::new();
        let worktree = self.worktree.read(cx);

        if worktree.is_visible() {
            full_path.push(worktree.root_name());
        } else {
            let path = worktree.abs_path();

            if worktree.is_local() && path.starts_with(HOME.as_path()) {
                full_path.push("~");
                full_path.push(path.strip_prefix(HOME.as_path()).unwrap());
            } else {
                full_path.push(path)
            }
        }

        if self.path.components().next().is_some() {
            full_path.push(&self.path);
        }

        full_path
    }
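
    // Example outputs (sketch): for a visible worktree rooted at
    // `/Users/someone/project` and a file at `src/main.rs`, this returns
    // `project/src/main.rs`. For a non-visible local worktree under the home
    // directory, the absolute root is used instead, abbreviated with `~`,
    // e.g. `~/project/src/main.rs`.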
1846
1847 /// Returns the last component of this handle's absolute path. If this handle refers to the root
1848 /// of its worktree, then this method will return the name of the worktree itself.
1849 fn file_name<'a>(&'a self, cx: &'a AppContext) -> &'a OsStr {
1850 self.path
1851 .file_name()
1852 .unwrap_or_else(|| OsStr::new(&self.worktree.read(cx).root_name))
1853 }
1854
1855 fn is_deleted(&self) -> bool {
1856 self.is_deleted
1857 }
1858
1859 fn as_any(&self) -> &dyn Any {
1860 self
1861 }
1862
1863 fn to_proto(&self) -> rpc::proto::File {
1864 rpc::proto::File {
1865 worktree_id: self.worktree.id() as u64,
1866 entry_id: self.entry_id.to_proto(),
1867 path: self.path.to_string_lossy().into(),
1868 mtime: Some(self.mtime.into()),
1869 is_deleted: self.is_deleted,
1870 }
1871 }
1872}
1873
1874impl language::LocalFile for File {
1875 fn abs_path(&self, cx: &AppContext) -> PathBuf {
1876 self.worktree
1877 .read(cx)
1878 .as_local()
1879 .unwrap()
1880 .abs_path
1881 .join(&self.path)
1882 }
1883
1884 fn load(&self, cx: &AppContext) -> Task<Result<String>> {
1885 let worktree = self.worktree.read(cx).as_local().unwrap();
1886 let abs_path = worktree.absolutize(&self.path);
1887 let fs = worktree.fs.clone();
1888 cx.background()
1889 .spawn(async move { fs.load(&abs_path).await })
1890 }
1891
1892 fn buffer_reloaded(
1893 &self,
1894 buffer_id: u64,
1895 version: &clock::Global,
1896 fingerprint: RopeFingerprint,
1897 line_ending: LineEnding,
1898 mtime: SystemTime,
1899 cx: &mut MutableAppContext,
1900 ) {
1901 let worktree = self.worktree.read(cx).as_local().unwrap();
1902 if let Some(project_id) = worktree.share.as_ref().map(|share| share.project_id) {
1903 worktree
1904 .client
1905 .send(proto::BufferReloaded {
1906 project_id,
1907 buffer_id,
1908 version: serialize_version(version),
1909 mtime: Some(mtime.into()),
1910 fingerprint: serialize_fingerprint(fingerprint),
1911 line_ending: serialize_line_ending(line_ending) as i32,
1912 })
1913 .log_err();
1914 }
1915 }
1916}
1917
1918impl File {
1919 pub fn from_proto(
1920 proto: rpc::proto::File,
1921 worktree: ModelHandle<Worktree>,
1922 cx: &AppContext,
1923 ) -> Result<Self> {
1924 let worktree_id = worktree
1925 .read(cx)
1926 .as_remote()
1927 .ok_or_else(|| anyhow!("not remote"))?
1928 .id();
1929
1930 if worktree_id.to_proto() != proto.worktree_id {
1931 return Err(anyhow!("worktree id does not match file"));
1932 }
1933
1934 Ok(Self {
1935 worktree,
1936 path: Path::new(&proto.path).into(),
1937 mtime: proto.mtime.ok_or_else(|| anyhow!("no timestamp"))?.into(),
1938 entry_id: ProjectEntryId::from_proto(proto.entry_id),
1939 is_local: false,
1940 is_deleted: proto.is_deleted,
1941 })
1942 }
1943
1944 pub fn from_dyn(file: Option<&Arc<dyn language::File>>) -> Option<&Self> {
1945 file.and_then(|f| f.as_any().downcast_ref())
1946 }
1947
1948 pub fn worktree_id(&self, cx: &AppContext) -> WorktreeId {
1949 self.worktree.read(cx).id()
1950 }
1951
1952 pub fn project_entry_id(&self, _: &AppContext) -> Option<ProjectEntryId> {
1953 if self.is_deleted {
1954 None
1955 } else {
1956 Some(self.entry_id)
1957 }
1958 }
1959}
1960
1961#[derive(Clone, Debug, PartialEq, Eq)]
1962pub struct Entry {
1963 pub id: ProjectEntryId,
1964 pub kind: EntryKind,
1965 pub path: Arc<Path>,
1966 pub inode: u64,
1967 pub mtime: SystemTime,
1968 pub is_symlink: bool,
1969 pub is_ignored: bool,
1970}
1971
1972#[derive(Clone, Copy, Debug, PartialEq, Eq)]
1973pub enum EntryKind {
1974 PendingDir,
1975 Dir,
1976 File(CharBag),
1977}
1978
1979#[derive(Clone, Copy, Debug)]
1980pub enum PathChange {
1981 Added,
1982 Removed,
1983 Updated,
1984 AddedOrUpdated,
1985}
1986
1987impl Entry {
1988 fn new(
1989 path: Arc<Path>,
1990 metadata: &fs::Metadata,
1991 next_entry_id: &AtomicUsize,
1992 root_char_bag: CharBag,
1993 ) -> Self {
1994 Self {
1995 id: ProjectEntryId::new(next_entry_id),
1996 kind: if metadata.is_dir {
1997 EntryKind::PendingDir
1998 } else {
1999 EntryKind::File(char_bag_for_path(root_char_bag, &path))
2000 },
2001 path,
2002 inode: metadata.inode,
2003 mtime: metadata.mtime,
2004 is_symlink: metadata.is_symlink,
2005 is_ignored: false,
2006 }
2007 }
2008
2009 pub fn is_dir(&self) -> bool {
2010 matches!(self.kind, EntryKind::Dir | EntryKind::PendingDir)
2011 }
2012
2013 pub fn is_file(&self) -> bool {
2014 matches!(self.kind, EntryKind::File(_))
2015 }
2016}

impl sum_tree::Item for Entry {
    type Summary = EntrySummary;

    fn summary(&self) -> Self::Summary {
        let visible_count = if self.is_ignored { 0 } else { 1 };
        let file_count;
        let visible_file_count;
        if self.is_file() {
            file_count = 1;
            visible_file_count = visible_count;
        } else {
            file_count = 0;
            visible_file_count = 0;
        }

        EntrySummary {
            max_path: self.path.clone(),
            count: 1,
            visible_count,
            file_count,
            visible_file_count,
        }
    }
}
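
// Worked example (sketch): summing the per-entry summaries above, a tree with
// entries `""` (the root directory), `src` (a directory), `src/main.rs`
// (a file), and `src/generated.rs` (an ignored file) aggregates to count = 4,
// visible_count = 3, file_count = 2, and visible_file_count = 1, which is what
// `Snapshot::file_count` and `Snapshot::visible_file_count` read off the root
// summary.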
2042
2043impl sum_tree::KeyedItem for Entry {
2044 type Key = PathKey;
2045
2046 fn key(&self) -> Self::Key {
2047 PathKey(self.path.clone())
2048 }
2049}
2050
2051#[derive(Clone, Debug)]
2052pub struct EntrySummary {
2053 max_path: Arc<Path>,
2054 count: usize,
2055 visible_count: usize,
2056 file_count: usize,
2057 visible_file_count: usize,
2058}
2059
2060impl Default for EntrySummary {
2061 fn default() -> Self {
2062 Self {
2063 max_path: Arc::from(Path::new("")),
2064 count: 0,
2065 visible_count: 0,
2066 file_count: 0,
2067 visible_file_count: 0,
2068 }
2069 }
2070}
2071
2072impl sum_tree::Summary for EntrySummary {
2073 type Context = ();
2074
2075 fn add_summary(&mut self, rhs: &Self, _: &()) {
2076 self.max_path = rhs.max_path.clone();
2077 self.count += rhs.count;
2078 self.visible_count += rhs.visible_count;
2079 self.file_count += rhs.file_count;
2080 self.visible_file_count += rhs.visible_file_count;
2081 }
2082}
2083
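/// An entry in the `entries_by_id` tree, which indexes entries by `ProjectEntryId`
/// so that an entry's path and ignore status can be looked up by id.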
2084#[derive(Clone, Debug)]
2085struct PathEntry {
2086 id: ProjectEntryId,
2087 path: Arc<Path>,
2088 is_ignored: bool,
2089 scan_id: usize,
2090}
2091
2092impl sum_tree::Item for PathEntry {
2093 type Summary = PathEntrySummary;
2094
2095 fn summary(&self) -> Self::Summary {
2096 PathEntrySummary { max_id: self.id }
2097 }
2098}
2099
2100impl sum_tree::KeyedItem for PathEntry {
2101 type Key = ProjectEntryId;
2102
2103 fn key(&self) -> Self::Key {
2104 self.id
2105 }
2106}
2107
2108#[derive(Clone, Debug, Default)]
2109struct PathEntrySummary {
2110 max_id: ProjectEntryId,
2111}
2112
2113impl sum_tree::Summary for PathEntrySummary {
2114 type Context = ();
2115
2116 fn add_summary(&mut self, summary: &Self, _: &Self::Context) {
2117 self.max_id = summary.max_id;
2118 }
2119}
2120
2121impl<'a> sum_tree::Dimension<'a, PathEntrySummary> for ProjectEntryId {
2122 fn add_summary(&mut self, summary: &'a PathEntrySummary, _: &()) {
2123 *self = summary.max_id;
2124 }
2125}
2126
2127#[derive(Clone, Debug, Eq, PartialEq, Ord, PartialOrd)]
2128pub struct PathKey(Arc<Path>);
2129
2130impl Default for PathKey {
2131 fn default() -> Self {
2132 Self(Path::new("").into())
2133 }
2134}
2135
2136impl<'a> sum_tree::Dimension<'a, EntrySummary> for PathKey {
2137 fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
2138 self.0 = summary.max_path.clone();
2139 }
2140}
2141
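/// Scans a worktree's directory on the background executor, keeping a `LocalSnapshot`
/// up to date and reporting progress to the worktree as `ScanState` values over the
/// `notify` channel.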
2142struct BackgroundScanner {
2143 fs: Arc<dyn Fs>,
2144 snapshot: Mutex<LocalSnapshot>,
2145 notify: UnboundedSender<ScanState>,
2146 executor: Arc<executor::Background>,
2147}
2148
2149impl BackgroundScanner {
2150 fn new(
2151 snapshot: LocalSnapshot,
2152 notify: UnboundedSender<ScanState>,
2153 fs: Arc<dyn Fs>,
2154 executor: Arc<executor::Background>,
2155 ) -> Self {
2156 Self {
2157 fs,
2158 snapshot: Mutex::new(snapshot),
2159 notify,
2160 executor,
2161 }
2162 }
2163
2164 fn abs_path(&self) -> Arc<Path> {
2165 self.snapshot.lock().abs_path.clone()
2166 }
2167
2168 async fn run(
2169 self,
2170 events_rx: impl Stream<Item = Vec<fsevent::Event>>,
2171 mut changed_paths: channel::Receiver<(Vec<PathBuf>, barrier::Sender)>,
2172 ) {
2173 use futures::FutureExt as _;
2174
2175 // Retrieve the basic properties of the root node.
2176 let root_char_bag;
2177 let root_abs_path;
2178 let root_inode;
2179 let root_is_dir;
2180 let next_entry_id;
2181 {
2182 let mut snapshot = self.snapshot.lock();
2183 snapshot.scan_started();
2184 root_char_bag = snapshot.root_char_bag;
2185 root_abs_path = snapshot.abs_path.clone();
2186 root_inode = snapshot.root_entry().map(|e| e.inode);
2187 root_is_dir = snapshot.root_entry().map_or(false, |e| e.is_dir());
2188 next_entry_id = snapshot.next_entry_id.clone();
2189 }
2190
2191 // Populate ignores above the root.
2192 let ignore_stack;
2193 for ancestor in root_abs_path.ancestors().skip(1) {
2194 if let Ok(ignore) = build_gitignore(&ancestor.join(&*GITIGNORE), self.fs.as_ref()).await
2195 {
2196 self.snapshot
2197 .lock()
2198 .ignores_by_parent_abs_path
2199 .insert(ancestor.into(), (ignore.into(), 0));
2200 }
2201 }
2202 {
2203 let mut snapshot = self.snapshot.lock();
2204 ignore_stack = snapshot.ignore_stack_for_abs_path(&root_abs_path, true);
2205 if ignore_stack.is_all() {
2206 if let Some(mut root_entry) = snapshot.root_entry().cloned() {
2207 root_entry.is_ignored = true;
2208 snapshot.insert_entry(root_entry, self.fs.as_ref());
2209 }
2210 }
2211 };
2212
2213 if root_is_dir {
2214 let mut ancestor_inodes = TreeSet::default();
2215 if let Some(root_inode) = root_inode {
2216 ancestor_inodes.insert(root_inode);
2217 }
2218
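            // Seed the scan queue with a job for the root directory. Each `ScanJob` carries
            // a clone of the queue's sender so that workers can enqueue jobs for the
            // subdirectories they discover; the channel only closes once every job (and its
            // sender clone) has been dropped, which is how the workers know the scan is done.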
2219 let (tx, rx) = channel::unbounded();
2220 self.executor
2221 .block(tx.send(ScanJob {
2222 abs_path: root_abs_path.to_path_buf(),
2223 path: Arc::from(Path::new("")),
2224 ignore_stack,
2225 ancestor_inodes,
2226 scan_queue: tx.clone(),
2227 }))
2228 .unwrap();
2229 drop(tx);
2230
2231 let progress_update_count = AtomicUsize::new(0);
2232 self.executor
2233 .scoped(|scope| {
2234 for _ in 0..self.executor.num_cpus() {
2235 scope.spawn(async {
2236 let mut last_progress_update_count = 0;
2237 let progress_update_timer = self.pause_between_progress_updates().fuse();
2238 futures::pin_mut!(progress_update_timer);
2239 loop {
2240 select_biased! {
2241 // Send periodic progress updates to the worktree. Use an atomic counter
2242 // to ensure that only one of the workers sends a progress update after
2243 // the update interval elapses.
2244 _ = progress_update_timer => {
2245 match progress_update_count.compare_exchange(
2246 last_progress_update_count,
2247 last_progress_update_count + 1,
2248 SeqCst,
2249 SeqCst
2250 ) {
2251 Ok(_) => {
2252 last_progress_update_count += 1;
2253 if self
2254 .notify
2255 .unbounded_send(ScanState::Initializing {
2256 snapshot: self.snapshot.lock().clone(),
2257 barrier: None,
2258 })
2259 .is_err()
2260 {
2261 break;
2262 }
2263 }
2264 Err(current_count) => last_progress_update_count = current_count,
2265 }
2266 progress_update_timer.set(self.pause_between_progress_updates().fuse());
2267 }
2268
2269 // Refresh any paths requested by the main thread.
2270 job = changed_paths.recv().fuse() => {
2271 let Ok((abs_paths, barrier)) = job else { break };
2272 self.update_entries_for_paths(abs_paths, None).await;
2273 if self
2274 .notify
2275 .unbounded_send(ScanState::Initializing {
2276 snapshot: self.snapshot.lock().clone(),
2277 barrier: Some(barrier),
2278 })
2279 .is_err()
2280 {
2281 break;
2282 }
2283 }
2284
2285 // Recursively load directories from the file system.
2286 job = rx.recv().fuse() => {
2287 let Ok(job) = job else { break };
2288 if let Err(err) = self
2289 .scan_dir(root_char_bag, next_entry_id.clone(), &job)
2290 .await
2291 {
2292 log::error!("error scanning {:?}: {}", job.abs_path, err);
2293 }
2294 }
2295 }
2296 }
2297 });
2298 }
2299 })
2300 .await;
2301 }
2302
2303 self.snapshot.lock().scan_completed();
2304
2305 if self
2306 .notify
2307 .unbounded_send(ScanState::Initialized {
2308 snapshot: self.snapshot.lock().clone(),
2309 })
2310 .is_err()
2311 {
2312 return;
2313 }
2314
        // Process any events that occurred while performing the initial scan. These
        // events can't be reported as precisely as later ones, because there is no
        // snapshot of the worktree from before they occurred.
2318 futures::pin_mut!(events_rx);
2319 if let Poll::Ready(Some(mut events)) = futures::poll!(events_rx.next()) {
2320 while let Poll::Ready(Some(additional_events)) = futures::poll!(events_rx.next()) {
2321 events.extend(additional_events);
2322 }
2323 let abs_paths = events.into_iter().map(|e| e.path).collect();
2324 if self.notify.unbounded_send(ScanState::Updating).is_err() {
2325 return;
2326 }
2327 if let Some(changes) = self.process_events(abs_paths, true).await {
2328 if self
2329 .notify
2330 .unbounded_send(ScanState::Updated {
2331 snapshot: self.snapshot.lock().clone(),
2332 changes,
2333 barrier: None,
2334 })
2335 .is_err()
2336 {
2337 return;
2338 }
2339 } else {
2340 return;
2341 }
2342 }
2343
2344 // Continue processing events until the worktree is dropped.
2345 loop {
2346 let barrier;
2347 let abs_paths;
2348 select_biased! {
2349 request = changed_paths.next().fuse() => {
2350 let Some((paths, b)) = request else { break };
2351 abs_paths = paths;
2352 barrier = Some(b);
2353 }
2354 events = events_rx.next().fuse() => {
2355 let Some(events) = events else { break };
2356 abs_paths = events.into_iter().map(|e| e.path).collect();
2357 barrier = None;
2358 }
2359 }
2360
2361 if self.notify.unbounded_send(ScanState::Updating).is_err() {
2362 return;
2363 }
2364 if let Some(changes) = self.process_events(abs_paths, false).await {
2365 if self
2366 .notify
2367 .unbounded_send(ScanState::Updated {
2368 snapshot: self.snapshot.lock().clone(),
2369 changes,
2370 barrier,
2371 })
2372 .is_err()
2373 {
2374 return;
2375 }
2376 } else {
2377 return;
2378 }
2379 }
2380 }
2381
2382 async fn pause_between_progress_updates(&self) {
2383 #[cfg(any(test, feature = "test-support"))]
2384 if self.fs.is_fake() {
2385 return self.executor.simulate_random_delay().await;
2386 }
2387 smol::Timer::after(Duration::from_millis(100)).await;
2388 }
2389
2390 async fn scan_dir(
2391 &self,
2392 root_char_bag: CharBag,
2393 next_entry_id: Arc<AtomicUsize>,
2394 job: &ScanJob,
2395 ) -> Result<()> {
2396 let mut new_entries: Vec<Entry> = Vec::new();
2397 let mut new_jobs: Vec<Option<ScanJob>> = Vec::new();
2398 let mut ignore_stack = job.ignore_stack.clone();
2399 let mut new_ignore = None;
2400
2401 let mut child_paths = self.fs.read_dir(&job.abs_path).await?;
2402 while let Some(child_abs_path) = child_paths.next().await {
2403 let child_abs_path = match child_abs_path {
2404 Ok(child_abs_path) => child_abs_path,
2405 Err(error) => {
2406 log::error!("error processing entry {:?}", error);
2407 continue;
2408 }
2409 };
2410
2411 let child_name = child_abs_path.file_name().unwrap();
2412 let child_path: Arc<Path> = job.path.join(child_name).into();
2413 let child_metadata = match self.fs.metadata(&child_abs_path).await {
2414 Ok(Some(metadata)) => metadata,
2415 Ok(None) => continue,
2416 Err(err) => {
2417 log::error!("error processing {:?}: {:?}", child_abs_path, err);
2418 continue;
2419 }
2420 };
2421
2422 // If we find a .gitignore, add it to the stack of ignores used to determine which paths are ignored
2423 if child_name == *GITIGNORE {
2424 match build_gitignore(&child_abs_path, self.fs.as_ref()).await {
2425 Ok(ignore) => {
2426 let ignore = Arc::new(ignore);
2427 ignore_stack =
2428 ignore_stack.append(job.abs_path.as_path().into(), ignore.clone());
2429 new_ignore = Some(ignore);
2430 }
2431 Err(error) => {
2432 log::error!(
2433 "error loading .gitignore file {:?} - {:?}",
2434 child_name,
2435 error
2436 );
2437 }
2438 }
2439
                // Update the ignore status of any child entries we've already processed to reflect
                // the ignore file in the current directory. Because `.gitignore` starts with a `.`,
                // it usually appears early in the directory listing, so there should rarely be many
                // entries to fix up. Update the ignore stack associated with any new jobs as well.
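                // `new_jobs` holds one slot per directory entry seen so far (`None` for
                // directories skipped because of a symlink cycle), so it can be walked in
                // lockstep with the directory entries in `new_entries`.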
2444 let mut new_jobs = new_jobs.iter_mut();
2445 for entry in &mut new_entries {
2446 let entry_abs_path = self.abs_path().join(&entry.path);
2447 entry.is_ignored =
2448 ignore_stack.is_abs_path_ignored(&entry_abs_path, entry.is_dir());
2449
2450 if entry.is_dir() {
2451 if let Some(job) = new_jobs.next().expect("Missing scan job for entry") {
2452 job.ignore_stack = if entry.is_ignored {
2453 IgnoreStack::all()
2454 } else {
2455 ignore_stack.clone()
2456 };
2457 }
2458 }
2459 }
2460 }
2461
2462 let mut child_entry = Entry::new(
2463 child_path.clone(),
2464 &child_metadata,
2465 &next_entry_id,
2466 root_char_bag,
2467 );
2468
2469 if child_entry.is_dir() {
2470 let is_ignored = ignore_stack.is_abs_path_ignored(&child_abs_path, true);
2471 child_entry.is_ignored = is_ignored;
2472
                // Avoid recursing indefinitely (and eventually crashing) when a symlink points
                // back to one of its ancestor directories.
2474 if !job.ancestor_inodes.contains(&child_entry.inode) {
2475 let mut ancestor_inodes = job.ancestor_inodes.clone();
2476 ancestor_inodes.insert(child_entry.inode);
2477
2478 new_jobs.push(Some(ScanJob {
2479 abs_path: child_abs_path,
2480 path: child_path,
2481 ignore_stack: if is_ignored {
2482 IgnoreStack::all()
2483 } else {
2484 ignore_stack.clone()
2485 },
2486 ancestor_inodes,
2487 scan_queue: job.scan_queue.clone(),
2488 }));
2489 } else {
2490 new_jobs.push(None);
2491 }
2492 } else {
2493 child_entry.is_ignored = ignore_stack.is_abs_path_ignored(&child_abs_path, false);
2494 }
2495
2496 new_entries.push(child_entry);
2497 }
2498
2499 self.snapshot.lock().populate_dir(
2500 job.path.clone(),
2501 new_entries,
2502 new_ignore,
2503 self.fs.as_ref(),
2504 );
2505
2506 for new_job in new_jobs {
2507 if let Some(new_job) = new_job {
2508 job.scan_queue.send(new_job).await.unwrap();
2509 }
2510 }
2511
2512 Ok(())
2513 }
2514
2515 async fn process_events(
2516 &self,
2517 abs_paths: Vec<PathBuf>,
2518 received_before_initialized: bool,
2519 ) -> Option<HashMap<Arc<Path>, PathChange>> {
2520 let (scan_queue_tx, scan_queue_rx) = channel::unbounded();
2521
2522 let prev_snapshot = {
2523 let mut snapshot = self.snapshot.lock();
2524 snapshot.scan_started();
2525 snapshot.clone()
2526 };
2527
2528 let event_paths = self
2529 .update_entries_for_paths(abs_paths, Some(scan_queue_tx))
2530 .await?;
2531
2532 // Scan any directories that were created as part of this event batch.
2533 self.executor
2534 .scoped(|scope| {
2535 for _ in 0..self.executor.num_cpus() {
2536 scope.spawn(async {
2537 while let Ok(job) = scan_queue_rx.recv().await {
2538 if let Err(err) = self
2539 .scan_dir(
2540 prev_snapshot.root_char_bag,
2541 prev_snapshot.next_entry_id.clone(),
2542 &job,
2543 )
2544 .await
2545 {
2546 log::error!("error scanning {:?}: {}", job.abs_path, err);
2547 }
2548 }
2549 });
2550 }
2551 })
2552 .await;
2553
2554 // Attempt to detect renames only over a single batch of file-system events.
2555 self.snapshot.lock().removed_entry_ids.clear();
2556
2557 self.update_ignore_statuses().await;
2558 self.update_git_repositories();
2559 let changes = self.build_change_set(
2560 prev_snapshot.snapshot,
2561 event_paths,
2562 received_before_initialized,
2563 );
2564 self.snapshot.lock().scan_completed();
2565 Some(changes)
2566 }
2567
2568 async fn update_entries_for_paths(
2569 &self,
2570 mut abs_paths: Vec<PathBuf>,
2571 scan_queue_tx: Option<Sender<ScanJob>>,
2572 ) -> Option<Vec<Arc<Path>>> {
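        // Sort the paths and drop any path that is a descendant of an earlier path in
        // the list, so that each changed subtree is processed once via its topmost
        // changed path.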
2573 abs_paths.sort_unstable();
2574 abs_paths.dedup_by(|a, b| a.starts_with(&b));
2575
2576 let root_abs_path = self.snapshot.lock().abs_path.clone();
2577 let root_canonical_path = self.fs.canonicalize(&root_abs_path).await.ok()?;
2578 let metadata = futures::future::join_all(
2579 abs_paths
2580 .iter()
2581 .map(|abs_path| self.fs.metadata(&abs_path))
2582 .collect::<Vec<_>>(),
2583 )
2584 .await;
2585
2586 let mut snapshot = self.snapshot.lock();
2587 if scan_queue_tx.is_some() {
2588 for abs_path in &abs_paths {
2589 if let Ok(path) = abs_path.strip_prefix(&root_canonical_path) {
2590 snapshot.remove_path(path);
2591 }
2592 }
2593 }
2594
2595 let mut event_paths = Vec::with_capacity(abs_paths.len());
2596 for (abs_path, metadata) in abs_paths.into_iter().zip(metadata.into_iter()) {
2597 let path: Arc<Path> = match abs_path.strip_prefix(&root_canonical_path) {
2598 Ok(path) => Arc::from(path.to_path_buf()),
2599 Err(_) => {
2600 log::error!(
2601 "unexpected event {:?} for root path {:?}",
2602 abs_path,
2603 root_canonical_path
2604 );
2605 continue;
2606 }
2607 };
2608 event_paths.push(path.clone());
2609 let abs_path = root_abs_path.join(&path);
2610
2611 match metadata {
2612 Ok(Some(metadata)) => {
2613 let ignore_stack =
2614 snapshot.ignore_stack_for_abs_path(&abs_path, metadata.is_dir);
2615 let mut fs_entry = Entry::new(
2616 path.clone(),
2617 &metadata,
2618 snapshot.next_entry_id.as_ref(),
2619 snapshot.root_char_bag,
2620 );
2621 fs_entry.is_ignored = ignore_stack.is_all();
2622 snapshot.insert_entry(fs_entry, self.fs.as_ref());
2623
2624 let scan_id = snapshot.scan_id;
2625 if let Some(repo) = snapshot.repo_with_dot_git_containing(&path) {
2626 repo.repo.lock().reload_index();
2627 repo.scan_id = scan_id;
2628 }
2629
2630 if let Some(scan_queue_tx) = &scan_queue_tx {
2631 let mut ancestor_inodes = snapshot.ancestor_inodes_for_path(&path);
2632 if metadata.is_dir && !ancestor_inodes.contains(&metadata.inode) {
2633 ancestor_inodes.insert(metadata.inode);
2634 self.executor
2635 .block(scan_queue_tx.send(ScanJob {
2636 abs_path,
2637 path,
2638 ignore_stack,
2639 ancestor_inodes,
2640 scan_queue: scan_queue_tx.clone(),
2641 }))
2642 .unwrap();
2643 }
2644 }
2645 }
2646 Ok(None) => {}
2647 Err(err) => {
2648 // TODO - create a special 'error' entry in the entries tree to mark this
2649 log::error!("error reading file on event {:?}", err);
2650 }
2651 }
2652 }
2653
2654 Some(event_paths)
2655 }
2656
2657 async fn update_ignore_statuses(&self) {
2658 let mut snapshot = self.snapshot.lock().clone();
2659 let mut ignores_to_update = Vec::new();
2660 let mut ignores_to_delete = Vec::new();
2661 for (parent_abs_path, (_, scan_id)) in &snapshot.ignores_by_parent_abs_path {
2662 if let Ok(parent_path) = parent_abs_path.strip_prefix(&snapshot.abs_path) {
2663 if *scan_id == snapshot.scan_id && snapshot.entry_for_path(parent_path).is_some() {
2664 ignores_to_update.push(parent_abs_path.clone());
2665 }
2666
2667 let ignore_path = parent_path.join(&*GITIGNORE);
2668 if snapshot.entry_for_path(ignore_path).is_none() {
2669 ignores_to_delete.push(parent_abs_path.clone());
2670 }
2671 }
2672 }
2673
2674 for parent_abs_path in ignores_to_delete {
2675 snapshot.ignores_by_parent_abs_path.remove(&parent_abs_path);
2676 self.snapshot
2677 .lock()
2678 .ignores_by_parent_abs_path
2679 .remove(&parent_abs_path);
2680 }
2681
2682 let (ignore_queue_tx, ignore_queue_rx) = channel::unbounded();
2683 ignores_to_update.sort_unstable();
2684 let mut ignores_to_update = ignores_to_update.into_iter().peekable();
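        // Skip any ignore directories that are nested inside another directory already
        // being updated; the recursive `UpdateIgnoreStatusJob` for the ancestor will
        // re-evaluate them.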
2685 while let Some(parent_abs_path) = ignores_to_update.next() {
2686 while ignores_to_update
2687 .peek()
2688 .map_or(false, |p| p.starts_with(&parent_abs_path))
2689 {
2690 ignores_to_update.next().unwrap();
2691 }
2692
2693 let ignore_stack = snapshot.ignore_stack_for_abs_path(&parent_abs_path, true);
2694 ignore_queue_tx
2695 .send(UpdateIgnoreStatusJob {
2696 abs_path: parent_abs_path,
2697 ignore_stack,
2698 ignore_queue: ignore_queue_tx.clone(),
2699 })
2700 .await
2701 .unwrap();
2702 }
2703 drop(ignore_queue_tx);
2704
2705 self.executor
2706 .scoped(|scope| {
2707 for _ in 0..self.executor.num_cpus() {
2708 scope.spawn(async {
2709 while let Ok(job) = ignore_queue_rx.recv().await {
2710 self.update_ignore_status(job, &snapshot).await;
2711 }
2712 });
2713 }
2714 })
2715 .await;
2716 }
2717
2718 fn update_git_repositories(&self) {
2719 let mut snapshot = self.snapshot.lock();
2720 let mut git_repositories = mem::take(&mut snapshot.git_repositories);
2721 git_repositories.retain(|repo| snapshot.entry_for_path(&repo.git_dir_path).is_some());
2722 snapshot.git_repositories = git_repositories;
2723 }
2724
2725 async fn update_ignore_status(&self, job: UpdateIgnoreStatusJob, snapshot: &LocalSnapshot) {
2726 let mut ignore_stack = job.ignore_stack;
2727 if let Some((ignore, _)) = snapshot.ignores_by_parent_abs_path.get(&job.abs_path) {
2728 ignore_stack = ignore_stack.append(job.abs_path.clone(), ignore.clone());
2729 }
2730
2731 let mut entries_by_id_edits = Vec::new();
2732 let mut entries_by_path_edits = Vec::new();
2733 let path = job.abs_path.strip_prefix(&snapshot.abs_path).unwrap();
2734 for mut entry in snapshot.child_entries(path).cloned() {
2735 let was_ignored = entry.is_ignored;
2736 let abs_path = self.abs_path().join(&entry.path);
2737 entry.is_ignored = ignore_stack.is_abs_path_ignored(&abs_path, entry.is_dir());
2738 if entry.is_dir() {
2739 let child_ignore_stack = if entry.is_ignored {
2740 IgnoreStack::all()
2741 } else {
2742 ignore_stack.clone()
2743 };
2744 job.ignore_queue
2745 .send(UpdateIgnoreStatusJob {
2746 abs_path: abs_path.into(),
2747 ignore_stack: child_ignore_stack,
2748 ignore_queue: job.ignore_queue.clone(),
2749 })
2750 .await
2751 .unwrap();
2752 }
2753
2754 if entry.is_ignored != was_ignored {
2755 let mut path_entry = snapshot.entries_by_id.get(&entry.id, &()).unwrap().clone();
2756 path_entry.scan_id = snapshot.scan_id;
2757 path_entry.is_ignored = entry.is_ignored;
2758 entries_by_id_edits.push(Edit::Insert(path_entry));
2759 entries_by_path_edits.push(Edit::Insert(entry));
2760 }
2761 }
2762
2763 let mut snapshot = self.snapshot.lock();
2764 snapshot.entries_by_path.edit(entries_by_path_edits, &());
2765 snapshot.entries_by_id.edit(entries_by_id_edits, &());
2766 }
2767
2768 fn build_change_set(
2769 &self,
2770 old_snapshot: Snapshot,
2771 event_paths: Vec<Arc<Path>>,
2772 received_before_initialized: bool,
2773 ) -> HashMap<Arc<Path>, PathChange> {
2774 use PathChange::{Added, AddedOrUpdated, Removed, Updated};
2775
2776 let new_snapshot = self.snapshot.lock();
2777 let mut changes = HashMap::default();
2778 let mut old_paths = old_snapshot.entries_by_path.cursor::<PathKey>();
2779 let mut new_paths = new_snapshot.entries_by_path.cursor::<PathKey>();
2780
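        // For each changed path, position a cursor in both the old and the new snapshot
        // and walk the entries under that path in lockstep, classifying each difference
        // as an addition, removal, or update.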
2781 for path in event_paths {
2782 let path = PathKey(path);
2783 old_paths.seek(&path, Bias::Left, &());
2784 new_paths.seek(&path, Bias::Left, &());
2785
2786 loop {
2787 match (old_paths.item(), new_paths.item()) {
2788 (Some(old_entry), Some(new_entry)) => {
2789 if old_entry.path > path.0
2790 && new_entry.path > path.0
2791 && !old_entry.path.starts_with(&path.0)
2792 && !new_entry.path.starts_with(&path.0)
2793 {
2794 break;
2795 }
2796
2797 match Ord::cmp(&old_entry.path, &new_entry.path) {
2798 Ordering::Less => {
2799 changes.insert(old_entry.path.clone(), Removed);
2800 old_paths.next(&());
2801 }
2802 Ordering::Equal => {
2803 if received_before_initialized {
2804 // If the worktree was not fully initialized when this event was generated,
2805 // we can't know whether this entry was added during the scan or whether
2806 // it was merely updated.
2807 changes.insert(old_entry.path.clone(), AddedOrUpdated);
2808 } else if old_entry.mtime != new_entry.mtime {
2809 changes.insert(old_entry.path.clone(), Updated);
2810 }
2811 old_paths.next(&());
2812 new_paths.next(&());
2813 }
2814 Ordering::Greater => {
2815 changes.insert(new_entry.path.clone(), Added);
2816 new_paths.next(&());
2817 }
2818 }
2819 }
2820 (Some(old_entry), None) => {
2821 changes.insert(old_entry.path.clone(), Removed);
2822 old_paths.next(&());
2823 }
2824 (None, Some(new_entry)) => {
2825 changes.insert(new_entry.path.clone(), Added);
2826 new_paths.next(&());
2827 }
2828 (None, None) => break,
2829 }
2830 }
2831 }
2832 changes
2833 }
2834}
2835
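/// Computes the `CharBag` stored in `EntryKind::File` by extending the worktree
/// root's bag with the lower-cased characters of the entry's worktree-relative path.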
2836fn char_bag_for_path(root_char_bag: CharBag, path: &Path) -> CharBag {
2837 let mut result = root_char_bag;
2838 result.extend(
2839 path.to_string_lossy()
2840 .chars()
2841 .map(|c| c.to_ascii_lowercase()),
2842 );
2843 result
2844}
2845
2846struct ScanJob {
2847 abs_path: PathBuf,
2848 path: Arc<Path>,
2849 ignore_stack: Arc<IgnoreStack>,
2850 scan_queue: Sender<ScanJob>,
2851 ancestor_inodes: TreeSet<u64>,
2852}
2853
2854struct UpdateIgnoreStatusJob {
2855 abs_path: Arc<Path>,
2856 ignore_stack: Arc<IgnoreStack>,
2857 ignore_queue: Sender<UpdateIgnoreStatusJob>,
2858}
2859
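/// Test-support extension methods for `ModelHandle<Worktree>`.
///
/// A sketch of typical usage, mirroring the tests in this module (not compiled here):
///
/// ```ignore
/// // Wait for all pending file-system events to be handled before asserting
/// // anything about the worktree's entries.
/// tree.flush_fs_events(cx).await;
/// ```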
2860pub trait WorktreeHandle {
2861 #[cfg(any(test, feature = "test-support"))]
2862 fn flush_fs_events<'a>(
2863 &self,
2864 cx: &'a gpui::TestAppContext,
2865 ) -> futures::future::LocalBoxFuture<'a, ()>;
2866}
2867
2868impl WorktreeHandle for ModelHandle<Worktree> {
    // The worktree's FS event stream sometimes delivers "redundant" events for FS changes that
    // occurred before the worktree was constructed. These events can cause the worktree to perform
    // extra directory scans, and emit extra scan-state notifications.
2872 //
2873 // This function mutates the worktree's directory and waits for those mutations to be picked up,
2874 // to ensure that all redundant FS events have already been processed.
2875 #[cfg(any(test, feature = "test-support"))]
2876 fn flush_fs_events<'a>(
2877 &self,
2878 cx: &'a gpui::TestAppContext,
2879 ) -> futures::future::LocalBoxFuture<'a, ()> {
2880 use smol::future::FutureExt;
2881
2882 let filename = "fs-event-sentinel";
2883 let tree = self.clone();
2884 let (fs, root_path) = self.read_with(cx, |tree, _| {
2885 let tree = tree.as_local().unwrap();
2886 (tree.fs.clone(), tree.abs_path().clone())
2887 });
2888
2889 async move {
2890 fs.create_file(&root_path.join(filename), Default::default())
2891 .await
2892 .unwrap();
2893 tree.condition(cx, |tree, _| tree.entry_for_path(filename).is_some())
2894 .await;
2895
2896 fs.remove_file(&root_path.join(filename), Default::default())
2897 .await
2898 .unwrap();
2899 tree.condition(cx, |tree, _| tree.entry_for_path(filename).is_none())
2900 .await;
2901
2902 cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
2903 .await;
2904 }
2905 .boxed_local()
2906 }
2907}
2908
2909#[derive(Clone, Debug)]
2910struct TraversalProgress<'a> {
2911 max_path: &'a Path,
2912 count: usize,
2913 visible_count: usize,
2914 file_count: usize,
2915 visible_file_count: usize,
2916}
2917
2918impl<'a> TraversalProgress<'a> {
2919 fn count(&self, include_dirs: bool, include_ignored: bool) -> usize {
2920 match (include_ignored, include_dirs) {
2921 (true, true) => self.count,
2922 (true, false) => self.file_count,
2923 (false, true) => self.visible_count,
2924 (false, false) => self.visible_file_count,
2925 }
2926 }
2927}
2928
2929impl<'a> sum_tree::Dimension<'a, EntrySummary> for TraversalProgress<'a> {
2930 fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
2931 self.max_path = summary.max_path.as_ref();
2932 self.count += summary.count;
2933 self.visible_count += summary.visible_count;
2934 self.file_count += summary.file_count;
2935 self.visible_file_count += summary.visible_file_count;
2936 }
2937}
2938
2939impl<'a> Default for TraversalProgress<'a> {
2940 fn default() -> Self {
2941 Self {
2942 max_path: Path::new(""),
2943 count: 0,
2944 visible_count: 0,
2945 file_count: 0,
2946 visible_file_count: 0,
2947 }
2948 }
2949}
2950
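/// A cursor over a snapshot's entries in path order, optionally filtered to skip
/// directories and/or ignored entries.
///
/// A sketch of typical usage, mirroring the tests in this module (not compiled here,
/// and assuming `snapshot` is a worktree snapshot):
///
/// ```ignore
/// // Collect the paths of all non-ignored entries, files and directories alike.
/// let paths: Vec<_> = snapshot
///     .entries(false)
///     .map(|entry| entry.path.clone())
///     .collect();
/// ```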
2951pub struct Traversal<'a> {
2952 cursor: sum_tree::Cursor<'a, Entry, TraversalProgress<'a>>,
2953 include_ignored: bool,
2954 include_dirs: bool,
2955}
2956
2957impl<'a> Traversal<'a> {
2958 pub fn advance(&mut self) -> bool {
2959 self.advance_to_offset(self.offset() + 1)
2960 }
2961
2962 pub fn advance_to_offset(&mut self, offset: usize) -> bool {
2963 self.cursor.seek_forward(
2964 &TraversalTarget::Count {
2965 count: offset,
2966 include_dirs: self.include_dirs,
2967 include_ignored: self.include_ignored,
2968 },
2969 Bias::Right,
2970 &(),
2971 )
2972 }
2973
2974 pub fn advance_to_sibling(&mut self) -> bool {
2975 while let Some(entry) = self.cursor.item() {
2976 self.cursor.seek_forward(
2977 &TraversalTarget::PathSuccessor(&entry.path),
2978 Bias::Left,
2979 &(),
2980 );
2981 if let Some(entry) = self.cursor.item() {
2982 if (self.include_dirs || !entry.is_dir())
2983 && (self.include_ignored || !entry.is_ignored)
2984 {
2985 return true;
2986 }
2987 }
2988 }
2989 false
2990 }
2991
2992 pub fn entry(&self) -> Option<&'a Entry> {
2993 self.cursor.item()
2994 }
2995
2996 pub fn offset(&self) -> usize {
2997 self.cursor
2998 .start()
2999 .count(self.include_dirs, self.include_ignored)
3000 }
3001}
3002
3003impl<'a> Iterator for Traversal<'a> {
3004 type Item = &'a Entry;
3005
3006 fn next(&mut self) -> Option<Self::Item> {
3007 if let Some(item) = self.entry() {
3008 self.advance();
3009 Some(item)
3010 } else {
3011 None
3012 }
3013 }
3014}
3015
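/// A position that a `Traversal`'s cursor can seek to: an exact path, the first entry
/// whose path does not lie under the given path (`PathSuccessor`), or the point at
/// which the filtered entry count reaches `count` (`Count`).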
3016#[derive(Debug)]
3017enum TraversalTarget<'a> {
3018 Path(&'a Path),
3019 PathSuccessor(&'a Path),
3020 Count {
3021 count: usize,
3022 include_ignored: bool,
3023 include_dirs: bool,
3024 },
3025}
3026
3027impl<'a, 'b> SeekTarget<'a, EntrySummary, TraversalProgress<'a>> for TraversalTarget<'b> {
3028 fn cmp(&self, cursor_location: &TraversalProgress<'a>, _: &()) -> Ordering {
3029 match self {
3030 TraversalTarget::Path(path) => path.cmp(&cursor_location.max_path),
3031 TraversalTarget::PathSuccessor(path) => {
3032 if !cursor_location.max_path.starts_with(path) {
3033 Ordering::Equal
3034 } else {
3035 Ordering::Greater
3036 }
3037 }
3038 TraversalTarget::Count {
3039 count,
3040 include_dirs,
3041 include_ignored,
3042 } => Ord::cmp(
3043 count,
3044 &cursor_location.count(*include_dirs, *include_ignored),
3045 ),
3046 }
3047 }
3048}
3049
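/// Iterates over the direct children of a single directory by advancing the
/// underlying traversal to the next sibling after yielding each child.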
3050struct ChildEntriesIter<'a> {
3051 parent_path: &'a Path,
3052 traversal: Traversal<'a>,
3053}
3054
3055impl<'a> Iterator for ChildEntriesIter<'a> {
3056 type Item = &'a Entry;
3057
3058 fn next(&mut self) -> Option<Self::Item> {
3059 if let Some(item) = self.traversal.entry() {
3060 if item.path.starts_with(&self.parent_path) {
3061 self.traversal.advance_to_sibling();
3062 return Some(item);
3063 }
3064 }
3065 None
3066 }
3067}
3068
3069impl<'a> From<&'a Entry> for proto::Entry {
3070 fn from(entry: &'a Entry) -> Self {
3071 Self {
3072 id: entry.id.to_proto(),
3073 is_dir: entry.is_dir(),
3074 path: entry.path.to_string_lossy().into(),
3075 inode: entry.inode,
3076 mtime: Some(entry.mtime.into()),
3077 is_symlink: entry.is_symlink,
3078 is_ignored: entry.is_ignored,
3079 }
3080 }
3081}
3082
3083impl<'a> TryFrom<(&'a CharBag, proto::Entry)> for Entry {
3084 type Error = anyhow::Error;
3085
3086 fn try_from((root_char_bag, entry): (&'a CharBag, proto::Entry)) -> Result<Self> {
3087 if let Some(mtime) = entry.mtime {
3088 let kind = if entry.is_dir {
3089 EntryKind::Dir
3090 } else {
3091 let mut char_bag = *root_char_bag;
3092 char_bag.extend(entry.path.chars().map(|c| c.to_ascii_lowercase()));
3093 EntryKind::File(char_bag)
3094 };
3095 let path: Arc<Path> = PathBuf::from(entry.path).into();
3096 Ok(Entry {
3097 id: ProjectEntryId::from_proto(entry.id),
3098 kind,
3099 path,
3100 inode: entry.inode,
3101 mtime: mtime.into(),
3102 is_symlink: entry.is_symlink,
3103 is_ignored: entry.is_ignored,
3104 })
3105 } else {
3106 Err(anyhow!(
3107 "missing mtime in remote worktree entry {:?}",
3108 entry.path
3109 ))
3110 }
3111 }
3112}
3113
3114#[cfg(test)]
3115mod tests {
3116 use super::*;
3117 use client::test::FakeHttpClient;
3118 use fs::repository::FakeGitRepository;
3119 use fs::{FakeFs, RealFs};
3120 use gpui::{executor::Deterministic, TestAppContext};
3121 use rand::prelude::*;
3122 use serde_json::json;
3123 use std::{env, fmt::Write};
3124 use util::test::temp_tree;
3125
3126 #[gpui::test]
3127 async fn test_traversal(cx: &mut TestAppContext) {
3128 let fs = FakeFs::new(cx.background());
3129 fs.insert_tree(
3130 "/root",
3131 json!({
3132 ".gitignore": "a/b\n",
3133 "a": {
3134 "b": "",
3135 "c": "",
3136 }
3137 }),
3138 )
3139 .await;
3140
3141 let http_client = FakeHttpClient::with_404_response();
3142 let client = cx.read(|cx| Client::new(http_client, cx));
3143
3144 let tree = Worktree::local(
3145 client,
3146 Path::new("/root"),
3147 true,
3148 fs,
3149 Default::default(),
3150 &mut cx.to_async(),
3151 )
3152 .await
3153 .unwrap();
3154 cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
3155 .await;
3156
3157 tree.read_with(cx, |tree, _| {
3158 assert_eq!(
3159 tree.entries(false)
3160 .map(|entry| entry.path.as_ref())
3161 .collect::<Vec<_>>(),
3162 vec![
3163 Path::new(""),
3164 Path::new(".gitignore"),
3165 Path::new("a"),
3166 Path::new("a/c"),
3167 ]
3168 );
3169 assert_eq!(
3170 tree.entries(true)
3171 .map(|entry| entry.path.as_ref())
3172 .collect::<Vec<_>>(),
3173 vec![
3174 Path::new(""),
3175 Path::new(".gitignore"),
3176 Path::new("a"),
3177 Path::new("a/b"),
3178 Path::new("a/c"),
3179 ]
3180 );
3181 })
3182 }
3183
3184 #[gpui::test(iterations = 10)]
3185 async fn test_circular_symlinks(executor: Arc<Deterministic>, cx: &mut TestAppContext) {
3186 let fs = FakeFs::new(cx.background());
3187 fs.insert_tree(
3188 "/root",
3189 json!({
3190 "lib": {
3191 "a": {
3192 "a.txt": ""
3193 },
3194 "b": {
3195 "b.txt": ""
3196 }
3197 }
3198 }),
3199 )
3200 .await;
3201 fs.insert_symlink("/root/lib/a/lib", "..".into()).await;
3202 fs.insert_symlink("/root/lib/b/lib", "..".into()).await;
3203
3204 let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
3205 let tree = Worktree::local(
3206 client,
3207 Path::new("/root"),
3208 true,
3209 fs.clone(),
3210 Default::default(),
3211 &mut cx.to_async(),
3212 )
3213 .await
3214 .unwrap();
3215
3216 cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
3217 .await;
3218
3219 tree.read_with(cx, |tree, _| {
3220 assert_eq!(
3221 tree.entries(false)
3222 .map(|entry| entry.path.as_ref())
3223 .collect::<Vec<_>>(),
3224 vec![
3225 Path::new(""),
3226 Path::new("lib"),
3227 Path::new("lib/a"),
3228 Path::new("lib/a/a.txt"),
3229 Path::new("lib/a/lib"),
3230 Path::new("lib/b"),
3231 Path::new("lib/b/b.txt"),
3232 Path::new("lib/b/lib"),
3233 ]
3234 );
3235 });
3236
3237 fs.rename(
3238 Path::new("/root/lib/a/lib"),
3239 Path::new("/root/lib/a/lib-2"),
3240 Default::default(),
3241 )
3242 .await
3243 .unwrap();
3244 executor.run_until_parked();
3245 tree.read_with(cx, |tree, _| {
3246 assert_eq!(
3247 tree.entries(false)
3248 .map(|entry| entry.path.as_ref())
3249 .collect::<Vec<_>>(),
3250 vec![
3251 Path::new(""),
3252 Path::new("lib"),
3253 Path::new("lib/a"),
3254 Path::new("lib/a/a.txt"),
3255 Path::new("lib/a/lib-2"),
3256 Path::new("lib/b"),
3257 Path::new("lib/b/b.txt"),
3258 Path::new("lib/b/lib"),
3259 ]
3260 );
3261 });
3262 }
3263
3264 #[gpui::test]
3265 async fn test_rescan_with_gitignore(cx: &mut TestAppContext) {
3266 let parent_dir = temp_tree(json!({
3267 ".gitignore": "ancestor-ignored-file1\nancestor-ignored-file2\n",
3268 "tree": {
3269 ".git": {},
3270 ".gitignore": "ignored-dir\n",
3271 "tracked-dir": {
3272 "tracked-file1": "",
3273 "ancestor-ignored-file1": "",
3274 },
3275 "ignored-dir": {
3276 "ignored-file1": ""
3277 }
3278 }
3279 }));
3280 let dir = parent_dir.path().join("tree");
3281
3282 let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
3283
3284 let tree = Worktree::local(
3285 client,
3286 dir.as_path(),
3287 true,
3288 Arc::new(RealFs),
3289 Default::default(),
3290 &mut cx.to_async(),
3291 )
3292 .await
3293 .unwrap();
3294 cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
3295 .await;
3296 tree.flush_fs_events(cx).await;
3297 cx.read(|cx| {
3298 let tree = tree.read(cx);
3299 assert!(
3300 !tree
3301 .entry_for_path("tracked-dir/tracked-file1")
3302 .unwrap()
3303 .is_ignored
3304 );
3305 assert!(
3306 tree.entry_for_path("tracked-dir/ancestor-ignored-file1")
3307 .unwrap()
3308 .is_ignored
3309 );
3310 assert!(
3311 tree.entry_for_path("ignored-dir/ignored-file1")
3312 .unwrap()
3313 .is_ignored
3314 );
3315 });
3316
3317 std::fs::write(dir.join("tracked-dir/tracked-file2"), "").unwrap();
3318 std::fs::write(dir.join("tracked-dir/ancestor-ignored-file2"), "").unwrap();
3319 std::fs::write(dir.join("ignored-dir/ignored-file2"), "").unwrap();
3320 tree.flush_fs_events(cx).await;
3321 cx.read(|cx| {
3322 let tree = tree.read(cx);
3323 assert!(
3324 !tree
3325 .entry_for_path("tracked-dir/tracked-file2")
3326 .unwrap()
3327 .is_ignored
3328 );
3329 assert!(
3330 tree.entry_for_path("tracked-dir/ancestor-ignored-file2")
3331 .unwrap()
3332 .is_ignored
3333 );
3334 assert!(
3335 tree.entry_for_path("ignored-dir/ignored-file2")
3336 .unwrap()
3337 .is_ignored
3338 );
3339 assert!(tree.entry_for_path(".git").unwrap().is_ignored);
3340 });
3341 }
3342
3343 #[gpui::test]
3344 async fn test_git_repository_for_path(cx: &mut TestAppContext) {
3345 let root = temp_tree(json!({
3346 "dir1": {
3347 ".git": {},
3348 "deps": {
3349 "dep1": {
3350 ".git": {},
3351 "src": {
3352 "a.txt": ""
3353 }
3354 }
3355 },
3356 "src": {
3357 "b.txt": ""
3358 }
3359 },
3360 "c.txt": "",
3361 }));
3362
3363 let http_client = FakeHttpClient::with_404_response();
3364 let client = cx.read(|cx| Client::new(http_client, cx));
3365 let tree = Worktree::local(
3366 client,
3367 root.path(),
3368 true,
3369 Arc::new(RealFs),
3370 Default::default(),
3371 &mut cx.to_async(),
3372 )
3373 .await
3374 .unwrap();
3375
3376 cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
3377 .await;
3378 tree.flush_fs_events(cx).await;
3379
3380 tree.read_with(cx, |tree, _cx| {
3381 let tree = tree.as_local().unwrap();
3382
3383 assert!(tree.repo_for("c.txt".as_ref()).is_none());
3384
3385 let repo = tree.repo_for("dir1/src/b.txt".as_ref()).unwrap();
3386 assert_eq!(repo.content_path.as_ref(), Path::new("dir1"));
3387 assert_eq!(repo.git_dir_path.as_ref(), Path::new("dir1/.git"));
3388
3389 let repo = tree.repo_for("dir1/deps/dep1/src/a.txt".as_ref()).unwrap();
3390 assert_eq!(repo.content_path.as_ref(), Path::new("dir1/deps/dep1"));
3391 assert_eq!(repo.git_dir_path.as_ref(), Path::new("dir1/deps/dep1/.git"),);
3392 });
3393
3394 let original_scan_id = tree.read_with(cx, |tree, _cx| {
3395 let tree = tree.as_local().unwrap();
3396 tree.repo_for("dir1/src/b.txt".as_ref()).unwrap().scan_id
3397 });
3398
3399 std::fs::write(root.path().join("dir1/.git/random_new_file"), "hello").unwrap();
3400 tree.flush_fs_events(cx).await;
3401
3402 tree.read_with(cx, |tree, _cx| {
3403 let tree = tree.as_local().unwrap();
3404 let new_scan_id = tree.repo_for("dir1/src/b.txt".as_ref()).unwrap().scan_id;
3405 assert_ne!(
3406 original_scan_id, new_scan_id,
3407 "original {original_scan_id}, new {new_scan_id}"
3408 );
3409 });
3410
3411 std::fs::remove_dir_all(root.path().join("dir1/.git")).unwrap();
3412 tree.flush_fs_events(cx).await;
3413
3414 tree.read_with(cx, |tree, _cx| {
3415 let tree = tree.as_local().unwrap();
3416
3417 assert!(tree.repo_for("dir1/src/b.txt".as_ref()).is_none());
3418 });
3419 }
3420
3421 #[test]
3422 fn test_changed_repos() {
3423 fn fake_entry(git_dir_path: impl AsRef<Path>, scan_id: usize) -> GitRepositoryEntry {
3424 GitRepositoryEntry {
3425 repo: Arc::new(Mutex::new(FakeGitRepository::default())),
3426 scan_id,
3427 content_path: git_dir_path.as_ref().parent().unwrap().into(),
3428 git_dir_path: git_dir_path.as_ref().into(),
3429 }
3430 }
3431
3432 let prev_repos: Vec<GitRepositoryEntry> = vec![
3433 fake_entry("/.git", 0),
3434 fake_entry("/a/.git", 0),
3435 fake_entry("/a/b/.git", 0),
3436 ];
3437
3438 let new_repos: Vec<GitRepositoryEntry> = vec![
3439 fake_entry("/a/.git", 1),
3440 fake_entry("/a/b/.git", 0),
3441 fake_entry("/a/c/.git", 0),
3442 ];
3443
3444 let res = LocalWorktree::changed_repos(&prev_repos, &new_repos);
3445
3446 // Deletion retained
3447 assert!(res
3448 .iter()
3449 .find(|repo| repo.git_dir_path.as_ref() == Path::new("/.git") && repo.scan_id == 0)
3450 .is_some());
3451
3452 // Update retained
3453 assert!(res
3454 .iter()
3455 .find(|repo| repo.git_dir_path.as_ref() == Path::new("/a/.git") && repo.scan_id == 1)
3456 .is_some());
3457
3458 // Addition retained
3459 assert!(res
3460 .iter()
3461 .find(|repo| repo.git_dir_path.as_ref() == Path::new("/a/c/.git") && repo.scan_id == 0)
3462 .is_some());
3463
        // No change, not retained
3465 assert!(res
3466 .iter()
3467 .find(|repo| repo.git_dir_path.as_ref() == Path::new("/a/b/.git") && repo.scan_id == 0)
3468 .is_none());
3469 }
3470
3471 #[gpui::test]
3472 async fn test_write_file(cx: &mut TestAppContext) {
3473 let dir = temp_tree(json!({
3474 ".git": {},
3475 ".gitignore": "ignored-dir\n",
3476 "tracked-dir": {},
3477 "ignored-dir": {}
3478 }));
3479
3480 let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
3481
3482 let tree = Worktree::local(
3483 client,
3484 dir.path(),
3485 true,
3486 Arc::new(RealFs),
3487 Default::default(),
3488 &mut cx.to_async(),
3489 )
3490 .await
3491 .unwrap();
3492 cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
3493 .await;
3494 tree.flush_fs_events(cx).await;
3495
3496 tree.update(cx, |tree, cx| {
3497 tree.as_local().unwrap().write_file(
3498 Path::new("tracked-dir/file.txt"),
3499 "hello".into(),
3500 Default::default(),
3501 cx,
3502 )
3503 })
3504 .await
3505 .unwrap();
3506 tree.update(cx, |tree, cx| {
3507 tree.as_local().unwrap().write_file(
3508 Path::new("ignored-dir/file.txt"),
3509 "world".into(),
3510 Default::default(),
3511 cx,
3512 )
3513 })
3514 .await
3515 .unwrap();
3516
3517 tree.read_with(cx, |tree, _| {
3518 let tracked = tree.entry_for_path("tracked-dir/file.txt").unwrap();
3519 let ignored = tree.entry_for_path("ignored-dir/file.txt").unwrap();
3520 assert!(!tracked.is_ignored);
3521 assert!(ignored.is_ignored);
3522 });
3523 }
3524
3525 #[gpui::test(iterations = 30)]
3526 async fn test_create_directory(cx: &mut TestAppContext) {
3527 let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
3528
3529 let fs = FakeFs::new(cx.background());
3530 fs.insert_tree(
3531 "/a",
3532 json!({
3533 "b": {},
3534 "c": {},
3535 "d": {},
3536 }),
3537 )
3538 .await;
3539
3540 let tree = Worktree::local(
3541 client,
3542 "/a".as_ref(),
3543 true,
3544 fs,
3545 Default::default(),
3546 &mut cx.to_async(),
3547 )
3548 .await
3549 .unwrap();
3550
3551 let entry = tree
3552 .update(cx, |tree, cx| {
3553 tree.as_local_mut()
3554 .unwrap()
3555 .create_entry("a/e".as_ref(), true, cx)
3556 })
3557 .await
3558 .unwrap();
3559 assert!(entry.is_dir());
3560
3561 cx.foreground().run_until_parked();
3562 tree.read_with(cx, |tree, _| {
3563 assert_eq!(tree.entry_for_path("a/e").unwrap().kind, EntryKind::Dir);
3564 });
3565 }
3566
3567 #[gpui::test(iterations = 100)]
3568 async fn test_random_worktree_changes(cx: &mut TestAppContext, mut rng: StdRng) {
3569 let operations = env::var("OPERATIONS")
3570 .map(|o| o.parse().unwrap())
3571 .unwrap_or(40);
3572 let initial_entries = env::var("INITIAL_ENTRIES")
3573 .map(|o| o.parse().unwrap())
3574 .unwrap_or(20);
3575
3576 let root_dir = Path::new("/test");
3577 let fs = FakeFs::new(cx.background()) as Arc<dyn Fs>;
3578 fs.as_fake().insert_tree(root_dir, json!({})).await;
3579 for _ in 0..initial_entries {
3580 randomly_mutate_tree(&fs, root_dir, 1.0, &mut rng).await;
3581 }
3582 log::info!("generated initial tree");
3583
3584 let next_entry_id = Arc::new(AtomicUsize::default());
3585 let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
3586 let worktree = Worktree::local(
3587 client.clone(),
3588 root_dir,
3589 true,
3590 fs.clone(),
3591 next_entry_id.clone(),
3592 &mut cx.to_async(),
3593 )
3594 .await
3595 .unwrap();
3596
3597 worktree
3598 .update(cx, |tree, _| tree.as_local_mut().unwrap().scan_complete())
3599 .await;
3600
3601 // After the initial scan is complete, the `UpdatedEntries` event can
3602 // be used to follow along with all changes to the worktree's snapshot.
3603 worktree.update(cx, |tree, cx| {
3604 let mut paths = tree
3605 .as_local()
3606 .unwrap()
3607 .paths()
3608 .cloned()
3609 .collect::<Vec<_>>();
3610
3611 cx.subscribe(&worktree, move |tree, _, event, _| {
3612 if let Event::UpdatedEntries(changes) = event {
3613 for (path, change_type) in changes.iter() {
3614 let path = path.clone();
3615 let ix = match paths.binary_search(&path) {
3616 Ok(ix) | Err(ix) => ix,
3617 };
3618 match change_type {
3619 PathChange::Added => {
3620 assert_ne!(paths.get(ix), Some(&path));
3621 paths.insert(ix, path);
3622 }
3623 PathChange::Removed => {
3624 assert_eq!(paths.get(ix), Some(&path));
3625 paths.remove(ix);
3626 }
3627 PathChange::Updated => {
3628 assert_eq!(paths.get(ix), Some(&path));
3629 }
3630 PathChange::AddedOrUpdated => {
3631 if paths[ix] != path {
3632 paths.insert(ix, path);
3633 }
3634 }
3635 }
3636 }
3637 let new_paths = tree.paths().cloned().collect::<Vec<_>>();
3638 assert_eq!(paths, new_paths, "incorrect changes: {:?}", changes);
3639 }
3640 })
3641 .detach();
3642 });
3643
3644 let mut snapshots = Vec::new();
3645 let mut mutations_len = operations;
3646 while mutations_len > 1 {
3647 randomly_mutate_tree(&fs, root_dir, 1.0, &mut rng).await;
3648 let buffered_event_count = fs.as_fake().buffered_event_count().await;
3649 if buffered_event_count > 0 && rng.gen_bool(0.3) {
3650 let len = rng.gen_range(0..=buffered_event_count);
3651 log::info!("flushing {} events", len);
3652 fs.as_fake().flush_events(len).await;
3653 } else {
3654 randomly_mutate_tree(&fs, root_dir, 0.6, &mut rng).await;
3655 mutations_len -= 1;
3656 }
3657
3658 cx.foreground().run_until_parked();
3659 if rng.gen_bool(0.2) {
3660 log::info!("storing snapshot {}", snapshots.len());
3661 let snapshot =
3662 worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
3663 snapshots.push(snapshot);
3664 }
3665 }
3666
3667 log::info!("quiescing");
3668 fs.as_fake().flush_events(usize::MAX).await;
3669 cx.foreground().run_until_parked();
3670 let snapshot = worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
3671 snapshot.check_invariants();
3672
3673 {
3674 let new_worktree = Worktree::local(
3675 client.clone(),
3676 root_dir,
3677 true,
3678 fs.clone(),
3679 next_entry_id,
3680 &mut cx.to_async(),
3681 )
3682 .await
3683 .unwrap();
3684 new_worktree
3685 .update(cx, |tree, _| tree.as_local_mut().unwrap().scan_complete())
3686 .await;
3687 let new_snapshot =
3688 new_worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
3689 assert_eq!(snapshot.to_vec(true), new_snapshot.to_vec(true));
3690 }
3691
3692 for (i, mut prev_snapshot) in snapshots.into_iter().enumerate() {
3693 let include_ignored = rng.gen::<bool>();
3694 if !include_ignored {
3695 let mut entries_by_path_edits = Vec::new();
3696 let mut entries_by_id_edits = Vec::new();
3697 for entry in prev_snapshot
3698 .entries_by_id
3699 .cursor::<()>()
3700 .filter(|e| e.is_ignored)
3701 {
3702 entries_by_path_edits.push(Edit::Remove(PathKey(entry.path.clone())));
3703 entries_by_id_edits.push(Edit::Remove(entry.id));
3704 }
3705
3706 prev_snapshot
3707 .entries_by_path
3708 .edit(entries_by_path_edits, &());
3709 prev_snapshot.entries_by_id.edit(entries_by_id_edits, &());
3710 }
3711
3712 let update = snapshot.build_update(&prev_snapshot, 0, 0, include_ignored);
3713 prev_snapshot.apply_remote_update(update.clone()).unwrap();
3714 assert_eq!(
3715 prev_snapshot.to_vec(include_ignored),
3716 snapshot.to_vec(include_ignored),
3717 "wrong update for snapshot {i}. update: {:?}",
3718 update
3719 );
3720 }
3721 }
3722
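    // Randomly create files and directories, write `.gitignore` files, and rename or
    // delete existing paths, exercising the scanner against arbitrary FS activity.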
3723 async fn randomly_mutate_tree(
3724 fs: &Arc<dyn Fs>,
3725 root_path: &Path,
3726 insertion_probability: f64,
3727 rng: &mut impl Rng,
3728 ) {
3729 let mut files = Vec::new();
3730 let mut dirs = Vec::new();
3731 for path in fs.as_fake().paths().await {
3732 if path.starts_with(root_path) {
3733 if fs.is_file(&path).await {
3734 files.push(path);
3735 } else {
3736 dirs.push(path);
3737 }
3738 }
3739 }
3740
3741 if (files.is_empty() && dirs.len() == 1) || rng.gen_bool(insertion_probability) {
3742 let path = dirs.choose(rng).unwrap();
3743 let new_path = path.join(gen_name(rng));
3744
3745 if rng.gen() {
3746 log::info!(
3747 "creating dir {:?}",
3748 new_path.strip_prefix(root_path).unwrap()
3749 );
3750 fs.create_dir(&new_path).await.unwrap();
3751 } else {
3752 log::info!(
3753 "creating file {:?}",
3754 new_path.strip_prefix(root_path).unwrap()
3755 );
3756 fs.create_file(&new_path, Default::default()).await.unwrap();
3757 }
3758 } else if rng.gen_bool(0.05) {
3759 let ignore_dir_path = dirs.choose(rng).unwrap();
3760 let ignore_path = ignore_dir_path.join(&*GITIGNORE);
3761
3762 let subdirs = dirs
3763 .iter()
3764 .filter(|d| d.starts_with(&ignore_dir_path))
3765 .cloned()
3766 .collect::<Vec<_>>();
3767 let subfiles = files
3768 .iter()
3769 .filter(|d| d.starts_with(&ignore_dir_path))
3770 .cloned()
3771 .collect::<Vec<_>>();
3772 let files_to_ignore = {
3773 let len = rng.gen_range(0..=subfiles.len());
3774 subfiles.choose_multiple(rng, len)
3775 };
3776 let dirs_to_ignore = {
3777 let len = rng.gen_range(0..subdirs.len());
3778 subdirs.choose_multiple(rng, len)
3779 };
3780
3781 let mut ignore_contents = String::new();
3782 for path_to_ignore in files_to_ignore.chain(dirs_to_ignore) {
3783 writeln!(
3784 ignore_contents,
3785 "{}",
3786 path_to_ignore
3787 .strip_prefix(&ignore_dir_path)
3788 .unwrap()
3789 .to_str()
3790 .unwrap()
3791 )
3792 .unwrap();
3793 }
3794 log::info!(
3795 "creating gitignore {:?} with contents:\n{}",
3796 ignore_path.strip_prefix(&root_path).unwrap(),
3797 ignore_contents
3798 );
3799 fs.save(
3800 &ignore_path,
3801 &ignore_contents.as_str().into(),
3802 Default::default(),
3803 )
3804 .await
3805 .unwrap();
3806 } else {
3807 let old_path = {
3808 let file_path = files.choose(rng);
3809 let dir_path = dirs[1..].choose(rng);
3810 file_path.into_iter().chain(dir_path).choose(rng).unwrap()
3811 };
3812
3813 let is_rename = rng.gen();
3814 if is_rename {
3815 let new_path_parent = dirs
3816 .iter()
3817 .filter(|d| !d.starts_with(old_path))
3818 .choose(rng)
3819 .unwrap();
3820
3821 let overwrite_existing_dir =
3822 !old_path.starts_with(&new_path_parent) && rng.gen_bool(0.3);
3823 let new_path = if overwrite_existing_dir {
3824 fs.remove_dir(
3825 &new_path_parent,
3826 RemoveOptions {
3827 recursive: true,
3828 ignore_if_not_exists: true,
3829 },
3830 )
3831 .await
3832 .unwrap();
3833 new_path_parent.to_path_buf()
3834 } else {
3835 new_path_parent.join(gen_name(rng))
3836 };
3837
3838 log::info!(
3839 "renaming {:?} to {}{:?}",
3840 old_path.strip_prefix(&root_path).unwrap(),
3841 if overwrite_existing_dir {
3842 "overwrite "
3843 } else {
3844 ""
3845 },
3846 new_path.strip_prefix(&root_path).unwrap()
3847 );
3848 fs.rename(
3849 &old_path,
3850 &new_path,
3851 fs::RenameOptions {
3852 overwrite: true,
3853 ignore_if_exists: true,
3854 },
3855 )
3856 .await
3857 .unwrap();
3858 } else if fs.is_file(&old_path).await {
3859 log::info!(
3860 "deleting file {:?}",
3861 old_path.strip_prefix(&root_path).unwrap()
3862 );
3863 fs.remove_file(old_path, Default::default()).await.unwrap();
3864 } else {
3865 log::info!(
3866 "deleting dir {:?}",
3867 old_path.strip_prefix(&root_path).unwrap()
3868 );
3869 fs.remove_dir(
3870 &old_path,
3871 RemoveOptions {
3872 recursive: true,
3873 ignore_if_not_exists: true,
3874 },
3875 )
3876 .await
3877 .unwrap();
3878 }
3879 }
3880 }
3881
3882 fn gen_name(rng: &mut impl Rng) -> String {
3883 (0..6)
3884 .map(|_| rng.sample(rand::distributions::Alphanumeric))
3885 .map(char::from)
3886 .collect()
3887 }
3888
3889 impl LocalSnapshot {
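        // Verify that the snapshot's internal indexes agree with one another: the file
        // iterators, the entry orderings produced by different traversal methods, and
        // the `ignores_by_parent_abs_path` map must all be mutually consistent.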
3890 fn check_invariants(&self) {
3891 let mut files = self.files(true, 0);
3892 let mut visible_files = self.files(false, 0);
3893 for entry in self.entries_by_path.cursor::<()>() {
3894 if entry.is_file() {
3895 assert_eq!(files.next().unwrap().inode, entry.inode);
3896 if !entry.is_ignored {
3897 assert_eq!(visible_files.next().unwrap().inode, entry.inode);
3898 }
3899 }
3900 }
3901 assert!(files.next().is_none());
3902 assert!(visible_files.next().is_none());
3903
3904 let mut bfs_paths = Vec::new();
3905 let mut stack = vec![Path::new("")];
3906 while let Some(path) = stack.pop() {
3907 bfs_paths.push(path);
3908 let ix = stack.len();
3909 for child_entry in self.child_entries(path) {
3910 stack.insert(ix, &child_entry.path);
3911 }
3912 }
3913
3914 let dfs_paths_via_iter = self
3915 .entries_by_path
3916 .cursor::<()>()
3917 .map(|e| e.path.as_ref())
3918 .collect::<Vec<_>>();
3919 assert_eq!(bfs_paths, dfs_paths_via_iter);
3920
3921 let dfs_paths_via_traversal = self
3922 .entries(true)
3923 .map(|e| e.path.as_ref())
3924 .collect::<Vec<_>>();
3925 assert_eq!(dfs_paths_via_traversal, dfs_paths_via_iter);
3926
3927 for ignore_parent_abs_path in self.ignores_by_parent_abs_path.keys() {
3928 let ignore_parent_path =
3929 ignore_parent_abs_path.strip_prefix(&self.abs_path).unwrap();
3930 assert!(self.entry_for_path(&ignore_parent_path).is_some());
3931 assert!(self
3932 .entry_for_path(ignore_parent_path.join(&*GITIGNORE))
3933 .is_some());
3934 }
3935 }
3936
3937 fn to_vec(&self, include_ignored: bool) -> Vec<(&Path, u64, bool)> {
3938 let mut paths = Vec::new();
3939 for entry in self.entries_by_path.cursor::<()>() {
3940 if include_ignored || !entry.is_ignored {
3941 paths.push((entry.path.as_ref(), entry.inode, entry.is_ignored));
3942 }
3943 }
3944 paths.sort_by(|a, b| a.0.cmp(b.0));
3945 paths
3946 }
3947 }
3948}