1use super::{ignore::IgnoreStack, DiagnosticSummary};
2use crate::{copy_recursive, ProjectEntryId, RemoveOptions};
3use ::ignore::gitignore::{Gitignore, GitignoreBuilder};
4use anyhow::{anyhow, Context, Result};
5use client::{proto, Client};
6use clock::ReplicaId;
7use collections::{HashMap, VecDeque};
8use fs::LineEnding;
9use fs::{repository::GitRepository, Fs};
10use futures::{
11 channel::{
12 mpsc::{self, UnboundedReceiver, UnboundedSender},
13 oneshot,
14 },
15 select_biased, Stream, StreamExt,
16};
17use fuzzy::CharBag;
18use git::{DOT_GIT, GITIGNORE};
19use gpui::{
20 executor, AppContext, AsyncAppContext, Entity, ModelContext, ModelHandle, MutableAppContext,
21 Task,
22};
23use language::File as _;
24use language::{
25 proto::{
26 deserialize_fingerprint, deserialize_version, serialize_fingerprint, serialize_line_ending,
27 serialize_version,
28 },
29 Buffer, DiagnosticEntry, PointUtf16, Rope, RopeFingerprint, Unclipped,
30};
31use parking_lot::Mutex;
32use postage::barrier;
33use postage::{
34 prelude::{Sink as _, Stream as _},
35 watch,
36};
37use smol::channel::{self, Sender};
38use std::{
39 any::Any,
40 cmp::{self, Ordering},
41 convert::TryFrom,
42 ffi::OsStr,
43 fmt,
44 future::Future,
45 mem,
46 ops::{Deref, DerefMut},
47 path::{Path, PathBuf},
48 sync::{atomic::AtomicUsize, Arc},
49 task::Poll,
50 time::{Duration, SystemTime},
51};
52use sum_tree::{Bias, Edit, SeekTarget, SumTree, TreeMap, TreeSet};
53use util::paths::HOME;
54use util::{ResultExt, TryFutureExt};
55
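/// Identifies a worktree within a project. For a local worktree this is derived from the
/// model's handle id; for a remote worktree it is the id assigned by the host.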
56#[derive(Copy, Clone, PartialEq, Eq, Debug, Hash, PartialOrd, Ord)]
57pub struct WorktreeId(usize);
58
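/// A worktree is either backed by the local filesystem or replicated from a remote
/// collaborator's machine over RPC.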
59pub enum Worktree {
60 Local(LocalWorktree),
61 Remote(RemoteWorktree),
62}
63
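/// A worktree rooted in the local filesystem. A background scanner task watches for
/// filesystem events and streams updated snapshots back to this model.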
64pub struct LocalWorktree {
65 snapshot: LocalSnapshot,
66 path_changes_tx: mpsc::UnboundedSender<(Vec<PathBuf>, barrier::Sender)>,
67 is_scanning: (watch::Sender<bool>, watch::Receiver<bool>),
68 _background_scanner_task: Task<()>,
69 share: Option<ShareState>,
70 diagnostics: HashMap<Arc<Path>, Vec<DiagnosticEntry<Unclipped<PointUtf16>>>>,
71 diagnostic_summaries: TreeMap<PathKey, DiagnosticSummary>,
72 client: Arc<Client>,
73 fs: Arc<dyn Fs>,
74 visible: bool,
75}
76
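/// A read-only replica of a worktree shared by a remote collaborator. Updates arrive as
/// `proto::UpdateWorktree` messages, are applied to a background snapshot, and are then
/// copied into this model.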
77pub struct RemoteWorktree {
78 snapshot: Snapshot,
79 background_snapshot: Arc<Mutex<Snapshot>>,
80 project_id: u64,
81 client: Arc<Client>,
82 updates_tx: Option<UnboundedSender<proto::UpdateWorktree>>,
83 snapshot_subscriptions: VecDeque<(usize, oneshot::Sender<()>)>,
84 replica_id: ReplicaId,
85 diagnostic_summaries: TreeMap<PathKey, DiagnosticSummary>,
86 visible: bool,
87 disconnected: bool,
88}
89
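/// A point-in-time view of a worktree: its root metadata and entries, stored in two sum
/// trees (one ordered by path, one ordered by entry id).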
90#[derive(Clone)]
91pub struct Snapshot {
92 id: WorktreeId,
93 abs_path: Arc<Path>,
94 root_name: String,
95 root_char_bag: CharBag,
96 entries_by_path: SumTree<Entry>,
97 entries_by_id: SumTree<PathEntry>,
98 scan_id: usize,
99 completed_scan_id: usize,
100}
101
102#[derive(Clone)]
103pub struct GitRepositoryEntry {
104 pub(crate) repo: Arc<Mutex<dyn GitRepository>>,
105
106 pub(crate) scan_id: usize,
    // Path to the folder containing the .git file or directory.
108 pub(crate) content_path: Arc<Path>,
109 // Path to the actual .git folder.
    // Note: if .git is a file, this points to the folder indicated by that file.
111 pub(crate) git_dir_path: Arc<Path>,
112}
113
114impl std::fmt::Debug for GitRepositoryEntry {
115 fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
116 f.debug_struct("GitRepositoryEntry")
117 .field("content_path", &self.content_path)
118 .field("git_dir_path", &self.git_dir_path)
119 .finish()
120 }
121}
122
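/// A `Snapshot` plus the state that only the local worktree and its background scanner
/// need: parsed `.gitignore` files keyed by parent directory, discovered git repositories,
/// and bookkeeping for reusing entry ids across rescans.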
123#[derive(Debug)]
124pub struct LocalSnapshot {
125 ignores_by_parent_abs_path: HashMap<Arc<Path>, (Arc<Gitignore>, usize)>,
126 git_repositories: Vec<GitRepositoryEntry>,
127 removed_entry_ids: HashMap<u64, ProjectEntryId>,
128 next_entry_id: Arc<AtomicUsize>,
129 snapshot: Snapshot,
130}
131
132impl Clone for LocalSnapshot {
133 fn clone(&self) -> Self {
134 Self {
135 ignores_by_parent_abs_path: self.ignores_by_parent_abs_path.clone(),
136 git_repositories: self.git_repositories.iter().cloned().collect(),
137 removed_entry_ids: self.removed_entry_ids.clone(),
138 next_entry_id: self.next_entry_id.clone(),
139 snapshot: self.snapshot.clone(),
140 }
141 }
142}
143
144impl Deref for LocalSnapshot {
145 type Target = Snapshot;
146
147 fn deref(&self) -> &Self::Target {
148 &self.snapshot
149 }
150}
151
152impl DerefMut for LocalSnapshot {
153 fn deref_mut(&mut self) -> &mut Self::Target {
154 &mut self.snapshot
155 }
156}
157
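/// Progress reports sent from the background scanner to the `LocalWorktree` model.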
158enum ScanState {
159 /// The worktree is performing its initial scan of the filesystem.
160 Initializing {
161 snapshot: LocalSnapshot,
162 barrier: Option<barrier::Sender>,
163 },
164 Initialized {
165 snapshot: LocalSnapshot,
166 },
167 /// The worktree is updating in response to filesystem events.
168 Updating,
169 Updated {
170 snapshot: LocalSnapshot,
171 changes: HashMap<Arc<Path>, PathChange>,
172 barrier: Option<barrier::Sender>,
173 },
174}
175
176struct ShareState {
177 project_id: u64,
178 snapshots_tx: watch::Sender<LocalSnapshot>,
179 resume_updates: watch::Sender<()>,
180 _maintain_remote_snapshot: Task<Option<()>>,
181}
182
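/// Events emitted by a worktree when its entries or git repositories change.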
183pub enum Event {
184 UpdatedEntries(HashMap<Arc<Path>, PathChange>),
185 UpdatedGitRepositories(Vec<GitRepositoryEntry>),
186}
187
188impl Entity for Worktree {
189 type Event = Event;
190}
191
192impl Worktree {
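    /// Creates a worktree backed by the local filesystem, rooted at `path`, and spawns a
    /// background scanner that keeps its snapshot up to date.
    ///
    /// A minimal usage sketch (illustrative only; assumes a `client`, an `Fs`
    /// implementation, and an `AsyncAppContext` are already in scope):
    ///
    /// ```ignore
    /// let worktree = Worktree::local(
    ///     client.clone(),
    ///     Path::new("/path/to/project"),
    ///     true, // visible
    ///     fs.clone(),
    ///     Arc::new(AtomicUsize::new(0)),
    ///     &mut cx,
    /// )
    /// .await?;
    /// ```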
193 pub async fn local(
194 client: Arc<Client>,
195 path: impl Into<Arc<Path>>,
196 visible: bool,
197 fs: Arc<dyn Fs>,
198 next_entry_id: Arc<AtomicUsize>,
199 cx: &mut AsyncAppContext,
200 ) -> Result<ModelHandle<Self>> {
        // After determining whether the root entry is a file or a directory, populate the
        // snapshot's "root name", which is used for fuzzy matching.
203 let abs_path = path.into();
204 let metadata = fs
205 .metadata(&abs_path)
206 .await
207 .context("failed to stat worktree path")?;
208
209 Ok(cx.add_model(move |cx: &mut ModelContext<Worktree>| {
210 let root_name = abs_path
211 .file_name()
212 .map_or(String::new(), |f| f.to_string_lossy().to_string());
213
214 let mut snapshot = LocalSnapshot {
215 ignores_by_parent_abs_path: Default::default(),
216 git_repositories: Default::default(),
217 removed_entry_ids: Default::default(),
218 next_entry_id,
219 snapshot: Snapshot {
220 id: WorktreeId::from_usize(cx.model_id()),
221 abs_path: abs_path.clone(),
222 root_name: root_name.clone(),
223 root_char_bag: root_name.chars().map(|c| c.to_ascii_lowercase()).collect(),
224 entries_by_path: Default::default(),
225 entries_by_id: Default::default(),
226 scan_id: 0,
227 completed_scan_id: 0,
228 },
229 };
230
231 if let Some(metadata) = metadata {
232 snapshot.insert_entry(
233 Entry::new(
234 Arc::from(Path::new("")),
235 &metadata,
236 &snapshot.next_entry_id,
237 snapshot.root_char_bag,
238 ),
239 fs.as_ref(),
240 );
241 }
242
243 let (path_changes_tx, path_changes_rx) = mpsc::unbounded();
244 let (scan_states_tx, mut scan_states_rx) = mpsc::unbounded();
245
246 cx.spawn_weak(|this, mut cx| async move {
247 while let Some((state, this)) = scan_states_rx.next().await.zip(this.upgrade(&cx)) {
248 this.update(&mut cx, |this, cx| {
249 this.as_local_mut()
250 .unwrap()
251 .background_scanner_updated(state, cx);
252 });
253 }
254 })
255 .detach();
256
257 let background_scanner_task = cx.background().spawn({
258 let fs = fs.clone();
259 let snapshot = snapshot.clone();
260 let background = cx.background().clone();
261 async move {
262 let events = fs.watch(&abs_path, Duration::from_millis(100)).await;
263 BackgroundScanner::new(snapshot, scan_states_tx, fs, background)
264 .run(events, path_changes_rx)
265 .await;
266 }
267 });
268
269 Worktree::Local(LocalWorktree {
270 snapshot,
271 is_scanning: watch::channel_with(true),
272 share: None,
273 path_changes_tx,
274 _background_scanner_task: background_scanner_task,
275 diagnostics: Default::default(),
276 diagnostic_summaries: Default::default(),
277 client,
278 fs,
279 visible,
280 })
281 }))
282 }
283
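    /// Creates a worktree that mirrors a worktree shared by a remote collaborator.
    /// Updates received via `update_from_remote` are applied to a background snapshot and
    /// then copied into this model on the foreground thread.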
284 pub fn remote(
285 project_remote_id: u64,
286 replica_id: ReplicaId,
287 worktree: proto::WorktreeMetadata,
288 client: Arc<Client>,
289 cx: &mut MutableAppContext,
290 ) -> ModelHandle<Self> {
291 cx.add_model(|cx: &mut ModelContext<Self>| {
292 let snapshot = Snapshot {
293 id: WorktreeId(worktree.id as usize),
294 abs_path: Arc::from(PathBuf::from(worktree.abs_path)),
295 root_name: worktree.root_name.clone(),
296 root_char_bag: worktree
297 .root_name
298 .chars()
299 .map(|c| c.to_ascii_lowercase())
300 .collect(),
301 entries_by_path: Default::default(),
302 entries_by_id: Default::default(),
303 scan_id: 0,
304 completed_scan_id: 0,
305 };
306
307 let (updates_tx, mut updates_rx) = mpsc::unbounded();
308 let background_snapshot = Arc::new(Mutex::new(snapshot.clone()));
309 let (mut snapshot_updated_tx, mut snapshot_updated_rx) = watch::channel();
310
311 cx.background()
312 .spawn({
313 let background_snapshot = background_snapshot.clone();
314 async move {
315 while let Some(update) = updates_rx.next().await {
316 if let Err(error) =
317 background_snapshot.lock().apply_remote_update(update)
318 {
319 log::error!("error applying worktree update: {}", error);
320 }
321 snapshot_updated_tx.send(()).await.ok();
322 }
323 }
324 })
325 .detach();
326
327 cx.spawn_weak(|this, mut cx| async move {
328 while (snapshot_updated_rx.recv().await).is_some() {
329 if let Some(this) = this.upgrade(&cx) {
330 this.update(&mut cx, |this, cx| {
331 let this = this.as_remote_mut().unwrap();
332 this.snapshot = this.background_snapshot.lock().clone();
333 cx.emit(Event::UpdatedEntries(Default::default()));
334 cx.notify();
335 while let Some((scan_id, _)) = this.snapshot_subscriptions.front() {
336 if this.observed_snapshot(*scan_id) {
337 let (_, tx) = this.snapshot_subscriptions.pop_front().unwrap();
338 let _ = tx.send(());
339 } else {
340 break;
341 }
342 }
343 });
344 } else {
345 break;
346 }
347 }
348 })
349 .detach();
350
351 Worktree::Remote(RemoteWorktree {
352 project_id: project_remote_id,
353 replica_id,
354 snapshot: snapshot.clone(),
355 background_snapshot,
356 updates_tx: Some(updates_tx),
357 snapshot_subscriptions: Default::default(),
358 client: client.clone(),
359 diagnostic_summaries: Default::default(),
360 visible: worktree.visible,
361 disconnected: false,
362 })
363 })
364 }
365
366 pub fn as_local(&self) -> Option<&LocalWorktree> {
367 if let Worktree::Local(worktree) = self {
368 Some(worktree)
369 } else {
370 None
371 }
372 }
373
374 pub fn as_remote(&self) -> Option<&RemoteWorktree> {
375 if let Worktree::Remote(worktree) = self {
376 Some(worktree)
377 } else {
378 None
379 }
380 }
381
382 pub fn as_local_mut(&mut self) -> Option<&mut LocalWorktree> {
383 if let Worktree::Local(worktree) = self {
384 Some(worktree)
385 } else {
386 None
387 }
388 }
389
390 pub fn as_remote_mut(&mut self) -> Option<&mut RemoteWorktree> {
391 if let Worktree::Remote(worktree) = self {
392 Some(worktree)
393 } else {
394 None
395 }
396 }
397
398 pub fn is_local(&self) -> bool {
399 matches!(self, Worktree::Local(_))
400 }
401
402 pub fn is_remote(&self) -> bool {
403 !self.is_local()
404 }
405
406 pub fn snapshot(&self) -> Snapshot {
407 match self {
408 Worktree::Local(worktree) => worktree.snapshot().snapshot,
409 Worktree::Remote(worktree) => worktree.snapshot(),
410 }
411 }
412
413 pub fn scan_id(&self) -> usize {
414 match self {
415 Worktree::Local(worktree) => worktree.snapshot.scan_id,
416 Worktree::Remote(worktree) => worktree.snapshot.scan_id,
417 }
418 }
419
420 pub fn completed_scan_id(&self) -> usize {
421 match self {
422 Worktree::Local(worktree) => worktree.snapshot.completed_scan_id,
423 Worktree::Remote(worktree) => worktree.snapshot.completed_scan_id,
424 }
425 }
426
427 pub fn is_visible(&self) -> bool {
428 match self {
429 Worktree::Local(worktree) => worktree.visible,
430 Worktree::Remote(worktree) => worktree.visible,
431 }
432 }
433
434 pub fn replica_id(&self) -> ReplicaId {
435 match self {
436 Worktree::Local(_) => 0,
437 Worktree::Remote(worktree) => worktree.replica_id,
438 }
439 }
440
441 pub fn diagnostic_summaries(
442 &self,
443 ) -> impl Iterator<Item = (Arc<Path>, DiagnosticSummary)> + '_ {
444 match self {
445 Worktree::Local(worktree) => &worktree.diagnostic_summaries,
446 Worktree::Remote(worktree) => &worktree.diagnostic_summaries,
447 }
448 .iter()
449 .map(|(path, summary)| (path.0.clone(), *summary))
450 }
451
452 pub fn abs_path(&self) -> Arc<Path> {
453 match self {
454 Worktree::Local(worktree) => worktree.abs_path.clone(),
455 Worktree::Remote(worktree) => worktree.abs_path.clone(),
456 }
457 }
458}
459
460impl LocalWorktree {
461 pub fn contains_abs_path(&self, path: &Path) -> bool {
462 path.starts_with(&self.abs_path)
463 }
464
465 fn absolutize(&self, path: &Path) -> PathBuf {
466 if path.file_name().is_some() {
467 self.abs_path.join(path)
468 } else {
469 self.abs_path.to_path_buf()
470 }
471 }
472
473 pub(crate) fn load_buffer(
474 &mut self,
475 path: &Path,
476 cx: &mut ModelContext<Worktree>,
477 ) -> Task<Result<ModelHandle<Buffer>>> {
478 let path = Arc::from(path);
479 cx.spawn(move |this, mut cx| async move {
480 let (file, contents, diff_base) = this
481 .update(&mut cx, |t, cx| t.as_local().unwrap().load(&path, cx))
482 .await?;
483 Ok(cx.add_model(|cx| {
484 let mut buffer = Buffer::from_file(0, contents, diff_base, Arc::new(file), cx);
485 buffer.git_diff_recalc(cx);
486 buffer
487 }))
488 })
489 }
490
491 pub fn diagnostics_for_path(
492 &self,
493 path: &Path,
494 ) -> Option<Vec<DiagnosticEntry<Unclipped<PointUtf16>>>> {
495 self.diagnostics.get(path).cloned()
496 }
497
498 pub fn update_diagnostics(
499 &mut self,
500 language_server_id: usize,
501 worktree_path: Arc<Path>,
502 diagnostics: Vec<DiagnosticEntry<Unclipped<PointUtf16>>>,
503 _: &mut ModelContext<Worktree>,
504 ) -> Result<bool> {
505 self.diagnostics.remove(&worktree_path);
506 let old_summary = self
507 .diagnostic_summaries
508 .remove(&PathKey(worktree_path.clone()))
509 .unwrap_or_default();
510 let new_summary = DiagnosticSummary::new(language_server_id, &diagnostics);
511 if !new_summary.is_empty() {
512 self.diagnostic_summaries
513 .insert(PathKey(worktree_path.clone()), new_summary);
514 self.diagnostics.insert(worktree_path.clone(), diagnostics);
515 }
516
517 let updated = !old_summary.is_empty() || !new_summary.is_empty();
518 if updated {
519 if let Some(share) = self.share.as_ref() {
520 self.client
521 .send(proto::UpdateDiagnosticSummary {
522 project_id: share.project_id,
523 worktree_id: self.id().to_proto(),
524 summary: Some(proto::DiagnosticSummary {
525 path: worktree_path.to_string_lossy().to_string(),
526 language_server_id: language_server_id as u64,
527 error_count: new_summary.error_count as u32,
528 warning_count: new_summary.warning_count as u32,
529 }),
530 })
531 .log_err();
532 }
533 }
534
535 Ok(updated)
536 }
537
538 fn background_scanner_updated(
539 &mut self,
540 scan_state: ScanState,
541 cx: &mut ModelContext<Worktree>,
542 ) {
543 match scan_state {
544 ScanState::Initializing { snapshot, barrier } => {
545 *self.is_scanning.0.borrow_mut() = true;
546 self.set_snapshot(snapshot, cx);
547 drop(barrier);
548 }
549 ScanState::Initialized { snapshot } => {
550 *self.is_scanning.0.borrow_mut() = false;
551 self.set_snapshot(snapshot, cx);
552 }
553 ScanState::Updating => {
554 *self.is_scanning.0.borrow_mut() = true;
555 }
556 ScanState::Updated {
557 snapshot,
558 changes,
559 barrier,
560 } => {
561 *self.is_scanning.0.borrow_mut() = false;
562 cx.emit(Event::UpdatedEntries(changes));
563 self.set_snapshot(snapshot, cx);
564 drop(barrier);
565 }
566 }
567 cx.notify();
568 }
569
570 fn set_snapshot(&mut self, new_snapshot: LocalSnapshot, cx: &mut ModelContext<Worktree>) {
571 let updated_repos = Self::changed_repos(
572 &self.snapshot.git_repositories,
573 &new_snapshot.git_repositories,
574 );
575 self.snapshot = new_snapshot;
576
577 if let Some(share) = self.share.as_mut() {
578 *share.snapshots_tx.borrow_mut() = self.snapshot.clone();
579 }
580
581 if !updated_repos.is_empty() {
582 cx.emit(Event::UpdatedGitRepositories(updated_repos));
583 }
584 }
585
586 fn changed_repos(
587 old_repos: &[GitRepositoryEntry],
588 new_repos: &[GitRepositoryEntry],
589 ) -> Vec<GitRepositoryEntry> {
590 fn diff<'a>(
591 a: &'a [GitRepositoryEntry],
592 b: &'a [GitRepositoryEntry],
593 updated: &mut HashMap<&'a Path, GitRepositoryEntry>,
594 ) {
595 for a_repo in a {
596 let matched = b.iter().find(|b_repo| {
597 a_repo.git_dir_path == b_repo.git_dir_path && a_repo.scan_id == b_repo.scan_id
598 });
599
600 if matched.is_none() {
601 updated.insert(a_repo.git_dir_path.as_ref(), a_repo.clone());
602 }
603 }
604 }
605
606 let mut updated = HashMap::<&Path, GitRepositoryEntry>::default();
607
608 diff(old_repos, new_repos, &mut updated);
609 diff(new_repos, old_repos, &mut updated);
610
611 updated.into_values().collect()
612 }
613
614 pub fn scan_complete(&self) -> impl Future<Output = ()> {
615 let mut is_scanning_rx = self.is_scanning.1.clone();
616 async move {
617 let mut is_scanning = is_scanning_rx.borrow().clone();
618 while is_scanning {
619 if let Some(value) = is_scanning_rx.recv().await {
620 is_scanning = value;
621 } else {
622 break;
623 }
624 }
625 }
626 }
627
628 pub fn snapshot(&self) -> LocalSnapshot {
629 self.snapshot.clone()
630 }
631
632 pub fn metadata_proto(&self) -> proto::WorktreeMetadata {
633 proto::WorktreeMetadata {
634 id: self.id().to_proto(),
635 root_name: self.root_name().to_string(),
636 visible: self.visible,
637 abs_path: self.abs_path().as_os_str().to_string_lossy().into(),
638 }
639 }
640
641 fn load(
642 &self,
643 path: &Path,
644 cx: &mut ModelContext<Worktree>,
645 ) -> Task<Result<(File, String, Option<String>)>> {
646 let handle = cx.handle();
647 let path = Arc::from(path);
648 let abs_path = self.absolutize(&path);
649 let fs = self.fs.clone();
650 let snapshot = self.snapshot();
651
652 cx.spawn(|this, mut cx| async move {
653 let text = fs.load(&abs_path).await?;
654
655 let diff_base = if let Some(repo) = snapshot.repo_for(&path) {
656 if let Ok(repo_relative) = path.strip_prefix(repo.content_path) {
657 let repo_relative = repo_relative.to_owned();
658 cx.background()
659 .spawn(async move { repo.repo.lock().load_index_text(&repo_relative) })
660 .await
661 } else {
662 None
663 }
664 } else {
665 None
666 };
667
668 // Eagerly populate the snapshot with an updated entry for the loaded file
669 let entry = this
670 .update(&mut cx, |this, cx| {
671 this.as_local().unwrap().refresh_entry(path, None, cx)
672 })
673 .await?;
674
675 Ok((
676 File {
677 entry_id: entry.id,
678 worktree: handle,
679 path: entry.path,
680 mtime: entry.mtime,
681 is_local: true,
682 is_deleted: false,
683 },
684 text,
685 diff_base,
686 ))
687 })
688 }
689
690 pub fn save_buffer(
691 &self,
692 buffer_handle: ModelHandle<Buffer>,
693 path: Arc<Path>,
694 has_changed_file: bool,
695 cx: &mut ModelContext<Worktree>,
696 ) -> Task<Result<(clock::Global, RopeFingerprint, SystemTime)>> {
697 let handle = cx.handle();
698 let buffer = buffer_handle.read(cx);
699
700 let rpc = self.client.clone();
701 let buffer_id = buffer.remote_id();
702 let project_id = self.share.as_ref().map(|share| share.project_id);
703
704 let text = buffer.as_rope().clone();
705 let fingerprint = text.fingerprint();
706 let version = buffer.version();
707 let save = self.write_file(path, text, buffer.line_ending(), cx);
708
709 cx.as_mut().spawn(|mut cx| async move {
710 let entry = save.await?;
711
712 if has_changed_file {
713 let new_file = Arc::new(File {
714 entry_id: entry.id,
715 worktree: handle,
716 path: entry.path,
717 mtime: entry.mtime,
718 is_local: true,
719 is_deleted: false,
720 });
721
722 if let Some(project_id) = project_id {
723 rpc.send(proto::UpdateBufferFile {
724 project_id,
725 buffer_id,
726 file: Some(new_file.to_proto()),
727 })
728 .log_err();
729 }
730
                buffer_handle.update(&mut cx, |buffer, cx| {
                    buffer.file_updated(new_file, cx).detach();
                });
736 }
737
738 if let Some(project_id) = project_id {
739 rpc.send(proto::BufferSaved {
740 project_id,
741 buffer_id,
742 version: serialize_version(&version),
743 mtime: Some(entry.mtime.into()),
744 fingerprint: serialize_fingerprint(fingerprint),
745 })?;
746 }
747
748 buffer_handle.update(&mut cx, |buffer, cx| {
749 buffer.did_save(version.clone(), fingerprint, entry.mtime, cx);
750 });
751
752 Ok((version, fingerprint, entry.mtime))
753 })
754 }
755
756 pub fn create_entry(
757 &self,
758 path: impl Into<Arc<Path>>,
759 is_dir: bool,
760 cx: &mut ModelContext<Worktree>,
761 ) -> Task<Result<Entry>> {
762 let path = path.into();
763 let abs_path = self.absolutize(&path);
764 let fs = self.fs.clone();
765 let write = cx.background().spawn(async move {
766 if is_dir {
767 fs.create_dir(&abs_path).await
768 } else {
769 fs.save(&abs_path, &Default::default(), Default::default())
770 .await
771 }
772 });
773
774 cx.spawn(|this, mut cx| async move {
775 write.await?;
776 this.update(&mut cx, |this, cx| {
777 this.as_local_mut().unwrap().refresh_entry(path, None, cx)
778 })
779 .await
780 })
781 }
782
783 pub fn write_file(
784 &self,
785 path: impl Into<Arc<Path>>,
786 text: Rope,
787 line_ending: LineEnding,
788 cx: &mut ModelContext<Worktree>,
789 ) -> Task<Result<Entry>> {
790 let path = path.into();
791 let abs_path = self.absolutize(&path);
792 let fs = self.fs.clone();
793 let write = cx
794 .background()
795 .spawn(async move { fs.save(&abs_path, &text, line_ending).await });
796
797 cx.spawn(|this, mut cx| async move {
798 write.await?;
799 this.update(&mut cx, |this, cx| {
800 this.as_local_mut().unwrap().refresh_entry(path, None, cx)
801 })
802 .await
803 })
804 }
805
806 pub fn delete_entry(
807 &self,
808 entry_id: ProjectEntryId,
809 cx: &mut ModelContext<Worktree>,
810 ) -> Option<Task<Result<()>>> {
811 let entry = self.entry_for_id(entry_id)?.clone();
812 let abs_path = self.abs_path.clone();
813 let fs = self.fs.clone();
814
815 let delete = cx.background().spawn(async move {
816 let mut abs_path = fs.canonicalize(&abs_path).await?;
817 if entry.path.file_name().is_some() {
818 abs_path = abs_path.join(&entry.path);
819 }
820 if entry.is_file() {
821 fs.remove_file(&abs_path, Default::default()).await?;
822 } else {
823 fs.remove_dir(
824 &abs_path,
825 RemoveOptions {
826 recursive: true,
827 ignore_if_not_exists: false,
828 },
829 )
830 .await?;
831 }
832 anyhow::Ok(abs_path)
833 });
834
835 Some(cx.spawn(|this, mut cx| async move {
836 let abs_path = delete.await?;
837 let (tx, mut rx) = barrier::channel();
838 this.update(&mut cx, |this, _| {
839 this.as_local_mut()
840 .unwrap()
841 .path_changes_tx
842 .unbounded_send((vec![abs_path], tx))
843 .unwrap();
844 });
845 rx.recv().await;
846 Ok(())
847 }))
848 }
849
850 pub fn rename_entry(
851 &self,
852 entry_id: ProjectEntryId,
853 new_path: impl Into<Arc<Path>>,
854 cx: &mut ModelContext<Worktree>,
855 ) -> Option<Task<Result<Entry>>> {
856 let old_path = self.entry_for_id(entry_id)?.path.clone();
857 let new_path = new_path.into();
858 let abs_old_path = self.absolutize(&old_path);
859 let abs_new_path = self.absolutize(&new_path);
860 let fs = self.fs.clone();
861 let rename = cx.background().spawn(async move {
862 fs.rename(&abs_old_path, &abs_new_path, Default::default())
863 .await
864 });
865
866 Some(cx.spawn(|this, mut cx| async move {
867 rename.await?;
868 this.update(&mut cx, |this, cx| {
869 this.as_local_mut()
870 .unwrap()
871 .refresh_entry(new_path.clone(), Some(old_path), cx)
872 })
873 .await
874 }))
875 }
876
877 pub fn copy_entry(
878 &self,
879 entry_id: ProjectEntryId,
880 new_path: impl Into<Arc<Path>>,
881 cx: &mut ModelContext<Worktree>,
882 ) -> Option<Task<Result<Entry>>> {
883 let old_path = self.entry_for_id(entry_id)?.path.clone();
884 let new_path = new_path.into();
885 let abs_old_path = self.absolutize(&old_path);
886 let abs_new_path = self.absolutize(&new_path);
887 let fs = self.fs.clone();
888 let copy = cx.background().spawn(async move {
889 copy_recursive(
890 fs.as_ref(),
891 &abs_old_path,
892 &abs_new_path,
893 Default::default(),
894 )
895 .await
896 });
897
898 Some(cx.spawn(|this, mut cx| async move {
899 copy.await?;
900 this.update(&mut cx, |this, cx| {
901 this.as_local_mut()
902 .unwrap()
903 .refresh_entry(new_path.clone(), None, cx)
904 })
905 .await
906 }))
907 }
908
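    /// Asks the background scanner to rescan `path` (and `old_path`, when renaming), waits
    /// for the rescan to finish, and returns the refreshed entry.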
909 fn refresh_entry(
910 &self,
911 path: Arc<Path>,
912 old_path: Option<Arc<Path>>,
913 cx: &mut ModelContext<Worktree>,
914 ) -> Task<Result<Entry>> {
915 let fs = self.fs.clone();
916 let abs_root_path = self.abs_path.clone();
917 let path_changes_tx = self.path_changes_tx.clone();
918 cx.spawn_weak(move |this, mut cx| async move {
919 let abs_path = fs.canonicalize(&abs_root_path).await?;
920 let mut paths = Vec::with_capacity(2);
921 paths.push(if path.file_name().is_some() {
922 abs_path.join(&path)
923 } else {
924 abs_path.clone()
925 });
926 if let Some(old_path) = old_path {
927 paths.push(if old_path.file_name().is_some() {
928 abs_path.join(&old_path)
929 } else {
930 abs_path.clone()
931 });
932 }
933
934 let (tx, mut rx) = barrier::channel();
935 path_changes_tx.unbounded_send((paths, tx)).unwrap();
936 rx.recv().await;
937 this.upgrade(&cx)
938 .ok_or_else(|| anyhow!("worktree was dropped"))?
939 .update(&mut cx, |this, _| {
940 this.entry_for_path(path)
941 .cloned()
942 .ok_or_else(|| anyhow!("failed to read path after update"))
943 })
944 })
945 }
946
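    /// Starts (or resumes) streaming snapshot updates for this worktree to the server under
    /// the given `project_id`. The returned task resolves once the initial snapshot has
    /// been sent.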
947 pub fn share(&mut self, project_id: u64, cx: &mut ModelContext<Worktree>) -> Task<Result<()>> {
948 let (share_tx, share_rx) = oneshot::channel();
949
950 if let Some(share) = self.share.as_mut() {
951 let _ = share_tx.send(());
952 *share.resume_updates.borrow_mut() = ();
953 } else {
954 let (snapshots_tx, mut snapshots_rx) = watch::channel_with(self.snapshot());
955 let (resume_updates_tx, mut resume_updates_rx) = watch::channel();
956 let worktree_id = cx.model_id() as u64;
957
958 for (path, summary) in self.diagnostic_summaries.iter() {
959 if let Err(e) = self.client.send(proto::UpdateDiagnosticSummary {
960 project_id,
961 worktree_id,
962 summary: Some(summary.to_proto(&path.0)),
963 }) {
964 return Task::ready(Err(e));
965 }
966 }
967
968 let _maintain_remote_snapshot = cx.background().spawn({
969 let client = self.client.clone();
970 async move {
971 let mut share_tx = Some(share_tx);
972 let mut prev_snapshot = LocalSnapshot {
973 ignores_by_parent_abs_path: Default::default(),
974 git_repositories: Default::default(),
975 removed_entry_ids: Default::default(),
976 next_entry_id: Default::default(),
977 snapshot: Snapshot {
978 id: WorktreeId(worktree_id as usize),
979 abs_path: Path::new("").into(),
980 root_name: Default::default(),
981 root_char_bag: Default::default(),
982 entries_by_path: Default::default(),
983 entries_by_id: Default::default(),
984 scan_id: 0,
985 completed_scan_id: 0,
986 },
987 };
988 while let Some(snapshot) = snapshots_rx.recv().await {
989 #[cfg(any(test, feature = "test-support"))]
990 const MAX_CHUNK_SIZE: usize = 2;
991 #[cfg(not(any(test, feature = "test-support")))]
992 const MAX_CHUNK_SIZE: usize = 256;
993
994 let update =
995 snapshot.build_update(&prev_snapshot, project_id, worktree_id, true);
996 for update in proto::split_worktree_update(update, MAX_CHUNK_SIZE) {
997 let _ = resume_updates_rx.try_recv();
998 while let Err(error) = client.request(update.clone()).await {
999 log::error!("failed to send worktree update: {}", error);
1000 log::info!("waiting to resume updates");
1001 if resume_updates_rx.next().await.is_none() {
1002 return Ok(());
1003 }
1004 }
1005 }
1006
1007 if let Some(share_tx) = share_tx.take() {
1008 let _ = share_tx.send(());
1009 }
1010
1011 prev_snapshot = snapshot;
1012 }
1013
1014 Ok::<_, anyhow::Error>(())
1015 }
1016 .log_err()
1017 });
1018
1019 self.share = Some(ShareState {
1020 project_id,
1021 snapshots_tx,
1022 resume_updates: resume_updates_tx,
1023 _maintain_remote_snapshot,
1024 });
1025 }
1026
1027 cx.foreground()
1028 .spawn(async move { share_rx.await.map_err(|_| anyhow!("share ended")) })
1029 }
1030
1031 pub fn unshare(&mut self) {
1032 self.share.take();
1033 }
1034
1035 pub fn is_shared(&self) -> bool {
1036 self.share.is_some()
1037 }
1038}
1039
1040impl RemoteWorktree {
1041 fn snapshot(&self) -> Snapshot {
1042 self.snapshot.clone()
1043 }
1044
1045 pub fn disconnected_from_host(&mut self) {
1046 self.updates_tx.take();
1047 self.snapshot_subscriptions.clear();
1048 self.disconnected = true;
1049 }
1050
1051 pub fn save_buffer(
1052 &self,
1053 buffer_handle: ModelHandle<Buffer>,
1054 cx: &mut ModelContext<Worktree>,
1055 ) -> Task<Result<(clock::Global, RopeFingerprint, SystemTime)>> {
1056 let buffer = buffer_handle.read(cx);
1057 let buffer_id = buffer.remote_id();
1058 let version = buffer.version();
1059 let rpc = self.client.clone();
1060 let project_id = self.project_id;
1061 cx.as_mut().spawn(|mut cx| async move {
1062 let response = rpc
1063 .request(proto::SaveBuffer {
1064 project_id,
1065 buffer_id,
1066 version: serialize_version(&version),
1067 })
1068 .await?;
1069 let version = deserialize_version(response.version);
1070 let fingerprint = deserialize_fingerprint(&response.fingerprint)?;
1071 let mtime = response
1072 .mtime
1073 .ok_or_else(|| anyhow!("missing mtime"))?
1074 .into();
1075
1076 buffer_handle.update(&mut cx, |buffer, cx| {
1077 buffer.did_save(version.clone(), fingerprint, mtime, cx);
1078 });
1079
1080 Ok((version, fingerprint, mtime))
1081 })
1082 }
1083
1084 pub fn update_from_remote(&mut self, update: proto::UpdateWorktree) {
1085 if let Some(updates_tx) = &self.updates_tx {
1086 updates_tx
1087 .unbounded_send(update)
1088 .expect("consumer runs to completion");
1089 }
1090 }
1091
1092 fn observed_snapshot(&self, scan_id: usize) -> bool {
1093 self.completed_scan_id >= scan_id
1094 }
1095
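    /// Returns a future that resolves once this worktree has observed a snapshot with
    /// `completed_scan_id >= scan_id`, or fails if the host disconnects first.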
1096 fn wait_for_snapshot(&mut self, scan_id: usize) -> impl Future<Output = Result<()>> {
1097 let (tx, rx) = oneshot::channel();
1098 if self.observed_snapshot(scan_id) {
1099 let _ = tx.send(());
1100 } else if self.disconnected {
1101 drop(tx);
1102 } else {
1103 match self
1104 .snapshot_subscriptions
1105 .binary_search_by_key(&scan_id, |probe| probe.0)
1106 {
1107 Ok(ix) | Err(ix) => self.snapshot_subscriptions.insert(ix, (scan_id, tx)),
1108 }
1109 }
1110
1111 async move {
1112 rx.await?;
1113 Ok(())
1114 }
1115 }
1116
1117 pub fn update_diagnostic_summary(
1118 &mut self,
1119 path: Arc<Path>,
1120 summary: &proto::DiagnosticSummary,
1121 ) {
1122 let summary = DiagnosticSummary {
1123 language_server_id: summary.language_server_id as usize,
1124 error_count: summary.error_count as usize,
1125 warning_count: summary.warning_count as usize,
1126 };
1127 if summary.is_empty() {
1128 self.diagnostic_summaries.remove(&PathKey(path));
1129 } else {
1130 self.diagnostic_summaries.insert(PathKey(path), summary);
1131 }
1132 }
1133
1134 pub fn insert_entry(
1135 &mut self,
1136 entry: proto::Entry,
1137 scan_id: usize,
1138 cx: &mut ModelContext<Worktree>,
1139 ) -> Task<Result<Entry>> {
1140 let wait_for_snapshot = self.wait_for_snapshot(scan_id);
1141 cx.spawn(|this, mut cx| async move {
1142 wait_for_snapshot.await?;
1143 this.update(&mut cx, |worktree, _| {
1144 let worktree = worktree.as_remote_mut().unwrap();
1145 let mut snapshot = worktree.background_snapshot.lock();
1146 let entry = snapshot.insert_entry(entry);
1147 worktree.snapshot = snapshot.clone();
1148 entry
1149 })
1150 })
1151 }
1152
1153 pub(crate) fn delete_entry(
1154 &mut self,
1155 id: ProjectEntryId,
1156 scan_id: usize,
1157 cx: &mut ModelContext<Worktree>,
1158 ) -> Task<Result<()>> {
1159 let wait_for_snapshot = self.wait_for_snapshot(scan_id);
1160 cx.spawn(|this, mut cx| async move {
1161 wait_for_snapshot.await?;
1162 this.update(&mut cx, |worktree, _| {
1163 let worktree = worktree.as_remote_mut().unwrap();
1164 let mut snapshot = worktree.background_snapshot.lock();
1165 snapshot.delete_entry(id);
1166 worktree.snapshot = snapshot.clone();
1167 });
1168 Ok(())
1169 })
1170 }
1171}
1172
1173impl Snapshot {
1174 pub fn id(&self) -> WorktreeId {
1175 self.id
1176 }
1177
1178 pub fn abs_path(&self) -> &Arc<Path> {
1179 &self.abs_path
1180 }
1181
1182 pub fn contains_entry(&self, entry_id: ProjectEntryId) -> bool {
1183 self.entries_by_id.get(&entry_id, &()).is_some()
1184 }
1185
1186 pub(crate) fn insert_entry(&mut self, entry: proto::Entry) -> Result<Entry> {
1187 let entry = Entry::try_from((&self.root_char_bag, entry))?;
1188 let old_entry = self.entries_by_id.insert_or_replace(
1189 PathEntry {
1190 id: entry.id,
1191 path: entry.path.clone(),
1192 is_ignored: entry.is_ignored,
1193 scan_id: 0,
1194 },
1195 &(),
1196 );
1197 if let Some(old_entry) = old_entry {
1198 self.entries_by_path.remove(&PathKey(old_entry.path), &());
1199 }
1200 self.entries_by_path.insert_or_replace(entry.clone(), &());
1201 Ok(entry)
1202 }
1203
1204 fn delete_entry(&mut self, entry_id: ProjectEntryId) -> Option<Arc<Path>> {
1205 let removed_entry = self.entries_by_id.remove(&entry_id, &())?;
1206 self.entries_by_path = {
1207 let mut cursor = self.entries_by_path.cursor();
1208 let mut new_entries_by_path =
1209 cursor.slice(&TraversalTarget::Path(&removed_entry.path), Bias::Left, &());
1210 while let Some(entry) = cursor.item() {
1211 if entry.path.starts_with(&removed_entry.path) {
1212 self.entries_by_id.remove(&entry.id, &());
1213 cursor.next(&());
1214 } else {
1215 break;
1216 }
1217 }
1218 new_entries_by_path.push_tree(cursor.suffix(&()), &());
1219 new_entries_by_path
1220 };
1221
1222 Some(removed_entry.path)
1223 }
1224
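    /// Applies a batch of entry insertions and removals received from the host, then
    /// records the update's scan id.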
1225 pub(crate) fn apply_remote_update(&mut self, update: proto::UpdateWorktree) -> Result<()> {
1226 let mut entries_by_path_edits = Vec::new();
1227 let mut entries_by_id_edits = Vec::new();
1228 for entry_id in update.removed_entries {
1229 let entry = self
1230 .entry_for_id(ProjectEntryId::from_proto(entry_id))
1231 .ok_or_else(|| anyhow!("unknown entry {}", entry_id))?;
1232 entries_by_path_edits.push(Edit::Remove(PathKey(entry.path.clone())));
1233 entries_by_id_edits.push(Edit::Remove(entry.id));
1234 }
1235
1236 for entry in update.updated_entries {
1237 let entry = Entry::try_from((&self.root_char_bag, entry))?;
1238 if let Some(PathEntry { path, .. }) = self.entries_by_id.get(&entry.id, &()) {
1239 entries_by_path_edits.push(Edit::Remove(PathKey(path.clone())));
1240 }
1241 entries_by_id_edits.push(Edit::Insert(PathEntry {
1242 id: entry.id,
1243 path: entry.path.clone(),
1244 is_ignored: entry.is_ignored,
1245 scan_id: 0,
1246 }));
1247 entries_by_path_edits.push(Edit::Insert(entry));
1248 }
1249
1250 self.entries_by_path.edit(entries_by_path_edits, &());
1251 self.entries_by_id.edit(entries_by_id_edits, &());
1252 self.scan_id = update.scan_id as usize;
1253 if update.is_last_update {
1254 self.completed_scan_id = update.scan_id as usize;
1255 }
1256
1257 Ok(())
1258 }
1259
1260 pub fn file_count(&self) -> usize {
1261 self.entries_by_path.summary().file_count
1262 }
1263
1264 pub fn visible_file_count(&self) -> usize {
1265 self.entries_by_path.summary().visible_file_count
1266 }
1267
1268 fn traverse_from_offset(
1269 &self,
1270 include_dirs: bool,
1271 include_ignored: bool,
1272 start_offset: usize,
1273 ) -> Traversal {
1274 let mut cursor = self.entries_by_path.cursor();
1275 cursor.seek(
1276 &TraversalTarget::Count {
1277 count: start_offset,
1278 include_dirs,
1279 include_ignored,
1280 },
1281 Bias::Right,
1282 &(),
1283 );
1284 Traversal {
1285 cursor,
1286 include_dirs,
1287 include_ignored,
1288 }
1289 }
1290
1291 fn traverse_from_path(
1292 &self,
1293 include_dirs: bool,
1294 include_ignored: bool,
1295 path: &Path,
1296 ) -> Traversal {
1297 let mut cursor = self.entries_by_path.cursor();
1298 cursor.seek(&TraversalTarget::Path(path), Bias::Left, &());
1299 Traversal {
1300 cursor,
1301 include_dirs,
1302 include_ignored,
1303 }
1304 }
1305
1306 pub fn files(&self, include_ignored: bool, start: usize) -> Traversal {
1307 self.traverse_from_offset(false, include_ignored, start)
1308 }
1309
1310 pub fn entries(&self, include_ignored: bool) -> Traversal {
1311 self.traverse_from_offset(true, include_ignored, 0)
1312 }
1313
1314 pub fn paths(&self) -> impl Iterator<Item = &Arc<Path>> {
1315 let empty_path = Path::new("");
1316 self.entries_by_path
1317 .cursor::<()>()
1318 .filter(move |entry| entry.path.as_ref() != empty_path)
1319 .map(|entry| &entry.path)
1320 }
1321
1322 fn child_entries<'a>(&'a self, parent_path: &'a Path) -> ChildEntriesIter<'a> {
1323 let mut cursor = self.entries_by_path.cursor();
1324 cursor.seek(&TraversalTarget::Path(parent_path), Bias::Right, &());
1325 let traversal = Traversal {
1326 cursor,
1327 include_dirs: true,
1328 include_ignored: true,
1329 };
1330 ChildEntriesIter {
1331 traversal,
1332 parent_path,
1333 }
1334 }
1335
1336 pub fn root_entry(&self) -> Option<&Entry> {
1337 self.entry_for_path("")
1338 }
1339
1340 pub fn root_name(&self) -> &str {
1341 &self.root_name
1342 }
1343
1344 pub fn scan_started(&mut self) {
1345 self.scan_id += 1;
1346 }
1347
1348 pub fn scan_completed(&mut self) {
1349 self.completed_scan_id = self.scan_id;
1350 }
1351
1352 pub fn scan_id(&self) -> usize {
1353 self.scan_id
1354 }
1355
1356 pub fn entry_for_path(&self, path: impl AsRef<Path>) -> Option<&Entry> {
1357 let path = path.as_ref();
1358 self.traverse_from_path(true, true, path)
1359 .entry()
1360 .and_then(|entry| {
1361 if entry.path.as_ref() == path {
1362 Some(entry)
1363 } else {
1364 None
1365 }
1366 })
1367 }
1368
1369 pub fn entry_for_id(&self, id: ProjectEntryId) -> Option<&Entry> {
1370 let entry = self.entries_by_id.get(&id, &())?;
1371 self.entry_for_path(&entry.path)
1372 }
1373
1374 pub fn inode_for_path(&self, path: impl AsRef<Path>) -> Option<u64> {
1375 self.entry_for_path(path.as_ref()).map(|e| e.inode)
1376 }
1377}
1378
1379impl LocalSnapshot {
    // Returns the most specific git repository containing the given path, if any.
1381 pub(crate) fn repo_for(&self, path: &Path) -> Option<GitRepositoryEntry> {
1382 self.git_repositories
1383 .iter()
            .rev() // `git_repositories` is sorted lexicographically, so the last match is the most specific
1385 .find(|repo| repo.manages(path))
1386 .cloned()
1387 }
1388
1389 pub(crate) fn repo_with_dot_git_containing(
1390 &mut self,
1391 path: &Path,
1392 ) -> Option<&mut GitRepositoryEntry> {
1393 // Git repositories cannot be nested, so we don't need to reverse the order
1394 self.git_repositories
1395 .iter_mut()
1396 .find(|repo| repo.in_dot_git(path))
1397 }
1398
1399 #[cfg(test)]
1400 pub(crate) fn build_initial_update(&self, project_id: u64) -> proto::UpdateWorktree {
1401 let root_name = self.root_name.clone();
1402 proto::UpdateWorktree {
1403 project_id,
1404 worktree_id: self.id().to_proto(),
1405 abs_path: self.abs_path().to_string_lossy().into(),
1406 root_name,
1407 updated_entries: self.entries_by_path.iter().map(Into::into).collect(),
1408 removed_entries: Default::default(),
1409 scan_id: self.scan_id as u64,
1410 is_last_update: true,
1411 }
1412 }
1413
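    /// Computes the difference between this snapshot and `other` as a
    /// `proto::UpdateWorktree`, by walking both id-ordered entry trees in lockstep and
    /// comparing entries with matching ids by scan id.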
1414 pub(crate) fn build_update(
1415 &self,
1416 other: &Self,
1417 project_id: u64,
1418 worktree_id: u64,
1419 include_ignored: bool,
1420 ) -> proto::UpdateWorktree {
1421 let mut updated_entries = Vec::new();
1422 let mut removed_entries = Vec::new();
1423 let mut self_entries = self
1424 .entries_by_id
1425 .cursor::<()>()
1426 .filter(|e| include_ignored || !e.is_ignored)
1427 .peekable();
1428 let mut other_entries = other
1429 .entries_by_id
1430 .cursor::<()>()
1431 .filter(|e| include_ignored || !e.is_ignored)
1432 .peekable();
1433 loop {
1434 match (self_entries.peek(), other_entries.peek()) {
1435 (Some(self_entry), Some(other_entry)) => {
1436 match Ord::cmp(&self_entry.id, &other_entry.id) {
1437 Ordering::Less => {
1438 let entry = self.entry_for_id(self_entry.id).unwrap().into();
1439 updated_entries.push(entry);
1440 self_entries.next();
1441 }
1442 Ordering::Equal => {
1443 if self_entry.scan_id != other_entry.scan_id {
1444 let entry = self.entry_for_id(self_entry.id).unwrap().into();
1445 updated_entries.push(entry);
1446 }
1447
1448 self_entries.next();
1449 other_entries.next();
1450 }
1451 Ordering::Greater => {
1452 removed_entries.push(other_entry.id.to_proto());
1453 other_entries.next();
1454 }
1455 }
1456 }
1457 (Some(self_entry), None) => {
1458 let entry = self.entry_for_id(self_entry.id).unwrap().into();
1459 updated_entries.push(entry);
1460 self_entries.next();
1461 }
1462 (None, Some(other_entry)) => {
1463 removed_entries.push(other_entry.id.to_proto());
1464 other_entries.next();
1465 }
1466 (None, None) => break,
1467 }
1468 }
1469
1470 proto::UpdateWorktree {
1471 project_id,
1472 worktree_id,
1473 abs_path: self.abs_path().to_string_lossy().into(),
1474 root_name: self.root_name().to_string(),
1475 updated_entries,
1476 removed_entries,
1477 scan_id: self.scan_id as u64,
1478 is_last_update: self.completed_scan_id == self.scan_id,
1479 }
1480 }
1481
1482 fn insert_entry(&mut self, mut entry: Entry, fs: &dyn Fs) -> Entry {
1483 if entry.is_file() && entry.path.file_name() == Some(&GITIGNORE) {
1484 let abs_path = self.abs_path.join(&entry.path);
1485 match smol::block_on(build_gitignore(&abs_path, fs)) {
1486 Ok(ignore) => {
1487 self.ignores_by_parent_abs_path.insert(
1488 abs_path.parent().unwrap().into(),
1489 (Arc::new(ignore), self.scan_id),
1490 );
1491 }
1492 Err(error) => {
1493 log::error!(
1494 "error loading .gitignore file {:?} - {:?}",
1495 &entry.path,
1496 error
1497 );
1498 }
1499 }
1500 }
1501
1502 self.reuse_entry_id(&mut entry);
1503
1504 if entry.kind == EntryKind::PendingDir {
1505 if let Some(existing_entry) =
1506 self.entries_by_path.get(&PathKey(entry.path.clone()), &())
1507 {
1508 entry.kind = existing_entry.kind;
1509 }
1510 }
1511
1512 let scan_id = self.scan_id;
1513 self.entries_by_path.insert_or_replace(entry.clone(), &());
1514 self.entries_by_id.insert_or_replace(
1515 PathEntry {
1516 id: entry.id,
1517 path: entry.path.clone(),
1518 is_ignored: entry.is_ignored,
1519 scan_id,
1520 },
1521 &(),
1522 );
1523
1524 entry
1525 }
1526
1527 fn populate_dir(
1528 &mut self,
1529 parent_path: Arc<Path>,
1530 entries: impl IntoIterator<Item = Entry>,
1531 ignore: Option<Arc<Gitignore>>,
1532 fs: &dyn Fs,
1533 ) {
1534 let mut parent_entry = if let Some(parent_entry) =
1535 self.entries_by_path.get(&PathKey(parent_path.clone()), &())
1536 {
1537 parent_entry.clone()
1538 } else {
1539 log::warn!(
1540 "populating a directory {:?} that has been removed",
1541 parent_path
1542 );
1543 return;
1544 };
1545
1546 if let Some(ignore) = ignore {
1547 self.ignores_by_parent_abs_path.insert(
1548 self.abs_path.join(&parent_path).into(),
1549 (ignore, self.scan_id),
1550 );
1551 }
1552 if matches!(parent_entry.kind, EntryKind::PendingDir) {
1553 parent_entry.kind = EntryKind::Dir;
1554 } else {
1555 unreachable!();
1556 }
1557
1558 if parent_path.file_name() == Some(&DOT_GIT) {
1559 let abs_path = self.abs_path.join(&parent_path);
1560 let content_path: Arc<Path> = parent_path.parent().unwrap().into();
1561 if let Err(ix) = self
1562 .git_repositories
1563 .binary_search_by_key(&&content_path, |repo| &repo.content_path)
1564 {
1565 if let Some(repo) = fs.open_repo(abs_path.as_path()) {
1566 self.git_repositories.insert(
1567 ix,
1568 GitRepositoryEntry {
1569 repo,
1570 scan_id: 0,
1571 content_path,
1572 git_dir_path: parent_path,
1573 },
1574 );
1575 }
1576 }
1577 }
1578
1579 let mut entries_by_path_edits = vec![Edit::Insert(parent_entry)];
1580 let mut entries_by_id_edits = Vec::new();
1581
1582 for mut entry in entries {
1583 self.reuse_entry_id(&mut entry);
1584 entries_by_id_edits.push(Edit::Insert(PathEntry {
1585 id: entry.id,
1586 path: entry.path.clone(),
1587 is_ignored: entry.is_ignored,
1588 scan_id: self.scan_id,
1589 }));
1590 entries_by_path_edits.push(Edit::Insert(entry));
1591 }
1592
1593 self.entries_by_path.edit(entries_by_path_edits, &());
1594 self.entries_by_id.edit(entries_by_id_edits, &());
1595 }
1596
1597 fn reuse_entry_id(&mut self, entry: &mut Entry) {
1598 if let Some(removed_entry_id) = self.removed_entry_ids.remove(&entry.inode) {
1599 entry.id = removed_entry_id;
1600 } else if let Some(existing_entry) = self.entry_for_path(&entry.path) {
1601 entry.id = existing_entry.id;
1602 }
1603 }
1604
1605 fn remove_path(&mut self, path: &Path) {
1606 let mut new_entries;
1607 let removed_entries;
1608 {
1609 let mut cursor = self.entries_by_path.cursor::<TraversalProgress>();
1610 new_entries = cursor.slice(&TraversalTarget::Path(path), Bias::Left, &());
1611 removed_entries = cursor.slice(&TraversalTarget::PathSuccessor(path), Bias::Left, &());
1612 new_entries.push_tree(cursor.suffix(&()), &());
1613 }
1614 self.entries_by_path = new_entries;
1615
1616 let mut entries_by_id_edits = Vec::new();
1617 for entry in removed_entries.cursor::<()>() {
1618 let removed_entry_id = self
1619 .removed_entry_ids
1620 .entry(entry.inode)
1621 .or_insert(entry.id);
1622 *removed_entry_id = cmp::max(*removed_entry_id, entry.id);
1623 entries_by_id_edits.push(Edit::Remove(entry.id));
1624 }
1625 self.entries_by_id.edit(entries_by_id_edits, &());
1626
1627 if path.file_name() == Some(&GITIGNORE) {
1628 let abs_parent_path = self.abs_path.join(path.parent().unwrap());
1629 if let Some((_, scan_id)) = self
1630 .ignores_by_parent_abs_path
1631 .get_mut(abs_parent_path.as_path())
1632 {
1633 *scan_id = self.snapshot.scan_id;
1634 }
1635 } else if path.file_name() == Some(&DOT_GIT) {
1636 let parent_path = path.parent().unwrap();
1637 if let Ok(ix) = self
1638 .git_repositories
1639 .binary_search_by_key(&parent_path, |repo| repo.git_dir_path.as_ref())
1640 {
1641 self.git_repositories[ix].scan_id = self.snapshot.scan_id;
1642 }
1643 }
1644 }
1645
1646 fn ancestor_inodes_for_path(&self, path: &Path) -> TreeSet<u64> {
1647 let mut inodes = TreeSet::default();
1648 for ancestor in path.ancestors().skip(1) {
1649 if let Some(entry) = self.entry_for_path(ancestor) {
1650 inodes.insert(entry.inode);
1651 }
1652 }
1653 inodes
1654 }
1655
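    /// Builds the stack of `.gitignore` files that apply to `abs_path`, from the outermost
    /// ancestor inward. If any ancestor directory is itself ignored, the entire path is
    /// treated as ignored.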
1656 fn ignore_stack_for_abs_path(&self, abs_path: &Path, is_dir: bool) -> Arc<IgnoreStack> {
1657 let mut new_ignores = Vec::new();
1658 for ancestor in abs_path.ancestors().skip(1) {
1659 if let Some((ignore, _)) = self.ignores_by_parent_abs_path.get(ancestor) {
1660 new_ignores.push((ancestor, Some(ignore.clone())));
1661 } else {
1662 new_ignores.push((ancestor, None));
1663 }
1664 }
1665
1666 let mut ignore_stack = IgnoreStack::none();
1667 for (parent_abs_path, ignore) in new_ignores.into_iter().rev() {
1668 if ignore_stack.is_abs_path_ignored(parent_abs_path, true) {
1669 ignore_stack = IgnoreStack::all();
1670 break;
1671 } else if let Some(ignore) = ignore {
1672 ignore_stack = ignore_stack.append(parent_abs_path.into(), ignore);
1673 }
1674 }
1675
1676 if ignore_stack.is_abs_path_ignored(abs_path, is_dir) {
1677 ignore_stack = IgnoreStack::all();
1678 }
1679
1680 ignore_stack
1681 }
1682
1683 pub fn git_repo_entries(&self) -> &[GitRepositoryEntry] {
1684 &self.git_repositories
1685 }
1686}
1687
1688impl GitRepositoryEntry {
    // Note that this path should be relative to the worktree root.
1690 pub(crate) fn manages(&self, path: &Path) -> bool {
1691 path.starts_with(self.content_path.as_ref())
1692 }
1693
1694 // Note that this path should be relative to the worktree root.
1695 pub(crate) fn in_dot_git(&self, path: &Path) -> bool {
1696 path.starts_with(self.git_dir_path.as_ref())
1697 }
1698}
1699
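/// Loads a `.gitignore` file from `abs_path` and parses it into a matcher rooted at its
/// parent directory.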
1700async fn build_gitignore(abs_path: &Path, fs: &dyn Fs) -> Result<Gitignore> {
1701 let contents = fs.load(abs_path).await?;
1702 let parent = abs_path.parent().unwrap_or_else(|| Path::new("/"));
1703 let mut builder = GitignoreBuilder::new(parent);
1704 for line in contents.lines() {
1705 builder.add_line(Some(abs_path.into()), line)?;
1706 }
1707 Ok(builder.build()?)
1708}
1709
1710impl WorktreeId {
1711 pub fn from_usize(handle_id: usize) -> Self {
1712 Self(handle_id)
1713 }
1714
1715 pub(crate) fn from_proto(id: u64) -> Self {
1716 Self(id as usize)
1717 }
1718
1719 pub fn to_proto(&self) -> u64 {
1720 self.0 as u64
1721 }
1722
1723 pub fn to_usize(&self) -> usize {
1724 self.0
1725 }
1726}
1727
1728impl fmt::Display for WorktreeId {
1729 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1730 self.0.fmt(f)
1731 }
1732}
1733
1734impl Deref for Worktree {
1735 type Target = Snapshot;
1736
1737 fn deref(&self) -> &Self::Target {
1738 match self {
1739 Worktree::Local(worktree) => &worktree.snapshot,
1740 Worktree::Remote(worktree) => &worktree.snapshot,
1741 }
1742 }
1743}
1744
1745impl Deref for LocalWorktree {
1746 type Target = LocalSnapshot;
1747
1748 fn deref(&self) -> &Self::Target {
1749 &self.snapshot
1750 }
1751}
1752
1753impl Deref for RemoteWorktree {
1754 type Target = Snapshot;
1755
1756 fn deref(&self) -> &Self::Target {
1757 &self.snapshot
1758 }
1759}
1760
1761impl fmt::Debug for LocalWorktree {
1762 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1763 self.snapshot.fmt(f)
1764 }
1765}
1766
1767impl fmt::Debug for Snapshot {
1768 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1769 struct EntriesById<'a>(&'a SumTree<PathEntry>);
1770 struct EntriesByPath<'a>(&'a SumTree<Entry>);
1771
1772 impl<'a> fmt::Debug for EntriesByPath<'a> {
1773 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1774 f.debug_map()
1775 .entries(self.0.iter().map(|entry| (&entry.path, entry.id)))
1776 .finish()
1777 }
1778 }
1779
1780 impl<'a> fmt::Debug for EntriesById<'a> {
1781 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1782 f.debug_list().entries(self.0.iter()).finish()
1783 }
1784 }
1785
1786 f.debug_struct("Snapshot")
1787 .field("id", &self.id)
1788 .field("root_name", &self.root_name)
1789 .field("entries_by_path", &EntriesByPath(&self.entries_by_path))
1790 .field("entries_by_id", &EntriesById(&self.entries_by_id))
1791 .finish()
1792 }
1793}
1794
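/// A file as seen by open buffers: a path within a worktree plus the metadata needed to
/// save, reload, and describe it, even after the underlying entry has been deleted.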
1795#[derive(Clone, PartialEq)]
1796pub struct File {
1797 pub worktree: ModelHandle<Worktree>,
1798 pub path: Arc<Path>,
1799 pub mtime: SystemTime,
1800 pub(crate) entry_id: ProjectEntryId,
1801 pub(crate) is_local: bool,
1802 pub(crate) is_deleted: bool,
1803}
1804
1805impl language::File for File {
1806 fn as_local(&self) -> Option<&dyn language::LocalFile> {
1807 if self.is_local {
1808 Some(self)
1809 } else {
1810 None
1811 }
1812 }
1813
1814 fn mtime(&self) -> SystemTime {
1815 self.mtime
1816 }
1817
1818 fn path(&self) -> &Arc<Path> {
1819 &self.path
1820 }
1821
1822 fn full_path(&self, cx: &AppContext) -> PathBuf {
1823 let mut full_path = PathBuf::new();
1824 let worktree = self.worktree.read(cx);
1825
1826 if worktree.is_visible() {
1827 full_path.push(worktree.root_name());
1828 } else {
1829 let path = worktree.abs_path();
1830
1831 if worktree.is_local() && path.starts_with(HOME.as_path()) {
1832 full_path.push("~");
1833 full_path.push(path.strip_prefix(HOME.as_path()).unwrap());
1834 } else {
1835 full_path.push(path)
1836 }
1837 }
1838
1839 if self.path.components().next().is_some() {
1840 full_path.push(&self.path);
1841 }
1842
1843 full_path
1844 }
1845
1846 /// Returns the last component of this handle's absolute path. If this handle refers to the root
1847 /// of its worktree, then this method will return the name of the worktree itself.
1848 fn file_name<'a>(&'a self, cx: &'a AppContext) -> &'a OsStr {
1849 self.path
1850 .file_name()
1851 .unwrap_or_else(|| OsStr::new(&self.worktree.read(cx).root_name))
1852 }
1853
1854 fn is_deleted(&self) -> bool {
1855 self.is_deleted
1856 }
1857
1858 fn as_any(&self) -> &dyn Any {
1859 self
1860 }
1861
1862 fn to_proto(&self) -> rpc::proto::File {
1863 rpc::proto::File {
1864 worktree_id: self.worktree.id() as u64,
1865 entry_id: self.entry_id.to_proto(),
1866 path: self.path.to_string_lossy().into(),
1867 mtime: Some(self.mtime.into()),
1868 is_deleted: self.is_deleted,
1869 }
1870 }
1871}
1872
1873impl language::LocalFile for File {
1874 fn abs_path(&self, cx: &AppContext) -> PathBuf {
1875 self.worktree
1876 .read(cx)
1877 .as_local()
1878 .unwrap()
1879 .abs_path
1880 .join(&self.path)
1881 }
1882
1883 fn load(&self, cx: &AppContext) -> Task<Result<String>> {
1884 let worktree = self.worktree.read(cx).as_local().unwrap();
1885 let abs_path = worktree.absolutize(&self.path);
1886 let fs = worktree.fs.clone();
1887 cx.background()
1888 .spawn(async move { fs.load(&abs_path).await })
1889 }
1890
1891 fn buffer_reloaded(
1892 &self,
1893 buffer_id: u64,
1894 version: &clock::Global,
1895 fingerprint: RopeFingerprint,
1896 line_ending: LineEnding,
1897 mtime: SystemTime,
1898 cx: &mut MutableAppContext,
1899 ) {
1900 let worktree = self.worktree.read(cx).as_local().unwrap();
1901 if let Some(project_id) = worktree.share.as_ref().map(|share| share.project_id) {
1902 worktree
1903 .client
1904 .send(proto::BufferReloaded {
1905 project_id,
1906 buffer_id,
1907 version: serialize_version(version),
1908 mtime: Some(mtime.into()),
1909 fingerprint: serialize_fingerprint(fingerprint),
1910 line_ending: serialize_line_ending(line_ending) as i32,
1911 })
1912 .log_err();
1913 }
1914 }
1915}
1916
1917impl File {
1918 pub fn from_proto(
1919 proto: rpc::proto::File,
1920 worktree: ModelHandle<Worktree>,
1921 cx: &AppContext,
1922 ) -> Result<Self> {
1923 let worktree_id = worktree
1924 .read(cx)
1925 .as_remote()
1926 .ok_or_else(|| anyhow!("not remote"))?
1927 .id();
1928
1929 if worktree_id.to_proto() != proto.worktree_id {
1930 return Err(anyhow!("worktree id does not match file"));
1931 }
1932
1933 Ok(Self {
1934 worktree,
1935 path: Path::new(&proto.path).into(),
1936 mtime: proto.mtime.ok_or_else(|| anyhow!("no timestamp"))?.into(),
1937 entry_id: ProjectEntryId::from_proto(proto.entry_id),
1938 is_local: false,
1939 is_deleted: proto.is_deleted,
1940 })
1941 }
1942
1943 pub fn from_dyn(file: Option<&Arc<dyn language::File>>) -> Option<&Self> {
1944 file.and_then(|f| f.as_any().downcast_ref())
1945 }
1946
1947 pub fn worktree_id(&self, cx: &AppContext) -> WorktreeId {
1948 self.worktree.read(cx).id()
1949 }
1950
1951 pub fn project_entry_id(&self, _: &AppContext) -> Option<ProjectEntryId> {
1952 if self.is_deleted {
1953 None
1954 } else {
1955 Some(self.entry_id)
1956 }
1957 }
1958}
1959
1960#[derive(Clone, Debug, PartialEq, Eq)]
1961pub struct Entry {
1962 pub id: ProjectEntryId,
1963 pub kind: EntryKind,
1964 pub path: Arc<Path>,
1965 pub inode: u64,
1966 pub mtime: SystemTime,
1967 pub is_symlink: bool,
1968 pub is_ignored: bool,
1969}
1970
1971#[derive(Clone, Copy, Debug, PartialEq, Eq)]
1972pub enum EntryKind {
1973 PendingDir,
1974 Dir,
1975 File(CharBag),
1976}
1977
1978#[derive(Clone, Copy, Debug)]
1979pub enum PathChange {
1980 Added,
1981 Removed,
1982 Updated,
1983 AddedOrUpdated,
1984}
1985
1986impl Entry {
1987 fn new(
1988 path: Arc<Path>,
1989 metadata: &fs::Metadata,
1990 next_entry_id: &AtomicUsize,
1991 root_char_bag: CharBag,
1992 ) -> Self {
1993 Self {
1994 id: ProjectEntryId::new(next_entry_id),
1995 kind: if metadata.is_dir {
1996 EntryKind::PendingDir
1997 } else {
1998 EntryKind::File(char_bag_for_path(root_char_bag, &path))
1999 },
2000 path,
2001 inode: metadata.inode,
2002 mtime: metadata.mtime,
2003 is_symlink: metadata.is_symlink,
2004 is_ignored: false,
2005 }
2006 }
2007
2008 pub fn is_dir(&self) -> bool {
2009 matches!(self.kind, EntryKind::Dir | EntryKind::PendingDir)
2010 }
2011
2012 pub fn is_file(&self) -> bool {
2013 matches!(self.kind, EntryKind::File(_))
2014 }
2015}
2016
2017impl sum_tree::Item for Entry {
2018 type Summary = EntrySummary;
2019
2020 fn summary(&self) -> Self::Summary {
2021 let visible_count = if self.is_ignored { 0 } else { 1 };
2022 let file_count;
2023 let visible_file_count;
2024 if self.is_file() {
2025 file_count = 1;
2026 visible_file_count = visible_count;
2027 } else {
2028 file_count = 0;
2029 visible_file_count = 0;
2030 }
2031
2032 EntrySummary {
2033 max_path: self.path.clone(),
2034 count: 1,
2035 visible_count,
2036 file_count,
2037 visible_file_count,
2038 }
2039 }
2040}
2041
2042impl sum_tree::KeyedItem for Entry {
2043 type Key = PathKey;
2044
2045 fn key(&self) -> Self::Key {
2046 PathKey(self.path.clone())
2047 }
2048}
2049
2050#[derive(Clone, Debug)]
2051pub struct EntrySummary {
2052 max_path: Arc<Path>,
2053 count: usize,
2054 visible_count: usize,
2055 file_count: usize,
2056 visible_file_count: usize,
2057}
2058
2059impl Default for EntrySummary {
2060 fn default() -> Self {
2061 Self {
2062 max_path: Arc::from(Path::new("")),
2063 count: 0,
2064 visible_count: 0,
2065 file_count: 0,
2066 visible_file_count: 0,
2067 }
2068 }
2069}
2070
2071impl sum_tree::Summary for EntrySummary {
2072 type Context = ();
2073
2074 fn add_summary(&mut self, rhs: &Self, _: &()) {
2075 self.max_path = rhs.max_path.clone();
2076 self.count += rhs.count;
2077 self.visible_count += rhs.visible_count;
2078 self.file_count += rhs.file_count;
2079 self.visible_file_count += rhs.visible_file_count;
2080 }
2081}
2082
2083#[derive(Clone, Debug)]
2084struct PathEntry {
2085 id: ProjectEntryId,
2086 path: Arc<Path>,
2087 is_ignored: bool,
2088 scan_id: usize,
2089}
2090
2091impl sum_tree::Item for PathEntry {
2092 type Summary = PathEntrySummary;
2093
2094 fn summary(&self) -> Self::Summary {
2095 PathEntrySummary { max_id: self.id }
2096 }
2097}
2098
2099impl sum_tree::KeyedItem for PathEntry {
2100 type Key = ProjectEntryId;
2101
2102 fn key(&self) -> Self::Key {
2103 self.id
2104 }
2105}
2106
2107#[derive(Clone, Debug, Default)]
2108struct PathEntrySummary {
2109 max_id: ProjectEntryId,
2110}
2111
2112impl sum_tree::Summary for PathEntrySummary {
2113 type Context = ();
2114
2115 fn add_summary(&mut self, summary: &Self, _: &Self::Context) {
2116 self.max_id = summary.max_id;
2117 }
2118}
2119
2120impl<'a> sum_tree::Dimension<'a, PathEntrySummary> for ProjectEntryId {
2121 fn add_summary(&mut self, summary: &'a PathEntrySummary, _: &()) {
2122 *self = summary.max_id;
2123 }
2124}
2125
2126#[derive(Clone, Debug, Eq, PartialEq, Ord, PartialOrd)]
2127pub struct PathKey(Arc<Path>);
2128
2129impl Default for PathKey {
2130 fn default() -> Self {
2131 Self(Path::new("").into())
2132 }
2133}
2134
2135impl<'a> sum_tree::Dimension<'a, EntrySummary> for PathKey {
2136 fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
2137 self.0 = summary.max_path.clone();
2138 }
2139}
2140
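// Scans a local worktree's directory tree on background threads, keeping `snapshot` up to
// date as file-system events arrive. Progress and results are reported back to the worktree
// over the `notify` channel as `ScanState` values, and the per-path changes observed since
// the last report are accumulated in `changes`.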
2141struct BackgroundScanner {
2142 fs: Arc<dyn Fs>,
2143 snapshot: Mutex<LocalSnapshot>,
2144 changes: HashMap<Arc<Path>, PathChange>,
2145 notify: UnboundedSender<ScanState>,
2146 executor: Arc<executor::Background>,
2147}
2148
2149impl BackgroundScanner {
2150 fn new(
2151 snapshot: LocalSnapshot,
2152 notify: UnboundedSender<ScanState>,
2153 fs: Arc<dyn Fs>,
2154 executor: Arc<executor::Background>,
2155 ) -> Self {
2156 Self {
2157 fs,
2158 snapshot: Mutex::new(snapshot),
2159 notify,
2160 executor,
2161 changes: Default::default(),
2162 }
2163 }
2164
2165 fn abs_path(&self) -> Arc<Path> {
2166 self.snapshot.lock().abs_path.clone()
2167 }
2168
2169 async fn run(
2170 mut self,
2171 events_rx: impl Stream<Item = Vec<fsevent::Event>>,
2172 mut changed_paths: UnboundedReceiver<(Vec<PathBuf>, barrier::Sender)>,
2173 ) {
2174 use futures::FutureExt as _;
2175
2176 // Retrieve the basic properties of the root node.
2177 let root_char_bag;
2178 let root_abs_path;
2179 let root_inode;
2180 let root_is_dir;
2181 let next_entry_id;
2182 {
2183 let mut snapshot = self.snapshot.lock();
2184 snapshot.scan_started();
2185 root_char_bag = snapshot.root_char_bag;
2186 root_abs_path = snapshot.abs_path.clone();
2187 root_inode = snapshot.root_entry().map(|e| e.inode);
2188 root_is_dir = snapshot.root_entry().map_or(false, |e| e.is_dir());
2189 next_entry_id = snapshot.next_entry_id.clone();
2190 }
2191
2192 // Populate ignores above the root.
2193 let ignore_stack;
2194 for ancestor in root_abs_path.ancestors().skip(1) {
2195 if let Ok(ignore) = build_gitignore(&ancestor.join(&*GITIGNORE), self.fs.as_ref()).await
2196 {
2197 self.snapshot
2198 .lock()
2199 .ignores_by_parent_abs_path
2200 .insert(ancestor.into(), (ignore.into(), 0));
2201 }
2202 }
2203 {
2204 let mut snapshot = self.snapshot.lock();
2205 ignore_stack = snapshot.ignore_stack_for_abs_path(&root_abs_path, true);
2206 if ignore_stack.is_all() {
2207 if let Some(mut root_entry) = snapshot.root_entry().cloned() {
2208 root_entry.is_ignored = true;
2209 snapshot.insert_entry(root_entry, self.fs.as_ref());
2210 }
2211 }
2212 };
2213
2214 if root_is_dir {
2215 let mut ancestor_inodes = TreeSet::default();
2216 if let Some(root_inode) = root_inode {
2217 ancestor_inodes.insert(root_inode);
2218 }
2219
2220 let (tx, rx) = channel::unbounded();
2221 self.executor
2222 .block(tx.send(ScanJob {
2223 abs_path: root_abs_path.to_path_buf(),
2224 path: Arc::from(Path::new("")),
2225 ignore_stack,
2226 ancestor_inodes,
2227 scan_queue: tx.clone(),
2228 }))
2229 .unwrap();
2230 drop(tx);
2231
2232 // Spawn a worker thread per logical CPU.
2233 self.executor
2234 .scoped(|scope| {
                    // On the first worker thread, listen for change requests from the worktree.
2236 // For each change request, after refreshing the given paths, report
2237 // a progress update for the snapshot.
2238 scope.spawn(async {
2239 let reporting_timer = self.delay().fuse();
2240 futures::pin_mut!(reporting_timer);
2241
2242 loop {
2243 select_biased! {
2244 job = changed_paths.next().fuse() => {
2245 let Some((abs_paths, barrier)) = job else { break };
2246 self.update_entries_for_paths(abs_paths, None).await;
2247 if self.notify.unbounded_send(ScanState::Initializing {
2248 snapshot: self.snapshot.lock().clone(),
2249 barrier: Some(barrier),
2250 }).is_err() {
2251 break;
2252 }
2253 }
2254
2255 _ = reporting_timer => {
2256 reporting_timer.set(self.delay().fuse());
2257 if self.notify.unbounded_send(ScanState::Initializing {
2258 snapshot: self.snapshot.lock().clone(),
2259 barrier: None,
2260 }).is_err() {
2261 break;
2262 }
2263 }
2264
2265 job = rx.recv().fuse() => {
2266 let Ok(job) = job else { break };
2267 if let Err(err) = self
2268 .scan_dir(root_char_bag, next_entry_id.clone(), &job)
2269 .await
2270 {
2271 log::error!("error scanning {:?}: {}", job.abs_path, err);
2272 }
2273 }
2274 }
2275 }
2276 });
2277
2278 // On all of the remaining worker threads, just scan directories.
2279 for _ in 1..self.executor.num_cpus() {
2280 scope.spawn(async {
2281 while let Ok(job) = rx.recv().await {
2282 if let Err(err) = self
2283 .scan_dir(root_char_bag, next_entry_id.clone(), &job)
2284 .await
2285 {
2286 log::error!("error scanning {:?}: {}", job.abs_path, err);
2287 }
2288 }
2289 });
2290 }
2291 })
2292 .await;
2293 }
2294
2295 self.snapshot.lock().scan_completed();
2296
2297 if self
2298 .notify
2299 .unbounded_send(ScanState::Initialized {
2300 snapshot: self.snapshot.lock().clone(),
2301 })
2302 .is_err()
2303 {
2304 return;
2305 }
2306
2307 // Process any events that occurred while performing the initial scan. These
2308 // events can't be reported as precisely, because there is no snapshot of the
2309 // worktree before they occurred.
2310 futures::pin_mut!(events_rx);
2311 if let Poll::Ready(Some(mut events)) = futures::poll!(events_rx.next()) {
2312 while let Poll::Ready(Some(additional_events)) = futures::poll!(events_rx.next()) {
2313 events.extend(additional_events);
2314 }
2315 if self.notify.unbounded_send(ScanState::Updating).is_err() {
2316 return;
2317 }
2318 if !self
2319 .process_events(events.into_iter().map(|e| e.path).collect(), true)
2320 .await
2321 {
2322 return;
2323 }
2324 if self
2325 .notify
2326 .unbounded_send(ScanState::Updated {
2327 snapshot: self.snapshot.lock().clone(),
2328 changes: mem::take(&mut self.changes),
2329 barrier: None,
2330 })
2331 .is_err()
2332 {
2333 return;
2334 }
2335 }
2336
2337 // Continue processing events until the worktree is dropped.
2338 loop {
2339 let abs_paths;
2340 let barrier;
2341 select_biased! {
2342 request = changed_paths.next().fuse() => {
2343 let Some((paths, b)) = request else { break };
2344 abs_paths = paths;
2345 barrier = Some(b);
2346 }
2347 events = events_rx.next().fuse() => {
2348 let Some(events) = events else { break };
2349 abs_paths = events.into_iter().map(|e| e.path).collect();
2350 barrier = None;
2351 }
2352 }
2353
2354 if self.notify.unbounded_send(ScanState::Updating).is_err() {
2355 return;
2356 }
2357 if !self.process_events(abs_paths, false).await {
2358 return;
2359 }
2360 if self
2361 .notify
2362 .unbounded_send(ScanState::Updated {
2363 snapshot: self.snapshot.lock().clone(),
2364 changes: mem::take(&mut self.changes),
2365 barrier,
2366 })
2367 .is_err()
2368 {
2369 return;
2370 }
2371 }
2372 }
2373
2374 async fn delay(&self) {
2375 #[cfg(any(test, feature = "test-support"))]
2376 if self.fs.is_fake() {
2377 return self.executor.simulate_random_delay().await;
2378 }
2379 smol::Timer::after(Duration::from_millis(100)).await;
2380 }
2381
2382 async fn scan_dir(
2383 &self,
2384 root_char_bag: CharBag,
2385 next_entry_id: Arc<AtomicUsize>,
2386 job: &ScanJob,
2387 ) -> Result<()> {
2388 let mut new_entries: Vec<Entry> = Vec::new();
2389 let mut new_jobs: Vec<Option<ScanJob>> = Vec::new();
2390 let mut ignore_stack = job.ignore_stack.clone();
2391 let mut new_ignore = None;
2392
2393 let mut child_paths = self.fs.read_dir(&job.abs_path).await?;
2394 while let Some(child_abs_path) = child_paths.next().await {
2395 let child_abs_path = match child_abs_path {
2396 Ok(child_abs_path) => child_abs_path,
2397 Err(error) => {
2398 log::error!("error processing entry {:?}", error);
2399 continue;
2400 }
2401 };
2402
2403 let child_name = child_abs_path.file_name().unwrap();
2404 let child_path: Arc<Path> = job.path.join(child_name).into();
2405 let child_metadata = match self.fs.metadata(&child_abs_path).await {
2406 Ok(Some(metadata)) => metadata,
2407 Ok(None) => continue,
2408 Err(err) => {
2409 log::error!("error processing {:?}: {:?}", child_abs_path, err);
2410 continue;
2411 }
2412 };
2413
2414 // If we find a .gitignore, add it to the stack of ignores used to determine which paths are ignored
2415 if child_name == *GITIGNORE {
2416 match build_gitignore(&child_abs_path, self.fs.as_ref()).await {
2417 Ok(ignore) => {
2418 let ignore = Arc::new(ignore);
2419 ignore_stack =
2420 ignore_stack.append(job.abs_path.as_path().into(), ignore.clone());
2421 new_ignore = Some(ignore);
2422 }
2423 Err(error) => {
2424 log::error!(
2425 "error loading .gitignore file {:?} - {:?}",
2426 child_name,
2427 error
2428 );
2429 }
2430 }
2431
                // Update the ignore status of any child entries we've already processed to reflect
                // the ignore file in the current directory. Because `.gitignore` starts with a `.`,
                // it is usually encountered early in the listing, so there should rarely be many
                // entries to update. Update the ignore stack associated with any new jobs as well.
2436 let mut new_jobs = new_jobs.iter_mut();
2437 for entry in &mut new_entries {
2438 let entry_abs_path = self.abs_path().join(&entry.path);
2439 entry.is_ignored =
2440 ignore_stack.is_abs_path_ignored(&entry_abs_path, entry.is_dir());
2441
2442 if entry.is_dir() {
2443 if let Some(job) = new_jobs.next().expect("Missing scan job for entry") {
2444 job.ignore_stack = if entry.is_ignored {
2445 IgnoreStack::all()
2446 } else {
2447 ignore_stack.clone()
2448 };
2449 }
2450 }
2451 }
2452 }
2453
2454 let mut child_entry = Entry::new(
2455 child_path.clone(),
2456 &child_metadata,
2457 &next_entry_id,
2458 root_char_bag,
2459 );
2460
2461 if child_entry.is_dir() {
2462 let is_ignored = ignore_stack.is_abs_path_ignored(&child_abs_path, true);
2463 child_entry.is_ignored = is_ignored;
2464
                // Avoid recursing indefinitely (and eventually crashing) when a symlink creates a cycle
2466 if !job.ancestor_inodes.contains(&child_entry.inode) {
2467 let mut ancestor_inodes = job.ancestor_inodes.clone();
2468 ancestor_inodes.insert(child_entry.inode);
2469
2470 new_jobs.push(Some(ScanJob {
2471 abs_path: child_abs_path,
2472 path: child_path,
2473 ignore_stack: if is_ignored {
2474 IgnoreStack::all()
2475 } else {
2476 ignore_stack.clone()
2477 },
2478 ancestor_inodes,
2479 scan_queue: job.scan_queue.clone(),
2480 }));
2481 } else {
2482 new_jobs.push(None);
2483 }
2484 } else {
2485 child_entry.is_ignored = ignore_stack.is_abs_path_ignored(&child_abs_path, false);
2486 }
2487
2488 new_entries.push(child_entry);
2489 }
2490
2491 self.snapshot.lock().populate_dir(
2492 job.path.clone(),
2493 new_entries,
2494 new_ignore,
2495 self.fs.as_ref(),
2496 );
2497
2498 for new_job in new_jobs {
2499 if let Some(new_job) = new_job {
2500 job.scan_queue.send(new_job).await.unwrap();
2501 }
2502 }
2503
2504 Ok(())
2505 }
2506
2507 async fn process_events(
2508 &mut self,
2509 abs_paths: Vec<PathBuf>,
2510 received_before_initialized: bool,
2511 ) -> bool {
2512 let (scan_queue_tx, scan_queue_rx) = channel::unbounded();
2513
2514 let prev_snapshot = {
2515 let mut snapshot = self.snapshot.lock();
2516 snapshot.scan_started();
2517 snapshot.clone()
2518 };
2519
2520 let event_paths = if let Some(event_paths) = self
2521 .update_entries_for_paths(abs_paths, Some(scan_queue_tx))
2522 .await
2523 {
2524 event_paths
2525 } else {
2526 return false;
2527 };
2528
2529 // Scan any directories that were created as part of this event batch.
2530 self.executor
2531 .scoped(|scope| {
2532 for _ in 0..self.executor.num_cpus() {
2533 scope.spawn(async {
2534 while let Ok(job) = scan_queue_rx.recv().await {
2535 if let Err(err) = self
2536 .scan_dir(
2537 prev_snapshot.root_char_bag,
2538 prev_snapshot.next_entry_id.clone(),
2539 &job,
2540 )
2541 .await
2542 {
2543 log::error!("error scanning {:?}: {}", job.abs_path, err);
2544 }
2545 }
2546 });
2547 }
2548 })
2549 .await;
2550
2551 // Attempt to detect renames only over a single batch of file-system events.
2552 self.snapshot.lock().removed_entry_ids.clear();
2553
2554 self.update_ignore_statuses().await;
2555 self.update_git_repositories();
2556 self.build_change_set(
2557 prev_snapshot.snapshot,
2558 event_paths,
2559 received_before_initialized,
2560 );
2561 self.snapshot.lock().scan_completed();
2562 true
2563 }
2564
2565 async fn update_entries_for_paths(
2566 &self,
2567 mut abs_paths: Vec<PathBuf>,
2568 scan_queue_tx: Option<Sender<ScanJob>>,
2569 ) -> Option<Vec<Arc<Path>>> {
2570 abs_paths.sort_unstable();
2571 abs_paths.dedup_by(|a, b| a.starts_with(&b));
2572
2573 let root_abs_path = self.snapshot.lock().abs_path.clone();
2574 let root_canonical_path = self.fs.canonicalize(&root_abs_path).await.ok()?;
2575 let metadata = futures::future::join_all(
2576 abs_paths
2577 .iter()
2578 .map(|abs_path| self.fs.metadata(&abs_path))
2579 .collect::<Vec<_>>(),
2580 )
2581 .await;
2582
2583 let mut snapshot = self.snapshot.lock();
2584 if scan_queue_tx.is_some() {
2585 for abs_path in &abs_paths {
2586 if let Ok(path) = abs_path.strip_prefix(&root_canonical_path) {
2587 snapshot.remove_path(path);
2588 }
2589 }
2590 }
2591
2592 let mut event_paths = Vec::with_capacity(abs_paths.len());
2593 for (abs_path, metadata) in abs_paths.into_iter().zip(metadata.into_iter()) {
2594 let path: Arc<Path> = match abs_path.strip_prefix(&root_canonical_path) {
2595 Ok(path) => Arc::from(path.to_path_buf()),
2596 Err(_) => {
2597 log::error!(
2598 "unexpected event {:?} for root path {:?}",
2599 abs_path,
2600 root_canonical_path
2601 );
2602 continue;
2603 }
2604 };
2605 event_paths.push(path.clone());
2606 let abs_path = root_abs_path.join(&path);
2607
2608 match metadata {
2609 Ok(Some(metadata)) => {
2610 let ignore_stack =
2611 snapshot.ignore_stack_for_abs_path(&abs_path, metadata.is_dir);
2612 let mut fs_entry = Entry::new(
2613 path.clone(),
2614 &metadata,
2615 snapshot.next_entry_id.as_ref(),
2616 snapshot.root_char_bag,
2617 );
2618 fs_entry.is_ignored = ignore_stack.is_all();
2619 snapshot.insert_entry(fs_entry, self.fs.as_ref());
2620
2621 let scan_id = snapshot.scan_id;
2622 if let Some(repo) = snapshot.repo_with_dot_git_containing(&path) {
2623 repo.repo.lock().reload_index();
2624 repo.scan_id = scan_id;
2625 }
2626
2627 if let Some(scan_queue_tx) = &scan_queue_tx {
2628 let mut ancestor_inodes = snapshot.ancestor_inodes_for_path(&path);
2629 if metadata.is_dir && !ancestor_inodes.contains(&metadata.inode) {
2630 ancestor_inodes.insert(metadata.inode);
2631 self.executor
2632 .block(scan_queue_tx.send(ScanJob {
2633 abs_path,
2634 path,
2635 ignore_stack,
2636 ancestor_inodes,
2637 scan_queue: scan_queue_tx.clone(),
2638 }))
2639 .unwrap();
2640 }
2641 }
2642 }
2643 Ok(None) => {}
2644 Err(err) => {
2645 // TODO - create a special 'error' entry in the entries tree to mark this
2646 log::error!("error reading file on event {:?}", err);
2647 }
2648 }
2649 }
2650
2651 Some(event_paths)
2652 }
2653
2654 async fn update_ignore_statuses(&self) {
2655 let mut snapshot = self.snapshot.lock().clone();
2656 let mut ignores_to_update = Vec::new();
2657 let mut ignores_to_delete = Vec::new();
2658 for (parent_abs_path, (_, scan_id)) in &snapshot.ignores_by_parent_abs_path {
2659 if let Ok(parent_path) = parent_abs_path.strip_prefix(&snapshot.abs_path) {
2660 if *scan_id == snapshot.scan_id && snapshot.entry_for_path(parent_path).is_some() {
2661 ignores_to_update.push(parent_abs_path.clone());
2662 }
2663
2664 let ignore_path = parent_path.join(&*GITIGNORE);
2665 if snapshot.entry_for_path(ignore_path).is_none() {
2666 ignores_to_delete.push(parent_abs_path.clone());
2667 }
2668 }
2669 }
2670
2671 for parent_abs_path in ignores_to_delete {
2672 snapshot.ignores_by_parent_abs_path.remove(&parent_abs_path);
2673 self.snapshot
2674 .lock()
2675 .ignores_by_parent_abs_path
2676 .remove(&parent_abs_path);
2677 }
2678
2679 let (ignore_queue_tx, ignore_queue_rx) = channel::unbounded();
2680 ignores_to_update.sort_unstable();
2681 let mut ignores_to_update = ignores_to_update.into_iter().peekable();
2682 while let Some(parent_abs_path) = ignores_to_update.next() {
2683 while ignores_to_update
2684 .peek()
2685 .map_or(false, |p| p.starts_with(&parent_abs_path))
2686 {
2687 ignores_to_update.next().unwrap();
2688 }
2689
2690 let ignore_stack = snapshot.ignore_stack_for_abs_path(&parent_abs_path, true);
2691 ignore_queue_tx
2692 .send(UpdateIgnoreStatusJob {
2693 abs_path: parent_abs_path,
2694 ignore_stack,
2695 ignore_queue: ignore_queue_tx.clone(),
2696 })
2697 .await
2698 .unwrap();
2699 }
2700 drop(ignore_queue_tx);
2701
2702 self.executor
2703 .scoped(|scope| {
2704 for _ in 0..self.executor.num_cpus() {
2705 scope.spawn(async {
2706 while let Ok(job) = ignore_queue_rx.recv().await {
2707 self.update_ignore_status(job, &snapshot).await;
2708 }
2709 });
2710 }
2711 })
2712 .await;
2713 }
2714
2715 fn update_git_repositories(&self) {
2716 let mut snapshot = self.snapshot.lock();
2717 let mut git_repositories = mem::take(&mut snapshot.git_repositories);
2718 git_repositories.retain(|repo| snapshot.entry_for_path(&repo.git_dir_path).is_some());
2719 snapshot.git_repositories = git_repositories;
2720 }
2721
2722 async fn update_ignore_status(&self, job: UpdateIgnoreStatusJob, snapshot: &LocalSnapshot) {
2723 let mut ignore_stack = job.ignore_stack;
2724 if let Some((ignore, _)) = snapshot.ignores_by_parent_abs_path.get(&job.abs_path) {
2725 ignore_stack = ignore_stack.append(job.abs_path.clone(), ignore.clone());
2726 }
2727
2728 let mut entries_by_id_edits = Vec::new();
2729 let mut entries_by_path_edits = Vec::new();
2730 let path = job.abs_path.strip_prefix(&snapshot.abs_path).unwrap();
2731 for mut entry in snapshot.child_entries(path).cloned() {
2732 let was_ignored = entry.is_ignored;
2733 let abs_path = self.abs_path().join(&entry.path);
2734 entry.is_ignored = ignore_stack.is_abs_path_ignored(&abs_path, entry.is_dir());
2735 if entry.is_dir() {
2736 let child_ignore_stack = if entry.is_ignored {
2737 IgnoreStack::all()
2738 } else {
2739 ignore_stack.clone()
2740 };
2741 job.ignore_queue
2742 .send(UpdateIgnoreStatusJob {
2743 abs_path: abs_path.into(),
2744 ignore_stack: child_ignore_stack,
2745 ignore_queue: job.ignore_queue.clone(),
2746 })
2747 .await
2748 .unwrap();
2749 }
2750
2751 if entry.is_ignored != was_ignored {
2752 let mut path_entry = snapshot.entries_by_id.get(&entry.id, &()).unwrap().clone();
2753 path_entry.scan_id = snapshot.scan_id;
2754 path_entry.is_ignored = entry.is_ignored;
2755 entries_by_id_edits.push(Edit::Insert(path_entry));
2756 entries_by_path_edits.push(Edit::Insert(entry));
2757 }
2758 }
2759
2760 let mut snapshot = self.snapshot.lock();
2761 snapshot.entries_by_path.edit(entries_by_path_edits, &());
2762 snapshot.entries_by_id.edit(entries_by_id_edits, &());
2763 }
2764
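    // Computes the set of `PathChange`s between the old and new snapshots by walking two
    // cursors over the entries touched by a batch of events. A rough sketch of the merge:
    // if the old entries under an event path are [a, a/b] and the new entries are [a, a/c],
    // then "a" appears in both (reported as `Updated` only if its mtime changed, or
    // `AddedOrUpdated` if the events arrived before initialization), "a/b" appears only in
    // the old snapshot (`Removed`), and "a/c" only in the new one (`Added`).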
2765 fn build_change_set(
2766 &mut self,
2767 old_snapshot: Snapshot,
2768 event_paths: Vec<Arc<Path>>,
2769 received_before_initialized: bool,
2770 ) {
2771 let new_snapshot = self.snapshot.lock();
2772 let mut old_paths = old_snapshot.entries_by_path.cursor::<PathKey>();
2773 let mut new_paths = new_snapshot.entries_by_path.cursor::<PathKey>();
2774
2775 use PathChange::{Added, AddedOrUpdated, Removed, Updated};
2776
2777 for path in event_paths {
2778 let path = PathKey(path);
2779 old_paths.seek(&path, Bias::Left, &());
2780 new_paths.seek(&path, Bias::Left, &());
2781
2782 loop {
2783 match (old_paths.item(), new_paths.item()) {
2784 (Some(old_entry), Some(new_entry)) => {
2785 if old_entry.path > path.0
2786 && new_entry.path > path.0
2787 && !old_entry.path.starts_with(&path.0)
2788 && !new_entry.path.starts_with(&path.0)
2789 {
2790 break;
2791 }
2792
2793 match Ord::cmp(&old_entry.path, &new_entry.path) {
2794 Ordering::Less => {
2795 self.changes.insert(old_entry.path.clone(), Removed);
2796 old_paths.next(&());
2797 }
2798 Ordering::Equal => {
2799 if received_before_initialized {
2800 // If the worktree was not fully initialized when this event was generated,
2801 // we can't know whether this entry was added during the scan or whether
2802 // it was merely updated.
2803 self.changes.insert(old_entry.path.clone(), AddedOrUpdated);
2804 } else if old_entry.mtime != new_entry.mtime {
2805 self.changes.insert(old_entry.path.clone(), Updated);
2806 }
2807 old_paths.next(&());
2808 new_paths.next(&());
2809 }
2810 Ordering::Greater => {
2811 self.changes.insert(new_entry.path.clone(), Added);
2812 new_paths.next(&());
2813 }
2814 }
2815 }
2816 (Some(old_entry), None) => {
2817 self.changes.insert(old_entry.path.clone(), Removed);
2818 old_paths.next(&());
2819 }
2820 (None, Some(new_entry)) => {
2821 self.changes.insert(new_entry.path.clone(), Added);
2822 new_paths.next(&());
2823 }
2824 (None, None) => break,
2825 }
2826 }
2827 }
2828 }
2829}
2830
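// Extends the worktree root's character bag with the lowercased characters of a path. This
// bag is stored in `EntryKind::File` and used for fuzzy matching. As an illustrative example,
// a root bag built from "zed" combined with the path "src/Main.rs" yields a bag that also
// contains 's', 'r', 'c', '/', 'm', 'a', 'i', 'n', and '.'.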
2831fn char_bag_for_path(root_char_bag: CharBag, path: &Path) -> CharBag {
2832 let mut result = root_char_bag;
2833 result.extend(
2834 path.to_string_lossy()
2835 .chars()
2836 .map(|c| c.to_ascii_lowercase()),
2837 );
2838 result
2839}
2840
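// A unit of work for the background scanner: scan the directory at `abs_path` and enqueue
// follow-up jobs for its subdirectories on `scan_queue`. `ancestor_inodes` carries the inodes
// of the directory's ancestors so that symlink cycles can be detected and skipped.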
2841struct ScanJob {
2842 abs_path: PathBuf,
2843 path: Arc<Path>,
2844 ignore_stack: Arc<IgnoreStack>,
2845 scan_queue: Sender<ScanJob>,
2846 ancestor_inodes: TreeSet<u64>,
2847}
2848
2849struct UpdateIgnoreStatusJob {
2850 abs_path: Arc<Path>,
2851 ignore_stack: Arc<IgnoreStack>,
2852 ignore_queue: Sender<UpdateIgnoreStatusJob>,
2853}
2854
2855pub trait WorktreeHandle {
2856 #[cfg(any(test, feature = "test-support"))]
2857 fn flush_fs_events<'a>(
2858 &self,
2859 cx: &'a gpui::TestAppContext,
2860 ) -> futures::future::LocalBoxFuture<'a, ()>;
2861}
2862
2863impl WorktreeHandle for ModelHandle<Worktree> {
    // The worktree's FS event stream sometimes delivers "redundant" events for FS changes that
    // occurred before the worktree was constructed. These events can cause the worktree to perform
    // extra directory scans and emit extra scan-state notifications.
2867 //
2868 // This function mutates the worktree's directory and waits for those mutations to be picked up,
2869 // to ensure that all redundant FS events have already been processed.
2870 #[cfg(any(test, feature = "test-support"))]
2871 fn flush_fs_events<'a>(
2872 &self,
2873 cx: &'a gpui::TestAppContext,
2874 ) -> futures::future::LocalBoxFuture<'a, ()> {
2875 use smol::future::FutureExt;
2876
2877 let filename = "fs-event-sentinel";
2878 let tree = self.clone();
2879 let (fs, root_path) = self.read_with(cx, |tree, _| {
2880 let tree = tree.as_local().unwrap();
2881 (tree.fs.clone(), tree.abs_path().clone())
2882 });
2883
2884 async move {
2885 fs.create_file(&root_path.join(filename), Default::default())
2886 .await
2887 .unwrap();
2888 tree.condition(cx, |tree, _| tree.entry_for_path(filename).is_some())
2889 .await;
2890
2891 fs.remove_file(&root_path.join(filename), Default::default())
2892 .await
2893 .unwrap();
2894 tree.condition(cx, |tree, _| tree.entry_for_path(filename).is_none())
2895 .await;
2896
2897 cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
2898 .await;
2899 }
2900 .boxed_local()
2901 }
2902}
2903
2904#[derive(Clone, Debug)]
2905struct TraversalProgress<'a> {
2906 max_path: &'a Path,
2907 count: usize,
2908 visible_count: usize,
2909 file_count: usize,
2910 visible_file_count: usize,
2911}
2912
2913impl<'a> TraversalProgress<'a> {
2914 fn count(&self, include_dirs: bool, include_ignored: bool) -> usize {
2915 match (include_ignored, include_dirs) {
2916 (true, true) => self.count,
2917 (true, false) => self.file_count,
2918 (false, true) => self.visible_count,
2919 (false, false) => self.visible_file_count,
2920 }
2921 }
2922}
2923
2924impl<'a> sum_tree::Dimension<'a, EntrySummary> for TraversalProgress<'a> {
2925 fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
2926 self.max_path = summary.max_path.as_ref();
2927 self.count += summary.count;
2928 self.visible_count += summary.visible_count;
2929 self.file_count += summary.file_count;
2930 self.visible_file_count += summary.visible_file_count;
2931 }
2932}
2933
2934impl<'a> Default for TraversalProgress<'a> {
2935 fn default() -> Self {
2936 Self {
2937 max_path: Path::new(""),
2938 count: 0,
2939 visible_count: 0,
2940 file_count: 0,
2941 visible_file_count: 0,
2942 }
2943 }
2944}
2945
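// A cursor-based iterator over a snapshot's entries that can optionally skip directories
// and/or ignored entries. Illustrative usage (a sketch, assuming a snapshot in scope; the
// `entries` and `files` constructors are defined on the snapshot types elsewhere in this
// file):
//
//     for entry in snapshot.entries(false) {
//         println!("{:?}", entry.path);
//     }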
2946pub struct Traversal<'a> {
2947 cursor: sum_tree::Cursor<'a, Entry, TraversalProgress<'a>>,
2948 include_ignored: bool,
2949 include_dirs: bool,
2950}
2951
2952impl<'a> Traversal<'a> {
2953 pub fn advance(&mut self) -> bool {
2954 self.advance_to_offset(self.offset() + 1)
2955 }
2956
2957 pub fn advance_to_offset(&mut self, offset: usize) -> bool {
2958 self.cursor.seek_forward(
2959 &TraversalTarget::Count {
2960 count: offset,
2961 include_dirs: self.include_dirs,
2962 include_ignored: self.include_ignored,
2963 },
2964 Bias::Right,
2965 &(),
2966 )
2967 }
2968
2969 pub fn advance_to_sibling(&mut self) -> bool {
2970 while let Some(entry) = self.cursor.item() {
2971 self.cursor.seek_forward(
2972 &TraversalTarget::PathSuccessor(&entry.path),
2973 Bias::Left,
2974 &(),
2975 );
2976 if let Some(entry) = self.cursor.item() {
2977 if (self.include_dirs || !entry.is_dir())
2978 && (self.include_ignored || !entry.is_ignored)
2979 {
2980 return true;
2981 }
2982 }
2983 }
2984 false
2985 }
2986
2987 pub fn entry(&self) -> Option<&'a Entry> {
2988 self.cursor.item()
2989 }
2990
2991 pub fn offset(&self) -> usize {
2992 self.cursor
2993 .start()
2994 .count(self.include_dirs, self.include_ignored)
2995 }
2996}
2997
2998impl<'a> Iterator for Traversal<'a> {
2999 type Item = &'a Entry;
3000
3001 fn next(&mut self) -> Option<Self::Item> {
3002 if let Some(item) = self.entry() {
3003 self.advance();
3004 Some(item)
3005 } else {
3006 None
3007 }
3008 }
3009}
3010
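// Seek targets for a `Traversal`'s cursor. `Path` seeks to an exact path, `Count` seeks to
// the n-th entry under the current include_dirs/include_ignored filter, and
// `PathSuccessor(p)` seeks to the first entry that is neither `p` nor beneath it. For
// example, given entries ["a", "a/b", "a/c", "b"], seeking to `PathSuccessor("a")` lands on
// "b"; this is how `advance_to_sibling` steps over an entire subtree.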
3011#[derive(Debug)]
3012enum TraversalTarget<'a> {
3013 Path(&'a Path),
3014 PathSuccessor(&'a Path),
3015 Count {
3016 count: usize,
3017 include_ignored: bool,
3018 include_dirs: bool,
3019 },
3020}
3021
3022impl<'a, 'b> SeekTarget<'a, EntrySummary, TraversalProgress<'a>> for TraversalTarget<'b> {
3023 fn cmp(&self, cursor_location: &TraversalProgress<'a>, _: &()) -> Ordering {
3024 match self {
3025 TraversalTarget::Path(path) => path.cmp(&cursor_location.max_path),
3026 TraversalTarget::PathSuccessor(path) => {
3027 if !cursor_location.max_path.starts_with(path) {
3028 Ordering::Equal
3029 } else {
3030 Ordering::Greater
3031 }
3032 }
3033 TraversalTarget::Count {
3034 count,
3035 include_dirs,
3036 include_ignored,
3037 } => Ord::cmp(
3038 count,
3039 &cursor_location.count(*include_dirs, *include_ignored),
3040 ),
3041 }
3042 }
3043}
3044
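// Yields the entries directly beneath `parent_path`, using `advance_to_sibling` to step
// over each child's descendants (assuming the traversal starts positioned at the first
// child, as set up by the snapshot's `child_entries` method).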
3045struct ChildEntriesIter<'a> {
3046 parent_path: &'a Path,
3047 traversal: Traversal<'a>,
3048}
3049
3050impl<'a> Iterator for ChildEntriesIter<'a> {
3051 type Item = &'a Entry;
3052
3053 fn next(&mut self) -> Option<Self::Item> {
3054 if let Some(item) = self.traversal.entry() {
3055 if item.path.starts_with(&self.parent_path) {
3056 self.traversal.advance_to_sibling();
3057 return Some(item);
3058 }
3059 }
3060 None
3061 }
3062}
3063
3064impl<'a> From<&'a Entry> for proto::Entry {
3065 fn from(entry: &'a Entry) -> Self {
3066 Self {
3067 id: entry.id.to_proto(),
3068 is_dir: entry.is_dir(),
3069 path: entry.path.to_string_lossy().into(),
3070 inode: entry.inode,
3071 mtime: Some(entry.mtime.into()),
3072 is_symlink: entry.is_symlink,
3073 is_ignored: entry.is_ignored,
3074 }
3075 }
3076}
3077
3078impl<'a> TryFrom<(&'a CharBag, proto::Entry)> for Entry {
3079 type Error = anyhow::Error;
3080
3081 fn try_from((root_char_bag, entry): (&'a CharBag, proto::Entry)) -> Result<Self> {
3082 if let Some(mtime) = entry.mtime {
3083 let kind = if entry.is_dir {
3084 EntryKind::Dir
3085 } else {
3086 let mut char_bag = *root_char_bag;
3087 char_bag.extend(entry.path.chars().map(|c| c.to_ascii_lowercase()));
3088 EntryKind::File(char_bag)
3089 };
3090 let path: Arc<Path> = PathBuf::from(entry.path).into();
3091 Ok(Entry {
3092 id: ProjectEntryId::from_proto(entry.id),
3093 kind,
3094 path,
3095 inode: entry.inode,
3096 mtime: mtime.into(),
3097 is_symlink: entry.is_symlink,
3098 is_ignored: entry.is_ignored,
3099 })
3100 } else {
3101 Err(anyhow!(
3102 "missing mtime in remote worktree entry {:?}",
3103 entry.path
3104 ))
3105 }
3106 }
3107}
3108
3109#[cfg(test)]
3110mod tests {
3111 use super::*;
3112 use client::test::FakeHttpClient;
3113 use fs::repository::FakeGitRepository;
3114 use fs::{FakeFs, RealFs};
3115 use gpui::{executor::Deterministic, TestAppContext};
3116 use rand::prelude::*;
3117 use serde_json::json;
3118 use std::{env, fmt::Write};
3119 use util::test::temp_tree;
3120
3121 #[gpui::test]
3122 async fn test_traversal(cx: &mut TestAppContext) {
3123 let fs = FakeFs::new(cx.background());
3124 fs.insert_tree(
3125 "/root",
3126 json!({
3127 ".gitignore": "a/b\n",
3128 "a": {
3129 "b": "",
3130 "c": "",
3131 }
3132 }),
3133 )
3134 .await;
3135
3136 let http_client = FakeHttpClient::with_404_response();
3137 let client = cx.read(|cx| Client::new(http_client, cx));
3138
3139 let tree = Worktree::local(
3140 client,
3141 Path::new("/root"),
3142 true,
3143 fs,
3144 Default::default(),
3145 &mut cx.to_async(),
3146 )
3147 .await
3148 .unwrap();
3149 cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
3150 .await;
3151
3152 tree.read_with(cx, |tree, _| {
3153 assert_eq!(
3154 tree.entries(false)
3155 .map(|entry| entry.path.as_ref())
3156 .collect::<Vec<_>>(),
3157 vec![
3158 Path::new(""),
3159 Path::new(".gitignore"),
3160 Path::new("a"),
3161 Path::new("a/c"),
3162 ]
3163 );
3164 assert_eq!(
3165 tree.entries(true)
3166 .map(|entry| entry.path.as_ref())
3167 .collect::<Vec<_>>(),
3168 vec![
3169 Path::new(""),
3170 Path::new(".gitignore"),
3171 Path::new("a"),
3172 Path::new("a/b"),
3173 Path::new("a/c"),
3174 ]
3175 );
3176 })
3177 }
3178
3179 #[gpui::test(iterations = 10)]
3180 async fn test_circular_symlinks(executor: Arc<Deterministic>, cx: &mut TestAppContext) {
3181 let fs = FakeFs::new(cx.background());
3182 fs.insert_tree(
3183 "/root",
3184 json!({
3185 "lib": {
3186 "a": {
3187 "a.txt": ""
3188 },
3189 "b": {
3190 "b.txt": ""
3191 }
3192 }
3193 }),
3194 )
3195 .await;
3196 fs.insert_symlink("/root/lib/a/lib", "..".into()).await;
3197 fs.insert_symlink("/root/lib/b/lib", "..".into()).await;
3198
3199 let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
3200 let tree = Worktree::local(
3201 client,
3202 Path::new("/root"),
3203 true,
3204 fs.clone(),
3205 Default::default(),
3206 &mut cx.to_async(),
3207 )
3208 .await
3209 .unwrap();
3210
3211 cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
3212 .await;
3213
3214 tree.read_with(cx, |tree, _| {
3215 assert_eq!(
3216 tree.entries(false)
3217 .map(|entry| entry.path.as_ref())
3218 .collect::<Vec<_>>(),
3219 vec![
3220 Path::new(""),
3221 Path::new("lib"),
3222 Path::new("lib/a"),
3223 Path::new("lib/a/a.txt"),
3224 Path::new("lib/a/lib"),
3225 Path::new("lib/b"),
3226 Path::new("lib/b/b.txt"),
3227 Path::new("lib/b/lib"),
3228 ]
3229 );
3230 });
3231
3232 fs.rename(
3233 Path::new("/root/lib/a/lib"),
3234 Path::new("/root/lib/a/lib-2"),
3235 Default::default(),
3236 )
3237 .await
3238 .unwrap();
3239 executor.run_until_parked();
3240 tree.read_with(cx, |tree, _| {
3241 assert_eq!(
3242 tree.entries(false)
3243 .map(|entry| entry.path.as_ref())
3244 .collect::<Vec<_>>(),
3245 vec![
3246 Path::new(""),
3247 Path::new("lib"),
3248 Path::new("lib/a"),
3249 Path::new("lib/a/a.txt"),
3250 Path::new("lib/a/lib-2"),
3251 Path::new("lib/b"),
3252 Path::new("lib/b/b.txt"),
3253 Path::new("lib/b/lib"),
3254 ]
3255 );
3256 });
3257 }
3258
3259 #[gpui::test]
3260 async fn test_rescan_with_gitignore(cx: &mut TestAppContext) {
3261 let parent_dir = temp_tree(json!({
3262 ".gitignore": "ancestor-ignored-file1\nancestor-ignored-file2\n",
3263 "tree": {
3264 ".git": {},
3265 ".gitignore": "ignored-dir\n",
3266 "tracked-dir": {
3267 "tracked-file1": "",
3268 "ancestor-ignored-file1": "",
3269 },
3270 "ignored-dir": {
3271 "ignored-file1": ""
3272 }
3273 }
3274 }));
3275 let dir = parent_dir.path().join("tree");
3276
3277 let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
3278
3279 let tree = Worktree::local(
3280 client,
3281 dir.as_path(),
3282 true,
3283 Arc::new(RealFs),
3284 Default::default(),
3285 &mut cx.to_async(),
3286 )
3287 .await
3288 .unwrap();
3289 cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
3290 .await;
3291 tree.flush_fs_events(cx).await;
3292 cx.read(|cx| {
3293 let tree = tree.read(cx);
3294 assert!(
3295 !tree
3296 .entry_for_path("tracked-dir/tracked-file1")
3297 .unwrap()
3298 .is_ignored
3299 );
3300 assert!(
3301 tree.entry_for_path("tracked-dir/ancestor-ignored-file1")
3302 .unwrap()
3303 .is_ignored
3304 );
3305 assert!(
3306 tree.entry_for_path("ignored-dir/ignored-file1")
3307 .unwrap()
3308 .is_ignored
3309 );
3310 });
3311
3312 std::fs::write(dir.join("tracked-dir/tracked-file2"), "").unwrap();
3313 std::fs::write(dir.join("tracked-dir/ancestor-ignored-file2"), "").unwrap();
3314 std::fs::write(dir.join("ignored-dir/ignored-file2"), "").unwrap();
3315 tree.flush_fs_events(cx).await;
3316 cx.read(|cx| {
3317 let tree = tree.read(cx);
3318 assert!(
3319 !tree
3320 .entry_for_path("tracked-dir/tracked-file2")
3321 .unwrap()
3322 .is_ignored
3323 );
3324 assert!(
3325 tree.entry_for_path("tracked-dir/ancestor-ignored-file2")
3326 .unwrap()
3327 .is_ignored
3328 );
3329 assert!(
3330 tree.entry_for_path("ignored-dir/ignored-file2")
3331 .unwrap()
3332 .is_ignored
3333 );
3334 assert!(tree.entry_for_path(".git").unwrap().is_ignored);
3335 });
3336 }
3337
3338 #[gpui::test]
3339 async fn test_git_repository_for_path(cx: &mut TestAppContext) {
3340 let root = temp_tree(json!({
3341 "dir1": {
3342 ".git": {},
3343 "deps": {
3344 "dep1": {
3345 ".git": {},
3346 "src": {
3347 "a.txt": ""
3348 }
3349 }
3350 },
3351 "src": {
3352 "b.txt": ""
3353 }
3354 },
3355 "c.txt": "",
3356 }));
3357
3358 let http_client = FakeHttpClient::with_404_response();
3359 let client = cx.read(|cx| Client::new(http_client, cx));
3360 let tree = Worktree::local(
3361 client,
3362 root.path(),
3363 true,
3364 Arc::new(RealFs),
3365 Default::default(),
3366 &mut cx.to_async(),
3367 )
3368 .await
3369 .unwrap();
3370
3371 cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
3372 .await;
3373 tree.flush_fs_events(cx).await;
3374
3375 tree.read_with(cx, |tree, _cx| {
3376 let tree = tree.as_local().unwrap();
3377
3378 assert!(tree.repo_for("c.txt".as_ref()).is_none());
3379
3380 let repo = tree.repo_for("dir1/src/b.txt".as_ref()).unwrap();
3381 assert_eq!(repo.content_path.as_ref(), Path::new("dir1"));
3382 assert_eq!(repo.git_dir_path.as_ref(), Path::new("dir1/.git"));
3383
3384 let repo = tree.repo_for("dir1/deps/dep1/src/a.txt".as_ref()).unwrap();
3385 assert_eq!(repo.content_path.as_ref(), Path::new("dir1/deps/dep1"));
3386 assert_eq!(repo.git_dir_path.as_ref(), Path::new("dir1/deps/dep1/.git"),);
3387 });
3388
3389 let original_scan_id = tree.read_with(cx, |tree, _cx| {
3390 let tree = tree.as_local().unwrap();
3391 tree.repo_for("dir1/src/b.txt".as_ref()).unwrap().scan_id
3392 });
3393
3394 std::fs::write(root.path().join("dir1/.git/random_new_file"), "hello").unwrap();
3395 tree.flush_fs_events(cx).await;
3396
3397 tree.read_with(cx, |tree, _cx| {
3398 let tree = tree.as_local().unwrap();
3399 let new_scan_id = tree.repo_for("dir1/src/b.txt".as_ref()).unwrap().scan_id;
3400 assert_ne!(
3401 original_scan_id, new_scan_id,
3402 "original {original_scan_id}, new {new_scan_id}"
3403 );
3404 });
3405
3406 std::fs::remove_dir_all(root.path().join("dir1/.git")).unwrap();
3407 tree.flush_fs_events(cx).await;
3408
3409 tree.read_with(cx, |tree, _cx| {
3410 let tree = tree.as_local().unwrap();
3411
3412 assert!(tree.repo_for("dir1/src/b.txt".as_ref()).is_none());
3413 });
3414 }
3415
3416 #[test]
3417 fn test_changed_repos() {
3418 fn fake_entry(git_dir_path: impl AsRef<Path>, scan_id: usize) -> GitRepositoryEntry {
3419 GitRepositoryEntry {
3420 repo: Arc::new(Mutex::new(FakeGitRepository::default())),
3421 scan_id,
3422 content_path: git_dir_path.as_ref().parent().unwrap().into(),
3423 git_dir_path: git_dir_path.as_ref().into(),
3424 }
3425 }
3426
3427 let prev_repos: Vec<GitRepositoryEntry> = vec![
3428 fake_entry("/.git", 0),
3429 fake_entry("/a/.git", 0),
3430 fake_entry("/a/b/.git", 0),
3431 ];
3432
3433 let new_repos: Vec<GitRepositoryEntry> = vec![
3434 fake_entry("/a/.git", 1),
3435 fake_entry("/a/b/.git", 0),
3436 fake_entry("/a/c/.git", 0),
3437 ];
3438
3439 let res = LocalWorktree::changed_repos(&prev_repos, &new_repos);
3440
3441 // Deletion retained
3442 assert!(res
3443 .iter()
3444 .find(|repo| repo.git_dir_path.as_ref() == Path::new("/.git") && repo.scan_id == 0)
3445 .is_some());
3446
3447 // Update retained
3448 assert!(res
3449 .iter()
3450 .find(|repo| repo.git_dir_path.as_ref() == Path::new("/a/.git") && repo.scan_id == 1)
3451 .is_some());
3452
3453 // Addition retained
3454 assert!(res
3455 .iter()
3456 .find(|repo| repo.git_dir_path.as_ref() == Path::new("/a/c/.git") && repo.scan_id == 0)
3457 .is_some());
3458
        // No change, not retained
3460 assert!(res
3461 .iter()
3462 .find(|repo| repo.git_dir_path.as_ref() == Path::new("/a/b/.git") && repo.scan_id == 0)
3463 .is_none());
3464 }
3465
3466 #[gpui::test]
3467 async fn test_write_file(cx: &mut TestAppContext) {
3468 let dir = temp_tree(json!({
3469 ".git": {},
3470 ".gitignore": "ignored-dir\n",
3471 "tracked-dir": {},
3472 "ignored-dir": {}
3473 }));
3474
3475 let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
3476
3477 let tree = Worktree::local(
3478 client,
3479 dir.path(),
3480 true,
3481 Arc::new(RealFs),
3482 Default::default(),
3483 &mut cx.to_async(),
3484 )
3485 .await
3486 .unwrap();
3487 cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
3488 .await;
3489 tree.flush_fs_events(cx).await;
3490
3491 tree.update(cx, |tree, cx| {
3492 tree.as_local().unwrap().write_file(
3493 Path::new("tracked-dir/file.txt"),
3494 "hello".into(),
3495 Default::default(),
3496 cx,
3497 )
3498 })
3499 .await
3500 .unwrap();
3501 tree.update(cx, |tree, cx| {
3502 tree.as_local().unwrap().write_file(
3503 Path::new("ignored-dir/file.txt"),
3504 "world".into(),
3505 Default::default(),
3506 cx,
3507 )
3508 })
3509 .await
3510 .unwrap();
3511
3512 tree.read_with(cx, |tree, _| {
3513 let tracked = tree.entry_for_path("tracked-dir/file.txt").unwrap();
3514 let ignored = tree.entry_for_path("ignored-dir/file.txt").unwrap();
3515 assert!(!tracked.is_ignored);
3516 assert!(ignored.is_ignored);
3517 });
3518 }
3519
3520 #[gpui::test(iterations = 30)]
3521 async fn test_create_directory(cx: &mut TestAppContext) {
3522 let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
3523
3524 let fs = FakeFs::new(cx.background());
3525 fs.insert_tree(
3526 "/a",
3527 json!({
3528 "b": {},
3529 "c": {},
3530 "d": {},
3531 }),
3532 )
3533 .await;
3534
3535 let tree = Worktree::local(
3536 client,
3537 "/a".as_ref(),
3538 true,
3539 fs,
3540 Default::default(),
3541 &mut cx.to_async(),
3542 )
3543 .await
3544 .unwrap();
3545
3546 let entry = tree
3547 .update(cx, |tree, cx| {
3548 tree.as_local_mut()
3549 .unwrap()
3550 .create_entry("a/e".as_ref(), true, cx)
3551 })
3552 .await
3553 .unwrap();
3554 assert!(entry.is_dir());
3555
3556 cx.foreground().run_until_parked();
3557 tree.read_with(cx, |tree, _| {
3558 assert_eq!(tree.entry_for_path("a/e").unwrap().kind, EntryKind::Dir);
3559 });
3560 }
3561
3562 #[gpui::test(iterations = 100)]
3563 async fn test_random_worktree_changes(cx: &mut TestAppContext, mut rng: StdRng) {
3564 let operations = env::var("OPERATIONS")
3565 .map(|o| o.parse().unwrap())
3566 .unwrap_or(40);
3567 let initial_entries = env::var("INITIAL_ENTRIES")
3568 .map(|o| o.parse().unwrap())
3569 .unwrap_or(20);
3570
3571 let root_dir = Path::new("/test");
3572 let fs = FakeFs::new(cx.background()) as Arc<dyn Fs>;
3573 fs.as_fake().insert_tree(root_dir, json!({})).await;
3574 for _ in 0..initial_entries {
3575 randomly_mutate_tree(&fs, root_dir, 1.0, &mut rng).await;
3576 }
3577 log::info!("generated initial tree");
3578
3579 let next_entry_id = Arc::new(AtomicUsize::default());
3580 let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
3581 let worktree = Worktree::local(
3582 client.clone(),
3583 root_dir,
3584 true,
3585 fs.clone(),
3586 next_entry_id.clone(),
3587 &mut cx.to_async(),
3588 )
3589 .await
3590 .unwrap();
3591
3592 worktree
3593 .update(cx, |tree, _| tree.as_local_mut().unwrap().scan_complete())
3594 .await;
3595
3596 // After the initial scan is complete, the `UpdatedEntries` event can
3597 // be used to follow along with all changes to the worktree's snapshot.
3598 worktree.update(cx, |tree, cx| {
3599 let mut paths = tree
3600 .as_local()
3601 .unwrap()
3602 .paths()
3603 .cloned()
3604 .collect::<Vec<_>>();
3605
3606 cx.subscribe(&worktree, move |tree, _, event, _| {
3607 if let Event::UpdatedEntries(changes) = event {
3608 for (path, change_type) in changes.iter() {
3609 let path = path.clone();
3610 let ix = match paths.binary_search(&path) {
3611 Ok(ix) | Err(ix) => ix,
3612 };
3613 match change_type {
3614 PathChange::Added => {
3615 assert_ne!(paths.get(ix), Some(&path));
3616 paths.insert(ix, path);
3617 }
3618 PathChange::Removed => {
3619 assert_eq!(paths.get(ix), Some(&path));
3620 paths.remove(ix);
3621 }
3622 PathChange::Updated => {
3623 assert_eq!(paths.get(ix), Some(&path));
3624 }
3625 PathChange::AddedOrUpdated => {
3626 if paths[ix] != path {
3627 paths.insert(ix, path);
3628 }
3629 }
3630 }
3631 }
3632 let new_paths = tree.paths().cloned().collect::<Vec<_>>();
3633 assert_eq!(paths, new_paths, "incorrect changes: {:?}", changes);
3634 }
3635 })
3636 .detach();
3637 });
3638
3639 let mut snapshots = Vec::new();
3640 let mut mutations_len = operations;
3641 while mutations_len > 1 {
3642 randomly_mutate_tree(&fs, root_dir, 1.0, &mut rng).await;
3643 let buffered_event_count = fs.as_fake().buffered_event_count().await;
3644 if buffered_event_count > 0 && rng.gen_bool(0.3) {
3645 let len = rng.gen_range(0..=buffered_event_count);
3646 log::info!("flushing {} events", len);
3647 fs.as_fake().flush_events(len).await;
3648 } else {
3649 randomly_mutate_tree(&fs, root_dir, 0.6, &mut rng).await;
3650 mutations_len -= 1;
3651 }
3652
3653 cx.foreground().run_until_parked();
3654 if rng.gen_bool(0.2) {
3655 log::info!("storing snapshot {}", snapshots.len());
3656 let snapshot =
3657 worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
3658 snapshots.push(snapshot);
3659 }
3660 }
3661
3662 log::info!("quiescing");
3663 fs.as_fake().flush_events(usize::MAX).await;
3664 cx.foreground().run_until_parked();
3665 let snapshot = worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
3666 snapshot.check_invariants();
3667
3668 {
3669 let new_worktree = Worktree::local(
3670 client.clone(),
3671 root_dir,
3672 true,
3673 fs.clone(),
3674 next_entry_id,
3675 &mut cx.to_async(),
3676 )
3677 .await
3678 .unwrap();
3679 new_worktree
3680 .update(cx, |tree, _| tree.as_local_mut().unwrap().scan_complete())
3681 .await;
3682 let new_snapshot =
3683 new_worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
3684 assert_eq!(snapshot.to_vec(true), new_snapshot.to_vec(true));
3685 }
3686
3687 for (i, mut prev_snapshot) in snapshots.into_iter().enumerate() {
3688 let include_ignored = rng.gen::<bool>();
3689 if !include_ignored {
3690 let mut entries_by_path_edits = Vec::new();
3691 let mut entries_by_id_edits = Vec::new();
3692 for entry in prev_snapshot
3693 .entries_by_id
3694 .cursor::<()>()
3695 .filter(|e| e.is_ignored)
3696 {
3697 entries_by_path_edits.push(Edit::Remove(PathKey(entry.path.clone())));
3698 entries_by_id_edits.push(Edit::Remove(entry.id));
3699 }
3700
3701 prev_snapshot
3702 .entries_by_path
3703 .edit(entries_by_path_edits, &());
3704 prev_snapshot.entries_by_id.edit(entries_by_id_edits, &());
3705 }
3706
3707 let update = snapshot.build_update(&prev_snapshot, 0, 0, include_ignored);
3708 prev_snapshot.apply_remote_update(update.clone()).unwrap();
3709 assert_eq!(
3710 prev_snapshot.to_vec(include_ignored),
3711 snapshot.to_vec(include_ignored),
3712 "wrong update for snapshot {i}. update: {:?}",
3713 update
3714 );
3715 }
3716 }
3717
3718 async fn randomly_mutate_tree(
3719 fs: &Arc<dyn Fs>,
3720 root_path: &Path,
3721 insertion_probability: f64,
3722 rng: &mut impl Rng,
3723 ) {
3724 let mut files = Vec::new();
3725 let mut dirs = Vec::new();
3726 for path in fs.as_fake().paths().await {
3727 if path.starts_with(root_path) {
3728 if fs.is_file(&path).await {
3729 files.push(path);
3730 } else {
3731 dirs.push(path);
3732 }
3733 }
3734 }
3735
3736 if (files.is_empty() && dirs.len() == 1) || rng.gen_bool(insertion_probability) {
3737 let path = dirs.choose(rng).unwrap();
3738 let new_path = path.join(gen_name(rng));
3739
3740 if rng.gen() {
3741 log::info!(
3742 "creating dir {:?}",
3743 new_path.strip_prefix(root_path).unwrap()
3744 );
3745 fs.create_dir(&new_path).await.unwrap();
3746 } else {
3747 log::info!(
3748 "creating file {:?}",
3749 new_path.strip_prefix(root_path).unwrap()
3750 );
3751 fs.create_file(&new_path, Default::default()).await.unwrap();
3752 }
3753 } else if rng.gen_bool(0.05) {
3754 let ignore_dir_path = dirs.choose(rng).unwrap();
3755 let ignore_path = ignore_dir_path.join(&*GITIGNORE);
3756
3757 let subdirs = dirs
3758 .iter()
3759 .filter(|d| d.starts_with(&ignore_dir_path))
3760 .cloned()
3761 .collect::<Vec<_>>();
3762 let subfiles = files
3763 .iter()
3764 .filter(|d| d.starts_with(&ignore_dir_path))
3765 .cloned()
3766 .collect::<Vec<_>>();
3767 let files_to_ignore = {
3768 let len = rng.gen_range(0..=subfiles.len());
3769 subfiles.choose_multiple(rng, len)
3770 };
3771 let dirs_to_ignore = {
3772 let len = rng.gen_range(0..subdirs.len());
3773 subdirs.choose_multiple(rng, len)
3774 };
3775
3776 let mut ignore_contents = String::new();
3777 for path_to_ignore in files_to_ignore.chain(dirs_to_ignore) {
3778 writeln!(
3779 ignore_contents,
3780 "{}",
3781 path_to_ignore
3782 .strip_prefix(&ignore_dir_path)
3783 .unwrap()
3784 .to_str()
3785 .unwrap()
3786 )
3787 .unwrap();
3788 }
3789 log::info!(
3790 "creating gitignore {:?} with contents:\n{}",
3791 ignore_path.strip_prefix(&root_path).unwrap(),
3792 ignore_contents
3793 );
3794 fs.save(
3795 &ignore_path,
3796 &ignore_contents.as_str().into(),
3797 Default::default(),
3798 )
3799 .await
3800 .unwrap();
3801 } else {
3802 let old_path = {
3803 let file_path = files.choose(rng);
3804 let dir_path = dirs[1..].choose(rng);
3805 file_path.into_iter().chain(dir_path).choose(rng).unwrap()
3806 };
3807
3808 let is_rename = rng.gen();
3809 if is_rename {
3810 let new_path_parent = dirs
3811 .iter()
3812 .filter(|d| !d.starts_with(old_path))
3813 .choose(rng)
3814 .unwrap();
3815
3816 let overwrite_existing_dir =
3817 !old_path.starts_with(&new_path_parent) && rng.gen_bool(0.3);
3818 let new_path = if overwrite_existing_dir {
3819 fs.remove_dir(
3820 &new_path_parent,
3821 RemoveOptions {
3822 recursive: true,
3823 ignore_if_not_exists: true,
3824 },
3825 )
3826 .await
3827 .unwrap();
3828 new_path_parent.to_path_buf()
3829 } else {
3830 new_path_parent.join(gen_name(rng))
3831 };
3832
3833 log::info!(
3834 "renaming {:?} to {}{:?}",
3835 old_path.strip_prefix(&root_path).unwrap(),
3836 if overwrite_existing_dir {
3837 "overwrite "
3838 } else {
3839 ""
3840 },
3841 new_path.strip_prefix(&root_path).unwrap()
3842 );
3843 fs.rename(
3844 &old_path,
3845 &new_path,
3846 fs::RenameOptions {
3847 overwrite: true,
3848 ignore_if_exists: true,
3849 },
3850 )
3851 .await
3852 .unwrap();
3853 } else if fs.is_file(&old_path).await {
3854 log::info!(
3855 "deleting file {:?}",
3856 old_path.strip_prefix(&root_path).unwrap()
3857 );
3858 fs.remove_file(old_path, Default::default()).await.unwrap();
3859 } else {
3860 log::info!(
3861 "deleting dir {:?}",
3862 old_path.strip_prefix(&root_path).unwrap()
3863 );
3864 fs.remove_dir(
3865 &old_path,
3866 RemoveOptions {
3867 recursive: true,
3868 ignore_if_not_exists: true,
3869 },
3870 )
3871 .await
3872 .unwrap();
3873 }
3874 }
3875 }
3876
3877 fn gen_name(rng: &mut impl Rng) -> String {
3878 (0..6)
3879 .map(|_| rng.sample(rand::distributions::Alphanumeric))
3880 .map(char::from)
3881 .collect()
3882 }
3883
3884 impl LocalSnapshot {
3885 fn check_invariants(&self) {
3886 let mut files = self.files(true, 0);
3887 let mut visible_files = self.files(false, 0);
3888 for entry in self.entries_by_path.cursor::<()>() {
3889 if entry.is_file() {
3890 assert_eq!(files.next().unwrap().inode, entry.inode);
3891 if !entry.is_ignored {
3892 assert_eq!(visible_files.next().unwrap().inode, entry.inode);
3893 }
3894 }
3895 }
3896 assert!(files.next().is_none());
3897 assert!(visible_files.next().is_none());
3898
3899 let mut bfs_paths = Vec::new();
3900 let mut stack = vec![Path::new("")];
3901 while let Some(path) = stack.pop() {
3902 bfs_paths.push(path);
3903 let ix = stack.len();
3904 for child_entry in self.child_entries(path) {
3905 stack.insert(ix, &child_entry.path);
3906 }
3907 }
3908
3909 let dfs_paths_via_iter = self
3910 .entries_by_path
3911 .cursor::<()>()
3912 .map(|e| e.path.as_ref())
3913 .collect::<Vec<_>>();
3914 assert_eq!(bfs_paths, dfs_paths_via_iter);
3915
3916 let dfs_paths_via_traversal = self
3917 .entries(true)
3918 .map(|e| e.path.as_ref())
3919 .collect::<Vec<_>>();
3920 assert_eq!(dfs_paths_via_traversal, dfs_paths_via_iter);
3921
3922 for ignore_parent_abs_path in self.ignores_by_parent_abs_path.keys() {
3923 let ignore_parent_path =
3924 ignore_parent_abs_path.strip_prefix(&self.abs_path).unwrap();
3925 assert!(self.entry_for_path(&ignore_parent_path).is_some());
3926 assert!(self
3927 .entry_for_path(ignore_parent_path.join(&*GITIGNORE))
3928 .is_some());
3929 }
3930 }
3931
3932 fn to_vec(&self, include_ignored: bool) -> Vec<(&Path, u64, bool)> {
3933 let mut paths = Vec::new();
3934 for entry in self.entries_by_path.cursor::<()>() {
3935 if include_ignored || !entry.is_ignored {
3936 paths.push((entry.path.as_ref(), entry.inode, entry.is_ignored));
3937 }
3938 }
3939 paths.sort_by(|a, b| a.0.cmp(b.0));
3940 paths
3941 }
3942 }
3943}