1use crate::{
2 copy_recursive, ignore::IgnoreStack, DiagnosticSummary, ProjectEntryId, RemoveOptions,
3};
4use ::ignore::gitignore::{Gitignore, GitignoreBuilder};
5use anyhow::{anyhow, Context, Result};
6use client::{proto, Client};
7use clock::ReplicaId;
8use collections::{HashMap, VecDeque};
9use fs::{repository::GitRepository, Fs, LineEnding};
10use futures::{
11 channel::{
12 mpsc::{self, UnboundedSender},
13 oneshot,
14 },
15 select_biased,
16 task::Poll,
17 Stream, StreamExt,
18};
19use fuzzy::CharBag;
20use git::{DOT_GIT, GITIGNORE};
21use gpui::{executor, AppContext, AsyncAppContext, Entity, ModelContext, ModelHandle, Task};
22use language::{
23 proto::{
24 deserialize_fingerprint, deserialize_version, serialize_fingerprint, serialize_line_ending,
25 serialize_version,
26 },
27 Buffer, DiagnosticEntry, File as _, PointUtf16, Rope, RopeFingerprint, Unclipped,
28};
29use parking_lot::Mutex;
30use postage::{
31 barrier,
32 prelude::{Sink as _, Stream as _},
33 watch,
34};
35use smol::channel::{self, Sender};
36use std::{
37 any::Any,
38 cmp::{self, Ordering},
39 convert::TryFrom,
40 ffi::OsStr,
41 fmt,
42 future::Future,
43 mem,
44 ops::{Deref, DerefMut},
45 path::{Path, PathBuf},
46 pin::Pin,
47 sync::{
48 atomic::{AtomicUsize, Ordering::SeqCst},
49 Arc,
50 },
51 time::{Duration, SystemTime},
52};
53use sum_tree::{Bias, Edit, SeekTarget, SumTree, TreeMap, TreeSet};
54use util::{paths::HOME, ResultExt, TryFutureExt};
55
/// Identifier for a worktree. Constructed from the worktree model's id
/// (see `WorktreeId::from_usize` usage in `Worktree::local`).
#[derive(Copy, Clone, PartialEq, Eq, Debug, Hash, PartialOrd, Ord)]
pub struct WorktreeId(usize);
58
/// A directory tree of files, either residing on the local machine or
/// mirrored from a remote collaborator's machine.
pub enum Worktree {
    Local(LocalWorktree),
    Remote(RemoteWorktree),
}
63
/// A worktree backed by the local file system, kept up to date by a
/// background scanner task.
pub struct LocalWorktree {
    snapshot: LocalSnapshot,
    // Requests rescans of specific absolute paths; the paired barrier
    // sender is dropped once the scan of those paths has been processed.
    path_changes_tx: channel::Sender<(Vec<PathBuf>, barrier::Sender)>,
    // True while the background scanner is working (see `scan_complete`).
    is_scanning: (watch::Sender<bool>, watch::Receiver<bool>),
    // Keeps the background scanner alive for the worktree's lifetime.
    _background_scanner_task: Task<()>,
    // Present only while the worktree is shared with collaborators.
    share: Option<ShareState>,
    // Full diagnostics per worktree-relative path.
    diagnostics: HashMap<Arc<Path>, Vec<DiagnosticEntry<Unclipped<PointUtf16>>>>,
    // Per-path error/warning counts, kept in sync with `diagnostics`.
    diagnostic_summaries: TreeMap<PathKey, DiagnosticSummary>,
    client: Arc<Client>,
    fs: Arc<dyn Fs>,
    visible: bool,
}
76
/// A worktree mirrored from a remote host via protobuf updates.
pub struct RemoteWorktree {
    // Foreground copy of the snapshot, refreshed from
    // `background_snapshot` whenever a remote update has been applied.
    snapshot: Snapshot,
    // Snapshot mutated off the main thread as host updates arrive.
    background_snapshot: Arc<Mutex<Snapshot>>,
    project_id: u64,
    client: Arc<Client>,
    // `None` once the host disconnects (see `disconnected_from_host`).
    updates_tx: Option<UnboundedSender<proto::UpdateWorktree>>,
    // Waiters keyed by scan id, resolved once that scan is observed;
    // kept sorted by scan id (see `wait_for_snapshot`).
    snapshot_subscriptions: VecDeque<(usize, oneshot::Sender<()>)>,
    replica_id: ReplicaId,
    diagnostic_summaries: TreeMap<PathKey, DiagnosticSummary>,
    visible: bool,
    disconnected: bool,
}
89
/// A view of a worktree's contents at a point in time.
#[derive(Clone)]
pub struct Snapshot {
    id: WorktreeId,
    abs_path: Arc<Path>,
    // Name of the root, used for fuzzy matching (see `Worktree::local`).
    root_name: String,
    // Lowercased character bag of `root_name`, also for fuzzy matching.
    root_char_bag: CharBag,
    // Entries indexed by path and by id; the two trees are kept consistent.
    entries_by_path: SumTree<Entry>,
    entries_by_id: SumTree<PathEntry>,
    // Id of the most recently started scan.
    scan_id: usize,
    // Id of the most recently completed scan.
    completed_scan_id: usize,
}
101
/// A git repository discovered within the worktree.
#[derive(Clone)]
pub struct GitRepositoryEntry {
    pub(crate) repo: Arc<Mutex<dyn GitRepository>>,

    // Scan id in which this entry was last updated; compared in
    // `LocalWorktree::changed_repos` to detect changes.
    pub(crate) scan_id: usize,
    // Path to folder containing the .git file or directory
    pub(crate) content_path: Arc<Path>,
    // Path to the actual .git folder.
    // Note: if .git is a file, this points to the folder indicated by the .git file
    pub(crate) git_dir_path: Arc<Path>,
}

impl std::fmt::Debug for GitRepositoryEntry {
    // Manual impl so the `repo` trait object (which is not shown) can be
    // omitted from the debug output.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("GitRepositoryEntry")
            .field("content_path", &self.content_path)
            .field("git_dir_path", &self.git_dir_path)
            .finish()
    }
}
122
/// A `Snapshot` extended with state that only exists for local worktrees.
/// Derefs to the inner `Snapshot` (see the `Deref` impl below).
#[derive(Debug)]
pub struct LocalSnapshot {
    // Parsed gitignore files keyed by the absolute path of their parent
    // directory, paired with a `usize` (presumably the scan id in which
    // they were last seen — TODO confirm against the scanner).
    ignores_by_parent_abs_path: HashMap<Arc<Path>, (Arc<Gitignore>, usize)>,
    git_repositories: Vec<GitRepositoryEntry>,
    removed_entry_ids: HashMap<u64, ProjectEntryId>,
    // Shared counter for allocating ids to newly discovered entries.
    next_entry_id: Arc<AtomicUsize>,
    snapshot: Snapshot,
}
131
132impl Clone for LocalSnapshot {
133 fn clone(&self) -> Self {
134 Self {
135 ignores_by_parent_abs_path: self.ignores_by_parent_abs_path.clone(),
136 git_repositories: self.git_repositories.iter().cloned().collect(),
137 removed_entry_ids: self.removed_entry_ids.clone(),
138 next_entry_id: self.next_entry_id.clone(),
139 snapshot: self.snapshot.clone(),
140 }
141 }
142}
143
// Allow a `LocalSnapshot` to be used anywhere a `Snapshot` is expected.
impl Deref for LocalSnapshot {
    type Target = Snapshot;

    fn deref(&self) -> &Self::Target {
        &self.snapshot
    }
}

impl DerefMut for LocalSnapshot {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.snapshot
    }
}
157
/// Progress reports sent by the background scanner to the event loop set
/// up in `Worktree::local`.
enum ScanState {
    // A scan has begun.
    Started,
    // The scanner produced a new snapshot (possibly while still scanning).
    Updated {
        snapshot: LocalSnapshot,
        // Paths that changed relative to the previous snapshot.
        changes: HashMap<Arc<Path>, PathChange>,
        // Dropped after the update is applied, releasing any caller that
        // requested the rescan (see `LocalWorktree::refresh_entry`).
        barrier: Option<barrier::Sender>,
        // Whether the scanner is still working; mirrored into
        // `LocalWorktree::is_scanning`.
        scanning: bool,
    },
}

/// State that exists only while a local worktree is shared.
struct ShareState {
    project_id: u64,
    // Publishes each new snapshot to the task that streams updates to the
    // server (see `LocalWorktree::share`).
    snapshots_tx: watch::Sender<LocalSnapshot>,
    // Signals the update-streaming task to retry after a failed request.
    resume_updates: watch::Sender<()>,
    // Keeps the update-streaming task alive for the lifetime of the share.
    _maintain_remote_snapshot: Task<Option<()>>,
}

/// Events emitted by `Worktree` models to their observers.
pub enum Event {
    UpdatedEntries(HashMap<Arc<Path>, PathChange>),
    UpdatedGitRepositories(Vec<GitRepositoryEntry>),
}

impl Entity for Worktree {
    type Event = Event;
}
183
184impl Worktree {
    /// Creates a local worktree rooted at `path`, spawning a background
    /// scanner that watches the file system and streams snapshot updates
    /// back into the model.
    pub async fn local(
        client: Arc<Client>,
        path: impl Into<Arc<Path>>,
        visible: bool,
        fs: Arc<dyn Fs>,
        next_entry_id: Arc<AtomicUsize>,
        cx: &mut AsyncAppContext,
    ) -> Result<ModelHandle<Self>> {
        // After determining whether the root entry is a file or a directory, populate the
        // snapshot's "root name", which will be used for the purpose of fuzzy matching.
        let abs_path = path.into();
        let metadata = fs
            .metadata(&abs_path)
            .await
            .context("failed to stat worktree path")?;

        Ok(cx.add_model(move |cx: &mut ModelContext<Worktree>| {
            let root_name = abs_path
                .file_name()
                .map_or(String::new(), |f| f.to_string_lossy().to_string());

            let mut snapshot = LocalSnapshot {
                ignores_by_parent_abs_path: Default::default(),
                git_repositories: Default::default(),
                removed_entry_ids: Default::default(),
                next_entry_id,
                snapshot: Snapshot {
                    id: WorktreeId::from_usize(cx.model_id()),
                    abs_path: abs_path.clone(),
                    root_name: root_name.clone(),
                    root_char_bag: root_name.chars().map(|c| c.to_ascii_lowercase()).collect(),
                    entries_by_path: Default::default(),
                    entries_by_id: Default::default(),
                    // Scan 1 is considered in progress; nothing completed yet.
                    scan_id: 1,
                    completed_scan_id: 0,
                },
            };

            // Seed the snapshot with a root entry if the path exists on disk.
            if let Some(metadata) = metadata {
                snapshot.insert_entry(
                    Entry::new(
                        Arc::from(Path::new("")),
                        &metadata,
                        &snapshot.next_entry_id,
                        snapshot.root_char_bag,
                    ),
                    fs.as_ref(),
                );
            }

            let (path_changes_tx, path_changes_rx) = channel::unbounded();
            let (scan_states_tx, mut scan_states_rx) = mpsc::unbounded();

            // Forward scan states from the background scanner into the model;
            // the loop ends once the model is dropped (weak handle upgrade fails).
            cx.spawn_weak(|this, mut cx| async move {
                while let Some((state, this)) = scan_states_rx.next().await.zip(this.upgrade(&cx)) {
                    this.update(&mut cx, |this, cx| {
                        let this = this.as_local_mut().unwrap();
                        match state {
                            ScanState::Started => {
                                *this.is_scanning.0.borrow_mut() = true;
                            }
                            ScanState::Updated {
                                snapshot,
                                changes,
                                barrier,
                                scanning,
                            } => {
                                *this.is_scanning.0.borrow_mut() = scanning;
                                this.set_snapshot(snapshot, cx);
                                cx.emit(Event::UpdatedEntries(changes));
                                // Dropping the barrier releases whoever
                                // requested this rescan (see `refresh_entry`).
                                drop(barrier);
                            }
                        }
                        cx.notify();
                    });
                }
            })
            .detach();

            let background_scanner_task = cx.background().spawn({
                let fs = fs.clone();
                let snapshot = snapshot.clone();
                let background = cx.background().clone();
                async move {
                    // 100ms watch interval — see `Fs::watch` for its exact
                    // latency/debounce semantics.
                    let events = fs.watch(&abs_path, Duration::from_millis(100)).await;
                    BackgroundScanner::new(
                        snapshot,
                        fs,
                        scan_states_tx,
                        background,
                        path_changes_rx,
                    )
                    .run(events)
                    .await;
                }
            });

            Worktree::Local(LocalWorktree {
                snapshot,
                is_scanning: watch::channel_with(true),
                share: None,
                path_changes_tx,
                _background_scanner_task: background_scanner_task,
                diagnostics: Default::default(),
                diagnostic_summaries: Default::default(),
                client,
                fs,
                visible,
            })
        }))
    }
296
    /// Creates a worktree that mirrors a worktree on a remote host. Updates
    /// from the host are applied to a shared background snapshot off the
    /// main thread, then mirrored into the foreground snapshot.
    pub fn remote(
        project_remote_id: u64,
        replica_id: ReplicaId,
        worktree: proto::WorktreeMetadata,
        client: Arc<Client>,
        cx: &mut AppContext,
    ) -> ModelHandle<Self> {
        cx.add_model(|cx: &mut ModelContext<Self>| {
            let snapshot = Snapshot {
                id: WorktreeId(worktree.id as usize),
                abs_path: Arc::from(PathBuf::from(worktree.abs_path)),
                root_name: worktree.root_name.clone(),
                root_char_bag: worktree
                    .root_name
                    .chars()
                    .map(|c| c.to_ascii_lowercase())
                    .collect(),
                entries_by_path: Default::default(),
                entries_by_id: Default::default(),
                scan_id: 1,
                completed_scan_id: 0,
            };

            let (updates_tx, mut updates_rx) = mpsc::unbounded();
            let background_snapshot = Arc::new(Mutex::new(snapshot.clone()));
            let (mut snapshot_updated_tx, mut snapshot_updated_rx) = watch::channel();

            // Apply host updates to the background snapshot; failures are
            // logged and the stream continues.
            cx.background()
                .spawn({
                    let background_snapshot = background_snapshot.clone();
                    async move {
                        while let Some(update) = updates_rx.next().await {
                            if let Err(error) =
                                background_snapshot.lock().apply_remote_update(update)
                            {
                                log::error!("error applying worktree update: {}", error);
                            }
                            snapshot_updated_tx.send(()).await.ok();
                        }
                    }
                })
                .detach();

            // Mirror the background snapshot into the foreground model and
            // resolve any waiters whose scan id has now been observed.
            cx.spawn_weak(|this, mut cx| async move {
                while (snapshot_updated_rx.recv().await).is_some() {
                    if let Some(this) = this.upgrade(&cx) {
                        this.update(&mut cx, |this, cx| {
                            let this = this.as_remote_mut().unwrap();
                            this.snapshot = this.background_snapshot.lock().clone();
                            cx.emit(Event::UpdatedEntries(Default::default()));
                            cx.notify();
                            // `snapshot_subscriptions` is sorted by scan id,
                            // so resolve waiters from the front.
                            while let Some((scan_id, _)) = this.snapshot_subscriptions.front() {
                                if this.observed_snapshot(*scan_id) {
                                    let (_, tx) = this.snapshot_subscriptions.pop_front().unwrap();
                                    let _ = tx.send(());
                                } else {
                                    break;
                                }
                            }
                        });
                    } else {
                        break;
                    }
                }
            })
            .detach();

            Worktree::Remote(RemoteWorktree {
                project_id: project_remote_id,
                replica_id,
                snapshot: snapshot.clone(),
                background_snapshot,
                updates_tx: Some(updates_tx),
                snapshot_subscriptions: Default::default(),
                client: client.clone(),
                diagnostic_summaries: Default::default(),
                visible: worktree.visible,
                disconnected: false,
            })
        })
    }
378
379 pub fn as_local(&self) -> Option<&LocalWorktree> {
380 if let Worktree::Local(worktree) = self {
381 Some(worktree)
382 } else {
383 None
384 }
385 }
386
387 pub fn as_remote(&self) -> Option<&RemoteWorktree> {
388 if let Worktree::Remote(worktree) = self {
389 Some(worktree)
390 } else {
391 None
392 }
393 }
394
395 pub fn as_local_mut(&mut self) -> Option<&mut LocalWorktree> {
396 if let Worktree::Local(worktree) = self {
397 Some(worktree)
398 } else {
399 None
400 }
401 }
402
403 pub fn as_remote_mut(&mut self) -> Option<&mut RemoteWorktree> {
404 if let Worktree::Remote(worktree) = self {
405 Some(worktree)
406 } else {
407 None
408 }
409 }
410
411 pub fn is_local(&self) -> bool {
412 matches!(self, Worktree::Local(_))
413 }
414
415 pub fn is_remote(&self) -> bool {
416 !self.is_local()
417 }
418
419 pub fn snapshot(&self) -> Snapshot {
420 match self {
421 Worktree::Local(worktree) => worktree.snapshot().snapshot,
422 Worktree::Remote(worktree) => worktree.snapshot(),
423 }
424 }
425
426 pub fn scan_id(&self) -> usize {
427 match self {
428 Worktree::Local(worktree) => worktree.snapshot.scan_id,
429 Worktree::Remote(worktree) => worktree.snapshot.scan_id,
430 }
431 }
432
433 pub fn completed_scan_id(&self) -> usize {
434 match self {
435 Worktree::Local(worktree) => worktree.snapshot.completed_scan_id,
436 Worktree::Remote(worktree) => worktree.snapshot.completed_scan_id,
437 }
438 }
439
440 pub fn is_visible(&self) -> bool {
441 match self {
442 Worktree::Local(worktree) => worktree.visible,
443 Worktree::Remote(worktree) => worktree.visible,
444 }
445 }
446
447 pub fn replica_id(&self) -> ReplicaId {
448 match self {
449 Worktree::Local(_) => 0,
450 Worktree::Remote(worktree) => worktree.replica_id,
451 }
452 }
453
454 pub fn diagnostic_summaries(
455 &self,
456 ) -> impl Iterator<Item = (Arc<Path>, DiagnosticSummary)> + '_ {
457 match self {
458 Worktree::Local(worktree) => &worktree.diagnostic_summaries,
459 Worktree::Remote(worktree) => &worktree.diagnostic_summaries,
460 }
461 .iter()
462 .map(|(path, summary)| (path.0.clone(), *summary))
463 }
464
465 pub fn abs_path(&self) -> Arc<Path> {
466 match self {
467 Worktree::Local(worktree) => worktree.abs_path.clone(),
468 Worktree::Remote(worktree) => worktree.abs_path.clone(),
469 }
470 }
471}
472
473impl LocalWorktree {
474 pub fn contains_abs_path(&self, path: &Path) -> bool {
475 path.starts_with(&self.abs_path)
476 }
477
478 fn absolutize(&self, path: &Path) -> PathBuf {
479 if path.file_name().is_some() {
480 self.abs_path.join(path)
481 } else {
482 self.abs_path.to_path_buf()
483 }
484 }
485
    /// Loads the file at `path` (worktree-relative) and constructs a
    /// `Buffer` model for it, kicking off an initial git diff computation.
    pub(crate) fn load_buffer(
        &mut self,
        path: &Path,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<ModelHandle<Buffer>>> {
        let path = Arc::from(path);
        cx.spawn(move |this, mut cx| async move {
            let (file, contents, diff_base) = this
                .update(&mut cx, |t, cx| t.as_local().unwrap().load(&path, cx))
                .await?;
            Ok(cx.add_model(|cx| {
                // 0 here is presumably the local replica id — confirm
                // against `Buffer::from_file`.
                let mut buffer = Buffer::from_file(0, contents, diff_base, Arc::new(file), cx);
                buffer.git_diff_recalc(cx);
                buffer
            }))
        })
    }

    /// Returns the diagnostics previously stored for `path` via
    /// `update_diagnostics`, if any.
    pub fn diagnostics_for_path(
        &self,
        path: &Path,
    ) -> Option<Vec<DiagnosticEntry<Unclipped<PointUtf16>>>> {
        self.diagnostics.get(path).cloned()
    }
510
    /// Replaces the stored diagnostics for `worktree_path`, updating the
    /// per-path summary and, if shared, notifying collaborators. Returns
    /// whether anything changed (old or new summary non-empty).
    pub fn update_diagnostics(
        &mut self,
        language_server_id: usize,
        worktree_path: Arc<Path>,
        diagnostics: Vec<DiagnosticEntry<Unclipped<PointUtf16>>>,
        _: &mut ModelContext<Worktree>,
    ) -> Result<bool> {
        self.diagnostics.remove(&worktree_path);
        let old_summary = self
            .diagnostic_summaries
            .remove(&PathKey(worktree_path.clone()))
            .unwrap_or_default();
        let new_summary = DiagnosticSummary::new(language_server_id, &diagnostics);
        // Empty diagnostic sets are represented by absence.
        if !new_summary.is_empty() {
            self.diagnostic_summaries
                .insert(PathKey(worktree_path.clone()), new_summary);
            self.diagnostics.insert(worktree_path.clone(), diagnostics);
        }

        let updated = !old_summary.is_empty() || !new_summary.is_empty();
        if updated {
            if let Some(share) = self.share.as_ref() {
                // Best-effort: a failed send is logged, not propagated.
                self.client
                    .send(proto::UpdateDiagnosticSummary {
                        project_id: share.project_id,
                        worktree_id: self.id().to_proto(),
                        summary: Some(proto::DiagnosticSummary {
                            path: worktree_path.to_string_lossy().to_string(),
                            language_server_id: language_server_id as u64,
                            error_count: new_summary.error_count as u32,
                            warning_count: new_summary.warning_count as u32,
                        }),
                    })
                    .log_err();
            }
        }

        Ok(updated)
    }
550
    /// Installs `new_snapshot` as the current snapshot, republishes it to
    /// the sharing task (if any), and emits `UpdatedGitRepositories` for
    /// repositories that differ between the old and new snapshots.
    fn set_snapshot(&mut self, new_snapshot: LocalSnapshot, cx: &mut ModelContext<Worktree>) {
        let updated_repos = Self::changed_repos(
            &self.snapshot.git_repositories,
            &new_snapshot.git_repositories,
        );
        self.snapshot = new_snapshot;

        if let Some(share) = self.share.as_mut() {
            *share.snapshots_tx.borrow_mut() = self.snapshot.clone();
        }

        if !updated_repos.is_empty() {
            cx.emit(Event::UpdatedGitRepositories(updated_repos));
        }
    }
566
567 fn changed_repos(
568 old_repos: &[GitRepositoryEntry],
569 new_repos: &[GitRepositoryEntry],
570 ) -> Vec<GitRepositoryEntry> {
571 fn diff<'a>(
572 a: &'a [GitRepositoryEntry],
573 b: &'a [GitRepositoryEntry],
574 updated: &mut HashMap<&'a Path, GitRepositoryEntry>,
575 ) {
576 for a_repo in a {
577 let matched = b.iter().find(|b_repo| {
578 a_repo.git_dir_path == b_repo.git_dir_path && a_repo.scan_id == b_repo.scan_id
579 });
580
581 if matched.is_none() {
582 updated.insert(a_repo.git_dir_path.as_ref(), a_repo.clone());
583 }
584 }
585 }
586
587 let mut updated = HashMap::<&Path, GitRepositoryEntry>::default();
588
589 diff(old_repos, new_repos, &mut updated);
590 diff(new_repos, old_repos, &mut updated);
591
592 updated.into_values().collect()
593 }
594
595 pub fn scan_complete(&self) -> impl Future<Output = ()> {
596 let mut is_scanning_rx = self.is_scanning.1.clone();
597 async move {
598 let mut is_scanning = is_scanning_rx.borrow().clone();
599 while is_scanning {
600 if let Some(value) = is_scanning_rx.recv().await {
601 is_scanning = value;
602 } else {
603 break;
604 }
605 }
606 }
607 }
608
    /// Returns a clone of the current local snapshot.
    pub fn snapshot(&self) -> LocalSnapshot {
        self.snapshot.clone()
    }

    /// Metadata describing this worktree, for transmission over RPC.
    pub fn metadata_proto(&self) -> proto::WorktreeMetadata {
        proto::WorktreeMetadata {
            id: self.id().to_proto(),
            root_name: self.root_name().to_string(),
            visible: self.visible,
            abs_path: self.abs_path().as_os_str().to_string_lossy().into(),
        }
    }
621
    /// Reads the file at `path` from disk, returning its `File` metadata,
    /// its text, and — when inside a git repository — the file's text as
    /// stored in the git index (used as the diff base).
    fn load(
        &self,
        path: &Path,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<(File, String, Option<String>)>> {
        let handle = cx.handle();
        let path = Arc::from(path);
        let abs_path = self.absolutize(&path);
        let fs = self.fs.clone();
        let snapshot = self.snapshot();

        cx.spawn(|this, mut cx| async move {
            let text = fs.load(&abs_path).await?;

            let diff_base = if let Some(repo) = snapshot.repo_for(&path) {
                if let Ok(repo_relative) = path.strip_prefix(repo.content_path) {
                    let repo_relative = repo_relative.to_owned();
                    // Read the index text on a background thread; the repo
                    // mutex is locked only inside that task.
                    cx.background()
                        .spawn(async move { repo.repo.lock().load_index_text(&repo_relative) })
                        .await
                } else {
                    None
                }
            } else {
                None
            };

            // Eagerly populate the snapshot with an updated entry for the loaded file
            let entry = this
                .update(&mut cx, |this, cx| {
                    this.as_local().unwrap().refresh_entry(path, None, cx)
                })
                .await?;

            Ok((
                File {
                    entry_id: entry.id,
                    worktree: handle,
                    path: entry.path,
                    mtime: entry.mtime,
                    is_local: true,
                    is_deleted: false,
                },
                text,
                diff_base,
            ))
        })
    }
670
671 pub fn save_buffer(
672 &self,
673 buffer_handle: ModelHandle<Buffer>,
674 path: Arc<Path>,
675 has_changed_file: bool,
676 cx: &mut ModelContext<Worktree>,
677 ) -> Task<Result<(clock::Global, RopeFingerprint, SystemTime)>> {
678 let handle = cx.handle();
679 let buffer = buffer_handle.read(cx);
680
681 let rpc = self.client.clone();
682 let buffer_id = buffer.remote_id();
683 let project_id = self.share.as_ref().map(|share| share.project_id);
684
685 let text = buffer.as_rope().clone();
686 let fingerprint = text.fingerprint();
687 let version = buffer.version();
688 let save = self.write_file(path, text, buffer.line_ending(), cx);
689
690 cx.as_mut().spawn(|mut cx| async move {
691 let entry = save.await?;
692
693 if has_changed_file {
694 let new_file = Arc::new(File {
695 entry_id: entry.id,
696 worktree: handle,
697 path: entry.path,
698 mtime: entry.mtime,
699 is_local: true,
700 is_deleted: false,
701 });
702
703 if let Some(project_id) = project_id {
704 rpc.send(proto::UpdateBufferFile {
705 project_id,
706 buffer_id,
707 file: Some(new_file.to_proto()),
708 })
709 .log_err();
710 }
711
712 buffer_handle.update(&mut cx, |buffer, cx| {
713 if has_changed_file {
714 buffer.file_updated(new_file, cx).detach();
715 }
716 });
717 }
718
719 if let Some(project_id) = project_id {
720 rpc.send(proto::BufferSaved {
721 project_id,
722 buffer_id,
723 version: serialize_version(&version),
724 mtime: Some(entry.mtime.into()),
725 fingerprint: serialize_fingerprint(fingerprint),
726 })?;
727 }
728
729 buffer_handle.update(&mut cx, |buffer, cx| {
730 buffer.did_save(version.clone(), fingerprint, entry.mtime, cx);
731 });
732
733 Ok((version, fingerprint, entry.mtime))
734 })
735 }
736
    /// Creates a new file or directory at `path` (worktree-relative), then
    /// rescans that path and returns the resulting entry.
    pub fn create_entry(
        &self,
        path: impl Into<Arc<Path>>,
        is_dir: bool,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<Entry>> {
        let path = path.into();
        let abs_path = self.absolutize(&path);
        let fs = self.fs.clone();
        let write = cx.background().spawn(async move {
            if is_dir {
                fs.create_dir(&abs_path).await
            } else {
                // Create an empty file with default contents and line ending.
                fs.save(&abs_path, &Default::default(), Default::default())
                    .await
            }
        });

        cx.spawn(|this, mut cx| async move {
            write.await?;
            this.update(&mut cx, |this, cx| {
                this.as_local_mut().unwrap().refresh_entry(path, None, cx)
            })
            .await
        })
    }

    /// Saves `text` to `path` with the given line ending, then rescans the
    /// path and returns the resulting entry.
    pub fn write_file(
        &self,
        path: impl Into<Arc<Path>>,
        text: Rope,
        line_ending: LineEnding,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<Entry>> {
        let path = path.into();
        let abs_path = self.absolutize(&path);
        let fs = self.fs.clone();
        let write = cx
            .background()
            .spawn(async move { fs.save(&abs_path, &text, line_ending).await });

        cx.spawn(|this, mut cx| async move {
            write.await?;
            this.update(&mut cx, |this, cx| {
                this.as_local_mut().unwrap().refresh_entry(path, None, cx)
            })
            .await
        })
    }
786
    /// Deletes the file or directory for `entry_id` from disk, then waits
    /// for the scanner to process the removal. Returns `None` if no entry
    /// with that id exists.
    pub fn delete_entry(
        &self,
        entry_id: ProjectEntryId,
        cx: &mut ModelContext<Worktree>,
    ) -> Option<Task<Result<()>>> {
        let entry = self.entry_for_id(entry_id)?.clone();
        let abs_path = self.abs_path.clone();
        let fs = self.fs.clone();

        let delete = cx.background().spawn(async move {
            // NOTE(review): the root is canonicalized before joining —
            // presumably to match the path form the scanner watches.
            let mut abs_path = fs.canonicalize(&abs_path).await?;
            // An entry path with no file name denotes the root itself.
            if entry.path.file_name().is_some() {
                abs_path = abs_path.join(&entry.path);
            }
            if entry.is_file() {
                fs.remove_file(&abs_path, Default::default()).await?;
            } else {
                fs.remove_dir(
                    &abs_path,
                    RemoveOptions {
                        recursive: true,
                        ignore_if_not_exists: false,
                    },
                )
                .await?;
            }
            anyhow::Ok(abs_path)
        });

        Some(cx.spawn(|this, mut cx| async move {
            let abs_path = delete.await?;
            // Ask the scanner to rescan the deleted path and wait (via the
            // barrier) until it has done so.
            let (tx, mut rx) = barrier::channel();
            this.update(&mut cx, |this, _| {
                this.as_local_mut()
                    .unwrap()
                    .path_changes_tx
                    .try_send((vec![abs_path], tx))
            })?;
            rx.recv().await;
            Ok(())
        }))
    }
829
    /// Renames the entry with `entry_id` to `new_path` on disk, then
    /// rescans both the old and new paths and returns the refreshed entry.
    /// Returns `None` if no entry with that id exists.
    pub fn rename_entry(
        &self,
        entry_id: ProjectEntryId,
        new_path: impl Into<Arc<Path>>,
        cx: &mut ModelContext<Worktree>,
    ) -> Option<Task<Result<Entry>>> {
        let old_path = self.entry_for_id(entry_id)?.path.clone();
        let new_path = new_path.into();
        let abs_old_path = self.absolutize(&old_path);
        let abs_new_path = self.absolutize(&new_path);
        let fs = self.fs.clone();
        let rename = cx.background().spawn(async move {
            fs.rename(&abs_old_path, &abs_new_path, Default::default())
                .await
        });

        Some(cx.spawn(|this, mut cx| async move {
            rename.await?;
            // Passing `old_path` makes `refresh_entry` rescan it as well.
            this.update(&mut cx, |this, cx| {
                this.as_local_mut()
                    .unwrap()
                    .refresh_entry(new_path.clone(), Some(old_path), cx)
            })
            .await
        }))
    }

    /// Recursively copies the entry with `entry_id` to `new_path` on disk,
    /// then rescans the destination and returns the refreshed entry.
    /// Returns `None` if no entry with that id exists.
    pub fn copy_entry(
        &self,
        entry_id: ProjectEntryId,
        new_path: impl Into<Arc<Path>>,
        cx: &mut ModelContext<Worktree>,
    ) -> Option<Task<Result<Entry>>> {
        let old_path = self.entry_for_id(entry_id)?.path.clone();
        let new_path = new_path.into();
        let abs_old_path = self.absolutize(&old_path);
        let abs_new_path = self.absolutize(&new_path);
        let fs = self.fs.clone();
        let copy = cx.background().spawn(async move {
            copy_recursive(
                fs.as_ref(),
                &abs_old_path,
                &abs_new_path,
                Default::default(),
            )
            .await
        });

        Some(cx.spawn(|this, mut cx| async move {
            copy.await?;
            this.update(&mut cx, |this, cx| {
                this.as_local_mut()
                    .unwrap()
                    .refresh_entry(new_path.clone(), None, cx)
            })
            .await
        }))
    }
888
    /// Asks the background scanner to rescan `path` (and `old_path`, if
    /// given, e.g. after a rename), waits for that scan to complete, and
    /// returns the refreshed entry for `path`.
    fn refresh_entry(
        &self,
        path: Arc<Path>,
        old_path: Option<Arc<Path>>,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<Entry>> {
        let fs = self.fs.clone();
        let abs_root_path = self.abs_path.clone();
        let path_changes_tx = self.path_changes_tx.clone();
        cx.spawn_weak(move |this, mut cx| async move {
            let abs_path = fs.canonicalize(&abs_root_path).await?;
            let mut paths = Vec::with_capacity(2);
            // A relative path with no file name denotes the root itself.
            paths.push(if path.file_name().is_some() {
                abs_path.join(&path)
            } else {
                abs_path.clone()
            });
            if let Some(old_path) = old_path {
                paths.push(if old_path.file_name().is_some() {
                    abs_path.join(&old_path)
                } else {
                    abs_path.clone()
                });
            }

            // The barrier resolves once the scanner has processed `paths`.
            let (tx, mut rx) = barrier::channel();
            path_changes_tx.try_send((paths, tx))?;
            rx.recv().await;
            this.upgrade(&cx)
                .ok_or_else(|| anyhow!("worktree was dropped"))?
                .update(&mut cx, |this, _| {
                    this.entry_for_path(path)
                        .cloned()
                        .ok_or_else(|| anyhow!("failed to read path after update"))
                })
        })
    }
926
    /// Begins (or resumes) sharing this worktree with collaborators in the
    /// given project. A background task streams snapshot updates to the
    /// server in chunks; the returned task resolves once the first full
    /// update has been sent.
    pub fn share(&mut self, project_id: u64, cx: &mut ModelContext<Worktree>) -> Task<Result<()>> {
        let (share_tx, share_rx) = oneshot::channel();

        if let Some(share) = self.share.as_mut() {
            // Already sharing: resolve immediately and nudge the streaming
            // task to resume in case it was stuck retrying.
            let _ = share_tx.send(());
            *share.resume_updates.borrow_mut() = ();
        } else {
            let (snapshots_tx, mut snapshots_rx) = watch::channel_with(self.snapshot());
            let (resume_updates_tx, mut resume_updates_rx) = watch::channel();
            let worktree_id = cx.model_id() as u64;

            // Send current diagnostic summaries up-front; bail on failure.
            for (path, summary) in self.diagnostic_summaries.iter() {
                if let Err(e) = self.client.send(proto::UpdateDiagnosticSummary {
                    project_id,
                    worktree_id,
                    summary: Some(summary.to_proto(&path.0)),
                }) {
                    return Task::ready(Err(e));
                }
            }

            let _maintain_remote_snapshot = cx.background().spawn({
                let client = self.client.clone();
                async move {
                    let mut share_tx = Some(share_tx);
                    // Diff against an empty snapshot first, so the initial
                    // update transmits the entire worktree.
                    let mut prev_snapshot = LocalSnapshot {
                        ignores_by_parent_abs_path: Default::default(),
                        git_repositories: Default::default(),
                        removed_entry_ids: Default::default(),
                        next_entry_id: Default::default(),
                        snapshot: Snapshot {
                            id: WorktreeId(worktree_id as usize),
                            abs_path: Path::new("").into(),
                            root_name: Default::default(),
                            root_char_bag: Default::default(),
                            entries_by_path: Default::default(),
                            entries_by_id: Default::default(),
                            scan_id: 0,
                            completed_scan_id: 0,
                        },
                    };
                    while let Some(snapshot) = snapshots_rx.recv().await {
                        // Tiny chunks under test exercise the chunking path.
                        #[cfg(any(test, feature = "test-support"))]
                        const MAX_CHUNK_SIZE: usize = 2;
                        #[cfg(not(any(test, feature = "test-support")))]
                        const MAX_CHUNK_SIZE: usize = 256;

                        let update =
                            snapshot.build_update(&prev_snapshot, project_id, worktree_id, true);
                        for update in proto::split_worktree_update(update, MAX_CHUNK_SIZE) {
                            // Discard any stale resume signal before sending.
                            let _ = resume_updates_rx.try_recv();
                            // On failure, wait for a resume signal (sent by
                            // a later `share` call) before retrying; give up
                            // if the resume channel closes.
                            while let Err(error) = client.request(update.clone()).await {
                                log::error!("failed to send worktree update: {}", error);
                                log::info!("waiting to resume updates");
                                if resume_updates_rx.next().await.is_none() {
                                    return Ok(());
                                }
                            }
                        }

                        // Signal that the initial update has been sent.
                        if let Some(share_tx) = share_tx.take() {
                            let _ = share_tx.send(());
                        }

                        prev_snapshot = snapshot;
                    }

                    Ok::<_, anyhow::Error>(())
                }
                .log_err()
            });

            self.share = Some(ShareState {
                project_id,
                snapshots_tx,
                resume_updates: resume_updates_tx,
                _maintain_remote_snapshot,
            });
        }

        cx.foreground()
            .spawn(async move { share_rx.await.map_err(|_| anyhow!("share ended")) })
    }
1010
1011 pub fn unshare(&mut self) {
1012 self.share.take();
1013 }
1014
1015 pub fn is_shared(&self) -> bool {
1016 self.share.is_some()
1017 }
1018}
1019
1020impl RemoteWorktree {
    /// Returns a clone of the foreground snapshot.
    fn snapshot(&self) -> Snapshot {
        self.snapshot.clone()
    }

    /// Marks the worktree as disconnected: stops accepting remote updates
    /// and drops all pending snapshot waiters (their receivers will error).
    pub fn disconnected_from_host(&mut self) {
        self.updates_tx.take();
        self.snapshot_subscriptions.clear();
        self.disconnected = true;
    }
1030
    /// Requests that the host save the buffer, then records the save on
    /// the local buffer replica with the host's reported state.
    pub fn save_buffer(
        &self,
        buffer_handle: ModelHandle<Buffer>,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<(clock::Global, RopeFingerprint, SystemTime)>> {
        let buffer = buffer_handle.read(cx);
        let buffer_id = buffer.remote_id();
        let version = buffer.version();
        let rpc = self.client.clone();
        let project_id = self.project_id;
        cx.as_mut().spawn(|mut cx| async move {
            let response = rpc
                .request(proto::SaveBuffer {
                    project_id,
                    buffer_id,
                    version: serialize_version(&version),
                })
                .await?;
            // Use the version/fingerprint/mtime the host actually saved,
            // which may differ from what was requested.
            let version = deserialize_version(&response.version);
            let fingerprint = deserialize_fingerprint(&response.fingerprint)?;
            let mtime = response
                .mtime
                .ok_or_else(|| anyhow!("missing mtime"))?
                .into();

            buffer_handle.update(&mut cx, |buffer, cx| {
                buffer.did_save(version.clone(), fingerprint, mtime, cx);
            });

            Ok((version, fingerprint, mtime))
        })
    }
1063
    /// Enqueues an update from the host for application to the background
    /// snapshot. Silently ignored after `disconnected_from_host` has
    /// cleared `updates_tx`.
    pub fn update_from_remote(&mut self, update: proto::UpdateWorktree) {
        if let Some(updates_tx) = &self.updates_tx {
            updates_tx
                .unbounded_send(update)
                .expect("consumer runs to completion");
        }
    }

    /// Whether a snapshot with at least `scan_id` has been observed.
    fn observed_snapshot(&self, scan_id: usize) -> bool {
        self.completed_scan_id >= scan_id
    }
1075
    /// Returns a future that resolves once a snapshot with at least
    /// `scan_id` has been observed. Resolves with an error if the worktree
    /// is (or becomes) disconnected, since the sender is then dropped.
    fn wait_for_snapshot(&mut self, scan_id: usize) -> impl Future<Output = Result<()>> {
        let (tx, rx) = oneshot::channel();
        if self.observed_snapshot(scan_id) {
            // Already observed: resolve immediately.
            let _ = tx.send(());
        } else if self.disconnected {
            // Dropping the sender makes `rx.await` return an error below.
            drop(tx);
        } else {
            // Keep `snapshot_subscriptions` sorted by scan id so waiters
            // can be resolved in order as snapshots arrive.
            match self
                .snapshot_subscriptions
                .binary_search_by_key(&scan_id, |probe| probe.0)
            {
                Ok(ix) | Err(ix) => self.snapshot_subscriptions.insert(ix, (scan_id, tx)),
            }
        }

        async move {
            rx.await?;
            Ok(())
        }
    }
1096
1097 pub fn update_diagnostic_summary(
1098 &mut self,
1099 path: Arc<Path>,
1100 summary: &proto::DiagnosticSummary,
1101 ) {
1102 let summary = DiagnosticSummary {
1103 language_server_id: summary.language_server_id as usize,
1104 error_count: summary.error_count as usize,
1105 warning_count: summary.warning_count as usize,
1106 };
1107 if summary.is_empty() {
1108 self.diagnostic_summaries.remove(&PathKey(path));
1109 } else {
1110 self.diagnostic_summaries.insert(PathKey(path), summary);
1111 }
1112 }
1113
    /// Applies an entry creation received from the host, once the snapshot
    /// for `scan_id` has been observed; returns the inserted entry.
    pub fn insert_entry(
        &mut self,
        entry: proto::Entry,
        scan_id: usize,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<Entry>> {
        let wait_for_snapshot = self.wait_for_snapshot(scan_id);
        cx.spawn(|this, mut cx| async move {
            wait_for_snapshot.await?;
            this.update(&mut cx, |worktree, _| {
                let worktree = worktree.as_remote_mut().unwrap();
                let mut snapshot = worktree.background_snapshot.lock();
                let entry = snapshot.insert_entry(entry);
                // Keep the foreground snapshot in sync.
                worktree.snapshot = snapshot.clone();
                entry
            })
        })
    }

    /// Applies an entry deletion received from the host, once the snapshot
    /// for `scan_id` has been observed.
    pub(crate) fn delete_entry(
        &mut self,
        id: ProjectEntryId,
        scan_id: usize,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<()>> {
        let wait_for_snapshot = self.wait_for_snapshot(scan_id);
        cx.spawn(|this, mut cx| async move {
            wait_for_snapshot.await?;
            this.update(&mut cx, |worktree, _| {
                let worktree = worktree.as_remote_mut().unwrap();
                let mut snapshot = worktree.background_snapshot.lock();
                snapshot.delete_entry(id);
                // Keep the foreground snapshot in sync.
                worktree.snapshot = snapshot.clone();
            });
            Ok(())
        })
    }
1151}
1152
1153impl Snapshot {
    /// The id of the worktree this snapshot belongs to.
    pub fn id(&self) -> WorktreeId {
        self.id
    }

    /// The absolute path of the worktree's root.
    pub fn abs_path(&self) -> &Arc<Path> {
        &self.abs_path
    }

    /// Whether an entry with the given id exists in this snapshot.
    pub fn contains_entry(&self, entry_id: ProjectEntryId) -> bool {
        self.entries_by_id.get(&entry_id, &()).is_some()
    }
1165
    /// Inserts (or replaces) an entry received over RPC, keeping
    /// `entries_by_path` and `entries_by_id` consistent with each other.
    pub(crate) fn insert_entry(&mut self, entry: proto::Entry) -> Result<Entry> {
        let entry = Entry::try_from((&self.root_char_bag, entry))?;
        let old_entry = self.entries_by_id.insert_or_replace(
            PathEntry {
                id: entry.id,
                path: entry.path.clone(),
                is_ignored: entry.is_ignored,
                scan_id: 0,
            },
            &(),
        );
        // If this id was previously associated with a different path,
        // remove the stale path record.
        if let Some(old_entry) = old_entry {
            self.entries_by_path.remove(&PathKey(old_entry.path), &());
        }
        self.entries_by_path.insert_or_replace(entry.clone(), &());
        Ok(entry)
    }
1183
1184 fn delete_entry(&mut self, entry_id: ProjectEntryId) -> Option<Arc<Path>> {
1185 let removed_entry = self.entries_by_id.remove(&entry_id, &())?;
1186 self.entries_by_path = {
1187 let mut cursor = self.entries_by_path.cursor();
1188 let mut new_entries_by_path =
1189 cursor.slice(&TraversalTarget::Path(&removed_entry.path), Bias::Left, &());
1190 while let Some(entry) = cursor.item() {
1191 if entry.path.starts_with(&removed_entry.path) {
1192 self.entries_by_id.remove(&entry.id, &());
1193 cursor.next(&());
1194 } else {
1195 break;
1196 }
1197 }
1198 new_entries_by_path.push_tree(cursor.suffix(&()), &());
1199 new_entries_by_path
1200 };
1201
1202 Some(removed_entry.path)
1203 }
1204
1205 pub(crate) fn apply_remote_update(&mut self, update: proto::UpdateWorktree) -> Result<()> {
1206 let mut entries_by_path_edits = Vec::new();
1207 let mut entries_by_id_edits = Vec::new();
1208 for entry_id in update.removed_entries {
1209 if let Some(entry) = self.entry_for_id(ProjectEntryId::from_proto(entry_id)) {
1210 entries_by_path_edits.push(Edit::Remove(PathKey(entry.path.clone())));
1211 entries_by_id_edits.push(Edit::Remove(entry.id));
1212 }
1213 }
1214
1215 for entry in update.updated_entries {
1216 let entry = Entry::try_from((&self.root_char_bag, entry))?;
1217 if let Some(PathEntry { path, .. }) = self.entries_by_id.get(&entry.id, &()) {
1218 entries_by_path_edits.push(Edit::Remove(PathKey(path.clone())));
1219 }
1220 entries_by_id_edits.push(Edit::Insert(PathEntry {
1221 id: entry.id,
1222 path: entry.path.clone(),
1223 is_ignored: entry.is_ignored,
1224 scan_id: 0,
1225 }));
1226 entries_by_path_edits.push(Edit::Insert(entry));
1227 }
1228
1229 self.entries_by_path.edit(entries_by_path_edits, &());
1230 self.entries_by_id.edit(entries_by_id_edits, &());
1231 self.scan_id = update.scan_id as usize;
1232 if update.is_last_update {
1233 self.completed_scan_id = update.scan_id as usize;
1234 }
1235
1236 Ok(())
1237 }
1238
1239 pub fn file_count(&self) -> usize {
1240 self.entries_by_path.summary().file_count
1241 }
1242
1243 pub fn visible_file_count(&self) -> usize {
1244 self.entries_by_path.summary().visible_file_count
1245 }
1246
1247 fn traverse_from_offset(
1248 &self,
1249 include_dirs: bool,
1250 include_ignored: bool,
1251 start_offset: usize,
1252 ) -> Traversal {
1253 let mut cursor = self.entries_by_path.cursor();
1254 cursor.seek(
1255 &TraversalTarget::Count {
1256 count: start_offset,
1257 include_dirs,
1258 include_ignored,
1259 },
1260 Bias::Right,
1261 &(),
1262 );
1263 Traversal {
1264 cursor,
1265 include_dirs,
1266 include_ignored,
1267 }
1268 }
1269
1270 fn traverse_from_path(
1271 &self,
1272 include_dirs: bool,
1273 include_ignored: bool,
1274 path: &Path,
1275 ) -> Traversal {
1276 let mut cursor = self.entries_by_path.cursor();
1277 cursor.seek(&TraversalTarget::Path(path), Bias::Left, &());
1278 Traversal {
1279 cursor,
1280 include_dirs,
1281 include_ignored,
1282 }
1283 }
1284
1285 pub fn files(&self, include_ignored: bool, start: usize) -> Traversal {
1286 self.traverse_from_offset(false, include_ignored, start)
1287 }
1288
1289 pub fn entries(&self, include_ignored: bool) -> Traversal {
1290 self.traverse_from_offset(true, include_ignored, 0)
1291 }
1292
1293 pub fn paths(&self) -> impl Iterator<Item = &Arc<Path>> {
1294 let empty_path = Path::new("");
1295 self.entries_by_path
1296 .cursor::<()>()
1297 .filter(move |entry| entry.path.as_ref() != empty_path)
1298 .map(|entry| &entry.path)
1299 }
1300
1301 fn child_entries<'a>(&'a self, parent_path: &'a Path) -> ChildEntriesIter<'a> {
1302 let mut cursor = self.entries_by_path.cursor();
1303 cursor.seek(&TraversalTarget::Path(parent_path), Bias::Right, &());
1304 let traversal = Traversal {
1305 cursor,
1306 include_dirs: true,
1307 include_ignored: true,
1308 };
1309 ChildEntriesIter {
1310 traversal,
1311 parent_path,
1312 }
1313 }
1314
1315 pub fn root_entry(&self) -> Option<&Entry> {
1316 self.entry_for_path("")
1317 }
1318
1319 pub fn root_name(&self) -> &str {
1320 &self.root_name
1321 }
1322
1323 pub fn scan_id(&self) -> usize {
1324 self.scan_id
1325 }
1326
1327 pub fn entry_for_path(&self, path: impl AsRef<Path>) -> Option<&Entry> {
1328 let path = path.as_ref();
1329 self.traverse_from_path(true, true, path)
1330 .entry()
1331 .and_then(|entry| {
1332 if entry.path.as_ref() == path {
1333 Some(entry)
1334 } else {
1335 None
1336 }
1337 })
1338 }
1339
1340 pub fn entry_for_id(&self, id: ProjectEntryId) -> Option<&Entry> {
1341 let entry = self.entries_by_id.get(&id, &())?;
1342 self.entry_for_path(&entry.path)
1343 }
1344
1345 pub fn inode_for_path(&self, path: impl AsRef<Path>) -> Option<u64> {
1346 self.entry_for_path(path.as_ref()).map(|e| e.inode)
1347 }
1348}
1349
impl LocalSnapshot {
    // Gives the most specific git repository for a given path
    pub(crate) fn repo_for(&self, path: &Path) -> Option<GitRepositoryEntry> {
        self.git_repositories
            .iter()
            .rev() // git_repositories is ordered lexicographically, so the last match is the deepest
            .find(|repo| repo.manages(path))
            .cloned()
    }

    /// Finds the repository whose `.git` directory contains `path`, if any.
    pub(crate) fn repo_with_dot_git_containing(
        &mut self,
        path: &Path,
    ) -> Option<&mut GitRepositoryEntry> {
        // Git repositories cannot be nested, so we don't need to reverse the order
        self.git_repositories
            .iter_mut()
            .find(|repo| repo.in_dot_git(path))
    }

    /// Builds an `UpdateWorktree` message containing every entry, as would be
    /// sent when a worktree is first shared.
    #[cfg(test)]
    pub(crate) fn build_initial_update(&self, project_id: u64) -> proto::UpdateWorktree {
        let root_name = self.root_name.clone();
        proto::UpdateWorktree {
            project_id,
            worktree_id: self.id().to_proto(),
            abs_path: self.abs_path().to_string_lossy().into(),
            root_name,
            updated_entries: self.entries_by_path.iter().map(Into::into).collect(),
            removed_entries: Default::default(),
            scan_id: self.scan_id as u64,
            is_last_update: true,
        }
    }

    /// Diffs this snapshot against an older snapshot `other`, producing an
    /// `UpdateWorktree` message with the entries that were added, changed
    /// (detected via `scan_id`), or removed in between.
    pub(crate) fn build_update(
        &self,
        other: &Self,
        project_id: u64,
        worktree_id: u64,
        include_ignored: bool,
    ) -> proto::UpdateWorktree {
        let mut updated_entries = Vec::new();
        let mut removed_entries = Vec::new();
        let mut self_entries = self
            .entries_by_id
            .cursor::<()>()
            .filter(|e| include_ignored || !e.is_ignored)
            .peekable();
        let mut other_entries = other
            .entries_by_id
            .cursor::<()>()
            .filter(|e| include_ignored || !e.is_ignored)
            .peekable();
        // Merge the two id-ordered streams, classifying each id as
        // added (only in self), removed (only in other), or common.
        loop {
            match (self_entries.peek(), other_entries.peek()) {
                (Some(self_entry), Some(other_entry)) => {
                    match Ord::cmp(&self_entry.id, &other_entry.id) {
                        Ordering::Less => {
                            let entry = self.entry_for_id(self_entry.id).unwrap().into();
                            updated_entries.push(entry);
                            self_entries.next();
                        }
                        Ordering::Equal => {
                            // Same id on both sides: only send it if it was
                            // touched by a newer scan.
                            if self_entry.scan_id != other_entry.scan_id {
                                let entry = self.entry_for_id(self_entry.id).unwrap().into();
                                updated_entries.push(entry);
                            }

                            self_entries.next();
                            other_entries.next();
                        }
                        Ordering::Greater => {
                            removed_entries.push(other_entry.id.to_proto());
                            other_entries.next();
                        }
                    }
                }
                (Some(self_entry), None) => {
                    let entry = self.entry_for_id(self_entry.id).unwrap().into();
                    updated_entries.push(entry);
                    self_entries.next();
                }
                (None, Some(other_entry)) => {
                    removed_entries.push(other_entry.id.to_proto());
                    other_entries.next();
                }
                (None, None) => break,
            }
        }

        proto::UpdateWorktree {
            project_id,
            worktree_id,
            abs_path: self.abs_path().to_string_lossy().into(),
            root_name: self.root_name().to_string(),
            updated_entries,
            removed_entries,
            scan_id: self.scan_id as u64,
            is_last_update: self.completed_scan_id == self.scan_id,
        }
    }

    /// Inserts a freshly scanned entry, reusing a previous entry id when
    /// possible and reloading the gitignore contents if the entry is a
    /// `.gitignore` file.
    fn insert_entry(&mut self, mut entry: Entry, fs: &dyn Fs) -> Entry {
        if entry.is_file() && entry.path.file_name() == Some(&GITIGNORE) {
            let abs_path = self.abs_path.join(&entry.path);
            match smol::block_on(build_gitignore(&abs_path, fs)) {
                Ok(ignore) => {
                    self.ignores_by_parent_abs_path.insert(
                        abs_path.parent().unwrap().into(),
                        (Arc::new(ignore), self.scan_id),
                    );
                }
                Err(error) => {
                    log::error!(
                        "error loading .gitignore file {:?} - {:?}",
                        &entry.path,
                        error
                    );
                }
            }
        }

        self.reuse_entry_id(&mut entry);

        // Don't demote a directory that has already been scanned back to
        // the pending state.
        if entry.kind == EntryKind::PendingDir {
            if let Some(existing_entry) =
                self.entries_by_path.get(&PathKey(entry.path.clone()), &())
            {
                entry.kind = existing_entry.kind;
            }
        }

        let scan_id = self.scan_id;
        self.entries_by_path.insert_or_replace(entry.clone(), &());
        self.entries_by_id.insert_or_replace(
            PathEntry {
                id: entry.id,
                path: entry.path.clone(),
                is_ignored: entry.is_ignored,
                scan_id,
            },
            &(),
        );

        entry
    }

    /// Records the scanned children of `parent_path`, marking the parent as a
    /// fully-loaded directory, registering its gitignore (if one was found),
    /// and detecting a `.git` directory as a repository root.
    fn populate_dir(
        &mut self,
        parent_path: Arc<Path>,
        entries: impl IntoIterator<Item = Entry>,
        ignore: Option<Arc<Gitignore>>,
        fs: &dyn Fs,
    ) {
        let mut parent_entry = if let Some(parent_entry) =
            self.entries_by_path.get(&PathKey(parent_path.clone()), &())
        {
            parent_entry.clone()
        } else {
            log::warn!(
                "populating a directory {:?} that has been removed",
                parent_path
            );
            return;
        };

        match parent_entry.kind {
            EntryKind::PendingDir => {
                parent_entry.kind = EntryKind::Dir;
            }
            EntryKind::Dir => {}
            // The parent is no longer a directory; nothing to populate.
            _ => return,
        }

        if let Some(ignore) = ignore {
            self.ignores_by_parent_abs_path.insert(
                self.abs_path.join(&parent_path).into(),
                (ignore, self.scan_id),
            );
        }

        if parent_path.file_name() == Some(&DOT_GIT) {
            let abs_path = self.abs_path.join(&parent_path);
            let content_path: Arc<Path> = parent_path.parent().unwrap().into();
            // Only register the repository if it isn't already known.
            if let Err(ix) = self
                .git_repositories
                .binary_search_by_key(&&content_path, |repo| &repo.content_path)
            {
                if let Some(repo) = fs.open_repo(abs_path.as_path()) {
                    self.git_repositories.insert(
                        ix,
                        GitRepositoryEntry {
                            repo,
                            scan_id: 0,
                            content_path,
                            git_dir_path: parent_path,
                        },
                    );
                }
            }
        }

        let mut entries_by_path_edits = vec![Edit::Insert(parent_entry)];
        let mut entries_by_id_edits = Vec::new();

        for mut entry in entries {
            self.reuse_entry_id(&mut entry);
            entries_by_id_edits.push(Edit::Insert(PathEntry {
                id: entry.id,
                path: entry.path.clone(),
                is_ignored: entry.is_ignored,
                scan_id: self.scan_id,
            }));
            entries_by_path_edits.push(Edit::Insert(entry));
        }

        self.entries_by_path.edit(entries_by_path_edits, &());
        self.entries_by_id.edit(entries_by_id_edits, &());
    }

    /// Preserves a stable entry id across rescans: prefer the id of a
    /// recently removed entry with the same inode (a rename/move), otherwise
    /// the id of an existing entry at the same path.
    fn reuse_entry_id(&mut self, entry: &mut Entry) {
        if let Some(removed_entry_id) = self.removed_entry_ids.remove(&entry.inode) {
            entry.id = removed_entry_id;
        } else if let Some(existing_entry) = self.entry_for_path(&entry.path) {
            entry.id = existing_entry.id;
        }
    }

    /// Removes the entry at `path` and everything beneath it, remembering the
    /// removed ids by inode so they can be reused if the files reappear, and
    /// bumping the scan ids of any affected gitignore or git repository.
    fn remove_path(&mut self, path: &Path) {
        let mut new_entries;
        let removed_entries;
        {
            let mut cursor = self.entries_by_path.cursor::<TraversalProgress>();
            // Split the tree into [before path], [path..successor), [after].
            new_entries = cursor.slice(&TraversalTarget::Path(path), Bias::Left, &());
            removed_entries = cursor.slice(&TraversalTarget::PathSuccessor(path), Bias::Left, &());
            new_entries.push_tree(cursor.suffix(&()), &());
        }
        self.entries_by_path = new_entries;

        let mut entries_by_id_edits = Vec::new();
        for entry in removed_entries.cursor::<()>() {
            // Keep the largest removed id per inode so a reappearing file
            // gets a stable id back.
            let removed_entry_id = self
                .removed_entry_ids
                .entry(entry.inode)
                .or_insert(entry.id);
            *removed_entry_id = cmp::max(*removed_entry_id, entry.id);
            entries_by_id_edits.push(Edit::Remove(entry.id));
        }
        self.entries_by_id.edit(entries_by_id_edits, &());

        if path.file_name() == Some(&GITIGNORE) {
            // A .gitignore was deleted: mark its ignore as modified in this scan.
            let abs_parent_path = self.abs_path.join(path.parent().unwrap());
            if let Some((_, scan_id)) = self
                .ignores_by_parent_abs_path
                .get_mut(abs_parent_path.as_path())
            {
                *scan_id = self.snapshot.scan_id;
            }
        } else if path.file_name() == Some(&DOT_GIT) {
            // A .git directory was deleted: mark its repository as modified.
            let parent_path = path.parent().unwrap();
            if let Ok(ix) = self
                .git_repositories
                .binary_search_by_key(&parent_path, |repo| repo.git_dir_path.as_ref())
            {
                self.git_repositories[ix].scan_id = self.snapshot.scan_id;
            }
        }
    }

    /// Collects the inodes of every ancestor entry of `path`, used to detect
    /// symlink cycles during scanning.
    fn ancestor_inodes_for_path(&self, path: &Path) -> TreeSet<u64> {
        let mut inodes = TreeSet::default();
        for ancestor in path.ancestors().skip(1) {
            if let Some(entry) = self.entry_for_path(ancestor) {
                inodes.insert(entry.inode);
            }
        }
        inodes
    }

    /// Builds the stack of gitignores that apply to `abs_path`, from the
    /// outermost ancestor inward. Collapses to `IgnoreStack::all()` as soon
    /// as some ancestor (or the path itself) is ignored.
    fn ignore_stack_for_abs_path(&self, abs_path: &Path, is_dir: bool) -> Arc<IgnoreStack> {
        let mut new_ignores = Vec::new();
        for ancestor in abs_path.ancestors().skip(1) {
            if let Some((ignore, _)) = self.ignores_by_parent_abs_path.get(ancestor) {
                new_ignores.push((ancestor, Some(ignore.clone())));
            } else {
                new_ignores.push((ancestor, None));
            }
        }

        let mut ignore_stack = IgnoreStack::none();
        for (parent_abs_path, ignore) in new_ignores.into_iter().rev() {
            if ignore_stack.is_abs_path_ignored(parent_abs_path, true) {
                ignore_stack = IgnoreStack::all();
                break;
            } else if let Some(ignore) = ignore {
                ignore_stack = ignore_stack.append(parent_abs_path.into(), ignore);
            }
        }

        if ignore_stack.is_abs_path_ignored(abs_path, is_dir) {
            ignore_stack = IgnoreStack::all();
        }

        ignore_stack
    }

    /// All git repositories known to this snapshot, ordered by content path.
    pub fn git_repo_entries(&self) -> &[GitRepositoryEntry] {
        &self.git_repositories
    }
}
1661
1662impl GitRepositoryEntry {
1663 // Note that these paths should be relative to the worktree root.
1664 pub(crate) fn manages(&self, path: &Path) -> bool {
1665 path.starts_with(self.content_path.as_ref())
1666 }
1667
1668 // Note that this path should be relative to the worktree root.
1669 pub(crate) fn in_dot_git(&self, path: &Path) -> bool {
1670 path.starts_with(self.git_dir_path.as_ref())
1671 }
1672}
1673
1674async fn build_gitignore(abs_path: &Path, fs: &dyn Fs) -> Result<Gitignore> {
1675 let contents = fs.load(abs_path).await?;
1676 let parent = abs_path.parent().unwrap_or_else(|| Path::new("/"));
1677 let mut builder = GitignoreBuilder::new(parent);
1678 for line in contents.lines() {
1679 builder.add_line(Some(abs_path.into()), line)?;
1680 }
1681 Ok(builder.build()?)
1682}
1683
1684impl WorktreeId {
1685 pub fn from_usize(handle_id: usize) -> Self {
1686 Self(handle_id)
1687 }
1688
1689 pub(crate) fn from_proto(id: u64) -> Self {
1690 Self(id as usize)
1691 }
1692
1693 pub fn to_proto(&self) -> u64 {
1694 self.0 as u64
1695 }
1696
1697 pub fn to_usize(&self) -> usize {
1698 self.0
1699 }
1700}
1701
1702impl fmt::Display for WorktreeId {
1703 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1704 self.0.fmt(f)
1705 }
1706}
1707
1708impl Deref for Worktree {
1709 type Target = Snapshot;
1710
1711 fn deref(&self) -> &Self::Target {
1712 match self {
1713 Worktree::Local(worktree) => &worktree.snapshot,
1714 Worktree::Remote(worktree) => &worktree.snapshot,
1715 }
1716 }
1717}
1718
1719impl Deref for LocalWorktree {
1720 type Target = LocalSnapshot;
1721
1722 fn deref(&self) -> &Self::Target {
1723 &self.snapshot
1724 }
1725}
1726
1727impl Deref for RemoteWorktree {
1728 type Target = Snapshot;
1729
1730 fn deref(&self) -> &Self::Target {
1731 &self.snapshot
1732 }
1733}
1734
1735impl fmt::Debug for LocalWorktree {
1736 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1737 self.snapshot.fmt(f)
1738 }
1739}
1740
1741impl fmt::Debug for Snapshot {
1742 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1743 struct EntriesById<'a>(&'a SumTree<PathEntry>);
1744 struct EntriesByPath<'a>(&'a SumTree<Entry>);
1745
1746 impl<'a> fmt::Debug for EntriesByPath<'a> {
1747 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1748 f.debug_map()
1749 .entries(self.0.iter().map(|entry| (&entry.path, entry.id)))
1750 .finish()
1751 }
1752 }
1753
1754 impl<'a> fmt::Debug for EntriesById<'a> {
1755 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1756 f.debug_list().entries(self.0.iter()).finish()
1757 }
1758 }
1759
1760 f.debug_struct("Snapshot")
1761 .field("id", &self.id)
1762 .field("root_name", &self.root_name)
1763 .field("entries_by_path", &EntriesByPath(&self.entries_by_path))
1764 .field("entries_by_id", &EntriesById(&self.entries_by_id))
1765 .finish()
1766 }
1767}
1768
/// A handle to a file within a worktree, as seen by open buffers.
#[derive(Clone, PartialEq)]
pub struct File {
    pub worktree: ModelHandle<Worktree>,
    // Path of the file, relative to the worktree root.
    pub path: Arc<Path>,
    // Modification time observed when the file was last read.
    pub mtime: SystemTime,
    pub(crate) entry_id: ProjectEntryId,
    // Whether the owning worktree is a local (as opposed to remote) worktree.
    pub(crate) is_local: bool,
    pub(crate) is_deleted: bool,
}
1778
impl language::File for File {
    /// Returns `self` as a local file only when the owning worktree is local.
    fn as_local(&self) -> Option<&dyn language::LocalFile> {
        if self.is_local {
            Some(self)
        } else {
            None
        }
    }

    fn mtime(&self) -> SystemTime {
        self.mtime
    }

    /// The file's path, relative to the worktree root.
    fn path(&self) -> &Arc<Path> {
        &self.path
    }

    /// Builds a path suitable for display: the worktree's root name (for
    /// visible worktrees) or its absolute path (with `~` substituted for the
    /// home directory, when possible), followed by the file's relative path.
    fn full_path(&self, cx: &AppContext) -> PathBuf {
        let mut full_path = PathBuf::new();
        let worktree = self.worktree.read(cx);

        if worktree.is_visible() {
            full_path.push(worktree.root_name());
        } else {
            let path = worktree.abs_path();

            if worktree.is_local() && path.starts_with(HOME.as_path()) {
                full_path.push("~");
                full_path.push(path.strip_prefix(HOME.as_path()).unwrap());
            } else {
                full_path.push(path)
            }
        }

        // An empty relative path means the file *is* the worktree root.
        if self.path.components().next().is_some() {
            full_path.push(&self.path);
        }

        full_path
    }

    /// Returns the last component of this handle's absolute path. If this handle refers to the root
    /// of its worktree, then this method will return the name of the worktree itself.
    fn file_name<'a>(&'a self, cx: &'a AppContext) -> &'a OsStr {
        self.path
            .file_name()
            .unwrap_or_else(|| OsStr::new(&self.worktree.read(cx).root_name))
    }

    fn is_deleted(&self) -> bool {
        self.is_deleted
    }

    fn as_any(&self) -> &dyn Any {
        self
    }

    /// Serializes this file handle for transmission over the wire.
    fn to_proto(&self) -> rpc::proto::File {
        rpc::proto::File {
            worktree_id: self.worktree.id() as u64,
            entry_id: self.entry_id.to_proto(),
            path: self.path.to_string_lossy().into(),
            mtime: Some(self.mtime.into()),
            is_deleted: self.is_deleted,
        }
    }
}
1846
impl language::LocalFile for File {
    /// The file's absolute path on the local filesystem.
    ///
    /// Panics if the owning worktree is not local.
    fn abs_path(&self, cx: &AppContext) -> PathBuf {
        self.worktree
            .read(cx)
            .as_local()
            .unwrap()
            .abs_path
            .join(&self.path)
    }

    /// Loads the file's contents on a background thread.
    ///
    /// Panics if the owning worktree is not local.
    fn load(&self, cx: &AppContext) -> Task<Result<String>> {
        let worktree = self.worktree.read(cx).as_local().unwrap();
        let abs_path = worktree.absolutize(&self.path);
        let fs = worktree.fs.clone();
        cx.background()
            .spawn(async move { fs.load(&abs_path).await })
    }

    /// Notifies collaborators that a buffer was reloaded from disk. Only sends
    /// a message when the worktree is currently shared; errors are logged and
    /// otherwise ignored.
    fn buffer_reloaded(
        &self,
        buffer_id: u64,
        version: &clock::Global,
        fingerprint: RopeFingerprint,
        line_ending: LineEnding,
        mtime: SystemTime,
        cx: &mut AppContext,
    ) {
        let worktree = self.worktree.read(cx).as_local().unwrap();
        if let Some(project_id) = worktree.share.as_ref().map(|share| share.project_id) {
            worktree
                .client
                .send(proto::BufferReloaded {
                    project_id,
                    buffer_id,
                    version: serialize_version(version),
                    mtime: Some(mtime.into()),
                    fingerprint: serialize_fingerprint(fingerprint),
                    line_ending: serialize_line_ending(line_ending) as i32,
                })
                .log_err();
        }
    }
}
1890
1891impl File {
1892 pub fn from_proto(
1893 proto: rpc::proto::File,
1894 worktree: ModelHandle<Worktree>,
1895 cx: &AppContext,
1896 ) -> Result<Self> {
1897 let worktree_id = worktree
1898 .read(cx)
1899 .as_remote()
1900 .ok_or_else(|| anyhow!("not remote"))?
1901 .id();
1902
1903 if worktree_id.to_proto() != proto.worktree_id {
1904 return Err(anyhow!("worktree id does not match file"));
1905 }
1906
1907 Ok(Self {
1908 worktree,
1909 path: Path::new(&proto.path).into(),
1910 mtime: proto.mtime.ok_or_else(|| anyhow!("no timestamp"))?.into(),
1911 entry_id: ProjectEntryId::from_proto(proto.entry_id),
1912 is_local: false,
1913 is_deleted: proto.is_deleted,
1914 })
1915 }
1916
1917 pub fn from_dyn(file: Option<&Arc<dyn language::File>>) -> Option<&Self> {
1918 file.and_then(|f| f.as_any().downcast_ref())
1919 }
1920
1921 pub fn worktree_id(&self, cx: &AppContext) -> WorktreeId {
1922 self.worktree.read(cx).id()
1923 }
1924
1925 pub fn project_entry_id(&self, _: &AppContext) -> Option<ProjectEntryId> {
1926 if self.is_deleted {
1927 None
1928 } else {
1929 Some(self.entry_id)
1930 }
1931 }
1932}
1933
/// A single file or directory within a worktree snapshot.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct Entry {
    pub id: ProjectEntryId,
    pub kind: EntryKind,
    // Path relative to the worktree root.
    pub path: Arc<Path>,
    pub inode: u64,
    pub mtime: SystemTime,
    pub is_symlink: bool,
    // Whether the entry is excluded by a gitignore.
    pub is_ignored: bool,
}
1944
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum EntryKind {
    // A directory that has been discovered but whose children haven't been scanned yet.
    PendingDir,
    // A fully scanned directory.
    Dir,
    // A file, with the character bag used for fuzzy matching of its path.
    File(CharBag),
}
1951
/// The kind of change reported for a path when diffing two snapshots.
#[derive(Clone, Copy, Debug)]
pub enum PathChange {
    Added,
    Removed,
    Updated,
    // Used when the previous state of the path is unknown
    // (e.g. for events received during the initial scan).
    AddedOrUpdated,
}
1959
1960impl Entry {
1961 fn new(
1962 path: Arc<Path>,
1963 metadata: &fs::Metadata,
1964 next_entry_id: &AtomicUsize,
1965 root_char_bag: CharBag,
1966 ) -> Self {
1967 Self {
1968 id: ProjectEntryId::new(next_entry_id),
1969 kind: if metadata.is_dir {
1970 EntryKind::PendingDir
1971 } else {
1972 EntryKind::File(char_bag_for_path(root_char_bag, &path))
1973 },
1974 path,
1975 inode: metadata.inode,
1976 mtime: metadata.mtime,
1977 is_symlink: metadata.is_symlink,
1978 is_ignored: false,
1979 }
1980 }
1981
1982 pub fn is_dir(&self) -> bool {
1983 matches!(self.kind, EntryKind::Dir | EntryKind::PendingDir)
1984 }
1985
1986 pub fn is_file(&self) -> bool {
1987 matches!(self.kind, EntryKind::File(_))
1988 }
1989}
1990
1991impl sum_tree::Item for Entry {
1992 type Summary = EntrySummary;
1993
1994 fn summary(&self) -> Self::Summary {
1995 let visible_count = if self.is_ignored { 0 } else { 1 };
1996 let file_count;
1997 let visible_file_count;
1998 if self.is_file() {
1999 file_count = 1;
2000 visible_file_count = visible_count;
2001 } else {
2002 file_count = 0;
2003 visible_file_count = 0;
2004 }
2005
2006 EntrySummary {
2007 max_path: self.path.clone(),
2008 count: 1,
2009 visible_count,
2010 file_count,
2011 visible_file_count,
2012 }
2013 }
2014}
2015
2016impl sum_tree::KeyedItem for Entry {
2017 type Key = PathKey;
2018
2019 fn key(&self) -> Self::Key {
2020 PathKey(self.path.clone())
2021 }
2022}
2023
/// Aggregated statistics over a subtree of `Entry` items.
#[derive(Clone, Debug)]
pub struct EntrySummary {
    // The greatest (rightmost) path in the subtree.
    max_path: Arc<Path>,
    // Total number of entries.
    count: usize,
    // Entries that aren't gitignored.
    visible_count: usize,
    // Entries that are files.
    file_count: usize,
    // Entries that are files and aren't gitignored.
    visible_file_count: usize,
}
2032
2033impl Default for EntrySummary {
2034 fn default() -> Self {
2035 Self {
2036 max_path: Arc::from(Path::new("")),
2037 count: 0,
2038 visible_count: 0,
2039 file_count: 0,
2040 visible_file_count: 0,
2041 }
2042 }
2043}
2044
2045impl sum_tree::Summary for EntrySummary {
2046 type Context = ();
2047
2048 fn add_summary(&mut self, rhs: &Self, _: &()) {
2049 self.max_path = rhs.max_path.clone();
2050 self.count += rhs.count;
2051 self.visible_count += rhs.visible_count;
2052 self.file_count += rhs.file_count;
2053 self.visible_file_count += rhs.visible_file_count;
2054 }
2055}
2056
/// An id-keyed record of an entry, stored alongside the path-keyed tree so
/// entries can be looked up by `ProjectEntryId`.
#[derive(Clone, Debug)]
struct PathEntry {
    id: ProjectEntryId,
    path: Arc<Path>,
    is_ignored: bool,
    // The scan in which this entry was last modified.
    scan_id: usize,
}
2064
2065impl sum_tree::Item for PathEntry {
2066 type Summary = PathEntrySummary;
2067
2068 fn summary(&self) -> Self::Summary {
2069 PathEntrySummary { max_id: self.id }
2070 }
2071}
2072
2073impl sum_tree::KeyedItem for PathEntry {
2074 type Key = ProjectEntryId;
2075
2076 fn key(&self) -> Self::Key {
2077 self.id
2078 }
2079}
2080
/// Summary for the id-keyed entry tree: the greatest id in a subtree.
#[derive(Clone, Debug, Default)]
struct PathEntrySummary {
    max_id: ProjectEntryId,
}
2085
impl sum_tree::Summary for PathEntrySummary {
    type Context = ();

    /// The right-hand subtree holds the larger ids, so its max wins.
    fn add_summary(&mut self, summary: &Self, _: &Self::Context) {
        self.max_id = summary.max_id;
    }
}
2093
// Allows seeking through the id-keyed tree by entry id.
impl<'a> sum_tree::Dimension<'a, PathEntrySummary> for ProjectEntryId {
    fn add_summary(&mut self, summary: &'a PathEntrySummary, _: &()) {
        *self = summary.max_id;
    }
}
2099
/// A worktree-relative path used as the ordering key of the entry tree.
#[derive(Clone, Debug, Eq, PartialEq, Ord, PartialOrd)]
pub struct PathKey(Arc<Path>);
2102
2103impl Default for PathKey {
2104 fn default() -> Self {
2105 Self(Path::new("").into())
2106 }
2107}
2108
2109impl<'a> sum_tree::Dimension<'a, EntrySummary> for PathKey {
2110 fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
2111 self.0 = summary.max_path.clone();
2112 }
2113}
2114
/// Worker that keeps a local worktree's snapshot in sync with the filesystem,
/// performing the initial scan and then reacting to FS events.
struct BackgroundScanner {
    // The snapshot being built; shared with the foreground via cloning.
    snapshot: Mutex<LocalSnapshot>,
    fs: Arc<dyn Fs>,
    // Channel used to report scan progress and completed updates.
    status_updates_tx: UnboundedSender<ScanState>,
    executor: Arc<executor::Background>,
    // Requests from the worktree to re-scan specific paths.
    refresh_requests_rx: channel::Receiver<(Vec<PathBuf>, barrier::Sender)>,
    // The previously reported snapshot plus the paths changed since then,
    // used to compute change sets for status updates.
    prev_state: Mutex<(Snapshot, Vec<Arc<Path>>)>,
    finished_initial_scan: bool,
}
2124
2125impl BackgroundScanner {
2126 fn new(
2127 snapshot: LocalSnapshot,
2128 fs: Arc<dyn Fs>,
2129 status_updates_tx: UnboundedSender<ScanState>,
2130 executor: Arc<executor::Background>,
2131 refresh_requests_rx: channel::Receiver<(Vec<PathBuf>, barrier::Sender)>,
2132 ) -> Self {
2133 Self {
2134 fs,
2135 status_updates_tx,
2136 executor,
2137 refresh_requests_rx,
2138 prev_state: Mutex::new((snapshot.snapshot.clone(), Vec::new())),
2139 snapshot: Mutex::new(snapshot),
2140 finished_initial_scan: false,
2141 }
2142 }
2143
    /// Main loop of the scanner: performs the initial scan, drains any FS
    /// events that arrived during it, then alternates between refresh
    /// requests and FS events until all channels close.
    async fn run(
        &mut self,
        mut events_rx: Pin<Box<dyn Send + Stream<Item = Vec<fsevent::Event>>>>,
    ) {
        use futures::FutureExt as _;

        let (root_abs_path, root_inode) = {
            let snapshot = self.snapshot.lock();
            (
                snapshot.abs_path.clone(),
                snapshot.root_entry().map(|e| e.inode),
            )
        };

        // Populate ignores above the root.
        let ignore_stack;
        for ancestor in root_abs_path.ancestors().skip(1) {
            if let Ok(ignore) = build_gitignore(&ancestor.join(&*GITIGNORE), self.fs.as_ref()).await
            {
                self.snapshot
                    .lock()
                    .ignores_by_parent_abs_path
                    .insert(ancestor.into(), (ignore.into(), 0));
            }
        }
        {
            let mut snapshot = self.snapshot.lock();
            ignore_stack = snapshot.ignore_stack_for_abs_path(&root_abs_path, true);
            // If the root itself is ignored by an ancestor's gitignore, mark
            // its entry accordingly before scanning.
            if ignore_stack.is_all() {
                if let Some(mut root_entry) = snapshot.root_entry().cloned() {
                    root_entry.is_ignored = true;
                    snapshot.insert_entry(root_entry, self.fs.as_ref());
                }
            }
        };

        // Perform an initial scan of the directory.
        let (scan_job_tx, scan_job_rx) = channel::unbounded();
        smol::block_on(scan_job_tx.send(ScanJob {
            abs_path: root_abs_path,
            path: Arc::from(Path::new("")),
            ignore_stack,
            ancestor_inodes: TreeSet::from_ordered_entries(root_inode),
            scan_queue: scan_job_tx.clone(),
        }))
        .unwrap();
        // Dropping our sender lets scan_dirs detect when all jobs are done.
        drop(scan_job_tx);
        self.scan_dirs(true, scan_job_rx).await;
        self.send_status_update(false, None);

        // Process any FS events that occurred while performing the initial scan.
        // For these events, update events cannot be as precise, because we didn't
        // have the previous state loaded yet.
        if let Poll::Ready(Some(events)) = futures::poll!(events_rx.next()) {
            let mut paths = events.into_iter().map(|e| e.path).collect::<Vec<_>>();
            while let Poll::Ready(Some(more_events)) = futures::poll!(events_rx.next()) {
                paths.extend(more_events.into_iter().map(|e| e.path));
            }
            self.process_events(paths).await;
            self.send_status_update(false, None);
        }

        self.finished_initial_scan = true;

        // Continue processing events until the worktree is dropped.
        loop {
            select_biased! {
                // Process any path refresh requests from the worktree. Prioritize
                // these before handling changes reported by the filesystem.
                request = self.refresh_requests_rx.recv().fuse() => {
                    let Ok((paths, barrier)) = request else { break };
                    self.reload_entries_for_paths(paths, None).await;
                    if !self.send_status_update(false, Some(barrier)) {
                        break;
                    }
                }

                events = events_rx.next().fuse() => {
                    let Some(events) = events else { break };
                    // Batch up any additional events that are already pending.
                    let mut paths = events.into_iter().map(|e| e.path).collect::<Vec<_>>();
                    while let Poll::Ready(Some(more_events)) = futures::poll!(events_rx.next()) {
                        paths.extend(more_events.into_iter().map(|e| e.path));
                    }
                    self.process_events(paths).await;
                    self.send_status_update(false, None);
                }
            }
        }
    }
2233
    /// Handles a batch of changed paths reported by the filesystem: reloads
    /// the affected entries, rescans any newly discovered directories,
    /// recomputes ignore statuses, and prunes stale git/removed-id state.
    async fn process_events(&mut self, paths: Vec<PathBuf>) {
        use futures::FutureExt as _;

        let (scan_job_tx, scan_job_rx) = channel::unbounded();
        if let Some(mut paths) = self
            .reload_entries_for_paths(paths, Some(scan_job_tx.clone()))
            .await
        {
            // Record the changed paths so the next status update can compute
            // a precise change set.
            paths.sort_unstable();
            util::extend_sorted(&mut self.prev_state.lock().1, paths, usize::MAX, Ord::cmp);
        }
        drop(scan_job_tx);
        self.scan_dirs(false, scan_job_rx).await;

        let (ignore_queue_tx, ignore_queue_rx) = channel::unbounded();
        let snapshot = self.update_ignore_statuses(ignore_queue_tx);
        // Fan the ignore-status work out across all cores.
        self.executor
            .scoped(|scope| {
                for _ in 0..self.executor.num_cpus() {
                    scope.spawn(async {
                        loop {
                            select_biased! {
                                // Process any path refresh requests before moving on to process
                                // the queue of ignore statuses.
                                request = self.refresh_requests_rx.recv().fuse() => {
                                    let Ok((paths, barrier)) = request else { break };
                                    self.reload_entries_for_paths(paths, None).await;
                                    if !self.send_status_update(false, Some(barrier)) {
                                        return;
                                    }
                                }

                                // Recursively process directories whose ignores have changed.
                                job = ignore_queue_rx.recv().fuse() => {
                                    let Ok(job) = job else { break };
                                    self.update_ignore_status(job, &snapshot).await;
                                }
                            }
                        }
                    });
                }
            })
            .await;

        let mut snapshot = self.snapshot.lock();
        // Drop repositories whose .git directory no longer exists.
        let mut git_repositories = mem::take(&mut snapshot.git_repositories);
        git_repositories.retain(|repo| snapshot.entry_for_path(&repo.git_dir_path).is_some());
        snapshot.git_repositories = git_repositories;
        snapshot.removed_entry_ids.clear();
        snapshot.completed_scan_id = snapshot.scan_id;
    }
2285
    /// Drains the queue of directory-scan jobs using one worker per core,
    /// optionally emitting throttled progress updates along the way. Returns
    /// once the job queue is exhausted (all senders dropped).
    async fn scan_dirs(
        &self,
        enable_progress_updates: bool,
        scan_jobs_rx: channel::Receiver<ScanJob>,
    ) {
        use futures::FutureExt as _;

        if self
            .status_updates_tx
            .unbounded_send(ScanState::Started)
            .is_err()
        {
            // The worktree has been dropped; no point in scanning.
            return;
        }

        let progress_update_count = AtomicUsize::new(0);
        self.executor
            .scoped(|scope| {
                for _ in 0..self.executor.num_cpus() {
                    scope.spawn(async {
                        let mut last_progress_update_count = 0;
                        let progress_update_timer = self.progress_timer(enable_progress_updates).fuse();
                        futures::pin_mut!(progress_update_timer);

                        loop {
                            select_biased! {
                                // Process any path refresh requests before moving on to process
                                // the scan queue, so that user operations are prioritized.
                                request = self.refresh_requests_rx.recv().fuse() => {
                                    let Ok((paths, barrier)) = request else { break };
                                    self.reload_entries_for_paths(paths, None).await;
                                    if !self.send_status_update(false, Some(barrier)) {
                                        return;
                                    }
                                }

                                // Send periodic progress updates to the worktree. Use an atomic counter
                                // to ensure that only one of the workers sends a progress update after
                                // the update interval elapses.
                                _ = progress_update_timer => {
                                    match progress_update_count.compare_exchange(
                                        last_progress_update_count,
                                        last_progress_update_count + 1,
                                        SeqCst,
                                        SeqCst
                                    ) {
                                        Ok(_) => {
                                            last_progress_update_count += 1;
                                            self.send_status_update(true, None);
                                        }
                                        Err(count) => {
                                            // Another worker already sent this update.
                                            last_progress_update_count = count;
                                        }
                                    }
                                    progress_update_timer.set(self.progress_timer(enable_progress_updates).fuse());
                                }

                                // Recursively load directories from the file system.
                                job = scan_jobs_rx.recv().fuse() => {
                                    let Ok(job) = job else { break };
                                    if let Err(err) = self.scan_dir(&job).await {
                                        if job.path.as_ref() != Path::new("") {
                                            log::error!("error scanning directory {:?}: {}", job.abs_path, err);
                                        }
                                    }
                                }
                            }
                        }
                    })
                }
            })
            .await;
    }
2359
2360 fn send_status_update(&self, scanning: bool, barrier: Option<barrier::Sender>) -> bool {
2361 let mut prev_state = self.prev_state.lock();
2362 let snapshot = self.snapshot.lock().clone();
2363 let mut old_snapshot = snapshot.snapshot.clone();
2364 mem::swap(&mut old_snapshot, &mut prev_state.0);
2365 let changed_paths = mem::take(&mut prev_state.1);
2366 let changes = self.build_change_set(&old_snapshot, &snapshot.snapshot, changed_paths);
2367 self.status_updates_tx
2368 .unbounded_send(ScanState::Updated {
2369 snapshot,
2370 changes,
2371 scanning,
2372 barrier,
2373 })
2374 .is_ok()
2375 }
2376
    /// Loads the children of the directory described by `job`, inserting the
    /// resulting entries into the snapshot and queueing new scan jobs for any
    /// subdirectories that were discovered.
    async fn scan_dir(&self, job: &ScanJob) -> Result<()> {
        let mut new_entries: Vec<Entry> = Vec::new();
        // Parallel to `new_entries`' directories: one queued job per child dir,
        // or `None` when recursion was skipped (recursive symlink).
        let mut new_jobs: Vec<Option<ScanJob>> = Vec::new();
        let mut ignore_stack = job.ignore_stack.clone();
        let mut new_ignore = None;
        let (root_abs_path, root_char_bag, next_entry_id) = {
            let snapshot = self.snapshot.lock();
            (
                snapshot.abs_path().clone(),
                snapshot.root_char_bag,
                snapshot.next_entry_id.clone(),
            )
        };
        let mut child_paths = self.fs.read_dir(&job.abs_path).await?;
        while let Some(child_abs_path) = child_paths.next().await {
            let child_abs_path: Arc<Path> = match child_abs_path {
                Ok(child_abs_path) => child_abs_path.into(),
                Err(error) => {
                    log::error!("error processing entry {:?}", error);
                    continue;
                }
            };

            let child_name = child_abs_path.file_name().unwrap();
            let child_path: Arc<Path> = job.path.join(child_name).into();
            let child_metadata = match self.fs.metadata(&child_abs_path).await {
                Ok(Some(metadata)) => metadata,
                // The child disappeared between readdir and stat; skip it.
                Ok(None) => continue,
                Err(err) => {
                    log::error!("error processing {:?}: {:?}", child_abs_path, err);
                    continue;
                }
            };

            // If we find a .gitignore, add it to the stack of ignores used to determine which paths are ignored
            if child_name == *GITIGNORE {
                match build_gitignore(&child_abs_path, self.fs.as_ref()).await {
                    Ok(ignore) => {
                        let ignore = Arc::new(ignore);
                        ignore_stack = ignore_stack.append(job.abs_path.clone(), ignore.clone());
                        new_ignore = Some(ignore);
                    }
                    Err(error) => {
                        log::error!(
                            "error loading .gitignore file {:?} - {:?}",
                            child_name,
                            error
                        );
                    }
                }

                // Update ignore status of any child entries we've already processed to reflect the
                // ignore file in the current directory. Because `.gitignore` starts with a `.`, it
                // sorts near the start of the directory listing, so there should rarely be many
                // earlier entries to revisit. Update the ignore stack associated with any
                // new jobs as well.
                let mut new_jobs = new_jobs.iter_mut();
                for entry in &mut new_entries {
                    let entry_abs_path = root_abs_path.join(&entry.path);
                    entry.is_ignored =
                        ignore_stack.is_abs_path_ignored(&entry_abs_path, entry.is_dir());

                    if entry.is_dir() {
                        if let Some(job) = new_jobs.next().expect("Missing scan job for entry") {
                            job.ignore_stack = if entry.is_ignored {
                                IgnoreStack::all()
                            } else {
                                ignore_stack.clone()
                            };
                        }
                    }
                }
            }

            let mut child_entry = Entry::new(
                child_path.clone(),
                &child_metadata,
                &next_entry_id,
                root_char_bag,
            );

            if child_entry.is_dir() {
                let is_ignored = ignore_stack.is_abs_path_ignored(&child_abs_path, true);
                child_entry.is_ignored = is_ignored;

                // Avoid recursing until crash in the case of a recursive symlink
                if !job.ancestor_inodes.contains(&child_entry.inode) {
                    let mut ancestor_inodes = job.ancestor_inodes.clone();
                    ancestor_inodes.insert(child_entry.inode);

                    new_jobs.push(Some(ScanJob {
                        abs_path: child_abs_path,
                        path: child_path,
                        ignore_stack: if is_ignored {
                            // Everything under an ignored directory is ignored.
                            IgnoreStack::all()
                        } else {
                            ignore_stack.clone()
                        },
                        ancestor_inodes,
                        scan_queue: job.scan_queue.clone(),
                    }));
                } else {
                    new_jobs.push(None);
                }
            } else {
                child_entry.is_ignored = ignore_stack.is_abs_path_ignored(&child_abs_path, false);
            }

            new_entries.push(child_entry);
        }

        self.snapshot.lock().populate_dir(
            job.path.clone(),
            new_entries,
            new_ignore,
            self.fs.as_ref(),
        );

        // Queue subdirectory scans only after the parent directory's entries
        // are visible in the snapshot.
        for new_job in new_jobs {
            if let Some(new_job) = new_job {
                job.scan_queue.send(new_job).await.unwrap();
            }
        }

        Ok(())
    }
2502
    /// Refreshes snapshot entries for the given absolute paths in response to
    /// file-system events.
    ///
    /// When `scan_queue_tx` is provided, refreshed directories are also queued
    /// for a recursive rescan. Returns the affected paths relative to the
    /// worktree root, or `None` if the root can no longer be canonicalized
    /// (e.g. it was deleted).
    async fn reload_entries_for_paths(
        &self,
        mut abs_paths: Vec<PathBuf>,
        scan_queue_tx: Option<Sender<ScanJob>>,
    ) -> Option<Vec<Arc<Path>>> {
        let doing_recursive_update = scan_queue_tx.is_some();

        abs_paths.sort_unstable();
        // After sorting, a path that starts with its predecessor is a
        // descendant of it and is covered by refreshing the ancestor.
        abs_paths.dedup_by(|a, b| a.starts_with(&b));

        let root_abs_path = self.snapshot.lock().abs_path.clone();
        let root_canonical_path = self.fs.canonicalize(&root_abs_path).await.log_err()?;
        let metadata = futures::future::join_all(
            abs_paths
                .iter()
                .map(|abs_path| self.fs.metadata(&abs_path))
                .collect::<Vec<_>>(),
        )
        .await;

        let mut snapshot = self.snapshot.lock();

        // Begin a new scan unless one is already in flight.
        if snapshot.completed_scan_id == snapshot.scan_id {
            snapshot.scan_id += 1;
            if !doing_recursive_update {
                snapshot.completed_scan_id = snapshot.scan_id;
            }
        }

        // Remove any entries for paths that no longer exist or are being recursively
        // refreshed. Do this before adding any new entries, so that renames can be
        // detected regardless of the order of the paths.
        let mut event_paths = Vec::<Arc<Path>>::with_capacity(abs_paths.len());
        for (abs_path, metadata) in abs_paths.iter().zip(metadata.iter()) {
            if let Ok(path) = abs_path.strip_prefix(&root_canonical_path) {
                if matches!(metadata, Ok(None)) || doing_recursive_update {
                    snapshot.remove_path(path);
                }
                event_paths.push(path.into());
            } else {
                log::error!(
                    "unexpected event {:?} for root path {:?}",
                    abs_path,
                    root_canonical_path
                );
            }
        }

        for (path, metadata) in event_paths.iter().cloned().zip(metadata.into_iter()) {
            let abs_path: Arc<Path> = root_abs_path.join(&path).into();

            match metadata {
                Ok(Some(metadata)) => {
                    let ignore_stack =
                        snapshot.ignore_stack_for_abs_path(&abs_path, metadata.is_dir);
                    let mut fs_entry = Entry::new(
                        path.clone(),
                        &metadata,
                        snapshot.next_entry_id.as_ref(),
                        snapshot.root_char_bag,
                    );
                    fs_entry.is_ignored = ignore_stack.is_all();
                    snapshot.insert_entry(fs_entry, self.fs.as_ref());

                    // A change inside a `.git` directory invalidates the
                    // repository's index state.
                    let scan_id = snapshot.scan_id;
                    if let Some(repo) = snapshot.repo_with_dot_git_containing(&path) {
                        repo.repo.lock().reload_index();
                        repo.scan_id = scan_id;
                    }

                    if let Some(scan_queue_tx) = &scan_queue_tx {
                        let mut ancestor_inodes = snapshot.ancestor_inodes_for_path(&path);
                        // Guard against recursive symlinks, mirroring `scan_dir`.
                        if metadata.is_dir && !ancestor_inodes.contains(&metadata.inode) {
                            ancestor_inodes.insert(metadata.inode);
                            smol::block_on(scan_queue_tx.send(ScanJob {
                                abs_path,
                                path,
                                ignore_stack,
                                ancestor_inodes,
                                scan_queue: scan_queue_tx.clone(),
                            }))
                            .unwrap();
                        }
                    }
                }
                Ok(None) => {}
                Err(err) => {
                    // TODO - create a special 'error' entry in the entries tree to mark this
                    log::error!("error reading file on event {:?}", err);
                }
            }
        }

        Some(event_paths)
    }
2598
    /// Determines which directories need their ignore statuses recomputed
    /// after `.gitignore` changes, queues an `UpdateIgnoreStatusJob` for each,
    /// and returns the snapshot clone that those jobs should run against.
    fn update_ignore_statuses(
        &self,
        ignore_queue_tx: Sender<UpdateIgnoreStatusJob>,
    ) -> LocalSnapshot {
        let mut snapshot = self.snapshot.lock().clone();
        let mut ignores_to_update = Vec::new();
        let mut ignores_to_delete = Vec::new();
        for (parent_abs_path, (_, scan_id)) in &snapshot.ignores_by_parent_abs_path {
            if let Ok(parent_path) = parent_abs_path.strip_prefix(&snapshot.abs_path) {
                // Only refresh directories whose ignore entry was touched
                // during the current scan and that still exist.
                if *scan_id == snapshot.scan_id && snapshot.entry_for_path(parent_path).is_some() {
                    ignores_to_update.push(parent_abs_path.clone());
                }

                // The `.gitignore` file itself was removed; drop its rules.
                let ignore_path = parent_path.join(&*GITIGNORE);
                if snapshot.entry_for_path(ignore_path).is_none() {
                    ignores_to_delete.push(parent_abs_path.clone());
                }
            }
        }

        for parent_abs_path in ignores_to_delete {
            // Remove from both the local clone and the shared snapshot.
            snapshot.ignores_by_parent_abs_path.remove(&parent_abs_path);
            self.snapshot
                .lock()
                .ignores_by_parent_abs_path
                .remove(&parent_abs_path);
        }

        ignores_to_update.sort_unstable();
        let mut ignores_to_update = ignores_to_update.into_iter().peekable();
        while let Some(parent_abs_path) = ignores_to_update.next() {
            // Skip any descendants: updating the ancestor recursively covers them.
            while ignores_to_update
                .peek()
                .map_or(false, |p| p.starts_with(&parent_abs_path))
            {
                ignores_to_update.next().unwrap();
            }

            let ignore_stack = snapshot.ignore_stack_for_abs_path(&parent_abs_path, true);
            smol::block_on(ignore_queue_tx.send(UpdateIgnoreStatusJob {
                abs_path: parent_abs_path,
                ignore_stack,
                ignore_queue: ignore_queue_tx.clone(),
            }))
            .unwrap();
        }

        snapshot
    }
2648
    /// Recomputes the ignored status of the direct children of `job.abs_path`,
    /// queueing follow-up jobs for subdirectories and writing any changed
    /// entries back into the live snapshot.
    async fn update_ignore_status(&self, job: UpdateIgnoreStatusJob, snapshot: &LocalSnapshot) {
        let mut ignore_stack = job.ignore_stack;
        // Include this directory's own `.gitignore`, if it has one.
        if let Some((ignore, _)) = snapshot.ignores_by_parent_abs_path.get(&job.abs_path) {
            ignore_stack = ignore_stack.append(job.abs_path.clone(), ignore.clone());
        }

        let mut entries_by_id_edits = Vec::new();
        let mut entries_by_path_edits = Vec::new();
        let path = job.abs_path.strip_prefix(&snapshot.abs_path).unwrap();
        for mut entry in snapshot.child_entries(path).cloned() {
            let was_ignored = entry.is_ignored;
            let abs_path = snapshot.abs_path().join(&entry.path);
            entry.is_ignored = ignore_stack.is_abs_path_ignored(&abs_path, entry.is_dir());
            if entry.is_dir() {
                let child_ignore_stack = if entry.is_ignored {
                    // Everything under an ignored directory is ignored.
                    IgnoreStack::all()
                } else {
                    ignore_stack.clone()
                };
                job.ignore_queue
                    .send(UpdateIgnoreStatusJob {
                        abs_path: abs_path.into(),
                        ignore_stack: child_ignore_stack,
                        ignore_queue: job.ignore_queue.clone(),
                    })
                    .await
                    .unwrap();
            }

            // Only write back entries whose status actually changed.
            if entry.is_ignored != was_ignored {
                let mut path_entry = snapshot.entries_by_id.get(&entry.id, &()).unwrap().clone();
                path_entry.scan_id = snapshot.scan_id;
                path_entry.is_ignored = entry.is_ignored;
                entries_by_id_edits.push(Edit::Insert(path_entry));
                entries_by_path_edits.push(Edit::Insert(entry));
            }
        }

        let mut snapshot = self.snapshot.lock();
        snapshot.entries_by_path.edit(entries_by_path_edits, &());
        snapshot.entries_by_id.edit(entries_by_id_edits, &());
    }
2691
    /// Diffs `old_snapshot` against `new_snapshot` at each of the
    /// `event_paths` (including their descendants), classifying each differing
    /// entry as `Added`, `Removed`, `Updated`, or `AddedOrUpdated`.
    fn build_change_set(
        &self,
        old_snapshot: &Snapshot,
        new_snapshot: &Snapshot,
        event_paths: Vec<Arc<Path>>,
    ) -> HashMap<Arc<Path>, PathChange> {
        use PathChange::{Added, AddedOrUpdated, Removed, Updated};

        let mut changes = HashMap::default();
        let mut old_paths = old_snapshot.entries_by_path.cursor::<PathKey>();
        let mut new_paths = new_snapshot.entries_by_path.cursor::<PathKey>();
        let received_before_initialized = !self.finished_initial_scan;

        for path in event_paths {
            let path = PathKey(path);
            // Position both cursors at the event path; entries are compared in
            // lockstep from there.
            old_paths.seek(&path, Bias::Left, &());
            new_paths.seek(&path, Bias::Left, &());

            loop {
                match (old_paths.item(), new_paths.item()) {
                    (Some(old_entry), Some(new_entry)) => {
                        // Stop once both cursors have moved past the event
                        // path and out of its subtree.
                        if old_entry.path > path.0
                            && new_entry.path > path.0
                            && !old_entry.path.starts_with(&path.0)
                            && !new_entry.path.starts_with(&path.0)
                        {
                            break;
                        }

                        match Ord::cmp(&old_entry.path, &new_entry.path) {
                            Ordering::Less => {
                                // Present only in the old snapshot.
                                changes.insert(old_entry.path.clone(), Removed);
                                old_paths.next(&());
                            }
                            Ordering::Equal => {
                                if received_before_initialized {
                                    // If the worktree was not fully initialized when this event was generated,
                                    // we can't know whether this entry was added during the scan or whether
                                    // it was merely updated.
                                    changes.insert(new_entry.path.clone(), AddedOrUpdated);
                                } else if old_entry.mtime != new_entry.mtime {
                                    changes.insert(new_entry.path.clone(), Updated);
                                }
                                old_paths.next(&());
                                new_paths.next(&());
                            }
                            Ordering::Greater => {
                                // Present only in the new snapshot.
                                changes.insert(new_entry.path.clone(), Added);
                                new_paths.next(&());
                            }
                        }
                    }
                    (Some(old_entry), None) => {
                        changes.insert(old_entry.path.clone(), Removed);
                        old_paths.next(&());
                    }
                    (None, Some(new_entry)) => {
                        changes.insert(new_entry.path.clone(), Added);
                        new_paths.next(&());
                    }
                    (None, None) => break,
                }
            }
        }
        changes
    }
2758
    /// Waits for one progress-update interval to elapse.
    ///
    /// When `running` is false, this pends forever so the corresponding
    /// `select_biased!` arm never fires. Under a fake file system in tests, a
    /// random simulated delay is used instead of wall-clock time.
    async fn progress_timer(&self, running: bool) {
        if !running {
            return futures::future::pending().await;
        }

        #[cfg(any(test, feature = "test-support"))]
        if self.fs.is_fake() {
            return self.executor.simulate_random_delay().await;
        }

        smol::Timer::after(Duration::from_millis(100)).await;
    }
2771}
2772
2773fn char_bag_for_path(root_char_bag: CharBag, path: &Path) -> CharBag {
2774 let mut result = root_char_bag;
2775 result.extend(
2776 path.to_string_lossy()
2777 .chars()
2778 .map(|c| c.to_ascii_lowercase()),
2779 );
2780 result
2781}
2782
/// A unit of work for the background scanner: load the children of a single
/// directory.
struct ScanJob {
    /// Absolute path of the directory to scan.
    abs_path: Arc<Path>,
    /// The same directory, relative to the worktree root.
    path: Arc<Path>,
    /// Gitignore rules in effect for this directory's children.
    ignore_stack: Arc<IgnoreStack>,
    /// Queue on which jobs for newly discovered subdirectories are scheduled.
    scan_queue: Sender<ScanJob>,
    /// Inodes of ancestor directories, used to detect recursive symlinks.
    ancestor_inodes: TreeSet<u64>,
}
2790
/// A unit of work for recomputing ignore statuses: re-evaluate the children of
/// a single directory after its gitignore rules changed.
struct UpdateIgnoreStatusJob {
    /// Absolute path of the directory whose children should be re-evaluated.
    abs_path: Arc<Path>,
    /// Gitignore rules inherited from ancestor directories.
    ignore_stack: Arc<IgnoreStack>,
    /// Queue on which follow-up jobs for subdirectories are scheduled.
    ignore_queue: Sender<UpdateIgnoreStatusJob>,
}
2796
/// Test-support extension methods for `ModelHandle<Worktree>`.
pub trait WorktreeHandle {
    /// Waits until all pending file-system events have been processed by the
    /// worktree, so that subsequent assertions observe a settled state.
    #[cfg(any(test, feature = "test-support"))]
    fn flush_fs_events<'a>(
        &self,
        cx: &'a gpui::TestAppContext,
    ) -> futures::future::LocalBoxFuture<'a, ()>;
}
2804
impl WorktreeHandle for ModelHandle<Worktree> {
    // The worktree's FS event stream sometimes delivers "redundant" events for FS changes that
    // occurred before the worktree was constructed. These events can cause the worktree to perform
    // extra directory scans, and emit extra scan-state notifications.
    //
    // This function mutates the worktree's directory and waits for those mutations to be picked up,
    // to ensure that all redundant FS events have already been processed.
    #[cfg(any(test, feature = "test-support"))]
    fn flush_fs_events<'a>(
        &self,
        cx: &'a gpui::TestAppContext,
    ) -> futures::future::LocalBoxFuture<'a, ()> {
        use smol::future::FutureExt;

        let filename = "fs-event-sentinel";
        let tree = self.clone();
        let (fs, root_path) = self.read_with(cx, |tree, _| {
            let tree = tree.as_local().unwrap();
            (tree.fs.clone(), tree.abs_path().clone())
        });

        async move {
            // Create a sentinel file and wait for the worktree to notice it...
            fs.create_file(&root_path.join(filename), Default::default())
                .await
                .unwrap();
            tree.condition(cx, |tree, _| tree.entry_for_path(filename).is_some())
                .await;

            // ...then delete it and wait for the deletion to be observed too.
            fs.remove_file(&root_path.join(filename), Default::default())
                .await
                .unwrap();
            tree.condition(cx, |tree, _| tree.entry_for_path(filename).is_none())
                .await;

            cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
                .await;
        }
        .boxed_local()
    }
}
2845
/// Running totals accumulated while traversing the entry tree; used as a
/// `sum_tree` dimension so traversals can seek by entry count.
#[derive(Clone, Debug)]
struct TraversalProgress<'a> {
    /// Largest entry path seen so far, in traversal order.
    max_path: &'a Path,
    /// Total number of entries.
    count: usize,
    /// Entries that are not gitignored.
    visible_count: usize,
    /// Non-directory entries.
    file_count: usize,
    /// Non-directory entries that are not gitignored.
    visible_file_count: usize,
}
2854
2855impl<'a> TraversalProgress<'a> {
2856 fn count(&self, include_dirs: bool, include_ignored: bool) -> usize {
2857 match (include_ignored, include_dirs) {
2858 (true, true) => self.count,
2859 (true, false) => self.file_count,
2860 (false, true) => self.visible_count,
2861 (false, false) => self.visible_file_count,
2862 }
2863 }
2864}
2865
impl<'a> sum_tree::Dimension<'a, EntrySummary> for TraversalProgress<'a> {
    /// Accumulates one entry summary into the running traversal totals.
    fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
        self.max_path = summary.max_path.as_ref();
        self.count += summary.count;
        self.visible_count += summary.visible_count;
        self.file_count += summary.file_count;
        self.visible_file_count += summary.visible_file_count;
    }
}
2875
impl<'a> Default for TraversalProgress<'a> {
    /// The zero dimension: an empty path and all counts at zero.
    fn default() -> Self {
        Self {
            max_path: Path::new(""),
            count: 0,
            visible_count: 0,
            file_count: 0,
            visible_file_count: 0,
        }
    }
}
2887
/// An iterator-like cursor over a snapshot's entries in path order, optionally
/// filtering out directories and/or gitignored entries.
pub struct Traversal<'a> {
    /// Cursor into the snapshot's `entries_by_path` tree.
    cursor: sum_tree::Cursor<'a, Entry, TraversalProgress<'a>>,
    /// Whether gitignored entries are yielded.
    include_ignored: bool,
    /// Whether directory entries are yielded.
    include_dirs: bool,
}
2893
impl<'a> Traversal<'a> {
    /// Advances to the next entry matching this traversal's filters.
    /// Returns `false` when the traversal is exhausted.
    pub fn advance(&mut self) -> bool {
        self.advance_to_offset(self.offset() + 1)
    }

    /// Seeks forward until `offset` filtered entries have been passed.
    /// Returns `false` when the traversal is exhausted.
    pub fn advance_to_offset(&mut self, offset: usize) -> bool {
        self.cursor.seek_forward(
            &TraversalTarget::Count {
                count: offset,
                include_dirs: self.include_dirs,
                include_ignored: self.include_ignored,
            },
            Bias::Right,
            &(),
        )
    }

    /// Skips past the current entry's entire subtree, stopping at the next
    /// entry (matching the filters) that is not a descendant of it.
    /// Returns `false` when the traversal is exhausted.
    pub fn advance_to_sibling(&mut self) -> bool {
        while let Some(entry) = self.cursor.item() {
            self.cursor.seek_forward(
                &TraversalTarget::PathSuccessor(&entry.path),
                Bias::Left,
                &(),
            );
            if let Some(entry) = self.cursor.item() {
                // Keep seeking until the landed-on entry satisfies the filters.
                if (self.include_dirs || !entry.is_dir())
                    && (self.include_ignored || !entry.is_ignored)
                {
                    return true;
                }
            }
        }
        false
    }

    /// Returns the entry at the current position, if any.
    pub fn entry(&self) -> Option<&'a Entry> {
        self.cursor.item()
    }

    /// Returns the number of filtered entries preceding the current position.
    pub fn offset(&self) -> usize {
        self.cursor
            .start()
            .count(self.include_dirs, self.include_ignored)
    }
}
2939
2940impl<'a> Iterator for Traversal<'a> {
2941 type Item = &'a Entry;
2942
2943 fn next(&mut self) -> Option<Self::Item> {
2944 if let Some(item) = self.entry() {
2945 self.advance();
2946 Some(item)
2947 } else {
2948 None
2949 }
2950 }
2951}
2952
/// A seek target for `Traversal`'s underlying cursor.
#[derive(Debug)]
enum TraversalTarget<'a> {
    /// Seek to the given path itself.
    Path(&'a Path),
    /// Seek to the first position after the given path's entire subtree.
    PathSuccessor(&'a Path),
    /// Seek to the position at which `count` entries (restricted by the two
    /// filter flags) have been passed.
    Count {
        count: usize,
        include_ignored: bool,
        include_dirs: bool,
    },
}
2963
2964impl<'a, 'b> SeekTarget<'a, EntrySummary, TraversalProgress<'a>> for TraversalTarget<'b> {
2965 fn cmp(&self, cursor_location: &TraversalProgress<'a>, _: &()) -> Ordering {
2966 match self {
2967 TraversalTarget::Path(path) => path.cmp(&cursor_location.max_path),
2968 TraversalTarget::PathSuccessor(path) => {
2969 if !cursor_location.max_path.starts_with(path) {
2970 Ordering::Equal
2971 } else {
2972 Ordering::Greater
2973 }
2974 }
2975 TraversalTarget::Count {
2976 count,
2977 include_dirs,
2978 include_ignored,
2979 } => Ord::cmp(
2980 count,
2981 &cursor_location.count(*include_dirs, *include_ignored),
2982 ),
2983 }
2984 }
2985}
2986
/// Iterator over the immediate children of a directory, implemented by
/// skipping each child's subtree via `Traversal::advance_to_sibling`.
struct ChildEntriesIter<'a> {
    /// Directory whose direct children are yielded.
    parent_path: &'a Path,
    /// Underlying traversal, positioned at the next candidate entry.
    traversal: Traversal<'a>,
}
2991
2992impl<'a> Iterator for ChildEntriesIter<'a> {
2993 type Item = &'a Entry;
2994
2995 fn next(&mut self) -> Option<Self::Item> {
2996 if let Some(item) = self.traversal.entry() {
2997 if item.path.starts_with(&self.parent_path) {
2998 self.traversal.advance_to_sibling();
2999 return Some(item);
3000 }
3001 }
3002 None
3003 }
3004}
3005
impl<'a> From<&'a Entry> for proto::Entry {
    /// Serializes a worktree entry for transmission to remote collaborators.
    fn from(entry: &'a Entry) -> Self {
        Self {
            id: entry.id.to_proto(),
            is_dir: entry.is_dir(),
            // Paths are sent as (lossy) strings over the wire.
            path: entry.path.to_string_lossy().into(),
            inode: entry.inode,
            mtime: Some(entry.mtime.into()),
            is_symlink: entry.is_symlink,
            is_ignored: entry.is_ignored,
        }
    }
}
3019
3020impl<'a> TryFrom<(&'a CharBag, proto::Entry)> for Entry {
3021 type Error = anyhow::Error;
3022
3023 fn try_from((root_char_bag, entry): (&'a CharBag, proto::Entry)) -> Result<Self> {
3024 if let Some(mtime) = entry.mtime {
3025 let kind = if entry.is_dir {
3026 EntryKind::Dir
3027 } else {
3028 let mut char_bag = *root_char_bag;
3029 char_bag.extend(entry.path.chars().map(|c| c.to_ascii_lowercase()));
3030 EntryKind::File(char_bag)
3031 };
3032 let path: Arc<Path> = PathBuf::from(entry.path).into();
3033 Ok(Entry {
3034 id: ProjectEntryId::from_proto(entry.id),
3035 kind,
3036 path,
3037 inode: entry.inode,
3038 mtime: mtime.into(),
3039 is_symlink: entry.is_symlink,
3040 is_ignored: entry.is_ignored,
3041 })
3042 } else {
3043 Err(anyhow!(
3044 "missing mtime in remote worktree entry {:?}",
3045 entry.path
3046 ))
3047 }
3048 }
3049}
3050
3051#[cfg(test)]
3052mod tests {
3053 use super::*;
3054 use fs::repository::FakeGitRepository;
3055 use fs::{FakeFs, RealFs};
3056 use gpui::{executor::Deterministic, TestAppContext};
3057 use rand::prelude::*;
3058 use serde_json::json;
3059 use std::{env, fmt::Write};
3060 use util::http::FakeHttpClient;
3061
3062 use util::test::temp_tree;
3063
    // Verifies that traversal respects `.gitignore`: `entries(false)` hides
    // ignored entries ("a/b") while `entries(true)` includes them.
    #[gpui::test]
    async fn test_traversal(cx: &mut TestAppContext) {
        let fs = FakeFs::new(cx.background());
        fs.insert_tree(
            "/root",
            json!({
                ".gitignore": "a/b\n",
                "a": {
                    "b": "",
                    "c": "",
                }
            }),
        )
        .await;

        let http_client = FakeHttpClient::with_404_response();
        let client = cx.read(|cx| Client::new(http_client, cx));

        let tree = Worktree::local(
            client,
            Path::new("/root"),
            true,
            fs,
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;

        tree.read_with(cx, |tree, _| {
            assert_eq!(
                tree.entries(false)
                    .map(|entry| entry.path.as_ref())
                    .collect::<Vec<_>>(),
                vec![
                    Path::new(""),
                    Path::new(".gitignore"),
                    Path::new("a"),
                    Path::new("a/c"),
                ]
            );
            assert_eq!(
                tree.entries(true)
                    .map(|entry| entry.path.as_ref())
                    .collect::<Vec<_>>(),
                vec![
                    Path::new(""),
                    Path::new(".gitignore"),
                    Path::new("a"),
                    Path::new("a/b"),
                    Path::new("a/c"),
                ]
            );
        })
    }
3121
    // Verifies that the scanner terminates when directories contain symlinks
    // back to their ancestors, and that renaming such a symlink is picked up
    // without rescanning into the cycle.
    #[gpui::test(iterations = 10)]
    async fn test_circular_symlinks(executor: Arc<Deterministic>, cx: &mut TestAppContext) {
        let fs = FakeFs::new(cx.background());
        fs.insert_tree(
            "/root",
            json!({
                "lib": {
                    "a": {
                        "a.txt": ""
                    },
                    "b": {
                        "b.txt": ""
                    }
                }
            }),
        )
        .await;
        fs.insert_symlink("/root/lib/a/lib", "..".into()).await;
        fs.insert_symlink("/root/lib/b/lib", "..".into()).await;

        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
        let tree = Worktree::local(
            client,
            Path::new("/root"),
            true,
            fs.clone(),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();

        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;

        // The symlinks appear as entries, but their contents are not recursed into.
        tree.read_with(cx, |tree, _| {
            assert_eq!(
                tree.entries(false)
                    .map(|entry| entry.path.as_ref())
                    .collect::<Vec<_>>(),
                vec![
                    Path::new(""),
                    Path::new("lib"),
                    Path::new("lib/a"),
                    Path::new("lib/a/a.txt"),
                    Path::new("lib/a/lib"),
                    Path::new("lib/b"),
                    Path::new("lib/b/b.txt"),
                    Path::new("lib/b/lib"),
                ]
            );
        });

        fs.rename(
            Path::new("/root/lib/a/lib"),
            Path::new("/root/lib/a/lib-2"),
            Default::default(),
        )
        .await
        .unwrap();
        executor.run_until_parked();
        tree.read_with(cx, |tree, _| {
            assert_eq!(
                tree.entries(false)
                    .map(|entry| entry.path.as_ref())
                    .collect::<Vec<_>>(),
                vec![
                    Path::new(""),
                    Path::new("lib"),
                    Path::new("lib/a"),
                    Path::new("lib/a/a.txt"),
                    Path::new("lib/a/lib-2"),
                    Path::new("lib/b"),
                    Path::new("lib/b/b.txt"),
                    Path::new("lib/b/lib"),
                ]
            );
        });
    }
3201
    // Verifies (on the real file system) that ignore statuses from both an
    // ancestor `.gitignore` and the worktree's own `.gitignore` are applied
    // during the initial scan and maintained across rescans.
    #[gpui::test]
    async fn test_rescan_with_gitignore(cx: &mut TestAppContext) {
        let parent_dir = temp_tree(json!({
            ".gitignore": "ancestor-ignored-file1\nancestor-ignored-file2\n",
            "tree": {
                ".git": {},
                ".gitignore": "ignored-dir\n",
                "tracked-dir": {
                    "tracked-file1": "",
                    "ancestor-ignored-file1": "",
                },
                "ignored-dir": {
                    "ignored-file1": ""
                }
            }
        }));
        let dir = parent_dir.path().join("tree");

        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));

        let tree = Worktree::local(
            client,
            dir.as_path(),
            true,
            Arc::new(RealFs),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;
        tree.flush_fs_events(cx).await;
        cx.read(|cx| {
            let tree = tree.read(cx);
            assert!(
                !tree
                    .entry_for_path("tracked-dir/tracked-file1")
                    .unwrap()
                    .is_ignored
            );
            assert!(
                tree.entry_for_path("tracked-dir/ancestor-ignored-file1")
                    .unwrap()
                    .is_ignored
            );
            assert!(
                tree.entry_for_path("ignored-dir/ignored-file1")
                    .unwrap()
                    .is_ignored
            );
        });

        // Files created after the initial scan get correct statuses too.
        std::fs::write(dir.join("tracked-dir/tracked-file2"), "").unwrap();
        std::fs::write(dir.join("tracked-dir/ancestor-ignored-file2"), "").unwrap();
        std::fs::write(dir.join("ignored-dir/ignored-file2"), "").unwrap();
        tree.flush_fs_events(cx).await;
        cx.read(|cx| {
            let tree = tree.read(cx);
            assert!(
                !tree
                    .entry_for_path("tracked-dir/tracked-file2")
                    .unwrap()
                    .is_ignored
            );
            assert!(
                tree.entry_for_path("tracked-dir/ancestor-ignored-file2")
                    .unwrap()
                    .is_ignored
            );
            assert!(
                tree.entry_for_path("ignored-dir/ignored-file2")
                    .unwrap()
                    .is_ignored
            );
            assert!(tree.entry_for_path(".git").unwrap().is_ignored);
        });
    }
3280
    // Verifies that files map to their innermost containing git repository,
    // that changes inside `.git` bump the repository's scan id, and that
    // deleting `.git` removes the repository.
    #[gpui::test]
    async fn test_git_repository_for_path(cx: &mut TestAppContext) {
        let root = temp_tree(json!({
            "dir1": {
                ".git": {},
                "deps": {
                    "dep1": {
                        ".git": {},
                        "src": {
                            "a.txt": ""
                        }
                    }
                },
                "src": {
                    "b.txt": ""
                }
            },
            "c.txt": "",
        }));

        let http_client = FakeHttpClient::with_404_response();
        let client = cx.read(|cx| Client::new(http_client, cx));
        let tree = Worktree::local(
            client,
            root.path(),
            true,
            Arc::new(RealFs),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();

        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;
        tree.flush_fs_events(cx).await;

        tree.read_with(cx, |tree, _cx| {
            let tree = tree.as_local().unwrap();

            // A file outside any repository has no repo.
            assert!(tree.repo_for("c.txt".as_ref()).is_none());

            let repo = tree.repo_for("dir1/src/b.txt".as_ref()).unwrap();
            assert_eq!(repo.content_path.as_ref(), Path::new("dir1"));
            assert_eq!(repo.git_dir_path.as_ref(), Path::new("dir1/.git"));

            // Nested repositories shadow their ancestors.
            let repo = tree.repo_for("dir1/deps/dep1/src/a.txt".as_ref()).unwrap();
            assert_eq!(repo.content_path.as_ref(), Path::new("dir1/deps/dep1"));
            assert_eq!(repo.git_dir_path.as_ref(), Path::new("dir1/deps/dep1/.git"),);
        });

        let original_scan_id = tree.read_with(cx, |tree, _cx| {
            let tree = tree.as_local().unwrap();
            tree.repo_for("dir1/src/b.txt".as_ref()).unwrap().scan_id
        });

        std::fs::write(root.path().join("dir1/.git/random_new_file"), "hello").unwrap();
        tree.flush_fs_events(cx).await;

        tree.read_with(cx, |tree, _cx| {
            let tree = tree.as_local().unwrap();
            let new_scan_id = tree.repo_for("dir1/src/b.txt".as_ref()).unwrap().scan_id;
            assert_ne!(
                original_scan_id, new_scan_id,
                "original {original_scan_id}, new {new_scan_id}"
            );
        });

        std::fs::remove_dir_all(root.path().join("dir1/.git")).unwrap();
        tree.flush_fs_events(cx).await;

        tree.read_with(cx, |tree, _cx| {
            let tree = tree.as_local().unwrap();

            assert!(tree.repo_for("dir1/src/b.txt".as_ref()).is_none());
        });
    }
3358
3359 #[test]
3360 fn test_changed_repos() {
3361 fn fake_entry(git_dir_path: impl AsRef<Path>, scan_id: usize) -> GitRepositoryEntry {
3362 GitRepositoryEntry {
3363 repo: Arc::new(Mutex::new(FakeGitRepository::default())),
3364 scan_id,
3365 content_path: git_dir_path.as_ref().parent().unwrap().into(),
3366 git_dir_path: git_dir_path.as_ref().into(),
3367 }
3368 }
3369
3370 let prev_repos: Vec<GitRepositoryEntry> = vec![
3371 fake_entry("/.git", 0),
3372 fake_entry("/a/.git", 0),
3373 fake_entry("/a/b/.git", 0),
3374 ];
3375
3376 let new_repos: Vec<GitRepositoryEntry> = vec![
3377 fake_entry("/a/.git", 1),
3378 fake_entry("/a/b/.git", 0),
3379 fake_entry("/a/c/.git", 0),
3380 ];
3381
3382 let res = LocalWorktree::changed_repos(&prev_repos, &new_repos);
3383
3384 // Deletion retained
3385 assert!(res
3386 .iter()
3387 .find(|repo| repo.git_dir_path.as_ref() == Path::new("/.git") && repo.scan_id == 0)
3388 .is_some());
3389
3390 // Update retained
3391 assert!(res
3392 .iter()
3393 .find(|repo| repo.git_dir_path.as_ref() == Path::new("/a/.git") && repo.scan_id == 1)
3394 .is_some());
3395
3396 // Addition retained
3397 assert!(res
3398 .iter()
3399 .find(|repo| repo.git_dir_path.as_ref() == Path::new("/a/c/.git") && repo.scan_id == 0)
3400 .is_some());
3401
3402 // Nochange, not retained
3403 assert!(res
3404 .iter()
3405 .find(|repo| repo.git_dir_path.as_ref() == Path::new("/a/b/.git") && repo.scan_id == 0)
3406 .is_none());
3407 }
3408
    // Verifies that files created through `write_file` are picked up by the
    // worktree with the correct ignore status, even inside ignored
    // directories (which are not normally scanned).
    #[gpui::test]
    async fn test_write_file(cx: &mut TestAppContext) {
        let dir = temp_tree(json!({
            ".git": {},
            ".gitignore": "ignored-dir\n",
            "tracked-dir": {},
            "ignored-dir": {}
        }));

        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));

        let tree = Worktree::local(
            client,
            dir.path(),
            true,
            Arc::new(RealFs),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;
        tree.flush_fs_events(cx).await;

        tree.update(cx, |tree, cx| {
            tree.as_local().unwrap().write_file(
                Path::new("tracked-dir/file.txt"),
                "hello".into(),
                Default::default(),
                cx,
            )
        })
        .await
        .unwrap();
        tree.update(cx, |tree, cx| {
            tree.as_local().unwrap().write_file(
                Path::new("ignored-dir/file.txt"),
                "world".into(),
                Default::default(),
                cx,
            )
        })
        .await
        .unwrap();

        tree.read_with(cx, |tree, _| {
            let tracked = tree.entry_for_path("tracked-dir/file.txt").unwrap();
            let ignored = tree.entry_for_path("ignored-dir/file.txt").unwrap();
            assert!(!tracked.is_ignored);
            assert!(ignored.is_ignored);
        });
    }
3462
3463 #[gpui::test(iterations = 30)]
3464 async fn test_create_directory(cx: &mut TestAppContext) {
3465 let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
3466
3467 let fs = FakeFs::new(cx.background());
3468 fs.insert_tree(
3469 "/root",
3470 json!({
3471 "b": {},
3472 "c": {},
3473 "d": {},
3474 }),
3475 )
3476 .await;
3477
3478 let tree = Worktree::local(
3479 client,
3480 "/root".as_ref(),
3481 true,
3482 fs,
3483 Default::default(),
3484 &mut cx.to_async(),
3485 )
3486 .await
3487 .unwrap();
3488
3489 let entry = tree
3490 .update(cx, |tree, cx| {
3491 tree.as_local_mut()
3492 .unwrap()
3493 .create_entry("a/e".as_ref(), true, cx)
3494 })
3495 .await
3496 .unwrap();
3497 assert!(entry.is_dir());
3498
3499 cx.foreground().run_until_parked();
3500
3501 tree.read_with(cx, |tree, _| {
3502 assert_eq!(tree.entry_for_path("a/e").unwrap().kind, EntryKind::Dir);
3503 });
3504 }
3505
3506 #[gpui::test(iterations = 100)]
3507 async fn test_random_worktree_changes(cx: &mut TestAppContext, mut rng: StdRng) {
3508 let operations = env::var("OPERATIONS")
3509 .map(|o| o.parse().unwrap())
3510 .unwrap_or(40);
3511 let initial_entries = env::var("INITIAL_ENTRIES")
3512 .map(|o| o.parse().unwrap())
3513 .unwrap_or(20);
3514
3515 let root_dir = Path::new("/test");
3516 let fs = FakeFs::new(cx.background()) as Arc<dyn Fs>;
3517 fs.as_fake().insert_tree(root_dir, json!({})).await;
3518 for _ in 0..initial_entries {
3519 randomly_mutate_tree(&fs, root_dir, 1.0, &mut rng).await;
3520 }
3521 log::info!("generated initial tree");
3522
3523 let next_entry_id = Arc::new(AtomicUsize::default());
3524 let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
3525 let worktree = Worktree::local(
3526 client.clone(),
3527 root_dir,
3528 true,
3529 fs.clone(),
3530 next_entry_id.clone(),
3531 &mut cx.to_async(),
3532 )
3533 .await
3534 .unwrap();
3535
3536 worktree
3537 .update(cx, |tree, _| tree.as_local_mut().unwrap().scan_complete())
3538 .await;
3539
3540 // After the initial scan is complete, the `UpdatedEntries` event can
3541 // be used to follow along with all changes to the worktree's snapshot.
3542 worktree.update(cx, |tree, cx| {
3543 let mut paths = tree
3544 .as_local()
3545 .unwrap()
3546 .paths()
3547 .cloned()
3548 .collect::<Vec<_>>();
3549
3550 cx.subscribe(&worktree, move |tree, _, event, _| {
3551 if let Event::UpdatedEntries(changes) = event {
3552 for (path, change_type) in changes.iter() {
3553 let path = path.clone();
3554 let ix = match paths.binary_search(&path) {
3555 Ok(ix) | Err(ix) => ix,
3556 };
3557 match change_type {
3558 PathChange::Added => {
3559 assert_ne!(paths.get(ix), Some(&path));
3560 paths.insert(ix, path);
3561 }
3562 PathChange::Removed => {
3563 assert_eq!(paths.get(ix), Some(&path));
3564 paths.remove(ix);
3565 }
3566 PathChange::Updated => {
3567 assert_eq!(paths.get(ix), Some(&path));
3568 }
3569 PathChange::AddedOrUpdated => {
3570 if paths[ix] != path {
3571 paths.insert(ix, path);
3572 }
3573 }
3574 }
3575 }
3576 let new_paths = tree.paths().cloned().collect::<Vec<_>>();
3577 assert_eq!(paths, new_paths, "incorrect changes: {:?}", changes);
3578 }
3579 })
3580 .detach();
3581 });
3582
3583 let mut snapshots = Vec::new();
3584 let mut mutations_len = operations;
3585 while mutations_len > 1 {
3586 randomly_mutate_tree(&fs, root_dir, 1.0, &mut rng).await;
3587 let buffered_event_count = fs.as_fake().buffered_event_count().await;
3588 if buffered_event_count > 0 && rng.gen_bool(0.3) {
3589 let len = rng.gen_range(0..=buffered_event_count);
3590 log::info!("flushing {} events", len);
3591 fs.as_fake().flush_events(len).await;
3592 } else {
3593 randomly_mutate_tree(&fs, root_dir, 0.6, &mut rng).await;
3594 mutations_len -= 1;
3595 }
3596
3597 cx.foreground().run_until_parked();
3598 if rng.gen_bool(0.2) {
3599 log::info!("storing snapshot {}", snapshots.len());
3600 let snapshot =
3601 worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
3602 snapshots.push(snapshot);
3603 }
3604 }
3605
3606 log::info!("quiescing");
3607 fs.as_fake().flush_events(usize::MAX).await;
3608 cx.foreground().run_until_parked();
3609 let snapshot = worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
3610 snapshot.check_invariants();
3611
3612 {
3613 let new_worktree = Worktree::local(
3614 client.clone(),
3615 root_dir,
3616 true,
3617 fs.clone(),
3618 next_entry_id,
3619 &mut cx.to_async(),
3620 )
3621 .await
3622 .unwrap();
3623 new_worktree
3624 .update(cx, |tree, _| tree.as_local_mut().unwrap().scan_complete())
3625 .await;
3626 let new_snapshot =
3627 new_worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
3628 assert_eq!(snapshot.to_vec(true), new_snapshot.to_vec(true));
3629 }
3630
3631 for (i, mut prev_snapshot) in snapshots.into_iter().enumerate() {
3632 let include_ignored = rng.gen::<bool>();
3633 if !include_ignored {
3634 let mut entries_by_path_edits = Vec::new();
3635 let mut entries_by_id_edits = Vec::new();
3636 for entry in prev_snapshot
3637 .entries_by_id
3638 .cursor::<()>()
3639 .filter(|e| e.is_ignored)
3640 {
3641 entries_by_path_edits.push(Edit::Remove(PathKey(entry.path.clone())));
3642 entries_by_id_edits.push(Edit::Remove(entry.id));
3643 }
3644
3645 prev_snapshot
3646 .entries_by_path
3647 .edit(entries_by_path_edits, &());
3648 prev_snapshot.entries_by_id.edit(entries_by_id_edits, &());
3649 }
3650
3651 let update = snapshot.build_update(&prev_snapshot, 0, 0, include_ignored);
3652 prev_snapshot.apply_remote_update(update.clone()).unwrap();
3653 assert_eq!(
3654 prev_snapshot.to_vec(include_ignored),
3655 snapshot.to_vec(include_ignored),
3656 "wrong update for snapshot {i}. update: {:?}",
3657 update
3658 );
3659 }
3660 }
3661
    /// Applies one random filesystem mutation beneath `root_path`: creating a
    /// file or directory, writing a `.gitignore`, renaming an entry, or
    /// deleting one.
    ///
    /// `insertion_probability` biases the choice toward creation (the callers
    /// pass 1.0 while generating the initial tree and a lower value during
    /// steady-state mutation). Note that the mutation chosen, and therefore
    /// the exact sequence of RNG draws, is part of the test's reproducibility:
    /// do not reorder the `rng` calls below.
    async fn randomly_mutate_tree(
        fs: &Arc<dyn Fs>,
        root_path: &Path,
        insertion_probability: f64,
        rng: &mut impl Rng,
    ) {
        // Partition the existing paths under the root into files and dirs.
        // `root_path` itself lands in `dirs`, so `dirs` is never empty.
        let mut files = Vec::new();
        let mut dirs = Vec::new();
        for path in fs.as_fake().paths() {
            if path.starts_with(root_path) {
                if fs.is_file(&path).await {
                    files.push(path);
                } else {
                    dirs.push(path);
                }
            }
        }

        // Force an insertion when only the bare root exists; otherwise the
        // rename/delete branch below would have nothing valid to pick
        // (`dirs[1..]` excludes the root and `files` is empty).
        if (files.is_empty() && dirs.len() == 1) || rng.gen_bool(insertion_probability) {
            let path = dirs.choose(rng).unwrap();
            let new_path = path.join(gen_name(rng));

            if rng.gen() {
                log::info!(
                    "creating dir {:?}",
                    new_path.strip_prefix(root_path).unwrap()
                );
                fs.create_dir(&new_path).await.unwrap();
            } else {
                log::info!(
                    "creating file {:?}",
                    new_path.strip_prefix(root_path).unwrap()
                );
                fs.create_file(&new_path, Default::default()).await.unwrap();
            }
        } else if rng.gen_bool(0.05) {
            // Occasionally write a `.gitignore` in some directory, listing a
            // random subset of the files and dirs beneath it.
            let ignore_dir_path = dirs.choose(rng).unwrap();
            let ignore_path = ignore_dir_path.join(&*GITIGNORE);

            // `starts_with` also matches `ignore_dir_path` itself, so
            // `subdirs` always contains at least that directory.
            let subdirs = dirs
                .iter()
                .filter(|d| d.starts_with(&ignore_dir_path))
                .cloned()
                .collect::<Vec<_>>();
            let subfiles = files
                .iter()
                .filter(|d| d.starts_with(&ignore_dir_path))
                .cloned()
                .collect::<Vec<_>>();
            let files_to_ignore = {
                let len = rng.gen_range(0..=subfiles.len());
                subfiles.choose_multiple(rng, len)
            };
            // NOTE(review): exclusive upper bound here (vs. inclusive for
            // files above), so it never ignores *every* subdirectory —
            // presumably to avoid the gitignore's own directory ignoring
            // itself; confirm before changing.
            let dirs_to_ignore = {
                let len = rng.gen_range(0..subdirs.len());
                subdirs.choose_multiple(rng, len)
            };

            // Build the ignore file contents: one path per line, relative to
            // the directory containing the `.gitignore`.
            let mut ignore_contents = String::new();
            for path_to_ignore in files_to_ignore.chain(dirs_to_ignore) {
                writeln!(
                    ignore_contents,
                    "{}",
                    path_to_ignore
                        .strip_prefix(&ignore_dir_path)
                        .unwrap()
                        .to_str()
                        .unwrap()
                )
                .unwrap();
            }
            log::info!(
                "creating gitignore {:?} with contents:\n{}",
                ignore_path.strip_prefix(&root_path).unwrap(),
                ignore_contents
            );
            fs.save(
                &ignore_path,
                &ignore_contents.as_str().into(),
                Default::default(),
            )
            .await
            .unwrap();
        } else {
            // Pick an existing entry to rename or delete. `dirs[1..]` skips
            // the worktree root so it is never renamed or removed.
            let old_path = {
                let file_path = files.choose(rng);
                let dir_path = dirs[1..].choose(rng);
                file_path.into_iter().chain(dir_path).choose(rng).unwrap()
            };

            let is_rename = rng.gen();
            if is_rename {
                // Choose a destination parent that isn't inside the entry
                // being moved (moving a dir into itself would be invalid).
                let new_path_parent = dirs
                    .iter()
                    .filter(|d| !d.starts_with(old_path))
                    .choose(rng)
                    .unwrap();

                // Sometimes replace an existing directory wholesale instead
                // of moving into it under a fresh name.
                let overwrite_existing_dir =
                    !old_path.starts_with(&new_path_parent) && rng.gen_bool(0.3);
                let new_path = if overwrite_existing_dir {
                    fs.remove_dir(
                        &new_path_parent,
                        RemoveOptions {
                            recursive: true,
                            ignore_if_not_exists: true,
                        },
                    )
                    .await
                    .unwrap();
                    new_path_parent.to_path_buf()
                } else {
                    new_path_parent.join(gen_name(rng))
                };

                log::info!(
                    "renaming {:?} to {}{:?}",
                    old_path.strip_prefix(&root_path).unwrap(),
                    if overwrite_existing_dir {
                        "overwrite "
                    } else {
                        ""
                    },
                    new_path.strip_prefix(&root_path).unwrap()
                );
                fs.rename(
                    &old_path,
                    &new_path,
                    fs::RenameOptions {
                        overwrite: true,
                        ignore_if_exists: true,
                    },
                )
                .await
                .unwrap();
            } else if fs.is_file(&old_path).await {
                log::info!(
                    "deleting file {:?}",
                    old_path.strip_prefix(&root_path).unwrap()
                );
                fs.remove_file(old_path, Default::default()).await.unwrap();
            } else {
                log::info!(
                    "deleting dir {:?}",
                    old_path.strip_prefix(&root_path).unwrap()
                );
                fs.remove_dir(
                    &old_path,
                    RemoveOptions {
                        recursive: true,
                        ignore_if_not_exists: true,
                    },
                )
                .await
                .unwrap();
            }
        }
    }
3820
3821 fn gen_name(rng: &mut impl Rng) -> String {
3822 (0..6)
3823 .map(|_| rng.sample(rand::distributions::Alphanumeric))
3824 .map(char::from)
3825 .collect()
3826 }
3827
3828 impl LocalSnapshot {
3829 fn check_invariants(&self) {
3830 let mut files = self.files(true, 0);
3831 let mut visible_files = self.files(false, 0);
3832 for entry in self.entries_by_path.cursor::<()>() {
3833 if entry.is_file() {
3834 assert_eq!(files.next().unwrap().inode, entry.inode);
3835 if !entry.is_ignored {
3836 assert_eq!(visible_files.next().unwrap().inode, entry.inode);
3837 }
3838 }
3839 }
3840 assert!(files.next().is_none());
3841 assert!(visible_files.next().is_none());
3842
3843 let mut bfs_paths = Vec::new();
3844 let mut stack = vec![Path::new("")];
3845 while let Some(path) = stack.pop() {
3846 bfs_paths.push(path);
3847 let ix = stack.len();
3848 for child_entry in self.child_entries(path) {
3849 stack.insert(ix, &child_entry.path);
3850 }
3851 }
3852
3853 let dfs_paths_via_iter = self
3854 .entries_by_path
3855 .cursor::<()>()
3856 .map(|e| e.path.as_ref())
3857 .collect::<Vec<_>>();
3858 assert_eq!(bfs_paths, dfs_paths_via_iter);
3859
3860 let dfs_paths_via_traversal = self
3861 .entries(true)
3862 .map(|e| e.path.as_ref())
3863 .collect::<Vec<_>>();
3864 assert_eq!(dfs_paths_via_traversal, dfs_paths_via_iter);
3865
3866 for ignore_parent_abs_path in self.ignores_by_parent_abs_path.keys() {
3867 let ignore_parent_path =
3868 ignore_parent_abs_path.strip_prefix(&self.abs_path).unwrap();
3869 assert!(self.entry_for_path(&ignore_parent_path).is_some());
3870 assert!(self
3871 .entry_for_path(ignore_parent_path.join(&*GITIGNORE))
3872 .is_some());
3873 }
3874 }
3875
3876 fn to_vec(&self, include_ignored: bool) -> Vec<(&Path, u64, bool)> {
3877 let mut paths = Vec::new();
3878 for entry in self.entries_by_path.cursor::<()>() {
3879 if include_ignored || !entry.is_ignored {
3880 paths.push((entry.path.as_ref(), entry.inode, entry.is_ignored));
3881 }
3882 }
3883 paths.sort_by(|a, b| a.0.cmp(b.0));
3884 paths
3885 }
3886 }
3887}