1use crate::{
2 copy_recursive, ignore::IgnoreStack, DiagnosticSummary, ProjectEntryId, RemoveOptions,
3};
4use ::ignore::gitignore::{Gitignore, GitignoreBuilder};
5use anyhow::{anyhow, Context, Result};
6use client::{proto, Client};
7use clock::ReplicaId;
8use collections::{HashMap, VecDeque};
9use fs::{repository::GitRepository, Fs, LineEnding};
10use futures::{
11 channel::{
12 mpsc::{self, UnboundedSender},
13 oneshot,
14 },
15 select_biased,
16 task::Poll,
17 Stream, StreamExt,
18};
19use fuzzy::CharBag;
20use git::{DOT_GIT, GITIGNORE};
21use gpui::{executor, AppContext, AsyncAppContext, Entity, ModelContext, ModelHandle, Task};
22use language::{
23 proto::{
24 deserialize_fingerprint, deserialize_version, serialize_fingerprint, serialize_line_ending,
25 serialize_version,
26 },
27 Buffer, DiagnosticEntry, File as _, PointUtf16, Rope, RopeFingerprint, Unclipped,
28};
29use parking_lot::Mutex;
30use postage::{
31 barrier,
32 prelude::{Sink as _, Stream as _},
33 watch,
34};
35use smol::channel::{self, Sender};
36use std::{
37 any::Any,
38 cmp::{self, Ordering},
39 convert::TryFrom,
40 ffi::OsStr,
41 fmt,
42 future::Future,
43 mem,
44 ops::{Deref, DerefMut},
45 path::{Path, PathBuf},
46 pin::Pin,
47 sync::{
48 atomic::{AtomicUsize, Ordering::SeqCst},
49 Arc,
50 },
51 time::{Duration, SystemTime},
52};
53use sum_tree::{Bias, Edit, SeekTarget, SumTree, TreeMap, TreeSet};
54use util::{paths::HOME, ResultExt, TryFutureExt};
55
/// Identifies a worktree within a project. Derived from the gpui model id
/// for local worktrees, and from the host's id for remote ones.
#[derive(Copy, Clone, PartialEq, Eq, Debug, Hash, PartialOrd, Ord)]
pub struct WorktreeId(usize);
58
/// A worktree is either backed by the local filesystem (`Local`) or is a
/// replica of a worktree living on a remote collaborator's machine (`Remote`).
pub enum Worktree {
    Local(LocalWorktree),
    Remote(RemoteWorktree),
}
63
/// A worktree rooted in the local filesystem, kept up to date by a
/// background scanner task.
pub struct LocalWorktree {
    // Latest snapshot applied on the foreground thread.
    snapshot: LocalSnapshot,
    // Sends paths that need rescanning; the barrier sender is dropped by the
    // scanner once the batch has been processed.
    path_changes_tx: channel::Sender<(Vec<PathBuf>, barrier::Sender)>,
    // Watch pair used by `scan_complete` to await the end of a scan.
    is_scanning: (watch::Sender<bool>, watch::Receiver<bool>),
    _background_scanner_task: Task<()>,
    // Present while this worktree is shared with collaborators.
    share: Option<ShareState>,
    diagnostics: HashMap<Arc<Path>, Vec<DiagnosticEntry<Unclipped<PointUtf16>>>>,
    diagnostic_summaries: TreeMap<PathKey, DiagnosticSummary>,
    client: Arc<Client>,
    fs: Arc<dyn Fs>,
    visible: bool,
}
76
/// A replica of a worktree hosted by a remote collaborator, updated via
/// `proto::UpdateWorktree` messages.
pub struct RemoteWorktree {
    // Foreground copy of the snapshot, refreshed from `background_snapshot`.
    snapshot: Snapshot,
    // Snapshot that remote updates are applied to on a background thread.
    background_snapshot: Arc<Mutex<Snapshot>>,
    project_id: u64,
    client: Arc<Client>,
    // `None` once the host has disconnected.
    updates_tx: Option<UnboundedSender<proto::UpdateWorktree>>,
    // Waiters keyed by the scan id they are waiting to observe, kept sorted.
    snapshot_subscriptions: VecDeque<(usize, oneshot::Sender<()>)>,
    replica_id: ReplicaId,
    diagnostic_summaries: TreeMap<PathKey, DiagnosticSummary>,
    visible: bool,
    disconnected: bool,
}
89
/// An immutable view of a worktree's entries at a point in time.
#[derive(Clone)]
pub struct Snapshot {
    id: WorktreeId,
    abs_path: Arc<Path>,
    root_name: String,
    // Lowercased characters of `root_name`, used for fuzzy matching.
    root_char_bag: CharBag,
    // Entries indexed two ways: by path and by id.
    entries_by_path: SumTree<Entry>,
    entries_by_id: SumTree<PathEntry>,
    // `scan_id` advances when a scan starts; `completed_scan_id` catches up
    // when it finishes, so scan_id > completed_scan_id means a scan is running.
    scan_id: usize,
    completed_scan_id: usize,
}
101
/// A git repository discovered inside the worktree.
#[derive(Clone)]
pub struct GitRepositoryEntry {
    pub(crate) repo: Arc<Mutex<dyn GitRepository>>,

    // Scan id at which this repository entry was last updated.
    pub(crate) scan_id: usize,
    // Path to folder containing the .git file or directory
    pub(crate) content_path: Arc<Path>,
    // Path to the actual .git folder.
    // Note: if .git is a file, this points to the folder indicated by the .git file
    pub(crate) git_dir_path: Arc<Path>,
}
113
114impl std::fmt::Debug for GitRepositoryEntry {
115 fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
116 f.debug_struct("GitRepositoryEntry")
117 .field("content_path", &self.content_path)
118 .field("git_dir_path", &self.git_dir_path)
119 .finish()
120 }
121}
122
/// A `Snapshot` augmented with local-only state: gitignore files, git
/// repositories, and bookkeeping for entry-id reuse across renames.
#[derive(Debug)]
pub struct LocalSnapshot {
    // Parsed .gitignore files keyed by the directory containing them,
    // paired with the scan id at which they were last seen.
    ignores_by_parent_abs_path: HashMap<Arc<Path>, (Arc<Gitignore>, usize)>,
    git_repositories: Vec<GitRepositoryEntry>,
    // Maps filesystem inode numbers to the ids of removed entries so the id
    // can be reused if the same file reappears.
    removed_entry_ids: HashMap<u64, ProjectEntryId>,
    // Shared counter; cloned snapshots allocate ids from the same sequence.
    next_entry_id: Arc<AtomicUsize>,
    snapshot: Snapshot,
}
131
132impl Clone for LocalSnapshot {
133 fn clone(&self) -> Self {
134 Self {
135 ignores_by_parent_abs_path: self.ignores_by_parent_abs_path.clone(),
136 git_repositories: self.git_repositories.iter().cloned().collect(),
137 removed_entry_ids: self.removed_entry_ids.clone(),
138 next_entry_id: self.next_entry_id.clone(),
139 snapshot: self.snapshot.clone(),
140 }
141 }
142}
143
144impl Deref for LocalSnapshot {
145 type Target = Snapshot;
146
147 fn deref(&self) -> &Self::Target {
148 &self.snapshot
149 }
150}
151
152impl DerefMut for LocalSnapshot {
153 fn deref_mut(&mut self) -> &mut Self::Target {
154 &mut self.snapshot
155 }
156}
157
/// Messages sent from the background scanner to the foreground model.
enum ScanState {
    /// A scan has begun.
    Started,
    /// The scanner produced a new snapshot.
    Updated {
        snapshot: LocalSnapshot,
        // Paths that changed, with the kind of change.
        changes: HashMap<Arc<Path>, PathChange>,
        // Dropped once the update is applied, waking any waiting caller.
        barrier: Option<barrier::Sender>,
        // Whether the scan is still in progress after this update.
        scanning: bool,
    },
}
167
/// State held while a local worktree is being shared with collaborators.
struct ShareState {
    project_id: u64,
    // New snapshots pushed here are diffed and streamed to the server.
    snapshots_tx: watch::Sender<LocalSnapshot>,
    // Nudged to retry sending updates after a failure.
    resume_updates: watch::Sender<()>,
    _maintain_remote_snapshot: Task<Option<()>>,
}
174
/// Events emitted by a `Worktree` model.
pub enum Event {
    UpdatedEntries(HashMap<Arc<Path>, PathChange>),
    UpdatedGitRepositories(Vec<GitRepositoryEntry>),
}
179
// Registers `Worktree` as a gpui model emitting `Event`s.
impl Entity for Worktree {
    type Event = Event;
}
183
184impl Worktree {
    /// Creates a local worktree rooted at `path`.
    ///
    /// Stats the root path up front, builds an initial snapshot, then spawns
    /// two tasks: a foreground task that applies `ScanState` messages to the
    /// model, and a background scanner that watches the filesystem and
    /// produces those messages.
    pub async fn local(
        client: Arc<Client>,
        path: impl Into<Arc<Path>>,
        visible: bool,
        fs: Arc<dyn Fs>,
        next_entry_id: Arc<AtomicUsize>,
        cx: &mut AsyncAppContext,
    ) -> Result<ModelHandle<Self>> {
        // After determining whether the root entry is a file or a directory, populate the
        // snapshot's "root name", which will be used for the purpose of fuzzy matching.
        let abs_path = path.into();
        let metadata = fs
            .metadata(&abs_path)
            .await
            .context("failed to stat worktree path")?;

        Ok(cx.add_model(move |cx: &mut ModelContext<Worktree>| {
            let root_name = abs_path
                .file_name()
                .map_or(String::new(), |f| f.to_string_lossy().to_string());

            let mut snapshot = LocalSnapshot {
                ignores_by_parent_abs_path: Default::default(),
                git_repositories: Default::default(),
                removed_entry_ids: Default::default(),
                next_entry_id,
                snapshot: Snapshot {
                    id: WorktreeId::from_usize(cx.model_id()),
                    abs_path: abs_path.clone(),
                    root_name: root_name.clone(),
                    root_char_bag: root_name.chars().map(|c| c.to_ascii_lowercase()).collect(),
                    entries_by_path: Default::default(),
                    entries_by_id: Default::default(),
                    // scan_id leads completed_scan_id: the initial scan is pending.
                    scan_id: 1,
                    completed_scan_id: 0,
                },
            };

            // Seed the snapshot with the root entry if the path exists on disk.
            if let Some(metadata) = metadata {
                snapshot.insert_entry(
                    Entry::new(
                        Arc::from(Path::new("")),
                        &metadata,
                        &snapshot.next_entry_id,
                        snapshot.root_char_bag,
                    ),
                    fs.as_ref(),
                );
            }

            let (path_changes_tx, path_changes_rx) = channel::unbounded();
            let (scan_states_tx, mut scan_states_rx) = mpsc::unbounded();

            // Forward scan states from the scanner to the model; ends when
            // either the scanner or the model is dropped.
            cx.spawn_weak(|this, mut cx| async move {
                while let Some((state, this)) = scan_states_rx.next().await.zip(this.upgrade(&cx)) {
                    this.update(&mut cx, |this, cx| {
                        let this = this.as_local_mut().unwrap();
                        match state {
                            ScanState::Started => {
                                *this.is_scanning.0.borrow_mut() = true;
                            }
                            ScanState::Updated {
                                snapshot,
                                changes,
                                barrier,
                                scanning,
                            } => {
                                *this.is_scanning.0.borrow_mut() = scanning;
                                this.set_snapshot(snapshot, cx);
                                cx.emit(Event::UpdatedEntries(changes));
                                // Dropping the barrier wakes any caller awaiting this batch.
                                drop(barrier);
                            }
                        }
                        cx.notify();
                    });
                }
            })
            .detach();

            let background_scanner_task = cx.background().spawn({
                let fs = fs.clone();
                let snapshot = snapshot.clone();
                let background = cx.background().clone();
                async move {
                    // Watch the root with 100ms debounce before scanning.
                    let events = fs.watch(&abs_path, Duration::from_millis(100)).await;
                    BackgroundScanner::new(
                        snapshot,
                        fs,
                        scan_states_tx,
                        background,
                        path_changes_rx,
                    )
                    .run(events)
                    .await;
                }
            });

            Worktree::Local(LocalWorktree {
                snapshot,
                is_scanning: watch::channel_with(true),
                share: None,
                path_changes_tx,
                _background_scanner_task: background_scanner_task,
                diagnostics: Default::default(),
                diagnostic_summaries: Default::default(),
                client,
                fs,
                visible,
            })
        }))
    }
296
    /// Creates a remote (replicated) worktree from metadata sent by the host.
    ///
    /// Spawns a background task that applies incoming `UpdateWorktree`
    /// messages to a shared snapshot, and a foreground task that copies that
    /// snapshot into the model and wakes snapshot subscribers.
    pub fn remote(
        project_remote_id: u64,
        replica_id: ReplicaId,
        worktree: proto::WorktreeMetadata,
        client: Arc<Client>,
        cx: &mut AppContext,
    ) -> ModelHandle<Self> {
        cx.add_model(|cx: &mut ModelContext<Self>| {
            let snapshot = Snapshot {
                id: WorktreeId(worktree.id as usize),
                abs_path: Arc::from(PathBuf::from(worktree.abs_path)),
                root_name: worktree.root_name.clone(),
                root_char_bag: worktree
                    .root_name
                    .chars()
                    .map(|c| c.to_ascii_lowercase())
                    .collect(),
                entries_by_path: Default::default(),
                entries_by_id: Default::default(),
                scan_id: 1,
                completed_scan_id: 0,
            };

            let (updates_tx, mut updates_rx) = mpsc::unbounded();
            let background_snapshot = Arc::new(Mutex::new(snapshot.clone()));
            let (mut snapshot_updated_tx, mut snapshot_updated_rx) = watch::channel();

            // Apply remote updates off the main thread; malformed updates are
            // logged rather than propagated.
            cx.background()
                .spawn({
                    let background_snapshot = background_snapshot.clone();
                    async move {
                        while let Some(update) = updates_rx.next().await {
                            if let Err(error) =
                                background_snapshot.lock().apply_remote_update(update)
                            {
                                log::error!("error applying worktree update: {}", error);
                            }
                            snapshot_updated_tx.send(()).await.ok();
                        }
                    }
                })
                .detach();

            cx.spawn_weak(|this, mut cx| async move {
                while (snapshot_updated_rx.recv().await).is_some() {
                    if let Some(this) = this.upgrade(&cx) {
                        this.update(&mut cx, |this, cx| {
                            let this = this.as_remote_mut().unwrap();
                            this.snapshot = this.background_snapshot.lock().clone();
                            cx.emit(Event::UpdatedEntries(Default::default()));
                            cx.notify();
                            // Resolve subscriptions whose scan id has now been
                            // observed; the deque is kept sorted by scan id.
                            while let Some((scan_id, _)) = this.snapshot_subscriptions.front() {
                                if this.observed_snapshot(*scan_id) {
                                    let (_, tx) = this.snapshot_subscriptions.pop_front().unwrap();
                                    let _ = tx.send(());
                                } else {
                                    break;
                                }
                            }
                        });
                    } else {
                        break;
                    }
                }
            })
            .detach();

            Worktree::Remote(RemoteWorktree {
                project_id: project_remote_id,
                replica_id,
                snapshot: snapshot.clone(),
                background_snapshot,
                updates_tx: Some(updates_tx),
                snapshot_subscriptions: Default::default(),
                client: client.clone(),
                diagnostic_summaries: Default::default(),
                visible: worktree.visible,
                disconnected: false,
            })
        })
    }
378
379 pub fn as_local(&self) -> Option<&LocalWorktree> {
380 if let Worktree::Local(worktree) = self {
381 Some(worktree)
382 } else {
383 None
384 }
385 }
386
387 pub fn as_remote(&self) -> Option<&RemoteWorktree> {
388 if let Worktree::Remote(worktree) = self {
389 Some(worktree)
390 } else {
391 None
392 }
393 }
394
395 pub fn as_local_mut(&mut self) -> Option<&mut LocalWorktree> {
396 if let Worktree::Local(worktree) = self {
397 Some(worktree)
398 } else {
399 None
400 }
401 }
402
403 pub fn as_remote_mut(&mut self) -> Option<&mut RemoteWorktree> {
404 if let Worktree::Remote(worktree) = self {
405 Some(worktree)
406 } else {
407 None
408 }
409 }
410
411 pub fn is_local(&self) -> bool {
412 matches!(self, Worktree::Local(_))
413 }
414
415 pub fn is_remote(&self) -> bool {
416 !self.is_local()
417 }
418
    /// Returns a clone of the current snapshot. For local worktrees this
    /// discards the local-only state and yields the plain `Snapshot`.
    pub fn snapshot(&self) -> Snapshot {
        match self {
            Worktree::Local(worktree) => worktree.snapshot().snapshot,
            Worktree::Remote(worktree) => worktree.snapshot(),
        }
    }
425
    /// The id of the most recently started scan.
    pub fn scan_id(&self) -> usize {
        match self {
            Worktree::Local(worktree) => worktree.snapshot.scan_id,
            Worktree::Remote(worktree) => worktree.snapshot.scan_id,
        }
    }
432
    /// The id of the most recently completed scan.
    pub fn completed_scan_id(&self) -> usize {
        match self {
            Worktree::Local(worktree) => worktree.snapshot.completed_scan_id,
            Worktree::Remote(worktree) => worktree.snapshot.completed_scan_id,
        }
    }
439
    /// Whether this worktree is shown in the project panel / UI.
    pub fn is_visible(&self) -> bool {
        match self {
            Worktree::Local(worktree) => worktree.visible,
            Worktree::Remote(worktree) => worktree.visible,
        }
    }
446
    /// The collaboration replica id: the host (local worktree) is always 0.
    pub fn replica_id(&self) -> ReplicaId {
        match self {
            Worktree::Local(_) => 0,
            Worktree::Remote(worktree) => worktree.replica_id,
        }
    }
453
    /// Iterates over per-path diagnostic summaries, cloning each path and
    /// copying the (small, `Copy`) summary.
    pub fn diagnostic_summaries(
        &self,
    ) -> impl Iterator<Item = (Arc<Path>, DiagnosticSummary)> + '_ {
        match self {
            Worktree::Local(worktree) => &worktree.diagnostic_summaries,
            Worktree::Remote(worktree) => &worktree.diagnostic_summaries,
        }
        .iter()
        .map(|(path, summary)| (path.0.clone(), *summary))
    }
464
    /// The absolute filesystem path of the worktree root.
    pub fn abs_path(&self) -> Arc<Path> {
        match self {
            Worktree::Local(worktree) => worktree.abs_path.clone(),
            Worktree::Remote(worktree) => worktree.abs_path.clone(),
        }
    }
471}
472
473impl LocalWorktree {
    /// Whether the given absolute path falls under this worktree's root.
    pub fn contains_abs_path(&self, path: &Path) -> bool {
        path.starts_with(&self.abs_path)
    }
477
478 fn absolutize(&self, path: &Path) -> PathBuf {
479 if path.file_name().is_some() {
480 self.abs_path.join(path)
481 } else {
482 self.abs_path.to_path_buf()
483 }
484 }
485
    /// Loads the file at `path` into a new `Buffer` model, seeding its git
    /// diff base and kicking off an initial diff recalculation.
    pub(crate) fn load_buffer(
        &mut self,
        path: &Path,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<ModelHandle<Buffer>>> {
        let path = Arc::from(path);
        cx.spawn(move |this, mut cx| async move {
            let (file, contents, diff_base) = this
                .update(&mut cx, |t, cx| t.as_local().unwrap().load(&path, cx))
                .await?;
            Ok(cx.add_model(|cx| {
                // Replica id 0: buffers originate on the host.
                let mut buffer = Buffer::from_file(0, contents, diff_base, Arc::new(file), cx);
                buffer.git_diff_recalc(cx);
                buffer
            }))
        })
    }
503
504 pub fn diagnostics_for_path(
505 &self,
506 path: &Path,
507 ) -> Option<Vec<DiagnosticEntry<Unclipped<PointUtf16>>>> {
508 self.diagnostics.get(path).cloned()
509 }
510
    /// Replaces the diagnostics for `worktree_path`, updates the summary
    /// index, and, when shared, forwards the new summary to the server.
    /// Returns whether anything observable changed.
    pub fn update_diagnostics(
        &mut self,
        language_server_id: usize,
        worktree_path: Arc<Path>,
        diagnostics: Vec<DiagnosticEntry<Unclipped<PointUtf16>>>,
        _: &mut ModelContext<Worktree>,
    ) -> Result<bool> {
        self.diagnostics.remove(&worktree_path);
        let old_summary = self
            .diagnostic_summaries
            .remove(&PathKey(worktree_path.clone()))
            .unwrap_or_default();
        let new_summary = DiagnosticSummary::new(language_server_id, &diagnostics);
        // Only store non-empty diagnostic sets; empty means "cleared".
        if !new_summary.is_empty() {
            self.diagnostic_summaries
                .insert(PathKey(worktree_path.clone()), new_summary);
            self.diagnostics.insert(worktree_path.clone(), diagnostics);
        }

        // Changed if there were diagnostics before or there are some now.
        let updated = !old_summary.is_empty() || !new_summary.is_empty();
        if updated {
            if let Some(share) = self.share.as_ref() {
                self.client
                    .send(proto::UpdateDiagnosticSummary {
                        project_id: share.project_id,
                        worktree_id: self.id().to_proto(),
                        summary: Some(proto::DiagnosticSummary {
                            path: worktree_path.to_string_lossy().to_string(),
                            language_server_id: language_server_id as u64,
                            error_count: new_summary.error_count as u32,
                            warning_count: new_summary.warning_count as u32,
                        }),
                    })
                    .log_err();
            }
        }

        Ok(updated)
    }
550
    /// Installs a new snapshot produced by the scanner, republishing it to
    /// collaborators and emitting an event if any git repositories changed.
    fn set_snapshot(&mut self, new_snapshot: LocalSnapshot, cx: &mut ModelContext<Worktree>) {
        // Compute the repo diff before overwriting the old snapshot.
        let updated_repos = Self::changed_repos(
            &self.snapshot.git_repositories,
            &new_snapshot.git_repositories,
        );
        self.snapshot = new_snapshot;

        if let Some(share) = self.share.as_mut() {
            *share.snapshots_tx.borrow_mut() = self.snapshot.clone();
        }

        if !updated_repos.is_empty() {
            cx.emit(Event::UpdatedGitRepositories(updated_repos));
        }
    }
566
    /// Computes the symmetric difference of two repository lists, treating a
    /// repo as "unchanged" only if both its `git_dir_path` and `scan_id`
    /// match. Deduplicated by `git_dir_path`.
    fn changed_repos(
        old_repos: &[GitRepositoryEntry],
        new_repos: &[GitRepositoryEntry],
    ) -> Vec<GitRepositoryEntry> {
        // Adds to `updated` every repo in `a` with no exact match in `b`.
        fn diff<'a>(
            a: &'a [GitRepositoryEntry],
            b: &'a [GitRepositoryEntry],
            updated: &mut HashMap<&'a Path, GitRepositoryEntry>,
        ) {
            for a_repo in a {
                let matched = b.iter().find(|b_repo| {
                    a_repo.git_dir_path == b_repo.git_dir_path && a_repo.scan_id == b_repo.scan_id
                });

                if matched.is_none() {
                    updated.insert(a_repo.git_dir_path.as_ref(), a_repo.clone());
                }
            }
        }

        let mut updated = HashMap::<&Path, GitRepositoryEntry>::default();

        // Run the diff both directions to catch removals and additions.
        diff(old_repos, new_repos, &mut updated);
        diff(new_repos, old_repos, &mut updated);

        updated.into_values().collect()
    }
594
595 pub fn scan_complete(&self) -> impl Future<Output = ()> {
596 let mut is_scanning_rx = self.is_scanning.1.clone();
597 async move {
598 let mut is_scanning = is_scanning_rx.borrow().clone();
599 while is_scanning {
600 if let Some(value) = is_scanning_rx.recv().await {
601 is_scanning = value;
602 } else {
603 break;
604 }
605 }
606 }
607 }
608
    /// Returns a clone of the current local snapshot.
    pub fn snapshot(&self) -> LocalSnapshot {
        self.snapshot.clone()
    }
612
    /// Builds the protobuf metadata describing this worktree for the server.
    pub fn metadata_proto(&self) -> proto::WorktreeMetadata {
        proto::WorktreeMetadata {
            id: self.id().to_proto(),
            root_name: self.root_name().to_string(),
            visible: self.visible,
            abs_path: self.abs_path().as_os_str().to_string_lossy().into(),
        }
    }
621
    /// Loads a file's text and git index ("diff base") content, refreshing the
    /// snapshot entry for the path. Returns the `File`, the file contents, and
    /// the optional diff base.
    fn load(
        &self,
        path: &Path,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<(File, String, Option<String>)>> {
        let handle = cx.handle();
        let path = Arc::from(path);
        let abs_path = self.absolutize(&path);
        let fs = self.fs.clone();
        let snapshot = self.snapshot();

        cx.spawn(|this, mut cx| async move {
            let text = fs.load(&abs_path).await?;

            // Read the staged copy from the containing git repository, if any,
            // on a background thread.
            let diff_base = if let Some(repo) = snapshot.repo_for(&path) {
                if let Ok(repo_relative) = path.strip_prefix(repo.content_path) {
                    let repo_relative = repo_relative.to_owned();
                    cx.background()
                        .spawn(async move { repo.repo.lock().load_index_text(&repo_relative) })
                        .await
                } else {
                    None
                }
            } else {
                None
            };

            // Eagerly populate the snapshot with an updated entry for the loaded file
            let entry = this
                .update(&mut cx, |this, cx| {
                    this.as_local().unwrap().refresh_entry(path, None, cx)
                })
                .await?;

            Ok((
                File {
                    entry_id: entry.id,
                    worktree: handle,
                    path: entry.path,
                    mtime: entry.mtime,
                    is_local: true,
                    is_deleted: false,
                },
                text,
                diff_base,
            ))
        })
    }
670
671 pub fn save_buffer(
672 &self,
673 buffer_handle: ModelHandle<Buffer>,
674 path: Arc<Path>,
675 has_changed_file: bool,
676 cx: &mut ModelContext<Worktree>,
677 ) -> Task<Result<(clock::Global, RopeFingerprint, SystemTime)>> {
678 let handle = cx.handle();
679 let buffer = buffer_handle.read(cx);
680
681 let rpc = self.client.clone();
682 let buffer_id = buffer.remote_id();
683 let project_id = self.share.as_ref().map(|share| share.project_id);
684
685 let text = buffer.as_rope().clone();
686 let fingerprint = text.fingerprint();
687 let version = buffer.version();
688 let save = self.write_file(path, text, buffer.line_ending(), cx);
689
690 cx.as_mut().spawn(|mut cx| async move {
691 let entry = save.await?;
692
693 if has_changed_file {
694 let new_file = Arc::new(File {
695 entry_id: entry.id,
696 worktree: handle,
697 path: entry.path,
698 mtime: entry.mtime,
699 is_local: true,
700 is_deleted: false,
701 });
702
703 if let Some(project_id) = project_id {
704 rpc.send(proto::UpdateBufferFile {
705 project_id,
706 buffer_id,
707 file: Some(new_file.to_proto()),
708 })
709 .log_err();
710 }
711
712 buffer_handle.update(&mut cx, |buffer, cx| {
713 if has_changed_file {
714 buffer.file_updated(new_file, cx).detach();
715 }
716 });
717 }
718
719 if let Some(project_id) = project_id {
720 rpc.send(proto::BufferSaved {
721 project_id,
722 buffer_id,
723 version: serialize_version(&version),
724 mtime: Some(entry.mtime.into()),
725 fingerprint: serialize_fingerprint(fingerprint),
726 })?;
727 }
728
729 buffer_handle.update(&mut cx, |buffer, cx| {
730 buffer.did_save(version.clone(), fingerprint, entry.mtime, cx);
731 });
732
733 Ok((version, fingerprint, entry.mtime))
734 })
735 }
736
    /// Creates a new (empty) file or directory at `path` on disk, then
    /// refreshes and returns the corresponding snapshot entry.
    pub fn create_entry(
        &self,
        path: impl Into<Arc<Path>>,
        is_dir: bool,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<Entry>> {
        let path = path.into();
        let abs_path = self.absolutize(&path);
        let fs = self.fs.clone();
        let write = cx.background().spawn(async move {
            if is_dir {
                fs.create_dir(&abs_path).await
            } else {
                // Empty contents with default line ending.
                fs.save(&abs_path, &Default::default(), Default::default())
                    .await
            }
        });

        cx.spawn(|this, mut cx| async move {
            write.await?;
            this.update(&mut cx, |this, cx| {
                this.as_local_mut().unwrap().refresh_entry(path, None, cx)
            })
            .await
        })
    }
763
    /// Writes `text` to `path` with the given line ending, then refreshes and
    /// returns the snapshot entry for the written file.
    pub fn write_file(
        &self,
        path: impl Into<Arc<Path>>,
        text: Rope,
        line_ending: LineEnding,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<Entry>> {
        let path = path.into();
        let abs_path = self.absolutize(&path);
        let fs = self.fs.clone();
        let write = cx
            .background()
            .spawn(async move { fs.save(&abs_path, &text, line_ending).await });

        cx.spawn(|this, mut cx| async move {
            write.await?;
            this.update(&mut cx, |this, cx| {
                this.as_local_mut().unwrap().refresh_entry(path, None, cx)
            })
            .await
        })
    }
786
    /// Deletes the entry with `entry_id` from disk (recursively for
    /// directories), then waits for the scanner to process the removal.
    /// Returns `None` if no entry with that id exists.
    pub fn delete_entry(
        &self,
        entry_id: ProjectEntryId,
        cx: &mut ModelContext<Worktree>,
    ) -> Option<Task<Result<()>>> {
        let entry = self.entry_for_id(entry_id)?.clone();
        let abs_path = self.abs_path.clone();
        let fs = self.fs.clone();

        let delete = cx.background().spawn(async move {
            // Canonicalize so the scanner sees the same path the watcher reports.
            let mut abs_path = fs.canonicalize(&abs_path).await?;
            if entry.path.file_name().is_some() {
                abs_path = abs_path.join(&entry.path);
            }
            if entry.is_file() {
                fs.remove_file(&abs_path, Default::default()).await?;
            } else {
                fs.remove_dir(
                    &abs_path,
                    RemoveOptions {
                        recursive: true,
                        ignore_if_not_exists: false,
                    },
                )
                .await?;
            }
            anyhow::Ok(abs_path)
        });

        Some(cx.spawn(|this, mut cx| async move {
            let abs_path = delete.await?;
            // Ask the scanner to rescan the deleted path and wait until done.
            let (tx, mut rx) = barrier::channel();
            this.update(&mut cx, |this, _| {
                this.as_local_mut()
                    .unwrap()
                    .path_changes_tx
                    .try_send((vec![abs_path], tx))
            })?;
            rx.recv().await;
            Ok(())
        }))
    }
829
    /// Renames the entry with `entry_id` to `new_path` on disk, then refreshes
    /// the snapshot for both the new and old locations. Returns `None` if no
    /// entry with that id exists.
    pub fn rename_entry(
        &self,
        entry_id: ProjectEntryId,
        new_path: impl Into<Arc<Path>>,
        cx: &mut ModelContext<Worktree>,
    ) -> Option<Task<Result<Entry>>> {
        let old_path = self.entry_for_id(entry_id)?.path.clone();
        let new_path = new_path.into();
        let abs_old_path = self.absolutize(&old_path);
        let abs_new_path = self.absolutize(&new_path);
        let fs = self.fs.clone();
        let rename = cx.background().spawn(async move {
            fs.rename(&abs_old_path, &abs_new_path, Default::default())
                .await
        });

        Some(cx.spawn(|this, mut cx| async move {
            rename.await?;
            this.update(&mut cx, |this, cx| {
                this.as_local_mut()
                    .unwrap()
                    // Passing the old path lets the refresh also rescan it.
                    .refresh_entry(new_path.clone(), Some(old_path), cx)
            })
            .await
        }))
    }
856
    /// Recursively copies the entry with `entry_id` to `new_path` on disk and
    /// refreshes the snapshot entry for the destination. Returns `None` if no
    /// entry with that id exists.
    pub fn copy_entry(
        &self,
        entry_id: ProjectEntryId,
        new_path: impl Into<Arc<Path>>,
        cx: &mut ModelContext<Worktree>,
    ) -> Option<Task<Result<Entry>>> {
        let old_path = self.entry_for_id(entry_id)?.path.clone();
        let new_path = new_path.into();
        let abs_old_path = self.absolutize(&old_path);
        let abs_new_path = self.absolutize(&new_path);
        let fs = self.fs.clone();
        let copy = cx.background().spawn(async move {
            copy_recursive(
                fs.as_ref(),
                &abs_old_path,
                &abs_new_path,
                Default::default(),
            )
            .await
        });

        Some(cx.spawn(|this, mut cx| async move {
            copy.await?;
            this.update(&mut cx, |this, cx| {
                this.as_local_mut()
                    .unwrap()
                    .refresh_entry(new_path.clone(), None, cx)
            })
            .await
        }))
    }
888
    /// Asks the background scanner to rescan `path` (and optionally the old
    /// path of a rename), waits for the scan to finish, and returns the
    /// resulting entry from the updated snapshot.
    fn refresh_entry(
        &self,
        path: Arc<Path>,
        old_path: Option<Arc<Path>>,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<Entry>> {
        let fs = self.fs.clone();
        let abs_root_path = self.abs_path.clone();
        let path_changes_tx = self.path_changes_tx.clone();
        cx.spawn_weak(move |this, mut cx| async move {
            // Canonicalize the root so scanner paths match watcher events.
            let abs_path = fs.canonicalize(&abs_root_path).await?;
            let mut paths = Vec::with_capacity(2);
            // A path with no file name component refers to the root itself.
            paths.push(if path.file_name().is_some() {
                abs_path.join(&path)
            } else {
                abs_path.clone()
            });
            if let Some(old_path) = old_path {
                paths.push(if old_path.file_name().is_some() {
                    abs_path.join(&old_path)
                } else {
                    abs_path.clone()
                });
            }

            // The barrier is dropped by the scanner when the batch completes.
            let (tx, mut rx) = barrier::channel();
            path_changes_tx.try_send((paths, tx))?;
            rx.recv().await;
            this.upgrade(&cx)
                .ok_or_else(|| anyhow!("worktree was dropped"))?
                .update(&mut cx, |this, _| {
                    this.entry_for_path(path)
                        .cloned()
                        .ok_or_else(|| anyhow!("failed to read path after update"))
                })
        })
    }
926
    /// Starts (or resumes) sharing this worktree under `project_id`.
    ///
    /// On first call this sends the existing diagnostic summaries to the
    /// server and spawns a background task that streams snapshot diffs as
    /// chunked `UpdateWorktree` requests. Subsequent calls just nudge
    /// `resume_updates`. The returned task resolves once the initial snapshot
    /// has been fully sent.
    pub fn share(&mut self, project_id: u64, cx: &mut ModelContext<Worktree>) -> Task<Result<()>> {
        let (share_tx, share_rx) = oneshot::channel();

        if let Some(share) = self.share.as_mut() {
            // Already sharing: resolve immediately and unblock the update loop.
            let _ = share_tx.send(());
            *share.resume_updates.borrow_mut() = ();
        } else {
            let (snapshots_tx, mut snapshots_rx) = watch::channel_with(self.snapshot());
            let (resume_updates_tx, mut resume_updates_rx) = watch::channel();
            let worktree_id = cx.model_id() as u64;

            for (path, summary) in self.diagnostic_summaries.iter() {
                if let Err(e) = self.client.send(proto::UpdateDiagnosticSummary {
                    project_id,
                    worktree_id,
                    summary: Some(summary.to_proto(&path.0)),
                }) {
                    return Task::ready(Err(e));
                }
            }

            let _maintain_remote_snapshot = cx.background().spawn({
                let client = self.client.clone();
                async move {
                    let mut share_tx = Some(share_tx);
                    // Start from an empty snapshot so the first diff sends
                    // the entire worktree.
                    let mut prev_snapshot = LocalSnapshot {
                        ignores_by_parent_abs_path: Default::default(),
                        git_repositories: Default::default(),
                        removed_entry_ids: Default::default(),
                        next_entry_id: Default::default(),
                        snapshot: Snapshot {
                            id: WorktreeId(worktree_id as usize),
                            abs_path: Path::new("").into(),
                            root_name: Default::default(),
                            root_char_bag: Default::default(),
                            entries_by_path: Default::default(),
                            entries_by_id: Default::default(),
                            scan_id: 0,
                            completed_scan_id: 0,
                        },
                    };
                    while let Some(snapshot) = snapshots_rx.recv().await {
                        // Small chunks in tests exercise the chunking path.
                        #[cfg(any(test, feature = "test-support"))]
                        const MAX_CHUNK_SIZE: usize = 2;
                        #[cfg(not(any(test, feature = "test-support")))]
                        const MAX_CHUNK_SIZE: usize = 256;

                        let update =
                            snapshot.build_update(&prev_snapshot, project_id, worktree_id, true);
                        for update in proto::split_worktree_update(update, MAX_CHUNK_SIZE) {
                            let _ = resume_updates_rx.try_recv();
                            // Retry each chunk until it succeeds or sharing ends.
                            while let Err(error) = client.request(update.clone()).await {
                                log::error!("failed to send worktree update: {}", error);
                                log::info!("waiting to resume updates");
                                if resume_updates_rx.next().await.is_none() {
                                    return Ok(());
                                }
                            }
                        }

                        // First full snapshot sent: resolve the caller's future.
                        if let Some(share_tx) = share_tx.take() {
                            let _ = share_tx.send(());
                        }

                        prev_snapshot = snapshot;
                    }

                    Ok::<_, anyhow::Error>(())
                }
                .log_err()
            });

            self.share = Some(ShareState {
                project_id,
                snapshots_tx,
                resume_updates: resume_updates_tx,
                _maintain_remote_snapshot,
            });
        }

        cx.foreground()
            .spawn(async move { share_rx.await.map_err(|_| anyhow!("share ended")) })
    }
1010
1011 pub fn unshare(&mut self) {
1012 self.share.take();
1013 }
1014
1015 pub fn is_shared(&self) -> bool {
1016 self.share.is_some()
1017 }
1018}
1019
1020impl RemoteWorktree {
    /// Returns a clone of the foreground snapshot.
    fn snapshot(&self) -> Snapshot {
        self.snapshot.clone()
    }
1024
    /// Marks the worktree as disconnected: stops accepting remote updates and
    /// drops pending snapshot subscriptions (their receivers will error).
    pub fn disconnected_from_host(&mut self) {
        self.updates_tx.take();
        self.snapshot_subscriptions.clear();
        self.disconnected = true;
    }
1030
    /// Asks the host to save the buffer, then applies the returned version,
    /// fingerprint, and mtime to the local replica.
    pub fn save_buffer(
        &self,
        buffer_handle: ModelHandle<Buffer>,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<(clock::Global, RopeFingerprint, SystemTime)>> {
        let buffer = buffer_handle.read(cx);
        let buffer_id = buffer.remote_id();
        let version = buffer.version();
        let rpc = self.client.clone();
        let project_id = self.project_id;
        cx.as_mut().spawn(|mut cx| async move {
            let response = rpc
                .request(proto::SaveBuffer {
                    project_id,
                    buffer_id,
                    version: serialize_version(&version),
                })
                .await?;
            // Trust the host's reported save state over our local version.
            let version = deserialize_version(&response.version);
            let fingerprint = deserialize_fingerprint(&response.fingerprint)?;
            let mtime = response
                .mtime
                .ok_or_else(|| anyhow!("missing mtime"))?
                .into();

            buffer_handle.update(&mut cx, |buffer, cx| {
                buffer.did_save(version.clone(), fingerprint, mtime, cx);
            });

            Ok((version, fingerprint, mtime))
        })
    }
1063
    /// Queues an update from the host for application on the background
    /// thread. Silently ignored after `disconnected_from_host`.
    pub fn update_from_remote(&mut self, update: proto::UpdateWorktree) {
        if let Some(updates_tx) = &self.updates_tx {
            updates_tx
                .unbounded_send(update)
                .expect("consumer runs to completion");
        }
    }
1071
1072 fn observed_snapshot(&self, scan_id: usize) -> bool {
1073 self.completed_scan_id >= scan_id
1074 }
1075
    /// Returns a future that resolves once a snapshot covering `scan_id` has
    /// been observed. Resolves immediately if already observed; errors if the
    /// host disconnected (the sender is dropped and `rx.await` fails).
    fn wait_for_snapshot(&mut self, scan_id: usize) -> impl Future<Output = Result<()>> {
        let (tx, rx) = oneshot::channel();
        if self.observed_snapshot(scan_id) {
            let _ = tx.send(());
        } else if self.disconnected {
            drop(tx);
        } else {
            // Insert sorted by scan id so subscribers wake in order.
            match self
                .snapshot_subscriptions
                .binary_search_by_key(&scan_id, |probe| probe.0)
            {
                Ok(ix) | Err(ix) => self.snapshot_subscriptions.insert(ix, (scan_id, tx)),
            }
        }

        async move {
            rx.await?;
            Ok(())
        }
    }
1096
    /// Applies a diagnostic summary received from the host; an empty summary
    /// clears the entry for `path`.
    pub fn update_diagnostic_summary(
        &mut self,
        path: Arc<Path>,
        summary: &proto::DiagnosticSummary,
    ) {
        let summary = DiagnosticSummary {
            language_server_id: summary.language_server_id as usize,
            error_count: summary.error_count as usize,
            warning_count: summary.warning_count as usize,
        };
        if summary.is_empty() {
            self.diagnostic_summaries.remove(&PathKey(path));
        } else {
            self.diagnostic_summaries.insert(PathKey(path), summary);
        }
    }
1113
    /// Inserts an entry sent by the host once the snapshot has caught up to
    /// `scan_id`, keeping the foreground snapshot in sync.
    pub fn insert_entry(
        &mut self,
        entry: proto::Entry,
        scan_id: usize,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<Entry>> {
        let wait_for_snapshot = self.wait_for_snapshot(scan_id);
        cx.spawn(|this, mut cx| async move {
            wait_for_snapshot.await?;
            this.update(&mut cx, |worktree, _| {
                let worktree = worktree.as_remote_mut().unwrap();
                let mut snapshot = worktree.background_snapshot.lock();
                let entry = snapshot.insert_entry(entry);
                worktree.snapshot = snapshot.clone();
                entry
            })
        })
    }
1132
    /// Deletes the entry with `id` once the snapshot has caught up to
    /// `scan_id`, keeping the foreground snapshot in sync.
    pub(crate) fn delete_entry(
        &mut self,
        id: ProjectEntryId,
        scan_id: usize,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<()>> {
        let wait_for_snapshot = self.wait_for_snapshot(scan_id);
        cx.spawn(|this, mut cx| async move {
            wait_for_snapshot.await?;
            this.update(&mut cx, |worktree, _| {
                let worktree = worktree.as_remote_mut().unwrap();
                let mut snapshot = worktree.background_snapshot.lock();
                snapshot.delete_entry(id);
                worktree.snapshot = snapshot.clone();
            });
            Ok(())
        })
    }
1151}
1152
1153impl Snapshot {
    /// The worktree this snapshot belongs to.
    pub fn id(&self) -> WorktreeId {
        self.id
    }
1157
    /// The absolute path of the worktree root.
    pub fn abs_path(&self) -> &Arc<Path> {
        &self.abs_path
    }
1161
    /// Whether an entry with the given id exists in this snapshot.
    pub fn contains_entry(&self, entry_id: ProjectEntryId) -> bool {
        self.entries_by_id.get(&entry_id, &()).is_some()
    }
1165
    /// Converts a protobuf entry and inserts it into both indices, replacing
    /// any existing entry with the same id (including its old path).
    pub(crate) fn insert_entry(&mut self, entry: proto::Entry) -> Result<Entry> {
        let entry = Entry::try_from((&self.root_char_bag, entry))?;
        let old_entry = self.entries_by_id.insert_or_replace(
            PathEntry {
                id: entry.id,
                path: entry.path.clone(),
                is_ignored: entry.is_ignored,
                scan_id: 0,
            },
            &(),
        );
        // If the id was already present, drop its stale path index entry.
        if let Some(old_entry) = old_entry {
            self.entries_by_path.remove(&PathKey(old_entry.path), &());
        }
        self.entries_by_path.insert_or_replace(entry.clone(), &());
        Ok(entry)
    }
1183
1184 fn delete_entry(&mut self, entry_id: ProjectEntryId) -> Option<Arc<Path>> {
1185 let removed_entry = self.entries_by_id.remove(&entry_id, &())?;
1186 self.entries_by_path = {
1187 let mut cursor = self.entries_by_path.cursor();
1188 let mut new_entries_by_path =
1189 cursor.slice(&TraversalTarget::Path(&removed_entry.path), Bias::Left, &());
1190 while let Some(entry) = cursor.item() {
1191 if entry.path.starts_with(&removed_entry.path) {
1192 self.entries_by_id.remove(&entry.id, &());
1193 cursor.next(&());
1194 } else {
1195 break;
1196 }
1197 }
1198 new_entries_by_path.push_tree(cursor.suffix(&()), &());
1199 new_entries_by_path
1200 };
1201
1202 Some(removed_entry.path)
1203 }
1204
1205 pub(crate) fn apply_remote_update(&mut self, update: proto::UpdateWorktree) -> Result<()> {
1206 let mut entries_by_path_edits = Vec::new();
1207 let mut entries_by_id_edits = Vec::new();
1208 for entry_id in update.removed_entries {
1209 if let Some(entry) = self.entry_for_id(ProjectEntryId::from_proto(entry_id)) {
1210 entries_by_path_edits.push(Edit::Remove(PathKey(entry.path.clone())));
1211 entries_by_id_edits.push(Edit::Remove(entry.id));
1212 }
1213 }
1214
1215 for entry in update.updated_entries {
1216 let entry = Entry::try_from((&self.root_char_bag, entry))?;
1217 if let Some(PathEntry { path, .. }) = self.entries_by_id.get(&entry.id, &()) {
1218 entries_by_path_edits.push(Edit::Remove(PathKey(path.clone())));
1219 }
1220 entries_by_id_edits.push(Edit::Insert(PathEntry {
1221 id: entry.id,
1222 path: entry.path.clone(),
1223 is_ignored: entry.is_ignored,
1224 scan_id: 0,
1225 }));
1226 entries_by_path_edits.push(Edit::Insert(entry));
1227 }
1228
1229 self.entries_by_path.edit(entries_by_path_edits, &());
1230 self.entries_by_id.edit(entries_by_id_edits, &());
1231 self.scan_id = update.scan_id as usize;
1232 if update.is_last_update {
1233 self.completed_scan_id = update.scan_id as usize;
1234 }
1235
1236 Ok(())
1237 }
1238
1239 pub fn file_count(&self) -> usize {
1240 self.entries_by_path.summary().file_count
1241 }
1242
1243 pub fn visible_file_count(&self) -> usize {
1244 self.entries_by_path.summary().visible_file_count
1245 }
1246
1247 fn traverse_from_offset(
1248 &self,
1249 include_dirs: bool,
1250 include_ignored: bool,
1251 start_offset: usize,
1252 ) -> Traversal {
1253 let mut cursor = self.entries_by_path.cursor();
1254 cursor.seek(
1255 &TraversalTarget::Count {
1256 count: start_offset,
1257 include_dirs,
1258 include_ignored,
1259 },
1260 Bias::Right,
1261 &(),
1262 );
1263 Traversal {
1264 cursor,
1265 include_dirs,
1266 include_ignored,
1267 }
1268 }
1269
1270 fn traverse_from_path(
1271 &self,
1272 include_dirs: bool,
1273 include_ignored: bool,
1274 path: &Path,
1275 ) -> Traversal {
1276 let mut cursor = self.entries_by_path.cursor();
1277 cursor.seek(&TraversalTarget::Path(path), Bias::Left, &());
1278 Traversal {
1279 cursor,
1280 include_dirs,
1281 include_ignored,
1282 }
1283 }
1284
1285 pub fn files(&self, include_ignored: bool, start: usize) -> Traversal {
1286 self.traverse_from_offset(false, include_ignored, start)
1287 }
1288
1289 pub fn entries(&self, include_ignored: bool) -> Traversal {
1290 self.traverse_from_offset(true, include_ignored, 0)
1291 }
1292
1293 pub fn paths(&self) -> impl Iterator<Item = &Arc<Path>> {
1294 let empty_path = Path::new("");
1295 self.entries_by_path
1296 .cursor::<()>()
1297 .filter(move |entry| entry.path.as_ref() != empty_path)
1298 .map(|entry| &entry.path)
1299 }
1300
1301 fn child_entries<'a>(&'a self, parent_path: &'a Path) -> ChildEntriesIter<'a> {
1302 let mut cursor = self.entries_by_path.cursor();
1303 cursor.seek(&TraversalTarget::Path(parent_path), Bias::Right, &());
1304 let traversal = Traversal {
1305 cursor,
1306 include_dirs: true,
1307 include_ignored: true,
1308 };
1309 ChildEntriesIter {
1310 traversal,
1311 parent_path,
1312 }
1313 }
1314
1315 pub fn root_entry(&self) -> Option<&Entry> {
1316 self.entry_for_path("")
1317 }
1318
1319 pub fn root_name(&self) -> &str {
1320 &self.root_name
1321 }
1322
1323 pub fn scan_id(&self) -> usize {
1324 self.scan_id
1325 }
1326
1327 pub fn entry_for_path(&self, path: impl AsRef<Path>) -> Option<&Entry> {
1328 let path = path.as_ref();
1329 self.traverse_from_path(true, true, path)
1330 .entry()
1331 .and_then(|entry| {
1332 if entry.path.as_ref() == path {
1333 Some(entry)
1334 } else {
1335 None
1336 }
1337 })
1338 }
1339
1340 pub fn entry_for_id(&self, id: ProjectEntryId) -> Option<&Entry> {
1341 let entry = self.entries_by_id.get(&id, &())?;
1342 self.entry_for_path(&entry.path)
1343 }
1344
1345 pub fn inode_for_path(&self, path: impl AsRef<Path>) -> Option<u64> {
1346 self.entry_for_path(path.as_ref()).map(|e| e.inode)
1347 }
1348}
1349
impl LocalSnapshot {
    // Gives the most specific git repository for a given path
    pub(crate) fn repo_for(&self, path: &Path) -> Option<GitRepositoryEntry> {
        self.git_repositories
            .iter()
            .rev() //git_repository is ordered lexicographically
            .find(|repo| repo.manages(path))
            .cloned()
    }

    /// Find the repository whose `.git` directory contains `path`, if any.
    pub(crate) fn repo_with_dot_git_containing(
        &mut self,
        path: &Path,
    ) -> Option<&mut GitRepositoryEntry> {
        // Git repositories cannot be nested, so we don't need to reverse the order
        self.git_repositories
            .iter_mut()
            .find(|repo| repo.in_dot_git(path))
    }

    /// Build an update message carrying the worktree's complete contents,
    /// as would be sent when first sharing it.
    #[cfg(test)]
    pub(crate) fn build_initial_update(&self, project_id: u64) -> proto::UpdateWorktree {
        let root_name = self.root_name.clone();
        proto::UpdateWorktree {
            project_id,
            worktree_id: self.id().to_proto(),
            abs_path: self.abs_path().to_string_lossy().into(),
            root_name,
            updated_entries: self.entries_by_path.iter().map(Into::into).collect(),
            removed_entries: Default::default(),
            scan_id: self.scan_id as u64,
            is_last_update: true,
        }
    }

    /// Build an update message describing the difference between `self` and
    /// an older snapshot `other`, by merge-joining the two id-ordered
    /// by-id trees.
    pub(crate) fn build_update(
        &self,
        other: &Self,
        project_id: u64,
        worktree_id: u64,
        include_ignored: bool,
    ) -> proto::UpdateWorktree {
        let mut updated_entries = Vec::new();
        let mut removed_entries = Vec::new();
        let mut self_entries = self
            .entries_by_id
            .cursor::<()>()
            .filter(|e| include_ignored || !e.is_ignored)
            .peekable();
        let mut other_entries = other
            .entries_by_id
            .cursor::<()>()
            .filter(|e| include_ignored || !e.is_ignored)
            .peekable();
        loop {
            match (self_entries.peek(), other_entries.peek()) {
                (Some(self_entry), Some(other_entry)) => {
                    match Ord::cmp(&self_entry.id, &other_entry.id) {
                        // Id only in `self`: a newly-added entry.
                        Ordering::Less => {
                            let entry = self.entry_for_id(self_entry.id).unwrap().into();
                            updated_entries.push(entry);
                            self_entries.next();
                        }
                        // Id in both: include it only if it was touched by a
                        // different scan than the one `other` saw.
                        Ordering::Equal => {
                            if self_entry.scan_id != other_entry.scan_id {
                                let entry = self.entry_for_id(self_entry.id).unwrap().into();
                                updated_entries.push(entry);
                            }

                            self_entries.next();
                            other_entries.next();
                        }
                        // Id only in `other`: the entry was removed.
                        Ordering::Greater => {
                            removed_entries.push(other_entry.id.to_proto());
                            other_entries.next();
                        }
                    }
                }
                (Some(self_entry), None) => {
                    let entry = self.entry_for_id(self_entry.id).unwrap().into();
                    updated_entries.push(entry);
                    self_entries.next();
                }
                (None, Some(other_entry)) => {
                    removed_entries.push(other_entry.id.to_proto());
                    other_entries.next();
                }
                (None, None) => break,
            }
        }

        proto::UpdateWorktree {
            project_id,
            worktree_id,
            abs_path: self.abs_path().to_string_lossy().into(),
            root_name: self.root_name().to_string(),
            updated_entries,
            removed_entries,
            scan_id: self.scan_id as u64,
            is_last_update: self.completed_scan_id == self.scan_id,
        }
    }

    /// Insert or replace an entry discovered on disk, keeping both trees and
    /// the gitignore cache up to date. Returns the entry as stored.
    fn insert_entry(&mut self, mut entry: Entry, fs: &dyn Fs) -> Entry {
        // A new or changed .gitignore file invalidates the cached matcher
        // for its parent directory.
        if entry.is_file() && entry.path.file_name() == Some(&GITIGNORE) {
            let abs_path = self.abs_path.join(&entry.path);
            match smol::block_on(build_gitignore(&abs_path, fs)) {
                Ok(ignore) => {
                    self.ignores_by_parent_abs_path.insert(
                        abs_path.parent().unwrap().into(),
                        (Arc::new(ignore), self.scan_id),
                    );
                }
                Err(error) => {
                    log::error!(
                        "error loading .gitignore file {:?} - {:?}",
                        &entry.path,
                        error
                    );
                }
            }
        }

        self.reuse_entry_id(&mut entry);

        // Don't demote an already-scanned directory back to pending.
        if entry.kind == EntryKind::PendingDir {
            if let Some(existing_entry) =
                self.entries_by_path.get(&PathKey(entry.path.clone()), &())
            {
                entry.kind = existing_entry.kind;
            }
        }

        let scan_id = self.scan_id;
        self.entries_by_path.insert_or_replace(entry.clone(), &());
        self.entries_by_id.insert_or_replace(
            PathEntry {
                id: entry.id,
                path: entry.path.clone(),
                is_ignored: entry.is_ignored,
                scan_id,
            },
            &(),
        );

        entry
    }

    /// Record the children of a just-scanned directory, marking the parent
    /// as fully scanned and registering any gitignore or git repository
    /// found inside it.
    fn populate_dir(
        &mut self,
        parent_path: Arc<Path>,
        entries: impl IntoIterator<Item = Entry>,
        ignore: Option<Arc<Gitignore>>,
        fs: &dyn Fs,
    ) {
        let mut parent_entry = if let Some(parent_entry) =
            self.entries_by_path.get(&PathKey(parent_path.clone()), &())
        {
            parent_entry.clone()
        } else {
            // The parent may have been deleted between the scan job being
            // queued and executed; nothing to do in that case.
            log::warn!(
                "populating a directory {:?} that has been removed",
                parent_path
            );
            return;
        };

        match parent_entry.kind {
            EntryKind::PendingDir => {
                parent_entry.kind = EntryKind::Dir;
            }
            EntryKind::Dir => {}
            // A non-directory parent means the path changed kind on disk;
            // ignore this stale job.
            _ => return,
        }

        if let Some(ignore) = ignore {
            self.ignores_by_parent_abs_path.insert(
                self.abs_path.join(&parent_path).into(),
                (ignore, self.scan_id),
            );
        }

        // A `.git` directory marks its parent as a repository root.
        if parent_path.file_name() == Some(&DOT_GIT) {
            let abs_path = self.abs_path.join(&parent_path);
            let content_path: Arc<Path> = parent_path.parent().unwrap().into();
            // Only insert if no repository is registered for this content
            // path yet; `binary_search` keeps the list sorted.
            if let Err(ix) = self
                .git_repositories
                .binary_search_by_key(&&content_path, |repo| &repo.content_path)
            {
                if let Some(repo) = fs.open_repo(abs_path.as_path()) {
                    self.git_repositories.insert(
                        ix,
                        GitRepositoryEntry {
                            repo,
                            scan_id: 0,
                            content_path,
                            git_dir_path: parent_path,
                        },
                    );
                }
            }
        }

        let mut entries_by_path_edits = vec![Edit::Insert(parent_entry)];
        let mut entries_by_id_edits = Vec::new();

        for mut entry in entries {
            self.reuse_entry_id(&mut entry);
            entries_by_id_edits.push(Edit::Insert(PathEntry {
                id: entry.id,
                path: entry.path.clone(),
                is_ignored: entry.is_ignored,
                scan_id: self.scan_id,
            }));
            entries_by_path_edits.push(Edit::Insert(entry));
        }

        self.entries_by_path.edit(entries_by_path_edits, &());
        self.entries_by_id.edit(entries_by_id_edits, &());
    }

    // Preserve entry-id stability: if an entry with the same inode was
    // removed during this scan (e.g. a rename), or an entry already exists
    // at this path, reuse that id instead of the freshly-assigned one.
    fn reuse_entry_id(&mut self, entry: &mut Entry) {
        if let Some(removed_entry_id) = self.removed_entry_ids.remove(&entry.inode) {
            entry.id = removed_entry_id;
        } else if let Some(existing_entry) = self.entry_for_path(&entry.path) {
            entry.id = existing_entry.id;
        }
    }

    /// Remove the entry at `path` and all of its descendants, remembering
    /// their ids (keyed by inode) so they can be reused if the same files
    /// reappear later in this scan.
    fn remove_path(&mut self, path: &Path) {
        let mut new_entries;
        let removed_entries;
        {
            let mut cursor = self.entries_by_path.cursor::<TraversalProgress>();
            // Split the tree into: entries before `path`, the removed
            // subtree [path, successor-of-path), and the remaining suffix.
            new_entries = cursor.slice(&TraversalTarget::Path(path), Bias::Left, &());
            removed_entries = cursor.slice(&TraversalTarget::PathSuccessor(path), Bias::Left, &());
            new_entries.push_tree(cursor.suffix(&()), &());
        }
        self.entries_by_path = new_entries;

        let mut entries_by_id_edits = Vec::new();
        for entry in removed_entries.cursor::<()>() {
            // Keep the largest removed id per inode for later reuse.
            let removed_entry_id = self
                .removed_entry_ids
                .entry(entry.inode)
                .or_insert(entry.id);
            *removed_entry_id = cmp::max(*removed_entry_id, entry.id);
            entries_by_id_edits.push(Edit::Remove(entry.id));
        }
        self.entries_by_id.edit(entries_by_id_edits, &());

        // Removing a .gitignore or a .git directory invalidates the
        // corresponding cached state; stamp it with the current scan id.
        if path.file_name() == Some(&GITIGNORE) {
            let abs_parent_path = self.abs_path.join(path.parent().unwrap());
            if let Some((_, scan_id)) = self
                .ignores_by_parent_abs_path
                .get_mut(abs_parent_path.as_path())
            {
                *scan_id = self.snapshot.scan_id;
            }
        } else if path.file_name() == Some(&DOT_GIT) {
            let parent_path = path.parent().unwrap();
            if let Ok(ix) = self
                .git_repositories
                .binary_search_by_key(&parent_path, |repo| repo.git_dir_path.as_ref())
            {
                self.git_repositories[ix].scan_id = self.snapshot.scan_id;
            }
        }
    }

    // Collect the inodes of all existing ancestor entries of `path`,
    // used to detect symlink cycles during scanning.
    // NOTE(review): cycle-detection purpose inferred from the
    // `ancestor_inodes` field of `ScanJob` — confirm against `scan_dir`.
    fn ancestor_inodes_for_path(&self, path: &Path) -> TreeSet<u64> {
        let mut inodes = TreeSet::default();
        for ancestor in path.ancestors().skip(1) {
            if let Some(entry) = self.entry_for_path(ancestor) {
                inodes.insert(entry.inode);
            }
        }
        inodes
    }

    /// Build the stack of gitignore matchers that applies to `abs_path`,
    /// from the outermost ancestor inward.
    fn ignore_stack_for_abs_path(&self, abs_path: &Path, is_dir: bool) -> Arc<IgnoreStack> {
        let mut new_ignores = Vec::new();
        for ancestor in abs_path.ancestors().skip(1) {
            if let Some((ignore, _)) = self.ignores_by_parent_abs_path.get(ancestor) {
                new_ignores.push((ancestor, Some(ignore.clone())));
            } else {
                new_ignores.push((ancestor, None));
            }
        }

        let mut ignore_stack = IgnoreStack::none();
        // Walk outermost-first; once any ancestor directory is ignored,
        // everything beneath it is ignored and the stack collapses to "all".
        for (parent_abs_path, ignore) in new_ignores.into_iter().rev() {
            if ignore_stack.is_abs_path_ignored(parent_abs_path, true) {
                ignore_stack = IgnoreStack::all();
                break;
            } else if let Some(ignore) = ignore {
                ignore_stack = ignore_stack.append(parent_abs_path.into(), ignore);
            }
        }

        if ignore_stack.is_abs_path_ignored(abs_path, is_dir) {
            ignore_stack = IgnoreStack::all();
        }

        ignore_stack
    }

    /// All git repositories discovered in this worktree, ordered
    /// lexicographically by content path.
    pub fn git_repo_entries(&self) -> &[GitRepositoryEntry] {
        &self.git_repositories
    }
}
1661
1662impl GitRepositoryEntry {
1663 // Note that these paths should be relative to the worktree root.
1664 pub(crate) fn manages(&self, path: &Path) -> bool {
1665 path.starts_with(self.content_path.as_ref())
1666 }
1667
1668 // Note that this path should be relative to the worktree root.
1669 pub(crate) fn in_dot_git(&self, path: &Path) -> bool {
1670 path.starts_with(self.git_dir_path.as_ref())
1671 }
1672}
1673
1674async fn build_gitignore(abs_path: &Path, fs: &dyn Fs) -> Result<Gitignore> {
1675 let contents = fs.load(abs_path).await?;
1676 let parent = abs_path.parent().unwrap_or_else(|| Path::new("/"));
1677 let mut builder = GitignoreBuilder::new(parent);
1678 for line in contents.lines() {
1679 builder.add_line(Some(abs_path.into()), line)?;
1680 }
1681 Ok(builder.build()?)
1682}
1683
1684impl WorktreeId {
1685 pub fn from_usize(handle_id: usize) -> Self {
1686 Self(handle_id)
1687 }
1688
1689 pub(crate) fn from_proto(id: u64) -> Self {
1690 Self(id as usize)
1691 }
1692
1693 pub fn to_proto(&self) -> u64 {
1694 self.0 as u64
1695 }
1696
1697 pub fn to_usize(&self) -> usize {
1698 self.0
1699 }
1700}
1701
1702impl fmt::Display for WorktreeId {
1703 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1704 self.0.fmt(f)
1705 }
1706}
1707
1708impl Deref for Worktree {
1709 type Target = Snapshot;
1710
1711 fn deref(&self) -> &Self::Target {
1712 match self {
1713 Worktree::Local(worktree) => &worktree.snapshot,
1714 Worktree::Remote(worktree) => &worktree.snapshot,
1715 }
1716 }
1717}
1718
1719impl Deref for LocalWorktree {
1720 type Target = LocalSnapshot;
1721
1722 fn deref(&self) -> &Self::Target {
1723 &self.snapshot
1724 }
1725}
1726
1727impl Deref for RemoteWorktree {
1728 type Target = Snapshot;
1729
1730 fn deref(&self) -> &Self::Target {
1731 &self.snapshot
1732 }
1733}
1734
impl fmt::Debug for LocalWorktree {
    /// Debug output is entirely delegated to the worktree's snapshot.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        self.snapshot.fmt(f)
    }
}
1740
1741impl fmt::Debug for Snapshot {
1742 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1743 struct EntriesById<'a>(&'a SumTree<PathEntry>);
1744 struct EntriesByPath<'a>(&'a SumTree<Entry>);
1745
1746 impl<'a> fmt::Debug for EntriesByPath<'a> {
1747 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1748 f.debug_map()
1749 .entries(self.0.iter().map(|entry| (&entry.path, entry.id)))
1750 .finish()
1751 }
1752 }
1753
1754 impl<'a> fmt::Debug for EntriesById<'a> {
1755 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1756 f.debug_list().entries(self.0.iter()).finish()
1757 }
1758 }
1759
1760 f.debug_struct("Snapshot")
1761 .field("id", &self.id)
1762 .field("root_name", &self.root_name)
1763 .field("entries_by_path", &EntriesByPath(&self.entries_by_path))
1764 .field("entries_by_id", &EntriesById(&self.entries_by_id))
1765 .finish()
1766 }
1767}
1768
/// A handle to a file within a worktree, as seen by the buffer/language
/// layer. Cheap to clone.
#[derive(Clone, PartialEq)]
pub struct File {
    pub worktree: ModelHandle<Worktree>,
    /// Path of the file, relative to the worktree root.
    pub path: Arc<Path>,
    /// Modification time observed for this file.
    pub mtime: SystemTime,
    /// Id of the worktree entry backing this file.
    pub(crate) entry_id: ProjectEntryId,
    /// Whether the file belongs to a local (as opposed to remote) worktree.
    pub(crate) is_local: bool,
    /// Whether the underlying entry has been deleted from the worktree.
    pub(crate) is_deleted: bool,
}
1778
impl language::File for File {
    /// Expose the `LocalFile` interface when this file lives in a local
    /// worktree; remote files return `None`.
    fn as_local(&self) -> Option<&dyn language::LocalFile> {
        if self.is_local {
            Some(self)
        } else {
            None
        }
    }

    fn mtime(&self) -> SystemTime {
        self.mtime
    }

    /// The file's path relative to its worktree root.
    fn path(&self) -> &Arc<Path> {
        &self.path
    }

    /// The path to present to the user: the worktree's root name followed
    /// by the relative path for visible worktrees, or an absolute path
    /// (with the home directory abbreviated to `~`) otherwise.
    fn full_path(&self, cx: &AppContext) -> PathBuf {
        let mut full_path = PathBuf::new();
        let worktree = self.worktree.read(cx);

        if worktree.is_visible() {
            full_path.push(worktree.root_name());
        } else {
            let path = worktree.abs_path();

            if worktree.is_local() && path.starts_with(HOME.as_path()) {
                full_path.push("~");
                full_path.push(path.strip_prefix(HOME.as_path()).unwrap());
            } else {
                full_path.push(path)
            }
        }

        // An empty relative path denotes the worktree root itself.
        if self.path.components().next().is_some() {
            full_path.push(&self.path);
        }

        full_path
    }

    /// Returns the last component of this handle's absolute path. If this handle refers to the root
    /// of its worktree, then this method will return the name of the worktree itself.
    fn file_name<'a>(&'a self, cx: &'a AppContext) -> &'a OsStr {
        self.path
            .file_name()
            .unwrap_or_else(|| OsStr::new(&self.worktree.read(cx).root_name))
    }

    fn is_deleted(&self) -> bool {
        self.is_deleted
    }

    fn as_any(&self) -> &dyn Any {
        self
    }

    /// Serialize this file handle for transmission over RPC.
    fn to_proto(&self) -> rpc::proto::File {
        rpc::proto::File {
            worktree_id: self.worktree.id() as u64,
            entry_id: self.entry_id.to_proto(),
            path: self.path.to_string_lossy().into(),
            mtime: Some(self.mtime.into()),
            is_deleted: self.is_deleted,
        }
    }
}
1846
impl language::LocalFile for File {
    /// The file's absolute path on disk.
    fn abs_path(&self, cx: &AppContext) -> PathBuf {
        self.worktree
            .read(cx)
            .as_local()
            .unwrap()
            .abs_path
            .join(&self.path)
    }

    /// Read the file's current contents from disk on a background thread.
    fn load(&self, cx: &AppContext) -> Task<Result<String>> {
        let worktree = self.worktree.read(cx).as_local().unwrap();
        let abs_path = worktree.absolutize(&self.path);
        let fs = worktree.fs.clone();
        cx.background()
            .spawn(async move { fs.load(&abs_path).await })
    }

    /// Notify collaborators that this file's buffer was reloaded from disk.
    /// Only sent while the worktree is shared.
    fn buffer_reloaded(
        &self,
        buffer_id: u64,
        version: &clock::Global,
        fingerprint: RopeFingerprint,
        line_ending: LineEnding,
        mtime: SystemTime,
        cx: &mut AppContext,
    ) {
        let worktree = self.worktree.read(cx).as_local().unwrap();
        if let Some(project_id) = worktree.share.as_ref().map(|share| share.project_id) {
            worktree
                .client
                .send(proto::BufferReloaded {
                    project_id,
                    buffer_id,
                    version: serialize_version(version),
                    mtime: Some(mtime.into()),
                    fingerprint: serialize_fingerprint(fingerprint),
                    line_ending: serialize_line_ending(line_ending) as i32,
                })
                .log_err();
        }
    }
}
1890
1891impl File {
1892 pub fn from_proto(
1893 proto: rpc::proto::File,
1894 worktree: ModelHandle<Worktree>,
1895 cx: &AppContext,
1896 ) -> Result<Self> {
1897 let worktree_id = worktree
1898 .read(cx)
1899 .as_remote()
1900 .ok_or_else(|| anyhow!("not remote"))?
1901 .id();
1902
1903 if worktree_id.to_proto() != proto.worktree_id {
1904 return Err(anyhow!("worktree id does not match file"));
1905 }
1906
1907 Ok(Self {
1908 worktree,
1909 path: Path::new(&proto.path).into(),
1910 mtime: proto.mtime.ok_or_else(|| anyhow!("no timestamp"))?.into(),
1911 entry_id: ProjectEntryId::from_proto(proto.entry_id),
1912 is_local: false,
1913 is_deleted: proto.is_deleted,
1914 })
1915 }
1916
1917 pub fn from_dyn(file: Option<&Arc<dyn language::File>>) -> Option<&Self> {
1918 file.and_then(|f| f.as_any().downcast_ref())
1919 }
1920
1921 pub fn worktree_id(&self, cx: &AppContext) -> WorktreeId {
1922 self.worktree.read(cx).id()
1923 }
1924
1925 pub fn project_entry_id(&self, _: &AppContext) -> Option<ProjectEntryId> {
1926 if self.is_deleted {
1927 None
1928 } else {
1929 Some(self.entry_id)
1930 }
1931 }
1932}
1933
/// A single file or directory within a worktree snapshot.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct Entry {
    pub id: ProjectEntryId,
    pub kind: EntryKind,
    /// Path relative to the worktree root.
    pub path: Arc<Path>,
    /// File-system inode; used to re-associate entry ids across renames.
    pub inode: u64,
    pub mtime: SystemTime,
    pub is_symlink: bool,
    /// Whether this entry is matched by a gitignore rule.
    pub is_ignored: bool,
}
1944
/// The kind of a worktree entry.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum EntryKind {
    /// A directory that has been discovered but whose children have not
    /// been scanned yet.
    PendingDir,
    /// A fully-scanned directory.
    Dir,
    /// A file, carrying a precomputed character bag for fuzzy matching.
    File(CharBag),
}
1951
/// How a path changed between two snapshots.
#[derive(Clone, Copy, Debug)]
pub enum PathChange {
    /// The path was created.
    Added,
    /// The path was removed.
    Removed,
    /// The path's entry was modified.
    Updated,
    /// The path was created or modified; which one could not be determined
    /// (e.g. for events received before the initial scan completed).
    AddedOrUpdated,
}
1959
1960impl Entry {
1961 fn new(
1962 path: Arc<Path>,
1963 metadata: &fs::Metadata,
1964 next_entry_id: &AtomicUsize,
1965 root_char_bag: CharBag,
1966 ) -> Self {
1967 Self {
1968 id: ProjectEntryId::new(next_entry_id),
1969 kind: if metadata.is_dir {
1970 EntryKind::PendingDir
1971 } else {
1972 EntryKind::File(char_bag_for_path(root_char_bag, &path))
1973 },
1974 path,
1975 inode: metadata.inode,
1976 mtime: metadata.mtime,
1977 is_symlink: metadata.is_symlink,
1978 is_ignored: false,
1979 }
1980 }
1981
1982 pub fn is_dir(&self) -> bool {
1983 matches!(self.kind, EntryKind::Dir | EntryKind::PendingDir)
1984 }
1985
1986 pub fn is_file(&self) -> bool {
1987 matches!(self.kind, EntryKind::File(_))
1988 }
1989}
1990
1991impl sum_tree::Item for Entry {
1992 type Summary = EntrySummary;
1993
1994 fn summary(&self) -> Self::Summary {
1995 let visible_count = if self.is_ignored { 0 } else { 1 };
1996 let file_count;
1997 let visible_file_count;
1998 if self.is_file() {
1999 file_count = 1;
2000 visible_file_count = visible_count;
2001 } else {
2002 file_count = 0;
2003 visible_file_count = 0;
2004 }
2005
2006 EntrySummary {
2007 max_path: self.path.clone(),
2008 count: 1,
2009 visible_count,
2010 file_count,
2011 visible_file_count,
2012 }
2013 }
2014}
2015
2016impl sum_tree::KeyedItem for Entry {
2017 type Key = PathKey;
2018
2019 fn key(&self) -> Self::Key {
2020 PathKey(self.path.clone())
2021 }
2022}
2023
/// Aggregated statistics over a range of `Entry` items in the by-path tree.
#[derive(Clone, Debug)]
pub struct EntrySummary {
    /// The greatest path in the summarized range (entries are path-ordered).
    max_path: Arc<Path>,
    /// Total number of entries.
    count: usize,
    /// Number of non-ignored entries.
    visible_count: usize,
    /// Number of file entries.
    file_count: usize,
    /// Number of non-ignored file entries.
    visible_file_count: usize,
}
2032
2033impl Default for EntrySummary {
2034 fn default() -> Self {
2035 Self {
2036 max_path: Arc::from(Path::new("")),
2037 count: 0,
2038 visible_count: 0,
2039 file_count: 0,
2040 visible_file_count: 0,
2041 }
2042 }
2043}
2044
2045impl sum_tree::Summary for EntrySummary {
2046 type Context = ();
2047
2048 fn add_summary(&mut self, rhs: &Self, _: &()) {
2049 self.max_path = rhs.max_path.clone();
2050 self.count += rhs.count;
2051 self.visible_count += rhs.visible_count;
2052 self.file_count += rhs.file_count;
2053 self.visible_file_count += rhs.visible_file_count;
2054 }
2055}
2056
/// A lightweight record in the by-id tree, mapping an entry id to its path.
#[derive(Clone, Debug)]
struct PathEntry {
    id: ProjectEntryId,
    /// Path relative to the worktree root.
    path: Arc<Path>,
    is_ignored: bool,
    /// The scan in which this record was last updated.
    scan_id: usize,
}
2064
2065impl sum_tree::Item for PathEntry {
2066 type Summary = PathEntrySummary;
2067
2068 fn summary(&self) -> Self::Summary {
2069 PathEntrySummary { max_id: self.id }
2070 }
2071}
2072
2073impl sum_tree::KeyedItem for PathEntry {
2074 type Key = ProjectEntryId;
2075
2076 fn key(&self) -> Self::Key {
2077 self.id
2078 }
2079}
2080
/// Summary for the by-id tree: tracks the greatest entry id in a subtree.
#[derive(Clone, Debug, Default)]
struct PathEntrySummary {
    max_id: ProjectEntryId,
}
2085
2086impl sum_tree::Summary for PathEntrySummary {
2087 type Context = ();
2088
2089 fn add_summary(&mut self, summary: &Self, _: &Self::Context) {
2090 self.max_id = summary.max_id;
2091 }
2092}
2093
2094impl<'a> sum_tree::Dimension<'a, PathEntrySummary> for ProjectEntryId {
2095 fn add_summary(&mut self, summary: &'a PathEntrySummary, _: &()) {
2096 *self = summary.max_id;
2097 }
2098}
2099
/// An ordered key newtype over a worktree-relative path, used to index
/// entries in the by-path sum tree.
#[derive(Clone, Debug, Eq, PartialEq, Ord, PartialOrd)]
pub struct PathKey(Arc<Path>);
2102
2103impl Default for PathKey {
2104 fn default() -> Self {
2105 Self(Path::new("").into())
2106 }
2107}
2108
2109impl<'a> sum_tree::Dimension<'a, EntrySummary> for PathKey {
2110 fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
2111 self.0 = summary.max_path.clone();
2112 }
2113}
2114
/// Scans a local worktree on background threads, keeping `snapshot` up to
/// date as file-system events and refresh requests arrive.
struct BackgroundScanner {
    /// The authoritative snapshot, cloned into status updates.
    snapshot: Mutex<LocalSnapshot>,
    fs: Arc<dyn Fs>,
    /// Channel on which scan progress and updated snapshots are reported.
    status_updates_tx: UnboundedSender<ScanState>,
    executor: Arc<executor::Background>,
    /// Requests from the worktree to rescan specific paths; the barrier is
    /// released via a status update once the rescan completes.
    refresh_requests_rx: channel::Receiver<(Vec<PathBuf>, barrier::Sender)>,
    /// The snapshot sent with the previous status update, plus the paths
    /// changed since then; used to build change sets.
    prev_state: Mutex<(Snapshot, Vec<Arc<Path>>)>,
    finished_initial_scan: bool,
}
2124
2125impl BackgroundScanner {
2126 fn new(
2127 snapshot: LocalSnapshot,
2128 fs: Arc<dyn Fs>,
2129 status_updates_tx: UnboundedSender<ScanState>,
2130 executor: Arc<executor::Background>,
2131 refresh_requests_rx: channel::Receiver<(Vec<PathBuf>, barrier::Sender)>,
2132 ) -> Self {
2133 Self {
2134 fs,
2135 status_updates_tx,
2136 executor,
2137 refresh_requests_rx,
2138 prev_state: Mutex::new((snapshot.snapshot.clone(), Vec::new())),
2139 snapshot: Mutex::new(snapshot),
2140 finished_initial_scan: false,
2141 }
2142 }
2143
    /// Entry point for the scanner: performs the initial recursive scan of
    /// the worktree, then processes file-system events and refresh requests
    /// until either channel closes (i.e. the worktree is dropped).
    async fn run(
        &mut self,
        mut events_rx: Pin<Box<dyn Send + Stream<Item = Vec<fsevent::Event>>>>,
    ) {
        use futures::FutureExt as _;

        let (root_abs_path, root_inode) = {
            let snapshot = self.snapshot.lock();
            (
                snapshot.abs_path.clone(),
                snapshot.root_entry().map(|e| e.inode),
            )
        };

        // Populate ignores above the root.
        let ignore_stack;
        for ancestor in root_abs_path.ancestors().skip(1) {
            if let Ok(ignore) = build_gitignore(&ancestor.join(&*GITIGNORE), self.fs.as_ref()).await
            {
                self.snapshot
                    .lock()
                    .ignores_by_parent_abs_path
                    .insert(ancestor.into(), (ignore.into(), 0));
            }
        }
        {
            let mut snapshot = self.snapshot.lock();
            ignore_stack = snapshot.ignore_stack_for_abs_path(&root_abs_path, true);
            // If an ancestor's gitignore ignores the root itself, mark the
            // root entry accordingly before scanning.
            if ignore_stack.is_all() {
                if let Some(mut root_entry) = snapshot.root_entry().cloned() {
                    root_entry.is_ignored = true;
                    snapshot.insert_entry(root_entry, self.fs.as_ref());
                }
            }
        };

        // Perform an initial scan of the directory.
        let (scan_job_tx, scan_job_rx) = channel::unbounded();
        smol::block_on(scan_job_tx.send(ScanJob {
            abs_path: root_abs_path,
            path: Arc::from(Path::new("")),
            ignore_stack,
            ancestor_inodes: TreeSet::from_ordered_entries(root_inode),
            scan_queue: scan_job_tx.clone(),
        }))
        .unwrap();
        // Dropping our sender lets `scan_dirs` finish once all queued jobs
        // (and the jobs they spawn) have been processed.
        drop(scan_job_tx);
        self.scan_dirs(true, scan_job_rx).await;

        // Process any FS events that occurred while performing the initial scan.
        // For these events, update events cannot be as precise, because we didn't
        // have the previous state loaded yet.
        if let Poll::Ready(Some(events)) = futures::poll!(events_rx.next()) {
            let mut paths = events.into_iter().map(|e| e.path).collect::<Vec<_>>();
            while let Poll::Ready(Some(more_events)) = futures::poll!(events_rx.next()) {
                paths.extend(more_events.into_iter().map(|e| e.path));
            }
            self.process_events(paths).await;
        }

        self.finished_initial_scan = true;

        // Continue processing events until the worktree is dropped.
        loop {
            select_biased! {
                // Process any path refresh requests from the worktree. Prioritize
                // these before handling changes reported by the filesystem.
                request = self.refresh_requests_rx.recv().fuse() => {
                    let Ok((paths, barrier)) = request else { break };
                    self.reload_entries_for_paths(paths, None).await;
                    if !self.send_status_update(false, Some(barrier)) {
                        break;
                    }
                }

                events = events_rx.next().fuse() => {
                    let Some(events) = events else { break };
                    // Batch up any further event groups that are already queued.
                    let mut paths = events.into_iter().map(|e| e.path).collect::<Vec<_>>();
                    while let Poll::Ready(Some(more_events)) = futures::poll!(events_rx.next()) {
                        paths.extend(more_events.into_iter().map(|e| e.path));
                    }
                    self.process_events(paths).await;
                }
            }
        }
    }
2230
    /// Handle a batch of file-system events: reload the affected entries,
    /// record which paths changed for the next change set, and scan any
    /// directories the reload discovered.
    async fn process_events(&mut self, paths: Vec<PathBuf>) {
        let (scan_job_tx, scan_job_rx) = channel::unbounded();
        if let Some(mut paths) = self
            .reload_entries_for_paths(paths, Some(scan_job_tx.clone()))
            .await
        {
            paths.sort_unstable();
            util::extend_sorted(&mut self.prev_state.lock().1, paths, usize::MAX, Ord::cmp);
        }
        // Dropping our sender lets `scan_dirs` finish once all queued jobs
        // (and the jobs they spawn) have been processed.
        drop(scan_job_tx);
        self.scan_dirs(false, scan_job_rx).await;
    }
2243
    /// Drain `scan_jobs_rx` across a pool of workers (one per CPU),
    /// recursively scanning directories, then recompute ignore statuses and
    /// finalize the scan with a status update.
    async fn scan_dirs(
        &self,
        enable_progress_updates: bool,
        scan_jobs_rx: channel::Receiver<ScanJob>,
    ) {
        use futures::FutureExt as _;

        // If the receiving side is gone, the worktree was dropped; bail out.
        if self
            .status_updates_tx
            .unbounded_send(ScanState::Started)
            .is_err()
        {
            return;
        }

        let progress_update_count = AtomicUsize::new(0);
        self.executor
            .scoped(|scope| {
                for _ in 0..self.executor.num_cpus() {
                    scope.spawn(async {
                        let mut last_progress_update_count = 0;
                        let progress_update_timer = self.progress_timer(enable_progress_updates).fuse();
                        futures::pin_mut!(progress_update_timer);

                        loop {
                            select_biased! {
                                // Process any path refresh requests before moving on to process
                                // the scan queue, so that user operations are prioritized.
                                request = self.refresh_requests_rx.recv().fuse() => {
                                    let Ok((paths, barrier)) = request else { break };
                                    self.reload_entries_for_paths(paths, None).await;
                                    if !self.send_status_update(false, Some(barrier)) {
                                        return;
                                    }
                                }

                                // Send periodic progress updates to the worktree. Use an atomic counter
                                // to ensure that only one of the workers sends a progress update after
                                // the update interval elapses.
                                _ = progress_update_timer => {
                                    match progress_update_count.compare_exchange(
                                        last_progress_update_count,
                                        last_progress_update_count + 1,
                                        SeqCst,
                                        SeqCst
                                    ) {
                                        Ok(_) => {
                                            last_progress_update_count += 1;
                                            self.send_status_update(true, None);
                                        }
                                        Err(count) => {
                                            last_progress_update_count = count;
                                        }
                                    }
                                    progress_update_timer.set(self.progress_timer(enable_progress_updates).fuse());
                                }

                                // Recursively load directories from the file system.
                                job = scan_jobs_rx.recv().fuse() => {
                                    let Ok(job) = job else { break };
                                    if let Err(err) = self.scan_dir(&job).await {
                                        // Errors on the root itself are surfaced elsewhere.
                                        if job.path.as_ref() != Path::new("") {
                                            log::error!("error scanning directory {:?}: {}", job.abs_path, err);
                                        }
                                    }
                                }
                            }
                        }
                    })
                }
            })
            .await;

        // Second phase: recompute ignore statuses, again on a worker pool.
        let (ignore_queue_tx, ignore_queue_rx) = channel::unbounded();
        let snapshot = self.update_ignore_statuses(ignore_queue_tx);
        self.executor
            .scoped(|scope| {
                for _ in 0..self.executor.num_cpus() {
                    scope.spawn(async {
                        loop {
                            select_biased! {
                                // Process any path refresh requests before moving on to process
                                // the queue of ignore statuses.
                                request = self.refresh_requests_rx.recv().fuse() => {
                                    let Ok((paths, barrier)) = request else { break };
                                    self.reload_entries_for_paths(paths, None).await;
                                    if !self.send_status_update(false, Some(barrier)) {
                                        return;
                                    }
                                }

                                job = ignore_queue_rx.recv().fuse() => {
                                    let Ok(job) = job else { break };
                                    self.update_ignore_status(job, &snapshot).await;
                                }
                            }
                        }
                    });
                }
            })
            .await;

        // Finalize: drop repositories whose `.git` directories no longer
        // exist, clear the id-reuse table, and mark the scan complete.
        let mut snapshot = self.snapshot.lock();
        let mut git_repositories = mem::take(&mut snapshot.git_repositories);
        git_repositories.retain(|repo| snapshot.entry_for_path(&repo.git_dir_path).is_some());
        snapshot.git_repositories = git_repositories;
        snapshot.removed_entry_ids.clear();
        snapshot.completed_scan_id = snapshot.scan_id;
        drop(snapshot);

        self.send_status_update(false, None);
    }
2356
2357 fn send_status_update(&self, scanning: bool, barrier: Option<barrier::Sender>) -> bool {
2358 let mut prev_state = self.prev_state.lock();
2359 let snapshot = self.snapshot.lock().clone();
2360 let mut old_snapshot = snapshot.snapshot.clone();
2361 mem::swap(&mut old_snapshot, &mut prev_state.0);
2362 let changed_paths = mem::take(&mut prev_state.1);
2363 let changes = self.build_change_set(&old_snapshot, &snapshot.snapshot, changed_paths);
2364 self.status_updates_tx
2365 .unbounded_send(ScanState::Updated {
2366 snapshot,
2367 changes,
2368 scanning,
2369 barrier,
2370 })
2371 .is_ok()
2372 }
2373
    /// Scans one directory: builds an `Entry` for every child, commits them to
    /// the snapshot via `populate_dir`, and enqueues `ScanJob`s for any
    /// subdirectories.
    ///
    /// Errors reading individual children are logged and skipped; only failure
    /// to read the directory itself is returned as `Err`.
    async fn scan_dir(&self, job: &ScanJob) -> Result<()> {
        let mut new_entries: Vec<Entry> = Vec::new();
        // One slot per directory entry in `new_entries`; `None` marks a
        // directory we will NOT recurse into (symlink cycle).
        let mut new_jobs: Vec<Option<ScanJob>> = Vec::new();
        let mut ignore_stack = job.ignore_stack.clone();
        let mut new_ignore = None;
        // Copy what we need out of the snapshot, then release the lock before
        // doing any filesystem I/O.
        let (root_abs_path, root_char_bag, next_entry_id) = {
            let snapshot = self.snapshot.lock();
            (
                snapshot.abs_path().clone(),
                snapshot.root_char_bag,
                snapshot.next_entry_id.clone(),
            )
        };
        let mut child_paths = self.fs.read_dir(&job.abs_path).await?;
        while let Some(child_abs_path) = child_paths.next().await {
            let child_abs_path: Arc<Path> = match child_abs_path {
                Ok(child_abs_path) => child_abs_path.into(),
                Err(error) => {
                    log::error!("error processing entry {:?}", error);
                    continue;
                }
            };

            let child_name = child_abs_path.file_name().unwrap();
            let child_path: Arc<Path> = job.path.join(child_name).into();
            let child_metadata = match self.fs.metadata(&child_abs_path).await {
                Ok(Some(metadata)) => metadata,
                // File vanished between readdir and stat; skip it.
                Ok(None) => continue,
                Err(err) => {
                    log::error!("error processing {:?}: {:?}", child_abs_path, err);
                    continue;
                }
            };

            // If we find a .gitignore, add it to the stack of ignores used to determine which paths are ignored
            if child_name == *GITIGNORE {
                match build_gitignore(&child_abs_path, self.fs.as_ref()).await {
                    Ok(ignore) => {
                        let ignore = Arc::new(ignore);
                        ignore_stack = ignore_stack.append(job.abs_path.clone(), ignore.clone());
                        new_ignore = Some(ignore);
                    }
                    Err(error) => {
                        log::error!(
                            "error loading .gitignore file {:?} - {:?}",
                            child_name,
                            error
                        );
                    }
                }

                // Update the ignore status of any child entries we've already
                // processed to reflect the ignore file in the current directory.
                // Because `.gitignore` starts with a `.`, it sorts near the start
                // of the listing, so there are rarely many earlier entries to
                // revisit. Update the ignore stack of any queued jobs as well.
                let mut new_jobs = new_jobs.iter_mut();
                for entry in &mut new_entries {
                    let entry_abs_path = root_abs_path.join(&entry.path);
                    entry.is_ignored =
                        ignore_stack.is_abs_path_ignored(&entry_abs_path, entry.is_dir());

                    // `new_jobs` and directory entries are paired up in
                    // insertion order, so each dir entry consumes one job slot.
                    if entry.is_dir() {
                        if let Some(job) = new_jobs.next().expect("Missing scan job for entry") {
                            job.ignore_stack = if entry.is_ignored {
                                IgnoreStack::all()
                            } else {
                                ignore_stack.clone()
                            };
                        }
                    }
                }
            }

            let mut child_entry = Entry::new(
                child_path.clone(),
                &child_metadata,
                &next_entry_id,
                root_char_bag,
            );

            if child_entry.is_dir() {
                let is_ignored = ignore_stack.is_abs_path_ignored(&child_abs_path, true);
                child_entry.is_ignored = is_ignored;

                // Avoid recursing until crash in the case of a recursive symlink
                if !job.ancestor_inodes.contains(&child_entry.inode) {
                    let mut ancestor_inodes = job.ancestor_inodes.clone();
                    ancestor_inodes.insert(child_entry.inode);

                    new_jobs.push(Some(ScanJob {
                        abs_path: child_abs_path,
                        path: child_path,
                        ignore_stack: if is_ignored {
                            IgnoreStack::all()
                        } else {
                            ignore_stack.clone()
                        },
                        ancestor_inodes,
                        scan_queue: job.scan_queue.clone(),
                    }));
                } else {
                    // Keep the slot so job/entry pairing stays aligned.
                    new_jobs.push(None);
                }
            } else {
                child_entry.is_ignored = ignore_stack.is_abs_path_ignored(&child_abs_path, false);
            }

            new_entries.push(child_entry);
        }

        self.snapshot.lock().populate_dir(
            job.path.clone(),
            new_entries,
            new_ignore,
            self.fs.as_ref(),
        );

        // Enqueue subdirectory scans only after the parent dir is committed.
        for new_job in new_jobs {
            if let Some(new_job) = new_job {
                job.scan_queue.send(new_job).await.unwrap();
            }
        }

        Ok(())
    }
2499
    /// Re-stats the given absolute paths and updates the snapshot to match the
    /// filesystem, returning the affected worktree-relative paths.
    ///
    /// When `scan_queue_tx` is `Some`, changed directories are also enqueued
    /// for a recursive rescan. Returns `None` only if the worktree root can no
    /// longer be canonicalized.
    async fn reload_entries_for_paths(
        &self,
        mut abs_paths: Vec<PathBuf>,
        scan_queue_tx: Option<Sender<ScanJob>>,
    ) -> Option<Vec<Arc<Path>>> {
        let doing_recursive_update = scan_queue_tx.is_some();

        // After sorting, any path nested under an earlier path is redundant
        // (dedup_by drops `a` when the predicate holds).
        abs_paths.sort_unstable();
        abs_paths.dedup_by(|a, b| a.starts_with(&b));

        let root_abs_path = self.snapshot.lock().abs_path.clone();
        let root_canonical_path = self.fs.canonicalize(&root_abs_path).await.log_err()?;
        // Stat all paths concurrently before taking the snapshot lock.
        let metadata = futures::future::join_all(
            abs_paths
                .iter()
                .map(|abs_path| self.fs.metadata(&abs_path))
                .collect::<Vec<_>>(),
        )
        .await;

        let mut snapshot = self.snapshot.lock();

        // Bump the scan id; a recursive update leaves `completed_scan_id`
        // behind until the rescan finishes.
        if snapshot.completed_scan_id == snapshot.scan_id {
            snapshot.scan_id += 1;
            if !doing_recursive_update {
                snapshot.completed_scan_id = snapshot.scan_id;
            }
        }

        // Remove any entries for paths that no longer exist or are being recursively
        // refreshed. Do this before adding any new entries, so that renames can be
        // detected regardless of the order of the paths.
        let mut event_paths = Vec::<Arc<Path>>::with_capacity(abs_paths.len());
        for (abs_path, metadata) in abs_paths.iter().zip(metadata.iter()) {
            if let Ok(path) = abs_path.strip_prefix(&root_canonical_path) {
                if matches!(metadata, Ok(None)) || doing_recursive_update {
                    snapshot.remove_path(path);
                }
                event_paths.push(path.into());
            } else {
                log::error!(
                    "unexpected event {:?} for root path {:?}",
                    abs_path,
                    root_canonical_path
                );
            }
        }

        // NOTE(review): this zip assumes `event_paths` and `metadata` are
        // index-aligned; if any abs_path above failed `strip_prefix` (and was
        // therefore not pushed), subsequent pairs would be misaligned — verify
        // whether that case can occur in practice.
        for (path, metadata) in event_paths.iter().cloned().zip(metadata.into_iter()) {
            let abs_path: Arc<Path> = root_abs_path.join(&path).into();

            match metadata {
                Ok(Some(metadata)) => {
                    let ignore_stack =
                        snapshot.ignore_stack_for_abs_path(&abs_path, metadata.is_dir);
                    let mut fs_entry = Entry::new(
                        path.clone(),
                        &metadata,
                        snapshot.next_entry_id.as_ref(),
                        snapshot.root_char_bag,
                    );
                    fs_entry.is_ignored = ignore_stack.is_all();
                    snapshot.insert_entry(fs_entry, self.fs.as_ref());

                    // A change inside a `.git` directory invalidates that
                    // repository's index.
                    let scan_id = snapshot.scan_id;
                    if let Some(repo) = snapshot.repo_with_dot_git_containing(&path) {
                        repo.repo.lock().reload_index();
                        repo.scan_id = scan_id;
                    }

                    if let Some(scan_queue_tx) = &scan_queue_tx {
                        // Guard against symlink cycles before queueing a
                        // recursive scan of this directory.
                        let mut ancestor_inodes = snapshot.ancestor_inodes_for_path(&path);
                        if metadata.is_dir && !ancestor_inodes.contains(&metadata.inode) {
                            ancestor_inodes.insert(metadata.inode);
                            smol::block_on(scan_queue_tx.send(ScanJob {
                                abs_path,
                                path,
                                ignore_stack,
                                ancestor_inodes,
                                scan_queue: scan_queue_tx.clone(),
                            }))
                            .unwrap();
                        }
                    }
                }
                // Path was removed; deletion already handled above.
                Ok(None) => {}
                Err(err) => {
                    // TODO - create a special 'error' entry in the entries tree to mark this
                    log::error!("error reading file on event {:?}", err);
                }
            }
        }

        Some(event_paths)
    }
2595
    /// Determines which directories need their gitignore status recomputed and
    /// enqueues an `UpdateIgnoreStatusJob` for each, returning the snapshot
    /// the workers should compute against.
    ///
    /// `.gitignore` files that no longer exist are dropped from both the local
    /// snapshot copy and the shared snapshot.
    fn update_ignore_statuses(
        &self,
        ignore_queue_tx: Sender<UpdateIgnoreStatusJob>,
    ) -> LocalSnapshot {
        let mut snapshot = self.snapshot.lock().clone();
        let mut ignores_to_update = Vec::new();
        let mut ignores_to_delete = Vec::new();
        for (parent_abs_path, (_, scan_id)) in &snapshot.ignores_by_parent_abs_path {
            if let Ok(parent_path) = parent_abs_path.strip_prefix(&snapshot.abs_path) {
                // Re-applied only if this ignore file changed in the current
                // scan and its directory still exists.
                if *scan_id == snapshot.scan_id && snapshot.entry_for_path(parent_path).is_some() {
                    ignores_to_update.push(parent_abs_path.clone());
                }

                let ignore_path = parent_path.join(&*GITIGNORE);
                if snapshot.entry_for_path(ignore_path).is_none() {
                    ignores_to_delete.push(parent_abs_path.clone());
                }
            }
        }

        for parent_abs_path in ignores_to_delete {
            snapshot.ignores_by_parent_abs_path.remove(&parent_abs_path);
            self.snapshot
                .lock()
                .ignores_by_parent_abs_path
                .remove(&parent_abs_path);
        }

        // Sort, then skip any path nested under a path already being updated:
        // updating a directory's ignore status covers its whole subtree.
        ignores_to_update.sort_unstable();
        let mut ignores_to_update = ignores_to_update.into_iter().peekable();
        while let Some(parent_abs_path) = ignores_to_update.next() {
            while ignores_to_update
                .peek()
                .map_or(false, |p| p.starts_with(&parent_abs_path))
            {
                ignores_to_update.next().unwrap();
            }

            let ignore_stack = snapshot.ignore_stack_for_abs_path(&parent_abs_path, true);
            smol::block_on(ignore_queue_tx.send(UpdateIgnoreStatusJob {
                abs_path: parent_abs_path,
                ignore_stack,
                ignore_queue: ignore_queue_tx.clone(),
            }))
            .unwrap();
        }

        snapshot
    }
2645
    /// Recomputes the ignore status of every direct child of `job.abs_path`,
    /// enqueueing follow-up jobs for subdirectories so the whole subtree is
    /// eventually covered.
    ///
    /// Entries whose status changed are written back into the shared snapshot.
    async fn update_ignore_status(&self, job: UpdateIgnoreStatusJob, snapshot: &LocalSnapshot) {
        let mut ignore_stack = job.ignore_stack;
        // If this directory itself contains a .gitignore, its rules apply to
        // the children as well.
        if let Some((ignore, _)) = snapshot.ignores_by_parent_abs_path.get(&job.abs_path) {
            ignore_stack = ignore_stack.append(job.abs_path.clone(), ignore.clone());
        }

        let mut entries_by_id_edits = Vec::new();
        let mut entries_by_path_edits = Vec::new();
        let path = job.abs_path.strip_prefix(&snapshot.abs_path).unwrap();
        for mut entry in snapshot.child_entries(path).cloned() {
            let was_ignored = entry.is_ignored;
            let abs_path = snapshot.abs_path().join(&entry.path);
            entry.is_ignored = ignore_stack.is_abs_path_ignored(&abs_path, entry.is_dir());
            if entry.is_dir() {
                // An ignored directory short-circuits everything below it.
                let child_ignore_stack = if entry.is_ignored {
                    IgnoreStack::all()
                } else {
                    ignore_stack.clone()
                };
                job.ignore_queue
                    .send(UpdateIgnoreStatusJob {
                        abs_path: abs_path.into(),
                        ignore_stack: child_ignore_stack,
                        ignore_queue: job.ignore_queue.clone(),
                    })
                    .await
                    .unwrap();
            }

            // Only stage edits for entries whose status actually flipped.
            if entry.is_ignored != was_ignored {
                let mut path_entry = snapshot.entries_by_id.get(&entry.id, &()).unwrap().clone();
                path_entry.scan_id = snapshot.scan_id;
                path_entry.is_ignored = entry.is_ignored;
                entries_by_id_edits.push(Edit::Insert(path_entry));
                entries_by_path_edits.push(Edit::Insert(entry));
            }
        }

        // Apply all edits under a single lock acquisition.
        let mut snapshot = self.snapshot.lock();
        snapshot.entries_by_path.edit(entries_by_path_edits, &());
        snapshot.entries_by_id.edit(entries_by_id_edits, &());
    }
2688
    /// Diffs two snapshots at the given event paths, classifying each affected
    /// entry as `Added`, `Removed`, `Updated`, or `AddedOrUpdated`.
    ///
    /// Walks both entry trees in parallel with a cursor per snapshot, seeking
    /// to each event path and advancing until leaving that path's subtree.
    fn build_change_set(
        &self,
        old_snapshot: &Snapshot,
        new_snapshot: &Snapshot,
        event_paths: Vec<Arc<Path>>,
    ) -> HashMap<Arc<Path>, PathChange> {
        use PathChange::{Added, AddedOrUpdated, Removed, Updated};

        let mut changes = HashMap::default();
        let mut old_paths = old_snapshot.entries_by_path.cursor::<PathKey>();
        let mut new_paths = new_snapshot.entries_by_path.cursor::<PathKey>();
        let received_before_initialized = !self.finished_initial_scan;

        for path in event_paths {
            let path = PathKey(path);
            old_paths.seek(&path, Bias::Left, &());
            new_paths.seek(&path, Bias::Left, &());

            loop {
                match (old_paths.item(), new_paths.item()) {
                    (Some(old_entry), Some(new_entry)) => {
                        // Stop once both cursors have moved past the event
                        // path and out of its subtree.
                        if old_entry.path > path.0
                            && new_entry.path > path.0
                            && !old_entry.path.starts_with(&path.0)
                            && !new_entry.path.starts_with(&path.0)
                        {
                            break;
                        }

                        // Classic merge-walk: compare the two cursors' paths
                        // to decide which side(s) to advance.
                        match Ord::cmp(&old_entry.path, &new_entry.path) {
                            Ordering::Less => {
                                changes.insert(old_entry.path.clone(), Removed);
                                old_paths.next(&());
                            }
                            Ordering::Equal => {
                                if received_before_initialized {
                                    // If the worktree was not fully initialized when this event was generated,
                                    // we can't know whether this entry was added during the scan or whether
                                    // it was merely updated.
                                    changes.insert(new_entry.path.clone(), AddedOrUpdated);
                                } else if old_entry.mtime != new_entry.mtime {
                                    changes.insert(new_entry.path.clone(), Updated);
                                }
                                old_paths.next(&());
                                new_paths.next(&());
                            }
                            Ordering::Greater => {
                                changes.insert(new_entry.path.clone(), Added);
                                new_paths.next(&());
                            }
                        }
                    }
                    (Some(old_entry), None) => {
                        changes.insert(old_entry.path.clone(), Removed);
                        old_paths.next(&());
                    }
                    (None, Some(new_entry)) => {
                        changes.insert(new_entry.path.clone(), Added);
                        new_paths.next(&());
                    }
                    (None, None) => break,
                }
            }
        }
        changes
    }
2755
2756 async fn progress_timer(&self, running: bool) {
2757 if !running {
2758 return futures::future::pending().await;
2759 }
2760
2761 #[cfg(any(test, feature = "test-support"))]
2762 if self.fs.is_fake() {
2763 return self.executor.simulate_random_delay().await;
2764 }
2765
2766 smol::Timer::after(Duration::from_millis(100)).await;
2767 }
2768}
2769
2770fn char_bag_for_path(root_char_bag: CharBag, path: &Path) -> CharBag {
2771 let mut result = root_char_bag;
2772 result.extend(
2773 path.to_string_lossy()
2774 .chars()
2775 .map(|c| c.to_ascii_lowercase()),
2776 );
2777 result
2778}
2779
/// A unit of work for the background scanner: recursively scan one directory.
struct ScanJob {
    // Absolute path of the directory to scan.
    abs_path: Arc<Path>,
    // Same directory, relative to the worktree root.
    path: Arc<Path>,
    // Gitignore rules in effect for this directory's children.
    ignore_stack: Arc<IgnoreStack>,
    // Queue on which jobs for subdirectories are sent.
    scan_queue: Sender<ScanJob>,
    // Inodes of ancestor directories, used to detect symlink cycles.
    ancestor_inodes: TreeSet<u64>,
}
2787
/// A unit of work for recomputing gitignore status: one directory whose
/// children must be re-evaluated against `ignore_stack`.
struct UpdateIgnoreStatusJob {
    // Absolute path of the directory whose children are re-evaluated.
    abs_path: Arc<Path>,
    // Gitignore rules in effect above this directory.
    ignore_stack: Arc<IgnoreStack>,
    // Queue on which follow-up jobs for subdirectories are sent.
    ignore_queue: Sender<UpdateIgnoreStatusJob>,
}
2793
/// Test-support extension methods for worktree model handles.
pub trait WorktreeHandle {
    /// Waits until all pending filesystem events for the worktree have been
    /// processed, so tests observe a settled state.
    #[cfg(any(test, feature = "test-support"))]
    fn flush_fs_events<'a>(
        &self,
        cx: &'a gpui::TestAppContext,
    ) -> futures::future::LocalBoxFuture<'a, ()>;
}
2801
impl WorktreeHandle for ModelHandle<Worktree> {
    // The worktree's FS event stream sometimes delivers "redundant" events for FS changes that
    // occurred before the worktree was constructed. These events can cause the worktree to perform
    // extra directory scans, and emit extra scan-state notifications.
    //
    // This function mutates the worktree's directory and waits for those mutations to be picked up,
    // to ensure that all redundant FS events have already been processed.
    #[cfg(any(test, feature = "test-support"))]
    fn flush_fs_events<'a>(
        &self,
        cx: &'a gpui::TestAppContext,
    ) -> futures::future::LocalBoxFuture<'a, ()> {
        use smol::future::FutureExt;

        let filename = "fs-event-sentinel";
        let tree = self.clone();
        let (fs, root_path) = self.read_with(cx, |tree, _| {
            let tree = tree.as_local().unwrap();
            (tree.fs.clone(), tree.abs_path().clone())
        });

        async move {
            // Create a sentinel file and wait for it to appear in the tree;
            // everything queued before it must have been processed by then.
            fs.create_file(&root_path.join(filename), Default::default())
                .await
                .unwrap();
            tree.condition(cx, |tree, _| tree.entry_for_path(filename).is_some())
                .await;

            // Then delete it and wait for the deletion to be observed too.
            fs.remove_file(&root_path.join(filename), Default::default())
                .await
                .unwrap();
            tree.condition(cx, |tree, _| tree.entry_for_path(filename).is_none())
                .await;

            cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
                .await;
        }
        .boxed_local()
    }
}
2842
/// Running totals accumulated while seeking through the entry tree; used as
/// the cursor dimension for `Traversal`.
#[derive(Clone, Debug)]
struct TraversalProgress<'a> {
    // Greatest entry path seen so far.
    max_path: &'a Path,
    // Total entries traversed (files and dirs, including ignored).
    count: usize,
    // Entries traversed that are not ignored.
    visible_count: usize,
    // Files traversed (including ignored files).
    file_count: usize,
    // Files traversed that are not ignored.
    visible_file_count: usize,
}
2851
2852impl<'a> TraversalProgress<'a> {
2853 fn count(&self, include_dirs: bool, include_ignored: bool) -> usize {
2854 match (include_ignored, include_dirs) {
2855 (true, true) => self.count,
2856 (true, false) => self.file_count,
2857 (false, true) => self.visible_count,
2858 (false, false) => self.visible_file_count,
2859 }
2860 }
2861}
2862
impl<'a> sum_tree::Dimension<'a, EntrySummary> for TraversalProgress<'a> {
    // Folds a subtree summary into the running totals as the cursor advances.
    fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
        self.max_path = summary.max_path.as_ref();
        self.count += summary.count;
        self.visible_count += summary.visible_count;
        self.file_count += summary.file_count;
        self.visible_file_count += summary.visible_file_count;
    }
}
2872
2873impl<'a> Default for TraversalProgress<'a> {
2874 fn default() -> Self {
2875 Self {
2876 max_path: Path::new(""),
2877 count: 0,
2878 visible_count: 0,
2879 file_count: 0,
2880 visible_file_count: 0,
2881 }
2882 }
2883}
2884
/// An iterator-like cursor over a snapshot's entries, optionally filtering
/// out directories and/or ignored entries.
pub struct Traversal<'a> {
    // Cursor into the entry tree, measuring progress counts.
    cursor: sum_tree::Cursor<'a, Entry, TraversalProgress<'a>>,
    // Whether ignored entries are yielded.
    include_ignored: bool,
    // Whether directory entries are yielded.
    include_dirs: bool,
}
2890
impl<'a> Traversal<'a> {
    /// Advances to the next entry matching this traversal's filters.
    /// Returns `false` when the traversal is exhausted.
    pub fn advance(&mut self) -> bool {
        self.advance_to_offset(self.offset() + 1)
    }

    /// Seeks forward until `offset` filtered entries have been passed.
    /// Returns `false` when the traversal is exhausted.
    pub fn advance_to_offset(&mut self, offset: usize) -> bool {
        self.cursor.seek_forward(
            &TraversalTarget::Count {
                count: offset,
                include_dirs: self.include_dirs,
                include_ignored: self.include_ignored,
            },
            Bias::Right,
            &(),
        )
    }

    /// Skips past the current entry's entire subtree, landing on the next
    /// entry (matching the filters) that is not a descendant of it.
    pub fn advance_to_sibling(&mut self) -> bool {
        while let Some(entry) = self.cursor.item() {
            self.cursor.seek_forward(
                &TraversalTarget::PathSuccessor(&entry.path),
                Bias::Left,
                &(),
            );
            // Keep seeking until we land on an entry that the filters admit.
            if let Some(entry) = self.cursor.item() {
                if (self.include_dirs || !entry.is_dir())
                    && (self.include_ignored || !entry.is_ignored)
                {
                    return true;
                }
            }
        }
        false
    }

    /// The entry the cursor currently points at, if any.
    pub fn entry(&self) -> Option<&'a Entry> {
        self.cursor.item()
    }

    /// Number of filtered entries preceding the cursor position.
    pub fn offset(&self) -> usize {
        self.cursor
            .start()
            .count(self.include_dirs, self.include_ignored)
    }
}
2936
2937impl<'a> Iterator for Traversal<'a> {
2938 type Item = &'a Entry;
2939
2940 fn next(&mut self) -> Option<Self::Item> {
2941 if let Some(item) = self.entry() {
2942 self.advance();
2943 Some(item)
2944 } else {
2945 None
2946 }
2947 }
2948}
2949
/// A seek target for `Traversal`'s cursor.
#[derive(Debug)]
enum TraversalTarget<'a> {
    // Seek to this exact path.
    Path(&'a Path),
    // Seek to the first entry after this path that is not inside its subtree.
    PathSuccessor(&'a Path),
    // Seek until the filtered entry count reaches `count`.
    Count {
        count: usize,
        include_ignored: bool,
        include_dirs: bool,
    },
}
2960
impl<'a, 'b> SeekTarget<'a, EntrySummary, TraversalProgress<'a>> for TraversalTarget<'b> {
    // Orders this target against the cursor's accumulated progress, driving
    // the sum-tree binary search.
    fn cmp(&self, cursor_location: &TraversalProgress<'a>, _: &()) -> Ordering {
        match self {
            TraversalTarget::Path(path) => path.cmp(&cursor_location.max_path),
            TraversalTarget::PathSuccessor(path) => {
                // "Equal" as soon as we've escaped `path`'s subtree, so the
                // seek stops at the first entry outside it.
                if !cursor_location.max_path.starts_with(path) {
                    Ordering::Equal
                } else {
                    Ordering::Greater
                }
            }
            TraversalTarget::Count {
                count,
                include_dirs,
                include_ignored,
            } => Ord::cmp(
                count,
                &cursor_location.count(*include_dirs, *include_ignored),
            ),
        }
    }
}
2983
/// Iterates over the direct children of `parent_path`, skipping each child's
/// own subtree via `Traversal::advance_to_sibling`.
struct ChildEntriesIter<'a> {
    // Directory whose immediate children are yielded.
    parent_path: &'a Path,
    // Underlying traversal, positioned at the first candidate child.
    traversal: Traversal<'a>,
}
2988
2989impl<'a> Iterator for ChildEntriesIter<'a> {
2990 type Item = &'a Entry;
2991
2992 fn next(&mut self) -> Option<Self::Item> {
2993 if let Some(item) = self.traversal.entry() {
2994 if item.path.starts_with(&self.parent_path) {
2995 self.traversal.advance_to_sibling();
2996 return Some(item);
2997 }
2998 }
2999 None
3000 }
3001}
3002
impl<'a> From<&'a Entry> for proto::Entry {
    // Serializes a worktree entry into its wire representation. The path is
    // converted lossily to UTF-8.
    fn from(entry: &'a Entry) -> Self {
        Self {
            id: entry.id.to_proto(),
            is_dir: entry.is_dir(),
            path: entry.path.to_string_lossy().into(),
            inode: entry.inode,
            mtime: Some(entry.mtime.into()),
            is_symlink: entry.is_symlink,
            is_ignored: entry.is_ignored,
        }
    }
}
3016
3017impl<'a> TryFrom<(&'a CharBag, proto::Entry)> for Entry {
3018 type Error = anyhow::Error;
3019
3020 fn try_from((root_char_bag, entry): (&'a CharBag, proto::Entry)) -> Result<Self> {
3021 if let Some(mtime) = entry.mtime {
3022 let kind = if entry.is_dir {
3023 EntryKind::Dir
3024 } else {
3025 let mut char_bag = *root_char_bag;
3026 char_bag.extend(entry.path.chars().map(|c| c.to_ascii_lowercase()));
3027 EntryKind::File(char_bag)
3028 };
3029 let path: Arc<Path> = PathBuf::from(entry.path).into();
3030 Ok(Entry {
3031 id: ProjectEntryId::from_proto(entry.id),
3032 kind,
3033 path,
3034 inode: entry.inode,
3035 mtime: mtime.into(),
3036 is_symlink: entry.is_symlink,
3037 is_ignored: entry.is_ignored,
3038 })
3039 } else {
3040 Err(anyhow!(
3041 "missing mtime in remote worktree entry {:?}",
3042 entry.path
3043 ))
3044 }
3045 }
3046}
3047
3048#[cfg(test)]
3049mod tests {
3050 use super::*;
3051 use fs::repository::FakeGitRepository;
3052 use fs::{FakeFs, RealFs};
3053 use gpui::{executor::Deterministic, TestAppContext};
3054 use rand::prelude::*;
3055 use serde_json::json;
3056 use std::{env, fmt::Write};
3057 use util::http::FakeHttpClient;
3058
3059 use util::test::temp_tree;
3060
    /// Verifies that traversal respects gitignore filtering: `a/b` is hidden
    /// when ignored entries are excluded and shown when they are included.
    #[gpui::test]
    async fn test_traversal(cx: &mut TestAppContext) {
        let fs = FakeFs::new(cx.background());
        fs.insert_tree(
            "/root",
            json!({
                ".gitignore": "a/b\n",
                "a": {
                    "b": "",
                    "c": "",
                }
            }),
        )
        .await;

        let http_client = FakeHttpClient::with_404_response();
        let client = cx.read(|cx| Client::new(http_client, cx));

        let tree = Worktree::local(
            client,
            Path::new("/root"),
            true,
            fs,
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;

        tree.read_with(cx, |tree, _| {
            // Excluding ignored entries: `a/b` should be absent.
            assert_eq!(
                tree.entries(false)
                    .map(|entry| entry.path.as_ref())
                    .collect::<Vec<_>>(),
                vec![
                    Path::new(""),
                    Path::new(".gitignore"),
                    Path::new("a"),
                    Path::new("a/c"),
                ]
            );
            // Including ignored entries: `a/b` reappears.
            assert_eq!(
                tree.entries(true)
                    .map(|entry| entry.path.as_ref())
                    .collect::<Vec<_>>(),
                vec![
                    Path::new(""),
                    Path::new(".gitignore"),
                    Path::new("a"),
                    Path::new("a/b"),
                    Path::new("a/c"),
                ]
            );
        })
    }
3118
    /// Verifies the scanner terminates on circular symlinks (each cycle entry
    /// is listed once, not recursed into) and picks up renames of such links.
    #[gpui::test(iterations = 10)]
    async fn test_circular_symlinks(executor: Arc<Deterministic>, cx: &mut TestAppContext) {
        let fs = FakeFs::new(cx.background());
        fs.insert_tree(
            "/root",
            json!({
                "lib": {
                    "a": {
                        "a.txt": ""
                    },
                    "b": {
                        "b.txt": ""
                    }
                }
            }),
        )
        .await;
        // Symlinks pointing back at an ancestor create cycles.
        fs.insert_symlink("/root/lib/a/lib", "..".into()).await;
        fs.insert_symlink("/root/lib/b/lib", "..".into()).await;

        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
        let tree = Worktree::local(
            client,
            Path::new("/root"),
            true,
            fs.clone(),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();

        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;

        tree.read_with(cx, |tree, _| {
            assert_eq!(
                tree.entries(false)
                    .map(|entry| entry.path.as_ref())
                    .collect::<Vec<_>>(),
                vec![
                    Path::new(""),
                    Path::new("lib"),
                    Path::new("lib/a"),
                    Path::new("lib/a/a.txt"),
                    Path::new("lib/a/lib"),
                    Path::new("lib/b"),
                    Path::new("lib/b/b.txt"),
                    Path::new("lib/b/lib"),
                ]
            );
        });

        // Renaming a cyclic symlink should be reflected after the rescan.
        fs.rename(
            Path::new("/root/lib/a/lib"),
            Path::new("/root/lib/a/lib-2"),
            Default::default(),
        )
        .await
        .unwrap();
        executor.run_until_parked();
        tree.read_with(cx, |tree, _| {
            assert_eq!(
                tree.entries(false)
                    .map(|entry| entry.path.as_ref())
                    .collect::<Vec<_>>(),
                vec![
                    Path::new(""),
                    Path::new("lib"),
                    Path::new("lib/a"),
                    Path::new("lib/a/a.txt"),
                    Path::new("lib/a/lib-2"),
                    Path::new("lib/b"),
                    Path::new("lib/b/b.txt"),
                    Path::new("lib/b/lib"),
                ]
            );
        });
    }
3198
    /// Verifies gitignore status on the real filesystem, both for entries
    /// present at the initial scan and for files created afterwards,
    /// including rules inherited from an ancestor `.gitignore`.
    #[gpui::test]
    async fn test_rescan_with_gitignore(cx: &mut TestAppContext) {
        let parent_dir = temp_tree(json!({
            ".gitignore": "ancestor-ignored-file1\nancestor-ignored-file2\n",
            "tree": {
                ".git": {},
                ".gitignore": "ignored-dir\n",
                "tracked-dir": {
                    "tracked-file1": "",
                    "ancestor-ignored-file1": "",
                },
                "ignored-dir": {
                    "ignored-file1": ""
                }
            }
        }));
        let dir = parent_dir.path().join("tree");

        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));

        let tree = Worktree::local(
            client,
            dir.as_path(),
            true,
            Arc::new(RealFs),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;
        tree.flush_fs_events(cx).await;
        // Statuses from the initial scan.
        cx.read(|cx| {
            let tree = tree.read(cx);
            assert!(
                !tree
                    .entry_for_path("tracked-dir/tracked-file1")
                    .unwrap()
                    .is_ignored
            );
            assert!(
                tree.entry_for_path("tracked-dir/ancestor-ignored-file1")
                    .unwrap()
                    .is_ignored
            );
            assert!(
                tree.entry_for_path("ignored-dir/ignored-file1")
                    .unwrap()
                    .is_ignored
            );
        });

        // Create new files after the scan and check they are classified the
        // same way once the FS events are processed.
        std::fs::write(dir.join("tracked-dir/tracked-file2"), "").unwrap();
        std::fs::write(dir.join("tracked-dir/ancestor-ignored-file2"), "").unwrap();
        std::fs::write(dir.join("ignored-dir/ignored-file2"), "").unwrap();
        tree.flush_fs_events(cx).await;
        cx.read(|cx| {
            let tree = tree.read(cx);
            assert!(
                !tree
                    .entry_for_path("tracked-dir/tracked-file2")
                    .unwrap()
                    .is_ignored
            );
            assert!(
                tree.entry_for_path("tracked-dir/ancestor-ignored-file2")
                    .unwrap()
                    .is_ignored
            );
            assert!(
                tree.entry_for_path("ignored-dir/ignored-file2")
                    .unwrap()
                    .is_ignored
            );
            assert!(tree.entry_for_path(".git").unwrap().is_ignored);
        });
    }
3277
    /// Verifies repository discovery: the nearest enclosing `.git` wins,
    /// changes inside `.git` bump the repo's scan id, and deleting `.git`
    /// removes the repository mapping.
    #[gpui::test]
    async fn test_git_repository_for_path(cx: &mut TestAppContext) {
        let root = temp_tree(json!({
            "dir1": {
                ".git": {},
                "deps": {
                    "dep1": {
                        ".git": {},
                        "src": {
                            "a.txt": ""
                        }
                    }
                },
                "src": {
                    "b.txt": ""
                }
            },
            "c.txt": "",
        }));

        let http_client = FakeHttpClient::with_404_response();
        let client = cx.read(|cx| Client::new(http_client, cx));
        let tree = Worktree::local(
            client,
            root.path(),
            true,
            Arc::new(RealFs),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();

        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;
        tree.flush_fs_events(cx).await;

        tree.read_with(cx, |tree, _cx| {
            let tree = tree.as_local().unwrap();

            // Outside any repository.
            assert!(tree.repo_for("c.txt".as_ref()).is_none());

            let repo = tree.repo_for("dir1/src/b.txt".as_ref()).unwrap();
            assert_eq!(repo.content_path.as_ref(), Path::new("dir1"));
            assert_eq!(repo.git_dir_path.as_ref(), Path::new("dir1/.git"));

            // Nested repo takes precedence over the outer one.
            let repo = tree.repo_for("dir1/deps/dep1/src/a.txt".as_ref()).unwrap();
            assert_eq!(repo.content_path.as_ref(), Path::new("dir1/deps/dep1"));
            assert_eq!(repo.git_dir_path.as_ref(), Path::new("dir1/deps/dep1/.git"),);
        });

        let original_scan_id = tree.read_with(cx, |tree, _cx| {
            let tree = tree.as_local().unwrap();
            tree.repo_for("dir1/src/b.txt".as_ref()).unwrap().scan_id
        });

        // Touching a file under `.git` must bump the repository's scan id.
        std::fs::write(root.path().join("dir1/.git/random_new_file"), "hello").unwrap();
        tree.flush_fs_events(cx).await;

        tree.read_with(cx, |tree, _cx| {
            let tree = tree.as_local().unwrap();
            let new_scan_id = tree.repo_for("dir1/src/b.txt".as_ref()).unwrap().scan_id;
            assert_ne!(
                original_scan_id, new_scan_id,
                "original {original_scan_id}, new {new_scan_id}"
            );
        });

        // Removing `.git` removes the repository association.
        std::fs::remove_dir_all(root.path().join("dir1/.git")).unwrap();
        tree.flush_fs_events(cx).await;

        tree.read_with(cx, |tree, _cx| {
            let tree = tree.as_local().unwrap();

            assert!(tree.repo_for("dir1/src/b.txt".as_ref()).is_none());
        });
    }
3355
    /// Verifies `LocalWorktree::changed_repos` keeps deleted, updated, and
    /// added repositories while dropping unchanged ones.
    #[test]
    fn test_changed_repos() {
        // Builds a GitRepositoryEntry around a fake repo for the given
        // `.git` path and scan id.
        fn fake_entry(git_dir_path: impl AsRef<Path>, scan_id: usize) -> GitRepositoryEntry {
            GitRepositoryEntry {
                repo: Arc::new(Mutex::new(FakeGitRepository::default())),
                scan_id,
                content_path: git_dir_path.as_ref().parent().unwrap().into(),
                git_dir_path: git_dir_path.as_ref().into(),
            }
        }

        let prev_repos: Vec<GitRepositoryEntry> = vec![
            fake_entry("/.git", 0),
            fake_entry("/a/.git", 0),
            fake_entry("/a/b/.git", 0),
        ];

        let new_repos: Vec<GitRepositoryEntry> = vec![
            fake_entry("/a/.git", 1),
            fake_entry("/a/b/.git", 0),
            fake_entry("/a/c/.git", 0),
        ];

        let res = LocalWorktree::changed_repos(&prev_repos, &new_repos);

        // Deletion retained
        assert!(res
            .iter()
            .find(|repo| repo.git_dir_path.as_ref() == Path::new("/.git") && repo.scan_id == 0)
            .is_some());

        // Update retained
        assert!(res
            .iter()
            .find(|repo| repo.git_dir_path.as_ref() == Path::new("/a/.git") && repo.scan_id == 1)
            .is_some());

        // Addition retained
        assert!(res
            .iter()
            .find(|repo| repo.git_dir_path.as_ref() == Path::new("/a/c/.git") && repo.scan_id == 0)
            .is_some());

        // No change: not retained
        assert!(res
            .iter()
            .find(|repo| repo.git_dir_path.as_ref() == Path::new("/a/b/.git") && repo.scan_id == 0)
            .is_none());
    }
3405
    /// Verifies that `write_file` creates entries and that their gitignore
    /// status is derived from the target directory.
    #[gpui::test]
    async fn test_write_file(cx: &mut TestAppContext) {
        let dir = temp_tree(json!({
            ".git": {},
            ".gitignore": "ignored-dir\n",
            "tracked-dir": {},
            "ignored-dir": {}
        }));

        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));

        let tree = Worktree::local(
            client,
            dir.path(),
            true,
            Arc::new(RealFs),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;
        tree.flush_fs_events(cx).await;

        // Write one file into a tracked dir and one into an ignored dir.
        tree.update(cx, |tree, cx| {
            tree.as_local().unwrap().write_file(
                Path::new("tracked-dir/file.txt"),
                "hello".into(),
                Default::default(),
                cx,
            )
        })
        .await
        .unwrap();
        tree.update(cx, |tree, cx| {
            tree.as_local().unwrap().write_file(
                Path::new("ignored-dir/file.txt"),
                "world".into(),
                Default::default(),
                cx,
            )
        })
        .await
        .unwrap();

        tree.read_with(cx, |tree, _| {
            let tracked = tree.entry_for_path("tracked-dir/file.txt").unwrap();
            let ignored = tree.entry_for_path("ignored-dir/file.txt").unwrap();
            assert!(!tracked.is_ignored);
            assert!(ignored.is_ignored);
        });
    }
3459
3460 #[gpui::test(iterations = 30)]
3461 async fn test_create_directory(cx: &mut TestAppContext) {
3462 let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
3463
3464 let fs = FakeFs::new(cx.background());
3465 fs.insert_tree(
3466 "/root",
3467 json!({
3468 "b": {},
3469 "c": {},
3470 "d": {},
3471 }),
3472 )
3473 .await;
3474
3475 let tree = Worktree::local(
3476 client,
3477 "/root".as_ref(),
3478 true,
3479 fs,
3480 Default::default(),
3481 &mut cx.to_async(),
3482 )
3483 .await
3484 .unwrap();
3485
3486 let entry = tree
3487 .update(cx, |tree, cx| {
3488 tree.as_local_mut()
3489 .unwrap()
3490 .create_entry("a/e".as_ref(), true, cx)
3491 })
3492 .await
3493 .unwrap();
3494 assert!(entry.is_dir());
3495
3496 cx.foreground().run_until_parked();
3497
3498 tree.read_with(cx, |tree, _| {
3499 assert_eq!(tree.entry_for_path("a/e").unwrap().kind, EntryKind::Dir);
3500 });
3501 }
3502
3503 #[gpui::test(iterations = 100)]
3504 async fn test_random_worktree_changes(cx: &mut TestAppContext, mut rng: StdRng) {
3505 let operations = env::var("OPERATIONS")
3506 .map(|o| o.parse().unwrap())
3507 .unwrap_or(40);
3508 let initial_entries = env::var("INITIAL_ENTRIES")
3509 .map(|o| o.parse().unwrap())
3510 .unwrap_or(20);
3511
3512 let root_dir = Path::new("/test");
3513 let fs = FakeFs::new(cx.background()) as Arc<dyn Fs>;
3514 fs.as_fake().insert_tree(root_dir, json!({})).await;
3515 for _ in 0..initial_entries {
3516 randomly_mutate_tree(&fs, root_dir, 1.0, &mut rng).await;
3517 }
3518 log::info!("generated initial tree");
3519
3520 let next_entry_id = Arc::new(AtomicUsize::default());
3521 let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
3522 let worktree = Worktree::local(
3523 client.clone(),
3524 root_dir,
3525 true,
3526 fs.clone(),
3527 next_entry_id.clone(),
3528 &mut cx.to_async(),
3529 )
3530 .await
3531 .unwrap();
3532
3533 worktree
3534 .update(cx, |tree, _| tree.as_local_mut().unwrap().scan_complete())
3535 .await;
3536
3537 // After the initial scan is complete, the `UpdatedEntries` event can
3538 // be used to follow along with all changes to the worktree's snapshot.
3539 worktree.update(cx, |tree, cx| {
3540 let mut paths = tree
3541 .as_local()
3542 .unwrap()
3543 .paths()
3544 .cloned()
3545 .collect::<Vec<_>>();
3546
3547 cx.subscribe(&worktree, move |tree, _, event, _| {
3548 if let Event::UpdatedEntries(changes) = event {
3549 for (path, change_type) in changes.iter() {
3550 let path = path.clone();
3551 let ix = match paths.binary_search(&path) {
3552 Ok(ix) | Err(ix) => ix,
3553 };
3554 match change_type {
3555 PathChange::Added => {
3556 assert_ne!(paths.get(ix), Some(&path));
3557 paths.insert(ix, path);
3558 }
3559 PathChange::Removed => {
3560 assert_eq!(paths.get(ix), Some(&path));
3561 paths.remove(ix);
3562 }
3563 PathChange::Updated => {
3564 assert_eq!(paths.get(ix), Some(&path));
3565 }
3566 PathChange::AddedOrUpdated => {
3567 if paths[ix] != path {
3568 paths.insert(ix, path);
3569 }
3570 }
3571 }
3572 }
3573 let new_paths = tree.paths().cloned().collect::<Vec<_>>();
3574 assert_eq!(paths, new_paths, "incorrect changes: {:?}", changes);
3575 }
3576 })
3577 .detach();
3578 });
3579
3580 let mut snapshots = Vec::new();
3581 let mut mutations_len = operations;
3582 while mutations_len > 1 {
3583 randomly_mutate_tree(&fs, root_dir, 1.0, &mut rng).await;
3584 let buffered_event_count = fs.as_fake().buffered_event_count().await;
3585 if buffered_event_count > 0 && rng.gen_bool(0.3) {
3586 let len = rng.gen_range(0..=buffered_event_count);
3587 log::info!("flushing {} events", len);
3588 fs.as_fake().flush_events(len).await;
3589 } else {
3590 randomly_mutate_tree(&fs, root_dir, 0.6, &mut rng).await;
3591 mutations_len -= 1;
3592 }
3593
3594 cx.foreground().run_until_parked();
3595 if rng.gen_bool(0.2) {
3596 log::info!("storing snapshot {}", snapshots.len());
3597 let snapshot =
3598 worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
3599 snapshots.push(snapshot);
3600 }
3601 }
3602
3603 log::info!("quiescing");
3604 fs.as_fake().flush_events(usize::MAX).await;
3605 cx.foreground().run_until_parked();
3606 let snapshot = worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
3607 snapshot.check_invariants();
3608
3609 {
3610 let new_worktree = Worktree::local(
3611 client.clone(),
3612 root_dir,
3613 true,
3614 fs.clone(),
3615 next_entry_id,
3616 &mut cx.to_async(),
3617 )
3618 .await
3619 .unwrap();
3620 new_worktree
3621 .update(cx, |tree, _| tree.as_local_mut().unwrap().scan_complete())
3622 .await;
3623 let new_snapshot =
3624 new_worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
3625 assert_eq!(snapshot.to_vec(true), new_snapshot.to_vec(true));
3626 }
3627
3628 for (i, mut prev_snapshot) in snapshots.into_iter().enumerate() {
3629 let include_ignored = rng.gen::<bool>();
3630 if !include_ignored {
3631 let mut entries_by_path_edits = Vec::new();
3632 let mut entries_by_id_edits = Vec::new();
3633 for entry in prev_snapshot
3634 .entries_by_id
3635 .cursor::<()>()
3636 .filter(|e| e.is_ignored)
3637 {
3638 entries_by_path_edits.push(Edit::Remove(PathKey(entry.path.clone())));
3639 entries_by_id_edits.push(Edit::Remove(entry.id));
3640 }
3641
3642 prev_snapshot
3643 .entries_by_path
3644 .edit(entries_by_path_edits, &());
3645 prev_snapshot.entries_by_id.edit(entries_by_id_edits, &());
3646 }
3647
3648 let update = snapshot.build_update(&prev_snapshot, 0, 0, include_ignored);
3649 prev_snapshot.apply_remote_update(update.clone()).unwrap();
3650 assert_eq!(
3651 prev_snapshot.to_vec(include_ignored),
3652 snapshot.to_vec(include_ignored),
3653 "wrong update for snapshot {i}. update: {:?}",
3654 update
3655 );
3656 }
3657 }
3658
    /// Applies one random mutation to the fake filesystem under `root_path`:
    /// creating a file or directory, writing a `.gitignore`, renaming, or
    /// deleting. `insertion_probability` biases the choice toward creation,
    /// which is how the caller seeds an initial tree (probability 1.0).
    async fn randomly_mutate_tree(
        fs: &Arc<dyn Fs>,
        root_path: &Path,
        insertion_probability: f64,
        rng: &mut impl Rng,
    ) {
        // Partition the existing paths under the root into files and dirs.
        // `dirs` always contains at least `root_path` itself.
        let mut files = Vec::new();
        let mut dirs = Vec::new();
        for path in fs.as_fake().paths() {
            if path.starts_with(root_path) {
                if fs.is_file(&path).await {
                    files.push(path);
                } else {
                    dirs.push(path);
                }
            }
        }

        // If the tree is empty apart from the root, insertion is the only
        // valid mutation, so force it regardless of the probability.
        if (files.is_empty() && dirs.len() == 1) || rng.gen_bool(insertion_probability) {
            let path = dirs.choose(rng).unwrap();
            let new_path = path.join(gen_name(rng));

            if rng.gen() {
                log::info!(
                    "creating dir {:?}",
                    new_path.strip_prefix(root_path).unwrap()
                );
                fs.create_dir(&new_path).await.unwrap();
            } else {
                log::info!(
                    "creating file {:?}",
                    new_path.strip_prefix(root_path).unwrap()
                );
                fs.create_file(&new_path, Default::default()).await.unwrap();
            }
        } else if rng.gen_bool(0.05) {
            // Rarely: write a `.gitignore` into a random directory, listing a
            // random subset of the paths beneath it (relative to that dir).
            let ignore_dir_path = dirs.choose(rng).unwrap();
            let ignore_path = ignore_dir_path.join(&*GITIGNORE);

            // Both filters match `ignore_dir_path` itself too, so `subdirs`
            // is never empty and the `gen_range` below cannot panic.
            let subdirs = dirs
                .iter()
                .filter(|d| d.starts_with(&ignore_dir_path))
                .cloned()
                .collect::<Vec<_>>();
            let subfiles = files
                .iter()
                .filter(|d| d.starts_with(&ignore_dir_path))
                .cloned()
                .collect::<Vec<_>>();
            let files_to_ignore = {
                let len = rng.gen_range(0..=subfiles.len());
                subfiles.choose_multiple(rng, len)
            };
            // NOTE(review): the dir range is exclusive (`0..len`) while the
            // file range is inclusive — presumably to keep the directory
            // holding the `.gitignore` from always being ignorable; confirm.
            let dirs_to_ignore = {
                let len = rng.gen_range(0..subdirs.len());
                subdirs.choose_multiple(rng, len)
            };

            // One pattern per line, relative to the ignore file's directory.
            let mut ignore_contents = String::new();
            for path_to_ignore in files_to_ignore.chain(dirs_to_ignore) {
                writeln!(
                    ignore_contents,
                    "{}",
                    path_to_ignore
                        .strip_prefix(&ignore_dir_path)
                        .unwrap()
                        .to_str()
                        .unwrap()
                )
                .unwrap();
            }
            log::info!(
                "creating gitignore {:?} with contents:\n{}",
                ignore_path.strip_prefix(&root_path).unwrap(),
                ignore_contents
            );
            fs.save(
                &ignore_path,
                &ignore_contents.as_str().into(),
                Default::default(),
            )
            .await
            .unwrap();
        } else {
            // Pick an existing path to rename or delete. `dirs[1..]` skips
            // the first directory — presumably the root, which must never be
            // renamed or removed; verify `paths()` yields the root first.
            // The insertion branch above guarantees this chain is non-empty.
            let old_path = {
                let file_path = files.choose(rng);
                let dir_path = dirs[1..].choose(rng);
                file_path.into_iter().chain(dir_path).choose(rng).unwrap()
            };

            let is_rename = rng.gen();
            if is_rename {
                // Choose a destination parent that is not inside the path
                // being moved (renaming into one's own subtree is invalid).
                let new_path_parent = dirs
                    .iter()
                    .filter(|d| !d.starts_with(old_path))
                    .choose(rng)
                    .unwrap();

                // Occasionally replace an existing directory outright instead
                // of moving underneath it.
                let overwrite_existing_dir =
                    !old_path.starts_with(&new_path_parent) && rng.gen_bool(0.3);
                let new_path = if overwrite_existing_dir {
                    fs.remove_dir(
                        &new_path_parent,
                        RemoveOptions {
                            recursive: true,
                            ignore_if_not_exists: true,
                        },
                    )
                    .await
                    .unwrap();
                    new_path_parent.to_path_buf()
                } else {
                    new_path_parent.join(gen_name(rng))
                };

                log::info!(
                    "renaming {:?} to {}{:?}",
                    old_path.strip_prefix(&root_path).unwrap(),
                    if overwrite_existing_dir {
                        "overwrite "
                    } else {
                        ""
                    },
                    new_path.strip_prefix(&root_path).unwrap()
                );
                fs.rename(
                    &old_path,
                    &new_path,
                    fs::RenameOptions {
                        overwrite: true,
                        ignore_if_exists: true,
                    },
                )
                .await
                .unwrap();
            } else if fs.is_file(&old_path).await {
                log::info!(
                    "deleting file {:?}",
                    old_path.strip_prefix(&root_path).unwrap()
                );
                fs.remove_file(old_path, Default::default()).await.unwrap();
            } else {
                log::info!(
                    "deleting dir {:?}",
                    old_path.strip_prefix(&root_path).unwrap()
                );
                fs.remove_dir(
                    &old_path,
                    RemoveOptions {
                        recursive: true,
                        ignore_if_not_exists: true,
                    },
                )
                .await
                .unwrap();
            }
        }
    }
3817
3818 fn gen_name(rng: &mut impl Rng) -> String {
3819 (0..6)
3820 .map(|_| rng.sample(rand::distributions::Alphanumeric))
3821 .map(char::from)
3822 .collect()
3823 }
3824
3825 impl LocalSnapshot {
3826 fn check_invariants(&self) {
3827 let mut files = self.files(true, 0);
3828 let mut visible_files = self.files(false, 0);
3829 for entry in self.entries_by_path.cursor::<()>() {
3830 if entry.is_file() {
3831 assert_eq!(files.next().unwrap().inode, entry.inode);
3832 if !entry.is_ignored {
3833 assert_eq!(visible_files.next().unwrap().inode, entry.inode);
3834 }
3835 }
3836 }
3837 assert!(files.next().is_none());
3838 assert!(visible_files.next().is_none());
3839
3840 let mut bfs_paths = Vec::new();
3841 let mut stack = vec![Path::new("")];
3842 while let Some(path) = stack.pop() {
3843 bfs_paths.push(path);
3844 let ix = stack.len();
3845 for child_entry in self.child_entries(path) {
3846 stack.insert(ix, &child_entry.path);
3847 }
3848 }
3849
3850 let dfs_paths_via_iter = self
3851 .entries_by_path
3852 .cursor::<()>()
3853 .map(|e| e.path.as_ref())
3854 .collect::<Vec<_>>();
3855 assert_eq!(bfs_paths, dfs_paths_via_iter);
3856
3857 let dfs_paths_via_traversal = self
3858 .entries(true)
3859 .map(|e| e.path.as_ref())
3860 .collect::<Vec<_>>();
3861 assert_eq!(dfs_paths_via_traversal, dfs_paths_via_iter);
3862
3863 for ignore_parent_abs_path in self.ignores_by_parent_abs_path.keys() {
3864 let ignore_parent_path =
3865 ignore_parent_abs_path.strip_prefix(&self.abs_path).unwrap();
3866 assert!(self.entry_for_path(&ignore_parent_path).is_some());
3867 assert!(self
3868 .entry_for_path(ignore_parent_path.join(&*GITIGNORE))
3869 .is_some());
3870 }
3871 }
3872
3873 fn to_vec(&self, include_ignored: bool) -> Vec<(&Path, u64, bool)> {
3874 let mut paths = Vec::new();
3875 for entry in self.entries_by_path.cursor::<()>() {
3876 if include_ignored || !entry.is_ignored {
3877 paths.push((entry.path.as_ref(), entry.inode, entry.is_ignored));
3878 }
3879 }
3880 paths.sort_by(|a, b| a.0.cmp(b.0));
3881 paths
3882 }
3883 }
3884}