1use crate::{
2 copy_recursive, ignore::IgnoreStack, DiagnosticSummary, ProjectEntryId, RemoveOptions,
3};
4use ::ignore::gitignore::{Gitignore, GitignoreBuilder};
5use anyhow::{anyhow, Context, Result};
6use client::{proto, Client};
7use clock::ReplicaId;
8use collections::{HashMap, VecDeque};
9use fs::{repository::GitRepository, Fs, LineEnding};
10use futures::{
11 channel::{
12 mpsc::{self, UnboundedSender},
13 oneshot,
14 },
15 select_biased,
16 task::Poll,
17 Stream, StreamExt,
18};
19use fuzzy::CharBag;
20use git::{DOT_GIT, GITIGNORE};
21use gpui::{executor, AppContext, AsyncAppContext, Entity, ModelContext, ModelHandle, Task};
22use language::{
23 proto::{
24 deserialize_fingerprint, deserialize_version, serialize_fingerprint, serialize_line_ending,
25 serialize_version,
26 },
27 Buffer, DiagnosticEntry, File as _, PointUtf16, Rope, RopeFingerprint, Unclipped,
28};
29use parking_lot::Mutex;
30use postage::{
31 barrier,
32 prelude::{Sink as _, Stream as _},
33 watch,
34};
35use smol::channel::{self, Sender};
36use std::{
37 any::Any,
38 cmp::{self, Ordering},
39 convert::TryFrom,
40 ffi::OsStr,
41 fmt,
42 future::Future,
43 mem,
44 ops::{Deref, DerefMut},
45 path::{Path, PathBuf},
46 pin::Pin,
47 sync::{
48 atomic::{AtomicUsize, Ordering::SeqCst},
49 Arc,
50 },
51 time::{Duration, SystemTime},
52};
53use sum_tree::{Bias, Edit, SeekTarget, SumTree, TreeMap, TreeSet};
54use util::{paths::HOME, ResultExt, TryFutureExt};
55
/// Identifies a worktree within a project. Derived from the `model_id` of the
/// `Worktree` model that owns it (see `WorktreeId::from_usize` usage below).
#[derive(Copy, Clone, PartialEq, Eq, Debug, Hash, PartialOrd, Ord)]
pub struct WorktreeId(usize);
58
/// A tree of files tracked by a project: either rooted on the local
/// filesystem, or mirrored from a remote collaborator over RPC.
pub enum Worktree {
    Local(LocalWorktree),
    Remote(RemoteWorktree),
}
63
/// A worktree backed by the local filesystem, kept up to date by a
/// background scanner task.
pub struct LocalWorktree {
    // Current snapshot of the tree; replaced wholesale by the scanner via `set_snapshot`.
    snapshot: LocalSnapshot,
    // Requests the scanner to (re)scan specific paths; the barrier sender is
    // dropped by the scanner when the scan of those paths has completed.
    path_changes_tx: channel::Sender<(Vec<PathBuf>, barrier::Sender)>,
    // Watch channel pair tracking whether a scan is currently in progress.
    is_scanning: (watch::Sender<bool>, watch::Receiver<bool>),
    _background_scanner_task: Task<()>,
    // Present while the worktree is shared with collaborators.
    share: Option<ShareState>,
    // Full diagnostics per worktree-relative path.
    diagnostics: HashMap<Arc<Path>, Vec<DiagnosticEntry<Unclipped<PointUtf16>>>>,
    // Per-path error/warning counts, kept in sync with `diagnostics`.
    diagnostic_summaries: TreeMap<PathKey, DiagnosticSummary>,
    client: Arc<Client>,
    fs: Arc<dyn Fs>,
    // Whether this worktree is shown in the project panel.
    visible: bool,
}
76
/// A worktree mirrored from a remote host. Updates arrive as
/// `proto::UpdateWorktree` messages and are applied to `background_snapshot`,
/// then copied into `snapshot` on the foreground.
pub struct RemoteWorktree {
    // Foreground copy of the latest applied snapshot.
    snapshot: Snapshot,
    // Snapshot mutated by the background task that applies remote updates.
    background_snapshot: Arc<Mutex<Snapshot>>,
    project_id: u64,
    client: Arc<Client>,
    // `None` once the host has disconnected; incoming updates are then dropped.
    updates_tx: Option<UnboundedSender<proto::UpdateWorktree>>,
    // Waiters ordered by scan id, resolved when that scan has been observed.
    snapshot_subscriptions: VecDeque<(usize, oneshot::Sender<()>)>,
    replica_id: ReplicaId,
    diagnostic_summaries: TreeMap<PathKey, DiagnosticSummary>,
    visible: bool,
    disconnected: bool,
}
89
/// An immutable view of a worktree's entries at a point in time. Entries are
/// indexed both by path and by id so lookups in either direction are cheap.
#[derive(Clone)]
pub struct Snapshot {
    id: WorktreeId,
    abs_path: Arc<Path>,
    // File name of the root, used for display and fuzzy matching.
    root_name: String,
    root_char_bag: CharBag,
    entries_by_path: SumTree<Entry>,
    entries_by_id: SumTree<PathEntry>,

    /// A number that increases every time the worktree begins scanning
    /// a set of paths from the filesystem. This scanning could be caused
    /// by some operation performed on the worktree, such as reading or
    /// writing a file, or by an event reported by the filesystem.
    scan_id: usize,

    /// The latest scan id that has completed, and whose preceding scans
    /// have all completed. The current `scan_id` could be more than one
    /// greater than the `completed_scan_id` if operations are performed
    /// on the worktree while it is processing a file-system event.
    completed_scan_id: usize,
}
111
/// A git repository discovered within the worktree.
#[derive(Clone)]
pub struct GitRepositoryEntry {
    // Handle to the underlying repository; shared and lazily locked.
    pub(crate) repo: Arc<Mutex<dyn GitRepository>>,

    // Scan id at which this repository entry was last updated.
    pub(crate) scan_id: usize,
    // Path to folder containing the .git file or directory
    pub(crate) content_path: Arc<Path>,
    // Path to the actual .git folder.
    // Note: if .git is a file, this points to the folder indicated by the .git file
    pub(crate) git_dir_path: Arc<Path>,
}
123
124impl std::fmt::Debug for GitRepositoryEntry {
125 fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
126 f.debug_struct("GitRepositoryEntry")
127 .field("content_path", &self.content_path)
128 .field("git_dir_path", &self.git_dir_path)
129 .finish()
130 }
131}
132
/// A `Snapshot` plus the extra bookkeeping only the local scanner needs.
#[derive(Debug)]
pub struct LocalSnapshot {
    // Parsed `.gitignore` per parent directory, with the scan id at which it was loaded.
    ignores_by_parent_abs_path: HashMap<Arc<Path>, (Arc<Gitignore>, usize)>,
    // Git repositories found under this worktree.
    git_repositories: Vec<GitRepositoryEntry>,
    // Maps an inode-derived key to the entry id it had before removal, so a
    // quickly-recreated file keeps a stable `ProjectEntryId`.
    removed_entry_ids: HashMap<u64, ProjectEntryId>,
    next_entry_id: Arc<AtomicUsize>,
    snapshot: Snapshot,
}
141
142impl Clone for LocalSnapshot {
143 fn clone(&self) -> Self {
144 Self {
145 ignores_by_parent_abs_path: self.ignores_by_parent_abs_path.clone(),
146 git_repositories: self.git_repositories.iter().cloned().collect(),
147 removed_entry_ids: self.removed_entry_ids.clone(),
148 next_entry_id: self.next_entry_id.clone(),
149 snapshot: self.snapshot.clone(),
150 }
151 }
152}
153
// Allow a `LocalSnapshot` to be used anywhere a `&Snapshot` is expected.
impl Deref for LocalSnapshot {
    type Target = Snapshot;

    fn deref(&self) -> &Self::Target {
        &self.snapshot
    }
}
161
// Mutable counterpart of the `Deref` impl above.
impl DerefMut for LocalSnapshot {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.snapshot
    }
}
167
/// Messages sent from the background scanner to the foreground worktree.
enum ScanState {
    /// A scan pass has begun.
    Started,
    /// The scanner produced a new snapshot.
    Updated {
        snapshot: LocalSnapshot,
        // Paths whose entries changed in this pass.
        changes: HashMap<Arc<Path>, PathChange>,
        // When present, dropping it unblocks a caller waiting in `refresh_entry`
        // or `delete_entry` for its requested paths to be rescanned.
        barrier: Option<barrier::Sender>,
        // Whether more updates from the same scan pass are still coming.
        scanning: bool,
    },
}
177
/// State kept while a local worktree is shared with collaborators.
struct ShareState {
    project_id: u64,
    // Publishes each new snapshot to the background task that diffs and sends updates.
    snapshots_tx: watch::Sender<LocalSnapshot>,
    // Signals the update-sending task to retry after a failed request.
    resume_updates: watch::Sender<()>,
    _maintain_remote_snapshot: Task<Option<()>>,
}
184
/// Events emitted by a `Worktree` model.
pub enum Event {
    /// Entries changed; maps each changed path to the kind of change.
    UpdatedEntries(HashMap<Arc<Path>, PathChange>),
    /// Git repositories were added, removed, or rescanned.
    UpdatedGitRepositories(Vec<GitRepositoryEntry>),
}
189
// Registers `Worktree` as a gpui model that emits `Event`s.
impl Entity for Worktree {
    type Event = Event;
}
193
194impl Worktree {
    /// Creates a worktree rooted at `path` on the local filesystem.
    ///
    /// Stats the root to determine whether it is a file or directory, then
    /// constructs the model and spawns two tasks: a foreground task that
    /// applies scanner state to the model, and a background scanner that
    /// watches the filesystem and produces snapshots.
    pub async fn local(
        client: Arc<Client>,
        path: impl Into<Arc<Path>>,
        visible: bool,
        fs: Arc<dyn Fs>,
        next_entry_id: Arc<AtomicUsize>,
        cx: &mut AsyncAppContext,
    ) -> Result<ModelHandle<Self>> {
        // After determining whether the root entry is a file or a directory, populate the
        // snapshot's "root name", which will be used for the purpose of fuzzy matching.
        let abs_path = path.into();
        let metadata = fs
            .metadata(&abs_path)
            .await
            .context("failed to stat worktree path")?;

        Ok(cx.add_model(move |cx: &mut ModelContext<Worktree>| {
            let root_name = abs_path
                .file_name()
                .map_or(String::new(), |f| f.to_string_lossy().to_string());

            let mut snapshot = LocalSnapshot {
                ignores_by_parent_abs_path: Default::default(),
                git_repositories: Default::default(),
                removed_entry_ids: Default::default(),
                next_entry_id,
                snapshot: Snapshot {
                    id: WorktreeId::from_usize(cx.model_id()),
                    abs_path: abs_path.clone(),
                    root_name: root_name.clone(),
                    root_char_bag: root_name.chars().map(|c| c.to_ascii_lowercase()).collect(),
                    entries_by_path: Default::default(),
                    entries_by_id: Default::default(),
                    // Starts at 1 so the initial scan is distinguishable from
                    // `completed_scan_id: 0` (nothing completed yet).
                    scan_id: 1,
                    completed_scan_id: 0,
                },
            };

            // Seed the snapshot with the root entry if the path exists.
            if let Some(metadata) = metadata {
                snapshot.insert_entry(
                    Entry::new(
                        Arc::from(Path::new("")),
                        &metadata,
                        &snapshot.next_entry_id,
                        snapshot.root_char_bag,
                    ),
                    fs.as_ref(),
                );
            }

            let (path_changes_tx, path_changes_rx) = channel::unbounded();
            let (scan_states_tx, mut scan_states_rx) = mpsc::unbounded();

            // Foreground task: apply each scanner message to the model. Ends
            // when the model is dropped or the scanner channel closes.
            cx.spawn_weak(|this, mut cx| async move {
                while let Some((state, this)) = scan_states_rx.next().await.zip(this.upgrade(&cx)) {
                    this.update(&mut cx, |this, cx| {
                        let this = this.as_local_mut().unwrap();
                        match state {
                            ScanState::Started => {
                                *this.is_scanning.0.borrow_mut() = true;
                            }
                            ScanState::Updated {
                                snapshot,
                                changes,
                                barrier,
                                scanning,
                            } => {
                                *this.is_scanning.0.borrow_mut() = scanning;
                                this.set_snapshot(snapshot, cx);
                                cx.emit(Event::UpdatedEntries(changes));
                                // Dropping the barrier unblocks any caller
                                // waiting for this batch of paths to be scanned.
                                drop(barrier);
                            }
                        }
                        cx.notify();
                    });
                }
            })
            .detach();

            // Background task: watch the filesystem and run the scanner loop.
            let background_scanner_task = cx.background().spawn({
                let fs = fs.clone();
                let snapshot = snapshot.clone();
                let background = cx.background().clone();
                async move {
                    let events = fs.watch(&abs_path, Duration::from_millis(100)).await;
                    BackgroundScanner::new(
                        snapshot,
                        fs,
                        scan_states_tx,
                        background,
                        path_changes_rx,
                    )
                    .run(events)
                    .await;
                }
            });

            Worktree::Local(LocalWorktree {
                snapshot,
                is_scanning: watch::channel_with(true),
                share: None,
                path_changes_tx,
                _background_scanner_task: background_scanner_task,
                diagnostics: Default::default(),
                diagnostic_summaries: Default::default(),
                client,
                fs,
                visible,
            })
        }))
    }
306
    /// Creates a worktree that mirrors a remote host's worktree.
    ///
    /// Spawns a background task that applies incoming `UpdateWorktree`
    /// messages to a shared snapshot, and a foreground task that copies the
    /// updated snapshot into the model and resolves scan-id subscriptions.
    pub fn remote(
        project_remote_id: u64,
        replica_id: ReplicaId,
        worktree: proto::WorktreeMetadata,
        client: Arc<Client>,
        cx: &mut AppContext,
    ) -> ModelHandle<Self> {
        cx.add_model(|cx: &mut ModelContext<Self>| {
            let snapshot = Snapshot {
                id: WorktreeId(worktree.id as usize),
                abs_path: Arc::from(PathBuf::from(worktree.abs_path)),
                root_name: worktree.root_name.clone(),
                root_char_bag: worktree
                    .root_name
                    .chars()
                    .map(|c| c.to_ascii_lowercase())
                    .collect(),
                entries_by_path: Default::default(),
                entries_by_id: Default::default(),
                scan_id: 1,
                completed_scan_id: 0,
            };

            let (updates_tx, mut updates_rx) = mpsc::unbounded();
            let background_snapshot = Arc::new(Mutex::new(snapshot.clone()));
            let (mut snapshot_updated_tx, mut snapshot_updated_rx) = watch::channel();

            // Background task: apply each remote update to the shared snapshot,
            // then notify the foreground task below.
            cx.background()
                .spawn({
                    let background_snapshot = background_snapshot.clone();
                    async move {
                        while let Some(update) = updates_rx.next().await {
                            if let Err(error) =
                                background_snapshot.lock().apply_remote_update(update)
                            {
                                log::error!("error applying worktree update: {}", error);
                            }
                            snapshot_updated_tx.send(()).await.ok();
                        }
                    }
                })
                .detach();

            // Foreground task: copy the background snapshot into the model and
            // wake any subscribers whose awaited scan id has now been observed.
            cx.spawn_weak(|this, mut cx| async move {
                while (snapshot_updated_rx.recv().await).is_some() {
                    if let Some(this) = this.upgrade(&cx) {
                        this.update(&mut cx, |this, cx| {
                            let this = this.as_remote_mut().unwrap();
                            this.snapshot = this.background_snapshot.lock().clone();
                            cx.emit(Event::UpdatedEntries(Default::default()));
                            cx.notify();
                            // Subscriptions are kept sorted by scan id, so stop
                            // at the first one that has not been observed yet.
                            while let Some((scan_id, _)) = this.snapshot_subscriptions.front() {
                                if this.observed_snapshot(*scan_id) {
                                    let (_, tx) = this.snapshot_subscriptions.pop_front().unwrap();
                                    let _ = tx.send(());
                                } else {
                                    break;
                                }
                            }
                        });
                    } else {
                        break;
                    }
                }
            })
            .detach();

            Worktree::Remote(RemoteWorktree {
                project_id: project_remote_id,
                replica_id,
                snapshot: snapshot.clone(),
                background_snapshot,
                updates_tx: Some(updates_tx),
                snapshot_subscriptions: Default::default(),
                client: client.clone(),
                diagnostic_summaries: Default::default(),
                visible: worktree.visible,
                disconnected: false,
            })
        })
    }
388
389 pub fn as_local(&self) -> Option<&LocalWorktree> {
390 if let Worktree::Local(worktree) = self {
391 Some(worktree)
392 } else {
393 None
394 }
395 }
396
397 pub fn as_remote(&self) -> Option<&RemoteWorktree> {
398 if let Worktree::Remote(worktree) = self {
399 Some(worktree)
400 } else {
401 None
402 }
403 }
404
405 pub fn as_local_mut(&mut self) -> Option<&mut LocalWorktree> {
406 if let Worktree::Local(worktree) = self {
407 Some(worktree)
408 } else {
409 None
410 }
411 }
412
413 pub fn as_remote_mut(&mut self) -> Option<&mut RemoteWorktree> {
414 if let Worktree::Remote(worktree) = self {
415 Some(worktree)
416 } else {
417 None
418 }
419 }
420
421 pub fn is_local(&self) -> bool {
422 matches!(self, Worktree::Local(_))
423 }
424
425 pub fn is_remote(&self) -> bool {
426 !self.is_local()
427 }
428
    /// Returns a clone of the current `Snapshot`, discarding any
    /// local-only bookkeeping.
    pub fn snapshot(&self) -> Snapshot {
        match self {
            Worktree::Local(worktree) => worktree.snapshot().snapshot,
            Worktree::Remote(worktree) => worktree.snapshot(),
        }
    }
435
    /// The id of the most recently started scan (see `Snapshot::scan_id`).
    pub fn scan_id(&self) -> usize {
        match self {
            Worktree::Local(worktree) => worktree.snapshot.scan_id,
            Worktree::Remote(worktree) => worktree.snapshot.scan_id,
        }
    }
442
    /// The latest scan id whose scan — and all preceding scans — have finished.
    pub fn completed_scan_id(&self) -> usize {
        match self {
            Worktree::Local(worktree) => worktree.snapshot.completed_scan_id,
            Worktree::Remote(worktree) => worktree.snapshot.completed_scan_id,
        }
    }
449
    /// Whether this worktree is shown in the project panel.
    pub fn is_visible(&self) -> bool {
        match self {
            Worktree::Local(worktree) => worktree.visible,
            Worktree::Remote(worktree) => worktree.visible,
        }
    }
456
    /// The replica id of this peer; the local host is always replica 0.
    pub fn replica_id(&self) -> ReplicaId {
        match self {
            Worktree::Local(_) => 0,
            Worktree::Remote(worktree) => worktree.replica_id,
        }
    }
463
464 pub fn diagnostic_summaries(
465 &self,
466 ) -> impl Iterator<Item = (Arc<Path>, DiagnosticSummary)> + '_ {
467 match self {
468 Worktree::Local(worktree) => &worktree.diagnostic_summaries,
469 Worktree::Remote(worktree) => &worktree.diagnostic_summaries,
470 }
471 .iter()
472 .map(|(path, summary)| (path.0.clone(), *summary))
473 }
474
    /// The absolute path of the worktree root on the owning host.
    pub fn abs_path(&self) -> Arc<Path> {
        match self {
            Worktree::Local(worktree) => worktree.abs_path.clone(),
            Worktree::Remote(worktree) => worktree.abs_path.clone(),
        }
    }
481}
482
483impl LocalWorktree {
    /// Whether `path` (absolute) lies within this worktree's root.
    pub fn contains_abs_path(&self, path: &Path) -> bool {
        path.starts_with(&self.abs_path)
    }
487
488 fn absolutize(&self, path: &Path) -> PathBuf {
489 if path.file_name().is_some() {
490 self.abs_path.join(path)
491 } else {
492 self.abs_path.to_path_buf()
493 }
494 }
495
    /// Loads the file at `path` into a new `Buffer` model, including its
    /// git index text (if any) as the diff base.
    pub(crate) fn load_buffer(
        &mut self,
        path: &Path,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<ModelHandle<Buffer>>> {
        let path = Arc::from(path);
        cx.spawn(move |this, mut cx| async move {
            let (file, contents, diff_base) = this
                .update(&mut cx, |t, cx| t.as_local().unwrap().load(&path, cx))
                .await?;
            Ok(cx.add_model(|cx| {
                // Replica id 0: this buffer originates on the local host.
                let mut buffer = Buffer::from_file(0, contents, diff_base, Arc::new(file), cx);
                buffer.git_diff_recalc(cx);
                buffer
            }))
        })
    }
513
    /// The stored diagnostics for a worktree-relative path, if any.
    pub fn diagnostics_for_path(
        &self,
        path: &Path,
    ) -> Option<Vec<DiagnosticEntry<Unclipped<PointUtf16>>>> {
        self.diagnostics.get(path).cloned()
    }
520
    /// Replaces the diagnostics for `worktree_path` and, when shared, pushes
    /// the new summary to collaborators.
    ///
    /// Returns `Ok(true)` if the path's summary changed (either the old or the
    /// new summary is non-empty).
    pub fn update_diagnostics(
        &mut self,
        language_server_id: usize,
        worktree_path: Arc<Path>,
        diagnostics: Vec<DiagnosticEntry<Unclipped<PointUtf16>>>,
        _: &mut ModelContext<Worktree>,
    ) -> Result<bool> {
        // Clear any previous state for this path before inserting.
        self.diagnostics.remove(&worktree_path);
        let old_summary = self
            .diagnostic_summaries
            .remove(&PathKey(worktree_path.clone()))
            .unwrap_or_default();
        let new_summary = DiagnosticSummary::new(language_server_id, &diagnostics);
        // Empty summaries are not stored, so an all-clear path takes no space.
        if !new_summary.is_empty() {
            self.diagnostic_summaries
                .insert(PathKey(worktree_path.clone()), new_summary);
            self.diagnostics.insert(worktree_path.clone(), diagnostics);
        }

        let updated = !old_summary.is_empty() || !new_summary.is_empty();
        if updated {
            if let Some(share) = self.share.as_ref() {
                // Best-effort: failures to notify collaborators are logged, not returned.
                self.client
                    .send(proto::UpdateDiagnosticSummary {
                        project_id: share.project_id,
                        worktree_id: self.id().to_proto(),
                        summary: Some(proto::DiagnosticSummary {
                            path: worktree_path.to_string_lossy().to_string(),
                            language_server_id: language_server_id as u64,
                            error_count: new_summary.error_count as u32,
                            warning_count: new_summary.warning_count as u32,
                        }),
                    })
                    .log_err();
            }
        }

        Ok(updated)
    }
560
    /// Installs a snapshot produced by the background scanner, forwards it to
    /// collaborators if shared, and emits an event for any changed git repos.
    fn set_snapshot(&mut self, new_snapshot: LocalSnapshot, cx: &mut ModelContext<Worktree>) {
        // Diff repositories before overwriting the old snapshot.
        let updated_repos = Self::changed_repos(
            &self.snapshot.git_repositories,
            &new_snapshot.git_repositories,
        );
        self.snapshot = new_snapshot;

        if let Some(share) = self.share.as_mut() {
            // The watch channel feeds the task that streams updates to collaborators.
            *share.snapshots_tx.borrow_mut() = self.snapshot.clone();
        }

        if !updated_repos.is_empty() {
            cx.emit(Event::UpdatedGitRepositories(updated_repos));
        }
    }
576
577 fn changed_repos(
578 old_repos: &[GitRepositoryEntry],
579 new_repos: &[GitRepositoryEntry],
580 ) -> Vec<GitRepositoryEntry> {
581 fn diff<'a>(
582 a: &'a [GitRepositoryEntry],
583 b: &'a [GitRepositoryEntry],
584 updated: &mut HashMap<&'a Path, GitRepositoryEntry>,
585 ) {
586 for a_repo in a {
587 let matched = b.iter().find(|b_repo| {
588 a_repo.git_dir_path == b_repo.git_dir_path && a_repo.scan_id == b_repo.scan_id
589 });
590
591 if matched.is_none() {
592 updated.insert(a_repo.git_dir_path.as_ref(), a_repo.clone());
593 }
594 }
595 }
596
597 let mut updated = HashMap::<&Path, GitRepositoryEntry>::default();
598
599 diff(old_repos, new_repos, &mut updated);
600 diff(new_repos, old_repos, &mut updated);
601
602 updated.into_values().collect()
603 }
604
605 pub fn scan_complete(&self) -> impl Future<Output = ()> {
606 let mut is_scanning_rx = self.is_scanning.1.clone();
607 async move {
608 let mut is_scanning = is_scanning_rx.borrow().clone();
609 while is_scanning {
610 if let Some(value) = is_scanning_rx.recv().await {
611 is_scanning = value;
612 } else {
613 break;
614 }
615 }
616 }
617 }
618
    /// A clone of the current local snapshot, including scanner bookkeeping.
    pub fn snapshot(&self) -> LocalSnapshot {
        self.snapshot.clone()
    }
622
    /// Serializes this worktree's identity for transmission to collaborators.
    pub fn metadata_proto(&self) -> proto::WorktreeMetadata {
        proto::WorktreeMetadata {
            id: self.id().to_proto(),
            root_name: self.root_name().to_string(),
            visible: self.visible,
            abs_path: self.abs_path().as_os_str().to_string_lossy().into(),
        }
    }
631
    /// Reads the file at `path` from disk, returning its `File` metadata,
    /// contents, and git-index diff base (when the path is inside a repo).
    ///
    /// Also refreshes the snapshot entry for the path so the worktree reflects
    /// the file's current state.
    fn load(
        &self,
        path: &Path,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<(File, String, Option<String>)>> {
        let handle = cx.handle();
        let path = Arc::from(path);
        let abs_path = self.absolutize(&path);
        let fs = self.fs.clone();
        let snapshot = self.snapshot();

        cx.spawn(|this, mut cx| async move {
            let text = fs.load(&abs_path).await?;

            // Look up the index text on a background thread; repositories can
            // be slow to query.
            let diff_base = if let Some(repo) = snapshot.repo_for(&path) {
                if let Ok(repo_relative) = path.strip_prefix(repo.content_path) {
                    let repo_relative = repo_relative.to_owned();
                    cx.background()
                        .spawn(async move { repo.repo.lock().load_index_text(&repo_relative) })
                        .await
                } else {
                    None
                }
            } else {
                None
            };

            // Eagerly populate the snapshot with an updated entry for the loaded file
            let entry = this
                .update(&mut cx, |this, cx| {
                    this.as_local().unwrap().refresh_entry(path, None, cx)
                })
                .await?;

            Ok((
                File {
                    entry_id: entry.id,
                    worktree: handle,
                    path: entry.path,
                    mtime: entry.mtime,
                    is_local: true,
                    is_deleted: false,
                },
                text,
                diff_base,
            ))
        })
    }
680
681 pub fn save_buffer(
682 &self,
683 buffer_handle: ModelHandle<Buffer>,
684 path: Arc<Path>,
685 has_changed_file: bool,
686 cx: &mut ModelContext<Worktree>,
687 ) -> Task<Result<(clock::Global, RopeFingerprint, SystemTime)>> {
688 let handle = cx.handle();
689 let buffer = buffer_handle.read(cx);
690
691 let rpc = self.client.clone();
692 let buffer_id = buffer.remote_id();
693 let project_id = self.share.as_ref().map(|share| share.project_id);
694
695 let text = buffer.as_rope().clone();
696 let fingerprint = text.fingerprint();
697 let version = buffer.version();
698 let save = self.write_file(path, text, buffer.line_ending(), cx);
699
700 cx.as_mut().spawn(|mut cx| async move {
701 let entry = save.await?;
702
703 if has_changed_file {
704 let new_file = Arc::new(File {
705 entry_id: entry.id,
706 worktree: handle,
707 path: entry.path,
708 mtime: entry.mtime,
709 is_local: true,
710 is_deleted: false,
711 });
712
713 if let Some(project_id) = project_id {
714 rpc.send(proto::UpdateBufferFile {
715 project_id,
716 buffer_id,
717 file: Some(new_file.to_proto()),
718 })
719 .log_err();
720 }
721
722 buffer_handle.update(&mut cx, |buffer, cx| {
723 if has_changed_file {
724 buffer.file_updated(new_file, cx).detach();
725 }
726 });
727 }
728
729 if let Some(project_id) = project_id {
730 rpc.send(proto::BufferSaved {
731 project_id,
732 buffer_id,
733 version: serialize_version(&version),
734 mtime: Some(entry.mtime.into()),
735 fingerprint: serialize_fingerprint(fingerprint),
736 })?;
737 }
738
739 buffer_handle.update(&mut cx, |buffer, cx| {
740 buffer.did_save(version.clone(), fingerprint, entry.mtime, cx);
741 });
742
743 Ok((version, fingerprint, entry.mtime))
744 })
745 }
746
747 pub fn create_entry(
748 &self,
749 path: impl Into<Arc<Path>>,
750 is_dir: bool,
751 cx: &mut ModelContext<Worktree>,
752 ) -> Task<Result<Entry>> {
753 let path = path.into();
754 let abs_path = self.absolutize(&path);
755 let fs = self.fs.clone();
756 let write = cx.background().spawn(async move {
757 if is_dir {
758 fs.create_dir(&abs_path).await
759 } else {
760 fs.save(&abs_path, &Default::default(), Default::default())
761 .await
762 }
763 });
764
765 cx.spawn(|this, mut cx| async move {
766 write.await?;
767 this.update(&mut cx, |this, cx| {
768 this.as_local_mut().unwrap().refresh_entry(path, None, cx)
769 })
770 .await
771 })
772 }
773
774 pub fn write_file(
775 &self,
776 path: impl Into<Arc<Path>>,
777 text: Rope,
778 line_ending: LineEnding,
779 cx: &mut ModelContext<Worktree>,
780 ) -> Task<Result<Entry>> {
781 let path = path.into();
782 let abs_path = self.absolutize(&path);
783 let fs = self.fs.clone();
784 let write = cx
785 .background()
786 .spawn(async move { fs.save(&abs_path, &text, line_ending).await });
787
788 cx.spawn(|this, mut cx| async move {
789 write.await?;
790 this.update(&mut cx, |this, cx| {
791 this.as_local_mut().unwrap().refresh_entry(path, None, cx)
792 })
793 .await
794 })
795 }
796
    /// Deletes the file or directory for `entry_id` from disk, then waits for
    /// the scanner to rescan the deleted path so the snapshot is consistent.
    ///
    /// Returns `None` when the entry id is unknown.
    pub fn delete_entry(
        &self,
        entry_id: ProjectEntryId,
        cx: &mut ModelContext<Worktree>,
    ) -> Option<Task<Result<()>>> {
        let entry = self.entry_for_id(entry_id)?.clone();
        let abs_path = self.abs_path.clone();
        let fs = self.fs.clone();

        let delete = cx.background().spawn(async move {
            // Canonicalize so the path handed to the scanner matches the form
            // it observes from filesystem events.
            let mut abs_path = fs.canonicalize(&abs_path).await?;
            if entry.path.file_name().is_some() {
                abs_path = abs_path.join(&entry.path);
            }
            if entry.is_file() {
                fs.remove_file(&abs_path, Default::default()).await?;
            } else {
                fs.remove_dir(
                    &abs_path,
                    RemoveOptions {
                        recursive: true,
                        ignore_if_not_exists: false,
                    },
                )
                .await?;
            }
            anyhow::Ok(abs_path)
        });

        Some(cx.spawn(|this, mut cx| async move {
            let abs_path = delete.await?;
            // Ask the scanner to rescan the path; the barrier resolves once done.
            let (tx, mut rx) = barrier::channel();
            this.update(&mut cx, |this, _| {
                this.as_local_mut()
                    .unwrap()
                    .path_changes_tx
                    .try_send((vec![abs_path], tx))
            })?;
            rx.recv().await;
            Ok(())
        }))
    }
839
    /// Renames the entry `entry_id` to `new_path` on disk and refreshes both
    /// the new and old paths in the snapshot.
    ///
    /// Returns `None` when the entry id is unknown.
    pub fn rename_entry(
        &self,
        entry_id: ProjectEntryId,
        new_path: impl Into<Arc<Path>>,
        cx: &mut ModelContext<Worktree>,
    ) -> Option<Task<Result<Entry>>> {
        let old_path = self.entry_for_id(entry_id)?.path.clone();
        let new_path = new_path.into();
        let abs_old_path = self.absolutize(&old_path);
        let abs_new_path = self.absolutize(&new_path);
        let fs = self.fs.clone();
        let rename = cx.background().spawn(async move {
            fs.rename(&abs_old_path, &abs_new_path, Default::default())
                .await
        });

        Some(cx.spawn(|this, mut cx| async move {
            rename.await?;
            this.update(&mut cx, |this, cx| {
                this.as_local_mut()
                    .unwrap()
                    // Pass the old path too so its stale entry is rescanned away.
                    .refresh_entry(new_path.clone(), Some(old_path), cx)
            })
            .await
        }))
    }
866
    /// Recursively copies the entry `entry_id` to `new_path` on disk and
    /// refreshes the destination's snapshot entry.
    ///
    /// Returns `None` when the entry id is unknown.
    pub fn copy_entry(
        &self,
        entry_id: ProjectEntryId,
        new_path: impl Into<Arc<Path>>,
        cx: &mut ModelContext<Worktree>,
    ) -> Option<Task<Result<Entry>>> {
        let old_path = self.entry_for_id(entry_id)?.path.clone();
        let new_path = new_path.into();
        let abs_old_path = self.absolutize(&old_path);
        let abs_new_path = self.absolutize(&new_path);
        let fs = self.fs.clone();
        let copy = cx.background().spawn(async move {
            copy_recursive(
                fs.as_ref(),
                &abs_old_path,
                &abs_new_path,
                Default::default(),
            )
            .await
        });

        Some(cx.spawn(|this, mut cx| async move {
            copy.await?;
            this.update(&mut cx, |this, cx| {
                this.as_local_mut()
                    .unwrap()
                    .refresh_entry(new_path.clone(), None, cx)
            })
            .await
        }))
    }
898
    /// Asks the background scanner to rescan `path` (and optionally the
    /// vacated `old_path`), waits for the scan to complete, then returns the
    /// refreshed `Entry` for `path`.
    fn refresh_entry(
        &self,
        path: Arc<Path>,
        old_path: Option<Arc<Path>>,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<Entry>> {
        let fs = self.fs.clone();
        let abs_root_path = self.abs_path.clone();
        let path_changes_tx = self.path_changes_tx.clone();
        cx.spawn_weak(move |this, mut cx| async move {
            // Canonicalize the root so the paths match what the scanner sees
            // in filesystem events.
            let abs_path = fs.canonicalize(&abs_root_path).await?;
            let mut paths = Vec::with_capacity(2);
            // A path with no file name is the worktree root itself.
            paths.push(if path.file_name().is_some() {
                abs_path.join(&path)
            } else {
                abs_path.clone()
            });
            if let Some(old_path) = old_path {
                paths.push(if old_path.file_name().is_some() {
                    abs_path.join(&old_path)
                } else {
                    abs_path.clone()
                });
            }

            // The barrier resolves once the scanner has processed `paths`.
            let (tx, mut rx) = barrier::channel();
            path_changes_tx.try_send((paths, tx))?;
            rx.recv().await;
            this.upgrade(&cx)
                .ok_or_else(|| anyhow!("worktree was dropped"))?
                .update(&mut cx, |this, _| {
                    this.entry_for_path(path)
                        .cloned()
                        .ok_or_else(|| anyhow!("failed to read path after update"))
                })
        })
    }
936
    /// Starts (or resumes) sharing this worktree with collaborators under
    /// `project_id`.
    ///
    /// On first share, sends all current diagnostic summaries and spawns a
    /// background task that diffs each new snapshot against the previous one
    /// and streams chunked `UpdateWorktree` requests. The returned task
    /// resolves once the initial snapshot has been sent.
    pub fn share(&mut self, project_id: u64, cx: &mut ModelContext<Worktree>) -> Task<Result<()>> {
        let (share_tx, share_rx) = oneshot::channel();

        if let Some(share) = self.share.as_mut() {
            // Already shared: resolve immediately and nudge the update task to
            // resume in case it was blocked on a failed request.
            let _ = share_tx.send(());
            *share.resume_updates.borrow_mut() = ();
        } else {
            let (snapshots_tx, mut snapshots_rx) = watch::channel_with(self.snapshot());
            let (resume_updates_tx, mut resume_updates_rx) = watch::channel();
            let worktree_id = cx.model_id() as u64;

            for (path, summary) in self.diagnostic_summaries.iter() {
                if let Err(e) = self.client.send(proto::UpdateDiagnosticSummary {
                    project_id,
                    worktree_id,
                    summary: Some(summary.to_proto(&path.0)),
                }) {
                    return Task::ready(Err(e));
                }
            }

            let _maintain_remote_snapshot = cx.background().spawn({
                let client = self.client.clone();
                async move {
                    let mut share_tx = Some(share_tx);
                    // Start from an empty snapshot so the first diff sends
                    // the entire worktree.
                    let mut prev_snapshot = LocalSnapshot {
                        ignores_by_parent_abs_path: Default::default(),
                        git_repositories: Default::default(),
                        removed_entry_ids: Default::default(),
                        next_entry_id: Default::default(),
                        snapshot: Snapshot {
                            id: WorktreeId(worktree_id as usize),
                            abs_path: Path::new("").into(),
                            root_name: Default::default(),
                            root_char_bag: Default::default(),
                            entries_by_path: Default::default(),
                            entries_by_id: Default::default(),
                            scan_id: 0,
                            completed_scan_id: 0,
                        },
                    };
                    while let Some(snapshot) = snapshots_rx.recv().await {
                        // Small chunks in tests exercise the chunking logic.
                        #[cfg(any(test, feature = "test-support"))]
                        const MAX_CHUNK_SIZE: usize = 2;
                        #[cfg(not(any(test, feature = "test-support")))]
                        const MAX_CHUNK_SIZE: usize = 256;

                        let update =
                            snapshot.build_update(&prev_snapshot, project_id, worktree_id, true);
                        for update in proto::split_worktree_update(update, MAX_CHUNK_SIZE) {
                            // Clear any stale resume signal before retrying.
                            let _ = resume_updates_rx.try_recv();
                            while let Err(error) = client.request(update.clone()).await {
                                log::error!("failed to send worktree update: {}", error);
                                log::info!("waiting to resume updates");
                                if resume_updates_rx.next().await.is_none() {
                                    return Ok(());
                                }
                            }
                        }

                        // First full snapshot sent: resolve the caller's future.
                        if let Some(share_tx) = share_tx.take() {
                            let _ = share_tx.send(());
                        }

                        prev_snapshot = snapshot;
                    }

                    Ok::<_, anyhow::Error>(())
                }
                .log_err()
            });

            self.share = Some(ShareState {
                project_id,
                snapshots_tx,
                resume_updates: resume_updates_tx,
                _maintain_remote_snapshot,
            });
        }

        cx.foreground()
            .spawn(async move { share_rx.await.map_err(|_| anyhow!("share ended")) })
    }
1020
1021 pub fn unshare(&mut self) {
1022 self.share.take();
1023 }
1024
    /// Whether this worktree is currently shared with collaborators.
    pub fn is_shared(&self) -> bool {
        self.share.is_some()
    }
1028}
1029
1030impl RemoteWorktree {
    /// A clone of the latest foreground snapshot.
    fn snapshot(&self) -> Snapshot {
        self.snapshot.clone()
    }
1034
    /// Marks the host as disconnected: stops accepting updates and drops all
    /// pending snapshot subscriptions (their receivers will resolve with an error).
    pub fn disconnected_from_host(&mut self) {
        self.updates_tx.take();
        self.snapshot_subscriptions.clear();
        self.disconnected = true;
    }
1040
    /// Asks the remote host to save `buffer_handle`, then applies the
    /// host-reported version, fingerprint, and mtime to the local buffer.
    pub fn save_buffer(
        &self,
        buffer_handle: ModelHandle<Buffer>,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<(clock::Global, RopeFingerprint, SystemTime)>> {
        let buffer = buffer_handle.read(cx);
        let buffer_id = buffer.remote_id();
        let version = buffer.version();
        let rpc = self.client.clone();
        let project_id = self.project_id;
        cx.as_mut().spawn(|mut cx| async move {
            let response = rpc
                .request(proto::SaveBuffer {
                    project_id,
                    buffer_id,
                    version: serialize_version(&version),
                })
                .await?;
            // The host's response is authoritative for the saved state.
            let version = deserialize_version(&response.version);
            let fingerprint = deserialize_fingerprint(&response.fingerprint)?;
            let mtime = response
                .mtime
                .ok_or_else(|| anyhow!("missing mtime"))?
                .into();

            buffer_handle.update(&mut cx, |buffer, cx| {
                buffer.did_save(version.clone(), fingerprint, mtime, cx);
            });

            Ok((version, fingerprint, mtime))
        })
    }
1073
    /// Queues a worktree update from the host. Silently dropped after
    /// `disconnected_from_host` has cleared `updates_tx`.
    pub fn update_from_remote(&mut self, update: proto::UpdateWorktree) {
        if let Some(updates_tx) = &self.updates_tx {
            updates_tx
                .unbounded_send(update)
                // The receiving task only stops when the sender is dropped,
                // so a send can only fail if that invariant is broken.
                .expect("consumer runs to completion");
        }
    }
1081
    /// Whether the snapshot for `scan_id` (and all prior scans) has been applied.
    fn observed_snapshot(&self, scan_id: usize) -> bool {
        self.completed_scan_id >= scan_id
    }
1085
    /// Returns a future that resolves once the snapshot for `scan_id` has been
    /// observed, or errs if the host disconnects first.
    fn wait_for_snapshot(&mut self, scan_id: usize) -> impl Future<Output = Result<()>> {
        let (tx, rx) = oneshot::channel();
        if self.observed_snapshot(scan_id) {
            // Already observed: resolve immediately.
            let _ = tx.send(());
        } else if self.disconnected {
            // Dropping the sender makes `rx.await` return an error below.
            drop(tx);
        } else {
            // Insert in sorted position so the foreground task can resolve
            // subscriptions front-to-back as scans complete.
            match self
                .snapshot_subscriptions
                .binary_search_by_key(&scan_id, |probe| probe.0)
            {
                Ok(ix) | Err(ix) => self.snapshot_subscriptions.insert(ix, (scan_id, tx)),
            }
        }

        async move {
            rx.await?;
            Ok(())
        }
    }
1106
1107 pub fn update_diagnostic_summary(
1108 &mut self,
1109 path: Arc<Path>,
1110 summary: &proto::DiagnosticSummary,
1111 ) {
1112 let summary = DiagnosticSummary {
1113 language_server_id: summary.language_server_id as usize,
1114 error_count: summary.error_count as usize,
1115 warning_count: summary.warning_count as usize,
1116 };
1117 if summary.is_empty() {
1118 self.diagnostic_summaries.remove(&PathKey(path));
1119 } else {
1120 self.diagnostic_summaries.insert(PathKey(path), summary);
1121 }
1122 }
1123
    /// Inserts a host-created entry once the snapshot for `scan_id` has been
    /// observed, keeping the foreground snapshot in sync.
    pub fn insert_entry(
        &mut self,
        entry: proto::Entry,
        scan_id: usize,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<Entry>> {
        let wait_for_snapshot = self.wait_for_snapshot(scan_id);
        cx.spawn(|this, mut cx| async move {
            wait_for_snapshot.await?;
            this.update(&mut cx, |worktree, _| {
                let worktree = worktree.as_remote_mut().unwrap();
                let mut snapshot = worktree.background_snapshot.lock();
                let entry = snapshot.insert_entry(entry);
                // Mirror the background snapshot into the foreground copy.
                worktree.snapshot = snapshot.clone();
                entry
            })
        })
    }
1142
    /// Deletes a host-removed entry once the snapshot for `scan_id` has been
    /// observed, keeping the foreground snapshot in sync.
    pub(crate) fn delete_entry(
        &mut self,
        id: ProjectEntryId,
        scan_id: usize,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<()>> {
        let wait_for_snapshot = self.wait_for_snapshot(scan_id);
        cx.spawn(|this, mut cx| async move {
            wait_for_snapshot.await?;
            this.update(&mut cx, |worktree, _| {
                let worktree = worktree.as_remote_mut().unwrap();
                let mut snapshot = worktree.background_snapshot.lock();
                snapshot.delete_entry(id);
                // Mirror the background snapshot into the foreground copy.
                worktree.snapshot = snapshot.clone();
            });
            Ok(())
        })
    }
1161}
1162
1163impl Snapshot {
    /// This worktree's identifier.
    pub fn id(&self) -> WorktreeId {
        self.id
    }
1167
1168 pub fn abs_path(&self) -> &Arc<Path> {
1169 &self.abs_path
1170 }
1171
1172 pub fn contains_entry(&self, entry_id: ProjectEntryId) -> bool {
1173 self.entries_by_id.get(&entry_id, &()).is_some()
1174 }
1175
1176 pub(crate) fn insert_entry(&mut self, entry: proto::Entry) -> Result<Entry> {
1177 let entry = Entry::try_from((&self.root_char_bag, entry))?;
1178 let old_entry = self.entries_by_id.insert_or_replace(
1179 PathEntry {
1180 id: entry.id,
1181 path: entry.path.clone(),
1182 is_ignored: entry.is_ignored,
1183 scan_id: 0,
1184 },
1185 &(),
1186 );
1187 if let Some(old_entry) = old_entry {
1188 self.entries_by_path.remove(&PathKey(old_entry.path), &());
1189 }
1190 self.entries_by_path.insert_or_replace(entry.clone(), &());
1191 Ok(entry)
1192 }
1193
1194 fn delete_entry(&mut self, entry_id: ProjectEntryId) -> Option<Arc<Path>> {
1195 let removed_entry = self.entries_by_id.remove(&entry_id, &())?;
1196 self.entries_by_path = {
1197 let mut cursor = self.entries_by_path.cursor();
1198 let mut new_entries_by_path =
1199 cursor.slice(&TraversalTarget::Path(&removed_entry.path), Bias::Left, &());
1200 while let Some(entry) = cursor.item() {
1201 if entry.path.starts_with(&removed_entry.path) {
1202 self.entries_by_id.remove(&entry.id, &());
1203 cursor.next(&());
1204 } else {
1205 break;
1206 }
1207 }
1208 new_entries_by_path.push_tree(cursor.suffix(&()), &());
1209 new_entries_by_path
1210 };
1211
1212 Some(removed_entry.path)
1213 }
1214
1215 pub(crate) fn apply_remote_update(&mut self, update: proto::UpdateWorktree) -> Result<()> {
1216 let mut entries_by_path_edits = Vec::new();
1217 let mut entries_by_id_edits = Vec::new();
1218 for entry_id in update.removed_entries {
1219 if let Some(entry) = self.entry_for_id(ProjectEntryId::from_proto(entry_id)) {
1220 entries_by_path_edits.push(Edit::Remove(PathKey(entry.path.clone())));
1221 entries_by_id_edits.push(Edit::Remove(entry.id));
1222 }
1223 }
1224
1225 for entry in update.updated_entries {
1226 let entry = Entry::try_from((&self.root_char_bag, entry))?;
1227 if let Some(PathEntry { path, .. }) = self.entries_by_id.get(&entry.id, &()) {
1228 entries_by_path_edits.push(Edit::Remove(PathKey(path.clone())));
1229 }
1230 entries_by_id_edits.push(Edit::Insert(PathEntry {
1231 id: entry.id,
1232 path: entry.path.clone(),
1233 is_ignored: entry.is_ignored,
1234 scan_id: 0,
1235 }));
1236 entries_by_path_edits.push(Edit::Insert(entry));
1237 }
1238
1239 self.entries_by_path.edit(entries_by_path_edits, &());
1240 self.entries_by_id.edit(entries_by_id_edits, &());
1241 self.scan_id = update.scan_id as usize;
1242 if update.is_last_update {
1243 self.completed_scan_id = update.scan_id as usize;
1244 }
1245
1246 Ok(())
1247 }
1248
1249 pub fn file_count(&self) -> usize {
1250 self.entries_by_path.summary().file_count
1251 }
1252
1253 pub fn visible_file_count(&self) -> usize {
1254 self.entries_by_path.summary().visible_file_count
1255 }
1256
1257 fn traverse_from_offset(
1258 &self,
1259 include_dirs: bool,
1260 include_ignored: bool,
1261 start_offset: usize,
1262 ) -> Traversal {
1263 let mut cursor = self.entries_by_path.cursor();
1264 cursor.seek(
1265 &TraversalTarget::Count {
1266 count: start_offset,
1267 include_dirs,
1268 include_ignored,
1269 },
1270 Bias::Right,
1271 &(),
1272 );
1273 Traversal {
1274 cursor,
1275 include_dirs,
1276 include_ignored,
1277 }
1278 }
1279
1280 fn traverse_from_path(
1281 &self,
1282 include_dirs: bool,
1283 include_ignored: bool,
1284 path: &Path,
1285 ) -> Traversal {
1286 let mut cursor = self.entries_by_path.cursor();
1287 cursor.seek(&TraversalTarget::Path(path), Bias::Left, &());
1288 Traversal {
1289 cursor,
1290 include_dirs,
1291 include_ignored,
1292 }
1293 }
1294
1295 pub fn files(&self, include_ignored: bool, start: usize) -> Traversal {
1296 self.traverse_from_offset(false, include_ignored, start)
1297 }
1298
1299 pub fn entries(&self, include_ignored: bool) -> Traversal {
1300 self.traverse_from_offset(true, include_ignored, 0)
1301 }
1302
1303 pub fn paths(&self) -> impl Iterator<Item = &Arc<Path>> {
1304 let empty_path = Path::new("");
1305 self.entries_by_path
1306 .cursor::<()>()
1307 .filter(move |entry| entry.path.as_ref() != empty_path)
1308 .map(|entry| &entry.path)
1309 }
1310
1311 fn child_entries<'a>(&'a self, parent_path: &'a Path) -> ChildEntriesIter<'a> {
1312 let mut cursor = self.entries_by_path.cursor();
1313 cursor.seek(&TraversalTarget::Path(parent_path), Bias::Right, &());
1314 let traversal = Traversal {
1315 cursor,
1316 include_dirs: true,
1317 include_ignored: true,
1318 };
1319 ChildEntriesIter {
1320 traversal,
1321 parent_path,
1322 }
1323 }
1324
1325 pub fn root_entry(&self) -> Option<&Entry> {
1326 self.entry_for_path("")
1327 }
1328
1329 pub fn root_name(&self) -> &str {
1330 &self.root_name
1331 }
1332
1333 pub fn scan_id(&self) -> usize {
1334 self.scan_id
1335 }
1336
1337 pub fn entry_for_path(&self, path: impl AsRef<Path>) -> Option<&Entry> {
1338 let path = path.as_ref();
1339 self.traverse_from_path(true, true, path)
1340 .entry()
1341 .and_then(|entry| {
1342 if entry.path.as_ref() == path {
1343 Some(entry)
1344 } else {
1345 None
1346 }
1347 })
1348 }
1349
1350 pub fn entry_for_id(&self, id: ProjectEntryId) -> Option<&Entry> {
1351 let entry = self.entries_by_id.get(&id, &())?;
1352 self.entry_for_path(&entry.path)
1353 }
1354
1355 pub fn inode_for_path(&self, path: impl AsRef<Path>) -> Option<u64> {
1356 self.entry_for_path(path.as_ref()).map(|e| e.inode)
1357 }
1358}
1359
impl LocalSnapshot {
    // Gives the most specific git repository for a given path
    pub(crate) fn repo_for(&self, path: &Path) -> Option<GitRepositoryEntry> {
        self.git_repositories
            .iter()
            .rev() //git_repository is ordered lexicographically
            .find(|repo| repo.manages(path))
            .cloned()
    }

    /// Finds the repository whose `.git` directory contains `path`, if any.
    pub(crate) fn repo_with_dot_git_containing(
        &mut self,
        path: &Path,
    ) -> Option<&mut GitRepositoryEntry> {
        // Git repositories cannot be nested, so we don't need to reverse the order
        self.git_repositories
            .iter_mut()
            .find(|repo| repo.in_dot_git(path))
    }

    /// Builds an update message describing this entire snapshot, as would be
    /// sent to a client that has no prior state.
    #[cfg(test)]
    pub(crate) fn build_initial_update(&self, project_id: u64) -> proto::UpdateWorktree {
        let root_name = self.root_name.clone();
        proto::UpdateWorktree {
            project_id,
            worktree_id: self.id().to_proto(),
            abs_path: self.abs_path().to_string_lossy().into(),
            root_name,
            updated_entries: self.entries_by_path.iter().map(Into::into).collect(),
            removed_entries: Default::default(),
            scan_id: self.scan_id as u64,
            is_last_update: true,
        }
    }

    /// Builds an incremental update describing how this snapshot differs from
    /// `other`, by merging the two id-ordered entry sequences.
    pub(crate) fn build_update(
        &self,
        other: &Self,
        project_id: u64,
        worktree_id: u64,
        include_ignored: bool,
    ) -> proto::UpdateWorktree {
        let mut updated_entries = Vec::new();
        let mut removed_entries = Vec::new();
        let mut self_entries = self
            .entries_by_id
            .cursor::<()>()
            .filter(|e| include_ignored || !e.is_ignored)
            .peekable();
        let mut other_entries = other
            .entries_by_id
            .cursor::<()>()
            .filter(|e| include_ignored || !e.is_ignored)
            .peekable();
        // Classic sorted-merge: ids only in `self` are additions, ids only in
        // `other` are removals, and shared ids are updates when their scan ids
        // differ (i.e. the entry was touched since `other` was taken).
        loop {
            match (self_entries.peek(), other_entries.peek()) {
                (Some(self_entry), Some(other_entry)) => {
                    match Ord::cmp(&self_entry.id, &other_entry.id) {
                        Ordering::Less => {
                            let entry = self.entry_for_id(self_entry.id).unwrap().into();
                            updated_entries.push(entry);
                            self_entries.next();
                        }
                        Ordering::Equal => {
                            if self_entry.scan_id != other_entry.scan_id {
                                let entry = self.entry_for_id(self_entry.id).unwrap().into();
                                updated_entries.push(entry);
                            }

                            self_entries.next();
                            other_entries.next();
                        }
                        Ordering::Greater => {
                            removed_entries.push(other_entry.id.to_proto());
                            other_entries.next();
                        }
                    }
                }
                (Some(self_entry), None) => {
                    let entry = self.entry_for_id(self_entry.id).unwrap().into();
                    updated_entries.push(entry);
                    self_entries.next();
                }
                (None, Some(other_entry)) => {
                    removed_entries.push(other_entry.id.to_proto());
                    other_entries.next();
                }
                (None, None) => break,
            }
        }

        proto::UpdateWorktree {
            project_id,
            worktree_id,
            abs_path: self.abs_path().to_string_lossy().into(),
            root_name: self.root_name().to_string(),
            updated_entries,
            removed_entries,
            scan_id: self.scan_id as u64,
            is_last_update: self.completed_scan_id == self.scan_id,
        }
    }

    /// Inserts (or replaces) a single entry discovered on disk, reusing any
    /// previously-assigned id for the same inode or path. If the entry is a
    /// `.gitignore` file, its contents are loaded and registered as well.
    fn insert_entry(&mut self, mut entry: Entry, fs: &dyn Fs) -> Entry {
        if entry.is_file() && entry.path.file_name() == Some(&GITIGNORE) {
            let abs_path = self.abs_path.join(&entry.path);
            match smol::block_on(build_gitignore(&abs_path, fs)) {
                Ok(ignore) => {
                    self.ignores_by_parent_abs_path.insert(
                        abs_path.parent().unwrap().into(),
                        (Arc::new(ignore), self.scan_id),
                    );
                }
                Err(error) => {
                    log::error!(
                        "error loading .gitignore file {:?} - {:?}",
                        &entry.path,
                        error
                    );
                }
            }
        }

        self.reuse_entry_id(&mut entry);

        // Don't demote an already-populated directory back to pending.
        if entry.kind == EntryKind::PendingDir {
            if let Some(existing_entry) =
                self.entries_by_path.get(&PathKey(entry.path.clone()), &())
            {
                entry.kind = existing_entry.kind;
            }
        }

        let scan_id = self.scan_id;
        let removed = self.entries_by_path.insert_or_replace(entry.clone(), &());
        if let Some(removed) = removed {
            // The path was previously occupied by a different entry; drop its
            // id record so the id map stays consistent.
            if removed.id != entry.id {
                self.entries_by_id.remove(&removed.id, &());
            }
        }
        self.entries_by_id.insert_or_replace(
            PathEntry {
                id: entry.id,
                path: entry.path.clone(),
                is_ignored: entry.is_ignored,
                scan_id,
            },
            &(),
        );

        entry
    }

    /// Records the children of a scanned directory, marks the directory as
    /// fully loaded, registers its `.gitignore` (if any), and — when the
    /// directory is a `.git` dir — registers the containing git repository.
    fn populate_dir(
        &mut self,
        parent_path: Arc<Path>,
        entries: impl IntoIterator<Item = Entry>,
        ignore: Option<Arc<Gitignore>>,
        fs: &dyn Fs,
    ) {
        let mut parent_entry = if let Some(parent_entry) =
            self.entries_by_path.get(&PathKey(parent_path.clone()), &())
        {
            parent_entry.clone()
        } else {
            log::warn!(
                "populating a directory {:?} that has been removed",
                parent_path
            );
            return;
        };

        match parent_entry.kind {
            EntryKind::PendingDir => {
                parent_entry.kind = EntryKind::Dir;
            }
            EntryKind::Dir => {}
            // The path no longer refers to a directory; nothing to populate.
            _ => return,
        }

        if let Some(ignore) = ignore {
            self.ignores_by_parent_abs_path.insert(
                self.abs_path.join(&parent_path).into(),
                (ignore, self.scan_id),
            );
        }

        if parent_path.file_name() == Some(&DOT_GIT) {
            let abs_path = self.abs_path.join(&parent_path);
            let content_path: Arc<Path> = parent_path.parent().unwrap().into();
            // Only insert if no repository is registered for this working
            // tree yet; Err(ix) is the sorted insertion point.
            if let Err(ix) = self
                .git_repositories
                .binary_search_by_key(&&content_path, |repo| &repo.content_path)
            {
                if let Some(repo) = fs.open_repo(abs_path.as_path()) {
                    self.git_repositories.insert(
                        ix,
                        GitRepositoryEntry {
                            repo,
                            scan_id: 0,
                            content_path,
                            git_dir_path: parent_path,
                        },
                    );
                }
            }
        }

        let mut entries_by_path_edits = vec![Edit::Insert(parent_entry)];
        let mut entries_by_id_edits = Vec::new();

        for mut entry in entries {
            self.reuse_entry_id(&mut entry);
            entries_by_id_edits.push(Edit::Insert(PathEntry {
                id: entry.id,
                path: entry.path.clone(),
                is_ignored: entry.is_ignored,
                scan_id: self.scan_id,
            }));
            entries_by_path_edits.push(Edit::Insert(entry));
        }

        self.entries_by_path.edit(entries_by_path_edits, &());
        self.entries_by_id.edit(entries_by_id_edits, &());
    }

    /// Keeps entry ids stable across rescans: prefer the id recorded for this
    /// inode when it was removed, otherwise the id of the entry currently at
    /// the same path.
    fn reuse_entry_id(&mut self, entry: &mut Entry) {
        if let Some(removed_entry_id) = self.removed_entry_ids.remove(&entry.inode) {
            entry.id = removed_entry_id;
        } else if let Some(existing_entry) = self.entry_for_path(&entry.path) {
            entry.id = existing_entry.id;
        }
    }

    /// Removes `path` and everything beneath it from the snapshot, remembering
    /// removed ids by inode so they can be reused if the file reappears
    /// (e.g. after a rename). Also bumps the scan id of any affected
    /// `.gitignore` registration or git repository.
    fn remove_path(&mut self, path: &Path) {
        let mut new_entries;
        let removed_entries;
        {
            let mut cursor = self.entries_by_path.cursor::<TraversalProgress>();
            // Split the tree into [before path] + [path and descendants] +
            // [after], then stitch the outer two pieces back together.
            new_entries = cursor.slice(&TraversalTarget::Path(path), Bias::Left, &());
            removed_entries = cursor.slice(&TraversalTarget::PathSuccessor(path), Bias::Left, &());
            new_entries.push_tree(cursor.suffix(&()), &());
        }
        self.entries_by_path = new_entries;

        let mut entries_by_id_edits = Vec::new();
        for entry in removed_entries.cursor::<()>() {
            // Keep the largest removed id per inode for later reuse.
            let removed_entry_id = self
                .removed_entry_ids
                .entry(entry.inode)
                .or_insert(entry.id);
            *removed_entry_id = cmp::max(*removed_entry_id, entry.id);
            entries_by_id_edits.push(Edit::Remove(entry.id));
        }
        self.entries_by_id.edit(entries_by_id_edits, &());

        if path.file_name() == Some(&GITIGNORE) {
            let abs_parent_path = self.abs_path.join(path.parent().unwrap());
            if let Some((_, scan_id)) = self
                .ignores_by_parent_abs_path
                .get_mut(abs_parent_path.as_path())
            {
                *scan_id = self.snapshot.scan_id;
            }
        } else if path.file_name() == Some(&DOT_GIT) {
            let parent_path = path.parent().unwrap();
            if let Ok(ix) = self
                .git_repositories
                .binary_search_by_key(&parent_path, |repo| repo.git_dir_path.as_ref())
            {
                self.git_repositories[ix].scan_id = self.snapshot.scan_id;
            }
        }
    }

    /// Collects the inodes of all known ancestors of `path` (used to detect
    /// symlink cycles during scanning).
    fn ancestor_inodes_for_path(&self, path: &Path) -> TreeSet<u64> {
        let mut inodes = TreeSet::default();
        for ancestor in path.ancestors().skip(1) {
            if let Some(entry) = self.entry_for_path(ancestor) {
                inodes.insert(entry.inode);
            }
        }
        inodes
    }

    /// Builds the stack of gitignores that applies to `abs_path`, walking its
    /// ancestors from the root downward. Collapses to `IgnoreStack::all()` as
    /// soon as any ancestor (or the path itself) is ignored.
    fn ignore_stack_for_abs_path(&self, abs_path: &Path, is_dir: bool) -> Arc<IgnoreStack> {
        let mut new_ignores = Vec::new();
        for ancestor in abs_path.ancestors().skip(1) {
            if let Some((ignore, _)) = self.ignores_by_parent_abs_path.get(ancestor) {
                new_ignores.push((ancestor, Some(ignore.clone())));
            } else {
                new_ignores.push((ancestor, None));
            }
        }

        let mut ignore_stack = IgnoreStack::none();
        for (parent_abs_path, ignore) in new_ignores.into_iter().rev() {
            if ignore_stack.is_abs_path_ignored(parent_abs_path, true) {
                ignore_stack = IgnoreStack::all();
                break;
            } else if let Some(ignore) = ignore {
                ignore_stack = ignore_stack.append(parent_abs_path.into(), ignore);
            }
        }

        if ignore_stack.is_abs_path_ignored(abs_path, is_dir) {
            ignore_stack = IgnoreStack::all();
        }

        ignore_stack
    }

    pub fn git_repo_entries(&self) -> &[GitRepositoryEntry] {
        &self.git_repositories
    }
}
1676
1677impl GitRepositoryEntry {
1678 // Note that these paths should be relative to the worktree root.
1679 pub(crate) fn manages(&self, path: &Path) -> bool {
1680 path.starts_with(self.content_path.as_ref())
1681 }
1682
1683 // Note that this path should be relative to the worktree root.
1684 pub(crate) fn in_dot_git(&self, path: &Path) -> bool {
1685 path.starts_with(self.git_dir_path.as_ref())
1686 }
1687}
1688
1689async fn build_gitignore(abs_path: &Path, fs: &dyn Fs) -> Result<Gitignore> {
1690 let contents = fs.load(abs_path).await?;
1691 let parent = abs_path.parent().unwrap_or_else(|| Path::new("/"));
1692 let mut builder = GitignoreBuilder::new(parent);
1693 for line in contents.lines() {
1694 builder.add_line(Some(abs_path.into()), line)?;
1695 }
1696 Ok(builder.build()?)
1697}
1698
1699impl WorktreeId {
1700 pub fn from_usize(handle_id: usize) -> Self {
1701 Self(handle_id)
1702 }
1703
1704 pub(crate) fn from_proto(id: u64) -> Self {
1705 Self(id as usize)
1706 }
1707
1708 pub fn to_proto(&self) -> u64 {
1709 self.0 as u64
1710 }
1711
1712 pub fn to_usize(&self) -> usize {
1713 self.0
1714 }
1715}
1716
1717impl fmt::Display for WorktreeId {
1718 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1719 self.0.fmt(f)
1720 }
1721}
1722
1723impl Deref for Worktree {
1724 type Target = Snapshot;
1725
1726 fn deref(&self) -> &Self::Target {
1727 match self {
1728 Worktree::Local(worktree) => &worktree.snapshot,
1729 Worktree::Remote(worktree) => &worktree.snapshot,
1730 }
1731 }
1732}
1733
1734impl Deref for LocalWorktree {
1735 type Target = LocalSnapshot;
1736
1737 fn deref(&self) -> &Self::Target {
1738 &self.snapshot
1739 }
1740}
1741
1742impl Deref for RemoteWorktree {
1743 type Target = Snapshot;
1744
1745 fn deref(&self) -> &Self::Target {
1746 &self.snapshot
1747 }
1748}
1749
1750impl fmt::Debug for LocalWorktree {
1751 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1752 self.snapshot.fmt(f)
1753 }
1754}
1755
impl fmt::Debug for Snapshot {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Local newtype wrappers give the two sum-trees compact, purpose-built
        // Debug representations instead of dumping the full tree structure.
        struct EntriesById<'a>(&'a SumTree<PathEntry>);
        struct EntriesByPath<'a>(&'a SumTree<Entry>);

        impl<'a> fmt::Debug for EntriesByPath<'a> {
            fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
                // Render as a path -> id map.
                f.debug_map()
                    .entries(self.0.iter().map(|entry| (&entry.path, entry.id)))
                    .finish()
            }
        }

        impl<'a> fmt::Debug for EntriesById<'a> {
            fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
                f.debug_list().entries(self.0.iter()).finish()
            }
        }

        f.debug_struct("Snapshot")
            .field("id", &self.id)
            .field("root_name", &self.root_name)
            .field("entries_by_path", &EntriesByPath(&self.entries_by_path))
            .field("entries_by_id", &EntriesById(&self.entries_by_id))
            .finish()
    }
}
1783
/// A handle to a file within a worktree, as seen by open buffers.
#[derive(Clone, PartialEq)]
pub struct File {
    // The worktree this file belongs to.
    pub worktree: ModelHandle<Worktree>,
    // Path of the file, relative to the worktree root.
    pub path: Arc<Path>,
    // Last-known modification time.
    pub mtime: SystemTime,
    // Id of the corresponding worktree entry.
    pub(crate) entry_id: ProjectEntryId,
    // Whether the file lives in a local (vs. remote) worktree.
    pub(crate) is_local: bool,
    // Whether the file has been deleted from disk.
    pub(crate) is_deleted: bool,
}
1793
impl language::File for File {
    /// Returns `self` as a local file handle, or `None` for files in remote
    /// worktrees.
    fn as_local(&self) -> Option<&dyn language::LocalFile> {
        if self.is_local {
            Some(self)
        } else {
            None
        }
    }

    fn mtime(&self) -> SystemTime {
        self.mtime
    }

    fn path(&self) -> &Arc<Path> {
        &self.path
    }

    /// Builds a display path for the file: the worktree's root name (for
    /// visible worktrees) or its absolute path (abbreviated with `~` when
    /// under the home directory), followed by the worktree-relative path.
    fn full_path(&self, cx: &AppContext) -> PathBuf {
        let mut full_path = PathBuf::new();
        let worktree = self.worktree.read(cx);

        if worktree.is_visible() {
            full_path.push(worktree.root_name());
        } else {
            let path = worktree.abs_path();

            if worktree.is_local() && path.starts_with(HOME.as_path()) {
                full_path.push("~");
                full_path.push(path.strip_prefix(HOME.as_path()).unwrap());
            } else {
                full_path.push(path)
            }
        }

        // An empty relative path means this file *is* the worktree root;
        // pushing it would be a no-op anyway, but skip it explicitly.
        if self.path.components().next().is_some() {
            full_path.push(&self.path);
        }

        full_path
    }

    /// Returns the last component of this handle's absolute path. If this handle refers to the root
    /// of its worktree, then this method will return the name of the worktree itself.
    fn file_name<'a>(&'a self, cx: &'a AppContext) -> &'a OsStr {
        self.path
            .file_name()
            .unwrap_or_else(|| OsStr::new(&self.worktree.read(cx).root_name))
    }

    fn is_deleted(&self) -> bool {
        self.is_deleted
    }

    fn as_any(&self) -> &dyn Any {
        self
    }

    /// Serializes this file handle for transmission to collaborators.
    fn to_proto(&self) -> rpc::proto::File {
        rpc::proto::File {
            // The worktree's model-handle id doubles as its wire id
            // (see WorktreeId::from_usize).
            worktree_id: self.worktree.id() as u64,
            entry_id: self.entry_id.to_proto(),
            path: self.path.to_string_lossy().into(),
            mtime: Some(self.mtime.into()),
            is_deleted: self.is_deleted,
        }
    }
}
1861
impl language::LocalFile for File {
    /// Returns the file's absolute path on disk.
    fn abs_path(&self, cx: &AppContext) -> PathBuf {
        self.worktree
            .read(cx)
            .as_local()
            .unwrap()
            .abs_path
            .join(&self.path)
    }

    /// Loads the file's contents from disk on a background thread.
    fn load(&self, cx: &AppContext) -> Task<Result<String>> {
        let worktree = self.worktree.read(cx).as_local().unwrap();
        let abs_path = worktree.absolutize(&self.path);
        let fs = worktree.fs.clone();
        cx.background()
            .spawn(async move { fs.load(&abs_path).await })
    }

    /// Notifies collaborators that this buffer was reloaded from disk, but
    /// only when the worktree is currently shared.
    fn buffer_reloaded(
        &self,
        buffer_id: u64,
        version: &clock::Global,
        fingerprint: RopeFingerprint,
        line_ending: LineEnding,
        mtime: SystemTime,
        cx: &mut AppContext,
    ) {
        let worktree = self.worktree.read(cx).as_local().unwrap();
        if let Some(project_id) = worktree.share.as_ref().map(|share| share.project_id) {
            worktree
                .client
                .send(proto::BufferReloaded {
                    project_id,
                    buffer_id,
                    version: serialize_version(version),
                    mtime: Some(mtime.into()),
                    fingerprint: serialize_fingerprint(fingerprint),
                    line_ending: serialize_line_ending(line_ending) as i32,
                })
                // Best-effort: a failed send is logged, not propagated.
                .log_err();
        }
    }
}
1905
impl File {
    /// Deserializes a file handle received from a collaborator. Fails if
    /// `worktree` is not remote or its id doesn't match the message.
    pub fn from_proto(
        proto: rpc::proto::File,
        worktree: ModelHandle<Worktree>,
        cx: &AppContext,
    ) -> Result<Self> {
        let worktree_id = worktree
            .read(cx)
            .as_remote()
            .ok_or_else(|| anyhow!("not remote"))?
            .id();

        if worktree_id.to_proto() != proto.worktree_id {
            return Err(anyhow!("worktree id does not match file"));
        }

        Ok(Self {
            worktree,
            path: Path::new(&proto.path).into(),
            mtime: proto.mtime.ok_or_else(|| anyhow!("no timestamp"))?.into(),
            entry_id: ProjectEntryId::from_proto(proto.entry_id),
            is_local: false,
            is_deleted: proto.is_deleted,
        })
    }

    /// Downcasts a generic `language::File` back to this concrete type.
    pub fn from_dyn(file: Option<&Arc<dyn language::File>>) -> Option<&Self> {
        file.and_then(|f| f.as_any().downcast_ref())
    }

    pub fn worktree_id(&self, cx: &AppContext) -> WorktreeId {
        self.worktree.read(cx).id()
    }

    /// Returns the id of this file's worktree entry, or `None` if the file
    /// has been deleted from disk.
    pub fn project_entry_id(&self, _: &AppContext) -> Option<ProjectEntryId> {
        if self.is_deleted {
            None
        } else {
            Some(self.entry_id)
        }
    }
}
1948
/// A single file or directory within a worktree snapshot.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct Entry {
    // Stable id, preserved across rescans where possible.
    pub id: ProjectEntryId,
    // File, fully-scanned directory, or pending (unscanned) directory.
    pub kind: EntryKind,
    // Path relative to the worktree root.
    pub path: Arc<Path>,
    // Filesystem inode, used to keep ids stable across renames.
    pub inode: u64,
    pub mtime: SystemTime,
    pub is_symlink: bool,
    // Whether the entry is excluded by some applicable .gitignore.
    pub is_ignored: bool,
}
1959
/// The kind of a worktree entry.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum EntryKind {
    // A directory whose children have not been scanned yet.
    PendingDir,
    // A fully scanned directory.
    Dir,
    // A file, carrying the character bag used for fuzzy matching.
    File(CharBag),
}
1966
/// The kind of change detected for a path between two snapshots.
#[derive(Clone, Copy, Debug)]
pub enum PathChange {
    Added,
    Removed,
    Updated,
    // Used when the scanner can't distinguish creation from modification
    // (e.g. for events received before the initial scan completed).
    AddedOrUpdated,
}
1974
1975impl Entry {
1976 fn new(
1977 path: Arc<Path>,
1978 metadata: &fs::Metadata,
1979 next_entry_id: &AtomicUsize,
1980 root_char_bag: CharBag,
1981 ) -> Self {
1982 Self {
1983 id: ProjectEntryId::new(next_entry_id),
1984 kind: if metadata.is_dir {
1985 EntryKind::PendingDir
1986 } else {
1987 EntryKind::File(char_bag_for_path(root_char_bag, &path))
1988 },
1989 path,
1990 inode: metadata.inode,
1991 mtime: metadata.mtime,
1992 is_symlink: metadata.is_symlink,
1993 is_ignored: false,
1994 }
1995 }
1996
1997 pub fn is_dir(&self) -> bool {
1998 matches!(self.kind, EntryKind::Dir | EntryKind::PendingDir)
1999 }
2000
2001 pub fn is_file(&self) -> bool {
2002 matches!(self.kind, EntryKind::File(_))
2003 }
2004}
2005
2006impl sum_tree::Item for Entry {
2007 type Summary = EntrySummary;
2008
2009 fn summary(&self) -> Self::Summary {
2010 let visible_count = if self.is_ignored { 0 } else { 1 };
2011 let file_count;
2012 let visible_file_count;
2013 if self.is_file() {
2014 file_count = 1;
2015 visible_file_count = visible_count;
2016 } else {
2017 file_count = 0;
2018 visible_file_count = 0;
2019 }
2020
2021 EntrySummary {
2022 max_path: self.path.clone(),
2023 count: 1,
2024 visible_count,
2025 file_count,
2026 visible_file_count,
2027 }
2028 }
2029}
2030
2031impl sum_tree::KeyedItem for Entry {
2032 type Key = PathKey;
2033
2034 fn key(&self) -> Self::Key {
2035 PathKey(self.path.clone())
2036 }
2037}
2038
/// Aggregate data for a subtree of entries, maintained by the sum tree.
#[derive(Clone, Debug)]
pub struct EntrySummary {
    // The greatest (rightmost) path in the subtree.
    max_path: Arc<Path>,
    // Total number of entries.
    count: usize,
    // Entries not excluded by a .gitignore.
    visible_count: usize,
    // Entries that are files.
    file_count: usize,
    // Files not excluded by a .gitignore.
    visible_file_count: usize,
}
2047
2048impl Default for EntrySummary {
2049 fn default() -> Self {
2050 Self {
2051 max_path: Arc::from(Path::new("")),
2052 count: 0,
2053 visible_count: 0,
2054 file_count: 0,
2055 visible_file_count: 0,
2056 }
2057 }
2058}
2059
impl sum_tree::Summary for EntrySummary {
    type Context = ();

    /// Combines two adjacent summaries: the right-hand (later) max path wins,
    /// and all counts accumulate.
    fn add_summary(&mut self, rhs: &Self, _: &()) {
        self.max_path = rhs.max_path.clone();
        self.count += rhs.count;
        self.visible_count += rhs.visible_count;
        self.file_count += rhs.file_count;
        self.visible_file_count += rhs.visible_file_count;
    }
}
2071
/// An id-ordered record mapping an entry id back to its path, stored in the
/// snapshot's `entries_by_id` tree.
#[derive(Clone, Debug)]
struct PathEntry {
    id: ProjectEntryId,
    path: Arc<Path>,
    is_ignored: bool,
    // Scan in which this entry was last touched; used when diffing snapshots.
    scan_id: usize,
}
2079
impl sum_tree::Item for PathEntry {
    type Summary = PathEntrySummary;

    /// A single record's summary is simply its own id.
    fn summary(&self) -> Self::Summary {
        PathEntrySummary { max_id: self.id }
    }
}
2087
impl sum_tree::KeyedItem for PathEntry {
    type Key = ProjectEntryId;

    /// Records are keyed (and therefore ordered) by entry id.
    fn key(&self) -> Self::Key {
        self.id
    }
}
2095
/// Aggregate for a subtree of `PathEntry` records: the largest id it contains.
#[derive(Clone, Debug, Default)]
struct PathEntrySummary {
    max_id: ProjectEntryId,
}
2100
impl sum_tree::Summary for PathEntrySummary {
    type Context = ();

    /// The right-hand (later) subtree always holds the larger id, since
    /// records are ordered by id.
    fn add_summary(&mut self, summary: &Self, _: &Self::Context) {
        self.max_id = summary.max_id;
    }
}
2108
// Allows seeking through the entries_by_id tree by entry id.
impl<'a> sum_tree::Dimension<'a, PathEntrySummary> for ProjectEntryId {
    fn add_summary(&mut self, summary: &'a PathEntrySummary, _: &()) {
        *self = summary.max_id;
    }
}
2114
/// Newtype used to key sum-tree entries and maps by worktree-relative path.
#[derive(Clone, Debug, Eq, PartialEq, Ord, PartialOrd)]
pub struct PathKey(Arc<Path>);
2117
2118impl Default for PathKey {
2119 fn default() -> Self {
2120 Self(Path::new("").into())
2121 }
2122}
2123
// Allows seeking through the entries_by_path tree by path.
impl<'a> sum_tree::Dimension<'a, EntrySummary> for PathKey {
    fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
        self.0 = summary.max_path.clone();
    }
}
2129
/// Watches the filesystem on background threads, keeping a `LocalSnapshot`
/// up to date and streaming state changes to the worktree.
struct BackgroundScanner {
    // The snapshot being maintained; shared across scanner workers.
    snapshot: Mutex<LocalSnapshot>,
    fs: Arc<dyn Fs>,
    // Channel over which scan progress and snapshot updates are reported.
    status_updates_tx: UnboundedSender<ScanState>,
    executor: Arc<executor::Background>,
    // Requests from the worktree to rescan specific paths.
    refresh_requests_rx: channel::Receiver<(Vec<PathBuf>, barrier::Sender)>,
    // The previously reported snapshot plus paths changed since then,
    // used to compute change sets for status updates.
    prev_state: Mutex<(Snapshot, Vec<Arc<Path>>)>,
    finished_initial_scan: bool,
}
2139
2140impl BackgroundScanner {
    /// Creates a scanner for `snapshot`, reporting progress on
    /// `status_updates_tx` and serving rescans from `refresh_requests_rx`.
    fn new(
        snapshot: LocalSnapshot,
        fs: Arc<dyn Fs>,
        status_updates_tx: UnboundedSender<ScanState>,
        executor: Arc<executor::Background>,
        refresh_requests_rx: channel::Receiver<(Vec<PathBuf>, barrier::Sender)>,
    ) -> Self {
        Self {
            fs,
            status_updates_tx,
            executor,
            refresh_requests_rx,
            // Seed prev_state with the initial snapshot so the first status
            // update diffs against it.
            prev_state: Mutex::new((snapshot.snapshot.clone(), Vec::new())),
            snapshot: Mutex::new(snapshot),
            finished_initial_scan: false,
        }
    }
2158
    /// Main loop of the scanner: performs the initial recursive scan, then
    /// processes filesystem events and path-refresh requests until either
    /// channel closes (i.e. the worktree is dropped).
    async fn run(
        &mut self,
        mut events_rx: Pin<Box<dyn Send + Stream<Item = Vec<fsevent::Event>>>>,
    ) {
        use futures::FutureExt as _;

        let (root_abs_path, root_inode) = {
            let snapshot = self.snapshot.lock();
            (
                snapshot.abs_path.clone(),
                snapshot.root_entry().map(|e| e.inode),
            )
        };

        // Populate ignores above the root.
        let ignore_stack;
        for ancestor in root_abs_path.ancestors().skip(1) {
            if let Ok(ignore) = build_gitignore(&ancestor.join(&*GITIGNORE), self.fs.as_ref()).await
            {
                self.snapshot
                    .lock()
                    .ignores_by_parent_abs_path
                    .insert(ancestor.into(), (ignore.into(), 0));
            }
        }
        {
            let mut snapshot = self.snapshot.lock();
            snapshot.scan_id += 1;
            ignore_stack = snapshot.ignore_stack_for_abs_path(&root_abs_path, true);
            // If an ancestor gitignore excludes the root itself, mark the
            // root entry as ignored before scanning.
            if ignore_stack.is_all() {
                if let Some(mut root_entry) = snapshot.root_entry().cloned() {
                    root_entry.is_ignored = true;
                    snapshot.insert_entry(root_entry, self.fs.as_ref());
                }
            }
        };

        // Perform an initial scan of the directory.
        let (scan_job_tx, scan_job_rx) = channel::unbounded();
        smol::block_on(scan_job_tx.send(ScanJob {
            abs_path: root_abs_path,
            path: Arc::from(Path::new("")),
            ignore_stack,
            ancestor_inodes: TreeSet::from_ordered_entries(root_inode),
            scan_queue: scan_job_tx.clone(),
        }))
        .unwrap();
        // Dropping our sender lets scan_dirs terminate once all queued jobs
        // (and the jobs they spawn) are done.
        drop(scan_job_tx);
        self.scan_dirs(true, scan_job_rx).await;
        {
            let mut snapshot = self.snapshot.lock();
            snapshot.completed_scan_id = snapshot.scan_id;
        }
        self.send_status_update(false, None);

        // Process any FS events that occurred while performing the initial scan.
        // For these events, update events cannot be as precise, because we didn't
        // have the previous state loaded yet.
        if let Poll::Ready(Some(events)) = futures::poll!(events_rx.next()) {
            let mut paths = events.into_iter().map(|e| e.path).collect::<Vec<_>>();
            while let Poll::Ready(Some(more_events)) = futures::poll!(events_rx.next()) {
                paths.extend(more_events.into_iter().map(|e| e.path));
            }
            self.process_events(paths).await;
        }

        self.finished_initial_scan = true;

        // Continue processing events until the worktree is dropped.
        loop {
            select_biased! {
                // Process any path refresh requests from the worktree. Prioritize
                // these before handling changes reported by the filesystem.
                request = self.refresh_requests_rx.recv().fuse() => {
                    let Ok((paths, barrier)) = request else { break };
                    if !self.process_refresh_request(paths, barrier).await {
                        return;
                    }
                }

                events = events_rx.next().fuse() => {
                    let Some(events) = events else { break };
                    let mut paths = events.into_iter().map(|e| e.path).collect::<Vec<_>>();
                    // Drain any additional events that are already pending so
                    // they can be processed as a single batch.
                    while let Poll::Ready(Some(more_events)) = futures::poll!(events_rx.next()) {
                        paths.extend(more_events.into_iter().map(|e| e.path));
                    }
                    self.process_events(paths).await;
                }
            }
        }
    }
2250
    /// Rescans the given paths on request, then reports a status update.
    /// Returns false if the status-update channel has closed.
    async fn process_refresh_request(&self, paths: Vec<PathBuf>, barrier: barrier::Sender) -> bool {
        self.reload_entries_for_paths(paths, None).await;
        self.send_status_update(false, Some(barrier))
    }
2255
    /// Handles a batch of filesystem events: reloads the affected entries,
    /// rescans any directories they introduced, refreshes ignore state, prunes
    /// stale git repositories, and reports a status update.
    async fn process_events(&mut self, paths: Vec<PathBuf>) {
        let (scan_job_tx, scan_job_rx) = channel::unbounded();
        if let Some(mut paths) = self
            .reload_entries_for_paths(paths, Some(scan_job_tx.clone()))
            .await
        {
            // Accumulate the changed paths (sorted) for the next change-set
            // computation in send_status_update.
            paths.sort_unstable();
            util::extend_sorted(&mut self.prev_state.lock().1, paths, usize::MAX, Ord::cmp);
        }
        drop(scan_job_tx);
        self.scan_dirs(false, scan_job_rx).await;

        self.update_ignore_statuses().await;

        let mut snapshot = self.snapshot.lock();
        // Drop git repositories whose .git directory no longer exists.
        let mut git_repositories = mem::take(&mut snapshot.git_repositories);
        git_repositories.retain(|repo| snapshot.entry_for_path(&repo.git_dir_path).is_some());
        snapshot.git_repositories = git_repositories;
        snapshot.removed_entry_ids.clear();
        snapshot.completed_scan_id = snapshot.scan_id;
        drop(snapshot);

        self.send_status_update(false, None);
    }
2280
    /// Drains the scan-job queue across one worker per CPU, recursively
    /// loading directories. Workers also service path-refresh requests and
    /// (optionally) emit periodic progress updates while scanning.
    async fn scan_dirs(
        &self,
        enable_progress_updates: bool,
        scan_jobs_rx: channel::Receiver<ScanJob>,
    ) {
        use futures::FutureExt as _;

        if self
            .status_updates_tx
            .unbounded_send(ScanState::Started)
            .is_err()
        {
            // The worktree has been dropped; nothing to report to.
            return;
        }

        let progress_update_count = AtomicUsize::new(0);
        self.executor
            .scoped(|scope| {
                for _ in 0..self.executor.num_cpus() {
                    scope.spawn(async {
                        let mut last_progress_update_count = 0;
                        let progress_update_timer = self.progress_timer(enable_progress_updates).fuse();
                        futures::pin_mut!(progress_update_timer);

                        loop {
                            select_biased! {
                                // Process any path refresh requests before moving on to process
                                // the scan queue, so that user operations are prioritized.
                                request = self.refresh_requests_rx.recv().fuse() => {
                                    let Ok((paths, barrier)) = request else { break };
                                    if !self.process_refresh_request(paths, barrier).await {
                                        return;
                                    }
                                }

                                // Send periodic progress updates to the worktree. Use an atomic counter
                                // to ensure that only one of the workers sends a progress update after
                                // the update interval elapses.
                                _ = progress_update_timer => {
                                    match progress_update_count.compare_exchange(
                                        last_progress_update_count,
                                        last_progress_update_count + 1,
                                        SeqCst,
                                        SeqCst
                                    ) {
                                        Ok(_) => {
                                            // This worker won the race; it sends the update.
                                            last_progress_update_count += 1;
                                            self.send_status_update(true, None);
                                        }
                                        Err(count) => {
                                            // Another worker already sent it; adopt its count.
                                            last_progress_update_count = count;
                                        }
                                    }
                                    progress_update_timer.set(self.progress_timer(enable_progress_updates).fuse());
                                }

                                // Recursively load directories from the file system.
                                job = scan_jobs_rx.recv().fuse() => {
                                    let Ok(job) = job else { break };
                                    if let Err(err) = self.scan_dir(&job).await {
                                        // Failing to scan the root is expected when the
                                        // worktree points at a single file; don't log it.
                                        if job.path.as_ref() != Path::new("") {
                                            log::error!("error scanning directory {:?}: {}", job.abs_path, err);
                                        }
                                    }
                                }
                            }
                        }
                    })
                }
            })
            .await;
    }
2353
    /// Publishes a `ScanState::Updated` message containing a clone of the
    /// current snapshot plus the set of changes since the snapshot that was
    /// reported last time. Returns `false` when the receiver has been dropped.
    fn send_status_update(&self, scanning: bool, barrier: Option<barrier::Sender>) -> bool {
        let mut prev_state = self.prev_state.lock();
        let snapshot = self.snapshot.lock().clone();
        let mut old_snapshot = snapshot.snapshot.clone();
        // Store the new snapshot as the baseline for the next update while
        // retrieving the previously-reported one to diff against.
        mem::swap(&mut old_snapshot, &mut prev_state.0);
        let changed_paths = mem::take(&mut prev_state.1);
        let changes = self.build_change_set(&old_snapshot, &snapshot.snapshot, changed_paths);
        self.status_updates_tx
            .unbounded_send(ScanState::Updated {
                snapshot,
                changes,
                scanning,
                barrier,
            })
            .is_ok()
    }
2370
    /// Scans a single directory: reads its children, builds entries for them,
    /// applies `.gitignore` rules, populates the snapshot with the results,
    /// and enqueues scan jobs for non-cyclic subdirectories.
    async fn scan_dir(&self, job: &ScanJob) -> Result<()> {
        let mut new_entries: Vec<Entry> = Vec::new();
        // One slot per directory entry in `new_entries`, in the same order;
        // `None` marks a directory that will not be scanned (recursive symlink).
        let mut new_jobs: Vec<Option<ScanJob>> = Vec::new();
        let mut ignore_stack = job.ignore_stack.clone();
        let mut new_ignore = None;
        let (root_abs_path, root_char_bag, next_entry_id) = {
            let snapshot = self.snapshot.lock();
            (
                snapshot.abs_path().clone(),
                snapshot.root_char_bag,
                snapshot.next_entry_id.clone(),
            )
        };
        let mut child_paths = self.fs.read_dir(&job.abs_path).await?;
        while let Some(child_abs_path) = child_paths.next().await {
            let child_abs_path: Arc<Path> = match child_abs_path {
                Ok(child_abs_path) => child_abs_path.into(),
                Err(error) => {
                    log::error!("error processing entry {:?}", error);
                    continue;
                }
            };

            let child_name = child_abs_path.file_name().unwrap();
            let child_path: Arc<Path> = job.path.join(child_name).into();
            let child_metadata = match self.fs.metadata(&child_abs_path).await {
                Ok(Some(metadata)) => metadata,
                // The file disappeared between listing and stat; skip it.
                Ok(None) => continue,
                Err(err) => {
                    log::error!("error processing {:?}: {:?}", child_abs_path, err);
                    continue;
                }
            };

            // If we find a .gitignore, add it to the stack of ignores used to determine which paths are ignored
            if child_name == *GITIGNORE {
                match build_gitignore(&child_abs_path, self.fs.as_ref()).await {
                    Ok(ignore) => {
                        let ignore = Arc::new(ignore);
                        ignore_stack = ignore_stack.append(job.abs_path.clone(), ignore.clone());
                        new_ignore = Some(ignore);
                    }
                    Err(error) => {
                        log::error!(
                            "error loading .gitignore file {:?} - {:?}",
                            child_name,
                            error
                        );
                    }
                }

                // Update the ignore status of any child entries we've already processed
                // to reflect the ignore file in the current directory. Because
                // `.gitignore` starts with a `.`, it sorts near the beginning of the
                // listing, so such already-processed entries should rarely be numerous.
                // Update the ignore stack associated with any new jobs as well.
                let mut new_jobs = new_jobs.iter_mut();
                for entry in &mut new_entries {
                    let entry_abs_path = root_abs_path.join(&entry.path);
                    entry.is_ignored =
                        ignore_stack.is_abs_path_ignored(&entry_abs_path, entry.is_dir());

                    if entry.is_dir() {
                        if let Some(job) = new_jobs.next().expect("Missing scan job for entry") {
                            job.ignore_stack = if entry.is_ignored {
                                IgnoreStack::all()
                            } else {
                                ignore_stack.clone()
                            };
                        }
                    }
                }
            }

            let mut child_entry = Entry::new(
                child_path.clone(),
                &child_metadata,
                &next_entry_id,
                root_char_bag,
            );

            if child_entry.is_dir() {
                let is_ignored = ignore_stack.is_abs_path_ignored(&child_abs_path, true);
                child_entry.is_ignored = is_ignored;

                // Avoid recursing until crash in the case of a recursive symlink
                if !job.ancestor_inodes.contains(&child_entry.inode) {
                    let mut ancestor_inodes = job.ancestor_inodes.clone();
                    ancestor_inodes.insert(child_entry.inode);

                    new_jobs.push(Some(ScanJob {
                        abs_path: child_abs_path,
                        path: child_path,
                        ignore_stack: if is_ignored {
                            IgnoreStack::all()
                        } else {
                            ignore_stack.clone()
                        },
                        ancestor_inodes,
                        scan_queue: job.scan_queue.clone(),
                    }));
                } else {
                    new_jobs.push(None);
                }
            } else {
                child_entry.is_ignored = ignore_stack.is_abs_path_ignored(&child_abs_path, false);
            }

            new_entries.push(child_entry);
        }

        self.snapshot.lock().populate_dir(
            job.path.clone(),
            new_entries,
            new_ignore,
            self.fs.as_ref(),
        );

        // Enqueue subdirectory jobs only after the snapshot has been updated
        // with this directory's contents.
        for new_job in new_jobs {
            if let Some(new_job) = new_job {
                job.scan_queue.send(new_job).await.unwrap();
            }
        }

        Ok(())
    }
2496
    /// Reloads worktree entries for the given absolute paths (typically from
    /// file-system events). When `scan_queue_tx` is provided, directories
    /// among the paths are also enqueued for a recursive rescan. Returns the
    /// affected paths relative to the worktree root, or `None` if the root
    /// can't be canonicalized.
    async fn reload_entries_for_paths(
        &self,
        mut abs_paths: Vec<PathBuf>,
        scan_queue_tx: Option<Sender<ScanJob>>,
    ) -> Option<Vec<Arc<Path>>> {
        let doing_recursive_update = scan_queue_tx.is_some();

        // After sorting, drop any path whose ancestor is also present, since
        // reloading the ancestor covers its descendants.
        abs_paths.sort_unstable();
        abs_paths.dedup_by(|a, b| a.starts_with(&b));

        let root_abs_path = self.snapshot.lock().abs_path.clone();
        let root_canonical_path = self.fs.canonicalize(&root_abs_path).await.log_err()?;
        // Stat all paths concurrently before taking the snapshot lock.
        let metadata = futures::future::join_all(
            abs_paths
                .iter()
                .map(|abs_path| self.fs.metadata(&abs_path))
                .collect::<Vec<_>>(),
        )
        .await;

        let mut snapshot = self.snapshot.lock();
        let is_idle = snapshot.completed_scan_id == snapshot.scan_id;
        snapshot.scan_id += 1;
        // A non-recursive refresh completes synchronously within this call, so
        // the snapshot stays "fully scanned" if it was before.
        if is_idle && !doing_recursive_update {
            snapshot.completed_scan_id = snapshot.scan_id;
        }

        // Remove any entries for paths that no longer exist or are being recursively
        // refreshed. Do this before adding any new entries, so that renames can be
        // detected regardless of the order of the paths.
        let mut event_paths = Vec::<Arc<Path>>::with_capacity(abs_paths.len());
        for (abs_path, metadata) in abs_paths.iter().zip(metadata.iter()) {
            if let Ok(path) = abs_path.strip_prefix(&root_canonical_path) {
                if matches!(metadata, Ok(None)) || doing_recursive_update {
                    snapshot.remove_path(path);
                }
                event_paths.push(path.into());
            } else {
                log::error!(
                    "unexpected event {:?} for root path {:?}",
                    abs_path,
                    root_canonical_path
                );
            }
        }

        for (path, metadata) in event_paths.iter().cloned().zip(metadata.into_iter()) {
            let abs_path: Arc<Path> = root_abs_path.join(&path).into();

            match metadata {
                Ok(Some(metadata)) => {
                    let ignore_stack =
                        snapshot.ignore_stack_for_abs_path(&abs_path, metadata.is_dir);
                    let mut fs_entry = Entry::new(
                        path.clone(),
                        &metadata,
                        snapshot.next_entry_id.as_ref(),
                        snapshot.root_char_bag,
                    );
                    fs_entry.is_ignored = ignore_stack.is_all();
                    snapshot.insert_entry(fs_entry, self.fs.as_ref());

                    // If the changed path lives inside a repository's `.git`
                    // directory, reload that repository's index.
                    let scan_id = snapshot.scan_id;
                    if let Some(repo) = snapshot.repo_with_dot_git_containing(&path) {
                        repo.repo.lock().reload_index();
                        repo.scan_id = scan_id;
                    }

                    if let Some(scan_queue_tx) = &scan_queue_tx {
                        let mut ancestor_inodes = snapshot.ancestor_inodes_for_path(&path);
                        // Track ancestor inodes to avoid descending into
                        // recursive symlinks.
                        if metadata.is_dir && !ancestor_inodes.contains(&metadata.inode) {
                            ancestor_inodes.insert(metadata.inode);
                            smol::block_on(scan_queue_tx.send(ScanJob {
                                abs_path,
                                path,
                                ignore_stack,
                                ancestor_inodes,
                                scan_queue: scan_queue_tx.clone(),
                            }))
                            .unwrap();
                        }
                    }
                }
                Ok(None) => {}
                Err(err) => {
                    // TODO - create a special 'error' entry in the entries tree to mark this
                    log::error!("error reading file on event {:?}", err);
                }
            }
        }

        Some(event_paths)
    }
2590
    /// Recomputes ignore statuses after `.gitignore` files have changed:
    /// drops ignore data for deleted `.gitignore` files, then re-evaluates all
    /// entries beneath directories whose ignore files were modified.
    async fn update_ignore_statuses(&self) {
        use futures::FutureExt as _;

        let mut snapshot = self.snapshot.lock().clone();
        let mut ignores_to_update = Vec::new();
        let mut ignores_to_delete = Vec::new();
        for (parent_abs_path, (_, scan_id)) in &snapshot.ignores_by_parent_abs_path {
            if let Ok(parent_path) = parent_abs_path.strip_prefix(&snapshot.abs_path) {
                // A scan id newer than the last completed scan means this
                // ignore file changed during the current scan.
                if *scan_id > snapshot.completed_scan_id
                    && snapshot.entry_for_path(parent_path).is_some()
                {
                    ignores_to_update.push(parent_abs_path.clone());
                }

                let ignore_path = parent_path.join(&*GITIGNORE);
                if snapshot.entry_for_path(ignore_path).is_none() {
                    ignores_to_delete.push(parent_abs_path.clone());
                }
            }
        }

        // Remove deleted ignore files from both the local clone and the
        // shared snapshot.
        for parent_abs_path in ignores_to_delete {
            snapshot.ignores_by_parent_abs_path.remove(&parent_abs_path);
            self.snapshot
                .lock()
                .ignores_by_parent_abs_path
                .remove(&parent_abs_path);
        }

        let (ignore_queue_tx, ignore_queue_rx) = channel::unbounded();
        ignores_to_update.sort_unstable();
        let mut ignores_to_update = ignores_to_update.into_iter().peekable();
        while let Some(parent_abs_path) = ignores_to_update.next() {
            // Skip descendants of `parent_abs_path`: the recursive job for
            // the ancestor will cover them.
            while ignores_to_update
                .peek()
                .map_or(false, |p| p.starts_with(&parent_abs_path))
            {
                ignores_to_update.next().unwrap();
            }

            let ignore_stack = snapshot.ignore_stack_for_abs_path(&parent_abs_path, true);
            smol::block_on(ignore_queue_tx.send(UpdateIgnoreStatusJob {
                abs_path: parent_abs_path,
                ignore_stack,
                ignore_queue: ignore_queue_tx.clone(),
            }))
            .unwrap();
        }
        // Close the queue so workers exit once all jobs (including ones they
        // enqueue themselves) are done.
        drop(ignore_queue_tx);

        self.executor
            .scoped(|scope| {
                for _ in 0..self.executor.num_cpus() {
                    scope.spawn(async {
                        loop {
                            select_biased! {
                                // Process any path refresh requests before moving on to process
                                // the queue of ignore statuses.
                                request = self.refresh_requests_rx.recv().fuse() => {
                                    let Ok((paths, barrier)) = request else { break };
                                    if !self.process_refresh_request(paths, barrier).await {
                                        return;
                                    }
                                }

                                // Recursively process directories whose ignores have changed.
                                job = ignore_queue_rx.recv().fuse() => {
                                    let Ok(job) = job else { break };
                                    self.update_ignore_status(job, &snapshot).await;
                                }
                            }
                        }
                    });
                }
            })
            .await;
    }
2668
    /// Re-evaluates the ignore status of the direct children of
    /// `job.abs_path` against `job.ignore_stack`, records edits for entries
    /// whose status changed, and enqueues follow-up jobs for child
    /// directories so the update proceeds recursively.
    async fn update_ignore_status(&self, job: UpdateIgnoreStatusJob, snapshot: &LocalSnapshot) {
        let mut ignore_stack = job.ignore_stack;
        // Include this directory's own `.gitignore`, if it has one.
        if let Some((ignore, _)) = snapshot.ignores_by_parent_abs_path.get(&job.abs_path) {
            ignore_stack = ignore_stack.append(job.abs_path.clone(), ignore.clone());
        }

        let mut entries_by_id_edits = Vec::new();
        let mut entries_by_path_edits = Vec::new();
        let path = job.abs_path.strip_prefix(&snapshot.abs_path).unwrap();
        for mut entry in snapshot.child_entries(path).cloned() {
            let was_ignored = entry.is_ignored;
            let abs_path = snapshot.abs_path().join(&entry.path);
            entry.is_ignored = ignore_stack.is_abs_path_ignored(&abs_path, entry.is_dir());
            if entry.is_dir() {
                let child_ignore_stack = if entry.is_ignored {
                    IgnoreStack::all()
                } else {
                    ignore_stack.clone()
                };
                job.ignore_queue
                    .send(UpdateIgnoreStatusJob {
                        abs_path: abs_path.into(),
                        ignore_stack: child_ignore_stack,
                        ignore_queue: job.ignore_queue.clone(),
                    })
                    .await
                    .unwrap();
            }

            if entry.is_ignored != was_ignored {
                // Keep the by-id and by-path maps in sync: update both
                // records for this entry.
                let mut path_entry = snapshot.entries_by_id.get(&entry.id, &()).unwrap().clone();
                path_entry.scan_id = snapshot.scan_id;
                path_entry.is_ignored = entry.is_ignored;
                entries_by_id_edits.push(Edit::Insert(path_entry));
                entries_by_path_edits.push(Edit::Insert(entry));
            }
        }

        let mut snapshot = self.snapshot.lock();
        snapshot.entries_by_path.edit(entries_by_path_edits, &());
        snapshot.entries_by_id.edit(entries_by_id_edits, &());
    }
2711
    /// Computes the set of path changes between two snapshots, restricted to
    /// the given event paths and their descendants.
    fn build_change_set(
        &self,
        old_snapshot: &Snapshot,
        new_snapshot: &Snapshot,
        event_paths: Vec<Arc<Path>>,
    ) -> HashMap<Arc<Path>, PathChange> {
        use PathChange::{Added, AddedOrUpdated, Removed, Updated};

        let mut changes = HashMap::default();
        let mut old_paths = old_snapshot.entries_by_path.cursor::<PathKey>();
        let mut new_paths = new_snapshot.entries_by_path.cursor::<PathKey>();
        let received_before_initialized = !self.finished_initial_scan;

        for path in event_paths {
            let path = PathKey(path);
            old_paths.seek(&path, Bias::Left, &());
            new_paths.seek(&path, Bias::Left, &());

            // Walk both snapshots in lockstep over the subtree rooted at
            // `path`, merging the two sorted entry sequences.
            loop {
                match (old_paths.item(), new_paths.item()) {
                    (Some(old_entry), Some(new_entry)) => {
                        // Stop once both cursors have moved past `path` and
                        // all of its descendants.
                        if old_entry.path > path.0
                            && new_entry.path > path.0
                            && !old_entry.path.starts_with(&path.0)
                            && !new_entry.path.starts_with(&path.0)
                        {
                            break;
                        }

                        match Ord::cmp(&old_entry.path, &new_entry.path) {
                            Ordering::Less => {
                                changes.insert(old_entry.path.clone(), Removed);
                                old_paths.next(&());
                            }
                            Ordering::Equal => {
                                if received_before_initialized {
                                    // If the worktree was not fully initialized when this event was generated,
                                    // we can't know whether this entry was added during the scan or whether
                                    // it was merely updated.
                                    changes.insert(new_entry.path.clone(), AddedOrUpdated);
                                } else if old_entry.mtime != new_entry.mtime {
                                    changes.insert(new_entry.path.clone(), Updated);
                                }
                                old_paths.next(&());
                                new_paths.next(&());
                            }
                            Ordering::Greater => {
                                changes.insert(new_entry.path.clone(), Added);
                                new_paths.next(&());
                            }
                        }
                    }
                    (Some(old_entry), None) => {
                        changes.insert(old_entry.path.clone(), Removed);
                        old_paths.next(&());
                    }
                    (None, Some(new_entry)) => {
                        changes.insert(new_entry.path.clone(), Added);
                        new_paths.next(&());
                    }
                    (None, None) => break,
                }
            }
        }
        changes
    }
2778
2779 async fn progress_timer(&self, running: bool) {
2780 if !running {
2781 return futures::future::pending().await;
2782 }
2783
2784 #[cfg(any(test, feature = "test-support"))]
2785 if self.fs.is_fake() {
2786 return self.executor.simulate_random_delay().await;
2787 }
2788
2789 smol::Timer::after(Duration::from_millis(100)).await;
2790 }
2791}
2792
2793fn char_bag_for_path(root_char_bag: CharBag, path: &Path) -> CharBag {
2794 let mut result = root_char_bag;
2795 result.extend(
2796 path.to_string_lossy()
2797 .chars()
2798 .map(|c| c.to_ascii_lowercase()),
2799 );
2800 result
2801}
2802
/// A unit of work for the background scanner: scan the directory at
/// `abs_path`, enqueueing jobs for its subdirectories onto `scan_queue`.
struct ScanJob {
    // Absolute path of the directory to scan.
    abs_path: Arc<Path>,
    // Path of the directory relative to the worktree root.
    path: Arc<Path>,
    // Ignore rules in effect for this directory's contents.
    ignore_stack: Arc<IgnoreStack>,
    // Sender used to enqueue jobs for subdirectories.
    scan_queue: Sender<ScanJob>,
    // Inodes of ancestor directories, used to detect recursive symlinks.
    ancestor_inodes: TreeSet<u64>,
}
2810
/// A unit of work for recomputing ignore statuses: re-evaluate the children
/// of `abs_path` against `ignore_stack`, enqueueing child-directory jobs
/// onto `ignore_queue`.
struct UpdateIgnoreStatusJob {
    // Absolute path of the directory whose children should be re-evaluated.
    abs_path: Arc<Path>,
    // Ignore rules inherited from ancestor directories.
    ignore_stack: Arc<IgnoreStack>,
    // Sender used to enqueue follow-up jobs for subdirectories.
    ignore_queue: Sender<UpdateIgnoreStatusJob>,
}
2816
pub trait WorktreeHandle {
    /// Test-only helper that forces any pending file-system events to be
    /// delivered and processed before returning.
    #[cfg(any(test, feature = "test-support"))]
    fn flush_fs_events<'a>(
        &self,
        cx: &'a gpui::TestAppContext,
    ) -> futures::future::LocalBoxFuture<'a, ()>;
}
2824
impl WorktreeHandle for ModelHandle<Worktree> {
    // The worktree's FS event stream sometimes delivers "redundant" events for FS changes that
    // occurred before the worktree was constructed. These events can cause the worktree to perform
    // extra directory scans, and emit extra scan-state notifications.
    //
    // This function mutates the worktree's directory and waits for those mutations to be picked up,
    // to ensure that all redundant FS events have already been processed.
    #[cfg(any(test, feature = "test-support"))]
    fn flush_fs_events<'a>(
        &self,
        cx: &'a gpui::TestAppContext,
    ) -> futures::future::LocalBoxFuture<'a, ()> {
        use smol::future::FutureExt;

        let filename = "fs-event-sentinel";
        let tree = self.clone();
        let (fs, root_path) = self.read_with(cx, |tree, _| {
            let tree = tree.as_local().unwrap();
            (tree.fs.clone(), tree.abs_path().clone())
        });

        async move {
            // Create and then delete a sentinel file, waiting for each change
            // to be observed by the worktree.
            fs.create_file(&root_path.join(filename), Default::default())
                .await
                .unwrap();
            tree.condition(cx, |tree, _| tree.entry_for_path(filename).is_some())
                .await;

            fs.remove_file(&root_path.join(filename), Default::default())
                .await
                .unwrap();
            tree.condition(cx, |tree, _| tree.entry_for_path(filename).is_none())
                .await;

            // Finally, wait for any in-progress scan to complete.
            cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
                .await;
        }
        .boxed_local()
    }
}
2865
/// Accumulated entry totals for a prefix of the entries tree, used as a
/// cursor dimension when traversing `entries_by_path`.
#[derive(Clone, Debug)]
struct TraversalProgress<'a> {
    // The maximum path seen so far.
    max_path: &'a Path,
    // Total number of entries.
    count: usize,
    // Number of non-ignored entries (files and directories).
    visible_count: usize,
    // Number of file entries (including ignored ones).
    file_count: usize,
    // Number of non-ignored file entries.
    visible_file_count: usize,
}
2874
2875impl<'a> TraversalProgress<'a> {
2876 fn count(&self, include_dirs: bool, include_ignored: bool) -> usize {
2877 match (include_ignored, include_dirs) {
2878 (true, true) => self.count,
2879 (true, false) => self.file_count,
2880 (false, true) => self.visible_count,
2881 (false, false) => self.visible_file_count,
2882 }
2883 }
2884}
2885
2886impl<'a> sum_tree::Dimension<'a, EntrySummary> for TraversalProgress<'a> {
2887 fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
2888 self.max_path = summary.max_path.as_ref();
2889 self.count += summary.count;
2890 self.visible_count += summary.visible_count;
2891 self.file_count += summary.file_count;
2892 self.visible_file_count += summary.visible_file_count;
2893 }
2894}
2895
2896impl<'a> Default for TraversalProgress<'a> {
2897 fn default() -> Self {
2898 Self {
2899 max_path: Path::new(""),
2900 count: 0,
2901 visible_count: 0,
2902 file_count: 0,
2903 visible_file_count: 0,
2904 }
2905 }
2906}
2907
/// A filtered, in-order cursor over a snapshot's entries.
pub struct Traversal<'a> {
    cursor: sum_tree::Cursor<'a, Entry, TraversalProgress<'a>>,
    // Whether ignored entries are yielded.
    include_ignored: bool,
    // Whether directory entries are yielded.
    include_dirs: bool,
}
2913
impl<'a> Traversal<'a> {
    /// Advances to the next entry included by this traversal's filters.
    pub fn advance(&mut self) -> bool {
        self.advance_to_offset(self.offset() + 1)
    }

    /// Seeks forward to the entry at the given offset, where offsets count
    /// only entries matching `include_dirs`/`include_ignored`.
    pub fn advance_to_offset(&mut self, offset: usize) -> bool {
        self.cursor.seek_forward(
            &TraversalTarget::Count {
                count: offset,
                include_dirs: self.include_dirs,
                include_ignored: self.include_ignored,
            },
            Bias::Right,
            &(),
        )
    }

    /// Skips past the current entry and all of its descendants, stopping at
    /// the next entry included by the filters. Returns `false` when the
    /// traversal is exhausted.
    pub fn advance_to_sibling(&mut self) -> bool {
        while let Some(entry) = self.cursor.item() {
            self.cursor.seek_forward(
                &TraversalTarget::PathSuccessor(&entry.path),
                Bias::Left,
                &(),
            );
            if let Some(entry) = self.cursor.item() {
                // Keep looping if the entry we landed on is excluded by the
                // filters; the next iteration skips past its subtree too.
                if (self.include_dirs || !entry.is_dir())
                    && (self.include_ignored || !entry.is_ignored)
                {
                    return true;
                }
            }
        }
        false
    }

    /// The entry at the current position, if any.
    pub fn entry(&self) -> Option<&'a Entry> {
        self.cursor.item()
    }

    /// The current position, counting only entries matching the filters.
    pub fn offset(&self) -> usize {
        self.cursor
            .start()
            .count(self.include_dirs, self.include_ignored)
    }
}
2959
2960impl<'a> Iterator for Traversal<'a> {
2961 type Item = &'a Entry;
2962
2963 fn next(&mut self) -> Option<Self::Item> {
2964 if let Some(item) = self.entry() {
2965 self.advance();
2966 Some(item)
2967 } else {
2968 None
2969 }
2970 }
2971}
2972
/// Seek targets used to position a `Traversal`'s cursor.
#[derive(Debug)]
enum TraversalTarget<'a> {
    // Seek to the given path.
    Path(&'a Path),
    // Seek to the first entry after the given path that is not one of its
    // descendants.
    PathSuccessor(&'a Path),
    // Seek to the entry at the given filtered offset.
    Count {
        count: usize,
        include_ignored: bool,
        include_dirs: bool,
    },
}
2983
2984impl<'a, 'b> SeekTarget<'a, EntrySummary, TraversalProgress<'a>> for TraversalTarget<'b> {
2985 fn cmp(&self, cursor_location: &TraversalProgress<'a>, _: &()) -> Ordering {
2986 match self {
2987 TraversalTarget::Path(path) => path.cmp(&cursor_location.max_path),
2988 TraversalTarget::PathSuccessor(path) => {
2989 if !cursor_location.max_path.starts_with(path) {
2990 Ordering::Equal
2991 } else {
2992 Ordering::Greater
2993 }
2994 }
2995 TraversalTarget::Count {
2996 count,
2997 include_dirs,
2998 include_ignored,
2999 } => Ord::cmp(
3000 count,
3001 &cursor_location.count(*include_dirs, *include_ignored),
3002 ),
3003 }
3004 }
3005}
3006
/// Iterates over the direct children of `parent_path` within a snapshot.
struct ChildEntriesIter<'a> {
    parent_path: &'a Path,
    traversal: Traversal<'a>,
}
3011
3012impl<'a> Iterator for ChildEntriesIter<'a> {
3013 type Item = &'a Entry;
3014
3015 fn next(&mut self) -> Option<Self::Item> {
3016 if let Some(item) = self.traversal.entry() {
3017 if item.path.starts_with(&self.parent_path) {
3018 self.traversal.advance_to_sibling();
3019 return Some(item);
3020 }
3021 }
3022 None
3023 }
3024}
3025
3026impl<'a> From<&'a Entry> for proto::Entry {
3027 fn from(entry: &'a Entry) -> Self {
3028 Self {
3029 id: entry.id.to_proto(),
3030 is_dir: entry.is_dir(),
3031 path: entry.path.to_string_lossy().into(),
3032 inode: entry.inode,
3033 mtime: Some(entry.mtime.into()),
3034 is_symlink: entry.is_symlink,
3035 is_ignored: entry.is_ignored,
3036 }
3037 }
3038}
3039
3040impl<'a> TryFrom<(&'a CharBag, proto::Entry)> for Entry {
3041 type Error = anyhow::Error;
3042
3043 fn try_from((root_char_bag, entry): (&'a CharBag, proto::Entry)) -> Result<Self> {
3044 if let Some(mtime) = entry.mtime {
3045 let kind = if entry.is_dir {
3046 EntryKind::Dir
3047 } else {
3048 let mut char_bag = *root_char_bag;
3049 char_bag.extend(entry.path.chars().map(|c| c.to_ascii_lowercase()));
3050 EntryKind::File(char_bag)
3051 };
3052 let path: Arc<Path> = PathBuf::from(entry.path).into();
3053 Ok(Entry {
3054 id: ProjectEntryId::from_proto(entry.id),
3055 kind,
3056 path,
3057 inode: entry.inode,
3058 mtime: mtime.into(),
3059 is_symlink: entry.is_symlink,
3060 is_ignored: entry.is_ignored,
3061 })
3062 } else {
3063 Err(anyhow!(
3064 "missing mtime in remote worktree entry {:?}",
3065 entry.path
3066 ))
3067 }
3068 }
3069}
3070
3071#[cfg(test)]
3072mod tests {
3073 use super::*;
3074 use fs::repository::FakeGitRepository;
3075 use fs::{FakeFs, RealFs};
3076 use gpui::{executor::Deterministic, TestAppContext};
3077 use pretty_assertions::assert_eq;
3078 use rand::prelude::*;
3079 use serde_json::json;
3080 use std::{env, fmt::Write};
3081 use util::{http::FakeHttpClient, test::temp_tree};
3082
    // Verifies that traversal respects `.gitignore`: `entries(false)` skips
    // the ignored "a/b" entry while `entries(true)` includes it.
    #[gpui::test]
    async fn test_traversal(cx: &mut TestAppContext) {
        let fs = FakeFs::new(cx.background());
        fs.insert_tree(
            "/root",
            json!({
                ".gitignore": "a/b\n",
                "a": {
                    "b": "",
                    "c": "",
                }
            }),
        )
        .await;

        let http_client = FakeHttpClient::with_404_response();
        let client = cx.read(|cx| Client::new(http_client, cx));

        let tree = Worktree::local(
            client,
            Path::new("/root"),
            true,
            fs,
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();
        // Wait for the initial scan to finish before asserting on entries.
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;

        tree.read_with(cx, |tree, _| {
            assert_eq!(
                tree.entries(false)
                    .map(|entry| entry.path.as_ref())
                    .collect::<Vec<_>>(),
                vec![
                    Path::new(""),
                    Path::new(".gitignore"),
                    Path::new("a"),
                    Path::new("a/c"),
                ]
            );
            assert_eq!(
                tree.entries(true)
                    .map(|entry| entry.path.as_ref())
                    .collect::<Vec<_>>(),
                vec![
                    Path::new(""),
                    Path::new(".gitignore"),
                    Path::new("a"),
                    Path::new("a/b"),
                    Path::new("a/c"),
                ]
            );
        })
    }
3140
    // Verifies that symlinks forming a cycle are listed as entries but are
    // not recursed into, and that renaming one of them is picked up.
    #[gpui::test(iterations = 10)]
    async fn test_circular_symlinks(executor: Arc<Deterministic>, cx: &mut TestAppContext) {
        let fs = FakeFs::new(cx.background());
        fs.insert_tree(
            "/root",
            json!({
                "lib": {
                    "a": {
                        "a.txt": ""
                    },
                    "b": {
                        "b.txt": ""
                    }
                }
            }),
        )
        .await;
        // Each symlink points back at its parent, forming a cycle.
        fs.insert_symlink("/root/lib/a/lib", "..".into()).await;
        fs.insert_symlink("/root/lib/b/lib", "..".into()).await;

        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
        let tree = Worktree::local(
            client,
            Path::new("/root"),
            true,
            fs.clone(),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();

        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;

        tree.read_with(cx, |tree, _| {
            assert_eq!(
                tree.entries(false)
                    .map(|entry| entry.path.as_ref())
                    .collect::<Vec<_>>(),
                vec![
                    Path::new(""),
                    Path::new("lib"),
                    Path::new("lib/a"),
                    Path::new("lib/a/a.txt"),
                    Path::new("lib/a/lib"),
                    Path::new("lib/b"),
                    Path::new("lib/b/b.txt"),
                    Path::new("lib/b/lib"),
                ]
            );
        });

        // Renaming one of the symlinks should be reflected after the event
        // is processed.
        fs.rename(
            Path::new("/root/lib/a/lib"),
            Path::new("/root/lib/a/lib-2"),
            Default::default(),
        )
        .await
        .unwrap();
        executor.run_until_parked();
        tree.read_with(cx, |tree, _| {
            assert_eq!(
                tree.entries(false)
                    .map(|entry| entry.path.as_ref())
                    .collect::<Vec<_>>(),
                vec![
                    Path::new(""),
                    Path::new("lib"),
                    Path::new("lib/a"),
                    Path::new("lib/a/a.txt"),
                    Path::new("lib/a/lib-2"),
                    Path::new("lib/b"),
                    Path::new("lib/b/b.txt"),
                    Path::new("lib/b/lib"),
                ]
            );
        });
    }
3220
    // Verifies that ignore statuses — from both the worktree's own
    // `.gitignore` and an ancestor directory's — are applied to existing
    // entries and to files created after the initial scan.
    #[gpui::test]
    async fn test_rescan_with_gitignore(cx: &mut TestAppContext) {
        let parent_dir = temp_tree(json!({
            ".gitignore": "ancestor-ignored-file1\nancestor-ignored-file2\n",
            "tree": {
                ".git": {},
                ".gitignore": "ignored-dir\n",
                "tracked-dir": {
                    "tracked-file1": "",
                    "ancestor-ignored-file1": "",
                },
                "ignored-dir": {
                    "ignored-file1": ""
                }
            }
        }));
        let dir = parent_dir.path().join("tree");

        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));

        let tree = Worktree::local(
            client,
            dir.as_path(),
            true,
            Arc::new(RealFs),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;
        tree.flush_fs_events(cx).await;
        cx.read(|cx| {
            let tree = tree.read(cx);
            assert!(
                !tree
                    .entry_for_path("tracked-dir/tracked-file1")
                    .unwrap()
                    .is_ignored
            );
            assert!(
                tree.entry_for_path("tracked-dir/ancestor-ignored-file1")
                    .unwrap()
                    .is_ignored
            );
            assert!(
                tree.entry_for_path("ignored-dir/ignored-file1")
                    .unwrap()
                    .is_ignored
            );
        });

        // Files created after the initial scan should get the same statuses.
        std::fs::write(dir.join("tracked-dir/tracked-file2"), "").unwrap();
        std::fs::write(dir.join("tracked-dir/ancestor-ignored-file2"), "").unwrap();
        std::fs::write(dir.join("ignored-dir/ignored-file2"), "").unwrap();
        tree.flush_fs_events(cx).await;
        cx.read(|cx| {
            let tree = tree.read(cx);
            assert!(
                !tree
                    .entry_for_path("tracked-dir/tracked-file2")
                    .unwrap()
                    .is_ignored
            );
            assert!(
                tree.entry_for_path("tracked-dir/ancestor-ignored-file2")
                    .unwrap()
                    .is_ignored
            );
            assert!(
                tree.entry_for_path("ignored-dir/ignored-file2")
                    .unwrap()
                    .is_ignored
            );
            assert!(tree.entry_for_path(".git").unwrap().is_ignored);
        });
    }
3299
    // Verifies that paths are mapped to their nearest enclosing git
    // repository, that changes inside `.git` bump the repo's scan id, and
    // that deleting `.git` removes the repository.
    #[gpui::test]
    async fn test_git_repository_for_path(cx: &mut TestAppContext) {
        let root = temp_tree(json!({
            "dir1": {
                ".git": {},
                "deps": {
                    "dep1": {
                        ".git": {},
                        "src": {
                            "a.txt": ""
                        }
                    }
                },
                "src": {
                    "b.txt": ""
                }
            },
            "c.txt": "",
        }));

        let http_client = FakeHttpClient::with_404_response();
        let client = cx.read(|cx| Client::new(http_client, cx));
        let tree = Worktree::local(
            client,
            root.path(),
            true,
            Arc::new(RealFs),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();

        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;
        tree.flush_fs_events(cx).await;

        tree.read_with(cx, |tree, _cx| {
            let tree = tree.as_local().unwrap();

            // "c.txt" lives outside any repository.
            assert!(tree.repo_for("c.txt".as_ref()).is_none());

            let repo = tree.repo_for("dir1/src/b.txt".as_ref()).unwrap();
            assert_eq!(repo.content_path.as_ref(), Path::new("dir1"));
            assert_eq!(repo.git_dir_path.as_ref(), Path::new("dir1/.git"));

            // A nested repository shadows its ancestor for paths within it.
            let repo = tree.repo_for("dir1/deps/dep1/src/a.txt".as_ref()).unwrap();
            assert_eq!(repo.content_path.as_ref(), Path::new("dir1/deps/dep1"));
            assert_eq!(repo.git_dir_path.as_ref(), Path::new("dir1/deps/dep1/.git"),);
        });

        let original_scan_id = tree.read_with(cx, |tree, _cx| {
            let tree = tree.as_local().unwrap();
            tree.repo_for("dir1/src/b.txt".as_ref()).unwrap().scan_id
        });

        // Touching a file inside `.git` should bump the repository's scan id.
        std::fs::write(root.path().join("dir1/.git/random_new_file"), "hello").unwrap();
        tree.flush_fs_events(cx).await;

        tree.read_with(cx, |tree, _cx| {
            let tree = tree.as_local().unwrap();
            let new_scan_id = tree.repo_for("dir1/src/b.txt".as_ref()).unwrap().scan_id;
            assert_ne!(
                original_scan_id, new_scan_id,
                "original {original_scan_id}, new {new_scan_id}"
            );
        });

        // Deleting `.git` should make the repository disappear.
        std::fs::remove_dir_all(root.path().join("dir1/.git")).unwrap();
        tree.flush_fs_events(cx).await;

        tree.read_with(cx, |tree, _cx| {
            let tree = tree.as_local().unwrap();

            assert!(tree.repo_for("dir1/src/b.txt".as_ref()).is_none());
        });
    }
3377
    // Verifies that `changed_repos` reports deleted, updated, and added
    // repositories, and omits unchanged ones.
    #[test]
    fn test_changed_repos() {
        // Builds a repository entry whose content path is the parent of the
        // given `.git` directory path.
        fn fake_entry(git_dir_path: impl AsRef<Path>, scan_id: usize) -> GitRepositoryEntry {
            GitRepositoryEntry {
                repo: Arc::new(Mutex::new(FakeGitRepository::default())),
                scan_id,
                content_path: git_dir_path.as_ref().parent().unwrap().into(),
                git_dir_path: git_dir_path.as_ref().into(),
            }
        }

        let prev_repos: Vec<GitRepositoryEntry> = vec![
            fake_entry("/.git", 0),
            fake_entry("/a/.git", 0),
            fake_entry("/a/b/.git", 0),
        ];

        let new_repos: Vec<GitRepositoryEntry> = vec![
            fake_entry("/a/.git", 1),
            fake_entry("/a/b/.git", 0),
            fake_entry("/a/c/.git", 0),
        ];

        let res = LocalWorktree::changed_repos(&prev_repos, &new_repos);

        // Deletion retained
        assert!(res
            .iter()
            .find(|repo| repo.git_dir_path.as_ref() == Path::new("/.git") && repo.scan_id == 0)
            .is_some());

        // Update retained
        assert!(res
            .iter()
            .find(|repo| repo.git_dir_path.as_ref() == Path::new("/a/.git") && repo.scan_id == 1)
            .is_some());

        // Addition retained
        assert!(res
            .iter()
            .find(|repo| repo.git_dir_path.as_ref() == Path::new("/a/c/.git") && repo.scan_id == 0)
            .is_some());

        // No change, not retained
        assert!(res
            .iter()
            .find(|repo| repo.git_dir_path.as_ref() == Path::new("/a/b/.git") && repo.scan_id == 0)
            .is_none());
    }
3427
3428 #[gpui::test]
3429 async fn test_write_file(cx: &mut TestAppContext) {
3430 let dir = temp_tree(json!({
3431 ".git": {},
3432 ".gitignore": "ignored-dir\n",
3433 "tracked-dir": {},
3434 "ignored-dir": {}
3435 }));
3436
3437 let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
3438
3439 let tree = Worktree::local(
3440 client,
3441 dir.path(),
3442 true,
3443 Arc::new(RealFs),
3444 Default::default(),
3445 &mut cx.to_async(),
3446 )
3447 .await
3448 .unwrap();
3449 cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
3450 .await;
3451 tree.flush_fs_events(cx).await;
3452
3453 tree.update(cx, |tree, cx| {
3454 tree.as_local().unwrap().write_file(
3455 Path::new("tracked-dir/file.txt"),
3456 "hello".into(),
3457 Default::default(),
3458 cx,
3459 )
3460 })
3461 .await
3462 .unwrap();
3463 tree.update(cx, |tree, cx| {
3464 tree.as_local().unwrap().write_file(
3465 Path::new("ignored-dir/file.txt"),
3466 "world".into(),
3467 Default::default(),
3468 cx,
3469 )
3470 })
3471 .await
3472 .unwrap();
3473
3474 tree.read_with(cx, |tree, _| {
3475 let tracked = tree.entry_for_path("tracked-dir/file.txt").unwrap();
3476 let ignored = tree.entry_for_path("ignored-dir/file.txt").unwrap();
3477 assert!(!tracked.is_ignored);
3478 assert!(ignored.is_ignored);
3479 });
3480 }
3481
3482 #[gpui::test(iterations = 30)]
3483 async fn test_create_directory_during_initial_scan(cx: &mut TestAppContext) {
3484 let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
3485
3486 let fs = FakeFs::new(cx.background());
3487 fs.insert_tree(
3488 "/root",
3489 json!({
3490 "b": {},
3491 "c": {},
3492 "d": {},
3493 }),
3494 )
3495 .await;
3496
3497 let tree = Worktree::local(
3498 client,
3499 "/root".as_ref(),
3500 true,
3501 fs,
3502 Default::default(),
3503 &mut cx.to_async(),
3504 )
3505 .await
3506 .unwrap();
3507
3508 let mut snapshot1 = tree.update(cx, |tree, _| tree.as_local().unwrap().snapshot());
3509
3510 let entry = tree
3511 .update(cx, |tree, cx| {
3512 tree.as_local_mut()
3513 .unwrap()
3514 .create_entry("a/e".as_ref(), true, cx)
3515 })
3516 .await
3517 .unwrap();
3518 assert!(entry.is_dir());
3519
3520 cx.foreground().run_until_parked();
3521 tree.read_with(cx, |tree, _| {
3522 assert_eq!(tree.entry_for_path("a/e").unwrap().kind, EntryKind::Dir);
3523 });
3524
3525 let snapshot2 = tree.update(cx, |tree, _| tree.as_local().unwrap().snapshot());
3526 let update = snapshot2.build_update(&snapshot1, 0, 0, true);
3527 snapshot1.apply_remote_update(update).unwrap();
3528 assert_eq!(snapshot1.to_vec(true), snapshot2.to_vec(true),);
3529 }
3530
    #[gpui::test(iterations = 100)]
    async fn test_random_worktree_operations_during_initial_scan(
        cx: &mut TestAppContext,
        mut rng: StdRng,
    ) {
        // Number of random worktree mutations to perform; overridable via env.
        let operations = env::var("OPERATIONS")
            .map(|o| o.parse().unwrap())
            .unwrap_or(5);
        // Number of random FS entries to seed before starting the worktree.
        let initial_entries = env::var("INITIAL_ENTRIES")
            .map(|o| o.parse().unwrap())
            .unwrap_or(20);

        let root_dir = Path::new("/test");
        let fs = FakeFs::new(cx.background()) as Arc<dyn Fs>;
        fs.as_fake().insert_tree(root_dir, json!({})).await;
        for _ in 0..initial_entries {
            randomly_mutate_fs(&fs, root_dir, 1.0, &mut rng).await;
        }
        log::info!("generated initial tree");

        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
        let worktree = Worktree::local(
            client.clone(),
            root_dir,
            true,
            fs.clone(),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();

        // Baseline snapshot taken before the initial scan has completed.
        let mut snapshot = worktree.update(cx, |tree, _| tree.as_local().unwrap().snapshot());

        for _ in 0..operations {
            // Mutations may legitimately fail while racing the scanner, so
            // errors are logged rather than unwrapped.
            worktree
                .update(cx, |worktree, cx| {
                    randomly_mutate_worktree(worktree, &mut rng, cx)
                })
                .await
                .log_err();
            worktree.read_with(cx, |tree, _| {
                tree.as_local().unwrap().snapshot.check_invariants()
            });

            if rng.gen_bool(0.6) {
                // Periodically verify that a remote update brings the stale
                // baseline snapshot in sync with the current one.
                let new_snapshot =
                    worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
                let update = new_snapshot.build_update(&snapshot, 0, 0, true);
                snapshot.apply_remote_update(update.clone()).unwrap();
                assert_eq!(
                    snapshot.to_vec(true),
                    new_snapshot.to_vec(true),
                    "incorrect snapshot after update {:?}",
                    update
                );
            }
        }

        // Let the initial scan finish, then re-check invariants and the
        // update protocol one last time.
        worktree
            .update(cx, |tree, _| tree.as_local_mut().unwrap().scan_complete())
            .await;
        worktree.read_with(cx, |tree, _| {
            tree.as_local().unwrap().snapshot.check_invariants()
        });

        let new_snapshot = worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
        let update = new_snapshot.build_update(&snapshot, 0, 0, true);
        snapshot.apply_remote_update(update.clone()).unwrap();
        assert_eq!(
            snapshot.to_vec(true),
            new_snapshot.to_vec(true),
            "incorrect snapshot after update {:?}",
            update
        );
    }
3607
    #[gpui::test(iterations = 100)]
    async fn test_random_worktree_changes(cx: &mut TestAppContext, mut rng: StdRng) {
        // Number of random FS mutations to perform; overridable via env.
        let operations = env::var("OPERATIONS")
            .map(|o| o.parse().unwrap())
            .unwrap_or(40);
        // Number of random FS entries to seed before starting the worktree.
        let initial_entries = env::var("INITIAL_ENTRIES")
            .map(|o| o.parse().unwrap())
            .unwrap_or(20);

        let root_dir = Path::new("/test");
        let fs = FakeFs::new(cx.background()) as Arc<dyn Fs>;
        fs.as_fake().insert_tree(root_dir, json!({})).await;
        for _ in 0..initial_entries {
            randomly_mutate_fs(&fs, root_dir, 1.0, &mut rng).await;
        }
        log::info!("generated initial tree");

        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
        let worktree = Worktree::local(
            client.clone(),
            root_dir,
            true,
            fs.clone(),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();

        worktree
            .update(cx, |tree, _| tree.as_local_mut().unwrap().scan_complete())
            .await;

        // After the initial scan is complete, the `UpdatedEntries` event can
        // be used to follow along with all changes to the worktree's snapshot.
        worktree.update(cx, |tree, cx| {
            let mut paths = tree
                .as_local()
                .unwrap()
                .paths()
                .cloned()
                .collect::<Vec<_>>();

            // Maintain a sorted mirror of the worktree's paths by applying
            // each reported change, then assert it matches the real state.
            cx.subscribe(&worktree, move |tree, _, event, _| {
                if let Event::UpdatedEntries(changes) = event {
                    for (path, change_type) in changes.iter() {
                        let path = path.clone();
                        // Whether found or not, `ix` is the sorted position
                        // where `path` belongs.
                        let ix = match paths.binary_search(&path) {
                            Ok(ix) | Err(ix) => ix,
                        };
                        match change_type {
                            PathChange::Added => {
                                assert_ne!(paths.get(ix), Some(&path));
                                paths.insert(ix, path);
                            }
                            PathChange::Removed => {
                                assert_eq!(paths.get(ix), Some(&path));
                                paths.remove(ix);
                            }
                            PathChange::Updated => {
                                assert_eq!(paths.get(ix), Some(&path));
                            }
                            PathChange::AddedOrUpdated => {
                                if paths[ix] != path {
                                    paths.insert(ix, path);
                                }
                            }
                        }
                    }
                    let new_paths = tree.paths().cloned().collect::<Vec<_>>();
                    assert_eq!(paths, new_paths, "incorrect changes: {:?}", changes);
                }
            })
            .detach();
        });

        let mut snapshots = Vec::new();
        let mut mutations_len = operations;
        while mutations_len > 1 {
            // Interleave FS mutations with partial flushes of buffered FS
            // events to exercise the scanner's event batching.
            randomly_mutate_fs(&fs, root_dir, 1.0, &mut rng).await;
            let buffered_event_count = fs.as_fake().buffered_event_count().await;
            if buffered_event_count > 0 && rng.gen_bool(0.3) {
                let len = rng.gen_range(0..=buffered_event_count);
                log::info!("flushing {} events", len);
                fs.as_fake().flush_events(len).await;
            } else {
                randomly_mutate_fs(&fs, root_dir, 0.6, &mut rng).await;
                mutations_len -= 1;
            }

            cx.foreground().run_until_parked();
            if rng.gen_bool(0.2) {
                // Keep some historical snapshots to test updates against later.
                log::info!("storing snapshot {}", snapshots.len());
                let snapshot =
                    worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
                snapshots.push(snapshot);
            }
        }

        // Drain all remaining FS events and let the scanner settle.
        log::info!("quiescing");
        fs.as_fake().flush_events(usize::MAX).await;
        cx.foreground().run_until_parked();
        let snapshot = worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
        snapshot.check_invariants();

        // A freshly-scanned worktree over the same FS must agree with the
        // incrementally-maintained one.
        {
            let new_worktree = Worktree::local(
                client.clone(),
                root_dir,
                true,
                fs.clone(),
                Default::default(),
                &mut cx.to_async(),
            )
            .await
            .unwrap();
            new_worktree
                .update(cx, |tree, _| tree.as_local_mut().unwrap().scan_complete())
                .await;
            let new_snapshot =
                new_worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
            assert_eq!(snapshot.to_vec(true), new_snapshot.to_vec(true));
        }

        // Every stored historical snapshot can be caught up to the final
        // state via a remote update, with or without ignored entries.
        for (i, mut prev_snapshot) in snapshots.into_iter().enumerate() {
            let include_ignored = rng.gen::<bool>();
            if !include_ignored {
                // Strip ignored entries from the old snapshot first, since an
                // update built without ignored entries won't mention them.
                let mut entries_by_path_edits = Vec::new();
                let mut entries_by_id_edits = Vec::new();
                for entry in prev_snapshot
                    .entries_by_id
                    .cursor::<()>()
                    .filter(|e| e.is_ignored)
                {
                    entries_by_path_edits.push(Edit::Remove(PathKey(entry.path.clone())));
                    entries_by_id_edits.push(Edit::Remove(entry.id));
                }

                prev_snapshot
                    .entries_by_path
                    .edit(entries_by_path_edits, &());
                prev_snapshot.entries_by_id.edit(entries_by_id_edits, &());
            }

            let update = snapshot.build_update(&prev_snapshot, 0, 0, include_ignored);
            prev_snapshot.apply_remote_update(update.clone()).unwrap();
            assert_eq!(
                prev_snapshot.to_vec(include_ignored),
                snapshot.to_vec(include_ignored),
                "wrong update for snapshot {i}. update: {:?}",
                update
            );
        }
    }
3762
    /// Applies one random mutation to `worktree` — delete an entry, rename an
    /// entry, or create/overwrite an entry — and returns a task that resolves
    /// once the mutation has been applied.
    fn randomly_mutate_worktree(
        worktree: &mut Worktree,
        rng: &mut impl Rng,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<()>> {
        let worktree = worktree.as_local_mut().unwrap();
        let snapshot = worktree.snapshot();
        // Pick a random non-ignored entry to mutate.
        let entry = snapshot.entries(false).choose(rng).unwrap();

        match rng.gen_range(0_u32..100) {
            // ~1/3 of the time: delete, unless we picked the root (empty path).
            0..=33 if entry.path.as_ref() != Path::new("") => {
                log::info!("deleting entry {:?} ({})", entry.path, entry.id.0);
                worktree.delete_entry(entry.id, cx).unwrap()
            }
            // ~1/3: rename into a randomly chosen directory (again skipping
            // the root entry).
            ..=66 if entry.path.as_ref() != Path::new("") => {
                let other_entry = snapshot.entries(false).choose(rng).unwrap();
                let new_parent_path = if other_entry.is_dir() {
                    other_entry.path.clone()
                } else {
                    other_entry.path.parent().unwrap().into()
                };
                let mut new_path = new_parent_path.join(gen_name(rng));
                // Avoid moving an entry into itself; fall back to a fresh
                // root-level name.
                if new_path.starts_with(&entry.path) {
                    new_path = gen_name(rng).into();
                }

                log::info!(
                    "renaming entry {:?} ({}) to {:?}",
                    entry.path,
                    entry.id.0,
                    new_path
                );
                let task = worktree.rename_entry(entry.id, new_path, cx).unwrap();
                cx.foreground().spawn(async move {
                    task.await?;
                    Ok(())
                })
            }
            // Remainder: create a child under a directory, or overwrite a
            // file with empty contents.
            _ => {
                let task = if entry.is_dir() {
                    let child_path = entry.path.join(gen_name(rng));
                    let is_dir = rng.gen_bool(0.3);
                    log::info!(
                        "creating {} at {:?}",
                        if is_dir { "dir" } else { "file" },
                        child_path,
                    );
                    worktree.create_entry(child_path, is_dir, cx)
                } else {
                    log::info!("overwriting file {:?} ({})", entry.path, entry.id.0);
                    worktree.write_file(entry.path.clone(), "".into(), Default::default(), cx)
                };
                cx.foreground().spawn(async move {
                    task.await?;
                    Ok(())
                })
            }
        }
    }
3822
    /// Applies one random mutation to the fake filesystem under `root_path`:
    /// with probability `insertion_probability` creates a new file or dir,
    /// occasionally writes a `.gitignore`, and otherwise renames or deletes
    /// an existing entry.
    async fn randomly_mutate_fs(
        fs: &Arc<dyn Fs>,
        root_path: &Path,
        insertion_probability: f64,
        rng: &mut impl Rng,
    ) {
        // Partition the existing paths under the root into files and dirs.
        // Note: `dirs` always includes `root_path` itself.
        let mut files = Vec::new();
        let mut dirs = Vec::new();
        for path in fs.as_fake().paths() {
            if path.starts_with(root_path) {
                if fs.is_file(&path).await {
                    files.push(path);
                } else {
                    dirs.push(path);
                }
            }
        }

        // Always insert when the tree is effectively empty (only the root).
        if (files.is_empty() && dirs.len() == 1) || rng.gen_bool(insertion_probability) {
            let path = dirs.choose(rng).unwrap();
            let new_path = path.join(gen_name(rng));

            if rng.gen() {
                log::info!(
                    "creating dir {:?}",
                    new_path.strip_prefix(root_path).unwrap()
                );
                fs.create_dir(&new_path).await.unwrap();
            } else {
                log::info!(
                    "creating file {:?}",
                    new_path.strip_prefix(root_path).unwrap()
                );
                fs.create_file(&new_path, Default::default()).await.unwrap();
            }
        } else if rng.gen_bool(0.05) {
            // Rarely: write a `.gitignore` in a random dir, ignoring a random
            // subset of the files and dirs beneath it.
            let ignore_dir_path = dirs.choose(rng).unwrap();
            let ignore_path = ignore_dir_path.join(&*GITIGNORE);

            // `subdirs` always contains `ignore_dir_path` itself, so the
            // `0..subdirs.len()` range below is never empty.
            let subdirs = dirs
                .iter()
                .filter(|d| d.starts_with(&ignore_dir_path))
                .cloned()
                .collect::<Vec<_>>();
            let subfiles = files
                .iter()
                .filter(|d| d.starts_with(&ignore_dir_path))
                .cloned()
                .collect::<Vec<_>>();
            let files_to_ignore = {
                let len = rng.gen_range(0..=subfiles.len());
                subfiles.choose_multiple(rng, len)
            };
            let dirs_to_ignore = {
                let len = rng.gen_range(0..subdirs.len());
                subdirs.choose_multiple(rng, len)
            };

            // One ignore pattern per line, relative to the ignore file's dir.
            let mut ignore_contents = String::new();
            for path_to_ignore in files_to_ignore.chain(dirs_to_ignore) {
                writeln!(
                    ignore_contents,
                    "{}",
                    path_to_ignore
                        .strip_prefix(&ignore_dir_path)
                        .unwrap()
                        .to_str()
                        .unwrap()
                )
                .unwrap();
            }
            log::info!(
                "creating gitignore {:?} with contents:\n{}",
                ignore_path.strip_prefix(&root_path).unwrap(),
                ignore_contents
            );
            fs.save(
                &ignore_path,
                &ignore_contents.as_str().into(),
                Default::default(),
            )
            .await
            .unwrap();
        } else {
            // Otherwise: rename or delete an existing entry. `dirs[1..]`
            // skips the root dir so it is never renamed or deleted.
            let old_path = {
                let file_path = files.choose(rng);
                let dir_path = dirs[1..].choose(rng);
                file_path.into_iter().chain(dir_path).choose(rng).unwrap()
            };

            let is_rename = rng.gen();
            if is_rename {
                // Pick a destination parent that isn't inside the entry
                // being moved.
                let new_path_parent = dirs
                    .iter()
                    .filter(|d| !d.starts_with(old_path))
                    .choose(rng)
                    .unwrap();

                // Sometimes replace an existing directory wholesale instead
                // of creating a new name beneath it.
                let overwrite_existing_dir =
                    !old_path.starts_with(&new_path_parent) && rng.gen_bool(0.3);
                let new_path = if overwrite_existing_dir {
                    fs.remove_dir(
                        &new_path_parent,
                        RemoveOptions {
                            recursive: true,
                            ignore_if_not_exists: true,
                        },
                    )
                    .await
                    .unwrap();
                    new_path_parent.to_path_buf()
                } else {
                    new_path_parent.join(gen_name(rng))
                };

                log::info!(
                    "renaming {:?} to {}{:?}",
                    old_path.strip_prefix(&root_path).unwrap(),
                    if overwrite_existing_dir {
                        "overwrite "
                    } else {
                        ""
                    },
                    new_path.strip_prefix(&root_path).unwrap()
                );
                fs.rename(
                    &old_path,
                    &new_path,
                    fs::RenameOptions {
                        overwrite: true,
                        ignore_if_exists: true,
                    },
                )
                .await
                .unwrap();
            } else if fs.is_file(&old_path).await {
                log::info!(
                    "deleting file {:?}",
                    old_path.strip_prefix(&root_path).unwrap()
                );
                fs.remove_file(old_path, Default::default()).await.unwrap();
            } else {
                log::info!(
                    "deleting dir {:?}",
                    old_path.strip_prefix(&root_path).unwrap()
                );
                fs.remove_dir(
                    &old_path,
                    RemoveOptions {
                        recursive: true,
                        ignore_if_not_exists: true,
                    },
                )
                .await
                .unwrap();
            }
        }
    }
3981
3982 fn gen_name(rng: &mut impl Rng) -> String {
3983 (0..6)
3984 .map(|_| rng.sample(rand::distributions::Alphanumeric))
3985 .map(char::from)
3986 .collect()
3987 }
3988
    impl LocalSnapshot {
        /// Asserts the snapshot's internal consistency, panicking on any
        /// violated invariant. Test-only helper.
        fn check_invariants(&self) {
            // `entries_by_path` and `entries_by_id` must index exactly the
            // same set of (path, id) pairs.
            assert_eq!(
                self.entries_by_path
                    .cursor::<()>()
                    .map(|e| (&e.path, e.id))
                    .collect::<Vec<_>>(),
                self.entries_by_id
                    .cursor::<()>()
                    .map(|e| (&e.path, e.id))
                    .collect::<collections::BTreeSet<_>>()
                    .into_iter()
                    .collect::<Vec<_>>(),
                "entries_by_path and entries_by_id are inconsistent"
            );

            // The file iterators must yield exactly the file entries (and the
            // visible iterator only the non-ignored ones), in cursor order.
            let mut files = self.files(true, 0);
            let mut visible_files = self.files(false, 0);
            for entry in self.entries_by_path.cursor::<()>() {
                if entry.is_file() {
                    assert_eq!(files.next().unwrap().inode, entry.inode);
                    if !entry.is_ignored {
                        assert_eq!(visible_files.next().unwrap().inode, entry.inode);
                    }
                }
            }

            // Neither iterator may have entries left over.
            assert!(files.next().is_none());
            assert!(visible_files.next().is_none());

            // Walk the tree via `child_entries` with an explicit stack;
            // children are spliced in at the stack's previous length so the
            // resulting visit order matches the cursor's depth-first order
            // (asserted below, despite the variable's name).
            let mut bfs_paths = Vec::new();
            let mut stack = vec![Path::new("")];
            while let Some(path) = stack.pop() {
                bfs_paths.push(path);
                let ix = stack.len();
                for child_entry in self.child_entries(path) {
                    stack.insert(ix, &child_entry.path);
                }
            }

            let dfs_paths_via_iter = self
                .entries_by_path
                .cursor::<()>()
                .map(|e| e.path.as_ref())
                .collect::<Vec<_>>();
            assert_eq!(bfs_paths, dfs_paths_via_iter);

            // The public traversal API must agree with the raw cursor.
            let dfs_paths_via_traversal = self
                .entries(true)
                .map(|e| e.path.as_ref())
                .collect::<Vec<_>>();
            assert_eq!(dfs_paths_via_traversal, dfs_paths_via_iter);

            // Every tracked `.gitignore` parent dir must still exist in the
            // snapshot, along with its `.gitignore` entry.
            for ignore_parent_abs_path in self.ignores_by_parent_abs_path.keys() {
                let ignore_parent_path =
                    ignore_parent_abs_path.strip_prefix(&self.abs_path).unwrap();
                assert!(self.entry_for_path(&ignore_parent_path).is_some());
                assert!(self
                    .entry_for_path(ignore_parent_path.join(&*GITIGNORE))
                    .is_some());
            }
        }

        /// Collects `(path, inode, is_ignored)` for every entry, sorted by
        /// path, optionally excluding ignored entries. Test-only helper used
        /// to compare snapshots.
        fn to_vec(&self, include_ignored: bool) -> Vec<(&Path, u64, bool)> {
            let mut paths = Vec::new();
            for entry in self.entries_by_path.cursor::<()>() {
                if include_ignored || !entry.is_ignored {
                    paths.push((entry.path.as_ref(), entry.inode, entry.is_ignored));
                }
            }
            paths.sort_by(|a, b| a.0.cmp(b.0));
            paths
        }
    }
4063}