1use crate::{
2 copy_recursive, ignore::IgnoreStack, DiagnosticSummary, ProjectEntryId, RemoveOptions,
3};
4use ::ignore::gitignore::{Gitignore, GitignoreBuilder};
5use anyhow::{anyhow, Context, Result};
6use client::{proto, Client};
7use clock::ReplicaId;
8use collections::{HashMap, VecDeque};
9use fs::{repository::GitRepository, Fs, LineEnding};
10use futures::{
11 channel::{
12 mpsc::{self, UnboundedSender},
13 oneshot,
14 },
15 select_biased,
16 task::Poll,
17 Stream, StreamExt,
18};
19use fuzzy::CharBag;
20use git::{DOT_GIT, GITIGNORE};
21use gpui::{executor, AppContext, AsyncAppContext, Entity, ModelContext, ModelHandle, Task};
22use language::{
23 proto::{
24 deserialize_fingerprint, deserialize_version, serialize_fingerprint, serialize_line_ending,
25 serialize_version,
26 },
27 Buffer, DiagnosticEntry, File as _, PointUtf16, Rope, RopeFingerprint, Unclipped,
28};
29use parking_lot::Mutex;
30use postage::{
31 barrier,
32 prelude::{Sink as _, Stream as _},
33 watch,
34};
35use smol::channel::{self, Sender};
36use std::{
37 any::Any,
38 cmp::{self, Ordering},
39 convert::TryFrom,
40 ffi::OsStr,
41 fmt,
42 future::Future,
43 mem,
44 ops::{Deref, DerefMut},
45 path::{Path, PathBuf},
46 pin::Pin,
47 sync::{
48 atomic::{AtomicUsize, Ordering::SeqCst},
49 Arc,
50 },
51 time::{Duration, SystemTime},
52};
53use sum_tree::{Bias, Edit, SeekTarget, SumTree, TreeMap, TreeSet};
54use util::{paths::HOME, ResultExt, TryFutureExt};
55
/// Stable identifier for a worktree, derived from the model id of the
/// `Worktree` it belongs to (see `WorktreeId::from_usize` usage below).
#[derive(Copy, Clone, PartialEq, Eq, Debug, Hash, PartialOrd, Ord)]
pub struct WorktreeId(usize);
58
/// A directory tree the project operates on: either backed by the local
/// filesystem, or a replica of a worktree hosted by a remote collaborator.
pub enum Worktree {
    Local(LocalWorktree),
    Remote(RemoteWorktree),
}
63
/// A worktree backed by the local filesystem, kept up to date by a
/// background scanner task.
pub struct LocalWorktree {
    snapshot: LocalSnapshot,
    // Sends batches of absolute paths that need rescanning, paired with a
    // barrier sender that is dropped once the scanner has processed them.
    path_changes_tx: channel::Sender<(Vec<PathBuf>, barrier::Sender)>,
    // Watch channel holding `true` while the background scanner is working.
    is_scanning: (watch::Sender<bool>, watch::Receiver<bool>),
    _background_scanner_task: Task<()>,
    // Present while this worktree is shared with collaborators.
    share: Option<ShareState>,
    // Per-path diagnostics, grouped by language-server id.
    diagnostics: HashMap<Arc<Path>, Vec<(usize, Vec<DiagnosticEntry<Unclipped<PointUtf16>>>)>>,
    diagnostic_summaries: TreeMap<PathKey, DiagnosticSummary>,
    client: Arc<Client>,
    fs: Arc<dyn Fs>,
    visible: bool,
}
76
/// A replica of a worktree hosted by a remote collaborator, updated by
/// applying `proto::UpdateWorktree` messages.
pub struct RemoteWorktree {
    snapshot: Snapshot,
    // Snapshot mutated on a background task as updates arrive; the
    // foreground `snapshot` above is refreshed from it.
    background_snapshot: Arc<Mutex<Snapshot>>,
    project_id: u64,
    client: Arc<Client>,
    // Becomes `None` once the host disconnects (see `disconnected_from_host`).
    updates_tx: Option<UnboundedSender<proto::UpdateWorktree>>,
    // Waiters keyed by scan id, resolved once that scan has been observed.
    snapshot_subscriptions: VecDeque<(usize, oneshot::Sender<()>)>,
    replica_id: ReplicaId,
    diagnostic_summaries: TreeMap<PathKey, DiagnosticSummary>,
    visible: bool,
    disconnected: bool,
}
89
/// A view of a worktree's entries at a point in time, indexed both by path
/// and by entry id.
#[derive(Clone)]
pub struct Snapshot {
    id: WorktreeId,
    abs_path: Arc<Path>,
    root_name: String,
    // Lowercased characters of `root_name`, used for fuzzy matching.
    root_char_bag: CharBag,
    entries_by_path: SumTree<Entry>,
    entries_by_id: SumTree<PathEntry>,

    /// A number that increases every time the worktree begins scanning
    /// a set of paths from the filesystem. This scanning could be caused
    /// by some operation performed on the worktree, such as reading or
    /// writing a file, or by an event reported by the filesystem.
    scan_id: usize,

    /// The latest scan id that has completed, and whose preceding scans
    /// have all completed. The current `scan_id` could be more than one
    /// greater than the `completed_scan_id` if operations are performed
    /// on the worktree while it is processing a file-system event.
    completed_scan_id: usize,
}
111
/// A git repository discovered within the worktree.
#[derive(Clone)]
pub struct GitRepositoryEntry {
    pub(crate) repo: Arc<Mutex<dyn GitRepository>>,

    // Scan id associated with this repository's latest state; compared in
    // `LocalWorktree::changed_repos` to detect repository changes.
    pub(crate) scan_id: usize,
    // Path to folder containing the .git file or directory
    pub(crate) content_path: Arc<Path>,
    // Path to the actual .git folder.
    // Note: if .git is a file, this points to the folder indicated by the .git file
    pub(crate) git_dir_path: Arc<Path>,
}
123
// Manual `Debug` impl that shows only the path fields, omitting the `repo`
// trait object and `scan_id`.
impl std::fmt::Debug for GitRepositoryEntry {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("GitRepositoryEntry")
            .field("content_path", &self.content_path)
            .field("git_dir_path", &self.git_dir_path)
            .finish()
    }
}
132
/// A `Snapshot` plus the local-only state needed to maintain it: parsed
/// gitignore files, git repositories, and entry-id bookkeeping.
#[derive(Debug)]
pub struct LocalSnapshot {
    // Parsed `.gitignore` files keyed by the absolute path of the directory
    // containing them; the `usize` is presumably the scan id when the file
    // was last seen — confirm against the scanner code.
    ignores_by_parent_abs_path: HashMap<Arc<Path>, (Arc<Gitignore>, usize)>,
    git_repositories: Vec<GitRepositoryEntry>,
    // Ids of removed entries; NOTE(review): presumably retained so that
    // re-created paths can reuse their previous ids — confirm.
    removed_entry_ids: HashMap<u64, ProjectEntryId>,
    next_entry_id: Arc<AtomicUsize>,
    snapshot: Snapshot,
}
141
142impl Clone for LocalSnapshot {
143 fn clone(&self) -> Self {
144 Self {
145 ignores_by_parent_abs_path: self.ignores_by_parent_abs_path.clone(),
146 git_repositories: self.git_repositories.iter().cloned().collect(),
147 removed_entry_ids: self.removed_entry_ids.clone(),
148 next_entry_id: self.next_entry_id.clone(),
149 snapshot: self.snapshot.clone(),
150 }
151 }
152}
153
// Expose the wrapped `Snapshot`'s read-only API directly on `LocalSnapshot`.
impl Deref for LocalSnapshot {
    type Target = Snapshot;

    fn deref(&self) -> &Self::Target {
        &self.snapshot
    }
}
161
// Mutable access to the wrapped `Snapshot` via deref coercion.
impl DerefMut for LocalSnapshot {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.snapshot
    }
}
167
/// Messages sent from the background scanner to the `LocalWorktree`.
enum ScanState {
    /// A scan pass has begun.
    Started,
    /// The scanner produced an updated snapshot.
    Updated {
        snapshot: LocalSnapshot,
        changes: HashMap<Arc<Path>, PathChange>,
        /// Dropped after the update is applied, waking any caller awaiting
        /// the processing of specific paths.
        barrier: Option<barrier::Sender>,
        /// Whether the scanner is still working after this update.
        scanning: bool,
    },
}
177
/// State that exists while a local worktree is shared with collaborators.
struct ShareState {
    project_id: u64,
    // Publishes each new snapshot to the task that diffs and sends updates.
    snapshots_tx: watch::Sender<LocalSnapshot>,
    // Signaled to make the update task retry after a failed send.
    resume_updates: watch::Sender<()>,
    _maintain_remote_snapshot: Task<Option<()>>,
}
184
/// Events emitted by a `Worktree` model.
pub enum Event {
    /// Entries changed; maps each changed path to the kind of change.
    UpdatedEntries(HashMap<Arc<Path>, PathChange>),
    /// One or more git repositories in the worktree changed.
    UpdatedGitRepositories(Vec<GitRepositoryEntry>),
}
189
// Makes `Worktree` usable as a gpui model that emits `Event`s.
impl Entity for Worktree {
    type Event = Event;
}
193
194impl Worktree {
    /// Creates a local worktree rooted at `path`.
    ///
    /// Stats the root path up front, then constructs the model along with a
    /// background scanner that watches the filesystem and streams
    /// `ScanState` updates back into the model.
    pub async fn local(
        client: Arc<Client>,
        path: impl Into<Arc<Path>>,
        visible: bool,
        fs: Arc<dyn Fs>,
        next_entry_id: Arc<AtomicUsize>,
        cx: &mut AsyncAppContext,
    ) -> Result<ModelHandle<Self>> {
        // After determining whether the root entry is a file or a directory, populate the
        // snapshot's "root name", which will be used for the purpose of fuzzy matching.
        let abs_path = path.into();
        let metadata = fs
            .metadata(&abs_path)
            .await
            .context("failed to stat worktree path")?;

        Ok(cx.add_model(move |cx: &mut ModelContext<Worktree>| {
            let root_name = abs_path
                .file_name()
                .map_or(String::new(), |f| f.to_string_lossy().to_string());

            let mut snapshot = LocalSnapshot {
                ignores_by_parent_abs_path: Default::default(),
                git_repositories: Default::default(),
                removed_entry_ids: Default::default(),
                next_entry_id,
                snapshot: Snapshot {
                    id: WorktreeId::from_usize(cx.model_id()),
                    abs_path: abs_path.clone(),
                    root_name: root_name.clone(),
                    root_char_bag: root_name.chars().map(|c| c.to_ascii_lowercase()).collect(),
                    entries_by_path: Default::default(),
                    entries_by_id: Default::default(),
                    // Start at 1 so `completed_scan_id: 0` means "no scan has
                    // completed yet".
                    scan_id: 1,
                    completed_scan_id: 0,
                },
            };

            // Seed the snapshot with a root entry when the path exists on disk.
            if let Some(metadata) = metadata {
                snapshot.insert_entry(
                    Entry::new(
                        Arc::from(Path::new("")),
                        &metadata,
                        &snapshot.next_entry_id,
                        snapshot.root_char_bag,
                    ),
                    fs.as_ref(),
                );
            }

            let (path_changes_tx, path_changes_rx) = channel::unbounded();
            let (scan_states_tx, mut scan_states_rx) = mpsc::unbounded();

            // Forward scan states from the background scanner into this model:
            // update the snapshot, track scanning progress, and emit events.
            cx.spawn_weak(|this, mut cx| async move {
                while let Some((state, this)) = scan_states_rx.next().await.zip(this.upgrade(&cx)) {
                    this.update(&mut cx, |this, cx| {
                        let this = this.as_local_mut().unwrap();
                        match state {
                            ScanState::Started => {
                                *this.is_scanning.0.borrow_mut() = true;
                            }
                            ScanState::Updated {
                                snapshot,
                                changes,
                                barrier,
                                scanning,
                            } => {
                                *this.is_scanning.0.borrow_mut() = scanning;
                                this.set_snapshot(snapshot, cx);
                                cx.emit(Event::UpdatedEntries(changes));
                                // Dropping the barrier wakes any caller waiting
                                // for this batch of paths to be processed.
                                drop(barrier);
                            }
                        }
                        cx.notify();
                    });
                }
            })
            .detach();

            let background_scanner_task = cx.background().spawn({
                let fs = fs.clone();
                let snapshot = snapshot.clone();
                let background = cx.background().clone();
                async move {
                    // Watch the root with a 100ms debounce on filesystem events.
                    let events = fs.watch(&abs_path, Duration::from_millis(100)).await;
                    BackgroundScanner::new(
                        snapshot,
                        fs,
                        scan_states_tx,
                        background,
                        path_changes_rx,
                    )
                    .run(events)
                    .await;
                }
            });

            Worktree::Local(LocalWorktree {
                snapshot,
                is_scanning: watch::channel_with(true),
                share: None,
                path_changes_tx,
                _background_scanner_task: background_scanner_task,
                diagnostics: Default::default(),
                diagnostic_summaries: Default::default(),
                client,
                fs,
                visible,
            })
        }))
    }
306
    /// Creates a remote worktree mirroring a worktree in another user's
    /// project, applying `proto::UpdateWorktree` messages as they arrive.
    pub fn remote(
        project_remote_id: u64,
        replica_id: ReplicaId,
        worktree: proto::WorktreeMetadata,
        client: Arc<Client>,
        cx: &mut AppContext,
    ) -> ModelHandle<Self> {
        cx.add_model(|cx: &mut ModelContext<Self>| {
            let snapshot = Snapshot {
                id: WorktreeId(worktree.id as usize),
                abs_path: Arc::from(PathBuf::from(worktree.abs_path)),
                root_name: worktree.root_name.clone(),
                root_char_bag: worktree
                    .root_name
                    .chars()
                    .map(|c| c.to_ascii_lowercase())
                    .collect(),
                entries_by_path: Default::default(),
                entries_by_id: Default::default(),
                scan_id: 1,
                completed_scan_id: 0,
            };

            let (updates_tx, mut updates_rx) = mpsc::unbounded();
            let background_snapshot = Arc::new(Mutex::new(snapshot.clone()));
            let (mut snapshot_updated_tx, mut snapshot_updated_rx) = watch::channel();

            // Apply incoming updates to the background snapshot off the main
            // thread, signaling the foreground task after each one.
            cx.background()
                .spawn({
                    let background_snapshot = background_snapshot.clone();
                    async move {
                        while let Some(update) = updates_rx.next().await {
                            if let Err(error) =
                                background_snapshot.lock().apply_remote_update(update)
                            {
                                log::error!("error applying worktree update: {}", error);
                            }
                            snapshot_updated_tx.send(()).await.ok();
                        }
                    }
                })
                .detach();

            // Mirror the background snapshot into the foreground model and
            // resolve subscriptions waiting on newly observed scan ids.
            cx.spawn_weak(|this, mut cx| async move {
                while (snapshot_updated_rx.recv().await).is_some() {
                    if let Some(this) = this.upgrade(&cx) {
                        this.update(&mut cx, |this, cx| {
                            let this = this.as_remote_mut().unwrap();
                            this.snapshot = this.background_snapshot.lock().clone();
                            cx.emit(Event::UpdatedEntries(Default::default()));
                            cx.notify();
                            // Subscriptions are kept sorted by scan id, so pop
                            // from the front while they are satisfied.
                            while let Some((scan_id, _)) = this.snapshot_subscriptions.front() {
                                if this.observed_snapshot(*scan_id) {
                                    let (_, tx) = this.snapshot_subscriptions.pop_front().unwrap();
                                    let _ = tx.send(());
                                } else {
                                    break;
                                }
                            }
                        });
                    } else {
                        break;
                    }
                }
            })
            .detach();

            Worktree::Remote(RemoteWorktree {
                project_id: project_remote_id,
                replica_id,
                snapshot: snapshot.clone(),
                background_snapshot,
                updates_tx: Some(updates_tx),
                snapshot_subscriptions: Default::default(),
                client: client.clone(),
                diagnostic_summaries: Default::default(),
                visible: worktree.visible,
                disconnected: false,
            })
        })
    }
388
389 pub fn as_local(&self) -> Option<&LocalWorktree> {
390 if let Worktree::Local(worktree) = self {
391 Some(worktree)
392 } else {
393 None
394 }
395 }
396
397 pub fn as_remote(&self) -> Option<&RemoteWorktree> {
398 if let Worktree::Remote(worktree) = self {
399 Some(worktree)
400 } else {
401 None
402 }
403 }
404
405 pub fn as_local_mut(&mut self) -> Option<&mut LocalWorktree> {
406 if let Worktree::Local(worktree) = self {
407 Some(worktree)
408 } else {
409 None
410 }
411 }
412
413 pub fn as_remote_mut(&mut self) -> Option<&mut RemoteWorktree> {
414 if let Worktree::Remote(worktree) = self {
415 Some(worktree)
416 } else {
417 None
418 }
419 }
420
421 pub fn is_local(&self) -> bool {
422 matches!(self, Worktree::Local(_))
423 }
424
425 pub fn is_remote(&self) -> bool {
426 !self.is_local()
427 }
428
    /// Returns a clone of the plain `Snapshot`, discarding any local-only
    /// state for local worktrees.
    pub fn snapshot(&self) -> Snapshot {
        match self {
            Worktree::Local(worktree) => worktree.snapshot().snapshot,
            Worktree::Remote(worktree) => worktree.snapshot(),
        }
    }
435
    /// The id of the most recently started scan (see `Snapshot::scan_id`).
    pub fn scan_id(&self) -> usize {
        match self {
            Worktree::Local(worktree) => worktree.snapshot.scan_id,
            Worktree::Remote(worktree) => worktree.snapshot.scan_id,
        }
    }
442
    /// The latest scan id whose scan (and all preceding scans) completed.
    pub fn completed_scan_id(&self) -> usize {
        match self {
            Worktree::Local(worktree) => worktree.snapshot.completed_scan_id,
            Worktree::Remote(worktree) => worktree.snapshot.completed_scan_id,
        }
    }
449
    /// Whether this worktree was created as visible.
    pub fn is_visible(&self) -> bool {
        match self {
            Worktree::Local(worktree) => worktree.visible,
            Worktree::Remote(worktree) => worktree.visible,
        }
    }
456
    /// The replica id for this worktree: `0` for local worktrees (the host),
    /// or the collaborator's replica id for remote ones.
    pub fn replica_id(&self) -> ReplicaId {
        match self {
            Worktree::Local(_) => 0,
            Worktree::Remote(worktree) => worktree.replica_id,
        }
    }
463
    /// Iterates over all per-path diagnostic summaries in this worktree.
    pub fn diagnostic_summaries(
        &self,
    ) -> impl Iterator<Item = (Arc<Path>, DiagnosticSummary)> + '_ {
        match self {
            Worktree::Local(worktree) => &worktree.diagnostic_summaries,
            Worktree::Remote(worktree) => &worktree.diagnostic_summaries,
        }
        .iter()
        .map(|(path, summary)| (path.0.clone(), *summary))
    }
474
    /// The absolute filesystem path of the worktree root.
    pub fn abs_path(&self) -> Arc<Path> {
        match self {
            Worktree::Local(worktree) => worktree.abs_path.clone(),
            Worktree::Remote(worktree) => worktree.abs_path.clone(),
        }
    }
481}
482
483impl LocalWorktree {
484 pub fn contains_abs_path(&self, path: &Path) -> bool {
485 path.starts_with(&self.abs_path)
486 }
487
488 fn absolutize(&self, path: &Path) -> PathBuf {
489 if path.file_name().is_some() {
490 self.abs_path.join(path)
491 } else {
492 self.abs_path.to_path_buf()
493 }
494 }
495
    /// Loads the file at `path` into a new `Buffer` model, including its git
    /// diff base when the file belongs to a repository, and computes the
    /// initial git diff.
    pub(crate) fn load_buffer(
        &mut self,
        path: &Path,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<ModelHandle<Buffer>>> {
        let path = Arc::from(path);
        cx.spawn(move |this, mut cx| async move {
            let (file, contents, diff_base) = this
                .update(&mut cx, |t, cx| t.as_local().unwrap().load(&path, cx))
                .await?;
            Ok(cx.add_model(|cx| {
                // NOTE(review): the first argument to `Buffer::from_file` is
                // presumably the replica id (0 = host) — confirm.
                let mut buffer = Buffer::from_file(0, contents, diff_base, Arc::new(file), cx);
                buffer.git_diff_recalc(cx);
                buffer
            }))
        })
    }
513
514 pub fn diagnostics_for_path(
515 &self,
516 path: &Path,
517 ) -> Vec<(usize, Vec<DiagnosticEntry<Unclipped<PointUtf16>>>)> {
518 self.diagnostics.get(path).cloned().unwrap_or_default()
519 }
520
521 pub fn update_diagnostics(
522 &mut self,
523 server_id: usize,
524 worktree_path: Arc<Path>,
525 diagnostics: Vec<DiagnosticEntry<Unclipped<PointUtf16>>>,
526 _: &mut ModelContext<Worktree>,
527 ) -> Result<bool> {
528 self.diagnostics.remove(&worktree_path);
529 let old_summary = self
530 .diagnostic_summaries
531 .remove(&PathKey(worktree_path.clone()))
532 .unwrap_or_default();
533 let new_summary = DiagnosticSummary::new(server_id, &diagnostics);
534 if !new_summary.is_empty() {
535 self.diagnostic_summaries
536 .insert(PathKey(worktree_path.clone()), new_summary);
537 let diagnostics_by_server_id =
538 self.diagnostics.entry(worktree_path.clone()).or_default();
539 let ix = match diagnostics_by_server_id.binary_search_by_key(&server_id, |e| e.0) {
540 Ok(ix) | Err(ix) => ix,
541 };
542 diagnostics_by_server_id[ix] = (server_id, diagnostics);
543 }
544
545 let updated = !old_summary.is_empty() || !new_summary.is_empty();
546 if updated {
547 if let Some(share) = self.share.as_ref() {
548 self.client
549 .send(proto::UpdateDiagnosticSummary {
550 project_id: share.project_id,
551 worktree_id: self.id().to_proto(),
552 summary: Some(proto::DiagnosticSummary {
553 path: worktree_path.to_string_lossy().to_string(),
554 language_server_id: server_id as u64,
555 error_count: new_summary.error_count as u32,
556 warning_count: new_summary.warning_count as u32,
557 }),
558 })
559 .log_err();
560 }
561 }
562
563 Ok(updated)
564 }
565
    /// Replaces the current snapshot, republishing it to collaborators when
    /// shared and emitting `UpdatedGitRepositories` if any repository changed.
    fn set_snapshot(&mut self, new_snapshot: LocalSnapshot, cx: &mut ModelContext<Worktree>) {
        // Diff repositories before the old snapshot is overwritten.
        let updated_repos = Self::changed_repos(
            &self.snapshot.git_repositories,
            &new_snapshot.git_repositories,
        );
        self.snapshot = new_snapshot;

        if let Some(share) = self.share.as_mut() {
            *share.snapshots_tx.borrow_mut() = self.snapshot.clone();
        }

        if !updated_repos.is_empty() {
            cx.emit(Event::UpdatedGitRepositories(updated_repos));
        }
    }
581
582 fn changed_repos(
583 old_repos: &[GitRepositoryEntry],
584 new_repos: &[GitRepositoryEntry],
585 ) -> Vec<GitRepositoryEntry> {
586 fn diff<'a>(
587 a: &'a [GitRepositoryEntry],
588 b: &'a [GitRepositoryEntry],
589 updated: &mut HashMap<&'a Path, GitRepositoryEntry>,
590 ) {
591 for a_repo in a {
592 let matched = b.iter().find(|b_repo| {
593 a_repo.git_dir_path == b_repo.git_dir_path && a_repo.scan_id == b_repo.scan_id
594 });
595
596 if matched.is_none() {
597 updated.insert(a_repo.git_dir_path.as_ref(), a_repo.clone());
598 }
599 }
600 }
601
602 let mut updated = HashMap::<&Path, GitRepositoryEntry>::default();
603
604 diff(old_repos, new_repos, &mut updated);
605 diff(new_repos, old_repos, &mut updated);
606
607 updated.into_values().collect()
608 }
609
610 pub fn scan_complete(&self) -> impl Future<Output = ()> {
611 let mut is_scanning_rx = self.is_scanning.1.clone();
612 async move {
613 let mut is_scanning = is_scanning_rx.borrow().clone();
614 while is_scanning {
615 if let Some(value) = is_scanning_rx.recv().await {
616 is_scanning = value;
617 } else {
618 break;
619 }
620 }
621 }
622 }
623
    /// Returns a clone of the full local snapshot, including local-only state.
    pub fn snapshot(&self) -> LocalSnapshot {
        self.snapshot.clone()
    }
627
    /// Builds the protobuf metadata describing this worktree for
    /// collaborators.
    pub fn metadata_proto(&self) -> proto::WorktreeMetadata {
        proto::WorktreeMetadata {
            id: self.id().to_proto(),
            root_name: self.root_name().to_string(),
            visible: self.visible,
            abs_path: self.abs_path().as_os_str().to_string_lossy().into(),
        }
    }
636
    /// Reads the file at a worktree-relative path, returning its `File`
    /// metadata, text contents, and — when the file is inside a git
    /// repository — the index text used as the diff base.
    fn load(
        &self,
        path: &Path,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<(File, String, Option<String>)>> {
        let handle = cx.handle();
        let path = Arc::from(path);
        let abs_path = self.absolutize(&path);
        let fs = self.fs.clone();
        let snapshot = self.snapshot();

        cx.spawn(|this, mut cx| async move {
            let text = fs.load(&abs_path).await?;

            // Load the diff base from the git index on a background thread.
            let diff_base = if let Some(repo) = snapshot.repo_for(&path) {
                if let Ok(repo_relative) = path.strip_prefix(repo.content_path) {
                    let repo_relative = repo_relative.to_owned();
                    cx.background()
                        .spawn(async move { repo.repo.lock().load_index_text(&repo_relative) })
                        .await
                } else {
                    None
                }
            } else {
                None
            };

            // Eagerly populate the snapshot with an updated entry for the loaded file
            let entry = this
                .update(&mut cx, |this, cx| {
                    this.as_local().unwrap().refresh_entry(path, None, cx)
                })
                .await?;

            Ok((
                File {
                    entry_id: entry.id,
                    worktree: handle,
                    path: entry.path,
                    mtime: entry.mtime,
                    is_local: true,
                    is_deleted: false,
                },
                text,
                diff_base,
            ))
        })
    }
685
686 pub fn save_buffer(
687 &self,
688 buffer_handle: ModelHandle<Buffer>,
689 path: Arc<Path>,
690 has_changed_file: bool,
691 cx: &mut ModelContext<Worktree>,
692 ) -> Task<Result<(clock::Global, RopeFingerprint, SystemTime)>> {
693 let handle = cx.handle();
694 let buffer = buffer_handle.read(cx);
695
696 let rpc = self.client.clone();
697 let buffer_id = buffer.remote_id();
698 let project_id = self.share.as_ref().map(|share| share.project_id);
699
700 let text = buffer.as_rope().clone();
701 let fingerprint = text.fingerprint();
702 let version = buffer.version();
703 let save = self.write_file(path, text, buffer.line_ending(), cx);
704
705 cx.as_mut().spawn(|mut cx| async move {
706 let entry = save.await?;
707
708 if has_changed_file {
709 let new_file = Arc::new(File {
710 entry_id: entry.id,
711 worktree: handle,
712 path: entry.path,
713 mtime: entry.mtime,
714 is_local: true,
715 is_deleted: false,
716 });
717
718 if let Some(project_id) = project_id {
719 rpc.send(proto::UpdateBufferFile {
720 project_id,
721 buffer_id,
722 file: Some(new_file.to_proto()),
723 })
724 .log_err();
725 }
726
727 buffer_handle.update(&mut cx, |buffer, cx| {
728 if has_changed_file {
729 buffer.file_updated(new_file, cx).detach();
730 }
731 });
732 }
733
734 if let Some(project_id) = project_id {
735 rpc.send(proto::BufferSaved {
736 project_id,
737 buffer_id,
738 version: serialize_version(&version),
739 mtime: Some(entry.mtime.into()),
740 fingerprint: serialize_fingerprint(fingerprint),
741 })?;
742 }
743
744 buffer_handle.update(&mut cx, |buffer, cx| {
745 buffer.did_save(version.clone(), fingerprint, entry.mtime, cx);
746 });
747
748 Ok((version, fingerprint, entry.mtime))
749 })
750 }
751
752 pub fn create_entry(
753 &self,
754 path: impl Into<Arc<Path>>,
755 is_dir: bool,
756 cx: &mut ModelContext<Worktree>,
757 ) -> Task<Result<Entry>> {
758 let path = path.into();
759 let abs_path = self.absolutize(&path);
760 let fs = self.fs.clone();
761 let write = cx.background().spawn(async move {
762 if is_dir {
763 fs.create_dir(&abs_path).await
764 } else {
765 fs.save(&abs_path, &Default::default(), Default::default())
766 .await
767 }
768 });
769
770 cx.spawn(|this, mut cx| async move {
771 write.await?;
772 this.update(&mut cx, |this, cx| {
773 this.as_local_mut().unwrap().refresh_entry(path, None, cx)
774 })
775 .await
776 })
777 }
778
779 pub fn write_file(
780 &self,
781 path: impl Into<Arc<Path>>,
782 text: Rope,
783 line_ending: LineEnding,
784 cx: &mut ModelContext<Worktree>,
785 ) -> Task<Result<Entry>> {
786 let path = path.into();
787 let abs_path = self.absolutize(&path);
788 let fs = self.fs.clone();
789 let write = cx
790 .background()
791 .spawn(async move { fs.save(&abs_path, &text, line_ending).await });
792
793 cx.spawn(|this, mut cx| async move {
794 write.await?;
795 this.update(&mut cx, |this, cx| {
796 this.as_local_mut().unwrap().refresh_entry(path, None, cx)
797 })
798 .await
799 })
800 }
801
    /// Deletes the file or directory for `entry_id` from disk, then waits for
    /// the background scanner to process the removal. Returns `None` when no
    /// entry with that id exists.
    pub fn delete_entry(
        &self,
        entry_id: ProjectEntryId,
        cx: &mut ModelContext<Worktree>,
    ) -> Option<Task<Result<()>>> {
        let entry = self.entry_for_id(entry_id)?.clone();
        let abs_path = self.abs_path.clone();
        let fs = self.fs.clone();

        let delete = cx.background().spawn(async move {
            // A relative path with no file name denotes the worktree root
            // itself.
            let mut abs_path = fs.canonicalize(&abs_path).await?;
            if entry.path.file_name().is_some() {
                abs_path = abs_path.join(&entry.path);
            }
            if entry.is_file() {
                fs.remove_file(&abs_path, Default::default()).await?;
            } else {
                fs.remove_dir(
                    &abs_path,
                    RemoveOptions {
                        recursive: true,
                        ignore_if_not_exists: false,
                    },
                )
                .await?;
            }
            anyhow::Ok(abs_path)
        });

        Some(cx.spawn(|this, mut cx| async move {
            let abs_path = delete.await?;
            // Ask the scanner to rescan the deleted path, and wait on the
            // barrier until it has done so.
            let (tx, mut rx) = barrier::channel();
            this.update(&mut cx, |this, _| {
                this.as_local_mut()
                    .unwrap()
                    .path_changes_tx
                    .try_send((vec![abs_path], tx))
            })?;
            rx.recv().await;
            Ok(())
        }))
    }
844
845 pub fn rename_entry(
846 &self,
847 entry_id: ProjectEntryId,
848 new_path: impl Into<Arc<Path>>,
849 cx: &mut ModelContext<Worktree>,
850 ) -> Option<Task<Result<Entry>>> {
851 let old_path = self.entry_for_id(entry_id)?.path.clone();
852 let new_path = new_path.into();
853 let abs_old_path = self.absolutize(&old_path);
854 let abs_new_path = self.absolutize(&new_path);
855 let fs = self.fs.clone();
856 let rename = cx.background().spawn(async move {
857 fs.rename(&abs_old_path, &abs_new_path, Default::default())
858 .await
859 });
860
861 Some(cx.spawn(|this, mut cx| async move {
862 rename.await?;
863 this.update(&mut cx, |this, cx| {
864 this.as_local_mut()
865 .unwrap()
866 .refresh_entry(new_path.clone(), Some(old_path), cx)
867 })
868 .await
869 }))
870 }
871
872 pub fn copy_entry(
873 &self,
874 entry_id: ProjectEntryId,
875 new_path: impl Into<Arc<Path>>,
876 cx: &mut ModelContext<Worktree>,
877 ) -> Option<Task<Result<Entry>>> {
878 let old_path = self.entry_for_id(entry_id)?.path.clone();
879 let new_path = new_path.into();
880 let abs_old_path = self.absolutize(&old_path);
881 let abs_new_path = self.absolutize(&new_path);
882 let fs = self.fs.clone();
883 let copy = cx.background().spawn(async move {
884 copy_recursive(
885 fs.as_ref(),
886 &abs_old_path,
887 &abs_new_path,
888 Default::default(),
889 )
890 .await
891 });
892
893 Some(cx.spawn(|this, mut cx| async move {
894 copy.await?;
895 this.update(&mut cx, |this, cx| {
896 this.as_local_mut()
897 .unwrap()
898 .refresh_entry(new_path.clone(), None, cx)
899 })
900 .await
901 }))
902 }
903
    /// Asks the background scanner to rescan `path` (and `old_path`, when
    /// provided — e.g. after a rename), waits until the scan has been
    /// processed, and returns the refreshed entry for `path`.
    fn refresh_entry(
        &self,
        path: Arc<Path>,
        old_path: Option<Arc<Path>>,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<Entry>> {
        let fs = self.fs.clone();
        let abs_root_path = self.abs_path.clone();
        let path_changes_tx = self.path_changes_tx.clone();
        cx.spawn_weak(move |this, mut cx| async move {
            let abs_path = fs.canonicalize(&abs_root_path).await?;
            let mut paths = Vec::with_capacity(2);
            // A relative path with no file name denotes the worktree root.
            paths.push(if path.file_name().is_some() {
                abs_path.join(&path)
            } else {
                abs_path.clone()
            });
            if let Some(old_path) = old_path {
                paths.push(if old_path.file_name().is_some() {
                    abs_path.join(&old_path)
                } else {
                    abs_path.clone()
                });
            }

            // The scanner drops its end of the barrier once the paths have
            // been processed, which completes `rx.recv()`.
            let (tx, mut rx) = barrier::channel();
            path_changes_tx.try_send((paths, tx))?;
            rx.recv().await;
            this.upgrade(&cx)
                .ok_or_else(|| anyhow!("worktree was dropped"))?
                .update(&mut cx, |this, _| {
                    this.entry_for_path(path)
                        .cloned()
                        .ok_or_else(|| anyhow!("failed to read path after update"))
                })
        })
    }
941
    /// Starts (or resumes) sharing this worktree with collaborators under the
    /// given project id.
    ///
    /// Spawns a background task that diffs each new snapshot against the
    /// previous one and streams chunked `UpdateWorktree` requests to the
    /// server, retrying after failures when resumed. The returned task
    /// resolves once the first full update has been sent.
    pub fn share(&mut self, project_id: u64, cx: &mut ModelContext<Worktree>) -> Task<Result<()>> {
        let (share_tx, share_rx) = oneshot::channel();

        if let Some(share) = self.share.as_mut() {
            // Already sharing: resolve immediately and nudge the update task
            // to resume sending.
            let _ = share_tx.send(());
            *share.resume_updates.borrow_mut() = ();
        } else {
            let (snapshots_tx, mut snapshots_rx) = watch::channel_with(self.snapshot());
            let (resume_updates_tx, mut resume_updates_rx) = watch::channel();
            let worktree_id = cx.model_id() as u64;

            // Send all existing diagnostic summaries up front.
            for (path, summary) in self.diagnostic_summaries.iter() {
                if let Err(e) = self.client.send(proto::UpdateDiagnosticSummary {
                    project_id,
                    worktree_id,
                    summary: Some(summary.to_proto(&path.0)),
                }) {
                    return Task::ready(Err(e));
                }
            }

            let _maintain_remote_snapshot = cx.background().spawn({
                let client = self.client.clone();
                async move {
                    let mut share_tx = Some(share_tx);
                    // Start from an empty snapshot so that the first diff
                    // transmits the entire worktree.
                    let mut prev_snapshot = LocalSnapshot {
                        ignores_by_parent_abs_path: Default::default(),
                        git_repositories: Default::default(),
                        removed_entry_ids: Default::default(),
                        next_entry_id: Default::default(),
                        snapshot: Snapshot {
                            id: WorktreeId(worktree_id as usize),
                            abs_path: Path::new("").into(),
                            root_name: Default::default(),
                            root_char_bag: Default::default(),
                            entries_by_path: Default::default(),
                            entries_by_id: Default::default(),
                            scan_id: 0,
                            completed_scan_id: 0,
                        },
                    };
                    while let Some(snapshot) = snapshots_rx.recv().await {
                        // Tiny chunks in tests exercise the chunking logic.
                        #[cfg(any(test, feature = "test-support"))]
                        const MAX_CHUNK_SIZE: usize = 2;
                        #[cfg(not(any(test, feature = "test-support")))]
                        const MAX_CHUNK_SIZE: usize = 256;

                        let update =
                            snapshot.build_update(&prev_snapshot, project_id, worktree_id, true);
                        for update in proto::split_worktree_update(update, MAX_CHUNK_SIZE) {
                            // Clear any stale resume signal, then retry this
                            // chunk until it succeeds or sharing ends.
                            let _ = resume_updates_rx.try_recv();
                            while let Err(error) = client.request(update.clone()).await {
                                log::error!("failed to send worktree update: {}", error);
                                log::info!("waiting to resume updates");
                                if resume_updates_rx.next().await.is_none() {
                                    return Ok(());
                                }
                            }
                        }

                        // Resolve the caller's task after the first snapshot
                        // has been fully transmitted.
                        if let Some(share_tx) = share_tx.take() {
                            let _ = share_tx.send(());
                        }

                        prev_snapshot = snapshot;
                    }

                    Ok::<_, anyhow::Error>(())
                }
                .log_err()
            });

            self.share = Some(ShareState {
                project_id,
                snapshots_tx,
                resume_updates: resume_updates_tx,
                _maintain_remote_snapshot,
            });
        }

        cx.foreground()
            .spawn(async move { share_rx.await.map_err(|_| anyhow!("share ended")) })
    }
1025
1026 pub fn unshare(&mut self) {
1027 self.share.take();
1028 }
1029
1030 pub fn is_shared(&self) -> bool {
1031 self.share.is_some()
1032 }
1033}
1034
1035impl RemoteWorktree {
    /// Returns a clone of the current foreground snapshot.
    fn snapshot(&self) -> Snapshot {
        self.snapshot.clone()
    }
1039
1040 pub fn disconnected_from_host(&mut self) {
1041 self.updates_tx.take();
1042 self.snapshot_subscriptions.clear();
1043 self.disconnected = true;
1044 }
1045
    /// Asks the host to save the buffer, then applies the returned version,
    /// fingerprint, and mtime to the local buffer replica.
    pub fn save_buffer(
        &self,
        buffer_handle: ModelHandle<Buffer>,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<(clock::Global, RopeFingerprint, SystemTime)>> {
        let buffer = buffer_handle.read(cx);
        let buffer_id = buffer.remote_id();
        let version = buffer.version();
        let rpc = self.client.clone();
        let project_id = self.project_id;
        cx.as_mut().spawn(|mut cx| async move {
            let response = rpc
                .request(proto::SaveBuffer {
                    project_id,
                    buffer_id,
                    version: serialize_version(&version),
                })
                .await?;
            let version = deserialize_version(&response.version);
            let fingerprint = deserialize_fingerprint(&response.fingerprint)?;
            let mtime = response
                .mtime
                .ok_or_else(|| anyhow!("missing mtime"))?
                .into();

            buffer_handle.update(&mut cx, |buffer, cx| {
                buffer.did_save(version.clone(), fingerprint, mtime, cx);
            });

            Ok((version, fingerprint, mtime))
        })
    }
1078
1079 pub fn update_from_remote(&mut self, update: proto::UpdateWorktree) {
1080 if let Some(updates_tx) = &self.updates_tx {
1081 updates_tx
1082 .unbounded_send(update)
1083 .expect("consumer runs to completion");
1084 }
1085 }
1086
1087 fn observed_snapshot(&self, scan_id: usize) -> bool {
1088 self.completed_scan_id >= scan_id
1089 }
1090
    /// Returns a future that resolves once this replica has observed a scan
    /// with at least the given id. The future resolves with an error when
    /// the host has disconnected (the sender is dropped without sending).
    fn wait_for_snapshot(&mut self, scan_id: usize) -> impl Future<Output = Result<()>> {
        let (tx, rx) = oneshot::channel();
        if self.observed_snapshot(scan_id) {
            let _ = tx.send(());
        } else if self.disconnected {
            drop(tx);
        } else {
            // Keep subscriptions sorted by scan id so they can be resolved in
            // order as snapshots arrive.
            match self
                .snapshot_subscriptions
                .binary_search_by_key(&scan_id, |probe| probe.0)
            {
                Ok(ix) | Err(ix) => self.snapshot_subscriptions.insert(ix, (scan_id, tx)),
            }
        }

        async move {
            rx.await?;
            Ok(())
        }
    }
1111
1112 pub fn update_diagnostic_summary(
1113 &mut self,
1114 path: Arc<Path>,
1115 summary: &proto::DiagnosticSummary,
1116 ) {
1117 let summary = DiagnosticSummary {
1118 language_server_id: summary.language_server_id as usize,
1119 error_count: summary.error_count as usize,
1120 warning_count: summary.warning_count as usize,
1121 };
1122 if summary.is_empty() {
1123 self.diagnostic_summaries.remove(&PathKey(path));
1124 } else {
1125 self.diagnostic_summaries.insert(PathKey(path), summary);
1126 }
1127 }
1128
    /// Applies a host-sent entry insertion once the snapshot for `scan_id`
    /// has been observed, mirroring the change into the foreground snapshot.
    pub fn insert_entry(
        &mut self,
        entry: proto::Entry,
        scan_id: usize,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<Entry>> {
        let wait_for_snapshot = self.wait_for_snapshot(scan_id);
        cx.spawn(|this, mut cx| async move {
            wait_for_snapshot.await?;
            this.update(&mut cx, |worktree, _| {
                let worktree = worktree.as_remote_mut().unwrap();
                let mut snapshot = worktree.background_snapshot.lock();
                let entry = snapshot.insert_entry(entry);
                worktree.snapshot = snapshot.clone();
                entry
            })
        })
    }
1147
    /// Applies a host-sent entry deletion once the snapshot for `scan_id`
    /// has been observed, mirroring the change into the foreground snapshot.
    pub(crate) fn delete_entry(
        &mut self,
        id: ProjectEntryId,
        scan_id: usize,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<()>> {
        let wait_for_snapshot = self.wait_for_snapshot(scan_id);
        cx.spawn(|this, mut cx| async move {
            wait_for_snapshot.await?;
            this.update(&mut cx, |worktree, _| {
                let worktree = worktree.as_remote_mut().unwrap();
                let mut snapshot = worktree.background_snapshot.lock();
                snapshot.delete_entry(id);
                worktree.snapshot = snapshot.clone();
            });
            Ok(())
        })
    }
1166}
1167
impl Snapshot {
    /// The id of the worktree this snapshot belongs to.
    pub fn id(&self) -> WorktreeId {
        self.id
    }

    /// The absolute path of the worktree's root.
    pub fn abs_path(&self) -> &Arc<Path> {
        &self.abs_path
    }

    /// Whether this snapshot contains an entry with the given id.
    pub fn contains_entry(&self, entry_id: ProjectEntryId) -> bool {
        self.entries_by_id.get(&entry_id, &()).is_some()
    }

    /// Inserts an entry deserialized from the wire, replacing any existing
    /// entry with the same id. If the replaced entry lived at a different
    /// path, its old path-keyed record is removed so both trees agree.
    pub(crate) fn insert_entry(&mut self, entry: proto::Entry) -> Result<Entry> {
        let entry = Entry::try_from((&self.root_char_bag, entry))?;
        let old_entry = self.entries_by_id.insert_or_replace(
            PathEntry {
                id: entry.id,
                path: entry.path.clone(),
                is_ignored: entry.is_ignored,
                // NOTE(review): scan ids appear not to be tracked for entries
                // received over the wire (always 0 here) — confirm.
                scan_id: 0,
            },
            &(),
        );
        if let Some(old_entry) = old_entry {
            self.entries_by_path.remove(&PathKey(old_entry.path), &());
        }
        self.entries_by_path.insert_or_replace(entry.clone(), &());
        Ok(entry)
    }

    /// Removes the entry with the given id along with all of its descendants.
    /// Returns the removed entry's path, or `None` if the id was absent.
    fn delete_entry(&mut self, entry_id: ProjectEntryId) -> Option<Arc<Path>> {
        let removed_entry = self.entries_by_id.remove(&entry_id, &())?;
        self.entries_by_path = {
            let mut cursor = self.entries_by_path.cursor();
            // Keep every entry that sorts before the removed path...
            let mut new_entries_by_path =
                cursor.slice(&TraversalTarget::Path(&removed_entry.path), Bias::Left, &());
            // ...skip the removed entry and its descendants, dropping their
            // id-keyed records as we go...
            while let Some(entry) = cursor.item() {
                if entry.path.starts_with(&removed_entry.path) {
                    self.entries_by_id.remove(&entry.id, &());
                    cursor.next(&());
                } else {
                    break;
                }
            }
            // ...then re-attach everything after the removed subtree.
            new_entries_by_path.push_tree(cursor.suffix(&()), &());
            new_entries_by_path
        };

        Some(removed_entry.path)
    }

    /// Applies an incremental update received from the host: removes deleted
    /// entries, upserts updated ones, and advances the scan ids.
    pub(crate) fn apply_remote_update(&mut self, update: proto::UpdateWorktree) -> Result<()> {
        let mut entries_by_path_edits = Vec::new();
        let mut entries_by_id_edits = Vec::new();
        for entry_id in update.removed_entries {
            if let Some(entry) = self.entry_for_id(ProjectEntryId::from_proto(entry_id)) {
                entries_by_path_edits.push(Edit::Remove(PathKey(entry.path.clone())));
                entries_by_id_edits.push(Edit::Remove(entry.id));
            }
        }

        for entry in update.updated_entries {
            let entry = Entry::try_from((&self.root_char_bag, entry))?;
            // If the entry moved, remove the record at its previous path.
            if let Some(PathEntry { path, .. }) = self.entries_by_id.get(&entry.id, &()) {
                entries_by_path_edits.push(Edit::Remove(PathKey(path.clone())));
            }
            entries_by_id_edits.push(Edit::Insert(PathEntry {
                id: entry.id,
                path: entry.path.clone(),
                is_ignored: entry.is_ignored,
                scan_id: 0,
            }));
            entries_by_path_edits.push(Edit::Insert(entry));
        }

        self.entries_by_path.edit(entries_by_path_edits, &());
        self.entries_by_id.edit(entries_by_id_edits, &());
        self.scan_id = update.scan_id as usize;
        if update.is_last_update {
            self.completed_scan_id = update.scan_id as usize;
        }

        Ok(())
    }

    /// Total number of file entries, including ignored files.
    pub fn file_count(&self) -> usize {
        self.entries_by_path.summary().file_count
    }

    /// Number of file entries that are not gitignored.
    pub fn visible_file_count(&self) -> usize {
        self.entries_by_path.summary().visible_file_count
    }

    /// Begins a traversal at the nth entry (counting only the entries
    /// selected by the `include_*` flags).
    fn traverse_from_offset(
        &self,
        include_dirs: bool,
        include_ignored: bool,
        start_offset: usize,
    ) -> Traversal {
        let mut cursor = self.entries_by_path.cursor();
        cursor.seek(
            &TraversalTarget::Count {
                count: start_offset,
                include_dirs,
                include_ignored,
            },
            Bias::Right,
            &(),
        );
        Traversal {
            cursor,
            include_dirs,
            include_ignored,
        }
    }

    /// Begins a traversal at the entry with the given path.
    fn traverse_from_path(
        &self,
        include_dirs: bool,
        include_ignored: bool,
        path: &Path,
    ) -> Traversal {
        let mut cursor = self.entries_by_path.cursor();
        cursor.seek(&TraversalTarget::Path(path), Bias::Left, &());
        Traversal {
            cursor,
            include_dirs,
            include_ignored,
        }
    }

    /// Iterates over file entries, starting at the given offset.
    pub fn files(&self, include_ignored: bool, start: usize) -> Traversal {
        self.traverse_from_offset(false, include_ignored, start)
    }

    /// Iterates over all entries, directories included.
    pub fn entries(&self, include_ignored: bool) -> Traversal {
        self.traverse_from_offset(true, include_ignored, 0)
    }

    /// Iterates over the paths of all entries, excluding the root entry
    /// (whose path is empty).
    pub fn paths(&self) -> impl Iterator<Item = &Arc<Path>> {
        let empty_path = Path::new("");
        self.entries_by_path
            .cursor::<()>()
            .filter(move |entry| entry.path.as_ref() != empty_path)
            .map(|entry| &entry.path)
    }

    /// Iterates over the direct children of the directory at `parent_path`.
    fn child_entries<'a>(&'a self, parent_path: &'a Path) -> ChildEntriesIter<'a> {
        let mut cursor = self.entries_by_path.cursor();
        cursor.seek(&TraversalTarget::Path(parent_path), Bias::Right, &());
        let traversal = Traversal {
            cursor,
            include_dirs: true,
            include_ignored: true,
        };
        ChildEntriesIter {
            traversal,
            parent_path,
        }
    }

    /// The entry for the worktree root, if present.
    pub fn root_entry(&self) -> Option<&Entry> {
        self.entry_for_path("")
    }

    /// The file name of the worktree's root directory.
    pub fn root_name(&self) -> &str {
        &self.root_name
    }

    /// The id of the most recently started scan.
    pub fn scan_id(&self) -> usize {
        self.scan_id
    }

    /// Looks up the entry at exactly the given worktree-relative path.
    pub fn entry_for_path(&self, path: impl AsRef<Path>) -> Option<&Entry> {
        let path = path.as_ref();
        // The traversal seeks to the first entry at or after `path`; only
        // accept it if it is an exact match.
        self.traverse_from_path(true, true, path)
            .entry()
            .and_then(|entry| {
                if entry.path.as_ref() == path {
                    Some(entry)
                } else {
                    None
                }
            })
    }

    /// Looks up an entry by its project-wide id.
    pub fn entry_for_id(&self, id: ProjectEntryId) -> Option<&Entry> {
        let entry = self.entries_by_id.get(&id, &())?;
        self.entry_for_path(&entry.path)
    }

    /// The filesystem inode of the entry at the given path, if any.
    pub fn inode_for_path(&self, path: impl AsRef<Path>) -> Option<u64> {
        self.entry_for_path(path.as_ref()).map(|e| e.inode)
    }
}
1364
impl LocalSnapshot {
    /// Gives the most specific git repository containing the given
    /// worktree-relative path.
    pub(crate) fn repo_for(&self, path: &Path) -> Option<GitRepositoryEntry> {
        self.git_repositories
            .iter()
            .rev() // git_repositories is ordered lexicographically by content path
            .find(|repo| repo.manages(path))
            .cloned()
    }

    /// Finds the repository whose `.git` directory contains the given
    /// worktree-relative path, if any.
    pub(crate) fn repo_with_dot_git_containing(
        &mut self,
        path: &Path,
    ) -> Option<&mut GitRepositoryEntry> {
        // Git repositories cannot be nested, so we don't need to reverse the order
        self.git_repositories
            .iter_mut()
            .find(|repo| repo.in_dot_git(path))
    }

    /// Builds an update message describing this entire snapshot, as if the
    /// remote peer had no prior state.
    #[cfg(test)]
    pub(crate) fn build_initial_update(&self, project_id: u64) -> proto::UpdateWorktree {
        let root_name = self.root_name.clone();
        proto::UpdateWorktree {
            project_id,
            worktree_id: self.id().to_proto(),
            abs_path: self.abs_path().to_string_lossy().into(),
            root_name,
            updated_entries: self.entries_by_path.iter().map(Into::into).collect(),
            removed_entries: Default::default(),
            scan_id: self.scan_id as u64,
            is_last_update: true,
        }
    }

    /// Builds an update message describing the difference between this
    /// snapshot and `other` (the state the remote peer last saw).
    pub(crate) fn build_update(
        &self,
        other: &Self,
        project_id: u64,
        worktree_id: u64,
        include_ignored: bool,
    ) -> proto::UpdateWorktree {
        let mut updated_entries = Vec::new();
        let mut removed_entries = Vec::new();
        // Merge-join the two id-ordered entry sequences: ids only in `self`
        // were added, ids only in `other` were removed, and shared ids were
        // updated iff their scan ids differ.
        let mut self_entries = self
            .entries_by_id
            .cursor::<()>()
            .filter(|e| include_ignored || !e.is_ignored)
            .peekable();
        let mut other_entries = other
            .entries_by_id
            .cursor::<()>()
            .filter(|e| include_ignored || !e.is_ignored)
            .peekable();
        loop {
            match (self_entries.peek(), other_entries.peek()) {
                (Some(self_entry), Some(other_entry)) => {
                    match Ord::cmp(&self_entry.id, &other_entry.id) {
                        Ordering::Less => {
                            let entry = self.entry_for_id(self_entry.id).unwrap().into();
                            updated_entries.push(entry);
                            self_entries.next();
                        }
                        Ordering::Equal => {
                            if self_entry.scan_id != other_entry.scan_id {
                                let entry = self.entry_for_id(self_entry.id).unwrap().into();
                                updated_entries.push(entry);
                            }

                            self_entries.next();
                            other_entries.next();
                        }
                        Ordering::Greater => {
                            removed_entries.push(other_entry.id.to_proto());
                            other_entries.next();
                        }
                    }
                }
                (Some(self_entry), None) => {
                    let entry = self.entry_for_id(self_entry.id).unwrap().into();
                    updated_entries.push(entry);
                    self_entries.next();
                }
                (None, Some(other_entry)) => {
                    removed_entries.push(other_entry.id.to_proto());
                    other_entries.next();
                }
                (None, None) => break,
            }
        }

        proto::UpdateWorktree {
            project_id,
            worktree_id,
            abs_path: self.abs_path().to_string_lossy().into(),
            root_name: self.root_name().to_string(),
            updated_entries,
            removed_entries,
            scan_id: self.scan_id as u64,
            is_last_update: self.completed_scan_id == self.scan_id,
        }
    }

    /// Inserts (or replaces) an entry discovered on the local filesystem,
    /// keeping the id- and path-keyed trees consistent and reloading ignore
    /// rules when the entry is a `.gitignore` file.
    fn insert_entry(&mut self, mut entry: Entry, fs: &dyn Fs) -> Entry {
        if entry.is_file() && entry.path.file_name() == Some(&GITIGNORE) {
            // A gitignore file changed: re-parse it so ignore statuses can be
            // recomputed for the containing directory.
            let abs_path = self.abs_path.join(&entry.path);
            match smol::block_on(build_gitignore(&abs_path, fs)) {
                Ok(ignore) => {
                    self.ignores_by_parent_abs_path.insert(
                        abs_path.parent().unwrap().into(),
                        (Arc::new(ignore), self.scan_id),
                    );
                }
                Err(error) => {
                    log::error!(
                        "error loading .gitignore file {:?} - {:?}",
                        &entry.path,
                        error
                    );
                }
            }
        }

        self.reuse_entry_id(&mut entry);

        // Don't demote an already-scanned directory back to pending.
        if entry.kind == EntryKind::PendingDir {
            if let Some(existing_entry) =
                self.entries_by_path.get(&PathKey(entry.path.clone()), &())
            {
                entry.kind = existing_entry.kind;
            }
        }

        let scan_id = self.scan_id;
        let removed = self.entries_by_path.insert_or_replace(entry.clone(), &());
        if let Some(removed) = removed {
            // A different entry previously occupied this path; drop its
            // id-keyed record.
            if removed.id != entry.id {
                self.entries_by_id.remove(&removed.id, &());
            }
        }
        self.entries_by_id.insert_or_replace(
            PathEntry {
                id: entry.id,
                path: entry.path.clone(),
                is_ignored: entry.is_ignored,
                scan_id,
            },
            &(),
        );

        entry
    }

    /// Records the children of a just-scanned directory, marking the parent
    /// as fully scanned, registering its gitignore (if any), and detecting a
    /// git repository when the directory is a `.git` folder.
    fn populate_dir(
        &mut self,
        parent_path: Arc<Path>,
        entries: impl IntoIterator<Item = Entry>,
        ignore: Option<Arc<Gitignore>>,
        fs: &dyn Fs,
    ) {
        let mut parent_entry = if let Some(parent_entry) =
            self.entries_by_path.get(&PathKey(parent_path.clone()), &())
        {
            parent_entry.clone()
        } else {
            log::warn!(
                "populating a directory {:?} that has been removed",
                parent_path
            );
            return;
        };

        match parent_entry.kind {
            EntryKind::PendingDir => {
                parent_entry.kind = EntryKind::Dir;
            }
            EntryKind::Dir => {}
            // The parent is no longer a directory; nothing to populate.
            _ => return,
        }

        if let Some(ignore) = ignore {
            self.ignores_by_parent_abs_path.insert(
                self.abs_path.join(&parent_path).into(),
                (ignore, self.scan_id),
            );
        }

        if parent_path.file_name() == Some(&DOT_GIT) {
            // A `.git` directory marks its parent as a repository root.
            let abs_path = self.abs_path.join(&parent_path);
            let content_path: Arc<Path> = parent_path.parent().unwrap().into();
            // Only insert if no repository is registered at this content path
            // yet (`Err` from binary search gives the insertion index).
            if let Err(ix) = self
                .git_repositories
                .binary_search_by_key(&&content_path, |repo| &repo.content_path)
            {
                if let Some(repo) = fs.open_repo(abs_path.as_path()) {
                    self.git_repositories.insert(
                        ix,
                        GitRepositoryEntry {
                            repo,
                            scan_id: 0,
                            content_path,
                            git_dir_path: parent_path,
                        },
                    );
                }
            }
        }

        let mut entries_by_path_edits = vec![Edit::Insert(parent_entry)];
        let mut entries_by_id_edits = Vec::new();

        for mut entry in entries {
            self.reuse_entry_id(&mut entry);
            entries_by_id_edits.push(Edit::Insert(PathEntry {
                id: entry.id,
                path: entry.path.clone(),
                is_ignored: entry.is_ignored,
                scan_id: self.scan_id,
            }));
            entries_by_path_edits.push(Edit::Insert(entry));
        }

        self.entries_by_path.edit(entries_by_path_edits, &());
        self.entries_by_id.edit(entries_by_id_edits, &());
    }

    /// Keeps entry ids stable across rescans: reuse the id of a recently
    /// removed entry with the same inode (a rename/move), or of the existing
    /// entry at the same path.
    fn reuse_entry_id(&mut self, entry: &mut Entry) {
        if let Some(removed_entry_id) = self.removed_entry_ids.remove(&entry.inode) {
            entry.id = removed_entry_id;
        } else if let Some(existing_entry) = self.entry_for_path(&entry.path) {
            entry.id = existing_entry.id;
        }
    }

    /// Removes the entry at `path` and all of its descendants, remembering
    /// removed ids by inode so they can be reused if the paths reappear.
    fn remove_path(&mut self, path: &Path) {
        let mut new_entries;
        let removed_entries;
        {
            let mut cursor = self.entries_by_path.cursor::<TraversalProgress>();
            // Everything before `path` is kept; everything from `path` up to
            // (but excluding) its successor — i.e. the whole subtree — is
            // removed; the rest is re-attached.
            new_entries = cursor.slice(&TraversalTarget::Path(path), Bias::Left, &());
            removed_entries = cursor.slice(&TraversalTarget::PathSuccessor(path), Bias::Left, &());
            new_entries.push_tree(cursor.suffix(&()), &());
        }
        self.entries_by_path = new_entries;

        let mut entries_by_id_edits = Vec::new();
        for entry in removed_entries.cursor::<()>() {
            // Remember the highest removed id per inode for id reuse.
            let removed_entry_id = self
                .removed_entry_ids
                .entry(entry.inode)
                .or_insert(entry.id);
            *removed_entry_id = cmp::max(*removed_entry_id, entry.id);
            entries_by_id_edits.push(Edit::Remove(entry.id));
        }
        self.entries_by_id.edit(entries_by_id_edits, &());

        if path.file_name() == Some(&GITIGNORE) {
            // Mark the ignore file's record as dirty for this scan so ignore
            // statuses get recomputed.
            let abs_parent_path = self.abs_path.join(path.parent().unwrap());
            if let Some((_, scan_id)) = self
                .ignores_by_parent_abs_path
                .get_mut(abs_parent_path.as_path())
            {
                *scan_id = self.snapshot.scan_id;
            }
        } else if path.file_name() == Some(&DOT_GIT) {
            // Same for a removed `.git` directory's repository record.
            let parent_path = path.parent().unwrap();
            if let Ok(ix) = self
                .git_repositories
                .binary_search_by_key(&parent_path, |repo| repo.git_dir_path.as_ref())
            {
                self.git_repositories[ix].scan_id = self.snapshot.scan_id;
            }
        }
    }

    /// Collects the inodes of all ancestors of `path` that exist in this
    /// snapshot (used to detect symlink cycles during scanning).
    /// NOTE(review): cycle-detection purpose inferred from the name and the
    /// scanner's `ancestor_inodes` usage — confirm.
    fn ancestor_inodes_for_path(&self, path: &Path) -> TreeSet<u64> {
        let mut inodes = TreeSet::default();
        for ancestor in path.ancestors().skip(1) {
            if let Some(entry) = self.entry_for_path(ancestor) {
                inodes.insert(entry.inode);
            }
        }
        inodes
    }

    /// Builds the stack of gitignore rules that apply to `abs_path` by
    /// walking its ancestors from the root downward.
    fn ignore_stack_for_abs_path(&self, abs_path: &Path, is_dir: bool) -> Arc<IgnoreStack> {
        let mut new_ignores = Vec::new();
        for ancestor in abs_path.ancestors().skip(1) {
            if let Some((ignore, _)) = self.ignores_by_parent_abs_path.get(ancestor) {
                new_ignores.push((ancestor, Some(ignore.clone())));
            } else {
                new_ignores.push((ancestor, None));
            }
        }

        // Apply ignores outermost-first; once any ancestor directory is
        // itself ignored, everything beneath it is ignored too.
        let mut ignore_stack = IgnoreStack::none();
        for (parent_abs_path, ignore) in new_ignores.into_iter().rev() {
            if ignore_stack.is_abs_path_ignored(parent_abs_path, true) {
                ignore_stack = IgnoreStack::all();
                break;
            } else if let Some(ignore) = ignore {
                ignore_stack = ignore_stack.append(parent_abs_path.into(), ignore);
            }
        }

        if ignore_stack.is_abs_path_ignored(abs_path, is_dir) {
            ignore_stack = IgnoreStack::all();
        }

        ignore_stack
    }

    /// All git repositories discovered in this worktree, ordered
    /// lexicographically by content path.
    pub fn git_repo_entries(&self) -> &[GitRepositoryEntry] {
        &self.git_repositories
    }
}
1681
1682impl GitRepositoryEntry {
1683 // Note that these paths should be relative to the worktree root.
1684 pub(crate) fn manages(&self, path: &Path) -> bool {
1685 path.starts_with(self.content_path.as_ref())
1686 }
1687
1688 // Note that this path should be relative to the worktree root.
1689 pub(crate) fn in_dot_git(&self, path: &Path) -> bool {
1690 path.starts_with(self.git_dir_path.as_ref())
1691 }
1692}
1693
1694async fn build_gitignore(abs_path: &Path, fs: &dyn Fs) -> Result<Gitignore> {
1695 let contents = fs.load(abs_path).await?;
1696 let parent = abs_path.parent().unwrap_or_else(|| Path::new("/"));
1697 let mut builder = GitignoreBuilder::new(parent);
1698 for line in contents.lines() {
1699 builder.add_line(Some(abs_path.into()), line)?;
1700 }
1701 Ok(builder.build()?)
1702}
1703
1704impl WorktreeId {
1705 pub fn from_usize(handle_id: usize) -> Self {
1706 Self(handle_id)
1707 }
1708
1709 pub(crate) fn from_proto(id: u64) -> Self {
1710 Self(id as usize)
1711 }
1712
1713 pub fn to_proto(&self) -> u64 {
1714 self.0 as u64
1715 }
1716
1717 pub fn to_usize(&self) -> usize {
1718 self.0
1719 }
1720}
1721
1722impl fmt::Display for WorktreeId {
1723 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1724 self.0.fmt(f)
1725 }
1726}
1727
1728impl Deref for Worktree {
1729 type Target = Snapshot;
1730
1731 fn deref(&self) -> &Self::Target {
1732 match self {
1733 Worktree::Local(worktree) => &worktree.snapshot,
1734 Worktree::Remote(worktree) => &worktree.snapshot,
1735 }
1736 }
1737}
1738
1739impl Deref for LocalWorktree {
1740 type Target = LocalSnapshot;
1741
1742 fn deref(&self) -> &Self::Target {
1743 &self.snapshot
1744 }
1745}
1746
1747impl Deref for RemoteWorktree {
1748 type Target = Snapshot;
1749
1750 fn deref(&self) -> &Self::Target {
1751 &self.snapshot
1752 }
1753}
1754
impl fmt::Debug for LocalWorktree {
    // Debug output delegates to the underlying snapshot.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        self.snapshot.fmt(f)
    }
}
1760
impl fmt::Debug for Snapshot {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Wrappers that render the two sum trees compactly: the path-keyed
        // tree as a path -> id map, and the id-keyed tree as a plain list.
        struct EntriesById<'a>(&'a SumTree<PathEntry>);
        struct EntriesByPath<'a>(&'a SumTree<Entry>);

        impl<'a> fmt::Debug for EntriesByPath<'a> {
            fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
                f.debug_map()
                    .entries(self.0.iter().map(|entry| (&entry.path, entry.id)))
                    .finish()
            }
        }

        impl<'a> fmt::Debug for EntriesById<'a> {
            fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
                f.debug_list().entries(self.0.iter()).finish()
            }
        }

        f.debug_struct("Snapshot")
            .field("id", &self.id)
            .field("root_name", &self.root_name)
            .field("entries_by_path", &EntriesByPath(&self.entries_by_path))
            .field("entries_by_id", &EntriesById(&self.entries_by_id))
            .finish()
    }
}
1788
/// A handle to a file within a worktree, used by buffers to identify the
/// file they were loaded from.
#[derive(Clone, PartialEq)]
pub struct File {
    pub worktree: ModelHandle<Worktree>,
    // Path of the file, relative to the worktree root.
    pub path: Arc<Path>,
    // Last-observed filesystem modification time.
    pub mtime: SystemTime,
    pub(crate) entry_id: ProjectEntryId,
    // False when this handle belongs to a remote (collaboration) worktree.
    pub(crate) is_local: bool,
    // True once the underlying entry has been removed from the worktree.
    pub(crate) is_deleted: bool,
}
1798
impl language::File for File {
    // A `File` acts as a `LocalFile` only when it belongs to a local worktree.
    fn as_local(&self) -> Option<&dyn language::LocalFile> {
        if self.is_local {
            Some(self)
        } else {
            None
        }
    }

    fn mtime(&self) -> SystemTime {
        self.mtime
    }

    // Worktree-relative path of the file.
    fn path(&self) -> &Arc<Path> {
        &self.path
    }

    /// Returns a path suitable for display: the worktree name followed by
    /// the file's worktree-relative path. For invisible worktrees, the
    /// worktree's absolute path is used instead (abbreviated with `~` when
    /// it lies inside the user's home directory).
    fn full_path(&self, cx: &AppContext) -> PathBuf {
        let mut full_path = PathBuf::new();
        let worktree = self.worktree.read(cx);

        if worktree.is_visible() {
            full_path.push(worktree.root_name());
        } else {
            let path = worktree.abs_path();

            if worktree.is_local() && path.starts_with(HOME.as_path()) {
                full_path.push("~");
                full_path.push(path.strip_prefix(HOME.as_path()).unwrap());
            } else {
                full_path.push(path)
            }
        }

        // The root entry itself has an empty relative path; skip it.
        if self.path.components().next().is_some() {
            full_path.push(&self.path);
        }

        full_path
    }

    /// Returns the last component of this handle's absolute path. If this handle refers to the root
    /// of its worktree, then this method will return the name of the worktree itself.
    fn file_name<'a>(&'a self, cx: &'a AppContext) -> &'a OsStr {
        self.path
            .file_name()
            .unwrap_or_else(|| OsStr::new(&self.worktree.read(cx).root_name))
    }

    fn is_deleted(&self) -> bool {
        self.is_deleted
    }

    fn as_any(&self) -> &dyn Any {
        self
    }

    fn to_proto(&self) -> rpc::proto::File {
        rpc::proto::File {
            // Worktree ids are derived from model handle ids (see
            // `WorktreeId::from_usize`), so the handle id is used directly.
            worktree_id: self.worktree.id() as u64,
            entry_id: self.entry_id.to_proto(),
            path: self.path.to_string_lossy().into(),
            mtime: Some(self.mtime.into()),
            is_deleted: self.is_deleted,
        }
    }
}
1866
impl language::LocalFile for File {
    /// The absolute filesystem path of this file.
    fn abs_path(&self, cx: &AppContext) -> PathBuf {
        self.worktree
            .read(cx)
            .as_local()
            .unwrap()
            .abs_path
            .join(&self.path)
    }

    /// Loads the file's contents from disk on a background thread.
    fn load(&self, cx: &AppContext) -> Task<Result<String>> {
        let worktree = self.worktree.read(cx).as_local().unwrap();
        let abs_path = worktree.absolutize(&self.path);
        let fs = worktree.fs.clone();
        cx.background()
            .spawn(async move { fs.load(&abs_path).await })
    }

    /// Notifies collaborators that the buffer for this file was reloaded
    /// from disk. Only sends a message when the worktree is shared.
    fn buffer_reloaded(
        &self,
        buffer_id: u64,
        version: &clock::Global,
        fingerprint: RopeFingerprint,
        line_ending: LineEnding,
        mtime: SystemTime,
        cx: &mut AppContext,
    ) {
        let worktree = self.worktree.read(cx).as_local().unwrap();
        if let Some(project_id) = worktree.share.as_ref().map(|share| share.project_id) {
            worktree
                .client
                .send(proto::BufferReloaded {
                    project_id,
                    buffer_id,
                    version: serialize_version(version),
                    mtime: Some(mtime.into()),
                    fingerprint: serialize_fingerprint(fingerprint),
                    line_ending: serialize_line_ending(line_ending) as i32,
                })
                .log_err();
        }
    }
}
1910
impl File {
    /// Deserializes a file handle received from a remote peer, validating
    /// that it belongs to the given (remote) worktree.
    pub fn from_proto(
        proto: rpc::proto::File,
        worktree: ModelHandle<Worktree>,
        cx: &AppContext,
    ) -> Result<Self> {
        let worktree_id = worktree
            .read(cx)
            .as_remote()
            .ok_or_else(|| anyhow!("not remote"))?
            .id();

        if worktree_id.to_proto() != proto.worktree_id {
            return Err(anyhow!("worktree id does not match file"));
        }

        Ok(Self {
            worktree,
            path: Path::new(&proto.path).into(),
            mtime: proto.mtime.ok_or_else(|| anyhow!("no timestamp"))?.into(),
            entry_id: ProjectEntryId::from_proto(proto.entry_id),
            is_local: false,
            is_deleted: proto.is_deleted,
        })
    }

    /// Downcasts a generic `language::File` back to this concrete type.
    pub fn from_dyn(file: Option<&Arc<dyn language::File>>) -> Option<&Self> {
        file.and_then(|f| f.as_any().downcast_ref())
    }

    /// The id of the worktree this file belongs to.
    pub fn worktree_id(&self, cx: &AppContext) -> WorktreeId {
        self.worktree.read(cx).id()
    }

    /// The project entry id for this file, or `None` if it has been deleted.
    pub fn project_entry_id(&self, _: &AppContext) -> Option<ProjectEntryId> {
        if self.is_deleted {
            None
        } else {
            Some(self.entry_id)
        }
    }
}
1953
/// A single file or directory within a worktree snapshot.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct Entry {
    pub id: ProjectEntryId,
    pub kind: EntryKind,
    // Path relative to the worktree root.
    pub path: Arc<Path>,
    pub inode: u64,
    pub mtime: SystemTime,
    pub is_symlink: bool,
    // True when the entry matches a gitignore rule.
    pub is_ignored: bool,
}
1964
/// The kind of a worktree entry.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum EntryKind {
    // A directory that has been discovered but whose children have not
    // been scanned yet.
    PendingDir,
    // A fully-scanned directory.
    Dir,
    // A regular file, with a character bag over its path for fuzzy matching.
    File(CharBag),
}
1971
/// Describes how a path changed between two snapshots.
#[derive(Clone, Copy, Debug)]
pub enum PathChange {
    Added,
    Removed,
    Updated,
    // The path was either added or updated — the scanner could not tell
    // which (e.g. for events observed before precise state was available).
    AddedOrUpdated,
}
1979
1980impl Entry {
1981 fn new(
1982 path: Arc<Path>,
1983 metadata: &fs::Metadata,
1984 next_entry_id: &AtomicUsize,
1985 root_char_bag: CharBag,
1986 ) -> Self {
1987 Self {
1988 id: ProjectEntryId::new(next_entry_id),
1989 kind: if metadata.is_dir {
1990 EntryKind::PendingDir
1991 } else {
1992 EntryKind::File(char_bag_for_path(root_char_bag, &path))
1993 },
1994 path,
1995 inode: metadata.inode,
1996 mtime: metadata.mtime,
1997 is_symlink: metadata.is_symlink,
1998 is_ignored: false,
1999 }
2000 }
2001
2002 pub fn is_dir(&self) -> bool {
2003 matches!(self.kind, EntryKind::Dir | EntryKind::PendingDir)
2004 }
2005
2006 pub fn is_file(&self) -> bool {
2007 matches!(self.kind, EntryKind::File(_))
2008 }
2009}
2010
2011impl sum_tree::Item for Entry {
2012 type Summary = EntrySummary;
2013
2014 fn summary(&self) -> Self::Summary {
2015 let visible_count = if self.is_ignored { 0 } else { 1 };
2016 let file_count;
2017 let visible_file_count;
2018 if self.is_file() {
2019 file_count = 1;
2020 visible_file_count = visible_count;
2021 } else {
2022 file_count = 0;
2023 visible_file_count = 0;
2024 }
2025
2026 EntrySummary {
2027 max_path: self.path.clone(),
2028 count: 1,
2029 visible_count,
2030 file_count,
2031 visible_file_count,
2032 }
2033 }
2034}
2035
impl sum_tree::KeyedItem for Entry {
    type Key = PathKey;

    // Entries are keyed (and thus ordered) by their worktree-relative path.
    fn key(&self) -> Self::Key {
        PathKey(self.path.clone())
    }
}
2043
/// Aggregated statistics for a subtree of `Entry` values in a sum tree.
#[derive(Clone, Debug)]
pub struct EntrySummary {
    // The maximum (last, in path order) path in the summarized subtree.
    max_path: Arc<Path>,
    // Total number of entries.
    count: usize,
    // Entries that are not gitignored.
    visible_count: usize,
    // Entries that are regular files.
    file_count: usize,
    // Regular files that are not gitignored.
    visible_file_count: usize,
}
2052
2053impl Default for EntrySummary {
2054 fn default() -> Self {
2055 Self {
2056 max_path: Arc::from(Path::new("")),
2057 count: 0,
2058 visible_count: 0,
2059 file_count: 0,
2060 visible_file_count: 0,
2061 }
2062 }
2063}
2064
impl sum_tree::Summary for EntrySummary {
    type Context = ();

    fn add_summary(&mut self, rhs: &Self, _: &()) {
        // `rhs` summarizes entries that sort after ours, so its max path wins;
        // all counts accumulate.
        self.max_path = rhs.max_path.clone();
        self.count += rhs.count;
        self.visible_count += rhs.visible_count;
        self.file_count += rhs.file_count;
        self.visible_file_count += rhs.visible_file_count;
    }
}
2076
/// An id-keyed record of an entry, stored in `entries_by_id` so an entry's
/// path can be looked up from its project-wide id.
#[derive(Clone, Debug)]
struct PathEntry {
    id: ProjectEntryId,
    path: Arc<Path>,
    is_ignored: bool,
    // The scan in which this record was last modified.
    scan_id: usize,
}
2084
impl sum_tree::Item for PathEntry {
    type Summary = PathEntrySummary;

    // The summary tracks the maximum entry id in the subtree.
    fn summary(&self) -> Self::Summary {
        PathEntrySummary { max_id: self.id }
    }
}
2092
impl sum_tree::KeyedItem for PathEntry {
    type Key = ProjectEntryId;

    // Records are keyed (and thus ordered) by their entry id.
    fn key(&self) -> Self::Key {
        self.id
    }
}
2100
/// Summary for the id-keyed entry tree: the largest id in a subtree.
#[derive(Clone, Debug, Default)]
struct PathEntrySummary {
    max_id: ProjectEntryId,
}
2105
impl sum_tree::Summary for PathEntrySummary {
    type Context = ();

    fn add_summary(&mut self, summary: &Self, _: &Self::Context) {
        // `summary` covers records with larger ids, so its max id wins.
        self.max_id = summary.max_id;
    }
}
2113
impl<'a> sum_tree::Dimension<'a, PathEntrySummary> for ProjectEntryId {
    // Allows seeking through the id-keyed tree by entry id.
    fn add_summary(&mut self, summary: &'a PathEntrySummary, _: &()) {
        *self = summary.max_id;
    }
}
2119
/// Key type ordering `Entry` values by their worktree-relative path.
#[derive(Clone, Debug, Eq, PartialEq, Ord, PartialOrd)]
pub struct PathKey(Arc<Path>);
2122
2123impl Default for PathKey {
2124 fn default() -> Self {
2125 Self(Path::new("").into())
2126 }
2127}
2128
impl<'a> sum_tree::Dimension<'a, EntrySummary> for PathKey {
    // Allows seeking through the path-keyed tree by path.
    fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
        self.0 = summary.max_path.clone();
    }
}
2134
/// Scans a worktree's directory tree on background threads, keeping
/// `snapshot` up to date as filesystem events arrive.
struct BackgroundScanner {
    snapshot: Mutex<LocalSnapshot>,
    fs: Arc<dyn Fs>,
    // Channel for reporting scan progress and results to the worktree.
    status_updates_tx: UnboundedSender<ScanState>,
    executor: Arc<executor::Background>,
    // Requests from the worktree to re-scan specific paths, each paired with
    // a barrier to signal completion.
    refresh_requests_rx: channel::Receiver<(Vec<PathBuf>, barrier::Sender)>,
    // The snapshot as of the last status update, plus the paths changed
    // since then; used to compute change sets between updates.
    prev_state: Mutex<(Snapshot, Vec<Arc<Path>>)>,
    finished_initial_scan: bool,
}
2144
2145impl BackgroundScanner {
2146 fn new(
2147 snapshot: LocalSnapshot,
2148 fs: Arc<dyn Fs>,
2149 status_updates_tx: UnboundedSender<ScanState>,
2150 executor: Arc<executor::Background>,
2151 refresh_requests_rx: channel::Receiver<(Vec<PathBuf>, barrier::Sender)>,
2152 ) -> Self {
2153 Self {
2154 fs,
2155 status_updates_tx,
2156 executor,
2157 refresh_requests_rx,
2158 prev_state: Mutex::new((snapshot.snapshot.clone(), Vec::new())),
2159 snapshot: Mutex::new(snapshot),
2160 finished_initial_scan: false,
2161 }
2162 }
2163
    /// Drives the scanner: performs the initial recursive scan, then loops
    /// forever, handling refresh requests and filesystem events until the
    /// worktree is dropped.
    async fn run(
        &mut self,
        mut events_rx: Pin<Box<dyn Send + Stream<Item = Vec<fsevent::Event>>>>,
    ) {
        use futures::FutureExt as _;

        let (root_abs_path, root_inode) = {
            let snapshot = self.snapshot.lock();
            (
                snapshot.abs_path.clone(),
                snapshot.root_entry().map(|e| e.inode),
            )
        };

        // Populate ignores above the root.
        let ignore_stack;
        for ancestor in root_abs_path.ancestors().skip(1) {
            if let Ok(ignore) = build_gitignore(&ancestor.join(&*GITIGNORE), self.fs.as_ref()).await
            {
                self.snapshot
                    .lock()
                    .ignores_by_parent_abs_path
                    .insert(ancestor.into(), (ignore.into(), 0));
            }
        }
        {
            let mut snapshot = self.snapshot.lock();
            snapshot.scan_id += 1;
            ignore_stack = snapshot.ignore_stack_for_abs_path(&root_abs_path, true);
            // If an ancestor's gitignore rules ignore the root itself, mark
            // the root entry accordingly before scanning.
            if ignore_stack.is_all() {
                if let Some(mut root_entry) = snapshot.root_entry().cloned() {
                    root_entry.is_ignored = true;
                    snapshot.insert_entry(root_entry, self.fs.as_ref());
                }
            }
        };

        // Perform an initial scan of the directory.
        let (scan_job_tx, scan_job_rx) = channel::unbounded();
        smol::block_on(scan_job_tx.send(ScanJob {
            abs_path: root_abs_path,
            path: Arc::from(Path::new("")),
            ignore_stack,
            ancestor_inodes: TreeSet::from_ordered_entries(root_inode),
            scan_queue: scan_job_tx.clone(),
        }))
        .unwrap();
        // Drop the sender so the scan completes once all queued jobs (and the
        // jobs they spawn) have been processed.
        drop(scan_job_tx);
        self.scan_dirs(true, scan_job_rx).await;
        {
            let mut snapshot = self.snapshot.lock();
            snapshot.completed_scan_id = snapshot.scan_id;
        }
        self.send_status_update(false, None);

        // Process any FS events that occurred while performing the initial
        // scan. For these events, update events cannot be as precise, because
        // we didn't have the previous state loaded yet.
        if let Poll::Ready(Some(events)) = futures::poll!(events_rx.next()) {
            let mut paths = events.into_iter().map(|e| e.path).collect::<Vec<_>>();
            // Coalesce any further events that are already pending.
            while let Poll::Ready(Some(more_events)) = futures::poll!(events_rx.next()) {
                paths.extend(more_events.into_iter().map(|e| e.path));
            }
            self.process_events(paths).await;
        }

        self.finished_initial_scan = true;

        // Continue processing events until the worktree is dropped.
        loop {
            select_biased! {
                // Process any path refresh requests from the worktree. Prioritize
                // these before handling changes reported by the filesystem.
                request = self.refresh_requests_rx.recv().fuse() => {
                    let Ok((paths, barrier)) = request else { break };
                    if !self.process_refresh_request(paths, barrier).await {
                        return;
                    }
                }

                events = events_rx.next().fuse() => {
                    let Some(events) = events else { break };
                    let mut paths = events.into_iter().map(|e| e.path).collect::<Vec<_>>();
                    // Coalesce any further events that are already pending.
                    while let Poll::Ready(Some(more_events)) = futures::poll!(events_rx.next()) {
                        paths.extend(more_events.into_iter().map(|e| e.path));
                    }
                    self.process_events(paths).await;
                }
            }
        }
    }
2255
    /// Reloads the given paths and reports the result to the worktree,
    /// releasing `barrier` so the requester can observe completion. Returns
    /// the result of delivering the status update.
    async fn process_refresh_request(&self, paths: Vec<PathBuf>, barrier: barrier::Sender) -> bool {
        self.reload_entries_for_paths(paths, None).await;
        self.send_status_update(false, Some(barrier))
    }
2260
    /// Handles a batch of filesystem events: reloads the affected entries,
    /// rescans any directories they spawned, recomputes ignore statuses, and
    /// publishes an updated snapshot.
    async fn process_events(&mut self, paths: Vec<PathBuf>) {
        let (scan_job_tx, scan_job_rx) = channel::unbounded();
        if let Some(mut paths) = self
            .reload_entries_for_paths(paths, Some(scan_job_tx.clone()))
            .await
        {
            // Accumulate the changed paths (sorted) for the next change set.
            paths.sort_unstable();
            util::extend_sorted(&mut self.prev_state.lock().1, paths, usize::MAX, Ord::cmp);
        }
        drop(scan_job_tx);
        self.scan_dirs(false, scan_job_rx).await;

        self.update_ignore_statuses().await;

        let mut snapshot = self.snapshot.lock();
        // Drop repository records whose `.git` directory no longer exists.
        let mut git_repositories = mem::take(&mut snapshot.git_repositories);
        git_repositories.retain(|repo| snapshot.entry_for_path(&repo.git_dir_path).is_some());
        snapshot.git_repositories = git_repositories;
        snapshot.removed_entry_ids.clear();
        snapshot.completed_scan_id = snapshot.scan_id;
        drop(snapshot);

        self.send_status_update(false, None);
    }
2285
    /// Processes queued directory-scan jobs on all available CPUs until the
    /// queue is exhausted, optionally emitting periodic progress updates.
    async fn scan_dirs(
        &self,
        enable_progress_updates: bool,
        scan_jobs_rx: channel::Receiver<ScanJob>,
    ) {
        use futures::FutureExt as _;

        if self
            .status_updates_tx
            .unbounded_send(ScanState::Started)
            .is_err()
        {
            // The worktree has been dropped; there is no one to scan for.
            return;
        }

        let progress_update_count = AtomicUsize::new(0);
        self.executor
            .scoped(|scope| {
                for _ in 0..self.executor.num_cpus() {
                    scope.spawn(async {
                        let mut last_progress_update_count = 0;
                        let progress_update_timer = self.progress_timer(enable_progress_updates).fuse();
                        futures::pin_mut!(progress_update_timer);

                        loop {
                            select_biased! {
                                // Process any path refresh requests before moving on to process
                                // the scan queue, so that user operations are prioritized.
                                request = self.refresh_requests_rx.recv().fuse() => {
                                    let Ok((paths, barrier)) = request else { break };
                                    if !self.process_refresh_request(paths, barrier).await {
                                        return;
                                    }
                                }

                                // Send periodic progress updates to the worktree. Use an atomic counter
                                // to ensure that only one of the workers sends a progress update after
                                // the update interval elapses.
                                _ = progress_update_timer => {
                                    match progress_update_count.compare_exchange(
                                        last_progress_update_count,
                                        last_progress_update_count + 1,
                                        SeqCst,
                                        SeqCst
                                    ) {
                                        Ok(_) => {
                                            last_progress_update_count += 1;
                                            self.send_status_update(true, None);
                                        }
                                        Err(count) => {
                                            // Another worker already sent this
                                            // update; adopt its count.
                                            last_progress_update_count = count;
                                        }
                                    }
                                    progress_update_timer.set(self.progress_timer(enable_progress_updates).fuse());
                                }

                                // Recursively load directories from the file system.
                                job = scan_jobs_rx.recv().fuse() => {
                                    let Ok(job) = job else { break };
                                    if let Err(err) = self.scan_dir(&job).await {
                                        if job.path.as_ref() != Path::new("") {
                                            log::error!("error scanning directory {:?}: {}", job.abs_path, err);
                                        }
                                    }
                                }
                            }
                        }
                    })
                }
            })
            .await;
    }
2358
    /// Publishes the current snapshot on the status channel, together with
    /// the change set since the previously-published snapshot. Returns
    /// `false` if the receiving side has been dropped.
    fn send_status_update(&self, scanning: bool, barrier: Option<barrier::Sender>) -> bool {
        let mut prev_state = self.prev_state.lock();
        let snapshot = self.snapshot.lock().clone();
        // Swap the stored previous snapshot for the current one, keeping the
        // old value so we can diff against it below.
        let mut old_snapshot = snapshot.snapshot.clone();
        mem::swap(&mut old_snapshot, &mut prev_state.0);
        // Take (and reset) the paths accumulated since the last update.
        let changed_paths = mem::take(&mut prev_state.1);
        let changes = self.build_change_set(&old_snapshot, &snapshot.snapshot, changed_paths);
        self.status_updates_tx
            .unbounded_send(ScanState::Updated {
                snapshot,
                changes,
                scanning,
                barrier,
            })
            .is_ok()
    }
2375
    /// Scans a single directory: builds entries for its children, records
    /// them in the snapshot, and queues follow-up jobs for child
    /// directories. Returns an error only if the directory itself cannot be
    /// read; per-child errors are logged and skipped.
    async fn scan_dir(&self, job: &ScanJob) -> Result<()> {
        let mut new_entries: Vec<Entry> = Vec::new();
        // One slot is pushed per child directory, in the same order as the
        // directory entries in `new_entries`. `None` marks a directory we
        // will not descend into (symlink cycle).
        let mut new_jobs: Vec<Option<ScanJob>> = Vec::new();
        let mut ignore_stack = job.ignore_stack.clone();
        let mut new_ignore = None;
        let (root_abs_path, root_char_bag, next_entry_id) = {
            let snapshot = self.snapshot.lock();
            (
                snapshot.abs_path().clone(),
                snapshot.root_char_bag,
                snapshot.next_entry_id.clone(),
            )
        };
        let mut child_paths = self.fs.read_dir(&job.abs_path).await?;
        while let Some(child_abs_path) = child_paths.next().await {
            let child_abs_path: Arc<Path> = match child_abs_path {
                Ok(child_abs_path) => child_abs_path.into(),
                Err(error) => {
                    log::error!("error processing entry {:?}", error);
                    continue;
                }
            };

            let child_name = child_abs_path.file_name().unwrap();
            let child_path: Arc<Path> = job.path.join(child_name).into();
            let child_metadata = match self.fs.metadata(&child_abs_path).await {
                Ok(Some(metadata)) => metadata,
                // No metadata: the child likely disappeared between listing
                // and statting it; skip.
                Ok(None) => continue,
                Err(err) => {
                    log::error!("error processing {:?}: {:?}", child_abs_path, err);
                    continue;
                }
            };

            // If we find a .gitignore, add it to the stack of ignores used to determine which paths are ignored
            if child_name == *GITIGNORE {
                match build_gitignore(&child_abs_path, self.fs.as_ref()).await {
                    Ok(ignore) => {
                        let ignore = Arc::new(ignore);
                        ignore_stack = ignore_stack.append(job.abs_path.clone(), ignore.clone());
                        new_ignore = Some(ignore);
                    }
                    Err(error) => {
                        log::error!(
                            "error loading .gitignore file {:?} - {:?}",
                            child_name,
                            error
                        );
                    }
                }

                // Update ignore status of any child entries we've already processed to reflect the
                // ignore file in the current directory. Because `.gitignore` starts with a `.`, it
                // is usually encountered early in the listing, so there should rarely be many such
                // entries. Update the ignore stack associated with any new jobs as well.
                let mut new_jobs = new_jobs.iter_mut();
                for entry in &mut new_entries {
                    let entry_abs_path = root_abs_path.join(&entry.path);
                    entry.is_ignored =
                        ignore_stack.is_abs_path_ignored(&entry_abs_path, entry.is_dir());

                    // Directory entries and job slots were pushed in lockstep,
                    // so each directory entry has a matching slot in `new_jobs`.
                    if entry.is_dir() {
                        if let Some(job) = new_jobs.next().expect("Missing scan job for entry") {
                            job.ignore_stack = if entry.is_ignored {
                                IgnoreStack::all()
                            } else {
                                ignore_stack.clone()
                            };
                        }
                    }
                }
            }

            let mut child_entry = Entry::new(
                child_path.clone(),
                &child_metadata,
                &next_entry_id,
                root_char_bag,
            );

            if child_entry.is_dir() {
                let is_ignored = ignore_stack.is_abs_path_ignored(&child_abs_path, true);
                child_entry.is_ignored = is_ignored;

                // Avoid recursing until crash in the case of a recursive symlink
                if !job.ancestor_inodes.contains(&child_entry.inode) {
                    let mut ancestor_inodes = job.ancestor_inodes.clone();
                    ancestor_inodes.insert(child_entry.inode);

                    new_jobs.push(Some(ScanJob {
                        abs_path: child_abs_path,
                        path: child_path,
                        ignore_stack: if is_ignored {
                            IgnoreStack::all()
                        } else {
                            ignore_stack.clone()
                        },
                        ancestor_inodes,
                        scan_queue: job.scan_queue.clone(),
                    }));
                } else {
                    new_jobs.push(None);
                }
            } else {
                child_entry.is_ignored = ignore_stack.is_abs_path_ignored(&child_abs_path, false);
            }

            new_entries.push(child_entry);
        }

        self.snapshot.lock().populate_dir(
            job.path.clone(),
            new_entries,
            new_ignore,
            self.fs.as_ref(),
        );

        // Enqueue child-directory jobs only after the parent directory has
        // been populated in the snapshot.
        for new_job in new_jobs {
            if let Some(new_job) = new_job {
                job.scan_queue.send(new_job).await.unwrap();
            }
        }

        Ok(())
    }
2501
    /// Refreshes the snapshot entries for the given absolute paths. When
    /// `scan_queue_tx` is provided, directories among the paths are also
    /// queued for a recursive rescan. Returns the affected worktree-relative
    /// paths, or `None` if the worktree root cannot be canonicalized.
    async fn reload_entries_for_paths(
        &self,
        mut abs_paths: Vec<PathBuf>,
        scan_queue_tx: Option<Sender<ScanJob>>,
    ) -> Option<Vec<Arc<Path>>> {
        let doing_recursive_update = scan_queue_tx.is_some();

        // Sort so ancestors precede their descendants, then drop any path
        // whose ancestor is also in the list (`dedup_by` removes the first
        // of each matching pair, i.e. the descendant).
        abs_paths.sort_unstable();
        abs_paths.dedup_by(|a, b| a.starts_with(&b));

        let root_abs_path = self.snapshot.lock().abs_path.clone();
        let root_canonical_path = self.fs.canonicalize(&root_abs_path).await.log_err()?;
        let metadata = futures::future::join_all(
            abs_paths
                .iter()
                .map(|abs_path| self.fs.metadata(&abs_path))
                .collect::<Vec<_>>(),
        )
        .await;

        let mut snapshot = self.snapshot.lock();
        let is_idle = snapshot.completed_scan_id == snapshot.scan_id;
        snapshot.scan_id += 1;
        // A non-recursive refresh completes within this call, so keep the
        // completed scan id in step with the new scan id.
        if is_idle && !doing_recursive_update {
            snapshot.completed_scan_id = snapshot.scan_id;
        }

        // Remove any entries for paths that no longer exist or are being recursively
        // refreshed. Do this before adding any new entries, so that renames can be
        // detected regardless of the order of the paths.
        let mut event_paths = Vec::<Arc<Path>>::with_capacity(abs_paths.len());
        for (abs_path, metadata) in abs_paths.iter().zip(metadata.iter()) {
            if let Ok(path) = abs_path.strip_prefix(&root_canonical_path) {
                if matches!(metadata, Ok(None)) || doing_recursive_update {
                    snapshot.remove_path(path);
                }
                event_paths.push(path.into());
            } else {
                log::error!(
                    "unexpected event {:?} for root path {:?}",
                    abs_path,
                    root_canonical_path
                );
            }
        }

        // NOTE(review): if any path above failed `strip_prefix`, `event_paths`
        // is shorter than `metadata`, and this zip pairs paths with the wrong
        // metadata — confirm that case cannot co-occur with valid paths.
        for (path, metadata) in event_paths.iter().cloned().zip(metadata.into_iter()) {
            let abs_path: Arc<Path> = root_abs_path.join(&path).into();

            match metadata {
                Ok(Some(metadata)) => {
                    let ignore_stack =
                        snapshot.ignore_stack_for_abs_path(&abs_path, metadata.is_dir);
                    let mut fs_entry = Entry::new(
                        path.clone(),
                        &metadata,
                        snapshot.next_entry_id.as_ref(),
                        snapshot.root_char_bag,
                    );
                    fs_entry.is_ignored = ignore_stack.is_all();
                    snapshot.insert_entry(fs_entry, self.fs.as_ref());

                    // A change inside a repository's `.git` directory means
                    // the repository state may have changed: reload its index.
                    let scan_id = snapshot.scan_id;
                    if let Some(repo) = snapshot.repo_with_dot_git_containing(&path) {
                        repo.repo.lock().reload_index();
                        repo.scan_id = scan_id;
                    }

                    if let Some(scan_queue_tx) = &scan_queue_tx {
                        let mut ancestor_inodes = snapshot.ancestor_inodes_for_path(&path);
                        // Queue a recursive scan of this directory, guarding
                        // against symlink cycles via the ancestor inode set.
                        if metadata.is_dir && !ancestor_inodes.contains(&metadata.inode) {
                            ancestor_inodes.insert(metadata.inode);
                            smol::block_on(scan_queue_tx.send(ScanJob {
                                abs_path,
                                path,
                                ignore_stack,
                                ancestor_inodes,
                                scan_queue: scan_queue_tx.clone(),
                            }))
                            .unwrap();
                        }
                    }
                }
                Ok(None) => {}
                Err(err) => {
                    // TODO - create a special 'error' entry in the entries tree to mark this
                    log::error!("error reading file on event {:?}", err);
                }
            }
        }

        Some(event_paths)
    }
2595
    /// Re-evaluates ignore statuses for directories whose `.gitignore`
    /// changed during the current scan, and prunes bookkeeping for
    /// `.gitignore` files that no longer exist.
    async fn update_ignore_statuses(&self) {
        use futures::FutureExt as _;

        let mut snapshot = self.snapshot.lock().clone();
        let mut ignores_to_update = Vec::new();
        let mut ignores_to_delete = Vec::new();
        for (parent_abs_path, (_, scan_id)) in &snapshot.ignores_by_parent_abs_path {
            if let Ok(parent_path) = parent_abs_path.strip_prefix(&snapshot.abs_path) {
                // A gitignore whose scan id is newer than the last completed
                // scan was (re)loaded during this scan.
                if *scan_id > snapshot.completed_scan_id
                    && snapshot.entry_for_path(parent_path).is_some()
                {
                    ignores_to_update.push(parent_abs_path.clone());
                }

                let ignore_path = parent_path.join(&*GITIGNORE);
                if snapshot.entry_for_path(ignore_path).is_none() {
                    ignores_to_delete.push(parent_abs_path.clone());
                }
            }
        }

        // Remove deleted gitignores from both our local copy and the shared
        // snapshot.
        for parent_abs_path in ignores_to_delete {
            snapshot.ignores_by_parent_abs_path.remove(&parent_abs_path);
            self.snapshot
                .lock()
                .ignores_by_parent_abs_path
                .remove(&parent_abs_path);
        }

        let (ignore_queue_tx, ignore_queue_rx) = channel::unbounded();
        ignores_to_update.sort_unstable();
        let mut ignores_to_update = ignores_to_update.into_iter().peekable();
        while let Some(parent_abs_path) = ignores_to_update.next() {
            // Skip descendants of this path: the recursive job queued for
            // the ancestor covers them.
            while ignores_to_update
                .peek()
                .map_or(false, |p| p.starts_with(&parent_abs_path))
            {
                ignores_to_update.next().unwrap();
            }

            let ignore_stack = snapshot.ignore_stack_for_abs_path(&parent_abs_path, true);
            smol::block_on(ignore_queue_tx.send(UpdateIgnoreStatusJob {
                abs_path: parent_abs_path,
                ignore_stack,
                ignore_queue: ignore_queue_tx.clone(),
            }))
            .unwrap();
        }
        // Drop our sender so the workers below terminate once all queued
        // jobs (and the jobs they spawn) are done.
        drop(ignore_queue_tx);

        self.executor
            .scoped(|scope| {
                for _ in 0..self.executor.num_cpus() {
                    scope.spawn(async {
                        loop {
                            select_biased! {
                                // Process any path refresh requests before moving on to process
                                // the queue of ignore statuses.
                                request = self.refresh_requests_rx.recv().fuse() => {
                                    let Ok((paths, barrier)) = request else { break };
                                    if !self.process_refresh_request(paths, barrier).await {
                                        return;
                                    }
                                }

                                // Recursively process directories whose ignores have changed.
                                job = ignore_queue_rx.recv().fuse() => {
                                    let Ok(job) = job else { break };
                                    self.update_ignore_status(job, &snapshot).await;
                                }
                            }
                        }
                    });
                }
            })
            .await;
    }
2673
    /// Recomputes the ignore status of the direct children of
    /// `job.abs_path`, writes any changes back to the shared snapshot, and
    /// queues follow-up jobs for child directories.
    async fn update_ignore_status(&self, job: UpdateIgnoreStatusJob, snapshot: &LocalSnapshot) {
        let mut ignore_stack = job.ignore_stack;
        // Include this directory's own .gitignore, if it has one.
        if let Some((ignore, _)) = snapshot.ignores_by_parent_abs_path.get(&job.abs_path) {
            ignore_stack = ignore_stack.append(job.abs_path.clone(), ignore.clone());
        }

        let mut entries_by_id_edits = Vec::new();
        let mut entries_by_path_edits = Vec::new();
        let path = job.abs_path.strip_prefix(&snapshot.abs_path).unwrap();
        for mut entry in snapshot.child_entries(path).cloned() {
            let was_ignored = entry.is_ignored;
            let abs_path = snapshot.abs_path().join(&entry.path);
            entry.is_ignored = ignore_stack.is_abs_path_ignored(&abs_path, entry.is_dir());
            if entry.is_dir() {
                let child_ignore_stack = if entry.is_ignored {
                    // Everything inside an ignored directory is ignored.
                    IgnoreStack::all()
                } else {
                    ignore_stack.clone()
                };
                job.ignore_queue
                    .send(UpdateIgnoreStatusJob {
                        abs_path: abs_path.into(),
                        ignore_stack: child_ignore_stack,
                        ignore_queue: job.ignore_queue.clone(),
                    })
                    .await
                    .unwrap();
            }

            // Only record edits for entries whose status actually changed.
            if entry.is_ignored != was_ignored {
                let mut path_entry = snapshot.entries_by_id.get(&entry.id, &()).unwrap().clone();
                path_entry.scan_id = snapshot.scan_id;
                path_entry.is_ignored = entry.is_ignored;
                entries_by_id_edits.push(Edit::Insert(path_entry));
                entries_by_path_edits.push(Edit::Insert(entry));
            }
        }

        let mut snapshot = self.snapshot.lock();
        snapshot.entries_by_path.edit(entries_by_path_edits, &());
        snapshot.entries_by_id.edit(entries_by_id_edits, &());
    }
2716
    /// Diffs two snapshots, restricted to the given `event_paths` and their
    /// descendants, producing a map from path to the kind of change.
    fn build_change_set(
        &self,
        old_snapshot: &Snapshot,
        new_snapshot: &Snapshot,
        event_paths: Vec<Arc<Path>>,
    ) -> HashMap<Arc<Path>, PathChange> {
        use PathChange::{Added, AddedOrUpdated, Removed, Updated};

        let mut changes = HashMap::default();
        let mut old_paths = old_snapshot.entries_by_path.cursor::<PathKey>();
        let mut new_paths = new_snapshot.entries_by_path.cursor::<PathKey>();
        let received_before_initialized = !self.finished_initial_scan;

        for path in event_paths {
            let path = PathKey(path);
            // Position both cursors at the event path, then walk them
            // forward in lockstep as a merge of two sorted sequences.
            old_paths.seek(&path, Bias::Left, &());
            new_paths.seek(&path, Bias::Left, &());

            loop {
                match (old_paths.item(), new_paths.item()) {
                    (Some(old_entry), Some(new_entry)) => {
                        // Stop once both cursors have moved past the event
                        // path and all of its descendants.
                        if old_entry.path > path.0
                            && new_entry.path > path.0
                            && !old_entry.path.starts_with(&path.0)
                            && !new_entry.path.starts_with(&path.0)
                        {
                            break;
                        }

                        match Ord::cmp(&old_entry.path, &new_entry.path) {
                            // Present only in the old snapshot: removed.
                            Ordering::Less => {
                                changes.insert(old_entry.path.clone(), Removed);
                                old_paths.next(&());
                            }
                            Ordering::Equal => {
                                if received_before_initialized {
                                    // If the worktree was not fully initialized when this event was generated,
                                    // we can't know whether this entry was added during the scan or whether
                                    // it was merely updated.
                                    changes.insert(new_entry.path.clone(), AddedOrUpdated);
                                } else if old_entry.mtime != new_entry.mtime {
                                    changes.insert(new_entry.path.clone(), Updated);
                                }
                                old_paths.next(&());
                                new_paths.next(&());
                            }
                            // Present only in the new snapshot: added.
                            Ordering::Greater => {
                                changes.insert(new_entry.path.clone(), Added);
                                new_paths.next(&());
                            }
                        }
                    }
                    (Some(old_entry), None) => {
                        changes.insert(old_entry.path.clone(), Removed);
                        old_paths.next(&());
                    }
                    (None, Some(new_entry)) => {
                        changes.insert(new_entry.path.clone(), Added);
                        new_paths.next(&());
                    }
                    (None, None) => break,
                }
            }
        }
        changes
    }
2783
2784 async fn progress_timer(&self, running: bool) {
2785 if !running {
2786 return futures::future::pending().await;
2787 }
2788
2789 #[cfg(any(test, feature = "test-support"))]
2790 if self.fs.is_fake() {
2791 return self.executor.simulate_random_delay().await;
2792 }
2793
2794 smol::Timer::after(Duration::from_millis(100)).await;
2795 }
2796}
2797
2798fn char_bag_for_path(root_char_bag: CharBag, path: &Path) -> CharBag {
2799 let mut result = root_char_bag;
2800 result.extend(
2801 path.to_string_lossy()
2802 .chars()
2803 .map(|c| c.to_ascii_lowercase()),
2804 );
2805 result
2806}
2807
/// A unit of work for the background scanner: scan one directory.
struct ScanJob {
    // Absolute path of the directory to scan.
    abs_path: Arc<Path>,
    // The same directory, relative to the worktree root.
    path: Arc<Path>,
    // Ignore rules inherited from ancestor directories.
    ignore_stack: Arc<IgnoreStack>,
    // Sender used to enqueue jobs for child directories.
    scan_queue: Sender<ScanJob>,
    // Inodes of ancestor directories, used to detect symlink cycles.
    ancestor_inodes: TreeSet<u64>,
}
2815
/// A unit of work for refreshing ignore statuses: re-evaluate the children
/// of one directory.
struct UpdateIgnoreStatusJob {
    // Absolute path of the directory whose children to re-evaluate.
    abs_path: Arc<Path>,
    // Ignore rules inherited from ancestor directories.
    ignore_stack: Arc<IgnoreStack>,
    // Sender used to enqueue jobs for child directories.
    ignore_queue: Sender<UpdateIgnoreStatusJob>,
}
2821
/// Test-support extensions for worktree model handles.
pub trait WorktreeHandle {
    /// Waits until any pending FS events for the worktree have been handled.
    #[cfg(any(test, feature = "test-support"))]
    fn flush_fs_events<'a>(
        &self,
        cx: &'a gpui::TestAppContext,
    ) -> futures::future::LocalBoxFuture<'a, ()>;
}
2829
impl WorktreeHandle for ModelHandle<Worktree> {
    // The worktree's FS event stream sometimes delivers "redundant" events for FS changes that
    // occurred before the worktree was constructed. These events can cause the worktree to perform
    // extra directory scans, and emit extra scan-state notifications.
    //
    // This function mutates the worktree's directory and waits for those mutations to be picked up,
    // to ensure that all redundant FS events have already been processed.
    #[cfg(any(test, feature = "test-support"))]
    fn flush_fs_events<'a>(
        &self,
        cx: &'a gpui::TestAppContext,
    ) -> futures::future::LocalBoxFuture<'a, ()> {
        use smol::future::FutureExt;

        let filename = "fs-event-sentinel";
        let tree = self.clone();
        let (fs, root_path) = self.read_with(cx, |tree, _| {
            let tree = tree.as_local().unwrap();
            (tree.fs.clone(), tree.abs_path().clone())
        });

        async move {
            // Create and then delete a sentinel file, waiting for each change
            // to show up in the worktree's entries.
            fs.create_file(&root_path.join(filename), Default::default())
                .await
                .unwrap();
            tree.condition(cx, |tree, _| tree.entry_for_path(filename).is_some())
                .await;

            fs.remove_file(&root_path.join(filename), Default::default())
                .await
                .unwrap();
            tree.condition(cx, |tree, _| tree.entry_for_path(filename).is_none())
                .await;

            // Finally, wait for any scan triggered by the sentinel to finish.
            cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
                .await;
        }
        .boxed_local()
    }
}
2870
/// Aggregated counts for the entries preceding a position in the
/// `entries_by_path` tree; used as a cursor dimension during traversal.
#[derive(Clone, Debug)]
struct TraversalProgress<'a> {
    // Largest path covered so far.
    max_path: &'a Path,
    // Total entries, including directories and ignored entries.
    count: usize,
    // Entries that are not ignored (directories included).
    visible_count: usize,
    // Non-directory entries.
    file_count: usize,
    // Non-directory entries that are not ignored.
    visible_file_count: usize,
}
2879
2880impl<'a> TraversalProgress<'a> {
2881 fn count(&self, include_dirs: bool, include_ignored: bool) -> usize {
2882 match (include_ignored, include_dirs) {
2883 (true, true) => self.count,
2884 (true, false) => self.file_count,
2885 (false, true) => self.visible_count,
2886 (false, false) => self.visible_file_count,
2887 }
2888 }
2889}
2890
impl<'a> sum_tree::Dimension<'a, EntrySummary> for TraversalProgress<'a> {
    // Accumulates an entry summary into the running totals.
    fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
        self.max_path = summary.max_path.as_ref();
        self.count += summary.count;
        self.visible_count += summary.visible_count;
        self.file_count += summary.file_count;
        self.visible_file_count += summary.visible_file_count;
    }
}
2900
2901impl<'a> Default for TraversalProgress<'a> {
2902 fn default() -> Self {
2903 Self {
2904 max_path: Path::new(""),
2905 count: 0,
2906 visible_count: 0,
2907 file_count: 0,
2908 visible_file_count: 0,
2909 }
2910 }
2911}
2912
/// An iterator-like cursor over worktree entries, optionally filtering out
/// directories and/or ignored entries.
pub struct Traversal<'a> {
    cursor: sum_tree::Cursor<'a, Entry, TraversalProgress<'a>>,
    // Whether ignored entries are yielded.
    include_ignored: bool,
    // Whether directory entries are yielded.
    include_dirs: bool,
}
2918
impl<'a> Traversal<'a> {
    /// Advances to the next entry matching this traversal's filters.
    /// Returns `false` when the end of the tree is reached.
    pub fn advance(&mut self) -> bool {
        self.advance_to_offset(self.offset() + 1)
    }

    /// Seeks forward so that `offset` matching entries precede the cursor.
    /// Returns `false` when the end of the tree is reached.
    pub fn advance_to_offset(&mut self, offset: usize) -> bool {
        self.cursor.seek_forward(
            &TraversalTarget::Count {
                count: offset,
                include_dirs: self.include_dirs,
                include_ignored: self.include_ignored,
            },
            Bias::Right,
            &(),
        )
    }

    /// Skips past the current entry and all of its descendants, stopping at
    /// the next entry that matches this traversal's filters. Returns `false`
    /// if no such entry exists.
    pub fn advance_to_sibling(&mut self) -> bool {
        while let Some(entry) = self.cursor.item() {
            self.cursor.seek_forward(
                &TraversalTarget::PathSuccessor(&entry.path),
                Bias::Left,
                &(),
            );
            if let Some(entry) = self.cursor.item() {
                // Keep seeking until we land on an entry the filters accept.
                if (self.include_dirs || !entry.is_dir())
                    && (self.include_ignored || !entry.is_ignored)
                {
                    return true;
                }
            }
        }
        false
    }

    /// The entry the traversal is currently positioned on, if any.
    pub fn entry(&self) -> Option<&'a Entry> {
        self.cursor.item()
    }

    /// The number of matching entries that precede the current position.
    pub fn offset(&self) -> usize {
        self.cursor
            .start()
            .count(self.include_dirs, self.include_ignored)
    }
}
2964
2965impl<'a> Iterator for Traversal<'a> {
2966 type Item = &'a Entry;
2967
2968 fn next(&mut self) -> Option<Self::Item> {
2969 if let Some(item) = self.entry() {
2970 self.advance();
2971 Some(item)
2972 } else {
2973 None
2974 }
2975 }
2976}
2977
/// A seek target within the entries tree, used to reposition a `Traversal`.
#[derive(Debug)]
enum TraversalTarget<'a> {
    // Seek to a specific path.
    Path(&'a Path),
    // Seek to the first position that is past the path and all of its
    // descendants.
    PathSuccessor(&'a Path),
    // Seek to the position preceded by `count` entries, under the given
    // filtering options.
    Count {
        count: usize,
        include_ignored: bool,
        include_dirs: bool,
    },
}
2988
2989impl<'a, 'b> SeekTarget<'a, EntrySummary, TraversalProgress<'a>> for TraversalTarget<'b> {
2990 fn cmp(&self, cursor_location: &TraversalProgress<'a>, _: &()) -> Ordering {
2991 match self {
2992 TraversalTarget::Path(path) => path.cmp(&cursor_location.max_path),
2993 TraversalTarget::PathSuccessor(path) => {
2994 if !cursor_location.max_path.starts_with(path) {
2995 Ordering::Equal
2996 } else {
2997 Ordering::Greater
2998 }
2999 }
3000 TraversalTarget::Count {
3001 count,
3002 include_dirs,
3003 include_ignored,
3004 } => Ord::cmp(
3005 count,
3006 &cursor_location.count(*include_dirs, *include_ignored),
3007 ),
3008 }
3009 }
3010}
3011
/// Iterates over the direct children of a directory, skipping each child's
/// own descendants.
struct ChildEntriesIter<'a> {
    // The directory whose children are yielded.
    parent_path: &'a Path,
    traversal: Traversal<'a>,
}
3016
3017impl<'a> Iterator for ChildEntriesIter<'a> {
3018 type Item = &'a Entry;
3019
3020 fn next(&mut self) -> Option<Self::Item> {
3021 if let Some(item) = self.traversal.entry() {
3022 if item.path.starts_with(&self.parent_path) {
3023 self.traversal.advance_to_sibling();
3024 return Some(item);
3025 }
3026 }
3027 None
3028 }
3029}
3030
impl<'a> From<&'a Entry> for proto::Entry {
    /// Converts a worktree entry into its wire representation.
    fn from(entry: &'a Entry) -> Self {
        Self {
            id: entry.id.to_proto(),
            is_dir: entry.is_dir(),
            // Paths are transmitted as (potentially lossy) UTF-8 strings.
            path: entry.path.to_string_lossy().into(),
            inode: entry.inode,
            mtime: Some(entry.mtime.into()),
            is_symlink: entry.is_symlink,
            is_ignored: entry.is_ignored,
        }
    }
}
3044
3045impl<'a> TryFrom<(&'a CharBag, proto::Entry)> for Entry {
3046 type Error = anyhow::Error;
3047
3048 fn try_from((root_char_bag, entry): (&'a CharBag, proto::Entry)) -> Result<Self> {
3049 if let Some(mtime) = entry.mtime {
3050 let kind = if entry.is_dir {
3051 EntryKind::Dir
3052 } else {
3053 let mut char_bag = *root_char_bag;
3054 char_bag.extend(entry.path.chars().map(|c| c.to_ascii_lowercase()));
3055 EntryKind::File(char_bag)
3056 };
3057 let path: Arc<Path> = PathBuf::from(entry.path).into();
3058 Ok(Entry {
3059 id: ProjectEntryId::from_proto(entry.id),
3060 kind,
3061 path,
3062 inode: entry.inode,
3063 mtime: mtime.into(),
3064 is_symlink: entry.is_symlink,
3065 is_ignored: entry.is_ignored,
3066 })
3067 } else {
3068 Err(anyhow!(
3069 "missing mtime in remote worktree entry {:?}",
3070 entry.path
3071 ))
3072 }
3073 }
3074}
3075
3076#[cfg(test)]
3077mod tests {
3078 use super::*;
3079 use fs::repository::FakeGitRepository;
3080 use fs::{FakeFs, RealFs};
3081 use gpui::{executor::Deterministic, TestAppContext};
3082 use pretty_assertions::assert_eq;
3083 use rand::prelude::*;
3084 use serde_json::json;
3085 use std::{env, fmt::Write};
3086 use util::{http::FakeHttpClient, test::temp_tree};
3087
    /// Verifies that traversal respects `.gitignore`: ignored entries are
    /// omitted unless ignored entries are explicitly requested.
    #[gpui::test]
    async fn test_traversal(cx: &mut TestAppContext) {
        let fs = FakeFs::new(cx.background());
        // "a/b" is ignored by the root .gitignore.
        fs.insert_tree(
            "/root",
            json!({
                ".gitignore": "a/b\n",
                "a": {
                    "b": "",
                    "c": "",
                }
            }),
        )
        .await;

        let http_client = FakeHttpClient::with_404_response();
        let client = cx.read(|cx| Client::new(http_client, cx));

        let tree = Worktree::local(
            client,
            Path::new("/root"),
            true,
            fs,
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;

        tree.read_with(cx, |tree, _| {
            // Without ignored entries, "a/b" is absent.
            assert_eq!(
                tree.entries(false)
                    .map(|entry| entry.path.as_ref())
                    .collect::<Vec<_>>(),
                vec![
                    Path::new(""),
                    Path::new(".gitignore"),
                    Path::new("a"),
                    Path::new("a/c"),
                ]
            );
            // With ignored entries, "a/b" is present.
            assert_eq!(
                tree.entries(true)
                    .map(|entry| entry.path.as_ref())
                    .collect::<Vec<_>>(),
                vec![
                    Path::new(""),
                    Path::new(".gitignore"),
                    Path::new("a"),
                    Path::new("a/b"),
                    Path::new("a/c"),
                ]
            );
        })
    }
3145
    /// Verifies that circular symlinks are listed as entries but not
    /// descended into, and that renaming one is reflected after a rescan.
    #[gpui::test(iterations = 10)]
    async fn test_circular_symlinks(executor: Arc<Deterministic>, cx: &mut TestAppContext) {
        let fs = FakeFs::new(cx.background());
        fs.insert_tree(
            "/root",
            json!({
                "lib": {
                    "a": {
                        "a.txt": ""
                    },
                    "b": {
                        "b.txt": ""
                    }
                }
            }),
        )
        .await;
        // Each symlink points back at its parent directory, forming a cycle.
        fs.insert_symlink("/root/lib/a/lib", "..".into()).await;
        fs.insert_symlink("/root/lib/b/lib", "..".into()).await;

        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
        let tree = Worktree::local(
            client,
            Path::new("/root"),
            true,
            fs.clone(),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();

        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;

        // The symlinks appear as entries, but nothing beneath them does.
        tree.read_with(cx, |tree, _| {
            assert_eq!(
                tree.entries(false)
                    .map(|entry| entry.path.as_ref())
                    .collect::<Vec<_>>(),
                vec![
                    Path::new(""),
                    Path::new("lib"),
                    Path::new("lib/a"),
                    Path::new("lib/a/a.txt"),
                    Path::new("lib/a/lib"),
                    Path::new("lib/b"),
                    Path::new("lib/b/b.txt"),
                    Path::new("lib/b/lib"),
                ]
            );
        });

        // Renaming one of the symlinks shows up after events are processed.
        fs.rename(
            Path::new("/root/lib/a/lib"),
            Path::new("/root/lib/a/lib-2"),
            Default::default(),
        )
        .await
        .unwrap();
        executor.run_until_parked();
        tree.read_with(cx, |tree, _| {
            assert_eq!(
                tree.entries(false)
                    .map(|entry| entry.path.as_ref())
                    .collect::<Vec<_>>(),
                vec![
                    Path::new(""),
                    Path::new("lib"),
                    Path::new("lib/a"),
                    Path::new("lib/a/a.txt"),
                    Path::new("lib/a/lib-2"),
                    Path::new("lib/b"),
                    Path::new("lib/b/b.txt"),
                    Path::new("lib/b/lib"),
                ]
            );
        });
    }
3225
    /// Verifies that ignore statuses — from both an ancestor `.gitignore`
    /// and the worktree's own — are applied to entries found in the initial
    /// scan as well as files created afterwards.
    #[gpui::test]
    async fn test_rescan_with_gitignore(cx: &mut TestAppContext) {
        let parent_dir = temp_tree(json!({
            ".gitignore": "ancestor-ignored-file1\nancestor-ignored-file2\n",
            "tree": {
                ".git": {},
                ".gitignore": "ignored-dir\n",
                "tracked-dir": {
                    "tracked-file1": "",
                    "ancestor-ignored-file1": "",
                },
                "ignored-dir": {
                    "ignored-file1": ""
                }
            }
        }));
        let dir = parent_dir.path().join("tree");

        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));

        let tree = Worktree::local(
            client,
            dir.as_path(),
            true,
            Arc::new(RealFs),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;
        tree.flush_fs_events(cx).await;
        // Initial scan: statuses come from both gitignore files.
        cx.read(|cx| {
            let tree = tree.read(cx);
            assert!(
                !tree
                    .entry_for_path("tracked-dir/tracked-file1")
                    .unwrap()
                    .is_ignored
            );
            assert!(
                tree.entry_for_path("tracked-dir/ancestor-ignored-file1")
                    .unwrap()
                    .is_ignored
            );
            assert!(
                tree.entry_for_path("ignored-dir/ignored-file1")
                    .unwrap()
                    .is_ignored
            );
        });

        // Files created after the initial scan get the same treatment.
        std::fs::write(dir.join("tracked-dir/tracked-file2"), "").unwrap();
        std::fs::write(dir.join("tracked-dir/ancestor-ignored-file2"), "").unwrap();
        std::fs::write(dir.join("ignored-dir/ignored-file2"), "").unwrap();
        tree.flush_fs_events(cx).await;
        cx.read(|cx| {
            let tree = tree.read(cx);
            assert!(
                !tree
                    .entry_for_path("tracked-dir/tracked-file2")
                    .unwrap()
                    .is_ignored
            );
            assert!(
                tree.entry_for_path("tracked-dir/ancestor-ignored-file2")
                    .unwrap()
                    .is_ignored
            );
            assert!(
                tree.entry_for_path("ignored-dir/ignored-file2")
                    .unwrap()
                    .is_ignored
            );
            // The `.git` directory itself is reported as ignored.
            assert!(tree.entry_for_path(".git").unwrap().is_ignored);
        });
    }
3304
    /// Verifies repository discovery: paths map to the nearest enclosing
    /// `.git`, changes inside `.git` bump the repo's scan id, and removing
    /// `.git` removes the repository.
    #[gpui::test]
    async fn test_git_repository_for_path(cx: &mut TestAppContext) {
        let root = temp_tree(json!({
            "dir1": {
                ".git": {},
                "deps": {
                    "dep1": {
                        ".git": {},
                        "src": {
                            "a.txt": ""
                        }
                    }
                },
                "src": {
                    "b.txt": ""
                }
            },
            "c.txt": "",
        }));

        let http_client = FakeHttpClient::with_404_response();
        let client = cx.read(|cx| Client::new(http_client, cx));
        let tree = Worktree::local(
            client,
            root.path(),
            true,
            Arc::new(RealFs),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();

        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;
        tree.flush_fs_events(cx).await;

        tree.read_with(cx, |tree, _cx| {
            let tree = tree.as_local().unwrap();

            // A file outside any repository has no repo.
            assert!(tree.repo_for("c.txt".as_ref()).is_none());

            let repo = tree.repo_for("dir1/src/b.txt".as_ref()).unwrap();
            assert_eq!(repo.content_path.as_ref(), Path::new("dir1"));
            assert_eq!(repo.git_dir_path.as_ref(), Path::new("dir1/.git"));

            // A nested repository takes precedence for paths inside it.
            let repo = tree.repo_for("dir1/deps/dep1/src/a.txt".as_ref()).unwrap();
            assert_eq!(repo.content_path.as_ref(), Path::new("dir1/deps/dep1"));
            assert_eq!(repo.git_dir_path.as_ref(), Path::new("dir1/deps/dep1/.git"),);
        });

        let original_scan_id = tree.read_with(cx, |tree, _cx| {
            let tree = tree.as_local().unwrap();
            tree.repo_for("dir1/src/b.txt".as_ref()).unwrap().scan_id
        });

        // A change inside `.git` should bump the repository's scan id.
        std::fs::write(root.path().join("dir1/.git/random_new_file"), "hello").unwrap();
        tree.flush_fs_events(cx).await;

        tree.read_with(cx, |tree, _cx| {
            let tree = tree.as_local().unwrap();
            let new_scan_id = tree.repo_for("dir1/src/b.txt".as_ref()).unwrap().scan_id;
            assert_ne!(
                original_scan_id, new_scan_id,
                "original {original_scan_id}, new {new_scan_id}"
            );
        });

        // Removing `.git` should remove the repository.
        std::fs::remove_dir_all(root.path().join("dir1/.git")).unwrap();
        tree.flush_fs_events(cx).await;

        tree.read_with(cx, |tree, _cx| {
            let tree = tree.as_local().unwrap();

            assert!(tree.repo_for("dir1/src/b.txt".as_ref()).is_none());
        });
    }
3382
    /// Verifies `LocalWorktree::changed_repos`: deletions, updates, and
    /// additions are all retained in the result, while unchanged
    /// repositories are filtered out.
    #[test]
    fn test_changed_repos() {
        // Builds a repository entry whose content path is the parent of the
        // given `.git` directory path.
        fn fake_entry(git_dir_path: impl AsRef<Path>, scan_id: usize) -> GitRepositoryEntry {
            GitRepositoryEntry {
                repo: Arc::new(Mutex::new(FakeGitRepository::default())),
                scan_id,
                content_path: git_dir_path.as_ref().parent().unwrap().into(),
                git_dir_path: git_dir_path.as_ref().into(),
            }
        }

        let prev_repos: Vec<GitRepositoryEntry> = vec![
            fake_entry("/.git", 0),
            fake_entry("/a/.git", 0),
            fake_entry("/a/b/.git", 0),
        ];

        let new_repos: Vec<GitRepositoryEntry> = vec![
            fake_entry("/a/.git", 1),
            fake_entry("/a/b/.git", 0),
            fake_entry("/a/c/.git", 0),
        ];

        let res = LocalWorktree::changed_repos(&prev_repos, &new_repos);

        // Deletion retained
        assert!(res
            .iter()
            .find(|repo| repo.git_dir_path.as_ref() == Path::new("/.git") && repo.scan_id == 0)
            .is_some());

        // Update retained
        assert!(res
            .iter()
            .find(|repo| repo.git_dir_path.as_ref() == Path::new("/a/.git") && repo.scan_id == 1)
            .is_some());

        // Addition retained
        assert!(res
            .iter()
            .find(|repo| repo.git_dir_path.as_ref() == Path::new("/a/c/.git") && repo.scan_id == 0)
            .is_some());

        // Nochange, not retained
        assert!(res
            .iter()
            .find(|repo| repo.git_dir_path.as_ref() == Path::new("/a/b/.git") && repo.scan_id == 0)
            .is_none());
    }
3432
3433 #[gpui::test]
3434 async fn test_write_file(cx: &mut TestAppContext) {
3435 let dir = temp_tree(json!({
3436 ".git": {},
3437 ".gitignore": "ignored-dir\n",
3438 "tracked-dir": {},
3439 "ignored-dir": {}
3440 }));
3441
3442 let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
3443
3444 let tree = Worktree::local(
3445 client,
3446 dir.path(),
3447 true,
3448 Arc::new(RealFs),
3449 Default::default(),
3450 &mut cx.to_async(),
3451 )
3452 .await
3453 .unwrap();
3454 cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
3455 .await;
3456 tree.flush_fs_events(cx).await;
3457
3458 tree.update(cx, |tree, cx| {
3459 tree.as_local().unwrap().write_file(
3460 Path::new("tracked-dir/file.txt"),
3461 "hello".into(),
3462 Default::default(),
3463 cx,
3464 )
3465 })
3466 .await
3467 .unwrap();
3468 tree.update(cx, |tree, cx| {
3469 tree.as_local().unwrap().write_file(
3470 Path::new("ignored-dir/file.txt"),
3471 "world".into(),
3472 Default::default(),
3473 cx,
3474 )
3475 })
3476 .await
3477 .unwrap();
3478
3479 tree.read_with(cx, |tree, _| {
3480 let tracked = tree.entry_for_path("tracked-dir/file.txt").unwrap();
3481 let ignored = tree.entry_for_path("ignored-dir/file.txt").unwrap();
3482 assert!(!tracked.is_ignored);
3483 assert!(ignored.is_ignored);
3484 });
3485 }
3486
    #[gpui::test(iterations = 30)]
    async fn test_create_directory_during_initial_scan(cx: &mut TestAppContext) {
        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));

        let fs = FakeFs::new(cx.background());
        fs.insert_tree(
            "/root",
            json!({
                "b": {},
                "c": {},
                "d": {},
            }),
        )
        .await;

        let tree = Worktree::local(
            client,
            "/root".as_ref(),
            true,
            fs,
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();

        // Capture a snapshot before creating the entry, so we can later verify
        // that a remote observer applying the resulting update converges.
        let mut snapshot1 = tree.update(cx, |tree, _| tree.as_local().unwrap().snapshot());

        // Create a nested directory while the initial scan may still be in
        // progress (the test deliberately does not await scan_complete first).
        let entry = tree
            .update(cx, |tree, cx| {
                tree.as_local_mut()
                    .unwrap()
                    .create_entry("a/e".as_ref(), true, cx)
            })
            .await
            .unwrap();
        assert!(entry.is_dir());

        cx.foreground().run_until_parked();
        tree.read_with(cx, |tree, _| {
            assert_eq!(tree.entry_for_path("a/e").unwrap().kind, EntryKind::Dir);
        });

        // The delta between the two snapshots, applied as a remote update,
        // must reproduce the final state exactly.
        let snapshot2 = tree.update(cx, |tree, _| tree.as_local().unwrap().snapshot());
        let update = snapshot2.build_update(&snapshot1, 0, 0, true);
        snapshot1.apply_remote_update(update).unwrap();
        assert_eq!(snapshot1.to_vec(true), snapshot2.to_vec(true),);
    }
3535
    #[gpui::test(iterations = 100)]
    async fn test_random_worktree_operations_during_initial_scan(
        cx: &mut TestAppContext,
        mut rng: StdRng,
    ) {
        // Both knobs can be overridden from the environment for longer runs.
        let operations = env::var("OPERATIONS")
            .map(|o| o.parse().unwrap())
            .unwrap_or(5);
        let initial_entries = env::var("INITIAL_ENTRIES")
            .map(|o| o.parse().unwrap())
            .unwrap_or(20);

        // Populate a fake filesystem with a random tree before starting.
        let root_dir = Path::new("/test");
        let fs = FakeFs::new(cx.background()) as Arc<dyn Fs>;
        fs.as_fake().insert_tree(root_dir, json!({})).await;
        for _ in 0..initial_entries {
            randomly_mutate_fs(&fs, root_dir, 1.0, &mut rng).await;
        }
        log::info!("generated initial tree");

        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
        let worktree = Worktree::local(
            client.clone(),
            root_dir,
            true,
            fs.clone(),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();

        // Baseline snapshot taken while the initial scan is still running.
        let mut snapshot = worktree.update(cx, |tree, _| tree.as_local().unwrap().snapshot());

        // Mutate the worktree concurrently with the initial scan, checking
        // invariants after every operation.
        for _ in 0..operations {
            worktree
                .update(cx, |worktree, cx| {
                    randomly_mutate_worktree(worktree, &mut rng, cx)
                })
                .await
                .log_err();
            worktree.read_with(cx, |tree, _| {
                tree.as_local().unwrap().snapshot.check_invariants()
            });

            // Occasionally verify that applying a remote update to the stale
            // snapshot reproduces the current one.
            if rng.gen_bool(0.6) {
                let new_snapshot =
                    worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
                let update = new_snapshot.build_update(&snapshot, 0, 0, true);
                snapshot.apply_remote_update(update.clone()).unwrap();
                assert_eq!(
                    snapshot.to_vec(true),
                    new_snapshot.to_vec(true),
                    "incorrect snapshot after update {:?}",
                    update
                );
            }
        }

        // After the scan settles, the invariants and the update protocol must
        // still hold for the final state.
        worktree
            .update(cx, |tree, _| tree.as_local_mut().unwrap().scan_complete())
            .await;
        worktree.read_with(cx, |tree, _| {
            tree.as_local().unwrap().snapshot.check_invariants()
        });

        let new_snapshot = worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
        let update = new_snapshot.build_update(&snapshot, 0, 0, true);
        snapshot.apply_remote_update(update.clone()).unwrap();
        assert_eq!(
            snapshot.to_vec(true),
            new_snapshot.to_vec(true),
            "incorrect snapshot after update {:?}",
            update
        );
    }
3612
    #[gpui::test(iterations = 100)]
    async fn test_random_worktree_changes(cx: &mut TestAppContext, mut rng: StdRng) {
        // Both knobs can be overridden from the environment for longer runs.
        let operations = env::var("OPERATIONS")
            .map(|o| o.parse().unwrap())
            .unwrap_or(40);
        let initial_entries = env::var("INITIAL_ENTRIES")
            .map(|o| o.parse().unwrap())
            .unwrap_or(20);

        // Populate a fake filesystem with a random tree before scanning.
        let root_dir = Path::new("/test");
        let fs = FakeFs::new(cx.background()) as Arc<dyn Fs>;
        fs.as_fake().insert_tree(root_dir, json!({})).await;
        for _ in 0..initial_entries {
            randomly_mutate_fs(&fs, root_dir, 1.0, &mut rng).await;
        }
        log::info!("generated initial tree");

        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
        let worktree = Worktree::local(
            client.clone(),
            root_dir,
            true,
            fs.clone(),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();

        worktree
            .update(cx, |tree, _| tree.as_local_mut().unwrap().scan_complete())
            .await;

        // After the initial scan is complete, the `UpdatedEntries` event can
        // be used to follow along with all changes to the worktree's snapshot.
        worktree.update(cx, |tree, cx| {
            // Maintain a sorted mirror of the worktree's paths, updated purely
            // from change events; it must always match the real path list.
            let mut paths = tree
                .as_local()
                .unwrap()
                .paths()
                .cloned()
                .collect::<Vec<_>>();

            cx.subscribe(&worktree, move |tree, _, event, _| {
                if let Event::UpdatedEntries(changes) = event {
                    for (path, change_type) in changes.iter() {
                        let path = path.clone();
                        // Either the existing position of `path`, or the
                        // position where it would be inserted.
                        let ix = match paths.binary_search(&path) {
                            Ok(ix) | Err(ix) => ix,
                        };
                        match change_type {
                            PathChange::Added => {
                                // An added path must not already be present.
                                assert_ne!(paths.get(ix), Some(&path));
                                paths.insert(ix, path);
                            }
                            PathChange::Removed => {
                                // A removed path must currently be present.
                                assert_eq!(paths.get(ix), Some(&path));
                                paths.remove(ix);
                            }
                            PathChange::Updated => {
                                // An updated path must currently be present.
                                assert_eq!(paths.get(ix), Some(&path));
                            }
                            PathChange::AddedOrUpdated => {
                                // Insert only if the path is not already there.
                                if paths[ix] != path {
                                    paths.insert(ix, path);
                                }
                            }
                        }
                    }
                    // The mirror rebuilt from events must equal the snapshot.
                    let new_paths = tree.paths().cloned().collect::<Vec<_>>();
                    assert_eq!(paths, new_paths, "incorrect changes: {:?}", changes);
                }
            })
            .detach();
        });

        // Randomly mutate the fs, sometimes flushing only a prefix of the
        // buffered fs events so the scanner observes partial batches.
        let mut snapshots = Vec::new();
        let mut mutations_len = operations;
        while mutations_len > 1 {
            randomly_mutate_fs(&fs, root_dir, 1.0, &mut rng).await;
            let buffered_event_count = fs.as_fake().buffered_event_count().await;
            if buffered_event_count > 0 && rng.gen_bool(0.3) {
                let len = rng.gen_range(0..=buffered_event_count);
                log::info!("flushing {} events", len);
                fs.as_fake().flush_events(len).await;
            } else {
                randomly_mutate_fs(&fs, root_dir, 0.6, &mut rng).await;
                mutations_len -= 1;
            }

            cx.foreground().run_until_parked();
            // Occasionally record an intermediate snapshot to diff against at
            // the end of the test.
            if rng.gen_bool(0.2) {
                log::info!("storing snapshot {}", snapshots.len());
                let snapshot =
                    worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
                snapshots.push(snapshot);
            }
        }

        // Flush everything and let the scanner settle before final checks.
        log::info!("quiescing");
        fs.as_fake().flush_events(usize::MAX).await;
        cx.foreground().run_until_parked();
        let snapshot = worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
        snapshot.check_invariants();

        // A freshly scanned worktree over the same fs must agree with the
        // incrementally maintained one.
        {
            let new_worktree = Worktree::local(
                client.clone(),
                root_dir,
                true,
                fs.clone(),
                Default::default(),
                &mut cx.to_async(),
            )
            .await
            .unwrap();
            new_worktree
                .update(cx, |tree, _| tree.as_local_mut().unwrap().scan_complete())
                .await;
            let new_snapshot =
                new_worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
            assert_eq!(snapshot.to_vec(true), new_snapshot.to_vec(true));
        }

        // Every stored intermediate snapshot must converge to the final one
        // when a remote update is applied, with and without ignored entries.
        for (i, mut prev_snapshot) in snapshots.into_iter().enumerate() {
            let include_ignored = rng.gen::<bool>();
            if !include_ignored {
                // Strip ignored entries from the stored snapshot, since the
                // update being applied will not mention them.
                let mut entries_by_path_edits = Vec::new();
                let mut entries_by_id_edits = Vec::new();
                for entry in prev_snapshot
                    .entries_by_id
                    .cursor::<()>()
                    .filter(|e| e.is_ignored)
                {
                    entries_by_path_edits.push(Edit::Remove(PathKey(entry.path.clone())));
                    entries_by_id_edits.push(Edit::Remove(entry.id));
                }

                prev_snapshot
                    .entries_by_path
                    .edit(entries_by_path_edits, &());
                prev_snapshot.entries_by_id.edit(entries_by_id_edits, &());
            }

            let update = snapshot.build_update(&prev_snapshot, 0, 0, include_ignored);
            prev_snapshot.apply_remote_update(update.clone()).unwrap();
            assert_eq!(
                prev_snapshot.to_vec(include_ignored),
                snapshot.to_vec(include_ignored),
                "wrong update for snapshot {i}. update: {:?}",
                update
            );
        }
    }
3767
    /// Performs one random worktree operation (delete, rename, or create/
    /// overwrite an entry), returning the task that completes it.
    ///
    /// NOTE(review): the order of `rng` draws is part of the contract — it
    /// keeps seeded test runs reproducible, so don't reorder the branches.
    fn randomly_mutate_worktree(
        worktree: &mut Worktree,
        rng: &mut impl Rng,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<()>> {
        let worktree = worktree.as_local_mut().unwrap();
        let snapshot = worktree.snapshot();
        // Pick any entry, including directories but excluding ignored ones.
        let entry = snapshot.entries(false).choose(rng).unwrap();

        match rng.gen_range(0_u32..100) {
            // ~34%: delete the entry (never the worktree root).
            0..=33 if entry.path.as_ref() != Path::new("") => {
                log::info!("deleting entry {:?} ({})", entry.path, entry.id.0);
                worktree.delete_entry(entry.id, cx).unwrap()
            }
            // ~33%: rename the entry into some other directory (never the root).
            ..=66 if entry.path.as_ref() != Path::new("") => {
                let other_entry = snapshot.entries(false).choose(rng).unwrap();
                let new_parent_path = if other_entry.is_dir() {
                    other_entry.path.clone()
                } else {
                    other_entry.path.parent().unwrap().into()
                };
                let mut new_path = new_parent_path.join(gen_name(rng));
                // Avoid moving an entry into itself; fall back to a fresh
                // top-level name.
                if new_path.starts_with(&entry.path) {
                    new_path = gen_name(rng).into();
                }

                log::info!(
                    "renaming entry {:?} ({}) to {:?}",
                    entry.path,
                    entry.id.0,
                    new_path
                );
                let task = worktree.rename_entry(entry.id, new_path, cx).unwrap();
                cx.foreground().spawn(async move {
                    task.await?;
                    Ok(())
                })
            }
            // Remainder (and any root-entry pick): create a child under a
            // directory, or overwrite a file with empty contents.
            _ => {
                let task = if entry.is_dir() {
                    let child_path = entry.path.join(gen_name(rng));
                    let is_dir = rng.gen_bool(0.3);
                    log::info!(
                        "creating {} at {:?}",
                        if is_dir { "dir" } else { "file" },
                        child_path,
                    );
                    worktree.create_entry(child_path, is_dir, cx)
                } else {
                    log::info!("overwriting file {:?} ({})", entry.path, entry.id.0);
                    worktree.write_file(entry.path.clone(), "".into(), Default::default(), cx)
                };
                cx.foreground().spawn(async move {
                    task.await?;
                    Ok(())
                })
            }
        }
    }
3827
    /// Performs one random mutation on the fake filesystem under `root_path`:
    /// with probability `insertion_probability` creates a file or directory,
    /// otherwise (rarely) writes a `.gitignore`, or renames/deletes an entry.
    ///
    /// NOTE(review): the order of `rng` draws is part of the contract — it
    /// keeps seeded test runs reproducible, so don't reorder the branches.
    async fn randomly_mutate_fs(
        fs: &Arc<dyn Fs>,
        root_path: &Path,
        insertion_probability: f64,
        rng: &mut impl Rng,
    ) {
        // Partition the existing paths under the root into files and dirs.
        // `root_path` itself is included in `dirs`.
        let mut files = Vec::new();
        let mut dirs = Vec::new();
        for path in fs.as_fake().paths() {
            if path.starts_with(root_path) {
                if fs.is_file(&path).await {
                    files.push(path);
                } else {
                    dirs.push(path);
                }
            }
        }

        // Insertion: forced when the tree is empty (only the root remains).
        if (files.is_empty() && dirs.len() == 1) || rng.gen_bool(insertion_probability) {
            let path = dirs.choose(rng).unwrap();
            let new_path = path.join(gen_name(rng));

            if rng.gen() {
                log::info!(
                    "creating dir {:?}",
                    new_path.strip_prefix(root_path).unwrap()
                );
                fs.create_dir(&new_path).await.unwrap();
            } else {
                log::info!(
                    "creating file {:?}",
                    new_path.strip_prefix(root_path).unwrap()
                );
                fs.create_file(&new_path, Default::default()).await.unwrap();
            }
        // 5%: write a .gitignore listing a random subset of the chosen
        // directory's files and subdirectories.
        } else if rng.gen_bool(0.05) {
            let ignore_dir_path = dirs.choose(rng).unwrap();
            let ignore_path = ignore_dir_path.join(&*GITIGNORE);

            // Candidates include `ignore_dir_path` itself (starts_with is
            // reflexive), so `subdirs` is never empty here.
            let subdirs = dirs
                .iter()
                .filter(|d| d.starts_with(&ignore_dir_path))
                .cloned()
                .collect::<Vec<_>>();
            let subfiles = files
                .iter()
                .filter(|d| d.starts_with(&ignore_dir_path))
                .cloned()
                .collect::<Vec<_>>();
            let files_to_ignore = {
                let len = rng.gen_range(0..=subfiles.len());
                subfiles.choose_multiple(rng, len)
            };
            let dirs_to_ignore = {
                // Exclusive upper bound: never ignore every candidate dir
                // (which would include the gitignore's own directory).
                let len = rng.gen_range(0..subdirs.len());
                subdirs.choose_multiple(rng, len)
            };

            // One ignore pattern per line, relative to the gitignore's dir.
            let mut ignore_contents = String::new();
            for path_to_ignore in files_to_ignore.chain(dirs_to_ignore) {
                writeln!(
                    ignore_contents,
                    "{}",
                    path_to_ignore
                        .strip_prefix(&ignore_dir_path)
                        .unwrap()
                        .to_str()
                        .unwrap()
                )
                .unwrap();
            }
            log::info!(
                "creating gitignore {:?} with contents:\n{}",
                ignore_path.strip_prefix(&root_path).unwrap(),
                ignore_contents
            );
            fs.save(
                &ignore_path,
                &ignore_contents.as_str().into(),
                Default::default(),
            )
            .await
            .unwrap();
        // Otherwise: rename or delete an existing entry.
        } else {
            // Pick a random file or non-root directory (`dirs[1..]` skips the
            // root so it is never renamed or deleted).
            let old_path = {
                let file_path = files.choose(rng);
                let dir_path = dirs[1..].choose(rng);
                file_path.into_iter().chain(dir_path).choose(rng).unwrap()
            };

            let is_rename = rng.gen();
            if is_rename {
                // Rename must not move an entry into its own subtree.
                let new_path_parent = dirs
                    .iter()
                    .filter(|d| !d.starts_with(old_path))
                    .choose(rng)
                    .unwrap();

                // 30% of eligible renames replace an existing directory
                // entirely rather than creating a new child inside it.
                let overwrite_existing_dir =
                    !old_path.starts_with(&new_path_parent) && rng.gen_bool(0.3);
                let new_path = if overwrite_existing_dir {
                    fs.remove_dir(
                        &new_path_parent,
                        RemoveOptions {
                            recursive: true,
                            ignore_if_not_exists: true,
                        },
                    )
                    .await
                    .unwrap();
                    new_path_parent.to_path_buf()
                } else {
                    new_path_parent.join(gen_name(rng))
                };

                log::info!(
                    "renaming {:?} to {}{:?}",
                    old_path.strip_prefix(&root_path).unwrap(),
                    if overwrite_existing_dir {
                        "overwrite "
                    } else {
                        ""
                    },
                    new_path.strip_prefix(&root_path).unwrap()
                );
                fs.rename(
                    &old_path,
                    &new_path,
                    fs::RenameOptions {
                        overwrite: true,
                        ignore_if_exists: true,
                    },
                )
                .await
                .unwrap();
            } else if fs.is_file(&old_path).await {
                log::info!(
                    "deleting file {:?}",
                    old_path.strip_prefix(&root_path).unwrap()
                );
                fs.remove_file(old_path, Default::default()).await.unwrap();
            } else {
                log::info!(
                    "deleting dir {:?}",
                    old_path.strip_prefix(&root_path).unwrap()
                );
                fs.remove_dir(
                    &old_path,
                    RemoveOptions {
                        recursive: true,
                        ignore_if_not_exists: true,
                    },
                )
                .await
                .unwrap();
            }
        }
    }
3986
3987 fn gen_name(rng: &mut impl Rng) -> String {
3988 (0..6)
3989 .map(|_| rng.sample(rand::distributions::Alphanumeric))
3990 .map(char::from)
3991 .collect()
3992 }
3993
    impl LocalSnapshot {
        /// Test-only consistency check: asserts that the snapshot's redundant
        /// data structures (`entries_by_path`, `entries_by_id`, the file
        /// traversals, `child_entries`, and `ignores_by_parent_abs_path`)
        /// all agree with each other.
        fn check_invariants(&self) {
            // Both entry maps must contain exactly the same (path, id) pairs.
            assert_eq!(
                self.entries_by_path
                    .cursor::<()>()
                    .map(|e| (&e.path, e.id))
                    .collect::<Vec<_>>(),
                self.entries_by_id
                    .cursor::<()>()
                    .map(|e| (&e.path, e.id))
                    .collect::<collections::BTreeSet<_>>()
                    .into_iter()
                    .collect::<Vec<_>>(),
                "entries_by_path and entries_by_id are inconsistent"
            );

            // The file traversals must yield exactly the file entries of
            // `entries_by_path`, in order; `visible_files` skips ignored ones.
            let mut files = self.files(true, 0);
            let mut visible_files = self.files(false, 0);
            for entry in self.entries_by_path.cursor::<()>() {
                if entry.is_file() {
                    assert_eq!(files.next().unwrap().inode, entry.inode);
                    if !entry.is_ignored {
                        assert_eq!(visible_files.next().unwrap().inode, entry.inode);
                    }
                }
            }

            // Neither traversal may contain extra entries.
            assert!(files.next().is_none());
            assert!(visible_files.next().is_none());

            // Walk the tree depth-first via `child_entries` (children are
            // inserted ahead of the remaining stack, preserving their order).
            let mut bfs_paths = Vec::new();
            let mut stack = vec![Path::new("")];
            while let Some(path) = stack.pop() {
                bfs_paths.push(path);
                let ix = stack.len();
                for child_entry in self.child_entries(path) {
                    stack.insert(ix, &child_entry.path);
                }
            }

            // That walk must visit the same paths in the same order as
            // iterating `entries_by_path` directly…
            let dfs_paths_via_iter = self
                .entries_by_path
                .cursor::<()>()
                .map(|e| e.path.as_ref())
                .collect::<Vec<_>>();
            assert_eq!(bfs_paths, dfs_paths_via_iter);

            // …and as the `entries` traversal (including ignored entries).
            let dfs_paths_via_traversal = self
                .entries(true)
                .map(|e| e.path.as_ref())
                .collect::<Vec<_>>();
            assert_eq!(dfs_paths_via_traversal, dfs_paths_via_iter);

            // Every tracked gitignore must correspond to an existing directory
            // entry that actually contains a .gitignore entry.
            for ignore_parent_abs_path in self.ignores_by_parent_abs_path.keys() {
                let ignore_parent_path =
                    ignore_parent_abs_path.strip_prefix(&self.abs_path).unwrap();
                assert!(self.entry_for_path(&ignore_parent_path).is_some());
                assert!(self
                    .entry_for_path(ignore_parent_path.join(&*GITIGNORE))
                    .is_some());
            }
        }

        /// Flattens the snapshot into sorted `(path, inode, is_ignored)`
        /// tuples for comparison in tests; ignored entries are included only
        /// when `include_ignored` is true.
        fn to_vec(&self, include_ignored: bool) -> Vec<(&Path, u64, bool)> {
            let mut paths = Vec::new();
            for entry in self.entries_by_path.cursor::<()>() {
                if include_ignored || !entry.is_ignored {
                    paths.push((entry.path.as_ref(), entry.inode, entry.is_ignored));
                }
            }
            paths.sort_by(|a, b| a.0.cmp(b.0));
            paths
        }
    }
4068}