1use crate::{
2 copy_recursive, ignore::IgnoreStack, DiagnosticSummary, ProjectEntryId, RemoveOptions,
3};
4use ::ignore::gitignore::{Gitignore, GitignoreBuilder};
5use anyhow::{anyhow, Context, Result};
6use client::{proto, Client};
7use clock::ReplicaId;
8use collections::{HashMap, VecDeque};
9use fs::{repository::GitRepository, Fs, LineEnding};
10use futures::{
11 channel::{
12 mpsc::{self, UnboundedSender},
13 oneshot,
14 },
15 select_biased,
16 task::Poll,
17 Stream, StreamExt,
18};
19use fuzzy::CharBag;
20use git::{DOT_GIT, GITIGNORE};
21use gpui::{executor, AppContext, AsyncAppContext, Entity, ModelContext, ModelHandle, Task};
22use language::{
23 proto::{
24 deserialize_fingerprint, deserialize_version, serialize_fingerprint, serialize_line_ending,
25 serialize_version,
26 },
27 Buffer, DiagnosticEntry, File as _, PointUtf16, Rope, RopeFingerprint, Unclipped,
28};
29use parking_lot::Mutex;
30use postage::{
31 barrier,
32 prelude::{Sink as _, Stream as _},
33 watch,
34};
35use smol::channel::{self, Sender};
36use std::{
37 any::Any,
38 cmp::{self, Ordering},
39 convert::TryFrom,
40 ffi::OsStr,
41 fmt,
42 future::Future,
43 mem,
44 ops::{Deref, DerefMut},
45 path::{Path, PathBuf},
46 pin::Pin,
47 sync::{
48 atomic::{AtomicUsize, Ordering::SeqCst},
49 Arc,
50 },
51 time::{Duration, SystemTime},
52};
53use sum_tree::{Bias, Edit, SeekTarget, SumTree, TreeSet};
54use util::{paths::HOME, ResultExt, TryFutureExt};
55
/// Identifier for a [`Worktree`]. For local worktrees this is derived from the
/// worktree model's id (see `Worktree::local`); for remote worktrees it is
/// assigned by the host via `proto::WorktreeMetadata`.
#[derive(Copy, Clone, PartialEq, Eq, Debug, Hash, PartialOrd, Ord)]
pub struct WorktreeId(usize);
58
/// A tree of files rooted at a single path on disk.
///
/// A worktree is either `Local` — backed directly by the filesystem and kept
/// up to date by a background scanner — or `Remote` — a read-mostly replica
/// whose state is streamed from a host over RPC.
pub enum Worktree {
    Local(LocalWorktree),
    Remote(RemoteWorktree),
}
63
/// A worktree backed by the local filesystem.
pub struct LocalWorktree {
    // The latest snapshot of this worktree's entries, replaced as the
    // background scanner reports updates (see `set_snapshot`).
    snapshot: LocalSnapshot,
    // Requests a rescan of specific absolute paths; the paired barrier sender
    // is dropped by the scanner once the paths have been processed, which is
    // how `refresh_entry`/`delete_entry` wait for their changes to land.
    path_changes_tx: channel::Sender<(Vec<PathBuf>, barrier::Sender)>,
    // Watch pair that is `true` while the background scanner is working;
    // `scan_complete` awaits it becoming `false`.
    is_scanning: (watch::Sender<bool>, watch::Receiver<bool>),
    // Keeps the background scanner alive for the lifetime of the worktree.
    _background_scanner_task: Task<()>,
    // Present while this worktree is shared with remote collaborators.
    share: Option<ShareState>,
    // Per-path diagnostics, each a list of `(language_server_id, entries)`
    // kept sorted by server id (binary-searched in `update_diagnostics`).
    diagnostics: HashMap<Arc<Path>, Vec<(usize, Vec<DiagnosticEntry<Unclipped<PointUtf16>>>)>>,
    // Per-path, per-language-server summary counts of errors and warnings.
    diagnostic_summaries: HashMap<Arc<Path>, HashMap<usize, DiagnosticSummary>>,
    client: Arc<Client>,
    fs: Arc<dyn Fs>,
    // Whether this worktree should be shown in the UI (see `is_visible`).
    visible: bool,
}
76
/// A replica of a worktree that lives on a remote host.
pub struct RemoteWorktree {
    // Foreground copy of the snapshot, refreshed from `background_snapshot`
    // whenever a remote update has been applied.
    snapshot: Snapshot,
    // Snapshot mutated on a background task as updates arrive from the host.
    background_snapshot: Arc<Mutex<Snapshot>>,
    project_id: u64,
    client: Arc<Client>,
    // `Some` while connected to the host; taken in `disconnected_from_host`.
    updates_tx: Option<UnboundedSender<proto::UpdateWorktree>>,
    // Waiters registered by `wait_for_snapshot`, kept sorted by scan id and
    // resolved once the corresponding scan has been observed.
    snapshot_subscriptions: VecDeque<(usize, oneshot::Sender<()>)>,
    replica_id: ReplicaId,
    // Per-path, per-language-server diagnostic summaries received from the host.
    diagnostic_summaries: HashMap<Arc<Path>, HashMap<usize, DiagnosticSummary>>,
    visible: bool,
    // Set once the connection to the host is lost; pending and future
    // snapshot waits fail after this point.
    disconnected: bool,
}
89
/// An immutable view of a worktree's entries at a point in time.
#[derive(Clone)]
pub struct Snapshot {
    id: WorktreeId,
    // Absolute path of the worktree root on the host filesystem.
    abs_path: Arc<Path>,
    // File name of the root, used for display and fuzzy matching.
    root_name: String,
    // Lowercased characters of `root_name`, used for fuzzy matching.
    root_char_bag: CharBag,
    // Entries indexed by worktree-relative path.
    entries_by_path: SumTree<Entry>,
    // Entries indexed by their `ProjectEntryId`.
    entries_by_id: SumTree<PathEntry>,

    /// A number that increases every time the worktree begins scanning
    /// a set of paths from the filesystem. This scanning could be caused
    /// by some operation performed on the worktree, such as reading or
    /// writing a file, or by an event reported by the filesystem.
    scan_id: usize,

    /// The latest scan id that has completed, and whose preceding scans
    /// have all completed. The current `scan_id` could be more than one
    /// greater than the `completed_scan_id` if operations are performed
    /// on the worktree while it is processing a file-system event.
    completed_scan_id: usize,
}
111
/// A git repository discovered within the worktree.
#[derive(Clone)]
pub struct GitRepositoryEntry {
    pub(crate) repo: Arc<Mutex<dyn GitRepository>>,

    // Scan id at which this entry was last updated; compared in
    // `LocalWorktree::changed_repos` to detect repositories that changed
    // between snapshots.
    pub(crate) scan_id: usize,
    // Path to folder containing the .git file or directory
    pub(crate) content_path: Arc<Path>,
    // Path to the actual .git folder.
    // Note: if .git is a file, this points to the folder indicated by the .git file
    pub(crate) git_dir_path: Arc<Path>,
}
123
124impl std::fmt::Debug for GitRepositoryEntry {
125 fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
126 f.debug_struct("GitRepositoryEntry")
127 .field("content_path", &self.content_path)
128 .field("git_dir_path", &self.git_dir_path)
129 .finish()
130 }
131}
132
/// A [`Snapshot`] augmented with the local-only state needed by the
/// background scanner: gitignore data, discovered repositories, and
/// bookkeeping for entry-id reuse.
#[derive(Debug)]
pub struct LocalSnapshot {
    // For each directory containing a `.gitignore`, the parsed ignore file
    // and the scan id at which it was last observed.
    ignores_by_parent_abs_path: HashMap<Arc<Path>, (Arc<Gitignore>, usize)>,
    // Git repositories found during scanning.
    git_repositories: Vec<GitRepositoryEntry>,
    // Maps removed entries (keyed by an inode-like u64) back to their old
    // ids so re-created entries can keep a stable `ProjectEntryId`.
    removed_entry_ids: HashMap<u64, ProjectEntryId>,
    // Shared counter used to mint new entry ids.
    next_entry_id: Arc<AtomicUsize>,
    snapshot: Snapshot,
}
141
142impl Clone for LocalSnapshot {
143 fn clone(&self) -> Self {
144 Self {
145 ignores_by_parent_abs_path: self.ignores_by_parent_abs_path.clone(),
146 git_repositories: self.git_repositories.iter().cloned().collect(),
147 removed_entry_ids: self.removed_entry_ids.clone(),
148 next_entry_id: self.next_entry_id.clone(),
149 snapshot: self.snapshot.clone(),
150 }
151 }
152}
153
/// Allow a `LocalSnapshot` to be used anywhere a plain [`Snapshot`] is
/// expected by dereferencing to its inner snapshot.
impl Deref for LocalSnapshot {
    type Target = Snapshot;

    fn deref(&self) -> &Self::Target {
        &self.snapshot
    }
}
161
/// Mutable counterpart of the `Deref` impl above.
impl DerefMut for LocalSnapshot {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.snapshot
    }
}
167
/// Messages sent from the background scanner to the worktree model
/// (consumed in `Worktree::local`).
enum ScanState {
    /// A scan has begun; the worktree should report itself as scanning.
    Started,
    /// The scanner produced an updated snapshot.
    Updated {
        snapshot: LocalSnapshot,
        /// Paths whose entries changed in this update.
        changes: HashMap<Arc<Path>, PathChange>,
        /// When present, dropped after the update is applied so that callers
        /// blocked on a targeted rescan (e.g. `refresh_entry`) can resume.
        barrier: Option<barrier::Sender>,
        /// Whether the scanner is still working after this update.
        scanning: bool,
    },
}
177
/// State held while a local worktree is shared with collaborators.
struct ShareState {
    project_id: u64,
    // Publishes each new snapshot to the background task that streams
    // updates to the server (see `LocalWorktree::share`).
    snapshots_tx: watch::Sender<LocalSnapshot>,
    // Signalled to retry sending updates after a failed request.
    resume_updates: watch::Sender<()>,
    // Background task that diffs successive snapshots and sends the
    // resulting updates; dropped (cancelled) when sharing stops.
    _maintain_remote_snapshot: Task<Option<()>>,
}
184
/// Events emitted by a [`Worktree`] model.
pub enum Event {
    /// Entries changed, keyed by path with the kind of change.
    UpdatedEntries(HashMap<Arc<Path>, PathChange>),
    /// Git repositories were added, removed, or rescanned.
    UpdatedGitRepositories(Vec<GitRepositoryEntry>),
}
189
// Register `Worktree` as a GPUI model entity that emits `Event`s.
impl Entity for Worktree {
    type Event = Event;
}
193
impl Worktree {
    /// Creates a local worktree rooted at `path`, spawning a background
    /// scanner that keeps its snapshot in sync with the filesystem.
    ///
    /// The root path is stat'd up front so that the root entry and the
    /// "root name" (used for fuzzy matching) can be populated before the
    /// model exists. Returns an error if the root path cannot be stat'd.
    pub async fn local(
        client: Arc<Client>,
        path: impl Into<Arc<Path>>,
        visible: bool,
        fs: Arc<dyn Fs>,
        next_entry_id: Arc<AtomicUsize>,
        cx: &mut AsyncAppContext,
    ) -> Result<ModelHandle<Self>> {
        // After determining whether the root entry is a file or a directory, populate the
        // snapshot's "root name", which will be used for the purpose of fuzzy matching.
        let abs_path = path.into();
        let metadata = fs
            .metadata(&abs_path)
            .await
            .context("failed to stat worktree path")?;

        Ok(cx.add_model(move |cx: &mut ModelContext<Worktree>| {
            let root_name = abs_path
                .file_name()
                .map_or(String::new(), |f| f.to_string_lossy().to_string());

            let mut snapshot = LocalSnapshot {
                ignores_by_parent_abs_path: Default::default(),
                git_repositories: Default::default(),
                removed_entry_ids: Default::default(),
                next_entry_id,
                snapshot: Snapshot {
                    id: WorktreeId::from_usize(cx.model_id()),
                    abs_path: abs_path.clone(),
                    root_name: root_name.clone(),
                    root_char_bag: root_name.chars().map(|c| c.to_ascii_lowercase()).collect(),
                    entries_by_path: Default::default(),
                    entries_by_id: Default::default(),
                    // Scan ids start at 1 with nothing completed yet, so the
                    // first scan is observably "in progress".
                    scan_id: 1,
                    completed_scan_id: 0,
                },
            };

            // If the root exists, seed the snapshot with its entry so the
            // worktree is usable before the first full scan completes.
            if let Some(metadata) = metadata {
                snapshot.insert_entry(
                    Entry::new(
                        Arc::from(Path::new("")),
                        &metadata,
                        &snapshot.next_entry_id,
                        snapshot.root_char_bag,
                    ),
                    fs.as_ref(),
                );
            }

            let (path_changes_tx, path_changes_rx) = channel::unbounded();
            let (scan_states_tx, mut scan_states_rx) = mpsc::unbounded();

            // Forward scan states from the background scanner into the model:
            // maintain the `is_scanning` flag, install new snapshots, and emit
            // `UpdatedEntries` events. Holds only a weak handle so this task
            // ends when the worktree is dropped.
            cx.spawn_weak(|this, mut cx| async move {
                while let Some((state, this)) = scan_states_rx.next().await.zip(this.upgrade(&cx)) {
                    this.update(&mut cx, |this, cx| {
                        let this = this.as_local_mut().unwrap();
                        match state {
                            ScanState::Started => {
                                *this.is_scanning.0.borrow_mut() = true;
                            }
                            ScanState::Updated {
                                snapshot,
                                changes,
                                barrier,
                                scanning,
                            } => {
                                *this.is_scanning.0.borrow_mut() = scanning;
                                this.set_snapshot(snapshot, cx);
                                cx.emit(Event::UpdatedEntries(changes));
                                // Dropping the barrier unblocks whoever
                                // requested this targeted rescan.
                                drop(barrier);
                            }
                        }
                        cx.notify();
                    });
                }
            })
            .detach();

            // Run the scanner on the background executor, watching the root
            // for filesystem events (debounced to 100ms).
            let background_scanner_task = cx.background().spawn({
                let fs = fs.clone();
                let snapshot = snapshot.clone();
                let background = cx.background().clone();
                async move {
                    let events = fs.watch(&abs_path, Duration::from_millis(100)).await;
                    BackgroundScanner::new(
                        snapshot,
                        fs,
                        scan_states_tx,
                        background,
                        path_changes_rx,
                    )
                    .run(events)
                    .await;
                }
            });

            Worktree::Local(LocalWorktree {
                snapshot,
                is_scanning: watch::channel_with(true),
                share: None,
                path_changes_tx,
                _background_scanner_task: background_scanner_task,
                diagnostics: Default::default(),
                diagnostic_summaries: Default::default(),
                client,
                fs,
                visible,
            })
        }))
    }

    /// Creates a remote worktree replica from the host's metadata.
    ///
    /// Updates from the host are applied to a shared `background_snapshot`
    /// on the background executor; a second task copies that snapshot to the
    /// foreground model and resolves any pending scan-id subscriptions.
    pub fn remote(
        project_remote_id: u64,
        replica_id: ReplicaId,
        worktree: proto::WorktreeMetadata,
        client: Arc<Client>,
        cx: &mut AppContext,
    ) -> ModelHandle<Self> {
        cx.add_model(|cx: &mut ModelContext<Self>| {
            let snapshot = Snapshot {
                id: WorktreeId(worktree.id as usize),
                abs_path: Arc::from(PathBuf::from(worktree.abs_path)),
                root_name: worktree.root_name.clone(),
                root_char_bag: worktree
                    .root_name
                    .chars()
                    .map(|c| c.to_ascii_lowercase())
                    .collect(),
                entries_by_path: Default::default(),
                entries_by_id: Default::default(),
                scan_id: 1,
                completed_scan_id: 0,
            };

            let (updates_tx, mut updates_rx) = mpsc::unbounded();
            let background_snapshot = Arc::new(Mutex::new(snapshot.clone()));
            let (mut snapshot_updated_tx, mut snapshot_updated_rx) = watch::channel();

            // Apply incoming updates off the main thread, then signal that a
            // new snapshot is available.
            cx.background()
                .spawn({
                    let background_snapshot = background_snapshot.clone();
                    async move {
                        while let Some(update) = updates_rx.next().await {
                            if let Err(error) =
                                background_snapshot.lock().apply_remote_update(update)
                            {
                                log::error!("error applying worktree update: {}", error);
                            }
                            snapshot_updated_tx.send(()).await.ok();
                        }
                    }
                })
                .detach();

            // Mirror the background snapshot into the foreground model and
            // wake any `wait_for_snapshot` subscribers whose scan id has now
            // been observed.
            cx.spawn_weak(|this, mut cx| async move {
                while (snapshot_updated_rx.recv().await).is_some() {
                    if let Some(this) = this.upgrade(&cx) {
                        this.update(&mut cx, |this, cx| {
                            let this = this.as_remote_mut().unwrap();
                            this.snapshot = this.background_snapshot.lock().clone();
                            cx.emit(Event::UpdatedEntries(Default::default()));
                            cx.notify();
                            // Subscriptions are kept sorted by scan id, so we
                            // can pop satisfied waiters from the front.
                            while let Some((scan_id, _)) = this.snapshot_subscriptions.front() {
                                if this.observed_snapshot(*scan_id) {
                                    let (_, tx) = this.snapshot_subscriptions.pop_front().unwrap();
                                    let _ = tx.send(());
                                } else {
                                    break;
                                }
                            }
                        });
                    } else {
                        break;
                    }
                }
            })
            .detach();

            Worktree::Remote(RemoteWorktree {
                project_id: project_remote_id,
                replica_id,
                snapshot: snapshot.clone(),
                background_snapshot,
                updates_tx: Some(updates_tx),
                snapshot_subscriptions: Default::default(),
                client: client.clone(),
                diagnostic_summaries: Default::default(),
                visible: worktree.visible,
                disconnected: false,
            })
        })
    }

    /// Returns the local variant, or `None` if this worktree is remote.
    pub fn as_local(&self) -> Option<&LocalWorktree> {
        if let Worktree::Local(worktree) = self {
            Some(worktree)
        } else {
            None
        }
    }

    /// Returns the remote variant, or `None` if this worktree is local.
    pub fn as_remote(&self) -> Option<&RemoteWorktree> {
        if let Worktree::Remote(worktree) = self {
            Some(worktree)
        } else {
            None
        }
    }

    /// Mutable counterpart of [`Worktree::as_local`].
    pub fn as_local_mut(&mut self) -> Option<&mut LocalWorktree> {
        if let Worktree::Local(worktree) = self {
            Some(worktree)
        } else {
            None
        }
    }

    /// Mutable counterpart of [`Worktree::as_remote`].
    pub fn as_remote_mut(&mut self) -> Option<&mut RemoteWorktree> {
        if let Worktree::Remote(worktree) = self {
            Some(worktree)
        } else {
            None
        }
    }

    /// Whether this worktree is backed by the local filesystem.
    pub fn is_local(&self) -> bool {
        matches!(self, Worktree::Local(_))
    }

    /// Whether this worktree is a remote replica.
    pub fn is_remote(&self) -> bool {
        !self.is_local()
    }

    /// Returns a clone of the current [`Snapshot`], regardless of variant.
    pub fn snapshot(&self) -> Snapshot {
        match self {
            Worktree::Local(worktree) => worktree.snapshot().snapshot,
            Worktree::Remote(worktree) => worktree.snapshot(),
        }
    }

    /// The id of the most recently started scan (see `Snapshot::scan_id`).
    pub fn scan_id(&self) -> usize {
        match self {
            Worktree::Local(worktree) => worktree.snapshot.scan_id,
            Worktree::Remote(worktree) => worktree.snapshot.scan_id,
        }
    }

    /// The most recent scan id whose work (and all prior scans) has finished.
    pub fn completed_scan_id(&self) -> usize {
        match self {
            Worktree::Local(worktree) => worktree.snapshot.completed_scan_id,
            Worktree::Remote(worktree) => worktree.snapshot.completed_scan_id,
        }
    }

    /// Whether this worktree should be shown in the UI.
    pub fn is_visible(&self) -> bool {
        match self {
            Worktree::Local(worktree) => worktree.visible,
            Worktree::Remote(worktree) => worktree.visible,
        }
    }

    /// The replica id for collaboration; local worktrees are always replica 0.
    pub fn replica_id(&self) -> ReplicaId {
        match self {
            Worktree::Local(_) => 0,
            Worktree::Remote(worktree) => worktree.replica_id,
        }
    }

    /// Iterates over all diagnostic summaries as
    /// `(path, language_server_id, summary)` tuples, flattening the
    /// per-path, per-server maps.
    pub fn diagnostic_summaries(
        &self,
    ) -> impl Iterator<Item = (Arc<Path>, usize, DiagnosticSummary)> + '_ {
        match self {
            Worktree::Local(worktree) => &worktree.diagnostic_summaries,
            Worktree::Remote(worktree) => &worktree.diagnostic_summaries,
        }
        .iter()
        .flat_map(|(path, summaries)| {
            summaries
                .iter()
                .map(move |(&server_id, &summary)| (path.clone(), server_id, summary))
        })
    }

    /// The absolute path of the worktree root.
    pub fn abs_path(&self) -> Arc<Path> {
        match self {
            Worktree::Local(worktree) => worktree.abs_path.clone(),
            Worktree::Remote(worktree) => worktree.abs_path.clone(),
        }
    }
}
486
impl LocalWorktree {
    /// Whether `path` lies within this worktree's root.
    pub fn contains_abs_path(&self, path: &Path) -> bool {
        path.starts_with(&self.abs_path)
    }

    /// Converts a worktree-relative path into an absolute path. A path with
    /// no file name (i.e. the empty path) refers to the worktree root itself.
    fn absolutize(&self, path: &Path) -> PathBuf {
        if path.file_name().is_some() {
            self.abs_path.join(path)
        } else {
            self.abs_path.to_path_buf()
        }
    }

    /// Loads the file at `path` and builds a [`Buffer`] for it, computing the
    /// git diff base and an initial diff when the file is in a repository.
    pub(crate) fn load_buffer(
        &mut self,
        path: &Path,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<ModelHandle<Buffer>>> {
        let path = Arc::from(path);
        cx.spawn(move |this, mut cx| async move {
            let (file, contents, diff_base) = this
                .update(&mut cx, |t, cx| t.as_local().unwrap().load(&path, cx))
                .await?;
            Ok(cx.add_model(|cx| {
                let mut buffer = Buffer::from_file(0, contents, diff_base, Arc::new(file), cx);
                buffer.git_diff_recalc(cx);
                buffer
            }))
        })
    }

    /// Returns the `(language_server_id, diagnostics)` pairs recorded for
    /// `path`, or an empty vec if none.
    pub fn diagnostics_for_path(
        &self,
        path: &Path,
    ) -> Vec<(usize, Vec<DiagnosticEntry<Unclipped<PointUtf16>>>)> {
        self.diagnostics.get(path).cloned().unwrap_or_default()
    }

    /// Replaces the diagnostics reported by `server_id` for `worktree_path`.
    ///
    /// Empty diagnostics remove the server's entry (and the path's maps when
    /// they become empty). If shared, the new summary is sent to the server.
    /// Returns `Ok(true)` when the summary changed in a way observers care
    /// about (old or new summary non-empty).
    pub fn update_diagnostics(
        &mut self,
        server_id: usize,
        worktree_path: Arc<Path>,
        diagnostics: Vec<DiagnosticEntry<Unclipped<PointUtf16>>>,
        _: &mut ModelContext<Worktree>,
    ) -> Result<bool> {
        let summaries_by_server_id = self
            .diagnostic_summaries
            .entry(worktree_path.clone())
            .or_default();

        let old_summary = summaries_by_server_id
            .remove(&server_id)
            .unwrap_or_default();

        let new_summary = DiagnosticSummary::new(&diagnostics);
        if new_summary.is_empty() {
            // No diagnostics remain for this server: drop its entry from the
            // sorted per-path list, and the list itself if now empty.
            if let Some(diagnostics_by_server_id) = self.diagnostics.get_mut(&worktree_path) {
                if let Ok(ix) = diagnostics_by_server_id.binary_search_by_key(&server_id, |e| e.0) {
                    diagnostics_by_server_id.remove(ix);
                }
                if diagnostics_by_server_id.is_empty() {
                    self.diagnostics.remove(&worktree_path);
                }
            }
        } else {
            summaries_by_server_id.insert(server_id, new_summary);
            // Keep the per-path list sorted by server id so it stays
            // binary-searchable.
            let diagnostics_by_server_id =
                self.diagnostics.entry(worktree_path.clone()).or_default();
            match diagnostics_by_server_id.binary_search_by_key(&server_id, |e| e.0) {
                Ok(ix) => {
                    diagnostics_by_server_id[ix] = (server_id, diagnostics);
                }
                Err(ix) => {
                    diagnostics_by_server_id.insert(ix, (server_id, diagnostics));
                }
            }
        }

        if !old_summary.is_empty() || !new_summary.is_empty() {
            if let Some(share) = self.share.as_ref() {
                self.client
                    .send(proto::UpdateDiagnosticSummary {
                        project_id: share.project_id,
                        worktree_id: self.id().to_proto(),
                        summary: Some(proto::DiagnosticSummary {
                            path: worktree_path.to_string_lossy().to_string(),
                            language_server_id: server_id as u64,
                            error_count: new_summary.error_count as u32,
                            warning_count: new_summary.warning_count as u32,
                        }),
                    })
                    .log_err();
            }
        }

        Ok(!old_summary.is_empty() || !new_summary.is_empty())
    }

    /// Installs a new snapshot from the background scanner, forwarding it to
    /// collaborators when shared and emitting `UpdatedGitRepositories` for any
    /// repositories that changed.
    fn set_snapshot(&mut self, new_snapshot: LocalSnapshot, cx: &mut ModelContext<Worktree>) {
        let updated_repos = Self::changed_repos(
            &self.snapshot.git_repositories,
            &new_snapshot.git_repositories,
        );
        self.snapshot = new_snapshot;

        if let Some(share) = self.share.as_mut() {
            *share.snapshots_tx.borrow_mut() = self.snapshot.clone();
        }

        if !updated_repos.is_empty() {
            cx.emit(Event::UpdatedGitRepositories(updated_repos));
        }
    }

    /// Computes the symmetric difference between two repository lists, where
    /// a repository "matches" only if both its `git_dir_path` and `scan_id`
    /// are equal — so rescanned repositories count as changed.
    fn changed_repos(
        old_repos: &[GitRepositoryEntry],
        new_repos: &[GitRepositoryEntry],
    ) -> Vec<GitRepositoryEntry> {
        // Collect every entry of `a` that has no exact match in `b`, keyed by
        // git dir path so each repository appears at most once.
        fn diff<'a>(
            a: &'a [GitRepositoryEntry],
            b: &'a [GitRepositoryEntry],
            updated: &mut HashMap<&'a Path, GitRepositoryEntry>,
        ) {
            for a_repo in a {
                let matched = b.iter().find(|b_repo| {
                    a_repo.git_dir_path == b_repo.git_dir_path && a_repo.scan_id == b_repo.scan_id
                });

                if matched.is_none() {
                    updated.insert(a_repo.git_dir_path.as_ref(), a_repo.clone());
                }
            }
        }

        let mut updated = HashMap::<&Path, GitRepositoryEntry>::default();

        // Run the diff both ways to catch removals as well as additions.
        diff(old_repos, new_repos, &mut updated);
        diff(new_repos, old_repos, &mut updated);

        updated.into_values().collect()
    }

    /// Returns a future that resolves once the background scanner reports
    /// that it is no longer scanning.
    pub fn scan_complete(&self) -> impl Future<Output = ()> {
        let mut is_scanning_rx = self.is_scanning.1.clone();
        async move {
            let mut is_scanning = is_scanning_rx.borrow().clone();
            while is_scanning {
                if let Some(value) = is_scanning_rx.recv().await {
                    is_scanning = value;
                } else {
                    break;
                }
            }
        }
    }

    /// Returns a clone of the current local snapshot.
    pub fn snapshot(&self) -> LocalSnapshot {
        self.snapshot.clone()
    }

    /// Builds the protobuf metadata used to describe this worktree to peers.
    pub fn metadata_proto(&self) -> proto::WorktreeMetadata {
        proto::WorktreeMetadata {
            id: self.id().to_proto(),
            root_name: self.root_name().to_string(),
            visible: self.visible,
            abs_path: self.abs_path().as_os_str().to_string_lossy().into(),
        }
    }

    /// Loads the file at `path`, returning its `File` entry, text contents,
    /// and (when in a git repository) the index text used as the diff base.
    /// Also refreshes the file's worktree entry so the snapshot is current.
    fn load(
        &self,
        path: &Path,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<(File, String, Option<String>)>> {
        let handle = cx.handle();
        let path = Arc::from(path);
        let abs_path = self.absolutize(&path);
        let fs = self.fs.clone();
        let snapshot = self.snapshot();

        cx.spawn(|this, mut cx| async move {
            let text = fs.load(&abs_path).await?;

            // Read the git index text for this file (off the main thread) to
            // serve as the diff base, if the file is inside a repository.
            let diff_base = if let Some(repo) = snapshot.repo_for(&path) {
                if let Ok(repo_relative) = path.strip_prefix(repo.content_path) {
                    let repo_relative = repo_relative.to_owned();
                    cx.background()
                        .spawn(async move { repo.repo.lock().load_index_text(&repo_relative) })
                        .await
                } else {
                    None
                }
            } else {
                None
            };

            // Eagerly populate the snapshot with an updated entry for the loaded file
            let entry = this
                .update(&mut cx, |this, cx| {
                    this.as_local().unwrap().refresh_entry(path, None, cx)
                })
                .await?;

            Ok((
                File {
                    entry_id: entry.id,
                    worktree: handle,
                    path: entry.path,
                    mtime: entry.mtime,
                    is_local: true,
                    is_deleted: false,
                },
                text,
                diff_base,
            ))
        })
    }

    /// Writes the buffer's contents to `path` and notifies collaborators.
    ///
    /// When `has_changed_file` is set, the buffer is additionally pointed at
    /// the (possibly new) file entry resulting from the save. Returns the
    /// saved version, fingerprint, and mtime.
    pub fn save_buffer(
        &self,
        buffer_handle: ModelHandle<Buffer>,
        path: Arc<Path>,
        has_changed_file: bool,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<(clock::Global, RopeFingerprint, SystemTime)>> {
        let handle = cx.handle();
        let buffer = buffer_handle.read(cx);

        let rpc = self.client.clone();
        let buffer_id = buffer.remote_id();
        let project_id = self.share.as_ref().map(|share| share.project_id);

        // Capture text/version before the async write so the reported save
        // state corresponds to exactly what was written.
        let text = buffer.as_rope().clone();
        let fingerprint = text.fingerprint();
        let version = buffer.version();
        let save = self.write_file(path, text, buffer.line_ending(), cx);

        cx.as_mut().spawn(|mut cx| async move {
            let entry = save.await?;

            if has_changed_file {
                let new_file = Arc::new(File {
                    entry_id: entry.id,
                    worktree: handle,
                    path: entry.path,
                    mtime: entry.mtime,
                    is_local: true,
                    is_deleted: false,
                });

                if let Some(project_id) = project_id {
                    rpc.send(proto::UpdateBufferFile {
                        project_id,
                        buffer_id,
                        file: Some(new_file.to_proto()),
                    })
                    .log_err();
                }

                buffer_handle.update(&mut cx, |buffer, cx| {
                    // NOTE(review): this inner check is redundant — we are
                    // already inside `if has_changed_file`.
                    if has_changed_file {
                        buffer.file_updated(new_file, cx).detach();
                    }
                });
            }

            if let Some(project_id) = project_id {
                rpc.send(proto::BufferSaved {
                    project_id,
                    buffer_id,
                    version: serialize_version(&version),
                    mtime: Some(entry.mtime.into()),
                    fingerprint: serialize_fingerprint(fingerprint),
                })?;
            }

            buffer_handle.update(&mut cx, |buffer, cx| {
                buffer.did_save(version.clone(), fingerprint, entry.mtime, cx);
            });

            Ok((version, fingerprint, entry.mtime))
        })
    }

    /// Creates a new file or directory at `path` and returns its refreshed
    /// worktree entry.
    pub fn create_entry(
        &self,
        path: impl Into<Arc<Path>>,
        is_dir: bool,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<Entry>> {
        let path = path.into();
        let abs_path = self.absolutize(&path);
        let fs = self.fs.clone();
        let write = cx.background().spawn(async move {
            if is_dir {
                fs.create_dir(&abs_path).await
            } else {
                // Creating a file is saving empty contents with default line
                // endings.
                fs.save(&abs_path, &Default::default(), Default::default())
                    .await
            }
        });

        cx.spawn(|this, mut cx| async move {
            write.await?;
            this.update(&mut cx, |this, cx| {
                this.as_local_mut().unwrap().refresh_entry(path, None, cx)
            })
            .await
        })
    }

    /// Writes `text` to `path` with the given line endings, then refreshes
    /// and returns the file's worktree entry.
    pub fn write_file(
        &self,
        path: impl Into<Arc<Path>>,
        text: Rope,
        line_ending: LineEnding,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<Entry>> {
        let path = path.into();
        let abs_path = self.absolutize(&path);
        let fs = self.fs.clone();
        let write = cx
            .background()
            .spawn(async move { fs.save(&abs_path, &text, line_ending).await });

        cx.spawn(|this, mut cx| async move {
            write.await?;
            this.update(&mut cx, |this, cx| {
                this.as_local_mut().unwrap().refresh_entry(path, None, cx)
            })
            .await
        })
    }

    /// Deletes the file or directory for `entry_id` from disk (directories
    /// recursively), then waits for the scanner to process the removal.
    /// Returns `None` if the entry no longer exists in the snapshot.
    pub fn delete_entry(
        &self,
        entry_id: ProjectEntryId,
        cx: &mut ModelContext<Worktree>,
    ) -> Option<Task<Result<()>>> {
        let entry = self.entry_for_id(entry_id)?.clone();
        let abs_path = self.abs_path.clone();
        let fs = self.fs.clone();

        let delete = cx.background().spawn(async move {
            // Canonicalize the root so the path handed to the scanner matches
            // the paths it observes from filesystem events.
            let mut abs_path = fs.canonicalize(&abs_path).await?;
            if entry.path.file_name().is_some() {
                abs_path = abs_path.join(&entry.path);
            }
            if entry.is_file() {
                fs.remove_file(&abs_path, Default::default()).await?;
            } else {
                fs.remove_dir(
                    &abs_path,
                    RemoveOptions {
                        recursive: true,
                        ignore_if_not_exists: false,
                    },
                )
                .await?;
            }
            anyhow::Ok(abs_path)
        });

        Some(cx.spawn(|this, mut cx| async move {
            let abs_path = delete.await?;
            // Ask the scanner to rescan the deleted path; the barrier resolves
            // once the snapshot reflects the removal.
            let (tx, mut rx) = barrier::channel();
            this.update(&mut cx, |this, _| {
                this.as_local_mut()
                    .unwrap()
                    .path_changes_tx
                    .try_send((vec![abs_path], tx))
            })?;
            rx.recv().await;
            Ok(())
        }))
    }

    /// Renames the entry `entry_id` to `new_path` on disk, then refreshes the
    /// entry (also rescanning the old path). Returns `None` if the entry no
    /// longer exists in the snapshot.
    pub fn rename_entry(
        &self,
        entry_id: ProjectEntryId,
        new_path: impl Into<Arc<Path>>,
        cx: &mut ModelContext<Worktree>,
    ) -> Option<Task<Result<Entry>>> {
        let old_path = self.entry_for_id(entry_id)?.path.clone();
        let new_path = new_path.into();
        let abs_old_path = self.absolutize(&old_path);
        let abs_new_path = self.absolutize(&new_path);
        let fs = self.fs.clone();
        let rename = cx.background().spawn(async move {
            fs.rename(&abs_old_path, &abs_new_path, Default::default())
                .await
        });

        Some(cx.spawn(|this, mut cx| async move {
            rename.await?;
            this.update(&mut cx, |this, cx| {
                this.as_local_mut()
                    .unwrap()
                    .refresh_entry(new_path.clone(), Some(old_path), cx)
            })
            .await
        }))
    }

    /// Recursively copies the entry `entry_id` to `new_path` on disk, then
    /// refreshes and returns the new entry. Returns `None` if the source
    /// entry no longer exists in the snapshot.
    pub fn copy_entry(
        &self,
        entry_id: ProjectEntryId,
        new_path: impl Into<Arc<Path>>,
        cx: &mut ModelContext<Worktree>,
    ) -> Option<Task<Result<Entry>>> {
        let old_path = self.entry_for_id(entry_id)?.path.clone();
        let new_path = new_path.into();
        let abs_old_path = self.absolutize(&old_path);
        let abs_new_path = self.absolutize(&new_path);
        let fs = self.fs.clone();
        let copy = cx.background().spawn(async move {
            copy_recursive(
                fs.as_ref(),
                &abs_old_path,
                &abs_new_path,
                Default::default(),
            )
            .await
        });

        Some(cx.spawn(|this, mut cx| async move {
            copy.await?;
            this.update(&mut cx, |this, cx| {
                this.as_local_mut()
                    .unwrap()
                    .refresh_entry(new_path.clone(), None, cx)
            })
            .await
        }))
    }

    /// Asks the background scanner to rescan `path` (and `old_path` if given,
    /// e.g. after a rename), waits for the scan to land, and returns the
    /// refreshed entry for `path`. Fails if the worktree is dropped or the
    /// path is absent after the rescan.
    fn refresh_entry(
        &self,
        path: Arc<Path>,
        old_path: Option<Arc<Path>>,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<Entry>> {
        let fs = self.fs.clone();
        let abs_root_path = self.abs_path.clone();
        let path_changes_tx = self.path_changes_tx.clone();
        cx.spawn_weak(move |this, mut cx| async move {
            // Canonicalize so the requested paths match those the scanner
            // sees in filesystem events.
            let abs_path = fs.canonicalize(&abs_root_path).await?;
            let mut paths = Vec::with_capacity(2);
            paths.push(if path.file_name().is_some() {
                abs_path.join(&path)
            } else {
                abs_path.clone()
            });
            if let Some(old_path) = old_path {
                paths.push(if old_path.file_name().is_some() {
                    abs_path.join(&old_path)
                } else {
                    abs_path.clone()
                });
            }

            // The barrier resolves once the scanner has processed the paths.
            let (tx, mut rx) = barrier::channel();
            path_changes_tx.try_send((paths, tx))?;
            rx.recv().await;
            this.upgrade(&cx)
                .ok_or_else(|| anyhow!("worktree was dropped"))?
                .update(&mut cx, |this, _| {
                    this.entry_for_path(path)
                        .cloned()
                        .ok_or_else(|| anyhow!("failed to read path after update"))
                })
        })
    }

    /// Starts (or resumes) sharing this worktree with project collaborators.
    ///
    /// On first call, sends all diagnostic summaries, then spawns a task that
    /// diffs successive snapshots and streams chunked updates to the server,
    /// retrying when a request fails and `resume_updates` is signalled. The
    /// returned task resolves once the first full snapshot has been sent.
    pub fn share(&mut self, project_id: u64, cx: &mut ModelContext<Worktree>) -> Task<Result<()>> {
        let (share_tx, share_rx) = oneshot::channel();

        if let Some(share) = self.share.as_mut() {
            // Already sharing: resolve immediately and nudge the update task
            // to resume in case it was blocked on a failed request.
            let _ = share_tx.send(());
            *share.resume_updates.borrow_mut() = ();
        } else {
            let (snapshots_tx, mut snapshots_rx) = watch::channel_with(self.snapshot());
            let (resume_updates_tx, mut resume_updates_rx) = watch::channel();
            let worktree_id = cx.model_id() as u64;

            // Send existing diagnostic summaries before any snapshot updates.
            for (path, summaries) in &self.diagnostic_summaries {
                for (&server_id, summary) in summaries {
                    if let Err(e) = self.client.send(proto::UpdateDiagnosticSummary {
                        project_id,
                        worktree_id,
                        summary: Some(summary.to_proto(server_id, &path)),
                    }) {
                        return Task::ready(Err(e));
                    }
                }
            }

            let _maintain_remote_snapshot = cx.background().spawn({
                let client = self.client.clone();
                async move {
                    let mut share_tx = Some(share_tx);
                    // Start from an empty snapshot so the first diff sends
                    // the entire worktree.
                    let mut prev_snapshot = LocalSnapshot {
                        ignores_by_parent_abs_path: Default::default(),
                        git_repositories: Default::default(),
                        removed_entry_ids: Default::default(),
                        next_entry_id: Default::default(),
                        snapshot: Snapshot {
                            id: WorktreeId(worktree_id as usize),
                            abs_path: Path::new("").into(),
                            root_name: Default::default(),
                            root_char_bag: Default::default(),
                            entries_by_path: Default::default(),
                            entries_by_id: Default::default(),
                            scan_id: 0,
                            completed_scan_id: 0,
                        },
                    };
                    while let Some(snapshot) = snapshots_rx.recv().await {
                        // Small chunks in tests exercise the chunking logic.
                        #[cfg(any(test, feature = "test-support"))]
                        const MAX_CHUNK_SIZE: usize = 2;
                        #[cfg(not(any(test, feature = "test-support")))]
                        const MAX_CHUNK_SIZE: usize = 256;

                        let update =
                            snapshot.build_update(&prev_snapshot, project_id, worktree_id, true);
                        for update in proto::split_worktree_update(update, MAX_CHUNK_SIZE) {
                            // Clear any stale resume signal before sending.
                            let _ = resume_updates_rx.try_recv();
                            while let Err(error) = client.request(update.clone()).await {
                                log::error!("failed to send worktree update: {}", error);
                                log::info!("waiting to resume updates");
                                if resume_updates_rx.next().await.is_none() {
                                    return Ok(());
                                }
                            }
                        }

                        // The first fully-sent snapshot completes `share`.
                        if let Some(share_tx) = share_tx.take() {
                            let _ = share_tx.send(());
                        }

                        prev_snapshot = snapshot;
                    }

                    Ok::<_, anyhow::Error>(())
                }
                .log_err()
            });

            self.share = Some(ShareState {
                project_id,
                snapshots_tx,
                resume_updates: resume_updates_tx,
                _maintain_remote_snapshot,
            });
        }

        cx.foreground()
            .spawn(async move { share_rx.await.map_err(|_| anyhow!("share ended")) })
    }

    /// Stops sharing, dropping the share state and cancelling the snapshot
    /// maintenance task.
    pub fn unshare(&mut self) {
        self.share.take();
    }

    /// Whether this worktree is currently shared with collaborators.
    pub fn is_shared(&self) -> bool {
        self.share.is_some()
    }
}
1055
1056impl RemoteWorktree {
1057 fn snapshot(&self) -> Snapshot {
1058 self.snapshot.clone()
1059 }
1060
1061 pub fn disconnected_from_host(&mut self) {
1062 self.updates_tx.take();
1063 self.snapshot_subscriptions.clear();
1064 self.disconnected = true;
1065 }
1066
1067 pub fn save_buffer(
1068 &self,
1069 buffer_handle: ModelHandle<Buffer>,
1070 cx: &mut ModelContext<Worktree>,
1071 ) -> Task<Result<(clock::Global, RopeFingerprint, SystemTime)>> {
1072 let buffer = buffer_handle.read(cx);
1073 let buffer_id = buffer.remote_id();
1074 let version = buffer.version();
1075 let rpc = self.client.clone();
1076 let project_id = self.project_id;
1077 cx.as_mut().spawn(|mut cx| async move {
1078 let response = rpc
1079 .request(proto::SaveBuffer {
1080 project_id,
1081 buffer_id,
1082 version: serialize_version(&version),
1083 })
1084 .await?;
1085 let version = deserialize_version(&response.version);
1086 let fingerprint = deserialize_fingerprint(&response.fingerprint)?;
1087 let mtime = response
1088 .mtime
1089 .ok_or_else(|| anyhow!("missing mtime"))?
1090 .into();
1091
1092 buffer_handle.update(&mut cx, |buffer, cx| {
1093 buffer.did_save(version.clone(), fingerprint, mtime, cx);
1094 });
1095
1096 Ok((version, fingerprint, mtime))
1097 })
1098 }
1099
1100 pub fn update_from_remote(&mut self, update: proto::UpdateWorktree) {
1101 if let Some(updates_tx) = &self.updates_tx {
1102 updates_tx
1103 .unbounded_send(update)
1104 .expect("consumer runs to completion");
1105 }
1106 }
1107
1108 fn observed_snapshot(&self, scan_id: usize) -> bool {
1109 self.completed_scan_id >= scan_id
1110 }
1111
1112 fn wait_for_snapshot(&mut self, scan_id: usize) -> impl Future<Output = Result<()>> {
1113 let (tx, rx) = oneshot::channel();
1114 if self.observed_snapshot(scan_id) {
1115 let _ = tx.send(());
1116 } else if self.disconnected {
1117 drop(tx);
1118 } else {
1119 match self
1120 .snapshot_subscriptions
1121 .binary_search_by_key(&scan_id, |probe| probe.0)
1122 {
1123 Ok(ix) | Err(ix) => self.snapshot_subscriptions.insert(ix, (scan_id, tx)),
1124 }
1125 }
1126
1127 async move {
1128 rx.await?;
1129 Ok(())
1130 }
1131 }
1132
1133 pub fn update_diagnostic_summary(
1134 &mut self,
1135 path: Arc<Path>,
1136 summary: &proto::DiagnosticSummary,
1137 ) {
1138 let server_id = summary.language_server_id as usize;
1139 let summary = DiagnosticSummary {
1140 error_count: summary.error_count as usize,
1141 warning_count: summary.warning_count as usize,
1142 };
1143
1144 if summary.is_empty() {
1145 if let Some(summaries) = self.diagnostic_summaries.get_mut(&path) {
1146 summaries.remove(&server_id);
1147 if summaries.is_empty() {
1148 self.diagnostic_summaries.remove(&path);
1149 }
1150 }
1151 } else {
1152 self.diagnostic_summaries
1153 .entry(path)
1154 .or_default()
1155 .insert(server_id, summary);
1156 }
1157 }
1158
    /// Inserts an entry (received from the remote peer) into this worktree,
    /// after first waiting until the snapshot has caught up to `scan_id`.
    pub fn insert_entry(
        &mut self,
        entry: proto::Entry,
        scan_id: usize,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<Entry>> {
        let wait_for_snapshot = self.wait_for_snapshot(scan_id);
        cx.spawn(|this, mut cx| async move {
            wait_for_snapshot.await?;
            this.update(&mut cx, |worktree, _| {
                let worktree = worktree.as_remote_mut().unwrap();
                // Mutate the shared background snapshot, then mirror the
                // result into the foreground snapshot.
                let mut snapshot = worktree.background_snapshot.lock();
                let entry = snapshot.insert_entry(entry);
                worktree.snapshot = snapshot.clone();
                entry
            })
        })
    }
1177
    /// Deletes the entry with the given id from this worktree, after first
    /// waiting until the snapshot has caught up to `scan_id`.
    pub(crate) fn delete_entry(
        &mut self,
        id: ProjectEntryId,
        scan_id: usize,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<()>> {
        let wait_for_snapshot = self.wait_for_snapshot(scan_id);
        cx.spawn(|this, mut cx| async move {
            wait_for_snapshot.await?;
            this.update(&mut cx, |worktree, _| {
                let worktree = worktree.as_remote_mut().unwrap();
                // Mutate the shared background snapshot, then mirror the
                // result into the foreground snapshot.
                let mut snapshot = worktree.background_snapshot.lock();
                snapshot.delete_entry(id);
                worktree.snapshot = snapshot.clone();
            });
            Ok(())
        })
    }
1196}
1197
impl Snapshot {
    /// The id of the worktree this snapshot belongs to.
    pub fn id(&self) -> WorktreeId {
        self.id
    }

    /// The absolute path of the worktree's root on disk.
    pub fn abs_path(&self) -> &Arc<Path> {
        &self.abs_path
    }

    /// Whether an entry with the given id exists in this snapshot.
    pub fn contains_entry(&self, entry_id: ProjectEntryId) -> bool {
        self.entries_by_id.get(&entry_id, &()).is_some()
    }

    /// Inserts an entry received over the wire, replacing any existing entry
    /// with the same id and removing that entry's old path if it moved.
    pub(crate) fn insert_entry(&mut self, entry: proto::Entry) -> Result<Entry> {
        let entry = Entry::try_from((&self.root_char_bag, entry))?;
        let old_entry = self.entries_by_id.insert_or_replace(
            PathEntry {
                id: entry.id,
                path: entry.path.clone(),
                is_ignored: entry.is_ignored,
                // Per-entry scan ids aren't tracked for remotely-applied
                // entries; they are always recorded as zero here.
                scan_id: 0,
            },
            &(),
        );
        if let Some(old_entry) = old_entry {
            self.entries_by_path.remove(&PathKey(old_entry.path), &());
        }
        self.entries_by_path.insert_or_replace(entry.clone(), &());
        Ok(entry)
    }

    /// Removes the entry with the given id, along with every entry beneath
    /// its path, returning the removed entry's path.
    fn delete_entry(&mut self, entry_id: ProjectEntryId) -> Option<Arc<Path>> {
        let removed_entry = self.entries_by_id.remove(&entry_id, &())?;
        self.entries_by_path = {
            let mut cursor = self.entries_by_path.cursor();
            // Keep everything strictly before the removed path...
            let mut new_entries_by_path =
                cursor.slice(&TraversalTarget::Path(&removed_entry.path), Bias::Left, &());
            // ...skip the removed entry and all of its descendants, removing
            // them from the id-ordered tree as we go...
            while let Some(entry) = cursor.item() {
                if entry.path.starts_with(&removed_entry.path) {
                    self.entries_by_id.remove(&entry.id, &());
                    cursor.next(&());
                } else {
                    break;
                }
            }
            // ...and keep the remaining suffix.
            new_entries_by_path.push_tree(cursor.suffix(&()), &());
            new_entries_by_path
        };

        Some(removed_entry.path)
    }

    /// Applies an update message from the remote host to this snapshot.
    pub(crate) fn apply_remote_update(&mut self, update: proto::UpdateWorktree) -> Result<()> {
        let mut entries_by_path_edits = Vec::new();
        let mut entries_by_id_edits = Vec::new();
        for entry_id in update.removed_entries {
            if let Some(entry) = self.entry_for_id(ProjectEntryId::from_proto(entry_id)) {
                entries_by_path_edits.push(Edit::Remove(PathKey(entry.path.clone())));
                entries_by_id_edits.push(Edit::Remove(entry.id));
            }
        }

        for entry in update.updated_entries {
            let entry = Entry::try_from((&self.root_char_bag, entry))?;
            // If an entry with this id already exists, remove its old path in
            // case the entry was renamed or moved.
            if let Some(PathEntry { path, .. }) = self.entries_by_id.get(&entry.id, &()) {
                entries_by_path_edits.push(Edit::Remove(PathKey(path.clone())));
            }
            entries_by_id_edits.push(Edit::Insert(PathEntry {
                id: entry.id,
                path: entry.path.clone(),
                is_ignored: entry.is_ignored,
                scan_id: 0,
            }));
            entries_by_path_edits.push(Edit::Insert(entry));
        }

        self.entries_by_path.edit(entries_by_path_edits, &());
        self.entries_by_id.edit(entries_by_id_edits, &());
        self.scan_id = update.scan_id as usize;
        if update.is_last_update {
            self.completed_scan_id = update.scan_id as usize;
        }

        Ok(())
    }

    /// Total number of file entries in the snapshot.
    pub fn file_count(&self) -> usize {
        self.entries_by_path.summary().file_count
    }

    /// Number of non-ignored file entries in the snapshot.
    pub fn visible_file_count(&self) -> usize {
        self.entries_by_path.summary().visible_file_count
    }

    /// Returns a traversal positioned at the `start_offset`-th entry that
    /// matches the given filters.
    fn traverse_from_offset(
        &self,
        include_dirs: bool,
        include_ignored: bool,
        start_offset: usize,
    ) -> Traversal {
        let mut cursor = self.entries_by_path.cursor();
        cursor.seek(
            &TraversalTarget::Count {
                count: start_offset,
                include_dirs,
                include_ignored,
            },
            Bias::Right,
            &(),
        );
        Traversal {
            cursor,
            include_dirs,
            include_ignored,
        }
    }

    /// Returns a traversal positioned at the entry for `path`, or at the
    /// first entry after it when `path` itself is absent.
    fn traverse_from_path(
        &self,
        include_dirs: bool,
        include_ignored: bool,
        path: &Path,
    ) -> Traversal {
        let mut cursor = self.entries_by_path.cursor();
        cursor.seek(&TraversalTarget::Path(path), Bias::Left, &());
        Traversal {
            cursor,
            include_dirs,
            include_ignored,
        }
    }

    /// Iterates over file entries, starting at the `start`-th file.
    pub fn files(&self, include_ignored: bool, start: usize) -> Traversal {
        self.traverse_from_offset(false, include_ignored, start)
    }

    /// Iterates over all entries, directories included.
    pub fn entries(&self, include_ignored: bool) -> Traversal {
        self.traverse_from_offset(true, include_ignored, 0)
    }

    /// Iterates over the paths of all entries, skipping the root entry
    /// (whose path is empty).
    pub fn paths(&self) -> impl Iterator<Item = &Arc<Path>> {
        let empty_path = Path::new("");
        self.entries_by_path
            .cursor::<()>()
            .filter(move |entry| entry.path.as_ref() != empty_path)
            .map(|entry| &entry.path)
    }

    /// Iterates over the immediate children of the entry at `parent_path`.
    fn child_entries<'a>(&'a self, parent_path: &'a Path) -> ChildEntriesIter<'a> {
        let mut cursor = self.entries_by_path.cursor();
        cursor.seek(&TraversalTarget::Path(parent_path), Bias::Right, &());
        let traversal = Traversal {
            cursor,
            include_dirs: true,
            include_ignored: true,
        };
        ChildEntriesIter {
            traversal,
            parent_path,
        }
    }

    /// The entry for the worktree's root, if present.
    pub fn root_entry(&self) -> Option<&Entry> {
        self.entry_for_path("")
    }

    /// The name of the worktree's root directory.
    pub fn root_name(&self) -> &str {
        &self.root_name
    }

    /// The id of the most recent scan reflected in this snapshot.
    pub fn scan_id(&self) -> usize {
        self.scan_id
    }

    /// Looks up the entry at exactly the given worktree-relative path.
    pub fn entry_for_path(&self, path: impl AsRef<Path>) -> Option<&Entry> {
        let path = path.as_ref();
        self.traverse_from_path(true, true, path)
            .entry()
            .and_then(|entry| {
                if entry.path.as_ref() == path {
                    Some(entry)
                } else {
                    None
                }
            })
    }

    /// Looks up an entry by its project entry id.
    pub fn entry_for_id(&self, id: ProjectEntryId) -> Option<&Entry> {
        let entry = self.entries_by_id.get(&id, &())?;
        self.entry_for_path(&entry.path)
    }

    /// The inode of the entry at `path`, if it exists.
    pub fn inode_for_path(&self, path: impl AsRef<Path>) -> Option<u64> {
        self.entry_for_path(path.as_ref()).map(|e| e.inode)
    }
}
1394
impl LocalSnapshot {
    // Gives the most specific git repository for a given path
    pub(crate) fn repo_for(&self, path: &Path) -> Option<GitRepositoryEntry> {
        self.git_repositories
            .iter()
            .rev() //git_repository is ordered lexicographically
            .find(|repo| repo.manages(path))
            .cloned()
    }

    /// Finds the repository whose `.git` directory contains `path`, if any.
    pub(crate) fn repo_with_dot_git_containing(
        &mut self,
        path: &Path,
    ) -> Option<&mut GitRepositoryEntry> {
        // Git repositories cannot be nested, so we don't need to reverse the order
        self.git_repositories
            .iter_mut()
            .find(|repo| repo.in_dot_git(path))
    }

    /// Builds an update message carrying this snapshot's entire contents.
    #[cfg(test)]
    pub(crate) fn build_initial_update(&self, project_id: u64) -> proto::UpdateWorktree {
        let root_name = self.root_name.clone();
        proto::UpdateWorktree {
            project_id,
            worktree_id: self.id().to_proto(),
            abs_path: self.abs_path().to_string_lossy().into(),
            root_name,
            updated_entries: self.entries_by_path.iter().map(Into::into).collect(),
            removed_entries: Default::default(),
            scan_id: self.scan_id as u64,
            is_last_update: true,
        }
    }

    /// Computes the update message that transforms `other` into `self`, by
    /// merging the two snapshots' id-ordered entry sequences.
    pub(crate) fn build_update(
        &self,
        other: &Self,
        project_id: u64,
        worktree_id: u64,
        include_ignored: bool,
    ) -> proto::UpdateWorktree {
        let mut updated_entries = Vec::new();
        let mut removed_entries = Vec::new();
        let mut self_entries = self
            .entries_by_id
            .cursor::<()>()
            .filter(|e| include_ignored || !e.is_ignored)
            .peekable();
        let mut other_entries = other
            .entries_by_id
            .cursor::<()>()
            .filter(|e| include_ignored || !e.is_ignored)
            .peekable();
        loop {
            match (self_entries.peek(), other_entries.peek()) {
                (Some(self_entry), Some(other_entry)) => {
                    match Ord::cmp(&self_entry.id, &other_entry.id) {
                        Ordering::Less => {
                            // Present only in `self`: the entry was added.
                            let entry = self.entry_for_id(self_entry.id).unwrap().into();
                            updated_entries.push(entry);
                            self_entries.next();
                        }
                        Ordering::Equal => {
                            // Present in both: include it only if it changed
                            // since the scan captured by `other`.
                            if self_entry.scan_id != other_entry.scan_id {
                                let entry = self.entry_for_id(self_entry.id).unwrap().into();
                                updated_entries.push(entry);
                            }

                            self_entries.next();
                            other_entries.next();
                        }
                        Ordering::Greater => {
                            // Present only in `other`: the entry was removed.
                            removed_entries.push(other_entry.id.to_proto());
                            other_entries.next();
                        }
                    }
                }
                (Some(self_entry), None) => {
                    let entry = self.entry_for_id(self_entry.id).unwrap().into();
                    updated_entries.push(entry);
                    self_entries.next();
                }
                (None, Some(other_entry)) => {
                    removed_entries.push(other_entry.id.to_proto());
                    other_entries.next();
                }
                (None, None) => break,
            }
        }

        proto::UpdateWorktree {
            project_id,
            worktree_id,
            abs_path: self.abs_path().to_string_lossy().into(),
            root_name: self.root_name().to_string(),
            updated_entries,
            removed_entries,
            scan_id: self.scan_id as u64,
            is_last_update: self.completed_scan_id == self.scan_id,
        }
    }

    /// Inserts a locally-scanned entry, reusing ids where possible and
    /// (re)loading the `.gitignore` file when the entry is one.
    fn insert_entry(&mut self, mut entry: Entry, fs: &dyn Fs) -> Entry {
        if entry.is_file() && entry.path.file_name() == Some(&GITIGNORE) {
            let abs_path = self.abs_path.join(&entry.path);
            match smol::block_on(build_gitignore(&abs_path, fs)) {
                Ok(ignore) => {
                    self.ignores_by_parent_abs_path.insert(
                        abs_path.parent().unwrap().into(),
                        (Arc::new(ignore), self.scan_id),
                    );
                }
                Err(error) => {
                    log::error!(
                        "error loading .gitignore file {:?} - {:?}",
                        &entry.path,
                        error
                    );
                }
            }
        }

        self.reuse_entry_id(&mut entry);

        // Don't demote an already-scanned directory back to pending.
        if entry.kind == EntryKind::PendingDir {
            if let Some(existing_entry) =
                self.entries_by_path.get(&PathKey(entry.path.clone()), &())
            {
                entry.kind = existing_entry.kind;
            }
        }

        let scan_id = self.scan_id;
        let removed = self.entries_by_path.insert_or_replace(entry.clone(), &());
        if let Some(removed) = removed {
            // The path was previously occupied by a different entry; drop the
            // stale id mapping.
            if removed.id != entry.id {
                self.entries_by_id.remove(&removed.id, &());
            }
        }
        self.entries_by_id.insert_or_replace(
            PathEntry {
                id: entry.id,
                path: entry.path.clone(),
                is_ignored: entry.is_ignored,
                scan_id,
            },
            &(),
        );

        entry
    }

    /// Records the children of a scanned directory, marking the parent as a
    /// fully-scanned dir and registering any `.gitignore` or git repository
    /// discovered inside it.
    fn populate_dir(
        &mut self,
        parent_path: Arc<Path>,
        entries: impl IntoIterator<Item = Entry>,
        ignore: Option<Arc<Gitignore>>,
        fs: &dyn Fs,
    ) {
        let mut parent_entry = if let Some(parent_entry) =
            self.entries_by_path.get(&PathKey(parent_path.clone()), &())
        {
            parent_entry.clone()
        } else {
            log::warn!(
                "populating a directory {:?} that has been removed",
                parent_path
            );
            return;
        };

        // Only directories can be populated; a pending dir becomes a real one.
        match parent_entry.kind {
            EntryKind::PendingDir => {
                parent_entry.kind = EntryKind::Dir;
            }
            EntryKind::Dir => {}
            _ => return,
        }

        if let Some(ignore) = ignore {
            self.ignores_by_parent_abs_path.insert(
                self.abs_path.join(&parent_path).into(),
                (ignore, self.scan_id),
            );
        }

        // A `.git` directory marks its parent as a repository's content root.
        if parent_path.file_name() == Some(&DOT_GIT) {
            let abs_path = self.abs_path.join(&parent_path);
            let content_path: Arc<Path> = parent_path.parent().unwrap().into();
            // Err(ix) means no repository is registered for this content path
            // yet; ix is the insertion point that keeps the list sorted.
            if let Err(ix) = self
                .git_repositories
                .binary_search_by_key(&&content_path, |repo| &repo.content_path)
            {
                if let Some(repo) = fs.open_repo(abs_path.as_path()) {
                    self.git_repositories.insert(
                        ix,
                        GitRepositoryEntry {
                            repo,
                            scan_id: 0,
                            content_path,
                            git_dir_path: parent_path,
                        },
                    );
                }
            }
        }

        let mut entries_by_path_edits = vec![Edit::Insert(parent_entry)];
        let mut entries_by_id_edits = Vec::new();

        for mut entry in entries {
            self.reuse_entry_id(&mut entry);
            entries_by_id_edits.push(Edit::Insert(PathEntry {
                id: entry.id,
                path: entry.path.clone(),
                is_ignored: entry.is_ignored,
                scan_id: self.scan_id,
            }));
            entries_by_path_edits.push(Edit::Insert(entry));
        }

        self.entries_by_path.edit(entries_by_path_edits, &());
        self.entries_by_id.edit(entries_by_id_edits, &());
    }

    /// Keeps entry ids stable across rescans: prefer the id of an entry
    /// previously removed with the same inode, then the id of the entry
    /// currently at the same path.
    fn reuse_entry_id(&mut self, entry: &mut Entry) {
        if let Some(removed_entry_id) = self.removed_entry_ids.remove(&entry.inode) {
            entry.id = removed_entry_id;
        } else if let Some(existing_entry) = self.entry_for_path(&entry.path) {
            entry.id = existing_entry.id;
        }
    }

    /// Removes `path` and all of its descendants from both entry trees,
    /// remembering removed ids (keyed by inode) for possible reuse, and
    /// bumping the scan id of any affected `.gitignore` or git repository.
    fn remove_path(&mut self, path: &Path) {
        let mut new_entries;
        let removed_entries;
        {
            // Split the path-ordered tree into [before `path`], [`path` and
            // its descendants], and [everything after], keeping the first and
            // last pieces.
            let mut cursor = self.entries_by_path.cursor::<TraversalProgress>();
            new_entries = cursor.slice(&TraversalTarget::Path(path), Bias::Left, &());
            removed_entries = cursor.slice(&TraversalTarget::PathSuccessor(path), Bias::Left, &());
            new_entries.push_tree(cursor.suffix(&()), &());
        }
        self.entries_by_path = new_entries;

        let mut entries_by_id_edits = Vec::new();
        for entry in removed_entries.cursor::<()>() {
            // Track the highest removed id per inode so a re-created file can
            // get its old id back in `reuse_entry_id`.
            let removed_entry_id = self
                .removed_entry_ids
                .entry(entry.inode)
                .or_insert(entry.id);
            *removed_entry_id = cmp::max(*removed_entry_id, entry.id);
            entries_by_id_edits.push(Edit::Remove(entry.id));
        }
        self.entries_by_id.edit(entries_by_id_edits, &());

        if path.file_name() == Some(&GITIGNORE) {
            // Mark the gitignore as touched in this scan rather than dropping
            // it outright.
            let abs_parent_path = self.abs_path.join(path.parent().unwrap());
            if let Some((_, scan_id)) = self
                .ignores_by_parent_abs_path
                .get_mut(abs_parent_path.as_path())
            {
                *scan_id = self.snapshot.scan_id;
            }
        } else if path.file_name() == Some(&DOT_GIT) {
            let parent_path = path.parent().unwrap();
            if let Ok(ix) = self
                .git_repositories
                .binary_search_by_key(&parent_path, |repo| repo.git_dir_path.as_ref())
            {
                self.git_repositories[ix].scan_id = self.snapshot.scan_id;
            }
        }
    }

    /// Collects the inodes of all ancestor entries of `path`.
    // NOTE(review): callers appear to thread these through `ScanJob` — likely
    // to detect directory cycles introduced by symlinks; confirm at call site.
    fn ancestor_inodes_for_path(&self, path: &Path) -> TreeSet<u64> {
        let mut inodes = TreeSet::default();
        for ancestor in path.ancestors().skip(1) {
            if let Some(entry) = self.entry_for_path(ancestor) {
                inodes.insert(entry.inode);
            }
        }
        inodes
    }

    /// Builds the stack of gitignores that applies at `abs_path`, walking its
    /// ancestors from the outermost inward.
    fn ignore_stack_for_abs_path(&self, abs_path: &Path, is_dir: bool) -> Arc<IgnoreStack> {
        let mut new_ignores = Vec::new();
        for ancestor in abs_path.ancestors().skip(1) {
            if let Some((ignore, _)) = self.ignores_by_parent_abs_path.get(ancestor) {
                new_ignores.push((ancestor, Some(ignore.clone())));
            } else {
                new_ignores.push((ancestor, None));
            }
        }

        let mut ignore_stack = IgnoreStack::none();
        for (parent_abs_path, ignore) in new_ignores.into_iter().rev() {
            // Once a whole ancestor directory is ignored, everything beneath
            // it is too; deeper gitignores need not be consulted.
            if ignore_stack.is_abs_path_ignored(parent_abs_path, true) {
                ignore_stack = IgnoreStack::all();
                break;
            } else if let Some(ignore) = ignore {
                ignore_stack = ignore_stack.append(parent_abs_path.into(), ignore);
            }
        }

        if ignore_stack.is_abs_path_ignored(abs_path, is_dir) {
            ignore_stack = IgnoreStack::all();
        }

        ignore_stack
    }

    /// All git repositories discovered in this worktree, ordered by content
    /// path.
    pub fn git_repo_entries(&self) -> &[GitRepositoryEntry] {
        &self.git_repositories
    }
}
1711
1712impl GitRepositoryEntry {
1713 // Note that these paths should be relative to the worktree root.
1714 pub(crate) fn manages(&self, path: &Path) -> bool {
1715 path.starts_with(self.content_path.as_ref())
1716 }
1717
1718 // Note that this path should be relative to the worktree root.
1719 pub(crate) fn in_dot_git(&self, path: &Path) -> bool {
1720 path.starts_with(self.git_dir_path.as_ref())
1721 }
1722}
1723
1724async fn build_gitignore(abs_path: &Path, fs: &dyn Fs) -> Result<Gitignore> {
1725 let contents = fs.load(abs_path).await?;
1726 let parent = abs_path.parent().unwrap_or_else(|| Path::new("/"));
1727 let mut builder = GitignoreBuilder::new(parent);
1728 for line in contents.lines() {
1729 builder.add_line(Some(abs_path.into()), line)?;
1730 }
1731 Ok(builder.build()?)
1732}
1733
1734impl WorktreeId {
1735 pub fn from_usize(handle_id: usize) -> Self {
1736 Self(handle_id)
1737 }
1738
1739 pub(crate) fn from_proto(id: u64) -> Self {
1740 Self(id as usize)
1741 }
1742
1743 pub fn to_proto(&self) -> u64 {
1744 self.0 as u64
1745 }
1746
1747 pub fn to_usize(&self) -> usize {
1748 self.0
1749 }
1750}
1751
1752impl fmt::Display for WorktreeId {
1753 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1754 self.0.fmt(f)
1755 }
1756}
1757
1758impl Deref for Worktree {
1759 type Target = Snapshot;
1760
1761 fn deref(&self) -> &Self::Target {
1762 match self {
1763 Worktree::Local(worktree) => &worktree.snapshot,
1764 Worktree::Remote(worktree) => &worktree.snapshot,
1765 }
1766 }
1767}
1768
// Allow a local worktree to be used wherever its snapshot is expected.
impl Deref for LocalWorktree {
    type Target = LocalSnapshot;

    fn deref(&self) -> &Self::Target {
        &self.snapshot
    }
}
1776
// Allow a remote worktree to be used wherever its snapshot is expected.
impl Deref for RemoteWorktree {
    type Target = Snapshot;

    fn deref(&self) -> &Self::Target {
        &self.snapshot
    }
}
1784
1785impl fmt::Debug for LocalWorktree {
1786 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1787 self.snapshot.fmt(f)
1788 }
1789}
1790
1791impl fmt::Debug for Snapshot {
1792 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1793 struct EntriesById<'a>(&'a SumTree<PathEntry>);
1794 struct EntriesByPath<'a>(&'a SumTree<Entry>);
1795
1796 impl<'a> fmt::Debug for EntriesByPath<'a> {
1797 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1798 f.debug_map()
1799 .entries(self.0.iter().map(|entry| (&entry.path, entry.id)))
1800 .finish()
1801 }
1802 }
1803
1804 impl<'a> fmt::Debug for EntriesById<'a> {
1805 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1806 f.debug_list().entries(self.0.iter()).finish()
1807 }
1808 }
1809
1810 f.debug_struct("Snapshot")
1811 .field("id", &self.id)
1812 .field("root_name", &self.root_name)
1813 .field("entries_by_path", &EntriesByPath(&self.entries_by_path))
1814 .field("entries_by_id", &EntriesById(&self.entries_by_id))
1815 .finish()
1816 }
1817}
1818
/// A handle to a file within a worktree, implementing `language::File`.
#[derive(Clone, PartialEq)]
pub struct File {
    /// The worktree containing this file.
    pub worktree: ModelHandle<Worktree>,
    /// The file's path, relative to the worktree root.
    pub path: Arc<Path>,
    /// The last-known modification time of the file.
    pub mtime: SystemTime,
    /// Id of the project entry backing this file.
    pub(crate) entry_id: ProjectEntryId,
    /// Whether the containing worktree is local (vs. remote).
    pub(crate) is_local: bool,
    /// Whether the file has been deleted.
    pub(crate) is_deleted: bool,
}
1828
impl language::File for File {
    /// Returns `self` as a local file only when the containing worktree is
    /// local.
    fn as_local(&self) -> Option<&dyn language::LocalFile> {
        if self.is_local {
            Some(self)
        } else {
            None
        }
    }

    fn mtime(&self) -> SystemTime {
        self.mtime
    }

    /// The file's path, relative to the worktree root.
    fn path(&self) -> &Arc<Path> {
        &self.path
    }

    /// The path to display to the user: the worktree's root name followed by
    /// the relative path. When the worktree isn't visible, its absolute path
    /// is used instead, abbreviating the home directory to `~`.
    fn full_path(&self, cx: &AppContext) -> PathBuf {
        let mut full_path = PathBuf::new();
        let worktree = self.worktree.read(cx);

        if worktree.is_visible() {
            full_path.push(worktree.root_name());
        } else {
            let path = worktree.abs_path();

            if worktree.is_local() && path.starts_with(HOME.as_path()) {
                full_path.push("~");
                full_path.push(path.strip_prefix(HOME.as_path()).unwrap());
            } else {
                full_path.push(path)
            }
        }

        // A root file has an empty relative path; avoid appending it.
        if self.path.components().next().is_some() {
            full_path.push(&self.path);
        }

        full_path
    }

    /// Returns the last component of this handle's absolute path. If this handle refers to the root
    /// of its worktree, then this method will return the name of the worktree itself.
    fn file_name<'a>(&'a self, cx: &'a AppContext) -> &'a OsStr {
        self.path
            .file_name()
            .unwrap_or_else(|| OsStr::new(&self.worktree.read(cx).root_name))
    }

    fn is_deleted(&self) -> bool {
        self.is_deleted
    }

    /// Enables downcasting back to the concrete `File` type.
    fn as_any(&self) -> &dyn Any {
        self
    }

    /// Serializes this file handle for transmission over RPC.
    fn to_proto(&self) -> rpc::proto::File {
        rpc::proto::File {
            worktree_id: self.worktree.id() as u64,
            entry_id: self.entry_id.to_proto(),
            path: self.path.to_string_lossy().into(),
            mtime: Some(self.mtime.into()),
            is_deleted: self.is_deleted,
        }
    }
}
1896
impl language::LocalFile for File {
    /// The file's absolute path on disk, derived from the worktree's root.
    fn abs_path(&self, cx: &AppContext) -> PathBuf {
        self.worktree
            .read(cx)
            .as_local()
            .unwrap()
            .abs_path
            .join(&self.path)
    }

    /// Loads the file's contents from disk on a background thread.
    fn load(&self, cx: &AppContext) -> Task<Result<String>> {
        let worktree = self.worktree.read(cx).as_local().unwrap();
        let abs_path = worktree.absolutize(&self.path);
        let fs = worktree.fs.clone();
        cx.background()
            .spawn(async move { fs.load(&abs_path).await })
    }

    /// Notifies collaborators (when the worktree is shared) that a buffer
    /// backed by this file was reloaded from disk.
    fn buffer_reloaded(
        &self,
        buffer_id: u64,
        version: &clock::Global,
        fingerprint: RopeFingerprint,
        line_ending: LineEnding,
        mtime: SystemTime,
        cx: &mut AppContext,
    ) {
        let worktree = self.worktree.read(cx).as_local().unwrap();
        if let Some(project_id) = worktree.share.as_ref().map(|share| share.project_id) {
            worktree
                .client
                .send(proto::BufferReloaded {
                    project_id,
                    buffer_id,
                    version: serialize_version(version),
                    mtime: Some(mtime.into()),
                    fingerprint: serialize_fingerprint(fingerprint),
                    line_ending: serialize_line_ending(line_ending) as i32,
                })
                .log_err();
        }
    }
}
1940
1941impl File {
1942 pub fn from_proto(
1943 proto: rpc::proto::File,
1944 worktree: ModelHandle<Worktree>,
1945 cx: &AppContext,
1946 ) -> Result<Self> {
1947 let worktree_id = worktree
1948 .read(cx)
1949 .as_remote()
1950 .ok_or_else(|| anyhow!("not remote"))?
1951 .id();
1952
1953 if worktree_id.to_proto() != proto.worktree_id {
1954 return Err(anyhow!("worktree id does not match file"));
1955 }
1956
1957 Ok(Self {
1958 worktree,
1959 path: Path::new(&proto.path).into(),
1960 mtime: proto.mtime.ok_or_else(|| anyhow!("no timestamp"))?.into(),
1961 entry_id: ProjectEntryId::from_proto(proto.entry_id),
1962 is_local: false,
1963 is_deleted: proto.is_deleted,
1964 })
1965 }
1966
1967 pub fn from_dyn(file: Option<&Arc<dyn language::File>>) -> Option<&Self> {
1968 file.and_then(|f| f.as_any().downcast_ref())
1969 }
1970
1971 pub fn worktree_id(&self, cx: &AppContext) -> WorktreeId {
1972 self.worktree.read(cx).id()
1973 }
1974
1975 pub fn project_entry_id(&self, _: &AppContext) -> Option<ProjectEntryId> {
1976 if self.is_deleted {
1977 None
1978 } else {
1979 Some(self.entry_id)
1980 }
1981 }
1982}
1983
/// A single file or directory within a worktree snapshot.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct Entry {
    pub id: ProjectEntryId,
    /// Whether this is a file or a (possibly still-pending) directory.
    pub kind: EntryKind,
    /// Path relative to the worktree root.
    pub path: Arc<Path>,
    pub inode: u64,
    pub mtime: SystemTime,
    pub is_symlink: bool,
    /// Whether the entry is excluded by gitignore rules.
    pub is_ignored: bool,
}
1994
/// Distinguishes files from directories within a snapshot.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum EntryKind {
    /// A directory whose children have not been scanned yet.
    PendingDir,
    /// A fully-scanned directory.
    Dir,
    /// A file, along with the character bag used for fuzzy matching.
    File(CharBag),
}
2001
/// The kind of change observed for a path on disk.
#[derive(Clone, Copy, Debug)]
pub enum PathChange {
    /// The path was created.
    Added,
    /// The path was removed.
    Removed,
    /// The path was modified.
    Updated,
    /// The path was created or modified; which one is unknown.
    AddedOrUpdated,
}
2009
2010impl Entry {
2011 fn new(
2012 path: Arc<Path>,
2013 metadata: &fs::Metadata,
2014 next_entry_id: &AtomicUsize,
2015 root_char_bag: CharBag,
2016 ) -> Self {
2017 Self {
2018 id: ProjectEntryId::new(next_entry_id),
2019 kind: if metadata.is_dir {
2020 EntryKind::PendingDir
2021 } else {
2022 EntryKind::File(char_bag_for_path(root_char_bag, &path))
2023 },
2024 path,
2025 inode: metadata.inode,
2026 mtime: metadata.mtime,
2027 is_symlink: metadata.is_symlink,
2028 is_ignored: false,
2029 }
2030 }
2031
2032 pub fn is_dir(&self) -> bool {
2033 matches!(self.kind, EntryKind::Dir | EntryKind::PendingDir)
2034 }
2035
2036 pub fn is_file(&self) -> bool {
2037 matches!(self.kind, EntryKind::File(_))
2038 }
2039}
2040
2041impl sum_tree::Item for Entry {
2042 type Summary = EntrySummary;
2043
2044 fn summary(&self) -> Self::Summary {
2045 let visible_count = if self.is_ignored { 0 } else { 1 };
2046 let file_count;
2047 let visible_file_count;
2048 if self.is_file() {
2049 file_count = 1;
2050 visible_file_count = visible_count;
2051 } else {
2052 file_count = 0;
2053 visible_file_count = 0;
2054 }
2055
2056 EntrySummary {
2057 max_path: self.path.clone(),
2058 count: 1,
2059 visible_count,
2060 file_count,
2061 visible_file_count,
2062 }
2063 }
2064}
2065
2066impl sum_tree::KeyedItem for Entry {
2067 type Key = PathKey;
2068
2069 fn key(&self) -> Self::Key {
2070 PathKey(self.path.clone())
2071 }
2072}
2073
/// Aggregated data for a run of `Entry` items in the path-ordered tree.
#[derive(Clone, Debug)]
pub struct EntrySummary {
    /// The greatest (last) path in the run.
    max_path: Arc<Path>,
    /// Total number of entries in the run.
    count: usize,
    /// Entries that are not ignored.
    visible_count: usize,
    /// Entries that are files.
    file_count: usize,
    /// Entries that are files and are not ignored.
    visible_file_count: usize,
}
2082
2083impl Default for EntrySummary {
2084 fn default() -> Self {
2085 Self {
2086 max_path: Arc::from(Path::new("")),
2087 count: 0,
2088 visible_count: 0,
2089 file_count: 0,
2090 visible_file_count: 0,
2091 }
2092 }
2093}
2094
2095impl sum_tree::Summary for EntrySummary {
2096 type Context = ();
2097
2098 fn add_summary(&mut self, rhs: &Self, _: &()) {
2099 self.max_path = rhs.max_path.clone();
2100 self.count += rhs.count;
2101 self.visible_count += rhs.visible_count;
2102 self.file_count += rhs.file_count;
2103 self.visible_file_count += rhs.visible_file_count;
2104 }
2105}
2106
/// An item in the id-ordered tree, mapping a project entry id to its path.
#[derive(Clone, Debug)]
struct PathEntry {
    id: ProjectEntryId,
    path: Arc<Path>,
    is_ignored: bool,
    /// The scan in which this entry was last updated.
    scan_id: usize,
}
2114
2115impl sum_tree::Item for PathEntry {
2116 type Summary = PathEntrySummary;
2117
2118 fn summary(&self) -> Self::Summary {
2119 PathEntrySummary { max_id: self.id }
2120 }
2121}
2122
impl sum_tree::KeyedItem for PathEntry {
    type Key = ProjectEntryId;

    /// Items in this tree are keyed (and ordered) by project entry id.
    fn key(&self) -> Self::Key {
        self.id
    }
}
2130
/// Summary for the id-ordered tree: the maximum entry id in a run.
#[derive(Clone, Debug, Default)]
struct PathEntrySummary {
    max_id: ProjectEntryId,
}
2135
impl sum_tree::Summary for PathEntrySummary {
    type Context = ();

    /// The rightmost run's id is the maximum, since items are ordered by id.
    fn add_summary(&mut self, summary: &Self, _: &Self::Context) {
        self.max_id = summary.max_id;
    }
}
2143
// Lets cursors over the id-ordered tree seek by project entry id.
impl<'a> sum_tree::Dimension<'a, PathEntrySummary> for ProjectEntryId {
    fn add_summary(&mut self, summary: &'a PathEntrySummary, _: &()) {
        *self = summary.max_id;
    }
}
2149
/// Key ordering `Entry` items by path within the path-ordered tree.
#[derive(Clone, Debug, Eq, PartialEq, Ord, PartialOrd)]
pub struct PathKey(Arc<Path>);
2152
2153impl Default for PathKey {
2154 fn default() -> Self {
2155 Self(Path::new("").into())
2156 }
2157}
2158
// Lets cursors over the path-ordered tree seek by path key.
impl<'a> sum_tree::Dimension<'a, EntrySummary> for PathKey {
    fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
        self.0 = summary.max_path.clone();
    }
}
2164
/// Scans a local worktree's directory tree in the background, keeping its
/// snapshot up to date as filesystem events arrive.
struct BackgroundScanner {
    /// The snapshot being built and maintained; shared with scan workers.
    snapshot: Mutex<LocalSnapshot>,
    fs: Arc<dyn Fs>,
    /// Channel for reporting scan progress back to the worktree.
    status_updates_tx: UnboundedSender<ScanState>,
    executor: Arc<executor::Background>,
    /// Requests from the foreground to rescan specific paths.
    refresh_requests_rx: channel::Receiver<(Vec<PathBuf>, barrier::Sender)>,
    /// The snapshot as of the previous scan, plus the paths changed since
    /// then (accumulated in `process_events`).
    prev_state: Mutex<(Snapshot, Vec<Arc<Path>>)>,
    finished_initial_scan: bool,
}
2174
2175impl BackgroundScanner {
2176 fn new(
2177 snapshot: LocalSnapshot,
2178 fs: Arc<dyn Fs>,
2179 status_updates_tx: UnboundedSender<ScanState>,
2180 executor: Arc<executor::Background>,
2181 refresh_requests_rx: channel::Receiver<(Vec<PathBuf>, barrier::Sender)>,
2182 ) -> Self {
2183 Self {
2184 fs,
2185 status_updates_tx,
2186 executor,
2187 refresh_requests_rx,
2188 prev_state: Mutex::new((snapshot.snapshot.clone(), Vec::new())),
2189 snapshot: Mutex::new(snapshot),
2190 finished_initial_scan: false,
2191 }
2192 }
2193
    /// Entry point for the background scan: performs the initial recursive
    /// scan of the worktree, then processes filesystem events and refresh
    /// requests until the worktree is dropped.
    async fn run(
        &mut self,
        mut events_rx: Pin<Box<dyn Send + Stream<Item = Vec<fsevent::Event>>>>,
    ) {
        use futures::FutureExt as _;

        let (root_abs_path, root_inode) = {
            let snapshot = self.snapshot.lock();
            (
                snapshot.abs_path.clone(),
                snapshot.root_entry().map(|e| e.inode),
            )
        };

        // Populate ignores above the root.
        let ignore_stack;
        for ancestor in root_abs_path.ancestors().skip(1) {
            if let Ok(ignore) = build_gitignore(&ancestor.join(&*GITIGNORE), self.fs.as_ref()).await
            {
                self.snapshot
                    .lock()
                    .ignores_by_parent_abs_path
                    .insert(ancestor.into(), (ignore.into(), 0));
            }
        }
        {
            let mut snapshot = self.snapshot.lock();
            snapshot.scan_id += 1;
            ignore_stack = snapshot.ignore_stack_for_abs_path(&root_abs_path, true);
            // An ancestor gitignore may ignore the worktree root itself; if
            // so, mark the root entry as ignored up front.
            if ignore_stack.is_all() {
                if let Some(mut root_entry) = snapshot.root_entry().cloned() {
                    root_entry.is_ignored = true;
                    snapshot.insert_entry(root_entry, self.fs.as_ref());
                }
            }
        };

        // Perform an initial scan of the directory.
        let (scan_job_tx, scan_job_rx) = channel::unbounded();
        smol::block_on(scan_job_tx.send(ScanJob {
            abs_path: root_abs_path,
            path: Arc::from(Path::new("")),
            ignore_stack,
            ancestor_inodes: TreeSet::from_ordered_entries(root_inode),
            scan_queue: scan_job_tx.clone(),
        }))
        .unwrap();
        // Dropping our sender lets the scan workers detect queue exhaustion.
        drop(scan_job_tx);
        self.scan_dirs(true, scan_job_rx).await;
        {
            let mut snapshot = self.snapshot.lock();
            snapshot.completed_scan_id = snapshot.scan_id;
        }
        self.send_status_update(false, None);

        // Process any FS events that occurred while performing the initial scan.
        // For these events, update events cannot be as precise, because we didn't
        // have the previous state loaded yet.
        if let Poll::Ready(Some(events)) = futures::poll!(events_rx.next()) {
            let mut paths = events.into_iter().map(|e| e.path).collect::<Vec<_>>();
            while let Poll::Ready(Some(more_events)) = futures::poll!(events_rx.next()) {
                paths.extend(more_events.into_iter().map(|e| e.path));
            }
            self.process_events(paths).await;
        }

        self.finished_initial_scan = true;

        // Continue processing events until the worktree is dropped.
        loop {
            select_biased! {
                // Process any path refresh requests from the worktree. Prioritize
                // these before handling changes reported by the filesystem.
                request = self.refresh_requests_rx.recv().fuse() => {
                    let Ok((paths, barrier)) = request else { break };
                    if !self.process_refresh_request(paths, barrier).await {
                        return;
                    }
                }

                events = events_rx.next().fuse() => {
                    let Some(events) = events else { break };
                    // Coalesce any further event batches that are already queued.
                    let mut paths = events.into_iter().map(|e| e.path).collect::<Vec<_>>();
                    while let Poll::Ready(Some(more_events)) = futures::poll!(events_rx.next()) {
                        paths.extend(more_events.into_iter().map(|e| e.path));
                    }
                    self.process_events(paths).await;
                }
            }
        }
    }
2285
    /// Rescans the given paths, then signals completion through `barrier` via
    /// a status update, returning `send_status_update`'s result.
    async fn process_refresh_request(&self, paths: Vec<PathBuf>, barrier: barrier::Sender) -> bool {
        self.reload_entries_for_paths(paths, None).await;
        self.send_status_update(false, Some(barrier))
    }
2290
    /// Handles a batch of filesystem events: reloads the affected entries,
    /// scans any directories that were queued as a result, refreshes ignore
    /// statuses, and prunes state for removed git repositories.
    async fn process_events(&mut self, paths: Vec<PathBuf>) {
        let (scan_job_tx, scan_job_rx) = channel::unbounded();
        if let Some(mut paths) = self
            .reload_entries_for_paths(paths, Some(scan_job_tx.clone()))
            .await
        {
            // Record which paths changed since the previous snapshot state.
            paths.sort_unstable();
            util::extend_sorted(&mut self.prev_state.lock().1, paths, usize::MAX, Ord::cmp);
        }
        drop(scan_job_tx);
        self.scan_dirs(false, scan_job_rx).await;

        self.update_ignore_statuses().await;

        let mut snapshot = self.snapshot.lock();
        // Drop git repositories whose `.git` directories no longer exist.
        let mut git_repositories = mem::take(&mut snapshot.git_repositories);
        git_repositories.retain(|repo| snapshot.entry_for_path(&repo.git_dir_path).is_some());
        snapshot.git_repositories = git_repositories;
        snapshot.removed_entry_ids.clear();
        snapshot.completed_scan_id = snapshot.scan_id;
        drop(snapshot);

        self.send_status_update(false, None);
    }
2315
    /// Drains the queue of directory-scan jobs using one worker task per CPU,
    /// while also servicing path refresh requests and (optionally) sending
    /// periodic progress updates to the worktree.
    async fn scan_dirs(
        &self,
        enable_progress_updates: bool,
        scan_jobs_rx: channel::Receiver<ScanJob>,
    ) {
        use futures::FutureExt as _;

        // If the worktree has been dropped, there is nobody left to notify.
        if self
            .status_updates_tx
            .unbounded_send(ScanState::Started)
            .is_err()
        {
            return;
        }

        let progress_update_count = AtomicUsize::new(0);
        self.executor
            .scoped(|scope| {
                for _ in 0..self.executor.num_cpus() {
                    scope.spawn(async {
                        let mut last_progress_update_count = 0;
                        let progress_update_timer = self.progress_timer(enable_progress_updates).fuse();
                        futures::pin_mut!(progress_update_timer);

                        loop {
                            select_biased! {
                                // Process any path refresh requests before moving on to process
                                // the scan queue, so that user operations are prioritized.
                                request = self.refresh_requests_rx.recv().fuse() => {
                                    let Ok((paths, barrier)) = request else { break };
                                    if !self.process_refresh_request(paths, barrier).await {
                                        return;
                                    }
                                }

                                // Send periodic progress updates to the worktree. Use an atomic counter
                                // to ensure that only one of the workers sends a progress update after
                                // the update interval elapses.
                                _ = progress_update_timer => {
                                    match progress_update_count.compare_exchange(
                                        last_progress_update_count,
                                        last_progress_update_count + 1,
                                        SeqCst,
                                        SeqCst
                                    ) {
                                        Ok(_) => {
                                            // This worker won the race; it reports progress.
                                            last_progress_update_count += 1;
                                            self.send_status_update(true, None);
                                        }
                                        Err(count) => {
                                            // Another worker already reported; catch up to it.
                                            last_progress_update_count = count;
                                        }
                                    }
                                    progress_update_timer.set(self.progress_timer(enable_progress_updates).fuse());
                                }

                                // Recursively load directories from the file system.
                                job = scan_jobs_rx.recv().fuse() => {
                                    let Ok(job) = job else { break };
                                    if let Err(err) = self.scan_dir(&job).await {
                                        // Errors on the worktree root itself are reported elsewhere.
                                        if job.path.as_ref() != Path::new("") {
                                            log::error!("error scanning directory {:?}: {}", job.abs_path, err);
                                        }
                                    }
                                }
                            }
                        }
                    })
                }
            })
            .await;
    }
2388
    /// Sends a clone of the current snapshot, along with the set of changes
    /// since the previously sent snapshot, over the status channel. Returns
    /// false if the receiving worktree has been dropped.
    fn send_status_update(&self, scanning: bool, barrier: Option<barrier::Sender>) -> bool {
        let mut prev_state = self.prev_state.lock();
        let snapshot = self.snapshot.lock().clone();
        // Swap the current snapshot into `prev_state`, recovering the snapshot
        // from the previous update, and take the accumulated changed paths.
        let mut old_snapshot = snapshot.snapshot.clone();
        mem::swap(&mut old_snapshot, &mut prev_state.0);
        let changed_paths = mem::take(&mut prev_state.1);
        let changes = self.build_change_set(&old_snapshot, &snapshot.snapshot, changed_paths);
        self.status_updates_tx
            .unbounded_send(ScanState::Updated {
                snapshot,
                changes,
                scanning,
                barrier,
            })
            .is_ok()
    }
2405
    /// Loads the children of one directory from the file system, building
    /// entries for them and enqueueing scan jobs for their subdirectories.
    async fn scan_dir(&self, job: &ScanJob) -> Result<()> {
        let mut new_entries: Vec<Entry> = Vec::new();
        // One slot per child directory; `None` marks directories that won't be
        // scanned because doing so would recurse through a symlink cycle.
        let mut new_jobs: Vec<Option<ScanJob>> = Vec::new();
        let mut ignore_stack = job.ignore_stack.clone();
        let mut new_ignore = None;
        let (root_abs_path, root_char_bag, next_entry_id) = {
            let snapshot = self.snapshot.lock();
            (
                snapshot.abs_path().clone(),
                snapshot.root_char_bag,
                snapshot.next_entry_id.clone(),
            )
        };
        let mut child_paths = self.fs.read_dir(&job.abs_path).await?;
        while let Some(child_abs_path) = child_paths.next().await {
            let child_abs_path: Arc<Path> = match child_abs_path {
                Ok(child_abs_path) => child_abs_path.into(),
                Err(error) => {
                    log::error!("error processing entry {:?}", error);
                    continue;
                }
            };

            let child_name = child_abs_path.file_name().unwrap();
            let child_path: Arc<Path> = job.path.join(child_name).into();
            let child_metadata = match self.fs.metadata(&child_abs_path).await {
                Ok(Some(metadata)) => metadata,
                // The child disappeared between listing and stat; skip it.
                Ok(None) => continue,
                Err(err) => {
                    log::error!("error processing {:?}: {:?}", child_abs_path, err);
                    continue;
                }
            };

            // If we find a .gitignore, add it to the stack of ignores used to determine which paths are ignored
            if child_name == *GITIGNORE {
                match build_gitignore(&child_abs_path, self.fs.as_ref()).await {
                    Ok(ignore) => {
                        let ignore = Arc::new(ignore);
                        ignore_stack = ignore_stack.append(job.abs_path.clone(), ignore.clone());
                        new_ignore = Some(ignore);
                    }
                    Err(error) => {
                        log::error!(
                            "error loading .gitignore file {:?} - {:?}",
                            child_name,
                            error
                        );
                    }
                }

                // Update the ignore status of any child entries we've already processed to
                // reflect the ignore file in the current directory. Because `.gitignore`
                // starts with a `.`, it typically appears early in the directory listing,
                // so there should rarely be many such entries. Update the ignore stack
                // associated with any new jobs as well.
                let mut new_jobs = new_jobs.iter_mut();
                for entry in &mut new_entries {
                    let entry_abs_path = root_abs_path.join(&entry.path);
                    entry.is_ignored =
                        ignore_stack.is_abs_path_ignored(&entry_abs_path, entry.is_dir());

                    if entry.is_dir() {
                        // Each directory entry has exactly one slot in `new_jobs`.
                        if let Some(job) = new_jobs.next().expect("Missing scan job for entry") {
                            job.ignore_stack = if entry.is_ignored {
                                IgnoreStack::all()
                            } else {
                                ignore_stack.clone()
                            };
                        }
                    }
                }
            }

            let mut child_entry = Entry::new(
                child_path.clone(),
                &child_metadata,
                &next_entry_id,
                root_char_bag,
            );

            if child_entry.is_dir() {
                let is_ignored = ignore_stack.is_abs_path_ignored(&child_abs_path, true);
                child_entry.is_ignored = is_ignored;

                // Avoid recursing until crash in the case of a recursive symlink
                if !job.ancestor_inodes.contains(&child_entry.inode) {
                    let mut ancestor_inodes = job.ancestor_inodes.clone();
                    ancestor_inodes.insert(child_entry.inode);

                    new_jobs.push(Some(ScanJob {
                        abs_path: child_abs_path,
                        path: child_path,
                        ignore_stack: if is_ignored {
                            IgnoreStack::all()
                        } else {
                            ignore_stack.clone()
                        },
                        ancestor_inodes,
                        scan_queue: job.scan_queue.clone(),
                    }));
                } else {
                    new_jobs.push(None);
                }
            } else {
                child_entry.is_ignored = ignore_stack.is_abs_path_ignored(&child_abs_path, false);
            }

            new_entries.push(child_entry);
        }

        self.snapshot.lock().populate_dir(
            job.path.clone(),
            new_entries,
            new_ignore,
            self.fs.as_ref(),
        );

        // Enqueue subdirectory jobs only after the parent directory's entries
        // have been inserted into the snapshot.
        for new_job in new_jobs {
            if let Some(new_job) = new_job {
                job.scan_queue.send(new_job).await.unwrap();
            }
        }

        Ok(())
    }
2531
    /// Reloads the worktree entries for the given absolute paths, returning
    /// the corresponding root-relative paths. If `scan_queue_tx` is provided,
    /// directories among the paths are enqueued for recursive rescanning.
    /// Returns `None` if the worktree root can no longer be canonicalized
    /// (e.g. it was deleted).
    async fn reload_entries_for_paths(
        &self,
        mut abs_paths: Vec<PathBuf>,
        scan_queue_tx: Option<Sender<ScanJob>>,
    ) -> Option<Vec<Arc<Path>>> {
        let doing_recursive_update = scan_queue_tx.is_some();

        // After sorting, a path that is a descendant of another appears after
        // it, so this drops any path already covered by a retained ancestor.
        abs_paths.sort_unstable();
        abs_paths.dedup_by(|a, b| a.starts_with(&b));

        let root_abs_path = self.snapshot.lock().abs_path.clone();
        let root_canonical_path = self.fs.canonicalize(&root_abs_path).await.log_err()?;
        // Stat all the paths concurrently.
        let metadata = futures::future::join_all(
            abs_paths
                .iter()
                .map(|abs_path| self.fs.metadata(&abs_path))
                .collect::<Vec<_>>(),
        )
        .await;

        let mut snapshot = self.snapshot.lock();
        let is_idle = snapshot.completed_scan_id == snapshot.scan_id;
        snapshot.scan_id += 1;
        // A non-recursive update finishes within this lock, so if the scanner
        // was idle we can mark the new scan as completed immediately.
        if is_idle && !doing_recursive_update {
            snapshot.completed_scan_id = snapshot.scan_id;
        }

        // Remove any entries for paths that no longer exist or are being recursively
        // refreshed. Do this before adding any new entries, so that renames can be
        // detected regardless of the order of the paths.
        let mut event_paths = Vec::<Arc<Path>>::with_capacity(abs_paths.len());
        for (abs_path, metadata) in abs_paths.iter().zip(metadata.iter()) {
            if let Ok(path) = abs_path.strip_prefix(&root_canonical_path) {
                if matches!(metadata, Ok(None)) || doing_recursive_update {
                    snapshot.remove_path(path);
                }
                event_paths.push(path.into());
            } else {
                log::error!(
                    "unexpected event {:?} for root path {:?}",
                    abs_path,
                    root_canonical_path
                );
            }
        }

        for (path, metadata) in event_paths.iter().cloned().zip(metadata.into_iter()) {
            let abs_path: Arc<Path> = root_abs_path.join(&path).into();

            match metadata {
                Ok(Some(metadata)) => {
                    let ignore_stack =
                        snapshot.ignore_stack_for_abs_path(&abs_path, metadata.is_dir);
                    let mut fs_entry = Entry::new(
                        path.clone(),
                        &metadata,
                        snapshot.next_entry_id.as_ref(),
                        snapshot.root_char_bag,
                    );
                    fs_entry.is_ignored = ignore_stack.is_all();
                    snapshot.insert_entry(fs_entry, self.fs.as_ref());

                    // Changes inside a `.git` directory invalidate the git
                    // repository's cached index.
                    let scan_id = snapshot.scan_id;
                    if let Some(repo) = snapshot.repo_with_dot_git_containing(&path) {
                        repo.repo.lock().reload_index();
                        repo.scan_id = scan_id;
                    }

                    if let Some(scan_queue_tx) = &scan_queue_tx {
                        let mut ancestor_inodes = snapshot.ancestor_inodes_for_path(&path);
                        // Enqueue directories for recursive scanning, guarding
                        // against symlink cycles via the ancestor inode set.
                        if metadata.is_dir && !ancestor_inodes.contains(&metadata.inode) {
                            ancestor_inodes.insert(metadata.inode);
                            // The channel is unbounded, so this send completes
                            // without actually blocking.
                            smol::block_on(scan_queue_tx.send(ScanJob {
                                abs_path,
                                path,
                                ignore_stack,
                                ancestor_inodes,
                                scan_queue: scan_queue_tx.clone(),
                            }))
                            .unwrap();
                        }
                    }
                }
                Ok(None) => {}
                Err(err) => {
                    // TODO - create a special 'error' entry in the entries tree to mark this
                    log::error!("error reading file on event {:?}", err);
                }
            }
        }

        Some(event_paths)
    }
2625
    /// Recomputes the ignore status of entries affected by `.gitignore` files
    /// that changed since the last completed scan, and forgets ignore files
    /// that no longer exist on disk.
    async fn update_ignore_statuses(&self) {
        use futures::FutureExt as _;

        let mut snapshot = self.snapshot.lock().clone();
        let mut ignores_to_update = Vec::new();
        let mut ignores_to_delete = Vec::new();
        for (parent_abs_path, (_, scan_id)) in &snapshot.ignores_by_parent_abs_path {
            if let Ok(parent_path) = parent_abs_path.strip_prefix(&snapshot.abs_path) {
                // An ignore file touched since the last completed scan means
                // its directory's ignore statuses must be recomputed.
                if *scan_id > snapshot.completed_scan_id
                    && snapshot.entry_for_path(parent_path).is_some()
                {
                    ignores_to_update.push(parent_abs_path.clone());
                }

                // Forget ignore files whose entry has disappeared.
                let ignore_path = parent_path.join(&*GITIGNORE);
                if snapshot.entry_for_path(ignore_path).is_none() {
                    ignores_to_delete.push(parent_abs_path.clone());
                }
            }
        }

        for parent_abs_path in ignores_to_delete {
            // Remove from both the local clone and the shared snapshot.
            snapshot.ignores_by_parent_abs_path.remove(&parent_abs_path);
            self.snapshot
                .lock()
                .ignores_by_parent_abs_path
                .remove(&parent_abs_path);
        }

        let (ignore_queue_tx, ignore_queue_rx) = channel::unbounded();
        ignores_to_update.sort_unstable();
        let mut ignores_to_update = ignores_to_update.into_iter().peekable();
        while let Some(parent_abs_path) = ignores_to_update.next() {
            // Skip descendants of this path; the recursive job enqueued below
            // will cover them anyway.
            while ignores_to_update
                .peek()
                .map_or(false, |p| p.starts_with(&parent_abs_path))
            {
                ignores_to_update.next().unwrap();
            }

            let ignore_stack = snapshot.ignore_stack_for_abs_path(&parent_abs_path, true);
            smol::block_on(ignore_queue_tx.send(UpdateIgnoreStatusJob {
                abs_path: parent_abs_path,
                ignore_stack,
                ignore_queue: ignore_queue_tx.clone(),
            }))
            .unwrap();
        }
        // Close the queue so the workers terminate when all jobs are done.
        drop(ignore_queue_tx);

        self.executor
            .scoped(|scope| {
                for _ in 0..self.executor.num_cpus() {
                    scope.spawn(async {
                        loop {
                            select_biased! {
                                // Process any path refresh requests before moving on to process
                                // the queue of ignore statuses.
                                request = self.refresh_requests_rx.recv().fuse() => {
                                    let Ok((paths, barrier)) = request else { break };
                                    if !self.process_refresh_request(paths, barrier).await {
                                        return;
                                    }
                                }

                                // Recursively process directories whose ignores have changed.
                                job = ignore_queue_rx.recv().fuse() => {
                                    let Ok(job) = job else { break };
                                    self.update_ignore_status(job, &snapshot).await;
                                }
                            }
                        }
                    });
                }
            })
            .await;
    }
2703
    /// Recomputes the ignore status of the entries directly inside the
    /// directory described by `job`, recording edits for entries whose status
    /// changed and enqueueing follow-up jobs for subdirectories.
    async fn update_ignore_status(&self, job: UpdateIgnoreStatusJob, snapshot: &LocalSnapshot) {
        let mut ignore_stack = job.ignore_stack;
        // Include this directory's own `.gitignore`, if it has one.
        if let Some((ignore, _)) = snapshot.ignores_by_parent_abs_path.get(&job.abs_path) {
            ignore_stack = ignore_stack.append(job.abs_path.clone(), ignore.clone());
        }

        let mut entries_by_id_edits = Vec::new();
        let mut entries_by_path_edits = Vec::new();
        let path = job.abs_path.strip_prefix(&snapshot.abs_path).unwrap();
        for mut entry in snapshot.child_entries(path).cloned() {
            let was_ignored = entry.is_ignored;
            let abs_path = snapshot.abs_path().join(&entry.path);
            entry.is_ignored = ignore_stack.is_abs_path_ignored(&abs_path, entry.is_dir());
            if entry.is_dir() {
                let child_ignore_stack = if entry.is_ignored {
                    IgnoreStack::all()
                } else {
                    ignore_stack.clone()
                };
                job.ignore_queue
                    .send(UpdateIgnoreStatusJob {
                        abs_path: abs_path.into(),
                        ignore_stack: child_ignore_stack,
                        ignore_queue: job.ignore_queue.clone(),
                    })
                    .await
                    .unwrap();
            }

            // Only record edits for entries whose status actually changed.
            if entry.is_ignored != was_ignored {
                let mut path_entry = snapshot.entries_by_id.get(&entry.id, &()).unwrap().clone();
                path_entry.scan_id = snapshot.scan_id;
                path_entry.is_ignored = entry.is_ignored;
                entries_by_id_edits.push(Edit::Insert(path_entry));
                entries_by_path_edits.push(Edit::Insert(entry));
            }
        }

        let mut snapshot = self.snapshot.lock();
        snapshot.entries_by_path.edit(entries_by_path_edits, &());
        snapshot.entries_by_id.edit(entries_by_id_edits, &());
    }
2746
    /// Computes the set of path changes between two snapshots, considering
    /// only the given event paths and their descendants.
    fn build_change_set(
        &self,
        old_snapshot: &Snapshot,
        new_snapshot: &Snapshot,
        event_paths: Vec<Arc<Path>>,
    ) -> HashMap<Arc<Path>, PathChange> {
        use PathChange::{Added, AddedOrUpdated, Removed, Updated};

        let mut changes = HashMap::default();
        let mut old_paths = old_snapshot.entries_by_path.cursor::<PathKey>();
        let mut new_paths = new_snapshot.entries_by_path.cursor::<PathKey>();
        let received_before_initialized = !self.finished_initial_scan;

        for path in event_paths {
            let path = PathKey(path);
            old_paths.seek(&path, Bias::Left, &());
            new_paths.seek(&path, Bias::Left, &());

            // Walk both snapshots (which are ordered by path) in lockstep,
            // merging their entries until we've left `path`'s subtree.
            loop {
                match (old_paths.item(), new_paths.item()) {
                    (Some(old_entry), Some(new_entry)) => {
                        // Stop when both cursors have moved past `path` and
                        // all of its descendants.
                        if old_entry.path > path.0
                            && new_entry.path > path.0
                            && !old_entry.path.starts_with(&path.0)
                            && !new_entry.path.starts_with(&path.0)
                        {
                            break;
                        }

                        match Ord::cmp(&old_entry.path, &new_entry.path) {
                            // Present only in the old snapshot: removed.
                            Ordering::Less => {
                                changes.insert(old_entry.path.clone(), Removed);
                                old_paths.next(&());
                            }
                            Ordering::Equal => {
                                if received_before_initialized {
                                    // If the worktree was not fully initialized when this event was generated,
                                    // we can't know whether this entry was added during the scan or whether
                                    // it was merely updated.
                                    changes.insert(new_entry.path.clone(), AddedOrUpdated);
                                } else if old_entry.mtime != new_entry.mtime {
                                    changes.insert(new_entry.path.clone(), Updated);
                                }
                                old_paths.next(&());
                                new_paths.next(&());
                            }
                            // Present only in the new snapshot: added.
                            Ordering::Greater => {
                                changes.insert(new_entry.path.clone(), Added);
                                new_paths.next(&());
                            }
                        }
                    }
                    (Some(old_entry), None) => {
                        changes.insert(old_entry.path.clone(), Removed);
                        old_paths.next(&());
                    }
                    (None, Some(new_entry)) => {
                        changes.insert(new_entry.path.clone(), Added);
                        new_paths.next(&());
                    }
                    (None, None) => break,
                }
            }
        }
        changes
    }
2813
2814 async fn progress_timer(&self, running: bool) {
2815 if !running {
2816 return futures::future::pending().await;
2817 }
2818
2819 #[cfg(any(test, feature = "test-support"))]
2820 if self.fs.is_fake() {
2821 return self.executor.simulate_random_delay().await;
2822 }
2823
2824 smol::Timer::after(Duration::from_millis(100)).await;
2825 }
2826}
2827
2828fn char_bag_for_path(root_char_bag: CharBag, path: &Path) -> CharBag {
2829 let mut result = root_char_bag;
2830 result.extend(
2831 path.to_string_lossy()
2832 .chars()
2833 .map(|c| c.to_ascii_lowercase()),
2834 );
2835 result
2836}
2837
/// A unit of work for the background scanner: one directory to load from the
/// file system.
struct ScanJob {
    /// Absolute path of the directory to scan.
    abs_path: Arc<Path>,
    /// Path of the directory, relative to the worktree root.
    path: Arc<Path>,
    /// Ignore rules in effect for this directory's contents.
    ignore_stack: Arc<IgnoreStack>,
    /// Queue on which jobs for subdirectories are enqueued.
    scan_queue: Sender<ScanJob>,
    /// Inodes of ancestor directories, used to detect recursive symlinks.
    ancestor_inodes: TreeSet<u64>,
}
2845
/// A unit of work for refreshing ignore statuses: one directory whose
/// children must be re-evaluated against the current ignore rules.
struct UpdateIgnoreStatusJob {
    /// Absolute path of the directory to process.
    abs_path: Arc<Path>,
    /// Ignore rules inherited from ancestor directories.
    ignore_stack: Arc<IgnoreStack>,
    /// Queue on which jobs for subdirectories are enqueued.
    ignore_queue: Sender<UpdateIgnoreStatusJob>,
}
2851
/// Test-support extensions for worktree model handles.
pub trait WorktreeHandle {
    /// Waits until all pending file-system events for the worktree's root
    /// directory have been processed by the worktree.
    #[cfg(any(test, feature = "test-support"))]
    fn flush_fs_events<'a>(
        &self,
        cx: &'a gpui::TestAppContext,
    ) -> futures::future::LocalBoxFuture<'a, ()>;
}
2859
impl WorktreeHandle for ModelHandle<Worktree> {
    // The worktree's FS event stream sometimes delivers "redundant" events for FS changes that
    // occurred before the worktree was constructed. These events can cause the worktree to perform
    // extra directory scans, and emit extra scan-state notifications.
    //
    // This function mutates the worktree's directory and waits for those mutations to be picked up,
    // to ensure that all redundant FS events have already been processed.
    #[cfg(any(test, feature = "test-support"))]
    fn flush_fs_events<'a>(
        &self,
        cx: &'a gpui::TestAppContext,
    ) -> futures::future::LocalBoxFuture<'a, ()> {
        use smol::future::FutureExt;

        let filename = "fs-event-sentinel";
        let tree = self.clone();
        let (fs, root_path) = self.read_with(cx, |tree, _| {
            let tree = tree.as_local().unwrap();
            (tree.fs.clone(), tree.abs_path().clone())
        });

        async move {
            // Create and then delete a sentinel file, waiting for the worktree
            // to observe each of those changes in turn.
            fs.create_file(&root_path.join(filename), Default::default())
                .await
                .unwrap();
            tree.condition(cx, |tree, _| tree.entry_for_path(filename).is_some())
                .await;

            fs.remove_file(&root_path.join(filename), Default::default())
                .await
                .unwrap();
            tree.condition(cx, |tree, _| tree.entry_for_path(filename).is_none())
                .await;

            // Finally, wait for any in-progress scan to complete.
            cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
                .await;
        }
        .boxed_local()
    }
}
2900
/// A `sum_tree` dimension tracking, for a position in the entry tree, how many
/// entries precede it, broken down by directory/file and ignored/visible.
#[derive(Clone, Debug)]
struct TraversalProgress<'a> {
    /// The greatest entry path at or before this position.
    max_path: &'a Path,
    /// Total number of entries.
    count: usize,
    /// Number of non-ignored entries.
    visible_count: usize,
    /// Number of file (non-directory) entries.
    file_count: usize,
    /// Number of non-ignored file entries.
    visible_file_count: usize,
}
2909
2910impl<'a> TraversalProgress<'a> {
2911 fn count(&self, include_dirs: bool, include_ignored: bool) -> usize {
2912 match (include_ignored, include_dirs) {
2913 (true, true) => self.count,
2914 (true, false) => self.file_count,
2915 (false, true) => self.visible_count,
2916 (false, false) => self.visible_file_count,
2917 }
2918 }
2919}
2920
impl<'a> sum_tree::Dimension<'a, EntrySummary> for TraversalProgress<'a> {
    /// Accumulates an entry summary into the running totals.
    fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
        self.max_path = summary.max_path.as_ref();
        self.count += summary.count;
        self.visible_count += summary.visible_count;
        self.file_count += summary.file_count;
        self.visible_file_count += summary.visible_file_count;
    }
}
2930
impl<'a> Default for TraversalProgress<'a> {
    /// Progress at the very start of the tree: all counts zero, empty path.
    fn default() -> Self {
        Self {
            max_path: Path::new(""),
            count: 0,
            visible_count: 0,
            file_count: 0,
            visible_file_count: 0,
        }
    }
}
2942
/// An iterator-like cursor over a snapshot's entries, optionally filtering
/// out directories and/or ignored entries.
pub struct Traversal<'a> {
    cursor: sum_tree::Cursor<'a, Entry, TraversalProgress<'a>>,
    // Whether ignored entries are yielded.
    include_ignored: bool,
    // Whether directory entries are yielded.
    include_dirs: bool,
}
2948
impl<'a> Traversal<'a> {
    /// Advances to the next entry matching this traversal's filters.
    /// Returns false once the end of the tree is reached.
    pub fn advance(&mut self) -> bool {
        self.advance_to_offset(self.offset() + 1)
    }

    /// Seeks forward until `offset()` reaches `offset`, counting only entries
    /// that match this traversal's filters.
    pub fn advance_to_offset(&mut self, offset: usize) -> bool {
        self.cursor.seek_forward(
            &TraversalTarget::Count {
                count: offset,
                include_dirs: self.include_dirs,
                include_ignored: self.include_ignored,
            },
            Bias::Right,
            &(),
        )
    }

    /// Skips past the current entry and all of its descendants, repeating
    /// until it lands on an entry that matches the filters. Returns false if
    /// no such entry remains.
    pub fn advance_to_sibling(&mut self) -> bool {
        while let Some(entry) = self.cursor.item() {
            self.cursor.seek_forward(
                &TraversalTarget::PathSuccessor(&entry.path),
                Bias::Left,
                &(),
            );
            if let Some(entry) = self.cursor.item() {
                if (self.include_dirs || !entry.is_dir())
                    && (self.include_ignored || !entry.is_ignored)
                {
                    return true;
                }
            }
        }
        false
    }

    /// Returns the entry at the current position, if any.
    pub fn entry(&self) -> Option<&'a Entry> {
        self.cursor.item()
    }

    /// Returns the number of filter-matching entries before the current
    /// position.
    pub fn offset(&self) -> usize {
        self.cursor
            .start()
            .count(self.include_dirs, self.include_ignored)
    }
}
2994
2995impl<'a> Iterator for Traversal<'a> {
2996 type Item = &'a Entry;
2997
2998 fn next(&mut self) -> Option<Self::Item> {
2999 if let Some(item) = self.entry() {
3000 self.advance();
3001 Some(item)
3002 } else {
3003 None
3004 }
3005 }
3006}
3007
/// A seek target within the entry tree, used by `Traversal`.
#[derive(Debug)]
enum TraversalTarget<'a> {
    /// The entry with the given path.
    Path(&'a Path),
    /// The first position no longer beneath the given path (i.e. just past
    /// the path and all of its descendants).
    PathSuccessor(&'a Path),
    /// The position at which the filtered entry count reaches `count`.
    Count {
        count: usize,
        include_ignored: bool,
        include_dirs: bool,
    },
}
3018
impl<'a, 'b> SeekTarget<'a, EntrySummary, TraversalProgress<'a>> for TraversalTarget<'b> {
    /// Orders this target relative to a cursor position, driving the seek.
    fn cmp(&self, cursor_location: &TraversalProgress<'a>, _: &()) -> Ordering {
        match self {
            TraversalTarget::Path(path) => path.cmp(&cursor_location.max_path),
            TraversalTarget::PathSuccessor(path) => {
                // Any position whose greatest path is no longer beneath `path`
                // counts as having reached the successor.
                if !cursor_location.max_path.starts_with(path) {
                    Ordering::Equal
                } else {
                    Ordering::Greater
                }
            }
            TraversalTarget::Count {
                count,
                include_dirs,
                include_ignored,
            } => Ord::cmp(
                count,
                &cursor_location.count(*include_dirs, *include_ignored),
            ),
        }
    }
}
3041
/// Iterates over the immediate children of `parent_path` by skipping each
/// yielded entry's descendants during traversal.
struct ChildEntriesIter<'a> {
    parent_path: &'a Path,
    traversal: Traversal<'a>,
}
3046
3047impl<'a> Iterator for ChildEntriesIter<'a> {
3048 type Item = &'a Entry;
3049
3050 fn next(&mut self) -> Option<Self::Item> {
3051 if let Some(item) = self.traversal.entry() {
3052 if item.path.starts_with(&self.parent_path) {
3053 self.traversal.advance_to_sibling();
3054 return Some(item);
3055 }
3056 }
3057 None
3058 }
3059}
3060
impl<'a> From<&'a Entry> for proto::Entry {
    /// Converts an entry into its protobuf wire representation.
    fn from(entry: &'a Entry) -> Self {
        Self {
            id: entry.id.to_proto(),
            is_dir: entry.is_dir(),
            // Paths are sent as (possibly lossy) UTF-8 strings.
            path: entry.path.to_string_lossy().into(),
            inode: entry.inode,
            mtime: Some(entry.mtime.into()),
            is_symlink: entry.is_symlink,
            is_ignored: entry.is_ignored,
        }
    }
}
3074
3075impl<'a> TryFrom<(&'a CharBag, proto::Entry)> for Entry {
3076 type Error = anyhow::Error;
3077
3078 fn try_from((root_char_bag, entry): (&'a CharBag, proto::Entry)) -> Result<Self> {
3079 if let Some(mtime) = entry.mtime {
3080 let kind = if entry.is_dir {
3081 EntryKind::Dir
3082 } else {
3083 let mut char_bag = *root_char_bag;
3084 char_bag.extend(entry.path.chars().map(|c| c.to_ascii_lowercase()));
3085 EntryKind::File(char_bag)
3086 };
3087 let path: Arc<Path> = PathBuf::from(entry.path).into();
3088 Ok(Entry {
3089 id: ProjectEntryId::from_proto(entry.id),
3090 kind,
3091 path,
3092 inode: entry.inode,
3093 mtime: mtime.into(),
3094 is_symlink: entry.is_symlink,
3095 is_ignored: entry.is_ignored,
3096 })
3097 } else {
3098 Err(anyhow!(
3099 "missing mtime in remote worktree entry {:?}",
3100 entry.path
3101 ))
3102 }
3103 }
3104}
3105
3106#[cfg(test)]
3107mod tests {
3108 use super::*;
3109 use fs::repository::FakeGitRepository;
3110 use fs::{FakeFs, RealFs};
3111 use gpui::{executor::Deterministic, TestAppContext};
3112 use pretty_assertions::assert_eq;
3113 use rand::prelude::*;
3114 use serde_json::json;
3115 use std::{env, fmt::Write};
3116 use util::{http::FakeHttpClient, test::temp_tree};
3117
    // Verifies that `entries(false)` omits gitignored entries while
    // `entries(true)` yields them.
    #[gpui::test]
    async fn test_traversal(cx: &mut TestAppContext) {
        let fs = FakeFs::new(cx.background());
        // "a/b" is ignored by the root .gitignore.
        fs.insert_tree(
            "/root",
            json!({
                ".gitignore": "a/b\n",
                "a": {
                    "b": "",
                    "c": "",
                }
            }),
        )
        .await;

        let http_client = FakeHttpClient::with_404_response();
        let client = cx.read(|cx| Client::new(http_client, cx));

        let tree = Worktree::local(
            client,
            Path::new("/root"),
            true,
            fs,
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;

        tree.read_with(cx, |tree, _| {
            assert_eq!(
                tree.entries(false)
                    .map(|entry| entry.path.as_ref())
                    .collect::<Vec<_>>(),
                vec![
                    Path::new(""),
                    Path::new(".gitignore"),
                    Path::new("a"),
                    Path::new("a/c"),
                ]
            );
            assert_eq!(
                tree.entries(true)
                    .map(|entry| entry.path.as_ref())
                    .collect::<Vec<_>>(),
                vec![
                    Path::new(""),
                    Path::new(".gitignore"),
                    Path::new("a"),
                    Path::new("a/b"),
                    Path::new("a/c"),
                ]
            );
        })
    }
3175
    // Verifies that scanning terminates when symlinks form a cycle: the
    // symlink entries are listed but not descended into, and renaming one of
    // them is picked up on rescan.
    #[gpui::test(iterations = 10)]
    async fn test_circular_symlinks(executor: Arc<Deterministic>, cx: &mut TestAppContext) {
        let fs = FakeFs::new(cx.background());
        fs.insert_tree(
            "/root",
            json!({
                "lib": {
                    "a": {
                        "a.txt": ""
                    },
                    "b": {
                        "b.txt": ""
                    }
                }
            }),
        )
        .await;
        // Each symlink points back at its parent's parent, forming a cycle.
        fs.insert_symlink("/root/lib/a/lib", "..".into()).await;
        fs.insert_symlink("/root/lib/b/lib", "..".into()).await;

        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
        let tree = Worktree::local(
            client,
            Path::new("/root"),
            true,
            fs.clone(),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();

        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;

        // The symlinks appear as entries, but their targets aren't recursed into.
        tree.read_with(cx, |tree, _| {
            assert_eq!(
                tree.entries(false)
                    .map(|entry| entry.path.as_ref())
                    .collect::<Vec<_>>(),
                vec![
                    Path::new(""),
                    Path::new("lib"),
                    Path::new("lib/a"),
                    Path::new("lib/a/a.txt"),
                    Path::new("lib/a/lib"),
                    Path::new("lib/b"),
                    Path::new("lib/b/b.txt"),
                    Path::new("lib/b/lib"),
                ]
            );
        });

        // Renaming a cyclic symlink should be reflected after the rescan.
        fs.rename(
            Path::new("/root/lib/a/lib"),
            Path::new("/root/lib/a/lib-2"),
            Default::default(),
        )
        .await
        .unwrap();
        executor.run_until_parked();
        tree.read_with(cx, |tree, _| {
            assert_eq!(
                tree.entries(false)
                    .map(|entry| entry.path.as_ref())
                    .collect::<Vec<_>>(),
                vec![
                    Path::new(""),
                    Path::new("lib"),
                    Path::new("lib/a"),
                    Path::new("lib/a/a.txt"),
                    Path::new("lib/a/lib-2"),
                    Path::new("lib/b"),
                    Path::new("lib/b/b.txt"),
                    Path::new("lib/b/lib"),
                ]
            );
        });
    }
3255
    // Verifies that ignore statuses (from both the worktree's own .gitignore
    // and an ancestor's) are applied on the initial scan and maintained when
    // new files are created afterwards.
    #[gpui::test]
    async fn test_rescan_with_gitignore(cx: &mut TestAppContext) {
        let parent_dir = temp_tree(json!({
            ".gitignore": "ancestor-ignored-file1\nancestor-ignored-file2\n",
            "tree": {
                ".git": {},
                ".gitignore": "ignored-dir\n",
                "tracked-dir": {
                    "tracked-file1": "",
                    "ancestor-ignored-file1": "",
                },
                "ignored-dir": {
                    "ignored-file1": ""
                }
            }
        }));
        let dir = parent_dir.path().join("tree");

        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));

        let tree = Worktree::local(
            client,
            dir.as_path(),
            true,
            Arc::new(RealFs),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;
        tree.flush_fs_events(cx).await;
        // Initial scan: statuses reflect both .gitignore files.
        cx.read(|cx| {
            let tree = tree.read(cx);
            assert!(
                !tree
                    .entry_for_path("tracked-dir/tracked-file1")
                    .unwrap()
                    .is_ignored
            );
            assert!(
                tree.entry_for_path("tracked-dir/ancestor-ignored-file1")
                    .unwrap()
                    .is_ignored
            );
            assert!(
                tree.entry_for_path("ignored-dir/ignored-file1")
                    .unwrap()
                    .is_ignored
            );
        });

        // Files created after the initial scan get the same treatment.
        std::fs::write(dir.join("tracked-dir/tracked-file2"), "").unwrap();
        std::fs::write(dir.join("tracked-dir/ancestor-ignored-file2"), "").unwrap();
        std::fs::write(dir.join("ignored-dir/ignored-file2"), "").unwrap();
        tree.flush_fs_events(cx).await;
        cx.read(|cx| {
            let tree = tree.read(cx);
            assert!(
                !tree
                    .entry_for_path("tracked-dir/tracked-file2")
                    .unwrap()
                    .is_ignored
            );
            assert!(
                tree.entry_for_path("tracked-dir/ancestor-ignored-file2")
                    .unwrap()
                    .is_ignored
            );
            assert!(
                tree.entry_for_path("ignored-dir/ignored-file2")
                    .unwrap()
                    .is_ignored
            );
            assert!(tree.entry_for_path(".git").unwrap().is_ignored);
        });
    }
3334
    // Verifies that paths map to their innermost containing git repository,
    // that changes inside a `.git` directory bump the repo's scan id, and
    // that removing `.git` drops the repository.
    #[gpui::test]
    async fn test_git_repository_for_path(cx: &mut TestAppContext) {
        let root = temp_tree(json!({
            "dir1": {
                ".git": {},
                "deps": {
                    "dep1": {
                        ".git": {},
                        "src": {
                            "a.txt": ""
                        }
                    }
                },
                "src": {
                    "b.txt": ""
                }
            },
            "c.txt": "",
        }));

        let http_client = FakeHttpClient::with_404_response();
        let client = cx.read(|cx| Client::new(http_client, cx));
        let tree = Worktree::local(
            client,
            root.path(),
            true,
            Arc::new(RealFs),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();

        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;
        tree.flush_fs_events(cx).await;

        tree.read_with(cx, |tree, _cx| {
            let tree = tree.as_local().unwrap();

            // A file outside any repository has no repo.
            assert!(tree.repo_for("c.txt".as_ref()).is_none());

            let repo = tree.repo_for("dir1/src/b.txt".as_ref()).unwrap();
            assert_eq!(repo.content_path.as_ref(), Path::new("dir1"));
            assert_eq!(repo.git_dir_path.as_ref(), Path::new("dir1/.git"));

            // A nested repository takes precedence over its ancestor.
            let repo = tree.repo_for("dir1/deps/dep1/src/a.txt".as_ref()).unwrap();
            assert_eq!(repo.content_path.as_ref(), Path::new("dir1/deps/dep1"));
            assert_eq!(repo.git_dir_path.as_ref(), Path::new("dir1/deps/dep1/.git"),);
        });

        let original_scan_id = tree.read_with(cx, |tree, _cx| {
            let tree = tree.as_local().unwrap();
            tree.repo_for("dir1/src/b.txt".as_ref()).unwrap().scan_id
        });

        // Writing inside `.git` should bump the repository's scan id.
        std::fs::write(root.path().join("dir1/.git/random_new_file"), "hello").unwrap();
        tree.flush_fs_events(cx).await;

        tree.read_with(cx, |tree, _cx| {
            let tree = tree.as_local().unwrap();
            let new_scan_id = tree.repo_for("dir1/src/b.txt".as_ref()).unwrap().scan_id;
            assert_ne!(
                original_scan_id, new_scan_id,
                "original {original_scan_id}, new {new_scan_id}"
            );
        });

        // Deleting `.git` should remove the repository entirely.
        std::fs::remove_dir_all(root.path().join("dir1/.git")).unwrap();
        tree.flush_fs_events(cx).await;

        tree.read_with(cx, |tree, _cx| {
            let tree = tree.as_local().unwrap();

            assert!(tree.repo_for("dir1/src/b.txt".as_ref()).is_none());
        });
    }
3412
3413 #[test]
3414 fn test_changed_repos() {
3415 fn fake_entry(git_dir_path: impl AsRef<Path>, scan_id: usize) -> GitRepositoryEntry {
3416 GitRepositoryEntry {
3417 repo: Arc::new(Mutex::new(FakeGitRepository::default())),
3418 scan_id,
3419 content_path: git_dir_path.as_ref().parent().unwrap().into(),
3420 git_dir_path: git_dir_path.as_ref().into(),
3421 }
3422 }
3423
3424 let prev_repos: Vec<GitRepositoryEntry> = vec![
3425 fake_entry("/.git", 0),
3426 fake_entry("/a/.git", 0),
3427 fake_entry("/a/b/.git", 0),
3428 ];
3429
3430 let new_repos: Vec<GitRepositoryEntry> = vec![
3431 fake_entry("/a/.git", 1),
3432 fake_entry("/a/b/.git", 0),
3433 fake_entry("/a/c/.git", 0),
3434 ];
3435
3436 let res = LocalWorktree::changed_repos(&prev_repos, &new_repos);
3437
3438 // Deletion retained
3439 assert!(res
3440 .iter()
3441 .find(|repo| repo.git_dir_path.as_ref() == Path::new("/.git") && repo.scan_id == 0)
3442 .is_some());
3443
3444 // Update retained
3445 assert!(res
3446 .iter()
3447 .find(|repo| repo.git_dir_path.as_ref() == Path::new("/a/.git") && repo.scan_id == 1)
3448 .is_some());
3449
3450 // Addition retained
3451 assert!(res
3452 .iter()
3453 .find(|repo| repo.git_dir_path.as_ref() == Path::new("/a/c/.git") && repo.scan_id == 0)
3454 .is_some());
3455
3456 // Nochange, not retained
3457 assert!(res
3458 .iter()
3459 .find(|repo| repo.git_dir_path.as_ref() == Path::new("/a/b/.git") && repo.scan_id == 0)
3460 .is_none());
3461 }
3462
3463 #[gpui::test]
3464 async fn test_write_file(cx: &mut TestAppContext) {
3465 let dir = temp_tree(json!({
3466 ".git": {},
3467 ".gitignore": "ignored-dir\n",
3468 "tracked-dir": {},
3469 "ignored-dir": {}
3470 }));
3471
3472 let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
3473
3474 let tree = Worktree::local(
3475 client,
3476 dir.path(),
3477 true,
3478 Arc::new(RealFs),
3479 Default::default(),
3480 &mut cx.to_async(),
3481 )
3482 .await
3483 .unwrap();
3484 cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
3485 .await;
3486 tree.flush_fs_events(cx).await;
3487
3488 tree.update(cx, |tree, cx| {
3489 tree.as_local().unwrap().write_file(
3490 Path::new("tracked-dir/file.txt"),
3491 "hello".into(),
3492 Default::default(),
3493 cx,
3494 )
3495 })
3496 .await
3497 .unwrap();
3498 tree.update(cx, |tree, cx| {
3499 tree.as_local().unwrap().write_file(
3500 Path::new("ignored-dir/file.txt"),
3501 "world".into(),
3502 Default::default(),
3503 cx,
3504 )
3505 })
3506 .await
3507 .unwrap();
3508
3509 tree.read_with(cx, |tree, _| {
3510 let tracked = tree.entry_for_path("tracked-dir/file.txt").unwrap();
3511 let ignored = tree.entry_for_path("ignored-dir/file.txt").unwrap();
3512 assert!(!tracked.is_ignored);
3513 assert!(ignored.is_ignored);
3514 });
3515 }
3516
3517 #[gpui::test(iterations = 30)]
3518 async fn test_create_directory_during_initial_scan(cx: &mut TestAppContext) {
3519 let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
3520
3521 let fs = FakeFs::new(cx.background());
3522 fs.insert_tree(
3523 "/root",
3524 json!({
3525 "b": {},
3526 "c": {},
3527 "d": {},
3528 }),
3529 )
3530 .await;
3531
3532 let tree = Worktree::local(
3533 client,
3534 "/root".as_ref(),
3535 true,
3536 fs,
3537 Default::default(),
3538 &mut cx.to_async(),
3539 )
3540 .await
3541 .unwrap();
3542
3543 let mut snapshot1 = tree.update(cx, |tree, _| tree.as_local().unwrap().snapshot());
3544
3545 let entry = tree
3546 .update(cx, |tree, cx| {
3547 tree.as_local_mut()
3548 .unwrap()
3549 .create_entry("a/e".as_ref(), true, cx)
3550 })
3551 .await
3552 .unwrap();
3553 assert!(entry.is_dir());
3554
3555 cx.foreground().run_until_parked();
3556 tree.read_with(cx, |tree, _| {
3557 assert_eq!(tree.entry_for_path("a/e").unwrap().kind, EntryKind::Dir);
3558 });
3559
3560 let snapshot2 = tree.update(cx, |tree, _| tree.as_local().unwrap().snapshot());
3561 let update = snapshot2.build_update(&snapshot1, 0, 0, true);
3562 snapshot1.apply_remote_update(update).unwrap();
3563 assert_eq!(snapshot1.to_vec(true), snapshot2.to_vec(true),);
3564 }
3565
    #[gpui::test(iterations = 100)]
    async fn test_random_worktree_operations_during_initial_scan(
        cx: &mut TestAppContext,
        mut rng: StdRng,
    ) {
        // Number of random worktree mutations to apply and number of random
        // filesystem entries to seed the tree with; both overridable through
        // environment variables to reproduce failures at other sizes.
        let operations = env::var("OPERATIONS")
            .map(|o| o.parse().unwrap())
            .unwrap_or(5);
        let initial_entries = env::var("INITIAL_ENTRIES")
            .map(|o| o.parse().unwrap())
            .unwrap_or(20);

        // Seed the fake filesystem with a random tree.
        let root_dir = Path::new("/test");
        let fs = FakeFs::new(cx.background()) as Arc<dyn Fs>;
        fs.as_fake().insert_tree(root_dir, json!({})).await;
        for _ in 0..initial_entries {
            randomly_mutate_fs(&fs, root_dir, 1.0, &mut rng).await;
        }
        log::info!("generated initial tree");

        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
        let worktree = Worktree::local(
            client.clone(),
            root_dir,
            true,
            fs.clone(),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();

        // Snapshot taken before the initial scan has finished; it is kept in
        // sync below via incremental `apply_remote_update` calls.
        let mut snapshot = worktree.update(cx, |tree, _| tree.as_local().unwrap().snapshot());

        for _ in 0..operations {
            // Mutate the worktree through its public API while the background
            // scanner may still be running; errors (e.g. from racing
            // operations) are logged rather than treated as fatal.
            worktree
                .update(cx, |worktree, cx| {
                    randomly_mutate_worktree(worktree, &mut rng, cx)
                })
                .await
                .log_err();
            worktree.read_with(cx, |tree, _| {
                tree.as_local().unwrap().snapshot.check_invariants()
            });

            // Occasionally verify that an incremental update brings the stale
            // snapshot into agreement with the current one.
            if rng.gen_bool(0.6) {
                let new_snapshot =
                    worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
                let update = new_snapshot.build_update(&snapshot, 0, 0, true);
                snapshot.apply_remote_update(update.clone()).unwrap();
                assert_eq!(
                    snapshot.to_vec(true),
                    new_snapshot.to_vec(true),
                    "incorrect snapshot after update {:?}",
                    update
                );
            }
        }

        // Let the initial scan finish, then re-check invariants and verify
        // one final incremental update converges.
        worktree
            .update(cx, |tree, _| tree.as_local_mut().unwrap().scan_complete())
            .await;
        worktree.read_with(cx, |tree, _| {
            tree.as_local().unwrap().snapshot.check_invariants()
        });

        let new_snapshot = worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
        let update = new_snapshot.build_update(&snapshot, 0, 0, true);
        snapshot.apply_remote_update(update.clone()).unwrap();
        assert_eq!(
            snapshot.to_vec(true),
            new_snapshot.to_vec(true),
            "incorrect snapshot after update {:?}",
            update
        );
    }
3642
    #[gpui::test(iterations = 100)]
    async fn test_random_worktree_changes(cx: &mut TestAppContext, mut rng: StdRng) {
        // Mutation count and initial tree size, overridable via environment
        // variables to reproduce failures at other sizes.
        let operations = env::var("OPERATIONS")
            .map(|o| o.parse().unwrap())
            .unwrap_or(40);
        let initial_entries = env::var("INITIAL_ENTRIES")
            .map(|o| o.parse().unwrap())
            .unwrap_or(20);

        // Seed the fake filesystem with a random tree.
        let root_dir = Path::new("/test");
        let fs = FakeFs::new(cx.background()) as Arc<dyn Fs>;
        fs.as_fake().insert_tree(root_dir, json!({})).await;
        for _ in 0..initial_entries {
            randomly_mutate_fs(&fs, root_dir, 1.0, &mut rng).await;
        }
        log::info!("generated initial tree");

        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
        let worktree = Worktree::local(
            client.clone(),
            root_dir,
            true,
            fs.clone(),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();

        worktree
            .update(cx, |tree, _| tree.as_local_mut().unwrap().scan_complete())
            .await;

        // After the initial scan is complete, the `UpdatedEntries` event can
        // be used to follow along with all changes to the worktree's snapshot.
        worktree.update(cx, |tree, cx| {
            // Maintain a mirror of the worktree's sorted path list by applying
            // each reported change, then assert the mirror matches the real
            // path list after every event.
            let mut paths = tree
                .as_local()
                .unwrap()
                .paths()
                .cloned()
                .collect::<Vec<_>>();

            cx.subscribe(&worktree, move |tree, _, event, _| {
                if let Event::UpdatedEntries(changes) = event {
                    for (path, change_type) in changes.iter() {
                        let path = path.clone();
                        // Position of the path in the sorted mirror, whether
                        // it is currently present (Ok) or not (Err).
                        let ix = match paths.binary_search(&path) {
                            Ok(ix) | Err(ix) => ix,
                        };
                        match change_type {
                            PathChange::Added => {
                                // An "added" path must not already be present.
                                assert_ne!(paths.get(ix), Some(&path));
                                paths.insert(ix, path);
                            }
                            PathChange::Removed => {
                                // A "removed" path must currently be present.
                                assert_eq!(paths.get(ix), Some(&path));
                                paths.remove(ix);
                            }
                            PathChange::Updated => {
                                // An "updated" path must currently be present.
                                assert_eq!(paths.get(ix), Some(&path));
                            }
                            PathChange::AddedOrUpdated => {
                                // Insert only if not already present.
                                if paths[ix] != path {
                                    paths.insert(ix, path);
                                }
                            }
                        }
                    }
                    let new_paths = tree.paths().cloned().collect::<Vec<_>>();
                    assert_eq!(paths, new_paths, "incorrect changes: {:?}", changes);
                }
            })
            .detach();
        });

        // Interleave FS mutations with partial flushes of buffered FS events,
        // exercising the scanner's handling of batched/delayed notifications.
        // Note the loop stops when `mutations_len` reaches 1.
        let mut snapshots = Vec::new();
        let mut mutations_len = operations;
        while mutations_len > 1 {
            randomly_mutate_fs(&fs, root_dir, 1.0, &mut rng).await;
            let buffered_event_count = fs.as_fake().buffered_event_count().await;
            if buffered_event_count > 0 && rng.gen_bool(0.3) {
                // Deliver only a prefix of the buffered events.
                let len = rng.gen_range(0..=buffered_event_count);
                log::info!("flushing {} events", len);
                fs.as_fake().flush_events(len).await;
            } else {
                randomly_mutate_fs(&fs, root_dir, 0.6, &mut rng).await;
                mutations_len -= 1;
            }

            cx.foreground().run_until_parked();
            // Occasionally store an intermediate snapshot; each one is later
            // verified against the final state via incremental updates.
            if rng.gen_bool(0.2) {
                log::info!("storing snapshot {}", snapshots.len());
                let snapshot =
                    worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
                snapshots.push(snapshot);
            }
        }

        // Deliver all remaining events and let the scanner settle.
        log::info!("quiescing");
        fs.as_fake().flush_events(usize::MAX).await;
        cx.foreground().run_until_parked();
        let snapshot = worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
        snapshot.check_invariants();

        // A fresh worktree scanned from scratch must agree with the snapshot
        // produced by incremental scanning.
        {
            let new_worktree = Worktree::local(
                client.clone(),
                root_dir,
                true,
                fs.clone(),
                Default::default(),
                &mut cx.to_async(),
            )
            .await
            .unwrap();
            new_worktree
                .update(cx, |tree, _| tree.as_local_mut().unwrap().scan_complete())
                .await;
            let new_snapshot =
                new_worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
            assert_eq!(snapshot.to_vec(true), new_snapshot.to_vec(true));
        }

        // Each stored intermediate snapshot must converge to the final state
        // when the corresponding update is applied.
        for (i, mut prev_snapshot) in snapshots.into_iter().enumerate() {
            let include_ignored = rng.gen::<bool>();
            if !include_ignored {
                // Strip ignored entries from the stored snapshot so it matches
                // what a remote peer that never received them would hold.
                let mut entries_by_path_edits = Vec::new();
                let mut entries_by_id_edits = Vec::new();
                for entry in prev_snapshot
                    .entries_by_id
                    .cursor::<()>()
                    .filter(|e| e.is_ignored)
                {
                    entries_by_path_edits.push(Edit::Remove(PathKey(entry.path.clone())));
                    entries_by_id_edits.push(Edit::Remove(entry.id));
                }

                prev_snapshot
                    .entries_by_path
                    .edit(entries_by_path_edits, &());
                prev_snapshot.entries_by_id.edit(entries_by_id_edits, &());
            }

            let update = snapshot.build_update(&prev_snapshot, 0, 0, include_ignored);
            prev_snapshot.apply_remote_update(update.clone()).unwrap();
            assert_eq!(
                prev_snapshot.to_vec(include_ignored),
                snapshot.to_vec(include_ignored),
                "wrong update for snapshot {i}. update: {:?}",
                update
            );
        }
    }
3797
    // Applies one random mutation to the worktree through its public API —
    // roughly one third each: delete an entry, rename an entry, create/
    // overwrite — and returns a task that resolves when the operation is done.
    fn randomly_mutate_worktree(
        worktree: &mut Worktree,
        rng: &mut impl Rng,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<()>> {
        let worktree = worktree.as_local_mut().unwrap();
        let snapshot = worktree.snapshot();
        // Pick any non-ignored entry as the target of the mutation.
        let entry = snapshot.entries(false).choose(rng).unwrap();

        match rng.gen_range(0_u32..100) {
            // Delete the entry (guarded: never delete the worktree root).
            0..=33 if entry.path.as_ref() != Path::new("") => {
                log::info!("deleting entry {:?} ({})", entry.path, entry.id.0);
                worktree.delete_entry(entry.id, cx).unwrap()
            }
            // Rename the entry (same guard; a root entry falls through to the
            // create/overwrite arm below).
            ..=66 if entry.path.as_ref() != Path::new("") => {
                // Destination parent: another random entry's directory.
                let other_entry = snapshot.entries(false).choose(rng).unwrap();
                let new_parent_path = if other_entry.is_dir() {
                    other_entry.path.clone()
                } else {
                    other_entry.path.parent().unwrap().into()
                };
                let mut new_path = new_parent_path.join(gen_name(rng));
                // Avoid renaming an entry into its own subtree; fall back to a
                // fresh name at the worktree root.
                if new_path.starts_with(&entry.path) {
                    new_path = gen_name(rng).into();
                }

                log::info!(
                    "renaming entry {:?} ({}) to {:?}",
                    entry.path,
                    entry.id.0,
                    new_path
                );
                let task = worktree.rename_entry(entry.id, new_path, cx).unwrap();
                cx.foreground().spawn(async move {
                    task.await?;
                    Ok(())
                })
            }
            // Create a child (for directories) or overwrite the file's
            // contents with an empty string (for files).
            _ => {
                let task = if entry.is_dir() {
                    let child_path = entry.path.join(gen_name(rng));
                    let is_dir = rng.gen_bool(0.3);
                    log::info!(
                        "creating {} at {:?}",
                        if is_dir { "dir" } else { "file" },
                        child_path,
                    );
                    worktree.create_entry(child_path, is_dir, cx)
                } else {
                    log::info!("overwriting file {:?} ({})", entry.path, entry.id.0);
                    worktree.write_file(entry.path.clone(), "".into(), Default::default(), cx)
                };
                cx.foreground().spawn(async move {
                    task.await?;
                    Ok(())
                })
            }
        }
    }
3857
    // Applies one random mutation directly to the fake filesystem under
    // `root_path`: with probability `insertion_probability` creates a new file
    // or directory; otherwise (5% of the remainder) writes a .gitignore, or
    // else renames/deletes an existing entry.
    async fn randomly_mutate_fs(
        fs: &Arc<dyn Fs>,
        root_path: &Path,
        insertion_probability: f64,
        rng: &mut impl Rng,
    ) {
        // Partition the existing paths under the root into files and dirs.
        // Note `root_path` itself ends up in `dirs`.
        let mut files = Vec::new();
        let mut dirs = Vec::new();
        for path in fs.as_fake().paths() {
            if path.starts_with(root_path) {
                if fs.is_file(&path).await {
                    files.push(path);
                } else {
                    dirs.push(path);
                }
            }
        }

        // Insertion branch. Also forced when the tree is effectively empty
        // (no files and only the root dir), so the other branches always have
        // something to operate on.
        if (files.is_empty() && dirs.len() == 1) || rng.gen_bool(insertion_probability) {
            let path = dirs.choose(rng).unwrap();
            let new_path = path.join(gen_name(rng));

            if rng.gen() {
                log::info!(
                    "creating dir {:?}",
                    new_path.strip_prefix(root_path).unwrap()
                );
                fs.create_dir(&new_path).await.unwrap();
            } else {
                log::info!(
                    "creating file {:?}",
                    new_path.strip_prefix(root_path).unwrap()
                );
                fs.create_file(&new_path, Default::default()).await.unwrap();
            }
        } else if rng.gen_bool(0.05) {
            // Write a .gitignore into a random directory, listing a random
            // subset of the files and dirs beneath it.
            let ignore_dir_path = dirs.choose(rng).unwrap();
            let ignore_path = ignore_dir_path.join(&*GITIGNORE);

            // `subdirs` always contains at least `ignore_dir_path` itself
            // (starts_with is reflexive), so the range below is non-empty.
            let subdirs = dirs
                .iter()
                .filter(|d| d.starts_with(&ignore_dir_path))
                .cloned()
                .collect::<Vec<_>>();
            let subfiles = files
                .iter()
                .filter(|d| d.starts_with(&ignore_dir_path))
                .cloned()
                .collect::<Vec<_>>();
            let files_to_ignore = {
                // Inclusive upper bound: may ignore all files.
                let len = rng.gen_range(0..=subfiles.len());
                subfiles.choose_multiple(rng, len)
            };
            let dirs_to_ignore = {
                // Exclusive upper bound: never ignores every dir in the list.
                let len = rng.gen_range(0..subdirs.len());
                subdirs.choose_multiple(rng, len)
            };

            // Build the ignore file: one pattern per line, each path relative
            // to the directory containing the .gitignore.
            let mut ignore_contents = String::new();
            for path_to_ignore in files_to_ignore.chain(dirs_to_ignore) {
                writeln!(
                    ignore_contents,
                    "{}",
                    path_to_ignore
                        .strip_prefix(&ignore_dir_path)
                        .unwrap()
                        .to_str()
                        .unwrap()
                )
                .unwrap();
            }
            log::info!(
                "creating gitignore {:?} with contents:\n{}",
                ignore_path.strip_prefix(&root_path).unwrap(),
                ignore_contents
            );
            fs.save(
                &ignore_path,
                &ignore_contents.as_str().into(),
                Default::default(),
            )
            .await
            .unwrap();
        } else {
            // Rename or delete an existing entry. `dirs[1..]` skips the root
            // directory so it can never be renamed or removed.
            let old_path = {
                let file_path = files.choose(rng);
                let dir_path = dirs[1..].choose(rng);
                file_path.into_iter().chain(dir_path).choose(rng).unwrap()
            };

            let is_rename = rng.gen();
            if is_rename {
                // Pick a destination parent that is not inside the subtree
                // being moved.
                let new_path_parent = dirs
                    .iter()
                    .filter(|d| !d.starts_with(old_path))
                    .choose(rng)
                    .unwrap();

                // Occasionally replace the destination directory entirely:
                // remove it first, then rename the source onto its path.
                let overwrite_existing_dir =
                    !old_path.starts_with(&new_path_parent) && rng.gen_bool(0.3);
                let new_path = if overwrite_existing_dir {
                    fs.remove_dir(
                        &new_path_parent,
                        RemoveOptions {
                            recursive: true,
                            ignore_if_not_exists: true,
                        },
                    )
                    .await
                    .unwrap();
                    new_path_parent.to_path_buf()
                } else {
                    new_path_parent.join(gen_name(rng))
                };

                log::info!(
                    "renaming {:?} to {}{:?}",
                    old_path.strip_prefix(&root_path).unwrap(),
                    if overwrite_existing_dir {
                        "overwrite "
                    } else {
                        ""
                    },
                    new_path.strip_prefix(&root_path).unwrap()
                );
                fs.rename(
                    &old_path,
                    &new_path,
                    fs::RenameOptions {
                        overwrite: true,
                        ignore_if_exists: true,
                    },
                )
                .await
                .unwrap();
            } else if fs.is_file(&old_path).await {
                log::info!(
                    "deleting file {:?}",
                    old_path.strip_prefix(&root_path).unwrap()
                );
                fs.remove_file(old_path, Default::default()).await.unwrap();
            } else {
                log::info!(
                    "deleting dir {:?}",
                    old_path.strip_prefix(&root_path).unwrap()
                );
                fs.remove_dir(
                    &old_path,
                    RemoveOptions {
                        recursive: true,
                        ignore_if_not_exists: true,
                    },
                )
                .await
                .unwrap();
            }
        }
    }
4016
4017 fn gen_name(rng: &mut impl Rng) -> String {
4018 (0..6)
4019 .map(|_| rng.sample(rand::distributions::Alphanumeric))
4020 .map(char::from)
4021 .collect()
4022 }
4023
4024 impl LocalSnapshot {
4025 fn check_invariants(&self) {
4026 assert_eq!(
4027 self.entries_by_path
4028 .cursor::<()>()
4029 .map(|e| (&e.path, e.id))
4030 .collect::<Vec<_>>(),
4031 self.entries_by_id
4032 .cursor::<()>()
4033 .map(|e| (&e.path, e.id))
4034 .collect::<collections::BTreeSet<_>>()
4035 .into_iter()
4036 .collect::<Vec<_>>(),
4037 "entries_by_path and entries_by_id are inconsistent"
4038 );
4039
4040 let mut files = self.files(true, 0);
4041 let mut visible_files = self.files(false, 0);
4042 for entry in self.entries_by_path.cursor::<()>() {
4043 if entry.is_file() {
4044 assert_eq!(files.next().unwrap().inode, entry.inode);
4045 if !entry.is_ignored {
4046 assert_eq!(visible_files.next().unwrap().inode, entry.inode);
4047 }
4048 }
4049 }
4050
4051 assert!(files.next().is_none());
4052 assert!(visible_files.next().is_none());
4053
4054 let mut bfs_paths = Vec::new();
4055 let mut stack = vec![Path::new("")];
4056 while let Some(path) = stack.pop() {
4057 bfs_paths.push(path);
4058 let ix = stack.len();
4059 for child_entry in self.child_entries(path) {
4060 stack.insert(ix, &child_entry.path);
4061 }
4062 }
4063
4064 let dfs_paths_via_iter = self
4065 .entries_by_path
4066 .cursor::<()>()
4067 .map(|e| e.path.as_ref())
4068 .collect::<Vec<_>>();
4069 assert_eq!(bfs_paths, dfs_paths_via_iter);
4070
4071 let dfs_paths_via_traversal = self
4072 .entries(true)
4073 .map(|e| e.path.as_ref())
4074 .collect::<Vec<_>>();
4075 assert_eq!(dfs_paths_via_traversal, dfs_paths_via_iter);
4076
4077 for ignore_parent_abs_path in self.ignores_by_parent_abs_path.keys() {
4078 let ignore_parent_path =
4079 ignore_parent_abs_path.strip_prefix(&self.abs_path).unwrap();
4080 assert!(self.entry_for_path(&ignore_parent_path).is_some());
4081 assert!(self
4082 .entry_for_path(ignore_parent_path.join(&*GITIGNORE))
4083 .is_some());
4084 }
4085 }
4086
4087 fn to_vec(&self, include_ignored: bool) -> Vec<(&Path, u64, bool)> {
4088 let mut paths = Vec::new();
4089 for entry in self.entries_by_path.cursor::<()>() {
4090 if include_ignored || !entry.is_ignored {
4091 paths.push((entry.path.as_ref(), entry.inode, entry.is_ignored));
4092 }
4093 }
4094 paths.sort_by(|a, b| a.0.cmp(b.0));
4095 paths
4096 }
4097 }
4098}