1use crate::{
2 copy_recursive, ignore::IgnoreStack, DiagnosticSummary, ProjectEntryId, RemoveOptions,
3};
4use ::ignore::gitignore::{Gitignore, GitignoreBuilder};
5use anyhow::{anyhow, Context, Result};
6use client::{proto, Client};
7use clock::ReplicaId;
8use collections::{HashMap, VecDeque};
9use fs::{repository::GitRepository, Fs, LineEnding};
10use futures::{
11 channel::{
12 mpsc::{self, UnboundedSender},
13 oneshot,
14 },
15 select_biased,
16 task::Poll,
17 Stream, StreamExt,
18};
19use fuzzy::CharBag;
20use git::{DOT_GIT, GITIGNORE};
21use gpui::{executor, AppContext, AsyncAppContext, Entity, ModelContext, ModelHandle, Task};
22use language::{
23 proto::{
24 deserialize_fingerprint, deserialize_version, serialize_fingerprint, serialize_line_ending,
25 serialize_version,
26 },
27 Buffer, DiagnosticEntry, File as _, PointUtf16, Rope, RopeFingerprint, Unclipped,
28};
29use lsp::LanguageServerId;
30use parking_lot::Mutex;
31use postage::{
32 barrier,
33 prelude::{Sink as _, Stream as _},
34 watch,
35};
36use smol::channel::{self, Sender};
37use std::{
38 any::Any,
39 cmp::{self, Ordering},
40 convert::TryFrom,
41 ffi::OsStr,
42 fmt,
43 future::Future,
44 mem,
45 ops::{Deref, DerefMut},
46 path::{Path, PathBuf},
47 pin::Pin,
48 sync::{
49 atomic::{AtomicUsize, Ordering::SeqCst},
50 Arc,
51 },
52 time::{Duration, SystemTime},
53};
54use sum_tree::{Bias, Edit, SeekTarget, SumTree, TreeSet};
55use util::{paths::HOME, ResultExt, TryFutureExt};
56
/// Identifies a worktree within a project. For local worktrees this is
/// derived from the GPUI model id (see `WorktreeId::from_usize` in
/// `Worktree::local`); remote worktrees receive it from the host via
/// `proto::WorktreeMetadata`.
#[derive(Copy, Clone, PartialEq, Eq, Debug, Hash, PartialOrd, Ord)]
pub struct WorktreeId(usize);
59
/// A directory (or single file) tracked by a project: either one backed by
/// the local filesystem, or a replica of a collaborator's worktree kept in
/// sync over RPC.
pub enum Worktree {
    Local(LocalWorktree),
    Remote(RemoteWorktree),
}
64
/// A worktree backed by the local filesystem, kept up to date by a
/// background scanner task.
pub struct LocalWorktree {
    // Latest snapshot of the filesystem state, replaced by `set_snapshot`
    // whenever the background scanner reports an update.
    snapshot: LocalSnapshot,
    // Sends batches of absolute paths to the background scanner for a
    // targeted rescan; the paired barrier sender is dropped once the scanner
    // has processed the batch, letting callers await completion.
    path_changes_tx: channel::Sender<(Vec<PathBuf>, barrier::Sender)>,
    // Watch channel reporting whether a scan is currently in progress
    // (consumed by `scan_complete`).
    is_scanning: (watch::Sender<bool>, watch::Receiver<bool>),
    // Keeps the background scanner alive for the lifetime of this worktree.
    _background_scanner_task: Task<()>,
    // Present while this worktree is shared with remote collaborators.
    share: Option<ShareState>,
    // Full diagnostics per path, stored as a vec sorted by language server id
    // (see the binary searches in `update_diagnostics`).
    diagnostics: HashMap<
        Arc<Path>,
        Vec<(
            LanguageServerId,
            Vec<DiagnosticEntry<Unclipped<PointUtf16>>>,
        )>,
    >,
    // Error/warning counts per path, per language server.
    diagnostic_summaries: HashMap<Arc<Path>, HashMap<LanguageServerId, DiagnosticSummary>>,
    client: Arc<Client>,
    fs: Arc<dyn Fs>,
    // Reported through `Worktree::is_visible`.
    visible: bool,
}
83
/// A replica of a worktree that lives on a remote host, updated by applying
/// `proto::UpdateWorktree` messages received over RPC.
pub struct RemoteWorktree {
    // Foreground copy of the latest synchronized snapshot.
    snapshot: Snapshot,
    // Snapshot mutated on a background task as updates arrive; the
    // foreground `snapshot` is cloned from this after each update.
    background_snapshot: Arc<Mutex<Snapshot>>,
    project_id: u64,
    client: Arc<Client>,
    // `None` once the host disconnects; incoming updates are then dropped.
    updates_tx: Option<UnboundedSender<proto::UpdateWorktree>>,
    // Oneshot senders resolved (in scan-id order) once the corresponding
    // scan has been observed — see `wait_for_snapshot`.
    snapshot_subscriptions: VecDeque<(usize, oneshot::Sender<()>)>,
    replica_id: ReplicaId,
    diagnostic_summaries: HashMap<Arc<Path>, HashMap<LanguageServerId, DiagnosticSummary>>,
    visible: bool,
    disconnected: bool,
}
96
/// An immutable view of a worktree's entries at a point in time. Entries are
/// stored both ordered by path and indexed by id so lookups can go either way.
#[derive(Clone)]
pub struct Snapshot {
    id: WorktreeId,
    abs_path: Arc<Path>,
    // Name of the root entry, used for fuzzy matching (see `Worktree::local`).
    root_name: String,
    // Lowercased character bag of `root_name`, precomputed for fuzzy matching.
    root_char_bag: CharBag,
    entries_by_path: SumTree<Entry>,
    entries_by_id: SumTree<PathEntry>,

    /// A number that increases every time the worktree begins scanning
    /// a set of paths from the filesystem. This scanning could be caused
    /// by some operation performed on the worktree, such as reading or
    /// writing a file, or by an event reported by the filesystem.
    scan_id: usize,

    /// The latest scan id that has completed, and whose preceding scans
    /// have all completed. The current `scan_id` could be more than one
    /// greater than the `completed_scan_id` if operations are performed
    /// on the worktree while it is processing a file-system event.
    completed_scan_id: usize,
}
118
/// A git repository discovered inside the worktree.
#[derive(Clone)]
pub struct GitRepositoryEntry {
    // Handle to the underlying repository; shared (and locked) because it is
    // accessed from background tasks (see `LocalWorktree::load`).
    pub(crate) repo: Arc<Mutex<dyn GitRepository>>,

    // Scan id at which this repository entry was last updated; used by
    // `LocalWorktree::changed_repos` to detect changes between snapshots.
    pub(crate) scan_id: usize,
    // Path to folder containing the .git file or directory
    pub(crate) content_path: Arc<Path>,
    // Path to the actual .git folder.
    // Note: if .git is a file, this points to the folder indicated by the .git file
    pub(crate) git_dir_path: Arc<Path>,
}
130
131impl std::fmt::Debug for GitRepositoryEntry {
132 fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
133 f.debug_struct("GitRepositoryEntry")
134 .field("content_path", &self.content_path)
135 .field("git_dir_path", &self.git_dir_path)
136 .finish()
137 }
138}
139
/// Snapshot state that only exists for local worktrees: gitignore data, git
/// repositories, and bookkeeping used while scanning.
#[derive(Debug)]
pub struct LocalSnapshot {
    // Parsed `.gitignore` per parent directory (absolute path), paired with
    // the scan id at which it was last refreshed.
    ignores_by_parent_abs_path: HashMap<Arc<Path>, (Arc<Gitignore>, usize)>,
    git_repositories: Vec<GitRepositoryEntry>,
    // Maps filesystem inodes (u64) of removed entries back to their project
    // entry ids so re-created entries can keep a stable id.
    // NOTE(review): key semantics inferred from the types only — confirm.
    removed_entry_ids: HashMap<u64, ProjectEntryId>,
    // Shared counter used to allocate ids for newly discovered entries.
    next_entry_id: Arc<AtomicUsize>,
    // The portion of the snapshot that is shared with remote worktrees;
    // exposed via `Deref`/`DerefMut` below.
    snapshot: Snapshot,
}
148
149impl Clone for LocalSnapshot {
150 fn clone(&self) -> Self {
151 Self {
152 ignores_by_parent_abs_path: self.ignores_by_parent_abs_path.clone(),
153 git_repositories: self.git_repositories.iter().cloned().collect(),
154 removed_entry_ids: self.removed_entry_ids.clone(),
155 next_entry_id: self.next_entry_id.clone(),
156 snapshot: self.snapshot.clone(),
157 }
158 }
159}
160
// Allow a `LocalSnapshot` to be used anywhere a plain `Snapshot` is expected.
impl Deref for LocalSnapshot {
    type Target = Snapshot;

    fn deref(&self) -> &Self::Target {
        &self.snapshot
    }
}
168
// Mutable counterpart of the `Deref` impl above.
impl DerefMut for LocalSnapshot {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.snapshot
    }
}
174
/// Messages sent from the background scanner to the foreground worktree
/// (consumed in `Worktree::local`).
enum ScanState {
    /// A scan has begun.
    Started,
    /// The scanner produced a new snapshot.
    Updated {
        snapshot: LocalSnapshot,
        /// Paths that changed relative to the previous snapshot.
        changes: HashMap<Arc<Path>, PathChange>,
        /// Dropped once the update is applied, releasing anyone awaiting the
        /// rescan (see `path_changes_tx`).
        barrier: Option<barrier::Sender>,
        /// Whether the scanner is still working after this update.
        scanning: bool,
    },
}
184
/// State held while a local worktree is shared with remote collaborators.
struct ShareState {
    project_id: u64,
    /// Publishes each new snapshot to the `_maintain_remote_snapshot` task.
    snapshots_tx: watch::Sender<LocalSnapshot>,
    /// Signalled to retry sending updates after a send failure (see `share`).
    resume_updates: watch::Sender<()>,
    /// Background task that streams snapshot diffs to the remote peers.
    _maintain_remote_snapshot: Task<Option<()>>,
}
191
/// Events emitted by a `Worktree` model.
pub enum Event {
    /// Entries changed, with the kind of change per path.
    UpdatedEntries(HashMap<Arc<Path>, PathChange>),
    /// The set (or state) of git repositories in the worktree changed.
    UpdatedGitRepositories(Vec<GitRepositoryEntry>),
}
196
// Makes `Worktree` a GPUI model that emits `Event`s.
impl Entity for Worktree {
    type Event = Event;
}
200
201impl Worktree {
    /// Creates a local worktree rooted at `path`, spawning the background
    /// scanner that keeps its snapshot in sync with the filesystem.
    ///
    /// Fails if the root path cannot be stat'ed. `visible` controls whether
    /// the worktree is reported by `Worktree::is_visible`.
    pub async fn local(
        client: Arc<Client>,
        path: impl Into<Arc<Path>>,
        visible: bool,
        fs: Arc<dyn Fs>,
        next_entry_id: Arc<AtomicUsize>,
        cx: &mut AsyncAppContext,
    ) -> Result<ModelHandle<Self>> {
        // After determining whether the root entry is a file or a directory, populate the
        // snapshot's "root name", which will be used for the purpose of fuzzy matching.
        let abs_path = path.into();
        let metadata = fs
            .metadata(&abs_path)
            .await
            .context("failed to stat worktree path")?;

        Ok(cx.add_model(move |cx: &mut ModelContext<Worktree>| {
            let root_name = abs_path
                .file_name()
                .map_or(String::new(), |f| f.to_string_lossy().to_string());

            let mut snapshot = LocalSnapshot {
                ignores_by_parent_abs_path: Default::default(),
                git_repositories: Default::default(),
                removed_entry_ids: Default::default(),
                next_entry_id,
                snapshot: Snapshot {
                    id: WorktreeId::from_usize(cx.model_id()),
                    abs_path: abs_path.clone(),
                    root_name: root_name.clone(),
                    root_char_bag: root_name.chars().map(|c| c.to_ascii_lowercase()).collect(),
                    entries_by_path: Default::default(),
                    entries_by_id: Default::default(),
                    // Start at 1 so that scan id 0 can mean "never scanned".
                    scan_id: 1,
                    completed_scan_id: 0,
                },
            };

            // Seed the snapshot with a root entry if the path exists.
            if let Some(metadata) = metadata {
                snapshot.insert_entry(
                    Entry::new(
                        Arc::from(Path::new("")),
                        &metadata,
                        &snapshot.next_entry_id,
                        snapshot.root_char_bag,
                    ),
                    fs.as_ref(),
                );
            }

            let (path_changes_tx, path_changes_rx) = channel::unbounded();
            let (scan_states_tx, mut scan_states_rx) = mpsc::unbounded();

            // Foreground task: apply scanner state messages to this model.
            // Exits when either the scanner or the model is dropped.
            cx.spawn_weak(|this, mut cx| async move {
                while let Some((state, this)) = scan_states_rx.next().await.zip(this.upgrade(&cx)) {
                    this.update(&mut cx, |this, cx| {
                        let this = this.as_local_mut().unwrap();
                        match state {
                            ScanState::Started => {
                                *this.is_scanning.0.borrow_mut() = true;
                            }
                            ScanState::Updated {
                                snapshot,
                                changes,
                                barrier,
                                scanning,
                            } => {
                                *this.is_scanning.0.borrow_mut() = scanning;
                                this.set_snapshot(snapshot, cx);
                                cx.emit(Event::UpdatedEntries(changes));
                                // Dropping the barrier releases anyone
                                // awaiting this rescan (e.g. `refresh_entry`).
                                drop(barrier);
                            }
                        }
                        cx.notify();
                    });
                }
            })
            .detach();

            // Background task: watch the filesystem and run the scanner.
            let background_scanner_task = cx.background().spawn({
                let fs = fs.clone();
                let snapshot = snapshot.clone();
                let background = cx.background().clone();
                async move {
                    let events = fs.watch(&abs_path, Duration::from_millis(100)).await;
                    BackgroundScanner::new(
                        snapshot,
                        fs,
                        scan_states_tx,
                        background,
                        path_changes_rx,
                    )
                    .run(events)
                    .await;
                }
            });

            Worktree::Local(LocalWorktree {
                snapshot,
                is_scanning: watch::channel_with(true),
                share: None,
                path_changes_tx,
                _background_scanner_task: background_scanner_task,
                diagnostics: Default::default(),
                diagnostic_summaries: Default::default(),
                client,
                fs,
                visible,
            })
        }))
    }
313
    /// Creates a remote worktree replica from the metadata sent by the host.
    /// Updates arriving via `update_from_remote` are applied to a background
    /// snapshot and then mirrored into the foreground model.
    pub fn remote(
        project_remote_id: u64,
        replica_id: ReplicaId,
        worktree: proto::WorktreeMetadata,
        client: Arc<Client>,
        cx: &mut AppContext,
    ) -> ModelHandle<Self> {
        cx.add_model(|cx: &mut ModelContext<Self>| {
            let snapshot = Snapshot {
                id: WorktreeId(worktree.id as usize),
                abs_path: Arc::from(PathBuf::from(worktree.abs_path)),
                root_name: worktree.root_name.clone(),
                root_char_bag: worktree
                    .root_name
                    .chars()
                    .map(|c| c.to_ascii_lowercase())
                    .collect(),
                entries_by_path: Default::default(),
                entries_by_id: Default::default(),
                scan_id: 1,
                completed_scan_id: 0,
            };

            let (updates_tx, mut updates_rx) = mpsc::unbounded();
            let background_snapshot = Arc::new(Mutex::new(snapshot.clone()));
            let (mut snapshot_updated_tx, mut snapshot_updated_rx) = watch::channel();

            // Background task: apply incoming updates off the main thread and
            // notify the foreground via the watch channel.
            cx.background()
                .spawn({
                    let background_snapshot = background_snapshot.clone();
                    async move {
                        while let Some(update) = updates_rx.next().await {
                            if let Err(error) =
                                background_snapshot.lock().apply_remote_update(update)
                            {
                                log::error!("error applying worktree update: {}", error);
                            }
                            snapshot_updated_tx.send(()).await.ok();
                        }
                    }
                })
                .detach();

            // Foreground task: mirror the background snapshot into the model
            // and resolve any subscriptions whose scan id is now observed.
            cx.spawn_weak(|this, mut cx| async move {
                while (snapshot_updated_rx.recv().await).is_some() {
                    if let Some(this) = this.upgrade(&cx) {
                        this.update(&mut cx, |this, cx| {
                            let this = this.as_remote_mut().unwrap();
                            this.snapshot = this.background_snapshot.lock().clone();
                            cx.emit(Event::UpdatedEntries(Default::default()));
                            cx.notify();
                            // Subscriptions are kept sorted by scan id, so we
                            // can pop from the front until one isn't ready.
                            while let Some((scan_id, _)) = this.snapshot_subscriptions.front() {
                                if this.observed_snapshot(*scan_id) {
                                    let (_, tx) = this.snapshot_subscriptions.pop_front().unwrap();
                                    let _ = tx.send(());
                                } else {
                                    break;
                                }
                            }
                        });
                    } else {
                        break;
                    }
                }
            })
            .detach();

            Worktree::Remote(RemoteWorktree {
                project_id: project_remote_id,
                replica_id,
                snapshot: snapshot.clone(),
                background_snapshot,
                updates_tx: Some(updates_tx),
                snapshot_subscriptions: Default::default(),
                client: client.clone(),
                diagnostic_summaries: Default::default(),
                visible: worktree.visible,
                disconnected: false,
            })
        })
    }
395
396 pub fn as_local(&self) -> Option<&LocalWorktree> {
397 if let Worktree::Local(worktree) = self {
398 Some(worktree)
399 } else {
400 None
401 }
402 }
403
404 pub fn as_remote(&self) -> Option<&RemoteWorktree> {
405 if let Worktree::Remote(worktree) = self {
406 Some(worktree)
407 } else {
408 None
409 }
410 }
411
412 pub fn as_local_mut(&mut self) -> Option<&mut LocalWorktree> {
413 if let Worktree::Local(worktree) = self {
414 Some(worktree)
415 } else {
416 None
417 }
418 }
419
420 pub fn as_remote_mut(&mut self) -> Option<&mut RemoteWorktree> {
421 if let Worktree::Remote(worktree) = self {
422 Some(worktree)
423 } else {
424 None
425 }
426 }
427
428 pub fn is_local(&self) -> bool {
429 matches!(self, Worktree::Local(_))
430 }
431
432 pub fn is_remote(&self) -> bool {
433 !self.is_local()
434 }
435
    /// Returns a clone of the current snapshot, discarding the local-only
    /// extras (`LocalSnapshot` derefs to `Snapshot`).
    pub fn snapshot(&self) -> Snapshot {
        match self {
            Worktree::Local(worktree) => worktree.snapshot().snapshot,
            Worktree::Remote(worktree) => worktree.snapshot(),
        }
    }
442
    /// The id of the latest scan that has *begun* (see `Snapshot::scan_id`).
    pub fn scan_id(&self) -> usize {
        match self {
            Worktree::Local(worktree) => worktree.snapshot.scan_id,
            Worktree::Remote(worktree) => worktree.snapshot.scan_id,
        }
    }
449
    /// The id of the latest scan that has *completed*
    /// (see `Snapshot::completed_scan_id`).
    pub fn completed_scan_id(&self) -> usize {
        match self {
            Worktree::Local(worktree) => worktree.snapshot.completed_scan_id,
            Worktree::Remote(worktree) => worktree.snapshot.completed_scan_id,
        }
    }
456
    /// The `visible` flag this worktree was created with.
    pub fn is_visible(&self) -> bool {
        match self {
            Worktree::Local(worktree) => worktree.visible,
            Worktree::Remote(worktree) => worktree.visible,
        }
    }
463
    /// The replica id of this worktree; the local/host side is always 0.
    pub fn replica_id(&self) -> ReplicaId {
        match self {
            Worktree::Local(_) => 0,
            Worktree::Remote(worktree) => worktree.replica_id,
        }
    }
470
471 pub fn diagnostic_summaries(
472 &self,
473 ) -> impl Iterator<Item = (Arc<Path>, LanguageServerId, DiagnosticSummary)> + '_ {
474 match self {
475 Worktree::Local(worktree) => &worktree.diagnostic_summaries,
476 Worktree::Remote(worktree) => &worktree.diagnostic_summaries,
477 }
478 .iter()
479 .flat_map(|(path, summaries)| {
480 summaries
481 .iter()
482 .map(move |(&server_id, &summary)| (path.clone(), server_id, summary))
483 })
484 }
485
    /// The absolute path of the worktree root.
    pub fn abs_path(&self) -> Arc<Path> {
        match self {
            Worktree::Local(worktree) => worktree.abs_path.clone(),
            Worktree::Remote(worktree) => worktree.abs_path.clone(),
        }
    }
492}
493
494impl LocalWorktree {
    /// Whether `path` lies within this worktree's root (by path prefix only;
    /// no filesystem access or symlink resolution).
    pub fn contains_abs_path(&self, path: &Path) -> bool {
        path.starts_with(&self.abs_path)
    }
498
499 fn absolutize(&self, path: &Path) -> PathBuf {
500 if path.file_name().is_some() {
501 self.abs_path.join(path)
502 } else {
503 self.abs_path.to_path_buf()
504 }
505 }
506
    /// Loads the file at `path` into a new `Buffer` model with the given
    /// remote id, including its git diff base, and kicks off an initial
    /// git-diff computation.
    pub(crate) fn load_buffer(
        &mut self,
        id: u64,
        path: &Path,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<ModelHandle<Buffer>>> {
        let path = Arc::from(path);
        cx.spawn(move |this, mut cx| async move {
            let (file, contents, diff_base) = this
                .update(&mut cx, |t, cx| t.as_local().unwrap().load(&path, cx))
                .await?;
            // Build the text buffer off the main thread.
            let text_buffer = cx
                .background()
                .spawn(async move { text::Buffer::new(0, id, contents) })
                .await;
            Ok(cx.add_model(|cx| {
                let mut buffer = Buffer::build(text_buffer, diff_base, Some(Arc::new(file)));
                buffer.git_diff_recalc(cx);
                buffer
            }))
        })
    }
529
    /// Returns all diagnostics recorded for `path`, grouped by language
    /// server; empty if none are known.
    pub fn diagnostics_for_path(
        &self,
        path: &Path,
    ) -> Vec<(
        LanguageServerId,
        Vec<DiagnosticEntry<Unclipped<PointUtf16>>>,
    )> {
        self.diagnostics.get(path).cloned().unwrap_or_default()
    }
539
    /// Replaces the diagnostics produced by `server_id` for `worktree_path`,
    /// updates the per-path summary, and (when shared) forwards the new
    /// summary to collaborators.
    ///
    /// Returns `Ok(true)` if the summary changed in a way observers care
    /// about (i.e. it was non-empty before and/or after the update).
    pub fn update_diagnostics(
        &mut self,
        server_id: LanguageServerId,
        worktree_path: Arc<Path>,
        diagnostics: Vec<DiagnosticEntry<Unclipped<PointUtf16>>>,
        _: &mut ModelContext<Worktree>,
    ) -> Result<bool> {
        let summaries_by_server_id = self
            .diagnostic_summaries
            .entry(worktree_path.clone())
            .or_default();

        // Remove the previous summary up front; it is re-inserted below only
        // if the new diagnostics are non-empty.
        let old_summary = summaries_by_server_id
            .remove(&server_id)
            .unwrap_or_default();

        let new_summary = DiagnosticSummary::new(&diagnostics);
        if new_summary.is_empty() {
            // Drop this server's entry, and the whole path entry once no
            // server has diagnostics for it anymore.
            if let Some(diagnostics_by_server_id) = self.diagnostics.get_mut(&worktree_path) {
                if let Ok(ix) = diagnostics_by_server_id.binary_search_by_key(&server_id, |e| e.0) {
                    diagnostics_by_server_id.remove(ix);
                }
                if diagnostics_by_server_id.is_empty() {
                    self.diagnostics.remove(&worktree_path);
                }
            }
        } else {
            summaries_by_server_id.insert(server_id, new_summary);
            // The per-path vec is kept sorted by server id so binary search
            // works; insert or replace accordingly.
            let diagnostics_by_server_id =
                self.diagnostics.entry(worktree_path.clone()).or_default();
            match diagnostics_by_server_id.binary_search_by_key(&server_id, |e| e.0) {
                Ok(ix) => {
                    diagnostics_by_server_id[ix] = (server_id, diagnostics);
                }
                Err(ix) => {
                    diagnostics_by_server_id.insert(ix, (server_id, diagnostics));
                }
            }
        }

        if !old_summary.is_empty() || !new_summary.is_empty() {
            if let Some(share) = self.share.as_ref() {
                self.client
                    .send(proto::UpdateDiagnosticSummary {
                        project_id: share.project_id,
                        worktree_id: self.id().to_proto(),
                        summary: Some(proto::DiagnosticSummary {
                            path: worktree_path.to_string_lossy().to_string(),
                            language_server_id: server_id.0 as u64,
                            error_count: new_summary.error_count as u32,
                            warning_count: new_summary.warning_count as u32,
                        }),
                    })
                    .log_err();
            }
        }

        Ok(!old_summary.is_empty() || !new_summary.is_empty())
    }
599
    /// Installs a snapshot produced by the background scanner, forwards it to
    /// the share task (if sharing), and emits an event if any git
    /// repositories changed between the old and new snapshots.
    fn set_snapshot(&mut self, new_snapshot: LocalSnapshot, cx: &mut ModelContext<Worktree>) {
        let updated_repos = Self::changed_repos(
            &self.snapshot.git_repositories,
            &new_snapshot.git_repositories,
        );
        self.snapshot = new_snapshot;

        if let Some(share) = self.share.as_mut() {
            *share.snapshots_tx.borrow_mut() = self.snapshot.clone();
        }

        if !updated_repos.is_empty() {
            cx.emit(Event::UpdatedGitRepositories(updated_repos));
        }
    }
615
    /// Computes a symmetric difference of two repository lists: an entry is
    /// "changed" if no entry with the same `git_dir_path` *and* `scan_id`
    /// exists on the other side. Results are deduplicated by `git_dir_path`.
    fn changed_repos(
        old_repos: &[GitRepositoryEntry],
        new_repos: &[GitRepositoryEntry],
    ) -> Vec<GitRepositoryEntry> {
        // Collects entries of `a` that have no (path, scan_id) match in `b`.
        fn diff<'a>(
            a: &'a [GitRepositoryEntry],
            b: &'a [GitRepositoryEntry],
            updated: &mut HashMap<&'a Path, GitRepositoryEntry>,
        ) {
            for a_repo in a {
                let matched = b.iter().find(|b_repo| {
                    a_repo.git_dir_path == b_repo.git_dir_path && a_repo.scan_id == b_repo.scan_id
                });

                if matched.is_none() {
                    updated.insert(a_repo.git_dir_path.as_ref(), a_repo.clone());
                }
            }
        }

        let mut updated = HashMap::<&Path, GitRepositoryEntry>::default();

        // Run the diff in both directions so additions, removals, and
        // modifications are all detected.
        diff(old_repos, new_repos, &mut updated);
        diff(new_repos, old_repos, &mut updated);

        updated.into_values().collect()
    }
643
644 pub fn scan_complete(&self) -> impl Future<Output = ()> {
645 let mut is_scanning_rx = self.is_scanning.1.clone();
646 async move {
647 let mut is_scanning = is_scanning_rx.borrow().clone();
648 while is_scanning {
649 if let Some(value) = is_scanning_rx.recv().await {
650 is_scanning = value;
651 } else {
652 break;
653 }
654 }
655 }
656 }
657
    /// Returns a clone of the current local snapshot.
    pub fn snapshot(&self) -> LocalSnapshot {
        self.snapshot.clone()
    }
661
    /// Serializes this worktree's identity (id, root name, visibility, path)
    /// for transmission to collaborators.
    pub fn metadata_proto(&self) -> proto::WorktreeMetadata {
        proto::WorktreeMetadata {
            id: self.id().to_proto(),
            root_name: self.root_name().to_string(),
            visible: self.visible,
            abs_path: self.abs_path().as_os_str().to_string_lossy().into(),
        }
    }
670
    /// Reads the file at the worktree-relative `path`, returning its `File`
    /// metadata, text contents, and git index ("diff base") text if the file
    /// belongs to a repository in this worktree.
    fn load(
        &self,
        path: &Path,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<(File, String, Option<String>)>> {
        let handle = cx.handle();
        let path = Arc::from(path);
        let abs_path = self.absolutize(&path);
        let fs = self.fs.clone();
        let snapshot = self.snapshot();

        cx.spawn(|this, mut cx| async move {
            let text = fs.load(&abs_path).await?;

            // Fetch the staged copy of the file from the git index, if the
            // path is inside a known repository.
            let diff_base = if let Some(repo) = snapshot.repo_for(&path) {
                if let Ok(repo_relative) = path.strip_prefix(repo.content_path) {
                    let repo_relative = repo_relative.to_owned();
                    cx.background()
                        .spawn(async move { repo.repo.lock().load_index_text(&repo_relative) })
                        .await
                } else {
                    None
                }
            } else {
                None
            };

            // Eagerly populate the snapshot with an updated entry for the loaded file
            let entry = this
                .update(&mut cx, |this, cx| {
                    this.as_local().unwrap().refresh_entry(path, None, cx)
                })
                .await?;

            Ok((
                File {
                    entry_id: entry.id,
                    worktree: handle,
                    path: entry.path,
                    mtime: entry.mtime,
                    is_local: true,
                    is_deleted: false,
                },
                text,
                diff_base,
            ))
        })
    }
719
720 pub fn save_buffer(
721 &self,
722 buffer_handle: ModelHandle<Buffer>,
723 path: Arc<Path>,
724 has_changed_file: bool,
725 cx: &mut ModelContext<Worktree>,
726 ) -> Task<Result<(clock::Global, RopeFingerprint, SystemTime)>> {
727 let handle = cx.handle();
728 let buffer = buffer_handle.read(cx);
729
730 let rpc = self.client.clone();
731 let buffer_id = buffer.remote_id();
732 let project_id = self.share.as_ref().map(|share| share.project_id);
733
734 let text = buffer.as_rope().clone();
735 let fingerprint = text.fingerprint();
736 let version = buffer.version();
737 let save = self.write_file(path, text, buffer.line_ending(), cx);
738
739 cx.as_mut().spawn(|mut cx| async move {
740 let entry = save.await?;
741
742 if has_changed_file {
743 let new_file = Arc::new(File {
744 entry_id: entry.id,
745 worktree: handle,
746 path: entry.path,
747 mtime: entry.mtime,
748 is_local: true,
749 is_deleted: false,
750 });
751
752 if let Some(project_id) = project_id {
753 rpc.send(proto::UpdateBufferFile {
754 project_id,
755 buffer_id,
756 file: Some(new_file.to_proto()),
757 })
758 .log_err();
759 }
760
761 buffer_handle.update(&mut cx, |buffer, cx| {
762 if has_changed_file {
763 buffer.file_updated(new_file, cx).detach();
764 }
765 });
766 }
767
768 if let Some(project_id) = project_id {
769 rpc.send(proto::BufferSaved {
770 project_id,
771 buffer_id,
772 version: serialize_version(&version),
773 mtime: Some(entry.mtime.into()),
774 fingerprint: serialize_fingerprint(fingerprint),
775 })?;
776 }
777
778 buffer_handle.update(&mut cx, |buffer, cx| {
779 buffer.did_save(version.clone(), fingerprint, entry.mtime, cx);
780 });
781
782 Ok((version, fingerprint, entry.mtime))
783 })
784 }
785
    /// Creates an empty file or directory at the worktree-relative `path`,
    /// then rescans that path so the returned `Entry` reflects the new state.
    pub fn create_entry(
        &self,
        path: impl Into<Arc<Path>>,
        is_dir: bool,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<Entry>> {
        let path = path.into();
        let abs_path = self.absolutize(&path);
        let fs = self.fs.clone();
        let write = cx.background().spawn(async move {
            if is_dir {
                fs.create_dir(&abs_path).await
            } else {
                // An empty file with default line endings.
                fs.save(&abs_path, &Default::default(), Default::default())
                    .await
            }
        });

        cx.spawn(|this, mut cx| async move {
            write.await?;
            this.update(&mut cx, |this, cx| {
                this.as_local_mut().unwrap().refresh_entry(path, None, cx)
            })
            .await
        })
    }
812
    /// Writes `text` to the worktree-relative `path` with the given line
    /// endings, then rescans that path and returns its refreshed `Entry`.
    pub fn write_file(
        &self,
        path: impl Into<Arc<Path>>,
        text: Rope,
        line_ending: LineEnding,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<Entry>> {
        let path = path.into();
        let abs_path = self.absolutize(&path);
        let fs = self.fs.clone();
        let write = cx
            .background()
            .spawn(async move { fs.save(&abs_path, &text, line_ending).await });

        cx.spawn(|this, mut cx| async move {
            write.await?;
            this.update(&mut cx, |this, cx| {
                this.as_local_mut().unwrap().refresh_entry(path, None, cx)
            })
            .await
        })
    }
835
    /// Deletes the file or directory for `entry_id` from disk, then waits for
    /// the background scanner to process the removal. Returns `None` if the
    /// entry is not in the current snapshot.
    pub fn delete_entry(
        &self,
        entry_id: ProjectEntryId,
        cx: &mut ModelContext<Worktree>,
    ) -> Option<Task<Result<()>>> {
        let entry = self.entry_for_id(entry_id)?.clone();
        let abs_path = self.abs_path.clone();
        let fs = self.fs.clone();

        let delete = cx.background().spawn(async move {
            // Canonicalize the root first so the scanner sees the same path
            // form it watches. An empty entry path means the root itself.
            let mut abs_path = fs.canonicalize(&abs_path).await?;
            if entry.path.file_name().is_some() {
                abs_path = abs_path.join(&entry.path);
            }
            if entry.is_file() {
                fs.remove_file(&abs_path, Default::default()).await?;
            } else {
                fs.remove_dir(
                    &abs_path,
                    RemoveOptions {
                        recursive: true,
                        ignore_if_not_exists: false,
                    },
                )
                .await?;
            }
            anyhow::Ok(abs_path)
        });

        Some(cx.spawn(|this, mut cx| async move {
            let abs_path = delete.await?;
            // Ask the scanner to rescan the deleted path and wait (via the
            // barrier) until the snapshot reflects the deletion.
            let (tx, mut rx) = barrier::channel();
            this.update(&mut cx, |this, _| {
                this.as_local_mut()
                    .unwrap()
                    .path_changes_tx
                    .try_send((vec![abs_path], tx))
            })?;
            rx.recv().await;
            Ok(())
        }))
    }
878
    /// Renames the entry `entry_id` to `new_path` on disk, then rescans both
    /// the old and new locations and returns the refreshed `Entry`. Returns
    /// `None` if the entry is not in the current snapshot.
    pub fn rename_entry(
        &self,
        entry_id: ProjectEntryId,
        new_path: impl Into<Arc<Path>>,
        cx: &mut ModelContext<Worktree>,
    ) -> Option<Task<Result<Entry>>> {
        let old_path = self.entry_for_id(entry_id)?.path.clone();
        let new_path = new_path.into();
        let abs_old_path = self.absolutize(&old_path);
        let abs_new_path = self.absolutize(&new_path);
        let fs = self.fs.clone();
        let rename = cx.background().spawn(async move {
            fs.rename(&abs_old_path, &abs_new_path, Default::default())
                .await
        });

        Some(cx.spawn(|this, mut cx| async move {
            rename.await?;
            this.update(&mut cx, |this, cx| {
                this.as_local_mut()
                    .unwrap()
                    // Passing the old path makes `refresh_entry` rescan it
                    // too, so the snapshot drops the stale entry.
                    .refresh_entry(new_path.clone(), Some(old_path), cx)
            })
            .await
        }))
    }
905
    /// Recursively copies the entry `entry_id` to `new_path` on disk, then
    /// rescans the destination and returns its refreshed `Entry`. Returns
    /// `None` if the entry is not in the current snapshot.
    pub fn copy_entry(
        &self,
        entry_id: ProjectEntryId,
        new_path: impl Into<Arc<Path>>,
        cx: &mut ModelContext<Worktree>,
    ) -> Option<Task<Result<Entry>>> {
        let old_path = self.entry_for_id(entry_id)?.path.clone();
        let new_path = new_path.into();
        let abs_old_path = self.absolutize(&old_path);
        let abs_new_path = self.absolutize(&new_path);
        let fs = self.fs.clone();
        let copy = cx.background().spawn(async move {
            copy_recursive(
                fs.as_ref(),
                &abs_old_path,
                &abs_new_path,
                Default::default(),
            )
            .await
        });

        Some(cx.spawn(|this, mut cx| async move {
            copy.await?;
            this.update(&mut cx, |this, cx| {
                this.as_local_mut()
                    .unwrap()
                    .refresh_entry(new_path.clone(), None, cx)
            })
            .await
        }))
    }
937
    /// Asks the background scanner to rescan `path` (and `old_path`, if
    /// given, e.g. after a rename), waits until the scan is processed, and
    /// returns the refreshed `Entry` for `path`.
    fn refresh_entry(
        &self,
        path: Arc<Path>,
        old_path: Option<Arc<Path>>,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<Entry>> {
        let fs = self.fs.clone();
        let abs_root_path = self.abs_path.clone();
        let path_changes_tx = self.path_changes_tx.clone();
        cx.spawn_weak(move |this, mut cx| async move {
            // Canonicalize the root so the paths sent to the scanner match
            // the form it watches. A path with no file name is the root.
            let abs_path = fs.canonicalize(&abs_root_path).await?;
            let mut paths = Vec::with_capacity(2);
            paths.push(if path.file_name().is_some() {
                abs_path.join(&path)
            } else {
                abs_path.clone()
            });
            if let Some(old_path) = old_path {
                paths.push(if old_path.file_name().is_some() {
                    abs_path.join(&old_path)
                } else {
                    abs_path.clone()
                });
            }

            // The barrier resolves once the scanner has processed the batch.
            let (tx, mut rx) = barrier::channel();
            path_changes_tx.try_send((paths, tx))?;
            rx.recv().await;
            this.upgrade(&cx)
                .ok_or_else(|| anyhow!("worktree was dropped"))?
                .update(&mut cx, |this, _| {
                    this.entry_for_path(path)
                        .cloned()
                        .ok_or_else(|| anyhow!("failed to read path after update"))
                })
        })
    }
975
    /// Starts (or resumes) sharing this worktree under `project_id`: sends
    /// current diagnostic summaries, then spawns a background task that
    /// streams snapshot diffs to the remote. The returned task resolves once
    /// the first full snapshot has been sent.
    pub fn share(&mut self, project_id: u64, cx: &mut ModelContext<Worktree>) -> Task<Result<()>> {
        let (share_tx, share_rx) = oneshot::channel();

        if let Some(share) = self.share.as_mut() {
            // Already sharing: resolve immediately and nudge the existing
            // update task to retry any stalled sends.
            let _ = share_tx.send(());
            *share.resume_updates.borrow_mut() = ();
        } else {
            let (snapshots_tx, mut snapshots_rx) = watch::channel_with(self.snapshot());
            let (resume_updates_tx, mut resume_updates_rx) = watch::channel();
            let worktree_id = cx.model_id() as u64;

            // Send existing diagnostic summaries up front; bail on the first
            // send failure.
            for (path, summaries) in &self.diagnostic_summaries {
                for (&server_id, summary) in summaries {
                    if let Err(e) = self.client.send(proto::UpdateDiagnosticSummary {
                        project_id,
                        worktree_id,
                        summary: Some(summary.to_proto(server_id, &path)),
                    }) {
                        return Task::ready(Err(e));
                    }
                }
            }

            let _maintain_remote_snapshot = cx.background().spawn({
                let client = self.client.clone();
                async move {
                    let mut share_tx = Some(share_tx);
                    // Start from an empty snapshot so the first diff sends
                    // the entire worktree.
                    let mut prev_snapshot = LocalSnapshot {
                        ignores_by_parent_abs_path: Default::default(),
                        git_repositories: Default::default(),
                        removed_entry_ids: Default::default(),
                        next_entry_id: Default::default(),
                        snapshot: Snapshot {
                            id: WorktreeId(worktree_id as usize),
                            abs_path: Path::new("").into(),
                            root_name: Default::default(),
                            root_char_bag: Default::default(),
                            entries_by_path: Default::default(),
                            entries_by_id: Default::default(),
                            scan_id: 0,
                            completed_scan_id: 0,
                        },
                    };
                    while let Some(snapshot) = snapshots_rx.recv().await {
                        // Small chunks in tests exercise the chunking logic.
                        #[cfg(any(test, feature = "test-support"))]
                        const MAX_CHUNK_SIZE: usize = 2;
                        #[cfg(not(any(test, feature = "test-support")))]
                        const MAX_CHUNK_SIZE: usize = 256;

                        let update =
                            snapshot.build_update(&prev_snapshot, project_id, worktree_id, true);
                        for update in proto::split_worktree_update(update, MAX_CHUNK_SIZE) {
                            // Clear any stale resume signal, then retry each
                            // failed send until resumed or the channel closes.
                            let _ = resume_updates_rx.try_recv();
                            while let Err(error) = client.request(update.clone()).await {
                                log::error!("failed to send worktree update: {}", error);
                                log::info!("waiting to resume updates");
                                if resume_updates_rx.next().await.is_none() {
                                    return Ok(());
                                }
                            }
                        }

                        // First snapshot fully sent: resolve the share task.
                        if let Some(share_tx) = share_tx.take() {
                            let _ = share_tx.send(());
                        }

                        prev_snapshot = snapshot;
                    }

                    Ok::<_, anyhow::Error>(())
                }
                .log_err()
            });

            self.share = Some(ShareState {
                project_id,
                snapshots_tx,
                resume_updates: resume_updates_tx,
                _maintain_remote_snapshot,
            });
        }

        cx.foreground()
            .spawn(async move { share_rx.await.map_err(|_| anyhow!("share ended")) })
    }
1061
1062 pub fn unshare(&mut self) {
1063 self.share.take();
1064 }
1065
    /// Whether this worktree is currently shared with collaborators.
    pub fn is_shared(&self) -> bool {
        self.share.is_some()
    }
1069}
1070
1071impl RemoteWorktree {
    /// Returns a clone of the foreground snapshot.
    fn snapshot(&self) -> Snapshot {
        self.snapshot.clone()
    }
1075
    /// Marks this replica as disconnected: stops accepting updates and drops
    /// all pending snapshot subscriptions (their receivers will error).
    pub fn disconnected_from_host(&mut self) {
        self.updates_tx.take();
        self.snapshot_subscriptions.clear();
        self.disconnected = true;
    }
1081
    /// Asks the host to save the buffer via RPC, then marks the local buffer
    /// as saved with the version/fingerprint/mtime the host reports.
    pub fn save_buffer(
        &self,
        buffer_handle: ModelHandle<Buffer>,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<(clock::Global, RopeFingerprint, SystemTime)>> {
        let buffer = buffer_handle.read(cx);
        let buffer_id = buffer.remote_id();
        let version = buffer.version();
        let rpc = self.client.clone();
        let project_id = self.project_id;
        cx.as_mut().spawn(|mut cx| async move {
            let response = rpc
                .request(proto::SaveBuffer {
                    project_id,
                    buffer_id,
                    version: serialize_version(&version),
                })
                .await?;
            // Use the host's reported state, not our local one — the host
            // may have merged further edits before saving.
            let version = deserialize_version(&response.version);
            let fingerprint = deserialize_fingerprint(&response.fingerprint)?;
            let mtime = response
                .mtime
                .ok_or_else(|| anyhow!("missing mtime"))?
                .into();

            buffer_handle.update(&mut cx, |buffer, cx| {
                buffer.did_save(version.clone(), fingerprint, mtime, cx);
            });

            Ok((version, fingerprint, mtime))
        })
    }
1114
    /// Queues a worktree update from the host for application on the
    /// background task. Silently dropped after `disconnected_from_host`.
    pub fn update_from_remote(&mut self, update: proto::UpdateWorktree) {
        if let Some(updates_tx) = &self.updates_tx {
            updates_tx
                .unbounded_send(update)
                .expect("consumer runs to completion");
        }
    }
1122
    /// Whether this replica has already observed the given scan (and all
    /// scans before it).
    fn observed_snapshot(&self, scan_id: usize) -> bool {
        self.completed_scan_id >= scan_id
    }
1126
    /// Returns a future that resolves once this replica has observed the
    /// given `scan_id`. Resolves immediately if already observed; errors if
    /// the host disconnects before the scan is observed.
    fn wait_for_snapshot(&mut self, scan_id: usize) -> impl Future<Output = Result<()>> {
        let (tx, rx) = oneshot::channel();
        if self.observed_snapshot(scan_id) {
            let _ = tx.send(());
        } else if self.disconnected {
            // Dropping the sender makes `rx.await` fail below.
            drop(tx);
        } else {
            // Keep `snapshot_subscriptions` sorted by scan id so the update
            // loop in `Worktree::remote` can resolve them front-to-back.
            match self
                .snapshot_subscriptions
                .binary_search_by_key(&scan_id, |probe| probe.0)
            {
                Ok(ix) | Err(ix) => self.snapshot_subscriptions.insert(ix, (scan_id, tx)),
            }
        }

        async move {
            rx.await?;
            Ok(())
        }
    }
1147
    /// Applies a diagnostic summary received from the host, removing the
    /// entry (and path) entirely when the new summary is empty.
    pub fn update_diagnostic_summary(
        &mut self,
        path: Arc<Path>,
        summary: &proto::DiagnosticSummary,
    ) {
        let server_id = LanguageServerId(summary.language_server_id as usize);
        let summary = DiagnosticSummary {
            error_count: summary.error_count as usize,
            warning_count: summary.warning_count as usize,
        };

        if summary.is_empty() {
            if let Some(summaries) = self.diagnostic_summaries.get_mut(&path) {
                summaries.remove(&server_id);
                if summaries.is_empty() {
                    self.diagnostic_summaries.remove(&path);
                }
            }
        } else {
            self.diagnostic_summaries
                .entry(path)
                .or_default()
                .insert(server_id, summary);
        }
    }
1173
1174 pub fn insert_entry(
1175 &mut self,
1176 entry: proto::Entry,
1177 scan_id: usize,
1178 cx: &mut ModelContext<Worktree>,
1179 ) -> Task<Result<Entry>> {
1180 let wait_for_snapshot = self.wait_for_snapshot(scan_id);
1181 cx.spawn(|this, mut cx| async move {
1182 wait_for_snapshot.await?;
1183 this.update(&mut cx, |worktree, _| {
1184 let worktree = worktree.as_remote_mut().unwrap();
1185 let mut snapshot = worktree.background_snapshot.lock();
1186 let entry = snapshot.insert_entry(entry);
1187 worktree.snapshot = snapshot.clone();
1188 entry
1189 })
1190 })
1191 }
1192
1193 pub(crate) fn delete_entry(
1194 &mut self,
1195 id: ProjectEntryId,
1196 scan_id: usize,
1197 cx: &mut ModelContext<Worktree>,
1198 ) -> Task<Result<()>> {
1199 let wait_for_snapshot = self.wait_for_snapshot(scan_id);
1200 cx.spawn(|this, mut cx| async move {
1201 wait_for_snapshot.await?;
1202 this.update(&mut cx, |worktree, _| {
1203 let worktree = worktree.as_remote_mut().unwrap();
1204 let mut snapshot = worktree.background_snapshot.lock();
1205 snapshot.delete_entry(id);
1206 worktree.snapshot = snapshot.clone();
1207 });
1208 Ok(())
1209 })
1210 }
1211}
1212
impl Snapshot {
    /// The id of the worktree this snapshot was taken from.
    pub fn id(&self) -> WorktreeId {
        self.id
    }

    /// The absolute path of the worktree root on disk.
    pub fn abs_path(&self) -> &Arc<Path> {
        &self.abs_path
    }

    /// Whether an entry with the given id exists in this snapshot.
    pub fn contains_entry(&self, entry_id: ProjectEntryId) -> bool {
        self.entries_by_id.get(&entry_id, &()).is_some()
    }

    /// Inserts (or replaces) an entry received over the wire, keeping the
    /// path-keyed and id-keyed trees consistent with each other.
    pub(crate) fn insert_entry(&mut self, entry: proto::Entry) -> Result<Entry> {
        let entry = Entry::try_from((&self.root_char_bag, entry))?;
        let old_entry = self.entries_by_id.insert_or_replace(
            PathEntry {
                id: entry.id,
                path: entry.path.clone(),
                is_ignored: entry.is_ignored,
                scan_id: 0,
            },
            &(),
        );
        // If this id was previously associated with a different path, that
        // path-keyed record is now stale and must be removed.
        if let Some(old_entry) = old_entry {
            self.entries_by_path.remove(&PathKey(old_entry.path), &());
        }
        self.entries_by_path.insert_or_replace(entry.clone(), &());
        Ok(entry)
    }

    /// Removes the entry with the given id along with all of its
    /// descendants, returning the removed entry's path.
    fn delete_entry(&mut self, entry_id: ProjectEntryId) -> Option<Arc<Path>> {
        let removed_entry = self.entries_by_id.remove(&entry_id, &())?;
        self.entries_by_path = {
            let mut cursor = self.entries_by_path.cursor();
            // Keep everything strictly before the removed path...
            let mut new_entries_by_path =
                cursor.slice(&TraversalTarget::Path(&removed_entry.path), Bias::Left, &());
            // ...skip the removed entry and its descendants, unregistering
            // them from the id-keyed tree as we go...
            while let Some(entry) = cursor.item() {
                if entry.path.starts_with(&removed_entry.path) {
                    self.entries_by_id.remove(&entry.id, &());
                    cursor.next(&());
                } else {
                    break;
                }
            }
            // ...and keep the remaining suffix.
            new_entries_by_path.push_tree(cursor.suffix(&()), &());
            new_entries_by_path
        };

        Some(removed_entry.path)
    }

    /// Applies an incremental update received from the remote host.
    pub(crate) fn apply_remote_update(&mut self, update: proto::UpdateWorktree) -> Result<()> {
        let mut entries_by_path_edits = Vec::new();
        let mut entries_by_id_edits = Vec::new();
        for entry_id in update.removed_entries {
            if let Some(entry) = self.entry_for_id(ProjectEntryId::from_proto(entry_id)) {
                entries_by_path_edits.push(Edit::Remove(PathKey(entry.path.clone())));
                entries_by_id_edits.push(Edit::Remove(entry.id));
            }
        }

        for entry in update.updated_entries {
            let entry = Entry::try_from((&self.root_char_bag, entry))?;
            // If this id already exists under a different path (e.g. a
            // rename), drop the record for the old path.
            if let Some(PathEntry { path, .. }) = self.entries_by_id.get(&entry.id, &()) {
                entries_by_path_edits.push(Edit::Remove(PathKey(path.clone())));
            }
            entries_by_id_edits.push(Edit::Insert(PathEntry {
                id: entry.id,
                path: entry.path.clone(),
                is_ignored: entry.is_ignored,
                scan_id: 0,
            }));
            entries_by_path_edits.push(Edit::Insert(entry));
        }

        self.entries_by_path.edit(entries_by_path_edits, &());
        self.entries_by_id.edit(entries_by_id_edits, &());
        self.scan_id = update.scan_id as usize;
        if update.is_last_update {
            self.completed_scan_id = update.scan_id as usize;
        }

        Ok(())
    }

    /// Total number of file entries, including ignored ones.
    pub fn file_count(&self) -> usize {
        self.entries_by_path.summary().file_count
    }

    /// Number of file entries that are not git-ignored.
    pub fn visible_file_count(&self) -> usize {
        self.entries_by_path.summary().visible_file_count
    }

    /// Starts a traversal positioned at the `start_offset`-th entry that
    /// matches the include flags.
    fn traverse_from_offset(
        &self,
        include_dirs: bool,
        include_ignored: bool,
        start_offset: usize,
    ) -> Traversal {
        let mut cursor = self.entries_by_path.cursor();
        cursor.seek(
            &TraversalTarget::Count {
                count: start_offset,
                include_dirs,
                include_ignored,
            },
            Bias::Right,
            &(),
        );
        Traversal {
            cursor,
            include_dirs,
            include_ignored,
        }
    }

    /// Starts a traversal positioned at the given worktree-relative path.
    fn traverse_from_path(
        &self,
        include_dirs: bool,
        include_ignored: bool,
        path: &Path,
    ) -> Traversal {
        let mut cursor = self.entries_by_path.cursor();
        cursor.seek(&TraversalTarget::Path(path), Bias::Left, &());
        Traversal {
            cursor,
            include_dirs,
            include_ignored,
        }
    }

    /// Iterates over file entries, beginning at file index `start`.
    pub fn files(&self, include_ignored: bool, start: usize) -> Traversal {
        self.traverse_from_offset(false, include_ignored, start)
    }

    /// Iterates over all entries, files and directories alike.
    pub fn entries(&self, include_ignored: bool) -> Traversal {
        self.traverse_from_offset(true, include_ignored, 0)
    }

    /// Iterates over the paths of all entries, excluding the root itself
    /// (whose relative path is empty).
    pub fn paths(&self) -> impl Iterator<Item = &Arc<Path>> {
        let empty_path = Path::new("");
        self.entries_by_path
            .cursor::<()>()
            .filter(move |entry| entry.path.as_ref() != empty_path)
            .map(|entry| &entry.path)
    }

    /// Iterates over the direct children of the given directory path.
    fn child_entries<'a>(&'a self, parent_path: &'a Path) -> ChildEntriesIter<'a> {
        let mut cursor = self.entries_by_path.cursor();
        cursor.seek(&TraversalTarget::Path(parent_path), Bias::Right, &());
        let traversal = Traversal {
            cursor,
            include_dirs: true,
            include_ignored: true,
        };
        ChildEntriesIter {
            traversal,
            parent_path,
        }
    }

    /// The entry for the worktree root, if present in the snapshot.
    pub fn root_entry(&self) -> Option<&Entry> {
        self.entry_for_path("")
    }

    /// The name of the worktree's root directory.
    pub fn root_name(&self) -> &str {
        &self.root_name
    }

    /// The id of the most recently started scan.
    pub fn scan_id(&self) -> usize {
        self.scan_id
    }

    /// Finds the entry whose worktree-relative path is exactly `path`.
    pub fn entry_for_path(&self, path: impl AsRef<Path>) -> Option<&Entry> {
        let path = path.as_ref();
        self.traverse_from_path(true, true, path)
            .entry()
            .and_then(|entry| {
                if entry.path.as_ref() == path {
                    Some(entry)
                } else {
                    None
                }
            })
    }

    /// Finds the entry with the given id, if it still exists.
    pub fn entry_for_id(&self, id: ProjectEntryId) -> Option<&Entry> {
        let entry = self.entries_by_id.get(&id, &())?;
        self.entry_for_path(&entry.path)
    }

    /// The inode of the entry at the given path, if any.
    pub fn inode_for_path(&self, path: impl AsRef<Path>) -> Option<u64> {
        self.entry_for_path(path.as_ref()).map(|e| e.inode)
    }
}
1409
impl LocalSnapshot {
    // Gives the most specific git repository for a given path
    pub(crate) fn repo_for(&self, path: &Path) -> Option<GitRepositoryEntry> {
        self.git_repositories
            .iter()
            .rev() //git_repository is ordered lexicographically
            .find(|repo| repo.manages(path))
            .cloned()
    }

    /// Finds the repository whose `.git` directory contains `path`
    /// (worktree-relative), if any.
    pub(crate) fn repo_with_dot_git_containing(
        &mut self,
        path: &Path,
    ) -> Option<&mut GitRepositoryEntry> {
        // Git repositories cannot be nested, so we don't need to reverse the order
        self.git_repositories
            .iter_mut()
            .find(|repo| repo.in_dot_git(path))
    }

    /// Builds an update message carrying this snapshot's complete contents.
    #[cfg(test)]
    pub(crate) fn build_initial_update(&self, project_id: u64) -> proto::UpdateWorktree {
        let root_name = self.root_name.clone();
        proto::UpdateWorktree {
            project_id,
            worktree_id: self.id().to_proto(),
            abs_path: self.abs_path().to_string_lossy().into(),
            root_name,
            updated_entries: self.entries_by_path.iter().map(Into::into).collect(),
            removed_entries: Default::default(),
            scan_id: self.scan_id as u64,
            is_last_update: true,
        }
    }

    /// Computes an incremental update by diffing this snapshot against an
    /// older one, merge-joining the two id-ordered entry trees.
    pub(crate) fn build_update(
        &self,
        other: &Self,
        project_id: u64,
        worktree_id: u64,
        include_ignored: bool,
    ) -> proto::UpdateWorktree {
        let mut updated_entries = Vec::new();
        let mut removed_entries = Vec::new();
        let mut self_entries = self
            .entries_by_id
            .cursor::<()>()
            .filter(|e| include_ignored || !e.is_ignored)
            .peekable();
        let mut other_entries = other
            .entries_by_id
            .cursor::<()>()
            .filter(|e| include_ignored || !e.is_ignored)
            .peekable();
        loop {
            match (self_entries.peek(), other_entries.peek()) {
                (Some(self_entry), Some(other_entry)) => {
                    match Ord::cmp(&self_entry.id, &other_entry.id) {
                        Ordering::Less => {
                            // Present only in the newer snapshot: added.
                            let entry = self.entry_for_id(self_entry.id).unwrap().into();
                            updated_entries.push(entry);
                            self_entries.next();
                        }
                        Ordering::Equal => {
                            // Present in both: report it only if it was
                            // touched by a scan since `other` was taken.
                            if self_entry.scan_id != other_entry.scan_id {
                                let entry = self.entry_for_id(self_entry.id).unwrap().into();
                                updated_entries.push(entry);
                            }

                            self_entries.next();
                            other_entries.next();
                        }
                        Ordering::Greater => {
                            // Present only in the older snapshot: removed.
                            removed_entries.push(other_entry.id.to_proto());
                            other_entries.next();
                        }
                    }
                }
                (Some(self_entry), None) => {
                    let entry = self.entry_for_id(self_entry.id).unwrap().into();
                    updated_entries.push(entry);
                    self_entries.next();
                }
                (None, Some(other_entry)) => {
                    removed_entries.push(other_entry.id.to_proto());
                    other_entries.next();
                }
                (None, None) => break,
            }
        }

        proto::UpdateWorktree {
            project_id,
            worktree_id,
            abs_path: self.abs_path().to_string_lossy().into(),
            root_name: self.root_name().to_string(),
            updated_entries,
            removed_entries,
            scan_id: self.scan_id as u64,
            is_last_update: self.completed_scan_id == self.scan_id,
        }
    }

    /// Inserts a newly-scanned entry into the snapshot, reusing any id
    /// previously assigned to the same path or inode.
    fn insert_entry(&mut self, mut entry: Entry, fs: &dyn Fs) -> Entry {
        if entry.is_file() && entry.path.file_name() == Some(&GITIGNORE) {
            // Keep the parsed ignore rules in sync with the file on disk.
            let abs_path = self.abs_path.join(&entry.path);
            match smol::block_on(build_gitignore(&abs_path, fs)) {
                Ok(ignore) => {
                    self.ignores_by_parent_abs_path.insert(
                        abs_path.parent().unwrap().into(),
                        (Arc::new(ignore), self.scan_id),
                    );
                }
                Err(error) => {
                    log::error!(
                        "error loading .gitignore file {:?} - {:?}",
                        &entry.path,
                        error
                    );
                }
            }
        }

        self.reuse_entry_id(&mut entry);

        // Don't demote a directory that has already been populated back to
        // the pending state.
        if entry.kind == EntryKind::PendingDir {
            if let Some(existing_entry) =
                self.entries_by_path.get(&PathKey(entry.path.clone()), &())
            {
                entry.kind = existing_entry.kind;
            }
        }

        let scan_id = self.scan_id;
        let removed = self.entries_by_path.insert_or_replace(entry.clone(), &());
        if let Some(removed) = removed {
            // The path was previously occupied by a different entry; drop
            // that entry's id-keyed record.
            if removed.id != entry.id {
                self.entries_by_id.remove(&removed.id, &());
            }
        }
        self.entries_by_id.insert_or_replace(
            PathEntry {
                id: entry.id,
                path: entry.path.clone(),
                is_ignored: entry.is_ignored,
                scan_id,
            },
            &(),
        );

        entry
    }

    /// Fills in the scanned contents of a directory, transitioning the
    /// parent from `PendingDir` to `Dir`.
    fn populate_dir(
        &mut self,
        parent_path: Arc<Path>,
        entries: impl IntoIterator<Item = Entry>,
        ignore: Option<Arc<Gitignore>>,
        fs: &dyn Fs,
    ) {
        let mut parent_entry = if let Some(parent_entry) =
            self.entries_by_path.get(&PathKey(parent_path.clone()), &())
        {
            parent_entry.clone()
        } else {
            log::warn!(
                "populating a directory {:?} that has been removed",
                parent_path
            );
            return;
        };

        match parent_entry.kind {
            EntryKind::PendingDir => {
                parent_entry.kind = EntryKind::Dir;
            }
            EntryKind::Dir => {}
            // The parent is no longer a directory; this job is stale.
            _ => return,
        }

        if let Some(ignore) = ignore {
            self.ignores_by_parent_abs_path.insert(
                self.abs_path.join(&parent_path).into(),
                (ignore, self.scan_id),
            );
        }

        // Entering a `.git` directory means we've found a repository root;
        // register it if it isn't already known.
        if parent_path.file_name() == Some(&DOT_GIT) {
            let abs_path = self.abs_path.join(&parent_path);
            let content_path: Arc<Path> = parent_path.parent().unwrap().into();
            if let Err(ix) = self
                .git_repositories
                .binary_search_by_key(&&content_path, |repo| &repo.content_path)
            {
                if let Some(repo) = fs.open_repo(abs_path.as_path()) {
                    self.git_repositories.insert(
                        ix,
                        GitRepositoryEntry {
                            repo,
                            scan_id: 0,
                            content_path,
                            git_dir_path: parent_path,
                        },
                    );
                }
            }
        }

        let mut entries_by_path_edits = vec![Edit::Insert(parent_entry)];
        let mut entries_by_id_edits = Vec::new();

        for mut entry in entries {
            self.reuse_entry_id(&mut entry);
            entries_by_id_edits.push(Edit::Insert(PathEntry {
                id: entry.id,
                path: entry.path.clone(),
                is_ignored: entry.is_ignored,
                scan_id: self.scan_id,
            }));
            entries_by_path_edits.push(Edit::Insert(entry));
        }

        self.entries_by_path.edit(entries_by_path_edits, &());
        self.entries_by_id.edit(entries_by_id_edits, &());
    }

    /// Keeps entry ids stable: reuse the id of a recently-removed entry with
    /// the same inode, or of an existing entry at the same path.
    fn reuse_entry_id(&mut self, entry: &mut Entry) {
        if let Some(removed_entry_id) = self.removed_entry_ids.remove(&entry.inode) {
            entry.id = removed_entry_id;
        } else if let Some(existing_entry) = self.entry_for_path(&entry.path) {
            entry.id = existing_entry.id;
        }
    }

    /// Removes the entry at `path` and all of its descendants, recording the
    /// removed ids by inode so a subsequent re-add can reuse them.
    fn remove_path(&mut self, path: &Path) {
        let mut new_entries;
        let removed_entries;
        {
            // Split the path tree into [before `path`], [`path` and its
            // descendants], and [after], then stitch the outer two together.
            let mut cursor = self.entries_by_path.cursor::<TraversalProgress>();
            new_entries = cursor.slice(&TraversalTarget::Path(path), Bias::Left, &());
            removed_entries = cursor.slice(&TraversalTarget::PathSuccessor(path), Bias::Left, &());
            new_entries.push_tree(cursor.suffix(&()), &());
        }
        self.entries_by_path = new_entries;

        let mut entries_by_id_edits = Vec::new();
        for entry in removed_entries.cursor::<()>() {
            let removed_entry_id = self
                .removed_entry_ids
                .entry(entry.inode)
                .or_insert(entry.id);
            *removed_entry_id = cmp::max(*removed_entry_id, entry.id);
            entries_by_id_edits.push(Edit::Remove(entry.id));
        }
        self.entries_by_id.edit(entries_by_id_edits, &());

        if path.file_name() == Some(&GITIGNORE) {
            // Mark the parsed ignore as dirty (rather than dropping it) so
            // ignore statuses are recomputed during this scan.
            let abs_parent_path = self.abs_path.join(path.parent().unwrap());
            if let Some((_, scan_id)) = self
                .ignores_by_parent_abs_path
                .get_mut(abs_parent_path.as_path())
            {
                *scan_id = self.snapshot.scan_id;
            }
        } else if path.file_name() == Some(&DOT_GIT) {
            // Mark the affected repository entry as touched by this scan.
            let parent_path = path.parent().unwrap();
            if let Ok(ix) = self
                .git_repositories
                .binary_search_by_key(&parent_path, |repo| repo.git_dir_path.as_ref())
            {
                self.git_repositories[ix].scan_id = self.snapshot.scan_id;
            }
        }
    }

    /// Collects the inodes of all of `path`'s ancestors that exist in the
    /// snapshot.
    fn ancestor_inodes_for_path(&self, path: &Path) -> TreeSet<u64> {
        let mut inodes = TreeSet::default();
        for ancestor in path.ancestors().skip(1) {
            if let Some(entry) = self.entry_for_path(ancestor) {
                inodes.insert(entry.inode);
            }
        }
        inodes
    }

    /// Builds the stack of gitignore rules that apply at `abs_path`,
    /// applying ancestors' rules from the outermost directory inward.
    fn ignore_stack_for_abs_path(&self, abs_path: &Path, is_dir: bool) -> Arc<IgnoreStack> {
        let mut new_ignores = Vec::new();
        for ancestor in abs_path.ancestors().skip(1) {
            if let Some((ignore, _)) = self.ignores_by_parent_abs_path.get(ancestor) {
                new_ignores.push((ancestor, Some(ignore.clone())));
            } else {
                new_ignores.push((ancestor, None));
            }
        }

        let mut ignore_stack = IgnoreStack::none();
        for (parent_abs_path, ignore) in new_ignores.into_iter().rev() {
            // If an ancestor directory is itself ignored, everything below
            // it is ignored too; no need to consult deeper rules.
            if ignore_stack.is_abs_path_ignored(parent_abs_path, true) {
                ignore_stack = IgnoreStack::all();
                break;
            } else if let Some(ignore) = ignore {
                ignore_stack = ignore_stack.append(parent_abs_path.into(), ignore);
            }
        }

        if ignore_stack.is_abs_path_ignored(abs_path, is_dir) {
            ignore_stack = IgnoreStack::all();
        }

        ignore_stack
    }

    /// All git repositories discovered in this worktree, ordered
    /// lexicographically by path.
    pub fn git_repo_entries(&self) -> &[GitRepositoryEntry] {
        &self.git_repositories
    }
}
1726
1727impl GitRepositoryEntry {
1728 // Note that these paths should be relative to the worktree root.
1729 pub(crate) fn manages(&self, path: &Path) -> bool {
1730 path.starts_with(self.content_path.as_ref())
1731 }
1732
1733 // Note that this path should be relative to the worktree root.
1734 pub(crate) fn in_dot_git(&self, path: &Path) -> bool {
1735 path.starts_with(self.git_dir_path.as_ref())
1736 }
1737}
1738
1739async fn build_gitignore(abs_path: &Path, fs: &dyn Fs) -> Result<Gitignore> {
1740 let contents = fs.load(abs_path).await?;
1741 let parent = abs_path.parent().unwrap_or_else(|| Path::new("/"));
1742 let mut builder = GitignoreBuilder::new(parent);
1743 for line in contents.lines() {
1744 builder.add_line(Some(abs_path.into()), line)?;
1745 }
1746 Ok(builder.build()?)
1747}
1748
1749impl WorktreeId {
1750 pub fn from_usize(handle_id: usize) -> Self {
1751 Self(handle_id)
1752 }
1753
1754 pub(crate) fn from_proto(id: u64) -> Self {
1755 Self(id as usize)
1756 }
1757
1758 pub fn to_proto(&self) -> u64 {
1759 self.0 as u64
1760 }
1761
1762 pub fn to_usize(&self) -> usize {
1763 self.0
1764 }
1765}
1766
1767impl fmt::Display for WorktreeId {
1768 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1769 self.0.fmt(f)
1770 }
1771}
1772
1773impl Deref for Worktree {
1774 type Target = Snapshot;
1775
1776 fn deref(&self) -> &Self::Target {
1777 match self {
1778 Worktree::Local(worktree) => &worktree.snapshot,
1779 Worktree::Remote(worktree) => &worktree.snapshot,
1780 }
1781 }
1782}
1783
impl Deref for LocalWorktree {
    type Target = LocalSnapshot;

    // Expose the snapshot's read-only API directly on the worktree.
    fn deref(&self) -> &Self::Target {
        &self.snapshot
    }
}
1791
impl Deref for RemoteWorktree {
    type Target = Snapshot;

    // Expose the snapshot's read-only API directly on the worktree.
    fn deref(&self) -> &Self::Target {
        &self.snapshot
    }
}
1799
impl fmt::Debug for LocalWorktree {
    // Delegate to the snapshot's Debug representation.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        self.snapshot.fmt(f)
    }
}
1805
1806impl fmt::Debug for Snapshot {
1807 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1808 struct EntriesById<'a>(&'a SumTree<PathEntry>);
1809 struct EntriesByPath<'a>(&'a SumTree<Entry>);
1810
1811 impl<'a> fmt::Debug for EntriesByPath<'a> {
1812 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1813 f.debug_map()
1814 .entries(self.0.iter().map(|entry| (&entry.path, entry.id)))
1815 .finish()
1816 }
1817 }
1818
1819 impl<'a> fmt::Debug for EntriesById<'a> {
1820 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1821 f.debug_list().entries(self.0.iter()).finish()
1822 }
1823 }
1824
1825 f.debug_struct("Snapshot")
1826 .field("id", &self.id)
1827 .field("root_name", &self.root_name)
1828 .field("entries_by_path", &EntriesByPath(&self.entries_by_path))
1829 .field("entries_by_id", &EntriesById(&self.entries_by_id))
1830 .finish()
1831 }
1832}
1833
/// A handle to a file within a worktree, carrying enough metadata to
/// identify the underlying entry without borrowing the worktree.
#[derive(Clone, PartialEq)]
pub struct File {
    pub worktree: ModelHandle<Worktree>,
    /// Path of the file, relative to the worktree root.
    pub path: Arc<Path>,
    /// Modification time as of when this handle was created.
    pub mtime: SystemTime,
    pub(crate) entry_id: ProjectEntryId,
    pub(crate) is_local: bool,
    pub(crate) is_deleted: bool,
}
1843
1844impl language::File for File {
1845 fn as_local(&self) -> Option<&dyn language::LocalFile> {
1846 if self.is_local {
1847 Some(self)
1848 } else {
1849 None
1850 }
1851 }
1852
1853 fn mtime(&self) -> SystemTime {
1854 self.mtime
1855 }
1856
1857 fn path(&self) -> &Arc<Path> {
1858 &self.path
1859 }
1860
1861 fn full_path(&self, cx: &AppContext) -> PathBuf {
1862 let mut full_path = PathBuf::new();
1863 let worktree = self.worktree.read(cx);
1864
1865 if worktree.is_visible() {
1866 full_path.push(worktree.root_name());
1867 } else {
1868 let path = worktree.abs_path();
1869
1870 if worktree.is_local() && path.starts_with(HOME.as_path()) {
1871 full_path.push("~");
1872 full_path.push(path.strip_prefix(HOME.as_path()).unwrap());
1873 } else {
1874 full_path.push(path)
1875 }
1876 }
1877
1878 if self.path.components().next().is_some() {
1879 full_path.push(&self.path);
1880 }
1881
1882 full_path
1883 }
1884
1885 /// Returns the last component of this handle's absolute path. If this handle refers to the root
1886 /// of its worktree, then this method will return the name of the worktree itself.
1887 fn file_name<'a>(&'a self, cx: &'a AppContext) -> &'a OsStr {
1888 self.path
1889 .file_name()
1890 .unwrap_or_else(|| OsStr::new(&self.worktree.read(cx).root_name))
1891 }
1892
1893 fn is_deleted(&self) -> bool {
1894 self.is_deleted
1895 }
1896
1897 fn as_any(&self) -> &dyn Any {
1898 self
1899 }
1900
1901 fn to_proto(&self) -> rpc::proto::File {
1902 rpc::proto::File {
1903 worktree_id: self.worktree.id() as u64,
1904 entry_id: self.entry_id.to_proto(),
1905 path: self.path.to_string_lossy().into(),
1906 mtime: Some(self.mtime.into()),
1907 is_deleted: self.is_deleted,
1908 }
1909 }
1910}
1911
1912impl language::LocalFile for File {
1913 fn abs_path(&self, cx: &AppContext) -> PathBuf {
1914 self.worktree
1915 .read(cx)
1916 .as_local()
1917 .unwrap()
1918 .abs_path
1919 .join(&self.path)
1920 }
1921
1922 fn load(&self, cx: &AppContext) -> Task<Result<String>> {
1923 let worktree = self.worktree.read(cx).as_local().unwrap();
1924 let abs_path = worktree.absolutize(&self.path);
1925 let fs = worktree.fs.clone();
1926 cx.background()
1927 .spawn(async move { fs.load(&abs_path).await })
1928 }
1929
1930 fn buffer_reloaded(
1931 &self,
1932 buffer_id: u64,
1933 version: &clock::Global,
1934 fingerprint: RopeFingerprint,
1935 line_ending: LineEnding,
1936 mtime: SystemTime,
1937 cx: &mut AppContext,
1938 ) {
1939 let worktree = self.worktree.read(cx).as_local().unwrap();
1940 if let Some(project_id) = worktree.share.as_ref().map(|share| share.project_id) {
1941 worktree
1942 .client
1943 .send(proto::BufferReloaded {
1944 project_id,
1945 buffer_id,
1946 version: serialize_version(version),
1947 mtime: Some(mtime.into()),
1948 fingerprint: serialize_fingerprint(fingerprint),
1949 line_ending: serialize_line_ending(line_ending) as i32,
1950 })
1951 .log_err();
1952 }
1953 }
1954}
1955
1956impl File {
1957 pub fn from_proto(
1958 proto: rpc::proto::File,
1959 worktree: ModelHandle<Worktree>,
1960 cx: &AppContext,
1961 ) -> Result<Self> {
1962 let worktree_id = worktree
1963 .read(cx)
1964 .as_remote()
1965 .ok_or_else(|| anyhow!("not remote"))?
1966 .id();
1967
1968 if worktree_id.to_proto() != proto.worktree_id {
1969 return Err(anyhow!("worktree id does not match file"));
1970 }
1971
1972 Ok(Self {
1973 worktree,
1974 path: Path::new(&proto.path).into(),
1975 mtime: proto.mtime.ok_or_else(|| anyhow!("no timestamp"))?.into(),
1976 entry_id: ProjectEntryId::from_proto(proto.entry_id),
1977 is_local: false,
1978 is_deleted: proto.is_deleted,
1979 })
1980 }
1981
1982 pub fn from_dyn(file: Option<&Arc<dyn language::File>>) -> Option<&Self> {
1983 file.and_then(|f| f.as_any().downcast_ref())
1984 }
1985
1986 pub fn worktree_id(&self, cx: &AppContext) -> WorktreeId {
1987 self.worktree.read(cx).id()
1988 }
1989
1990 pub fn project_entry_id(&self, _: &AppContext) -> Option<ProjectEntryId> {
1991 if self.is_deleted {
1992 None
1993 } else {
1994 Some(self.entry_id)
1995 }
1996 }
1997}
1998
/// A single file or directory in a worktree snapshot.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct Entry {
    pub id: ProjectEntryId,
    pub kind: EntryKind,
    /// Path relative to the worktree root.
    pub path: Arc<Path>,
    pub inode: u64,
    pub mtime: SystemTime,
    pub is_symlink: bool,
    /// Whether the entry is excluded by a gitignore rule.
    pub is_ignored: bool,
}
2009
/// The kind of a worktree entry.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum EntryKind {
    /// A directory that has been discovered but whose contents have not yet
    /// been scanned.
    PendingDir,
    /// A fully-scanned directory.
    Dir,
    /// A file, carrying the character bag used for fuzzy matching its path.
    File(CharBag),
}
2016
/// How a path changed between two snapshots.
#[derive(Clone, Copy, Debug)]
pub enum PathChange {
    Added,
    Removed,
    Updated,
    /// Either added or updated; used when the previous state is unknown.
    AddedOrUpdated,
}
2024
2025impl Entry {
2026 fn new(
2027 path: Arc<Path>,
2028 metadata: &fs::Metadata,
2029 next_entry_id: &AtomicUsize,
2030 root_char_bag: CharBag,
2031 ) -> Self {
2032 Self {
2033 id: ProjectEntryId::new(next_entry_id),
2034 kind: if metadata.is_dir {
2035 EntryKind::PendingDir
2036 } else {
2037 EntryKind::File(char_bag_for_path(root_char_bag, &path))
2038 },
2039 path,
2040 inode: metadata.inode,
2041 mtime: metadata.mtime,
2042 is_symlink: metadata.is_symlink,
2043 is_ignored: false,
2044 }
2045 }
2046
2047 pub fn is_dir(&self) -> bool {
2048 matches!(self.kind, EntryKind::Dir | EntryKind::PendingDir)
2049 }
2050
2051 pub fn is_file(&self) -> bool {
2052 matches!(self.kind, EntryKind::File(_))
2053 }
2054}
2055
2056impl sum_tree::Item for Entry {
2057 type Summary = EntrySummary;
2058
2059 fn summary(&self) -> Self::Summary {
2060 let visible_count = if self.is_ignored { 0 } else { 1 };
2061 let file_count;
2062 let visible_file_count;
2063 if self.is_file() {
2064 file_count = 1;
2065 visible_file_count = visible_count;
2066 } else {
2067 file_count = 0;
2068 visible_file_count = 0;
2069 }
2070
2071 EntrySummary {
2072 max_path: self.path.clone(),
2073 count: 1,
2074 visible_count,
2075 file_count,
2076 visible_file_count,
2077 }
2078 }
2079}
2080
impl sum_tree::KeyedItem for Entry {
    type Key = PathKey;

    // Entries are keyed (and therefore ordered) by their worktree-relative path.
    fn key(&self) -> Self::Key {
        PathKey(self.path.clone())
    }
}
2088
/// Aggregated statistics over a subtree of entries in the path-keyed sum tree.
#[derive(Clone, Debug)]
pub struct EntrySummary {
    /// The greatest (rightmost) path in the summarized subtree.
    max_path: Arc<Path>,
    count: usize,
    /// Entries that are not git-ignored.
    visible_count: usize,
    file_count: usize,
    /// Files that are not git-ignored.
    visible_file_count: usize,
}
2097
2098impl Default for EntrySummary {
2099 fn default() -> Self {
2100 Self {
2101 max_path: Arc::from(Path::new("")),
2102 count: 0,
2103 visible_count: 0,
2104 file_count: 0,
2105 visible_file_count: 0,
2106 }
2107 }
2108}
2109
2110impl sum_tree::Summary for EntrySummary {
2111 type Context = ();
2112
2113 fn add_summary(&mut self, rhs: &Self, _: &()) {
2114 self.max_path = rhs.max_path.clone();
2115 self.count += rhs.count;
2116 self.visible_count += rhs.visible_count;
2117 self.file_count += rhs.file_count;
2118 self.visible_file_count += rhs.visible_file_count;
2119 }
2120}
2121
/// An id-keyed index record that maps an entry id back to its path.
#[derive(Clone, Debug)]
struct PathEntry {
    id: ProjectEntryId,
    path: Arc<Path>,
    is_ignored: bool,
    /// The scan in which this entry was last updated.
    scan_id: usize,
}
2129
impl sum_tree::Item for PathEntry {
    type Summary = PathEntrySummary;

    fn summary(&self) -> Self::Summary {
        PathEntrySummary { max_id: self.id }
    }
}
2137
impl sum_tree::KeyedItem for PathEntry {
    type Key = ProjectEntryId;

    // Path entries are keyed (and therefore ordered) by entry id.
    fn key(&self) -> Self::Key {
        self.id
    }
}
2145
/// Summary for the id-keyed tree: tracks the greatest id in a subtree.
#[derive(Clone, Debug, Default)]
struct PathEntrySummary {
    max_id: ProjectEntryId,
}
2150
impl sum_tree::Summary for PathEntrySummary {
    type Context = ();

    // The rightmost summary always carries the greatest id.
    fn add_summary(&mut self, summary: &Self, _: &Self::Context) {
        self.max_id = summary.max_id;
    }
}
2158
// Allows seeking through the id-keyed tree using an entry id directly.
impl<'a> sum_tree::Dimension<'a, PathEntrySummary> for ProjectEntryId {
    fn add_summary(&mut self, summary: &'a PathEntrySummary, _: &()) {
        *self = summary.max_id;
    }
}
2164
/// Sum-tree key identifying an entry by its worktree-relative path.
#[derive(Clone, Debug, Eq, PartialEq, Ord, PartialOrd)]
pub struct PathKey(Arc<Path>);
2167
2168impl Default for PathKey {
2169 fn default() -> Self {
2170 Self(Path::new("").into())
2171 }
2172}
2173
// Allows seeking through the path-keyed tree using a path key directly.
impl<'a> sum_tree::Dimension<'a, EntrySummary> for PathKey {
    fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
        self.0 = summary.max_path.clone();
    }
}
2179
/// Scans a local worktree's directory tree in the background, keeping
/// `snapshot` up to date as filesystem events arrive.
struct BackgroundScanner {
    snapshot: Mutex<LocalSnapshot>,
    fs: Arc<dyn Fs>,
    /// Channel used to report scan progress back to the worktree.
    status_updates_tx: UnboundedSender<ScanState>,
    executor: Arc<executor::Background>,
    /// Requests from the foreground to rescan specific paths, each paired
    /// with a barrier to release once the rescan has been reported.
    refresh_requests_rx: channel::Receiver<(Vec<PathBuf>, barrier::Sender)>,
    /// The snapshot as of the last reported update, plus the paths that have
    /// changed since then.
    prev_state: Mutex<(Snapshot, Vec<Arc<Path>>)>,
    finished_initial_scan: bool,
}
2189
2190impl BackgroundScanner {
2191 fn new(
2192 snapshot: LocalSnapshot,
2193 fs: Arc<dyn Fs>,
2194 status_updates_tx: UnboundedSender<ScanState>,
2195 executor: Arc<executor::Background>,
2196 refresh_requests_rx: channel::Receiver<(Vec<PathBuf>, barrier::Sender)>,
2197 ) -> Self {
2198 Self {
2199 fs,
2200 status_updates_tx,
2201 executor,
2202 refresh_requests_rx,
2203 prev_state: Mutex::new((snapshot.snapshot.clone(), Vec::new())),
2204 snapshot: Mutex::new(snapshot),
2205 finished_initial_scan: false,
2206 }
2207 }
2208
    /// Entry point for the scanner: performs the initial recursive scan,
    /// then loops forever, applying filesystem events and path refresh
    /// requests until the event stream or request channel closes.
    async fn run(
        &mut self,
        mut events_rx: Pin<Box<dyn Send + Stream<Item = Vec<fsevent::Event>>>>,
    ) {
        use futures::FutureExt as _;

        let (root_abs_path, root_inode) = {
            let snapshot = self.snapshot.lock();
            (
                snapshot.abs_path.clone(),
                snapshot.root_entry().map(|e| e.inode),
            )
        };

        // Populate ignores above the root.
        let ignore_stack;
        for ancestor in root_abs_path.ancestors().skip(1) {
            if let Ok(ignore) = build_gitignore(&ancestor.join(&*GITIGNORE), self.fs.as_ref()).await
            {
                self.snapshot
                    .lock()
                    .ignores_by_parent_abs_path
                    .insert(ancestor.into(), (ignore.into(), 0));
            }
        }
        {
            let mut snapshot = self.snapshot.lock();
            snapshot.scan_id += 1;
            ignore_stack = snapshot.ignore_stack_for_abs_path(&root_abs_path, true);
            // If an ancestor's rules ignore the whole root, mark the root
            // entry itself as ignored.
            if ignore_stack.is_all() {
                if let Some(mut root_entry) = snapshot.root_entry().cloned() {
                    root_entry.is_ignored = true;
                    snapshot.insert_entry(root_entry, self.fs.as_ref());
                }
            }
        };

        // Perform an initial scan of the directory.
        let (scan_job_tx, scan_job_rx) = channel::unbounded();
        smol::block_on(scan_job_tx.send(ScanJob {
            abs_path: root_abs_path,
            path: Arc::from(Path::new("")),
            ignore_stack,
            ancestor_inodes: TreeSet::from_ordered_entries(root_inode),
            scan_queue: scan_job_tx.clone(),
        }))
        .unwrap();
        // Dropping our sender lets `scan_dirs` terminate once all queued
        // jobs (including those the workers themselves enqueue) are done.
        drop(scan_job_tx);
        self.scan_dirs(true, scan_job_rx).await;
        {
            let mut snapshot = self.snapshot.lock();
            snapshot.completed_scan_id = snapshot.scan_id;
        }
        self.send_status_update(false, None);

        // Process any FS events that occurred while performing the initial scan.
        // For these events, update events cannot be as precise, because we didn't
        // have the previous state loaded yet.
        if let Poll::Ready(Some(events)) = futures::poll!(events_rx.next()) {
            let mut paths = events.into_iter().map(|e| e.path).collect::<Vec<_>>();
            while let Poll::Ready(Some(more_events)) = futures::poll!(events_rx.next()) {
                paths.extend(more_events.into_iter().map(|e| e.path));
            }
            self.process_events(paths).await;
        }

        self.finished_initial_scan = true;

        // Continue processing events until the worktree is dropped.
        loop {
            select_biased! {
                // Process any path refresh requests from the worktree. Prioritize
                // these before handling changes reported by the filesystem.
                request = self.refresh_requests_rx.recv().fuse() => {
                    let Ok((paths, barrier)) = request else { break };
                    if !self.process_refresh_request(paths, barrier).await {
                        return;
                    }
                }

                events = events_rx.next().fuse() => {
                    let Some(events) = events else { break };
                    // Batch any additional events that are already ready so
                    // they are processed in a single pass.
                    let mut paths = events.into_iter().map(|e| e.path).collect::<Vec<_>>();
                    while let Poll::Ready(Some(more_events)) = futures::poll!(events_rx.next()) {
                        paths.extend(more_events.into_iter().map(|e| e.path));
                    }
                    self.process_events(paths).await;
                }
            }
        }
    }
2300
    /// Rescans the given paths, then reports status (releasing `barrier`).
    /// Returns false if the status update channel has been closed.
    async fn process_refresh_request(&self, paths: Vec<PathBuf>, barrier: barrier::Sender) -> bool {
        self.reload_entries_for_paths(paths, None).await;
        self.send_status_update(false, Some(barrier))
    }
2305
    /// Applies a batch of filesystem events: rescans the affected paths,
    /// recomputes ignore statuses, prunes stale state, and publishes a
    /// status update.
    async fn process_events(&mut self, paths: Vec<PathBuf>) {
        let (scan_job_tx, scan_job_rx) = channel::unbounded();
        if let Some(mut paths) = self
            .reload_entries_for_paths(paths, Some(scan_job_tx.clone()))
            .await
        {
            paths.sort_unstable();
            // Record which paths changed so the next update can report them.
            util::extend_sorted(&mut self.prev_state.lock().1, paths, usize::MAX, Ord::cmp);
        }
        drop(scan_job_tx);
        self.scan_dirs(false, scan_job_rx).await;

        self.update_ignore_statuses().await;

        let mut snapshot = self.snapshot.lock();
        // Drop repositories whose `.git` directory no longer exists.
        let mut git_repositories = mem::take(&mut snapshot.git_repositories);
        git_repositories.retain(|repo| snapshot.entry_for_path(&repo.git_dir_path).is_some());
        snapshot.git_repositories = git_repositories;
        snapshot.removed_entry_ids.clear();
        snapshot.completed_scan_id = snapshot.scan_id;
        drop(snapshot);

        self.send_status_update(false, None);
    }
2330
    /// Drains `scan_jobs_rx`, scanning directories in parallel across one task
    /// per CPU. While scanning, path refresh requests are still serviced (they
    /// take priority over the scan queue), and periodic progress updates are
    /// sent when `enable_progress_updates` is true.
    async fn scan_dirs(
        &self,
        enable_progress_updates: bool,
        scan_jobs_rx: channel::Receiver<ScanJob>,
    ) {
        use futures::FutureExt as _;

        // If the receiving side is gone, the worktree was dropped; bail out.
        if self
            .status_updates_tx
            .unbounded_send(ScanState::Started)
            .is_err()
        {
            return;
        }

        let progress_update_count = AtomicUsize::new(0);
        self.executor
            .scoped(|scope| {
                for _ in 0..self.executor.num_cpus() {
                    scope.spawn(async {
                        let mut last_progress_update_count = 0;
                        let progress_update_timer = self.progress_timer(enable_progress_updates).fuse();
                        futures::pin_mut!(progress_update_timer);

                        loop {
                            select_biased! {
                                // Process any path refresh requests before moving on to process
                                // the scan queue, so that user operations are prioritized.
                                request = self.refresh_requests_rx.recv().fuse() => {
                                    let Ok((paths, barrier)) = request else { break };
                                    if !self.process_refresh_request(paths, barrier).await {
                                        return;
                                    }
                                }

                                // Send periodic progress updates to the worktree. Use an atomic counter
                                // to ensure that only one of the workers sends a progress update after
                                // the update interval elapses.
                                _ = progress_update_timer => {
                                    match progress_update_count.compare_exchange(
                                        last_progress_update_count,
                                        last_progress_update_count + 1,
                                        SeqCst,
                                        SeqCst
                                    ) {
                                        Ok(_) => {
                                            // This worker won the race; it sends the update.
                                            last_progress_update_count += 1;
                                            self.send_status_update(true, None);
                                        }
                                        Err(count) => {
                                            // Another worker already sent one; resynchronize.
                                            last_progress_update_count = count;
                                        }
                                    }
                                    progress_update_timer.set(self.progress_timer(enable_progress_updates).fuse());
                                }

                                // Recursively load directories from the file system.
                                job = scan_jobs_rx.recv().fuse() => {
                                    let Ok(job) = job else { break };
                                    if let Err(err) = self.scan_dir(&job).await {
                                        // Errors reading the root itself are reported elsewhere.
                                        if job.path.as_ref() != Path::new("") {
                                            log::error!("error scanning directory {:?}: {}", job.abs_path, err);
                                        }
                                    }
                                }
                            }
                        }
                    })
                }
            })
            .await;
    }
2403
2404 fn send_status_update(&self, scanning: bool, barrier: Option<barrier::Sender>) -> bool {
2405 let mut prev_state = self.prev_state.lock();
2406 let snapshot = self.snapshot.lock().clone();
2407 let mut old_snapshot = snapshot.snapshot.clone();
2408 mem::swap(&mut old_snapshot, &mut prev_state.0);
2409 let changed_paths = mem::take(&mut prev_state.1);
2410 let changes = self.build_change_set(&old_snapshot, &snapshot.snapshot, changed_paths);
2411 self.status_updates_tx
2412 .unbounded_send(ScanState::Updated {
2413 snapshot,
2414 changes,
2415 scanning,
2416 barrier,
2417 })
2418 .is_ok()
2419 }
2420
    /// Reads the directory described by `job`, building an entry for each
    /// child, honoring `.gitignore` files found along the way, and enqueueing
    /// follow-up scan jobs for subdirectories. Entries are inserted into the
    /// snapshot before the child jobs are queued.
    async fn scan_dir(&self, job: &ScanJob) -> Result<()> {
        let mut new_entries: Vec<Entry> = Vec::new();
        // Parallel to the directory entries in `new_entries`: one element per
        // subdirectory, `Some(job)` to scan it, `None` for symlink cycles.
        let mut new_jobs: Vec<Option<ScanJob>> = Vec::new();
        let mut ignore_stack = job.ignore_stack.clone();
        let mut new_ignore = None;
        let (root_abs_path, root_char_bag, next_entry_id) = {
            let snapshot = self.snapshot.lock();
            (
                snapshot.abs_path().clone(),
                snapshot.root_char_bag,
                snapshot.next_entry_id.clone(),
            )
        };
        let mut child_paths = self.fs.read_dir(&job.abs_path).await?;
        while let Some(child_abs_path) = child_paths.next().await {
            let child_abs_path: Arc<Path> = match child_abs_path {
                Ok(child_abs_path) => child_abs_path.into(),
                Err(error) => {
                    log::error!("error processing entry {:?}", error);
                    continue;
                }
            };

            let child_name = child_abs_path.file_name().unwrap();
            let child_path: Arc<Path> = job.path.join(child_name).into();
            let child_metadata = match self.fs.metadata(&child_abs_path).await {
                Ok(Some(metadata)) => metadata,
                // The child disappeared between listing and statting; skip it.
                Ok(None) => continue,
                Err(err) => {
                    log::error!("error processing {:?}: {:?}", child_abs_path, err);
                    continue;
                }
            };

            // If we find a .gitignore, add it to the stack of ignores used to determine which paths are ignored
            if child_name == *GITIGNORE {
                match build_gitignore(&child_abs_path, self.fs.as_ref()).await {
                    Ok(ignore) => {
                        let ignore = Arc::new(ignore);
                        ignore_stack = ignore_stack.append(job.abs_path.clone(), ignore.clone());
                        new_ignore = Some(ignore);
                    }
                    Err(error) => {
                        log::error!(
                            "error loading .gitignore file {:?} - {:?}",
                            child_name,
                            error
                        );
                    }
                }

                // Update the ignore status of any child entries we've already processed to
                // reflect the ignore file in the current directory. Because `.gitignore`
                // starts with a `.`, it sorts near the start of the listing, so such
                // entries should rarely be numerous. Update the ignore stack associated
                // with any new jobs as well.
                let mut new_jobs = new_jobs.iter_mut();
                for entry in &mut new_entries {
                    let entry_abs_path = root_abs_path.join(&entry.path);
                    entry.is_ignored =
                        ignore_stack.is_abs_path_ignored(&entry_abs_path, entry.is_dir());

                    if entry.is_dir() {
                        // `new_jobs` stays parallel to directory entries, so a
                        // missing element indicates a logic error above.
                        if let Some(job) = new_jobs.next().expect("Missing scan job for entry") {
                            job.ignore_stack = if entry.is_ignored {
                                IgnoreStack::all()
                            } else {
                                ignore_stack.clone()
                            };
                        }
                    }
                }
            }

            let mut child_entry = Entry::new(
                child_path.clone(),
                &child_metadata,
                &next_entry_id,
                root_char_bag,
            );

            if child_entry.is_dir() {
                let is_ignored = ignore_stack.is_abs_path_ignored(&child_abs_path, true);
                child_entry.is_ignored = is_ignored;

                // Avoid recursing until crash in the case of a recursive symlink
                if !job.ancestor_inodes.contains(&child_entry.inode) {
                    let mut ancestor_inodes = job.ancestor_inodes.clone();
                    ancestor_inodes.insert(child_entry.inode);

                    new_jobs.push(Some(ScanJob {
                        abs_path: child_abs_path,
                        path: child_path,
                        ignore_stack: if is_ignored {
                            // Once a directory is ignored, everything beneath it is too.
                            IgnoreStack::all()
                        } else {
                            ignore_stack.clone()
                        },
                        ancestor_inodes,
                        scan_queue: job.scan_queue.clone(),
                    }));
                } else {
                    // Cycle detected: push a placeholder so `new_jobs` stays
                    // parallel with directory entries, but don't recurse.
                    new_jobs.push(None);
                }
            } else {
                child_entry.is_ignored = ignore_stack.is_abs_path_ignored(&child_abs_path, false);
            }

            new_entries.push(child_entry);
        }

        self.snapshot.lock().populate_dir(
            job.path.clone(),
            new_entries,
            new_ignore,
            self.fs.as_ref(),
        );

        // Enqueue subdirectory scans only after this directory's entries are
        // visible in the snapshot.
        for new_job in new_jobs {
            if let Some(new_job) = new_job {
                job.scan_queue.send(new_job).await.unwrap();
            }
        }

        Ok(())
    }
2546
    /// Refreshes the snapshot entries for the given absolute paths: removes
    /// entries for deleted paths and re-stats the rest. When `scan_queue_tx`
    /// is provided, directories are additionally enqueued for recursive
    /// rescanning. Returns the affected worktree-relative paths, or `None`
    /// if the root path could not be canonicalized.
    async fn reload_entries_for_paths(
        &self,
        mut abs_paths: Vec<PathBuf>,
        scan_queue_tx: Option<Sender<ScanJob>>,
    ) -> Option<Vec<Arc<Path>>> {
        let doing_recursive_update = scan_queue_tx.is_some();

        // Sort so ancestors precede descendants, then drop any path whose
        // ancestor is also present (the ancestor's refresh covers it).
        abs_paths.sort_unstable();
        abs_paths.dedup_by(|a, b| a.starts_with(&b));

        let root_abs_path = self.snapshot.lock().abs_path.clone();
        let root_canonical_path = self.fs.canonicalize(&root_abs_path).await.log_err()?;
        // Stat all paths concurrently before taking the snapshot lock.
        let metadata = futures::future::join_all(
            abs_paths
                .iter()
                .map(|abs_path| self.fs.metadata(&abs_path))
                .collect::<Vec<_>>(),
        )
        .await;

        let mut snapshot = self.snapshot.lock();
        let is_idle = snapshot.completed_scan_id == snapshot.scan_id;
        snapshot.scan_id += 1;
        // A non-recursive refresh finishes within this call, so the scan it
        // starts is immediately complete.
        if is_idle && !doing_recursive_update {
            snapshot.completed_scan_id = snapshot.scan_id;
        }

        // Remove any entries for paths that no longer exist or are being recursively
        // refreshed. Do this before adding any new entries, so that renames can be
        // detected regardless of the order of the paths.
        let mut event_paths = Vec::<Arc<Path>>::with_capacity(abs_paths.len());
        for (abs_path, metadata) in abs_paths.iter().zip(metadata.iter()) {
            if let Ok(path) = abs_path.strip_prefix(&root_canonical_path) {
                if matches!(metadata, Ok(None)) || doing_recursive_update {
                    snapshot.remove_path(path);
                }
                event_paths.push(path.into());
            } else {
                log::error!(
                    "unexpected event {:?} for root path {:?}",
                    abs_path,
                    root_canonical_path
                );
            }
        }

        for (path, metadata) in event_paths.iter().cloned().zip(metadata.into_iter()) {
            let abs_path: Arc<Path> = root_abs_path.join(&path).into();

            match metadata {
                Ok(Some(metadata)) => {
                    let ignore_stack =
                        snapshot.ignore_stack_for_abs_path(&abs_path, metadata.is_dir);
                    let mut fs_entry = Entry::new(
                        path.clone(),
                        &metadata,
                        snapshot.next_entry_id.as_ref(),
                        snapshot.root_char_bag,
                    );
                    fs_entry.is_ignored = ignore_stack.is_all();
                    snapshot.insert_entry(fs_entry, self.fs.as_ref());

                    // A change inside a `.git` directory invalidates that
                    // repository's index; reload it and bump its scan id.
                    let scan_id = snapshot.scan_id;
                    if let Some(repo) = snapshot.repo_with_dot_git_containing(&path) {
                        repo.repo.lock().reload_index();
                        repo.scan_id = scan_id;
                    }

                    if let Some(scan_queue_tx) = &scan_queue_tx {
                        let mut ancestor_inodes = snapshot.ancestor_inodes_for_path(&path);
                        // Guard against symlink cycles before queueing a
                        // recursive scan of this directory.
                        if metadata.is_dir && !ancestor_inodes.contains(&metadata.inode) {
                            ancestor_inodes.insert(metadata.inode);
                            smol::block_on(scan_queue_tx.send(ScanJob {
                                abs_path,
                                path,
                                ignore_stack,
                                ancestor_inodes,
                                scan_queue: scan_queue_tx.clone(),
                            }))
                            .unwrap();
                        }
                    }
                }
                Ok(None) => {}
                Err(err) => {
                    // TODO - create a special 'error' entry in the entries tree to mark this
                    log::error!("error reading file on event {:?}", err);
                }
            }
        }

        Some(event_paths)
    }
2640
    /// Recomputes ignore statuses after `.gitignore` files have changed:
    /// prunes ignore records whose files were deleted, then re-evaluates, in
    /// parallel, every directory subtree whose governing ignore file changed.
    async fn update_ignore_statuses(&self) {
        use futures::FutureExt as _;

        let mut snapshot = self.snapshot.lock().clone();
        let mut ignores_to_update = Vec::new();
        let mut ignores_to_delete = Vec::new();
        for (parent_abs_path, (_, scan_id)) in &snapshot.ignores_by_parent_abs_path {
            if let Ok(parent_path) = parent_abs_path.strip_prefix(&snapshot.abs_path) {
                // An ignore file touched since the last completed scan means
                // its subtree needs re-evaluation (if the directory still exists).
                if *scan_id > snapshot.completed_scan_id
                    && snapshot.entry_for_path(parent_path).is_some()
                {
                    ignores_to_update.push(parent_abs_path.clone());
                }

                // The `.gitignore` file itself no longer exists.
                let ignore_path = parent_path.join(&*GITIGNORE);
                if snapshot.entry_for_path(ignore_path).is_none() {
                    ignores_to_delete.push(parent_abs_path.clone());
                }
            }
        }

        for parent_abs_path in ignores_to_delete {
            // Remove from both the local clone and the shared snapshot.
            snapshot.ignores_by_parent_abs_path.remove(&parent_abs_path);
            self.snapshot
                .lock()
                .ignores_by_parent_abs_path
                .remove(&parent_abs_path);
        }

        let (ignore_queue_tx, ignore_queue_rx) = channel::unbounded();
        ignores_to_update.sort_unstable();
        let mut ignores_to_update = ignores_to_update.into_iter().peekable();
        while let Some(parent_abs_path) = ignores_to_update.next() {
            // Skip descendants: updating `parent_abs_path` already walks its
            // entire subtree.
            while ignores_to_update
                .peek()
                .map_or(false, |p| p.starts_with(&parent_abs_path))
            {
                ignores_to_update.next().unwrap();
            }

            let ignore_stack = snapshot.ignore_stack_for_abs_path(&parent_abs_path, true);
            smol::block_on(ignore_queue_tx.send(UpdateIgnoreStatusJob {
                abs_path: parent_abs_path,
                ignore_stack,
                ignore_queue: ignore_queue_tx.clone(),
            }))
            .unwrap();
        }
        // Dropping the sender lets the worker loops below terminate once the
        // queue (including recursively-spawned jobs) drains.
        drop(ignore_queue_tx);

        self.executor
            .scoped(|scope| {
                for _ in 0..self.executor.num_cpus() {
                    scope.spawn(async {
                        loop {
                            select_biased! {
                                // Process any path refresh requests before moving on to process
                                // the queue of ignore statuses.
                                request = self.refresh_requests_rx.recv().fuse() => {
                                    let Ok((paths, barrier)) = request else { break };
                                    if !self.process_refresh_request(paths, barrier).await {
                                        return;
                                    }
                                }

                                // Recursively process directories whose ignores have changed.
                                job = ignore_queue_rx.recv().fuse() => {
                                    let Ok(job) = job else { break };
                                    self.update_ignore_status(job, &snapshot).await;
                                }
                            }
                        }
                    });
                }
            })
            .await;
    }
2718
    /// Re-evaluates the ignore status of the immediate children of
    /// `job.abs_path`, queueing further jobs for subdirectories and writing
    /// any entries whose status changed back into the shared snapshot.
    async fn update_ignore_status(&self, job: UpdateIgnoreStatusJob, snapshot: &LocalSnapshot) {
        let mut ignore_stack = job.ignore_stack;
        // If this directory carries its own `.gitignore`, it applies to the children.
        if let Some((ignore, _)) = snapshot.ignores_by_parent_abs_path.get(&job.abs_path) {
            ignore_stack = ignore_stack.append(job.abs_path.clone(), ignore.clone());
        }

        let mut entries_by_id_edits = Vec::new();
        let mut entries_by_path_edits = Vec::new();
        let path = job.abs_path.strip_prefix(&snapshot.abs_path).unwrap();
        for mut entry in snapshot.child_entries(path).cloned() {
            let was_ignored = entry.is_ignored;
            let abs_path = snapshot.abs_path().join(&entry.path);
            entry.is_ignored = ignore_stack.is_abs_path_ignored(&abs_path, entry.is_dir());
            if entry.is_dir() {
                let child_ignore_stack = if entry.is_ignored {
                    // Everything beneath an ignored directory is ignored.
                    IgnoreStack::all()
                } else {
                    ignore_stack.clone()
                };
                job.ignore_queue
                    .send(UpdateIgnoreStatusJob {
                        abs_path: abs_path.into(),
                        ignore_stack: child_ignore_stack,
                        ignore_queue: job.ignore_queue.clone(),
                    })
                    .await
                    .unwrap();
            }

            // Only record edits for entries whose status actually changed.
            if entry.is_ignored != was_ignored {
                let mut path_entry = snapshot.entries_by_id.get(&entry.id, &()).unwrap().clone();
                path_entry.scan_id = snapshot.scan_id;
                path_entry.is_ignored = entry.is_ignored;
                entries_by_id_edits.push(Edit::Insert(path_entry));
                entries_by_path_edits.push(Edit::Insert(entry));
            }
        }

        let mut snapshot = self.snapshot.lock();
        snapshot.entries_by_path.edit(entries_by_path_edits, &());
        snapshot.entries_by_id.edit(entries_by_id_edits, &());
    }
2761
    /// Computes the per-path changes between two snapshots, considering only
    /// the given `event_paths` and their subtrees.
    fn build_change_set(
        &self,
        old_snapshot: &Snapshot,
        new_snapshot: &Snapshot,
        event_paths: Vec<Arc<Path>>,
    ) -> HashMap<Arc<Path>, PathChange> {
        use PathChange::{Added, AddedOrUpdated, Removed, Updated};

        let mut changes = HashMap::default();
        let mut old_paths = old_snapshot.entries_by_path.cursor::<PathKey>();
        let mut new_paths = new_snapshot.entries_by_path.cursor::<PathKey>();
        // Events that arrived before the initial scan finished can't tell
        // "added" apart from "updated"; see the Equal branch below.
        let received_before_initialized = !self.finished_initial_scan;

        for path in event_paths {
            let path = PathKey(path);
            // Position both cursors at the event path, then walk the two
            // snapshots in lockstep over that path's subtree.
            old_paths.seek(&path, Bias::Left, &());
            new_paths.seek(&path, Bias::Left, &());

            loop {
                match (old_paths.item(), new_paths.item()) {
                    (Some(old_entry), Some(new_entry)) => {
                        // Stop once both cursors have moved beyond the event
                        // path and out of its subtree.
                        if old_entry.path > path.0
                            && new_entry.path > path.0
                            && !old_entry.path.starts_with(&path.0)
                            && !new_entry.path.starts_with(&path.0)
                        {
                            break;
                        }

                        match Ord::cmp(&old_entry.path, &new_entry.path) {
                            // Present only in the old snapshot: removed.
                            Ordering::Less => {
                                changes.insert(old_entry.path.clone(), Removed);
                                old_paths.next(&());
                            }
                            Ordering::Equal => {
                                if received_before_initialized {
                                    // If the worktree was not fully initialized when this event was generated,
                                    // we can't know whether this entry was added during the scan or whether
                                    // it was merely updated.
                                    changes.insert(new_entry.path.clone(), AddedOrUpdated);
                                } else if old_entry.mtime != new_entry.mtime {
                                    changes.insert(new_entry.path.clone(), Updated);
                                }
                                old_paths.next(&());
                                new_paths.next(&());
                            }
                            // Present only in the new snapshot: added.
                            Ordering::Greater => {
                                changes.insert(new_entry.path.clone(), Added);
                                new_paths.next(&());
                            }
                        }
                    }
                    (Some(old_entry), None) => {
                        changes.insert(old_entry.path.clone(), Removed);
                        old_paths.next(&());
                    }
                    (None, Some(new_entry)) => {
                        changes.insert(new_entry.path.clone(), Added);
                        new_paths.next(&());
                    }
                    (None, None) => break,
                }
            }
        }
        changes
    }
2828
2829 async fn progress_timer(&self, running: bool) {
2830 if !running {
2831 return futures::future::pending().await;
2832 }
2833
2834 #[cfg(any(test, feature = "test-support"))]
2835 if self.fs.is_fake() {
2836 return self.executor.simulate_random_delay().await;
2837 }
2838
2839 smol::Timer::after(Duration::from_millis(100)).await;
2840 }
2841}
2842
2843fn char_bag_for_path(root_char_bag: CharBag, path: &Path) -> CharBag {
2844 let mut result = root_char_bag;
2845 result.extend(
2846 path.to_string_lossy()
2847 .chars()
2848 .map(|c| c.to_ascii_lowercase()),
2849 );
2850 result
2851}
2852
/// A unit of work for the background scanner: load the contents of one directory.
struct ScanJob {
    /// Absolute path of the directory to scan.
    abs_path: Arc<Path>,
    /// The directory's path relative to the worktree root.
    path: Arc<Path>,
    /// Ignore rules in effect for this directory's contents.
    ignore_stack: Arc<IgnoreStack>,
    /// Queue on which jobs for subdirectories are enqueued.
    scan_queue: Sender<ScanJob>,
    /// Inodes of ancestor directories, used to detect recursive symlinks.
    ancestor_inodes: TreeSet<u64>,
}
2860
/// A unit of work for re-evaluating ignore statuses: process the children of
/// one directory whose governing `.gitignore` rules changed.
struct UpdateIgnoreStatusJob {
    /// Absolute path of the directory whose children should be re-evaluated.
    abs_path: Arc<Path>,
    /// Ignore rules inherited from ancestor directories.
    ignore_stack: Arc<IgnoreStack>,
    /// Queue on which jobs for subdirectories are enqueued.
    ignore_queue: Sender<UpdateIgnoreStatusJob>,
}
2866
pub trait WorktreeHandle {
    /// Test-only helper: forces any pending filesystem events to be delivered
    /// and processed before the returned future resolves.
    #[cfg(any(test, feature = "test-support"))]
    fn flush_fs_events<'a>(
        &self,
        cx: &'a gpui::TestAppContext,
    ) -> futures::future::LocalBoxFuture<'a, ()>;
}
2874
impl WorktreeHandle for ModelHandle<Worktree> {
    // The worktree's FS event stream sometimes delivers "redundant" events for FS changes that
    // occurred before the worktree was constructed. These events can cause the worktree to perform
    // extra directory scans, and emit extra scan-state notifications.
    //
    // This function mutates the worktree's directory and waits for those mutations to be picked up,
    // to ensure that all redundant FS events have already been processed.
    #[cfg(any(test, feature = "test-support"))]
    fn flush_fs_events<'a>(
        &self,
        cx: &'a gpui::TestAppContext,
    ) -> futures::future::LocalBoxFuture<'a, ()> {
        use smol::future::FutureExt;

        let filename = "fs-event-sentinel";
        let tree = self.clone();
        let (fs, root_path) = self.read_with(cx, |tree, _| {
            let tree = tree.as_local().unwrap();
            (tree.fs.clone(), tree.abs_path().clone())
        });

        async move {
            // Create a sentinel file and wait until the worktree observes it,
            // proving the event stream has caught up to the creation.
            fs.create_file(&root_path.join(filename), Default::default())
                .await
                .unwrap();
            tree.condition(cx, |tree, _| tree.entry_for_path(filename).is_some())
                .await;

            // Remove the sentinel and wait for the removal to be observed too.
            fs.remove_file(&root_path.join(filename), Default::default())
                .await
                .unwrap();
            tree.condition(cx, |tree, _| tree.entry_for_path(filename).is_none())
                .await;

            cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
                .await;
        }
        .boxed_local()
    }
}
2915
/// Running totals accumulated while walking the entry tree; used as a
/// `sum_tree` dimension so a `Traversal` can seek by filtered entry counts.
#[derive(Clone, Debug)]
struct TraversalProgress<'a> {
    /// The greatest entry path reached so far.
    max_path: &'a Path,
    /// Total entries passed (files and directories, ignored included).
    count: usize,
    /// Entries passed that are not ignored.
    visible_count: usize,
    /// Files passed (ignored included).
    file_count: usize,
    /// Files passed that are not ignored.
    visible_file_count: usize,
}
2924
2925impl<'a> TraversalProgress<'a> {
2926 fn count(&self, include_dirs: bool, include_ignored: bool) -> usize {
2927 match (include_ignored, include_dirs) {
2928 (true, true) => self.count,
2929 (true, false) => self.file_count,
2930 (false, true) => self.visible_count,
2931 (false, false) => self.visible_file_count,
2932 }
2933 }
2934}
2935
impl<'a> sum_tree::Dimension<'a, EntrySummary> for TraversalProgress<'a> {
    /// Accumulates an `EntrySummary` into this progress: tracks the maximum
    /// path reached and adds the summary's entry counts to the running totals.
    fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
        self.max_path = summary.max_path.as_ref();
        self.count += summary.count;
        self.visible_count += summary.visible_count;
        self.file_count += summary.file_count;
        self.visible_file_count += summary.visible_file_count;
    }
}
2945
2946impl<'a> Default for TraversalProgress<'a> {
2947 fn default() -> Self {
2948 Self {
2949 max_path: Path::new(""),
2950 count: 0,
2951 visible_count: 0,
2952 file_count: 0,
2953 visible_file_count: 0,
2954 }
2955 }
2956}
2957
/// A filtered, cursor-based walk over a snapshot's entries in path order.
pub struct Traversal<'a> {
    cursor: sum_tree::Cursor<'a, Entry, TraversalProgress<'a>>,
    /// Whether ignored entries are yielded.
    include_ignored: bool,
    /// Whether directory entries are yielded.
    include_dirs: bool,
}
2963
impl<'a> Traversal<'a> {
    /// Moves to the next entry matching this traversal's filters.
    /// Returns `false` when the end of the tree is reached.
    pub fn advance(&mut self) -> bool {
        self.advance_to_offset(self.offset() + 1)
    }

    /// Seeks forward until `offset()` equals `offset` (counted over the
    /// entries matching this traversal's filters).
    pub fn advance_to_offset(&mut self, offset: usize) -> bool {
        self.cursor.seek_forward(
            &TraversalTarget::Count {
                count: offset,
                include_dirs: self.include_dirs,
                include_ignored: self.include_ignored,
            },
            Bias::Right,
            &(),
        )
    }

    /// Skips past the current entry's entire subtree, landing on the next
    /// entry that matches this traversal's filters. Returns `false` if no
    /// such entry exists.
    pub fn advance_to_sibling(&mut self) -> bool {
        while let Some(entry) = self.cursor.item() {
            self.cursor.seek_forward(
                &TraversalTarget::PathSuccessor(&entry.path),
                Bias::Left,
                &(),
            );
            if let Some(entry) = self.cursor.item() {
                // Keep skipping subtrees until an entry passes the filters.
                if (self.include_dirs || !entry.is_dir())
                    && (self.include_ignored || !entry.is_ignored)
                {
                    return true;
                }
            }
        }
        false
    }

    /// The entry at the current cursor position, if any.
    pub fn entry(&self) -> Option<&'a Entry> {
        self.cursor.item()
    }

    /// The number of filtered entries preceding the current position.
    pub fn offset(&self) -> usize {
        self.cursor
            .start()
            .count(self.include_dirs, self.include_ignored)
    }
}
3009
3010impl<'a> Iterator for Traversal<'a> {
3011 type Item = &'a Entry;
3012
3013 fn next(&mut self) -> Option<Self::Item> {
3014 if let Some(item) = self.entry() {
3015 self.advance();
3016 Some(item)
3017 } else {
3018 None
3019 }
3020 }
3021}
3022
/// Seek targets for a `Traversal`'s cursor.
#[derive(Debug)]
enum TraversalTarget<'a> {
    /// Seek to the given path.
    Path(&'a Path),
    /// Seek to the first entry lying outside the given path's subtree.
    PathSuccessor(&'a Path),
    /// Seek to the position whose filtered entry count equals `count`.
    Count {
        count: usize,
        include_ignored: bool,
        include_dirs: bool,
    },
}
3033
3034impl<'a, 'b> SeekTarget<'a, EntrySummary, TraversalProgress<'a>> for TraversalTarget<'b> {
3035 fn cmp(&self, cursor_location: &TraversalProgress<'a>, _: &()) -> Ordering {
3036 match self {
3037 TraversalTarget::Path(path) => path.cmp(&cursor_location.max_path),
3038 TraversalTarget::PathSuccessor(path) => {
3039 if !cursor_location.max_path.starts_with(path) {
3040 Ordering::Equal
3041 } else {
3042 Ordering::Greater
3043 }
3044 }
3045 TraversalTarget::Count {
3046 count,
3047 include_dirs,
3048 include_ignored,
3049 } => Ord::cmp(
3050 count,
3051 &cursor_location.count(*include_dirs, *include_ignored),
3052 ),
3053 }
3054 }
3055}
3056
/// Iterator over the immediate children of a directory entry; the
/// descendants of each child are skipped.
struct ChildEntriesIter<'a> {
    /// The directory whose children are yielded.
    parent_path: &'a Path,
    traversal: Traversal<'a>,
}
3061
3062impl<'a> Iterator for ChildEntriesIter<'a> {
3063 type Item = &'a Entry;
3064
3065 fn next(&mut self) -> Option<Self::Item> {
3066 if let Some(item) = self.traversal.entry() {
3067 if item.path.starts_with(&self.parent_path) {
3068 self.traversal.advance_to_sibling();
3069 return Some(item);
3070 }
3071 }
3072 None
3073 }
3074}
3075
3076impl<'a> From<&'a Entry> for proto::Entry {
3077 fn from(entry: &'a Entry) -> Self {
3078 Self {
3079 id: entry.id.to_proto(),
3080 is_dir: entry.is_dir(),
3081 path: entry.path.to_string_lossy().into(),
3082 inode: entry.inode,
3083 mtime: Some(entry.mtime.into()),
3084 is_symlink: entry.is_symlink,
3085 is_ignored: entry.is_ignored,
3086 }
3087 }
3088}
3089
3090impl<'a> TryFrom<(&'a CharBag, proto::Entry)> for Entry {
3091 type Error = anyhow::Error;
3092
3093 fn try_from((root_char_bag, entry): (&'a CharBag, proto::Entry)) -> Result<Self> {
3094 if let Some(mtime) = entry.mtime {
3095 let kind = if entry.is_dir {
3096 EntryKind::Dir
3097 } else {
3098 let mut char_bag = *root_char_bag;
3099 char_bag.extend(entry.path.chars().map(|c| c.to_ascii_lowercase()));
3100 EntryKind::File(char_bag)
3101 };
3102 let path: Arc<Path> = PathBuf::from(entry.path).into();
3103 Ok(Entry {
3104 id: ProjectEntryId::from_proto(entry.id),
3105 kind,
3106 path,
3107 inode: entry.inode,
3108 mtime: mtime.into(),
3109 is_symlink: entry.is_symlink,
3110 is_ignored: entry.is_ignored,
3111 })
3112 } else {
3113 Err(anyhow!(
3114 "missing mtime in remote worktree entry {:?}",
3115 entry.path
3116 ))
3117 }
3118 }
3119}
3120
3121#[cfg(test)]
3122mod tests {
3123 use super::*;
3124 use fs::repository::FakeGitRepository;
3125 use fs::{FakeFs, RealFs};
3126 use gpui::{executor::Deterministic, TestAppContext};
3127 use pretty_assertions::assert_eq;
3128 use rand::prelude::*;
3129 use serde_json::json;
3130 use std::{env, fmt::Write};
3131 use util::{http::FakeHttpClient, test::temp_tree};
3132
    /// Listing entries without ignored files must omit the gitignored path
    /// `a/b`, while listing with ignored files included must contain it.
    #[gpui::test]
    async fn test_traversal(cx: &mut TestAppContext) {
        let fs = FakeFs::new(cx.background());
        fs.insert_tree(
            "/root",
            json!({
                ".gitignore": "a/b\n",
                "a": {
                    "b": "",
                    "c": "",
                }
            }),
        )
        .await;

        let http_client = FakeHttpClient::with_404_response();
        let client = cx.read(|cx| Client::new(http_client, cx));

        let tree = Worktree::local(
            client,
            Path::new("/root"),
            true,
            fs,
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();
        // Wait for the initial background scan to complete before asserting.
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;

        tree.read_with(cx, |tree, _| {
            assert_eq!(
                tree.entries(false)
                    .map(|entry| entry.path.as_ref())
                    .collect::<Vec<_>>(),
                vec![
                    Path::new(""),
                    Path::new(".gitignore"),
                    Path::new("a"),
                    Path::new("a/c"),
                ]
            );
            assert_eq!(
                tree.entries(true)
                    .map(|entry| entry.path.as_ref())
                    .collect::<Vec<_>>(),
                vec![
                    Path::new(""),
                    Path::new(".gitignore"),
                    Path::new("a"),
                    Path::new("a/b"),
                    Path::new("a/c"),
                ]
            );
        })
    }
3190
    /// Symlinks pointing back at an ancestor directory must show up as entries
    /// but must not be recursed into (which would scan forever), and renaming
    /// such a symlink must be picked up by a rescan.
    #[gpui::test(iterations = 10)]
    async fn test_circular_symlinks(executor: Arc<Deterministic>, cx: &mut TestAppContext) {
        let fs = FakeFs::new(cx.background());
        fs.insert_tree(
            "/root",
            json!({
                "lib": {
                    "a": {
                        "a.txt": ""
                    },
                    "b": {
                        "b.txt": ""
                    }
                }
            }),
        )
        .await;
        // Each symlink resolves to its parent's parent, forming a cycle.
        fs.insert_symlink("/root/lib/a/lib", "..".into()).await;
        fs.insert_symlink("/root/lib/b/lib", "..".into()).await;

        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
        let tree = Worktree::local(
            client,
            Path::new("/root"),
            true,
            fs.clone(),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();

        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;

        // The symlink entries are present, but nothing beneath them is.
        tree.read_with(cx, |tree, _| {
            assert_eq!(
                tree.entries(false)
                    .map(|entry| entry.path.as_ref())
                    .collect::<Vec<_>>(),
                vec![
                    Path::new(""),
                    Path::new("lib"),
                    Path::new("lib/a"),
                    Path::new("lib/a/a.txt"),
                    Path::new("lib/a/lib"),
                    Path::new("lib/b"),
                    Path::new("lib/b/b.txt"),
                    Path::new("lib/b/lib"),
                ]
            );
        });

        // Renaming one of the symlinks should be reflected after a rescan.
        fs.rename(
            Path::new("/root/lib/a/lib"),
            Path::new("/root/lib/a/lib-2"),
            Default::default(),
        )
        .await
        .unwrap();
        executor.run_until_parked();
        tree.read_with(cx, |tree, _| {
            assert_eq!(
                tree.entries(false)
                    .map(|entry| entry.path.as_ref())
                    .collect::<Vec<_>>(),
                vec![
                    Path::new(""),
                    Path::new("lib"),
                    Path::new("lib/a"),
                    Path::new("lib/a/a.txt"),
                    Path::new("lib/a/lib-2"),
                    Path::new("lib/b"),
                    Path::new("lib/b/b.txt"),
                    Path::new("lib/b/lib"),
                ]
            );
        });
    }
3270
    /// Files created after the initial scan must get correct ignore statuses
    /// from both the worktree's own `.gitignore` and an ancestor's, and the
    /// `.git` directory itself must be treated as ignored.
    #[gpui::test]
    async fn test_rescan_with_gitignore(cx: &mut TestAppContext) {
        let parent_dir = temp_tree(json!({
            ".gitignore": "ancestor-ignored-file1\nancestor-ignored-file2\n",
            "tree": {
                ".git": {},
                ".gitignore": "ignored-dir\n",
                "tracked-dir": {
                    "tracked-file1": "",
                    "ancestor-ignored-file1": "",
                },
                "ignored-dir": {
                    "ignored-file1": ""
                }
            }
        }));
        let dir = parent_dir.path().join("tree");

        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));

        let tree = Worktree::local(
            client,
            dir.as_path(),
            true,
            Arc::new(RealFs),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;
        tree.flush_fs_events(cx).await;
        // Statuses from the initial scan.
        cx.read(|cx| {
            let tree = tree.read(cx);
            assert!(
                !tree
                    .entry_for_path("tracked-dir/tracked-file1")
                    .unwrap()
                    .is_ignored
            );
            assert!(
                tree.entry_for_path("tracked-dir/ancestor-ignored-file1")
                    .unwrap()
                    .is_ignored
            );
            assert!(
                tree.entry_for_path("ignored-dir/ignored-file1")
                    .unwrap()
                    .is_ignored
            );
        });

        // Create new files after the initial scan and let the rescan pick them up.
        std::fs::write(dir.join("tracked-dir/tracked-file2"), "").unwrap();
        std::fs::write(dir.join("tracked-dir/ancestor-ignored-file2"), "").unwrap();
        std::fs::write(dir.join("ignored-dir/ignored-file2"), "").unwrap();
        tree.flush_fs_events(cx).await;
        cx.read(|cx| {
            let tree = tree.read(cx);
            assert!(
                !tree
                    .entry_for_path("tracked-dir/tracked-file2")
                    .unwrap()
                    .is_ignored
            );
            assert!(
                tree.entry_for_path("tracked-dir/ancestor-ignored-file2")
                    .unwrap()
                    .is_ignored
            );
            assert!(
                tree.entry_for_path("ignored-dir/ignored-file2")
                    .unwrap()
                    .is_ignored
            );
            assert!(tree.entry_for_path(".git").unwrap().is_ignored);
        });
    }
3349
    /// `repo_for` must resolve a path to its nearest enclosing git repository
    /// (including nested repositories), bump the repository's scan id when its
    /// `.git` directory changes, and forget the repository when `.git` is removed.
    #[gpui::test]
    async fn test_git_repository_for_path(cx: &mut TestAppContext) {
        let root = temp_tree(json!({
            "dir1": {
                ".git": {},
                "deps": {
                    "dep1": {
                        ".git": {},
                        "src": {
                            "a.txt": ""
                        }
                    }
                },
                "src": {
                    "b.txt": ""
                }
            },
            "c.txt": "",
        }));

        let http_client = FakeHttpClient::with_404_response();
        let client = cx.read(|cx| Client::new(http_client, cx));
        let tree = Worktree::local(
            client,
            root.path(),
            true,
            Arc::new(RealFs),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();

        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;
        tree.flush_fs_events(cx).await;

        tree.read_with(cx, |tree, _cx| {
            let tree = tree.as_local().unwrap();

            // `c.txt` lies outside any repository.
            assert!(tree.repo_for("c.txt".as_ref()).is_none());

            let repo = tree.repo_for("dir1/src/b.txt".as_ref()).unwrap();
            assert_eq!(repo.content_path.as_ref(), Path::new("dir1"));
            assert_eq!(repo.git_dir_path.as_ref(), Path::new("dir1/.git"));

            // A nested repository takes precedence over its ancestor.
            let repo = tree.repo_for("dir1/deps/dep1/src/a.txt".as_ref()).unwrap();
            assert_eq!(repo.content_path.as_ref(), Path::new("dir1/deps/dep1"));
            assert_eq!(repo.git_dir_path.as_ref(), Path::new("dir1/deps/dep1/.git"),);
        });

        let original_scan_id = tree.read_with(cx, |tree, _cx| {
            let tree = tree.as_local().unwrap();
            tree.repo_for("dir1/src/b.txt".as_ref()).unwrap().scan_id
        });

        // Touching a file inside `.git` must bump the repository's scan id.
        std::fs::write(root.path().join("dir1/.git/random_new_file"), "hello").unwrap();
        tree.flush_fs_events(cx).await;

        tree.read_with(cx, |tree, _cx| {
            let tree = tree.as_local().unwrap();
            let new_scan_id = tree.repo_for("dir1/src/b.txt".as_ref()).unwrap().scan_id;
            assert_ne!(
                original_scan_id, new_scan_id,
                "original {original_scan_id}, new {new_scan_id}"
            );
        });

        // Deleting the `.git` directory removes the repository entirely.
        std::fs::remove_dir_all(root.path().join("dir1/.git")).unwrap();
        tree.flush_fs_events(cx).await;

        tree.read_with(cx, |tree, _cx| {
            let tree = tree.as_local().unwrap();

            assert!(tree.repo_for("dir1/src/b.txt".as_ref()).is_none());
        });
    }
3427
    /// `changed_repos` must report deleted, updated, and newly-added
    /// repositories, and must omit repositories that did not change.
    #[test]
    fn test_changed_repos() {
        // Builds a repository entry rooted at the parent of `git_dir_path`.
        fn fake_entry(git_dir_path: impl AsRef<Path>, scan_id: usize) -> GitRepositoryEntry {
            GitRepositoryEntry {
                repo: Arc::new(Mutex::new(FakeGitRepository::default())),
                scan_id,
                content_path: git_dir_path.as_ref().parent().unwrap().into(),
                git_dir_path: git_dir_path.as_ref().into(),
            }
        }

        let prev_repos: Vec<GitRepositoryEntry> = vec![
            fake_entry("/.git", 0),
            fake_entry("/a/.git", 0),
            fake_entry("/a/b/.git", 0),
        ];

        let new_repos: Vec<GitRepositoryEntry> = vec![
            fake_entry("/a/.git", 1),
            fake_entry("/a/b/.git", 0),
            fake_entry("/a/c/.git", 0),
        ];

        let res = LocalWorktree::changed_repos(&prev_repos, &new_repos);

        // Deletion retained
        assert!(res
            .iter()
            .find(|repo| repo.git_dir_path.as_ref() == Path::new("/.git") && repo.scan_id == 0)
            .is_some());

        // Update retained
        assert!(res
            .iter()
            .find(|repo| repo.git_dir_path.as_ref() == Path::new("/a/.git") && repo.scan_id == 1)
            .is_some());

        // Addition retained
        assert!(res
            .iter()
            .find(|repo| repo.git_dir_path.as_ref() == Path::new("/a/c/.git") && repo.scan_id == 0)
            .is_some());

        // Nochange, not retained
        assert!(res
            .iter()
            .find(|repo| repo.git_dir_path.as_ref() == Path::new("/a/b/.git") && repo.scan_id == 0)
            .is_none());
    }
3477
3478 #[gpui::test]
3479 async fn test_write_file(cx: &mut TestAppContext) {
3480 let dir = temp_tree(json!({
3481 ".git": {},
3482 ".gitignore": "ignored-dir\n",
3483 "tracked-dir": {},
3484 "ignored-dir": {}
3485 }));
3486
3487 let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
3488
3489 let tree = Worktree::local(
3490 client,
3491 dir.path(),
3492 true,
3493 Arc::new(RealFs),
3494 Default::default(),
3495 &mut cx.to_async(),
3496 )
3497 .await
3498 .unwrap();
3499 cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
3500 .await;
3501 tree.flush_fs_events(cx).await;
3502
3503 tree.update(cx, |tree, cx| {
3504 tree.as_local().unwrap().write_file(
3505 Path::new("tracked-dir/file.txt"),
3506 "hello".into(),
3507 Default::default(),
3508 cx,
3509 )
3510 })
3511 .await
3512 .unwrap();
3513 tree.update(cx, |tree, cx| {
3514 tree.as_local().unwrap().write_file(
3515 Path::new("ignored-dir/file.txt"),
3516 "world".into(),
3517 Default::default(),
3518 cx,
3519 )
3520 })
3521 .await
3522 .unwrap();
3523
3524 tree.read_with(cx, |tree, _| {
3525 let tracked = tree.entry_for_path("tracked-dir/file.txt").unwrap();
3526 let ignored = tree.entry_for_path("ignored-dir/file.txt").unwrap();
3527 assert!(!tracked.is_ignored);
3528 assert!(ignored.is_ignored);
3529 });
3530 }
3531
    /// Creating an entry while the initial background scan is still running
    /// should be reflected in the worktree, and a remote snapshot should be
    /// able to catch up to it via `build_update` / `apply_remote_update`.
    #[gpui::test(iterations = 30)]
    async fn test_create_directory_during_initial_scan(cx: &mut TestAppContext) {
        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));

        let fs = FakeFs::new(cx.background());
        fs.insert_tree(
            "/root",
            json!({
                "b": {},
                "c": {},
                "d": {},
            }),
        )
        .await;

        let tree = Worktree::local(
            client,
            "/root".as_ref(),
            true,
            fs,
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();

        // Capture a snapshot before the entry is created, so we can later
        // verify that a remote update brings it back in sync.
        let mut snapshot1 = tree.update(cx, |tree, _| tree.as_local().unwrap().snapshot());

        // Note: `scan_complete` is deliberately not awaited here — the entry
        // is created while the initial scan may still be in progress.
        let entry = tree
            .update(cx, |tree, cx| {
                tree.as_local_mut()
                    .unwrap()
                    .create_entry("a/e".as_ref(), true, cx)
            })
            .await
            .unwrap();
        assert!(entry.is_dir());

        cx.foreground().run_until_parked();
        tree.read_with(cx, |tree, _| {
            assert_eq!(tree.entry_for_path("a/e").unwrap().kind, EntryKind::Dir);
        });

        // Applying the update built against snapshot1 must reproduce the
        // current state, including the newly created directory.
        let snapshot2 = tree.update(cx, |tree, _| tree.as_local().unwrap().snapshot());
        let update = snapshot2.build_update(&snapshot1, 0, 0, true);
        snapshot1.apply_remote_update(update).unwrap();
        assert_eq!(snapshot1.to_vec(true), snapshot2.to_vec(true),);
    }
3580
    /// Randomly mutates the worktree through its public mutation API while
    /// the initial scan is still running, repeatedly checking snapshot
    /// invariants and verifying that stale snapshots converge when remote
    /// updates are applied.
    #[gpui::test(iterations = 100)]
    async fn test_random_worktree_operations_during_initial_scan(
        cx: &mut TestAppContext,
        mut rng: StdRng,
    ) {
        // Operation counts are overridable via env vars for soak testing.
        let operations = env::var("OPERATIONS")
            .map(|o| o.parse().unwrap())
            .unwrap_or(5);
        let initial_entries = env::var("INITIAL_ENTRIES")
            .map(|o| o.parse().unwrap())
            .unwrap_or(20);

        // Seed the fake file system with a random tree.
        let root_dir = Path::new("/test");
        let fs = FakeFs::new(cx.background()) as Arc<dyn Fs>;
        fs.as_fake().insert_tree(root_dir, json!({})).await;
        for _ in 0..initial_entries {
            randomly_mutate_fs(&fs, root_dir, 1.0, &mut rng).await;
        }
        log::info!("generated initial tree");

        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
        let worktree = Worktree::local(
            client.clone(),
            root_dir,
            true,
            fs.clone(),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();

        let mut snapshot = worktree.update(cx, |tree, _| tree.as_local().unwrap().snapshot());

        for _ in 0..operations {
            // Mutations may legitimately fail (e.g. conflicting paths), so
            // errors are logged rather than unwrapped.
            worktree
                .update(cx, |worktree, cx| {
                    randomly_mutate_worktree(worktree, &mut rng, cx)
                })
                .await
                .log_err();
            worktree.read_with(cx, |tree, _| {
                tree.as_local().unwrap().snapshot.check_invariants()
            });

            // Occasionally bring the stale snapshot up to date via a remote
            // update and check that it matches the current state exactly.
            if rng.gen_bool(0.6) {
                let new_snapshot =
                    worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
                let update = new_snapshot.build_update(&snapshot, 0, 0, true);
                snapshot.apply_remote_update(update.clone()).unwrap();
                assert_eq!(
                    snapshot.to_vec(true),
                    new_snapshot.to_vec(true),
                    "incorrect snapshot after update {:?}",
                    update
                );
            }
        }

        // Let the initial scan finish, then re-check invariants and
        // remote-update convergence one final time.
        worktree
            .update(cx, |tree, _| tree.as_local_mut().unwrap().scan_complete())
            .await;
        worktree.read_with(cx, |tree, _| {
            tree.as_local().unwrap().snapshot.check_invariants()
        });

        let new_snapshot = worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
        let update = new_snapshot.build_update(&snapshot, 0, 0, true);
        snapshot.apply_remote_update(update.clone()).unwrap();
        assert_eq!(
            snapshot.to_vec(true),
            new_snapshot.to_vec(true),
            "incorrect snapshot after update {:?}",
            update
        );
    }
3657
3658 #[gpui::test(iterations = 100)]
3659 async fn test_random_worktree_changes(cx: &mut TestAppContext, mut rng: StdRng) {
3660 let operations = env::var("OPERATIONS")
3661 .map(|o| o.parse().unwrap())
3662 .unwrap_or(40);
3663 let initial_entries = env::var("INITIAL_ENTRIES")
3664 .map(|o| o.parse().unwrap())
3665 .unwrap_or(20);
3666
3667 let root_dir = Path::new("/test");
3668 let fs = FakeFs::new(cx.background()) as Arc<dyn Fs>;
3669 fs.as_fake().insert_tree(root_dir, json!({})).await;
3670 for _ in 0..initial_entries {
3671 randomly_mutate_fs(&fs, root_dir, 1.0, &mut rng).await;
3672 }
3673 log::info!("generated initial tree");
3674
3675 let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
3676 let worktree = Worktree::local(
3677 client.clone(),
3678 root_dir,
3679 true,
3680 fs.clone(),
3681 Default::default(),
3682 &mut cx.to_async(),
3683 )
3684 .await
3685 .unwrap();
3686
3687 worktree
3688 .update(cx, |tree, _| tree.as_local_mut().unwrap().scan_complete())
3689 .await;
3690
3691 // After the initial scan is complete, the `UpdatedEntries` event can
3692 // be used to follow along with all changes to the worktree's snapshot.
3693 worktree.update(cx, |tree, cx| {
3694 let mut paths = tree
3695 .as_local()
3696 .unwrap()
3697 .paths()
3698 .cloned()
3699 .collect::<Vec<_>>();
3700
3701 cx.subscribe(&worktree, move |tree, _, event, _| {
3702 if let Event::UpdatedEntries(changes) = event {
3703 for (path, change_type) in changes.iter() {
3704 let path = path.clone();
3705 let ix = match paths.binary_search(&path) {
3706 Ok(ix) | Err(ix) => ix,
3707 };
3708 match change_type {
3709 PathChange::Added => {
3710 assert_ne!(paths.get(ix), Some(&path));
3711 paths.insert(ix, path);
3712 }
3713 PathChange::Removed => {
3714 assert_eq!(paths.get(ix), Some(&path));
3715 paths.remove(ix);
3716 }
3717 PathChange::Updated => {
3718 assert_eq!(paths.get(ix), Some(&path));
3719 }
3720 PathChange::AddedOrUpdated => {
3721 if paths[ix] != path {
3722 paths.insert(ix, path);
3723 }
3724 }
3725 }
3726 }
3727 let new_paths = tree.paths().cloned().collect::<Vec<_>>();
3728 assert_eq!(paths, new_paths, "incorrect changes: {:?}", changes);
3729 }
3730 })
3731 .detach();
3732 });
3733
3734 let mut snapshots = Vec::new();
3735 let mut mutations_len = operations;
3736 while mutations_len > 1 {
3737 randomly_mutate_fs(&fs, root_dir, 1.0, &mut rng).await;
3738 let buffered_event_count = fs.as_fake().buffered_event_count().await;
3739 if buffered_event_count > 0 && rng.gen_bool(0.3) {
3740 let len = rng.gen_range(0..=buffered_event_count);
3741 log::info!("flushing {} events", len);
3742 fs.as_fake().flush_events(len).await;
3743 } else {
3744 randomly_mutate_fs(&fs, root_dir, 0.6, &mut rng).await;
3745 mutations_len -= 1;
3746 }
3747
3748 cx.foreground().run_until_parked();
3749 if rng.gen_bool(0.2) {
3750 log::info!("storing snapshot {}", snapshots.len());
3751 let snapshot =
3752 worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
3753 snapshots.push(snapshot);
3754 }
3755 }
3756
3757 log::info!("quiescing");
3758 fs.as_fake().flush_events(usize::MAX).await;
3759 cx.foreground().run_until_parked();
3760 let snapshot = worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
3761 snapshot.check_invariants();
3762
3763 {
3764 let new_worktree = Worktree::local(
3765 client.clone(),
3766 root_dir,
3767 true,
3768 fs.clone(),
3769 Default::default(),
3770 &mut cx.to_async(),
3771 )
3772 .await
3773 .unwrap();
3774 new_worktree
3775 .update(cx, |tree, _| tree.as_local_mut().unwrap().scan_complete())
3776 .await;
3777 let new_snapshot =
3778 new_worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
3779 assert_eq!(snapshot.to_vec(true), new_snapshot.to_vec(true));
3780 }
3781
3782 for (i, mut prev_snapshot) in snapshots.into_iter().enumerate() {
3783 let include_ignored = rng.gen::<bool>();
3784 if !include_ignored {
3785 let mut entries_by_path_edits = Vec::new();
3786 let mut entries_by_id_edits = Vec::new();
3787 for entry in prev_snapshot
3788 .entries_by_id
3789 .cursor::<()>()
3790 .filter(|e| e.is_ignored)
3791 {
3792 entries_by_path_edits.push(Edit::Remove(PathKey(entry.path.clone())));
3793 entries_by_id_edits.push(Edit::Remove(entry.id));
3794 }
3795
3796 prev_snapshot
3797 .entries_by_path
3798 .edit(entries_by_path_edits, &());
3799 prev_snapshot.entries_by_id.edit(entries_by_id_edits, &());
3800 }
3801
3802 let update = snapshot.build_update(&prev_snapshot, 0, 0, include_ignored);
3803 prev_snapshot.apply_remote_update(update.clone()).unwrap();
3804 assert_eq!(
3805 prev_snapshot.to_vec(include_ignored),
3806 snapshot.to_vec(include_ignored),
3807 "wrong update for snapshot {i}. update: {:?}",
3808 update
3809 );
3810 }
3811 }
3812
    /// Performs one random mutation on the worktree through its public API:
    /// deleting, renaming, creating, or overwriting an entry. Returns a task
    /// that resolves when the mutation has been applied.
    fn randomly_mutate_worktree(
        worktree: &mut Worktree,
        rng: &mut impl Rng,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<()>> {
        let worktree = worktree.as_local_mut().unwrap();
        let snapshot = worktree.snapshot();
        // Pick a random non-ignored entry to operate on.
        let entry = snapshot.entries(false).choose(rng).unwrap();

        match rng.gen_range(0_u32..100) {
            // Roughly a third of the time: delete the entry (but never the
            // worktree root, whose path is empty).
            0..=33 if entry.path.as_ref() != Path::new("") => {
                log::info!("deleting entry {:?} ({})", entry.path, entry.id.0);
                worktree.delete_entry(entry.id, cx).unwrap()
            }
            // Roughly a third of the time: rename the entry to a random
            // location (again excluding the root).
            ..=66 if entry.path.as_ref() != Path::new("") => {
                let other_entry = snapshot.entries(false).choose(rng).unwrap();
                let new_parent_path = if other_entry.is_dir() {
                    other_entry.path.clone()
                } else {
                    other_entry.path.parent().unwrap().into()
                };
                let mut new_path = new_parent_path.join(gen_name(rng));
                // Avoid moving an entry into (or onto) itself.
                if new_path.starts_with(&entry.path) {
                    new_path = gen_name(rng).into();
                }

                log::info!(
                    "renaming entry {:?} ({}) to {:?}",
                    entry.path,
                    entry.id.0,
                    new_path
                );
                let task = worktree.rename_entry(entry.id, new_path, cx).unwrap();
                cx.foreground().spawn(async move {
                    task.await?;
                    Ok(())
                })
            }
            // Otherwise: create a child under a directory entry, or overwrite
            // a file entry with empty contents.
            _ => {
                let task = if entry.is_dir() {
                    let child_path = entry.path.join(gen_name(rng));
                    let is_dir = rng.gen_bool(0.3);
                    log::info!(
                        "creating {} at {:?}",
                        if is_dir { "dir" } else { "file" },
                        child_path,
                    );
                    worktree.create_entry(child_path, is_dir, cx)
                } else {
                    log::info!("overwriting file {:?} ({})", entry.path, entry.id.0);
                    worktree.write_file(entry.path.clone(), "".into(), Default::default(), cx)
                };
                cx.foreground().spawn(async move {
                    task.await?;
                    Ok(())
                })
            }
        }
    }
3872
    /// Performs one random mutation directly on the fake file system:
    /// creating a file or directory, writing a `.gitignore`, renaming, or
    /// deleting. `insertion_probability` biases the choice toward creation.
    async fn randomly_mutate_fs(
        fs: &Arc<dyn Fs>,
        root_path: &Path,
        insertion_probability: f64,
        rng: &mut impl Rng,
    ) {
        // Partition the existing paths under the root into files and dirs.
        let mut files = Vec::new();
        let mut dirs = Vec::new();
        for path in fs.as_fake().paths() {
            if path.starts_with(root_path) {
                if fs.is_file(&path).await {
                    files.push(path);
                } else {
                    dirs.push(path);
                }
            }
        }

        // Force a creation when only the root dir exists, so the later
        // branches always have something to operate on.
        if (files.is_empty() && dirs.len() == 1) || rng.gen_bool(insertion_probability) {
            let path = dirs.choose(rng).unwrap();
            let new_path = path.join(gen_name(rng));

            if rng.gen() {
                log::info!(
                    "creating dir {:?}",
                    new_path.strip_prefix(root_path).unwrap()
                );
                fs.create_dir(&new_path).await.unwrap();
            } else {
                log::info!(
                    "creating file {:?}",
                    new_path.strip_prefix(root_path).unwrap()
                );
                fs.create_file(&new_path, Default::default()).await.unwrap();
            }
        } else if rng.gen_bool(0.05) {
            // Occasionally write a `.gitignore` that ignores a random subset
            // of the files and directories beneath a random directory.
            let ignore_dir_path = dirs.choose(rng).unwrap();
            let ignore_path = ignore_dir_path.join(&*GITIGNORE);

            // `subdirs` always contains `ignore_dir_path` itself, so the
            // `gen_range(0..subdirs.len())` below never gets an empty range.
            let subdirs = dirs
                .iter()
                .filter(|d| d.starts_with(&ignore_dir_path))
                .cloned()
                .collect::<Vec<_>>();
            let subfiles = files
                .iter()
                .filter(|d| d.starts_with(&ignore_dir_path))
                .cloned()
                .collect::<Vec<_>>();
            let files_to_ignore = {
                let len = rng.gen_range(0..=subfiles.len());
                subfiles.choose_multiple(rng, len)
            };
            let dirs_to_ignore = {
                let len = rng.gen_range(0..subdirs.len());
                subdirs.choose_multiple(rng, len)
            };

            // Each ignored path is written relative to the ignore file's
            // directory, one per line.
            let mut ignore_contents = String::new();
            for path_to_ignore in files_to_ignore.chain(dirs_to_ignore) {
                writeln!(
                    ignore_contents,
                    "{}",
                    path_to_ignore
                        .strip_prefix(&ignore_dir_path)
                        .unwrap()
                        .to_str()
                        .unwrap()
                )
                .unwrap();
            }
            log::info!(
                "creating gitignore {:?} with contents:\n{}",
                ignore_path.strip_prefix(&root_path).unwrap(),
                ignore_contents
            );
            fs.save(
                &ignore_path,
                &ignore_contents.as_str().into(),
                Default::default(),
            )
            .await
            .unwrap();
        } else {
            // Choose a random file or non-root directory (`dirs[1..]` skips
            // the root) to rename or delete.
            let old_path = {
                let file_path = files.choose(rng);
                let dir_path = dirs[1..].choose(rng);
                file_path.into_iter().chain(dir_path).choose(rng).unwrap()
            };

            let is_rename = rng.gen();
            if is_rename {
                // Pick a destination parent that is not inside the source.
                let new_path_parent = dirs
                    .iter()
                    .filter(|d| !d.starts_with(old_path))
                    .choose(rng)
                    .unwrap();

                // Sometimes replace an existing directory wholesale with the
                // renamed entry.
                let overwrite_existing_dir =
                    !old_path.starts_with(&new_path_parent) && rng.gen_bool(0.3);
                let new_path = if overwrite_existing_dir {
                    fs.remove_dir(
                        &new_path_parent,
                        RemoveOptions {
                            recursive: true,
                            ignore_if_not_exists: true,
                        },
                    )
                    .await
                    .unwrap();
                    new_path_parent.to_path_buf()
                } else {
                    new_path_parent.join(gen_name(rng))
                };

                log::info!(
                    "renaming {:?} to {}{:?}",
                    old_path.strip_prefix(&root_path).unwrap(),
                    if overwrite_existing_dir {
                        "overwrite "
                    } else {
                        ""
                    },
                    new_path.strip_prefix(&root_path).unwrap()
                );
                fs.rename(
                    &old_path,
                    &new_path,
                    fs::RenameOptions {
                        overwrite: true,
                        ignore_if_exists: true,
                    },
                )
                .await
                .unwrap();
            } else if fs.is_file(&old_path).await {
                log::info!(
                    "deleting file {:?}",
                    old_path.strip_prefix(&root_path).unwrap()
                );
                fs.remove_file(old_path, Default::default()).await.unwrap();
            } else {
                log::info!(
                    "deleting dir {:?}",
                    old_path.strip_prefix(&root_path).unwrap()
                );
                fs.remove_dir(
                    &old_path,
                    RemoveOptions {
                        recursive: true,
                        ignore_if_not_exists: true,
                    },
                )
                .await
                .unwrap();
            }
        }
    }
4031
4032 fn gen_name(rng: &mut impl Rng) -> String {
4033 (0..6)
4034 .map(|_| rng.sample(rand::distributions::Alphanumeric))
4035 .map(char::from)
4036 .collect()
4037 }
4038
    impl LocalSnapshot {
        /// Asserts the internal consistency of the snapshot: the two entry
        /// trees agree, file traversals match the entries, traversal orders
        /// agree, and every tracked ignore file has a corresponding entry.
        fn check_invariants(&self) {
            // `entries_by_path` and `entries_by_id` must contain exactly the
            // same (path, id) pairs.
            assert_eq!(
                self.entries_by_path
                    .cursor::<()>()
                    .map(|e| (&e.path, e.id))
                    .collect::<Vec<_>>(),
                self.entries_by_id
                    .cursor::<()>()
                    .map(|e| (&e.path, e.id))
                    .collect::<collections::BTreeSet<_>>()
                    .into_iter()
                    .collect::<Vec<_>>(),
                "entries_by_path and entries_by_id are inconsistent"
            );

            // The file traversals must visit exactly the file entries, in
            // order, with ignored files excluded from the visible traversal.
            let mut files = self.files(true, 0);
            let mut visible_files = self.files(false, 0);
            for entry in self.entries_by_path.cursor::<()>() {
                if entry.is_file() {
                    assert_eq!(files.next().unwrap().inode, entry.inode);
                    if !entry.is_ignored {
                        assert_eq!(visible_files.next().unwrap().inode, entry.inode);
                    }
                }
            }

            // Both traversals must be fully consumed — no extra files.
            assert!(files.next().is_none());
            assert!(visible_files.next().is_none());

            // Walk the tree via `child_entries`, inserting children at a
            // fixed stack index; the resulting order must match the cursor's
            // path order.
            let mut bfs_paths = Vec::new();
            let mut stack = vec![Path::new("")];
            while let Some(path) = stack.pop() {
                bfs_paths.push(path);
                let ix = stack.len();
                for child_entry in self.child_entries(path) {
                    stack.insert(ix, &child_entry.path);
                }
            }

            let dfs_paths_via_iter = self
                .entries_by_path
                .cursor::<()>()
                .map(|e| e.path.as_ref())
                .collect::<Vec<_>>();
            assert_eq!(bfs_paths, dfs_paths_via_iter);

            // The public traversal (including ignored entries) must agree
            // with the raw cursor order.
            let dfs_paths_via_traversal = self
                .entries(true)
                .map(|e| e.path.as_ref())
                .collect::<Vec<_>>();
            assert_eq!(dfs_paths_via_traversal, dfs_paths_via_iter);

            // Every directory with a tracked gitignore must itself have an
            // entry, along with an entry for its `.gitignore` file.
            for ignore_parent_abs_path in self.ignores_by_parent_abs_path.keys() {
                let ignore_parent_path =
                    ignore_parent_abs_path.strip_prefix(&self.abs_path).unwrap();
                assert!(self.entry_for_path(&ignore_parent_path).is_some());
                assert!(self
                    .entry_for_path(ignore_parent_path.join(&*GITIGNORE))
                    .is_some());
            }
        }

        /// Returns `(path, inode, is_ignored)` for each entry, sorted by
        /// path, optionally filtering out ignored entries.
        fn to_vec(&self, include_ignored: bool) -> Vec<(&Path, u64, bool)> {
            let mut paths = Vec::new();
            for entry in self.entries_by_path.cursor::<()>() {
                if include_ignored || !entry.is_ignored {
                    paths.push((entry.path.as_ref(), entry.inode, entry.is_ignored));
                }
            }
            paths.sort_by(|a, b| a.0.cmp(b.0));
            paths
        }
    }
4113}