1use crate::{
2 copy_recursive, ignore::IgnoreStack, DiagnosticSummary, ProjectEntryId, RemoveOptions,
3};
4use ::ignore::gitignore::{Gitignore, GitignoreBuilder};
5use anyhow::{anyhow, Context, Result};
6use client::{proto, Client};
7use clock::ReplicaId;
8use collections::{HashMap, VecDeque};
9use fs::{repository::GitRepository, Fs, LineEnding};
10use futures::{
11 channel::{
12 mpsc::{self, UnboundedSender},
13 oneshot,
14 },
15 select_biased,
16 task::Poll,
17 Stream, StreamExt,
18};
19use fuzzy::CharBag;
20use git::{DOT_GIT, GITIGNORE};
21use gpui::{executor, AppContext, AsyncAppContext, Entity, ModelContext, ModelHandle, Task};
22use language::{
23 proto::{
24 deserialize_fingerprint, deserialize_version, serialize_fingerprint, serialize_line_ending,
25 serialize_version,
26 },
27 Buffer, DiagnosticEntry, File as _, PointUtf16, Rope, RopeFingerprint, Unclipped,
28};
29use lsp::LanguageServerId;
30use parking_lot::Mutex;
31use postage::{
32 barrier,
33 prelude::{Sink as _, Stream as _},
34 watch,
35};
36use smol::channel::{self, Sender};
37use std::{
38 any::Any,
39 cmp::{self, Ordering},
40 convert::TryFrom,
41 ffi::OsStr,
42 fmt,
43 future::Future,
44 mem,
45 ops::{Deref, DerefMut},
46 path::{Path, PathBuf},
47 pin::Pin,
48 sync::{
49 atomic::{AtomicUsize, Ordering::SeqCst},
50 Arc,
51 },
52 time::{Duration, SystemTime},
53};
54use sum_tree::{Bias, Edit, SeekTarget, SumTree, TreeMap, TreeSet};
55use util::{paths::HOME, ResultExt, TryFutureExt};
56
/// Identifier for a worktree, derived from the id of the `Worktree` model
/// that owns it (see `WorktreeId::from_usize` usage below).
#[derive(Copy, Clone, PartialEq, Eq, Debug, Hash, PartialOrd, Ord)]
pub struct WorktreeId(usize);
59
/// A tree of files being tracked by a project: either backed by the local
/// filesystem, or mirrored from a remote collaborator via RPC updates.
pub enum Worktree {
    Local(LocalWorktree),
    Remote(RemoteWorktree),
}
64
/// A worktree backed by the local filesystem, kept current by a background
/// scanner task that watches for filesystem events.
pub struct LocalWorktree {
    snapshot: LocalSnapshot,
    /// Sends batches of absolute paths for the background scanner to rescan,
    /// paired with a barrier that resolves once the rescan is processed.
    path_changes_tx: channel::Sender<(Vec<PathBuf>, barrier::Sender)>,
    /// Tracks whether the background scanner is currently working.
    is_scanning: (watch::Sender<bool>, watch::Receiver<bool>),
    _background_scanner_task: Task<()>,
    /// Present while this worktree is shared with remote collaborators.
    share: Option<ShareState>,
    /// Full diagnostics per file, keyed by worktree-relative path, as a list
    /// of entries sorted by language server id.
    diagnostics: HashMap<
        Arc<Path>,
        Vec<(
            LanguageServerId,
            Vec<DiagnosticEntry<Unclipped<PointUtf16>>>,
        )>,
    >,
    /// Error/warning counts per file and language server.
    diagnostic_summaries: HashMap<Arc<Path>, HashMap<LanguageServerId, DiagnosticSummary>>,
    client: Arc<Client>,
    fs: Arc<dyn Fs>,
    visible: bool,
}
83
/// A read-only mirror of a collaborator's worktree, kept current by applying
/// `proto::UpdateWorktree` messages to a background snapshot.
pub struct RemoteWorktree {
    /// Foreground copy of the snapshot, synchronized from `background_snapshot`.
    snapshot: Snapshot,
    /// Snapshot that remote updates are applied to on a background thread.
    background_snapshot: Arc<Mutex<Snapshot>>,
    project_id: u64,
    client: Arc<Client>,
    /// `None` once the host has disconnected; incoming updates are then dropped.
    updates_tx: Option<UnboundedSender<proto::UpdateWorktree>>,
    /// Waiters to notify when a given scan id has been observed, sorted by scan id.
    snapshot_subscriptions: VecDeque<(usize, oneshot::Sender<()>)>,
    replica_id: ReplicaId,
    diagnostic_summaries: HashMap<Arc<Path>, HashMap<LanguageServerId, DiagnosticSummary>>,
    visible: bool,
    disconnected: bool,
}
96
/// A point-in-time view of a worktree's entries and git repositories.
#[derive(Clone)]
pub struct Snapshot {
    id: WorktreeId,
    /// Absolute path of the worktree root on the host's filesystem.
    abs_path: Arc<Path>,
    root_name: String,
    /// Lowercased characters of `root_name`, used for fuzzy matching.
    root_char_bag: CharBag,
    entries_by_path: SumTree<Entry>,
    entries_by_id: SumTree<PathEntry>,
    repository_entries: TreeMap<RepositoryWorkDirectory, RepositoryEntry>,

    /// A number that increases every time the worktree begins scanning
    /// a set of paths from the filesystem. This scanning could be caused
    /// by some operation performed on the worktree, such as reading or
    /// writing a file, or by an event reported by the filesystem.
    scan_id: usize,

    /// The latest scan id that has completed, and whose preceding scans
    /// have all completed. The current `scan_id` could be more than one
    /// greater than the `completed_scan_id` if operations are performed
    /// on the worktree while it is processing a file-system event.
    completed_scan_id: usize,
}
119
/// A git repository discovered within the worktree.
#[derive(Clone, Debug)]
pub struct RepositoryEntry {
    // Path to the actual .git folder.
    // Note: if .git is a file, this points to the folder indicated by the .git file
    pub(crate) git_dir_path: Arc<Path>,
    pub(crate) git_dir_entry_id: ProjectEntryId,
    /// The folder containing the `.git` directory, relative to the worktree root.
    pub(crate) work_directory: RepositoryWorkDirectory,
    /// The worktree scan id at which this entry was last updated.
    pub(crate) scan_id: usize,
    pub(crate) branch: Option<Arc<str>>,
}
130
131impl RepositoryEntry {
132 // Note that this path should be relative to the worktree root.
133 pub(crate) fn in_dot_git(&self, path: &Path) -> bool {
134 path.starts_with(self.git_dir_path.as_ref())
135 }
136
137 pub fn branch(&self) -> Option<Arc<str>> {
138 self.branch.clone()
139 }
140
141 pub fn work_directory(&self) -> Arc<Path> {
142 self.work_directory.0.clone()
143 }
144}
145
/// This path corresponds to the 'content path' (the folder that contains the .git)
/// The methods on this type treat the path as relative to the worktree root.
#[derive(Clone, Debug, Ord, PartialOrd, Eq, PartialEq)]
pub struct RepositoryWorkDirectory(Arc<Path>);
149
150impl RepositoryWorkDirectory {
151 // Note that these paths should be relative to the worktree root.
152 pub(crate) fn contains(&self, path: &Path) -> bool {
153 path.starts_with(self.0.as_ref())
154 }
155
156 pub(crate) fn relativize(&self, path: &Path) -> Option<RepoPath> {
157 path.strip_prefix(self.0.as_ref())
158 .ok()
159 .map(move |path| RepoPath(path.to_owned()))
160 }
161}
162
163impl Deref for RepositoryWorkDirectory {
164 type Target = Path;
165
166 fn deref(&self) -> &Self::Target {
167 self.0.as_ref()
168 }
169}
170
171impl<'a> From<&'a str> for RepositoryWorkDirectory {
172 fn from(value: &'a str) -> Self {
173 RepositoryWorkDirectory(Path::new(value).into())
174 }
175}
176
/// A path expressed relative to a git repository's work directory.
#[derive(Clone, Debug, Ord, PartialOrd, Eq, PartialEq)]
pub struct RepoPath(PathBuf);
179
180impl AsRef<Path> for RepoPath {
181 fn as_ref(&self) -> &Path {
182 self.0.as_ref()
183 }
184}
185
// Allow a `RepoPath` to be used anywhere a `PathBuf` reference is expected.
impl Deref for RepoPath {
    type Target = PathBuf;

    fn deref(&self) -> &Self::Target {
        &self.0
    }
}
193
194impl AsRef<Path> for RepositoryWorkDirectory {
195 fn as_ref(&self) -> &Path {
196 self.0.as_ref()
197 }
198}
199
200impl Default for RepositoryWorkDirectory {
201 fn default() -> Self {
202 RepositoryWorkDirectory(Arc::from(Path::new("")))
203 }
204}
205
/// A `Snapshot` augmented with state that only exists for local worktrees:
/// gitignore data, git repositories, and scanning bookkeeping.
#[derive(Debug, Clone)]
pub struct LocalSnapshot {
    /// Parsed `.gitignore` files keyed by the absolute path of their parent
    /// directory, with the scan id at which each was last seen.
    ignores_by_parent_abs_path: HashMap<Arc<Path>, (Arc<Gitignore>, usize)>,
    // The ProjectEntryId corresponds to the entry for the .git dir
    git_repositories: TreeMap<ProjectEntryId, Arc<Mutex<dyn GitRepository>>>,
    // NOTE(review): keyed by a u64 per removed entry — looks like an inode or
    // similar identity used to re-assign ids when files reappear; confirm.
    removed_entry_ids: HashMap<u64, ProjectEntryId>,
    next_entry_id: Arc<AtomicUsize>,
    snapshot: Snapshot,
}
215
// Expose the shared `Snapshot` fields directly on `LocalSnapshot`.
impl Deref for LocalSnapshot {
    type Target = Snapshot;

    fn deref(&self) -> &Self::Target {
        &self.snapshot
    }
}
223
// Mutable counterpart of the `Deref` impl above.
impl DerefMut for LocalSnapshot {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.snapshot
    }
}
229
/// Messages sent from the background scanner to the foreground worktree model.
enum ScanState {
    /// A scan pass has begun.
    Started,
    /// The scanner produced an updated snapshot.
    Updated {
        snapshot: LocalSnapshot,
        /// Paths whose entries changed in this update.
        changes: HashMap<Arc<Path>, PathChange>,
        /// When present, dropped by the receiver to signal the update was processed.
        barrier: Option<barrier::Sender>,
        /// Whether the scanner is still working after this update.
        scanning: bool,
    },
}
239
/// State kept while a local worktree is shared with remote collaborators.
struct ShareState {
    project_id: u64,
    /// Publishes each new snapshot to the task that streams updates to the server.
    snapshots_tx: watch::Sender<LocalSnapshot>,
    /// Signaled to make the update-streaming task retry after a failed request.
    resume_updates: watch::Sender<()>,
    _maintain_remote_snapshot: Task<Option<()>>,
}
246
/// Events emitted by a `Worktree` model.
pub enum Event {
    /// Entries changed, keyed by worktree-relative path.
    UpdatedEntries(HashMap<Arc<Path>, PathChange>),
    /// Git repositories were added, removed, or updated.
    UpdatedGitRepositories(Vec<RepositoryEntry>),
}
251
// Registers `Worktree` as a gpui model that emits `Event`s.
impl Entity for Worktree {
    type Event = Event;
}
255
256impl Worktree {
    /// Create a worktree backed by the local filesystem rooted at `path`.
    ///
    /// Stats the root, builds an initial snapshot, then starts two tasks:
    /// a foreground task that applies `ScanState` messages from the scanner
    /// to the model, and a background task running the scanner itself.
    ///
    /// Returns an error if the root path cannot be stat'ed.
    pub async fn local(
        client: Arc<Client>,
        path: impl Into<Arc<Path>>,
        visible: bool,
        fs: Arc<dyn Fs>,
        next_entry_id: Arc<AtomicUsize>,
        cx: &mut AsyncAppContext,
    ) -> Result<ModelHandle<Self>> {
        // After determining whether the root entry is a file or a directory, populate the
        // snapshot's "root name", which will be used for the purpose of fuzzy matching.
        let abs_path = path.into();
        let metadata = fs
            .metadata(&abs_path)
            .await
            .context("failed to stat worktree path")?;

        Ok(cx.add_model(move |cx: &mut ModelContext<Worktree>| {
            let root_name = abs_path
                .file_name()
                .map_or(String::new(), |f| f.to_string_lossy().to_string());

            let mut snapshot = LocalSnapshot {
                ignores_by_parent_abs_path: Default::default(),
                removed_entry_ids: Default::default(),
                git_repositories: Default::default(),
                next_entry_id,
                snapshot: Snapshot {
                    id: WorktreeId::from_usize(cx.model_id()),
                    abs_path: abs_path.clone(),
                    root_name: root_name.clone(),
                    root_char_bag: root_name.chars().map(|c| c.to_ascii_lowercase()).collect(),
                    entries_by_path: Default::default(),
                    entries_by_id: Default::default(),
                    repository_entries: Default::default(),
                    // Scan 1 is in progress; nothing has completed yet.
                    scan_id: 1,
                    completed_scan_id: 0,
                },
            };

            // Seed the snapshot with the root entry so it is usable before
            // the first scan completes.
            if let Some(metadata) = metadata {
                snapshot.insert_entry(
                    Entry::new(
                        Arc::from(Path::new("")),
                        &metadata,
                        &snapshot.next_entry_id,
                        snapshot.root_char_bag,
                    ),
                    fs.as_ref(),
                );
            }

            let (path_changes_tx, path_changes_rx) = channel::unbounded();
            let (scan_states_tx, mut scan_states_rx) = mpsc::unbounded();

            // Foreground task: apply scanner updates to this model and emit events.
            cx.spawn_weak(|this, mut cx| async move {
                while let Some((state, this)) = scan_states_rx.next().await.zip(this.upgrade(&cx)) {
                    this.update(&mut cx, |this, cx| {
                        let this = this.as_local_mut().unwrap();
                        match state {
                            ScanState::Started => {
                                *this.is_scanning.0.borrow_mut() = true;
                            }
                            ScanState::Updated {
                                snapshot,
                                changes,
                                barrier,
                                scanning,
                            } => {
                                *this.is_scanning.0.borrow_mut() = scanning;
                                this.set_snapshot(snapshot, cx);
                                cx.emit(Event::UpdatedEntries(changes));
                                // Dropping the barrier unblocks whoever requested this rescan.
                                drop(barrier);
                            }
                        }
                        cx.notify();
                    });
                }
            })
            .detach();

            // Background task: watch the filesystem and run the scanner.
            let background_scanner_task = cx.background().spawn({
                let fs = fs.clone();
                let snapshot = snapshot.clone();
                let background = cx.background().clone();
                async move {
                    let events = fs.watch(&abs_path, Duration::from_millis(100)).await;
                    BackgroundScanner::new(
                        snapshot,
                        fs,
                        scan_states_tx,
                        background,
                        path_changes_rx,
                    )
                    .run(events)
                    .await;
                }
            });

            Worktree::Local(LocalWorktree {
                snapshot,
                is_scanning: watch::channel_with(true),
                share: None,
                path_changes_tx,
                _background_scanner_task: background_scanner_task,
                diagnostics: Default::default(),
                diagnostic_summaries: Default::default(),
                client,
                fs,
                visible,
            })
        }))
    }
369
    /// Create a worktree that mirrors a collaborator's worktree.
    ///
    /// Incoming `proto::UpdateWorktree` messages are applied to a background
    /// snapshot on a background thread; a foreground task then copies that
    /// snapshot onto the model and resolves any scan-id subscriptions.
    pub fn remote(
        project_remote_id: u64,
        replica_id: ReplicaId,
        worktree: proto::WorktreeMetadata,
        client: Arc<Client>,
        cx: &mut AppContext,
    ) -> ModelHandle<Self> {
        cx.add_model(|cx: &mut ModelContext<Self>| {
            let snapshot = Snapshot {
                id: WorktreeId(worktree.id as usize),
                abs_path: Arc::from(PathBuf::from(worktree.abs_path)),
                root_name: worktree.root_name.clone(),
                root_char_bag: worktree
                    .root_name
                    .chars()
                    .map(|c| c.to_ascii_lowercase())
                    .collect(),
                entries_by_path: Default::default(),
                entries_by_id: Default::default(),
                repository_entries: Default::default(),
                scan_id: 1,
                completed_scan_id: 0,
            };

            let (updates_tx, mut updates_rx) = mpsc::unbounded();
            let background_snapshot = Arc::new(Mutex::new(snapshot.clone()));
            let (mut snapshot_updated_tx, mut snapshot_updated_rx) = watch::channel();

            // Background task: apply each remote update off the main thread.
            cx.background()
                .spawn({
                    let background_snapshot = background_snapshot.clone();
                    async move {
                        while let Some(update) = updates_rx.next().await {
                            if let Err(error) =
                                background_snapshot.lock().apply_remote_update(update)
                            {
                                log::error!("error applying worktree update: {}", error);
                            }
                            snapshot_updated_tx.send(()).await.ok();
                        }
                    }
                })
                .detach();

            // Foreground task: sync the background snapshot onto the model
            // and wake subscribers waiting on a particular scan id.
            cx.spawn_weak(|this, mut cx| async move {
                while (snapshot_updated_rx.recv().await).is_some() {
                    if let Some(this) = this.upgrade(&cx) {
                        this.update(&mut cx, |this, cx| {
                            let this = this.as_remote_mut().unwrap();
                            this.snapshot = this.background_snapshot.lock().clone();
                            cx.emit(Event::UpdatedEntries(Default::default()));
                            cx.notify();
                            // Subscriptions are sorted by scan id, so resolve
                            // from the front until one is still unobserved.
                            while let Some((scan_id, _)) = this.snapshot_subscriptions.front() {
                                if this.observed_snapshot(*scan_id) {
                                    let (_, tx) = this.snapshot_subscriptions.pop_front().unwrap();
                                    let _ = tx.send(());
                                } else {
                                    break;
                                }
                            }
                        });
                    } else {
                        break;
                    }
                }
            })
            .detach();

            Worktree::Remote(RemoteWorktree {
                project_id: project_remote_id,
                replica_id,
                snapshot: snapshot.clone(),
                background_snapshot,
                updates_tx: Some(updates_tx),
                snapshot_subscriptions: Default::default(),
                client: client.clone(),
                diagnostic_summaries: Default::default(),
                visible: worktree.visible,
                disconnected: false,
            })
        })
    }
452
453 pub fn as_local(&self) -> Option<&LocalWorktree> {
454 if let Worktree::Local(worktree) = self {
455 Some(worktree)
456 } else {
457 None
458 }
459 }
460
461 pub fn as_remote(&self) -> Option<&RemoteWorktree> {
462 if let Worktree::Remote(worktree) = self {
463 Some(worktree)
464 } else {
465 None
466 }
467 }
468
469 pub fn as_local_mut(&mut self) -> Option<&mut LocalWorktree> {
470 if let Worktree::Local(worktree) = self {
471 Some(worktree)
472 } else {
473 None
474 }
475 }
476
477 pub fn as_remote_mut(&mut self) -> Option<&mut RemoteWorktree> {
478 if let Worktree::Remote(worktree) = self {
479 Some(worktree)
480 } else {
481 None
482 }
483 }
484
485 pub fn is_local(&self) -> bool {
486 matches!(self, Worktree::Local(_))
487 }
488
489 pub fn is_remote(&self) -> bool {
490 !self.is_local()
491 }
492
    /// A plain `Snapshot` clone of this worktree, regardless of variant.
    pub fn snapshot(&self) -> Snapshot {
        match self {
            Worktree::Local(worktree) => worktree.snapshot().snapshot,
            Worktree::Remote(worktree) => worktree.snapshot(),
        }
    }

    /// The current scan id (see `Snapshot::scan_id`).
    pub fn scan_id(&self) -> usize {
        match self {
            Worktree::Local(worktree) => worktree.snapshot.scan_id,
            Worktree::Remote(worktree) => worktree.snapshot.scan_id,
        }
    }

    /// The latest fully-completed scan id (see `Snapshot::completed_scan_id`).
    pub fn completed_scan_id(&self) -> usize {
        match self {
            Worktree::Local(worktree) => worktree.snapshot.completed_scan_id,
            Worktree::Remote(worktree) => worktree.snapshot.completed_scan_id,
        }
    }

    /// Whether this worktree should be shown in the UI.
    pub fn is_visible(&self) -> bool {
        match self {
            Worktree::Local(worktree) => worktree.visible,
            Worktree::Remote(worktree) => worktree.visible,
        }
    }

    /// The replica id of this client; local worktrees are always replica 0.
    pub fn replica_id(&self) -> ReplicaId {
        match self {
            Worktree::Local(_) => 0,
            Worktree::Remote(worktree) => worktree.replica_id,
        }
    }
527
528 pub fn diagnostic_summaries(
529 &self,
530 ) -> impl Iterator<Item = (Arc<Path>, LanguageServerId, DiagnosticSummary)> + '_ {
531 match self {
532 Worktree::Local(worktree) => &worktree.diagnostic_summaries,
533 Worktree::Remote(worktree) => &worktree.diagnostic_summaries,
534 }
535 .iter()
536 .flat_map(|(path, summaries)| {
537 summaries
538 .iter()
539 .map(move |(&server_id, &summary)| (path.clone(), server_id, summary))
540 })
541 }
542
    /// Absolute path of the worktree root on the host's filesystem.
    pub fn abs_path(&self) -> Arc<Path> {
        match self {
            Worktree::Local(worktree) => worktree.abs_path.clone(),
            Worktree::Remote(worktree) => worktree.abs_path.clone(),
        }
    }
549}
550
551impl LocalWorktree {
552 pub fn contains_abs_path(&self, path: &Path) -> bool {
553 path.starts_with(&self.abs_path)
554 }
555
556 fn absolutize(&self, path: &Path) -> PathBuf {
557 if path.file_name().is_some() {
558 self.abs_path.join(path)
559 } else {
560 self.abs_path.to_path_buf()
561 }
562 }
563
    /// Load the file at `path` into a new `Buffer` model with the given
    /// remote id, including its git diff base when the file is in a repository.
    pub(crate) fn load_buffer(
        &mut self,
        id: u64,
        path: &Path,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<ModelHandle<Buffer>>> {
        let path = Arc::from(path);
        cx.spawn(move |this, mut cx| async move {
            let (file, contents, diff_base) = this
                .update(&mut cx, |t, cx| t.as_local().unwrap().load(&path, cx))
                .await?;
            // Construct the text buffer off the main thread.
            let text_buffer = cx
                .background()
                .spawn(async move { text::Buffer::new(0, id, contents) })
                .await;
            Ok(cx.add_model(|cx| {
                let mut buffer = Buffer::build(text_buffer, diff_base, Some(Arc::new(file)));
                buffer.git_diff_recalc(cx);
                buffer
            }))
        })
    }
586
587 pub fn diagnostics_for_path(
588 &self,
589 path: &Path,
590 ) -> Vec<(
591 LanguageServerId,
592 Vec<DiagnosticEntry<Unclipped<PointUtf16>>>,
593 )> {
594 self.diagnostics.get(path).cloned().unwrap_or_default()
595 }
596
    /// Replace the diagnostics reported by one language server for one file.
    ///
    /// Keeps `diagnostics` and `diagnostic_summaries` in sync, notifies
    /// collaborators when the worktree is shared, and returns whether the
    /// file's summary changed (i.e. either the old or new summary is non-empty).
    pub fn update_diagnostics(
        &mut self,
        server_id: LanguageServerId,
        worktree_path: Arc<Path>,
        diagnostics: Vec<DiagnosticEntry<Unclipped<PointUtf16>>>,
        _: &mut ModelContext<Worktree>,
    ) -> Result<bool> {
        let summaries_by_server_id = self
            .diagnostic_summaries
            .entry(worktree_path.clone())
            .or_default();

        let old_summary = summaries_by_server_id
            .remove(&server_id)
            .unwrap_or_default();

        let new_summary = DiagnosticSummary::new(&diagnostics);
        if new_summary.is_empty() {
            // No diagnostics left for this server: drop its entry, and drop
            // the whole path entry once no server has diagnostics for it.
            if let Some(diagnostics_by_server_id) = self.diagnostics.get_mut(&worktree_path) {
                if let Ok(ix) = diagnostics_by_server_id.binary_search_by_key(&server_id, |e| e.0) {
                    diagnostics_by_server_id.remove(ix);
                }
                if diagnostics_by_server_id.is_empty() {
                    self.diagnostics.remove(&worktree_path);
                }
            }
        } else {
            summaries_by_server_id.insert(server_id, new_summary);
            // The per-path list is kept sorted by server id.
            let diagnostics_by_server_id =
                self.diagnostics.entry(worktree_path.clone()).or_default();
            match diagnostics_by_server_id.binary_search_by_key(&server_id, |e| e.0) {
                Ok(ix) => {
                    diagnostics_by_server_id[ix] = (server_id, diagnostics);
                }
                Err(ix) => {
                    diagnostics_by_server_id.insert(ix, (server_id, diagnostics));
                }
            }
        }

        if !old_summary.is_empty() || !new_summary.is_empty() {
            // Best-effort notification to collaborators; errors are logged.
            if let Some(share) = self.share.as_ref() {
                self.client
                    .send(proto::UpdateDiagnosticSummary {
                        project_id: share.project_id,
                        worktree_id: self.id().to_proto(),
                        summary: Some(proto::DiagnosticSummary {
                            path: worktree_path.to_string_lossy().to_string(),
                            language_server_id: server_id.0 as u64,
                            error_count: new_summary.error_count as u32,
                            warning_count: new_summary.warning_count as u32,
                        }),
                    })
                    .log_err();
            }
        }

        Ok(!old_summary.is_empty() || !new_summary.is_empty())
    }
656
    /// Install a new snapshot produced by the background scanner, forwarding
    /// it to collaborators when shared and emitting an event when any git
    /// repository changed between the old and new snapshots.
    fn set_snapshot(&mut self, new_snapshot: LocalSnapshot, cx: &mut ModelContext<Worktree>) {
        let updated_repos = Self::changed_repos(
            &self.snapshot.repository_entries,
            &new_snapshot.repository_entries,
        );
        self.snapshot = new_snapshot;

        if let Some(share) = self.share.as_mut() {
            *share.snapshots_tx.borrow_mut() = self.snapshot.clone();
        }

        if !updated_repos.is_empty() {
            cx.emit(Event::UpdatedGitRepositories(updated_repos));
        }
    }
672
673 fn changed_repos(
674 old_repos: &TreeMap<RepositoryWorkDirectory, RepositoryEntry>,
675 new_repos: &TreeMap<RepositoryWorkDirectory, RepositoryEntry>,
676 ) -> Vec<RepositoryEntry> {
677 fn diff<'a>(
678 a: impl Iterator<Item = &'a RepositoryEntry>,
679 mut b: impl Iterator<Item = &'a RepositoryEntry>,
680 updated: &mut HashMap<&'a Path, RepositoryEntry>,
681 ) {
682 for a_repo in a {
683 let matched = b.find(|b_repo| {
684 a_repo.git_dir_path == b_repo.git_dir_path && a_repo.scan_id == b_repo.scan_id
685 });
686
687 if matched.is_none() {
688 updated.insert(a_repo.git_dir_path.as_ref(), a_repo.clone());
689 }
690 }
691 }
692
693 let mut updated = HashMap::<&Path, RepositoryEntry>::default();
694
695 diff(old_repos.values(), new_repos.values(), &mut updated);
696 diff(new_repos.values(), old_repos.values(), &mut updated);
697
698 updated.into_values().collect()
699 }
700
701 pub fn scan_complete(&self) -> impl Future<Output = ()> {
702 let mut is_scanning_rx = self.is_scanning.1.clone();
703 async move {
704 let mut is_scanning = is_scanning_rx.borrow().clone();
705 while is_scanning {
706 if let Some(value) = is_scanning_rx.recv().await {
707 is_scanning = value;
708 } else {
709 break;
710 }
711 }
712 }
713 }
714
    /// A clone of this worktree's current local snapshot.
    pub fn snapshot(&self) -> LocalSnapshot {
        self.snapshot.clone()
    }
718
719 pub fn metadata_proto(&self) -> proto::WorktreeMetadata {
720 proto::WorktreeMetadata {
721 id: self.id().to_proto(),
722 root_name: self.root_name().to_string(),
723 visible: self.visible,
724 abs_path: self.abs_path().as_os_str().to_string_lossy().into(),
725 }
726 }
727
    /// Load a file's contents, its git index text (if it is in a repository),
    /// and a `File` handle with a freshly-refreshed entry for the path.
    fn load(
        &self,
        path: &Path,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<(File, String, Option<String>)>> {
        let handle = cx.handle();
        let path = Arc::from(path);
        let abs_path = self.absolutize(&path);
        let fs = self.fs.clone();
        let snapshot = self.snapshot();

        let mut index_task = None;

        // Kick off loading the git index text concurrently with the file read.
        if let Some(repo) = snapshot.repo_for(&path) {
            let repo_path = repo.work_directory.relativize(&path).unwrap();
            if let Some(repo) = self.git_repositories.get(&repo.git_dir_entry_id) {
                let repo = repo.to_owned();
                index_task = Some(
                    cx.background()
                        .spawn(async move { repo.lock().load_index_text(&repo_path) }),
                );
            }
        }

        cx.spawn(|this, mut cx| async move {
            let text = fs.load(&abs_path).await?;

            let diff_base = if let Some(index_task) = index_task {
                index_task.await
            } else {
                None
            };

            // Eagerly populate the snapshot with an updated entry for the loaded file
            let entry = this
                .update(&mut cx, |this, cx| {
                    this.as_local().unwrap().refresh_entry(path, None, cx)
                })
                .await?;

            Ok((
                File {
                    entry_id: entry.id,
                    worktree: handle,
                    path: entry.path,
                    mtime: entry.mtime,
                    is_local: true,
                    is_deleted: false,
                },
                text,
                diff_base,
            ))
        })
    }
782
783 pub fn save_buffer(
784 &self,
785 buffer_handle: ModelHandle<Buffer>,
786 path: Arc<Path>,
787 has_changed_file: bool,
788 cx: &mut ModelContext<Worktree>,
789 ) -> Task<Result<(clock::Global, RopeFingerprint, SystemTime)>> {
790 let handle = cx.handle();
791 let buffer = buffer_handle.read(cx);
792
793 let rpc = self.client.clone();
794 let buffer_id = buffer.remote_id();
795 let project_id = self.share.as_ref().map(|share| share.project_id);
796
797 let text = buffer.as_rope().clone();
798 let fingerprint = text.fingerprint();
799 let version = buffer.version();
800 let save = self.write_file(path, text, buffer.line_ending(), cx);
801
802 cx.as_mut().spawn(|mut cx| async move {
803 let entry = save.await?;
804
805 if has_changed_file {
806 let new_file = Arc::new(File {
807 entry_id: entry.id,
808 worktree: handle,
809 path: entry.path,
810 mtime: entry.mtime,
811 is_local: true,
812 is_deleted: false,
813 });
814
815 if let Some(project_id) = project_id {
816 rpc.send(proto::UpdateBufferFile {
817 project_id,
818 buffer_id,
819 file: Some(new_file.to_proto()),
820 })
821 .log_err();
822 }
823
824 buffer_handle.update(&mut cx, |buffer, cx| {
825 if has_changed_file {
826 buffer.file_updated(new_file, cx).detach();
827 }
828 });
829 }
830
831 if let Some(project_id) = project_id {
832 rpc.send(proto::BufferSaved {
833 project_id,
834 buffer_id,
835 version: serialize_version(&version),
836 mtime: Some(entry.mtime.into()),
837 fingerprint: serialize_fingerprint(fingerprint),
838 })?;
839 }
840
841 buffer_handle.update(&mut cx, |buffer, cx| {
842 buffer.did_save(version.clone(), fingerprint, entry.mtime, cx);
843 });
844
845 Ok((version, fingerprint, entry.mtime))
846 })
847 }
848
    /// Create a new file or directory at the given worktree-relative path,
    /// then refresh and return its entry.
    pub fn create_entry(
        &self,
        path: impl Into<Arc<Path>>,
        is_dir: bool,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<Entry>> {
        let path = path.into();
        let abs_path = self.absolutize(&path);
        let fs = self.fs.clone();
        let write = cx.background().spawn(async move {
            if is_dir {
                fs.create_dir(&abs_path).await
            } else {
                // Creating a file writes empty content with default line endings.
                fs.save(&abs_path, &Default::default(), Default::default())
                    .await
            }
        });

        cx.spawn(|this, mut cx| async move {
            write.await?;
            this.update(&mut cx, |this, cx| {
                this.as_local_mut().unwrap().refresh_entry(path, None, cx)
            })
            .await
        })
    }
875
    /// Write `text` to the given worktree-relative path, then refresh and
    /// return the file's entry.
    pub fn write_file(
        &self,
        path: impl Into<Arc<Path>>,
        text: Rope,
        line_ending: LineEnding,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<Entry>> {
        let path = path.into();
        let abs_path = self.absolutize(&path);
        let fs = self.fs.clone();
        let write = cx
            .background()
            .spawn(async move { fs.save(&abs_path, &text, line_ending).await });

        cx.spawn(|this, mut cx| async move {
            write.await?;
            this.update(&mut cx, |this, cx| {
                this.as_local_mut().unwrap().refresh_entry(path, None, cx)
            })
            .await
        })
    }
898
    /// Delete the file or directory for the given entry, then wait for the
    /// background scanner to process the removal.
    ///
    /// Returns `None` when no entry with that id exists.
    pub fn delete_entry(
        &self,
        entry_id: ProjectEntryId,
        cx: &mut ModelContext<Worktree>,
    ) -> Option<Task<Result<()>>> {
        let entry = self.entry_for_id(entry_id)?.clone();
        let abs_path = self.abs_path.clone();
        let fs = self.fs.clone();

        let delete = cx.background().spawn(async move {
            let mut abs_path = fs.canonicalize(&abs_path).await?;
            // An entry path with no file name denotes the worktree root.
            if entry.path.file_name().is_some() {
                abs_path = abs_path.join(&entry.path);
            }
            if entry.is_file() {
                fs.remove_file(&abs_path, Default::default()).await?;
            } else {
                fs.remove_dir(
                    &abs_path,
                    RemoveOptions {
                        recursive: true,
                        ignore_if_not_exists: false,
                    },
                )
                .await?;
            }
            anyhow::Ok(abs_path)
        });

        Some(cx.spawn(|this, mut cx| async move {
            let abs_path = delete.await?;
            // Ask the scanner to rescan the deleted path; the barrier resolves
            // once the scanner has processed it.
            let (tx, mut rx) = barrier::channel();
            this.update(&mut cx, |this, _| {
                this.as_local_mut()
                    .unwrap()
                    .path_changes_tx
                    .try_send((vec![abs_path], tx))
            })?;
            rx.recv().await;
            Ok(())
        }))
    }
941
    /// Rename the entry to `new_path`, then refresh both the old and new
    /// paths and return the entry at the new path.
    ///
    /// Returns `None` when no entry with that id exists.
    pub fn rename_entry(
        &self,
        entry_id: ProjectEntryId,
        new_path: impl Into<Arc<Path>>,
        cx: &mut ModelContext<Worktree>,
    ) -> Option<Task<Result<Entry>>> {
        let old_path = self.entry_for_id(entry_id)?.path.clone();
        let new_path = new_path.into();
        let abs_old_path = self.absolutize(&old_path);
        let abs_new_path = self.absolutize(&new_path);
        let fs = self.fs.clone();
        let rename = cx.background().spawn(async move {
            fs.rename(&abs_old_path, &abs_new_path, Default::default())
                .await
        });

        Some(cx.spawn(|this, mut cx| async move {
            rename.await?;
            this.update(&mut cx, |this, cx| {
                this.as_local_mut()
                    .unwrap()
                    .refresh_entry(new_path.clone(), Some(old_path), cx)
            })
            .await
        }))
    }
968
    /// Recursively copy the entry to `new_path`, then refresh and return the
    /// entry at the new path.
    ///
    /// Returns `None` when no entry with that id exists.
    pub fn copy_entry(
        &self,
        entry_id: ProjectEntryId,
        new_path: impl Into<Arc<Path>>,
        cx: &mut ModelContext<Worktree>,
    ) -> Option<Task<Result<Entry>>> {
        let old_path = self.entry_for_id(entry_id)?.path.clone();
        let new_path = new_path.into();
        let abs_old_path = self.absolutize(&old_path);
        let abs_new_path = self.absolutize(&new_path);
        let fs = self.fs.clone();
        let copy = cx.background().spawn(async move {
            copy_recursive(
                fs.as_ref(),
                &abs_old_path,
                &abs_new_path,
                Default::default(),
            )
            .await
        });

        Some(cx.spawn(|this, mut cx| async move {
            copy.await?;
            this.update(&mut cx, |this, cx| {
                this.as_local_mut()
                    .unwrap()
                    .refresh_entry(new_path.clone(), None, cx)
            })
            .await
        }))
    }
1000
    /// Ask the background scanner to rescan `path` (and `old_path`, when a
    /// rename occurred), wait until the rescan is processed, and return the
    /// refreshed entry for `path`.
    fn refresh_entry(
        &self,
        path: Arc<Path>,
        old_path: Option<Arc<Path>>,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<Entry>> {
        let fs = self.fs.clone();
        let abs_root_path = self.abs_path.clone();
        let path_changes_tx = self.path_changes_tx.clone();
        cx.spawn_weak(move |this, mut cx| async move {
            let abs_path = fs.canonicalize(&abs_root_path).await?;
            let mut paths = Vec::with_capacity(2);
            // A path with no file name denotes the worktree root.
            paths.push(if path.file_name().is_some() {
                abs_path.join(&path)
            } else {
                abs_path.clone()
            });
            if let Some(old_path) = old_path {
                paths.push(if old_path.file_name().is_some() {
                    abs_path.join(&old_path)
                } else {
                    abs_path.clone()
                });
            }

            // The barrier resolves once the scanner has processed these paths.
            let (tx, mut rx) = barrier::channel();
            path_changes_tx.try_send((paths, tx))?;
            rx.recv().await;
            this.upgrade(&cx)
                .ok_or_else(|| anyhow!("worktree was dropped"))?
                .update(&mut cx, |this, _| {
                    this.entry_for_path(path)
                        .cloned()
                        .ok_or_else(|| anyhow!("failed to read path after update"))
                })
        })
    }
1038
    /// Begin (or resume) sharing this worktree with collaborators in the
    /// given project.
    ///
    /// Starts a background task that diffs each new snapshot against the
    /// previous one and streams chunked `UpdateWorktree` requests to the
    /// server, retrying after failures when `resume_updates` is signaled.
    /// The returned task resolves once the first full update has been sent.
    pub fn share(&mut self, project_id: u64, cx: &mut ModelContext<Worktree>) -> Task<Result<()>> {
        let (share_tx, share_rx) = oneshot::channel();

        if let Some(share) = self.share.as_mut() {
            // Already sharing: just nudge the update task to resume.
            let _ = share_tx.send(());
            *share.resume_updates.borrow_mut() = ();
        } else {
            let (snapshots_tx, mut snapshots_rx) = watch::channel_with(self.snapshot());
            let (resume_updates_tx, mut resume_updates_rx) = watch::channel();
            let worktree_id = cx.model_id() as u64;

            // Send all existing diagnostic summaries up-front.
            for (path, summaries) in &self.diagnostic_summaries {
                for (&server_id, summary) in summaries {
                    if let Err(e) = self.client.send(proto::UpdateDiagnosticSummary {
                        project_id,
                        worktree_id,
                        summary: Some(summary.to_proto(server_id, &path)),
                    }) {
                        return Task::ready(Err(e));
                    }
                }
            }

            let _maintain_remote_snapshot = cx.background().spawn({
                let client = self.client.clone();
                async move {
                    let mut share_tx = Some(share_tx);
                    // Start from an empty snapshot so the first diff sends everything.
                    let mut prev_snapshot = LocalSnapshot {
                        ignores_by_parent_abs_path: Default::default(),
                        removed_entry_ids: Default::default(),
                        next_entry_id: Default::default(),
                        git_repositories: Default::default(),
                        snapshot: Snapshot {
                            id: WorktreeId(worktree_id as usize),
                            abs_path: Path::new("").into(),
                            root_name: Default::default(),
                            root_char_bag: Default::default(),
                            entries_by_path: Default::default(),
                            entries_by_id: Default::default(),
                            repository_entries: Default::default(),
                            scan_id: 0,
                            completed_scan_id: 0,
                        },
                    };
                    while let Some(snapshot) = snapshots_rx.recv().await {
                        // Small chunks in tests exercise the chunking logic.
                        #[cfg(any(test, feature = "test-support"))]
                        const MAX_CHUNK_SIZE: usize = 2;
                        #[cfg(not(any(test, feature = "test-support")))]
                        const MAX_CHUNK_SIZE: usize = 256;

                        let update =
                            snapshot.build_update(&prev_snapshot, project_id, worktree_id, true);
                        for update in proto::split_worktree_update(update, MAX_CHUNK_SIZE) {
                            // Clear any stale resume signal before sending.
                            let _ = resume_updates_rx.try_recv();
                            while let Err(error) = client.request(update.clone()).await {
                                log::error!("failed to send worktree update: {}", error);
                                log::info!("waiting to resume updates");
                                if resume_updates_rx.next().await.is_none() {
                                    return Ok(());
                                }
                            }
                        }

                        // First full update has been sent: resolve the share task.
                        if let Some(share_tx) = share_tx.take() {
                            let _ = share_tx.send(());
                        }

                        prev_snapshot = snapshot;
                    }

                    Ok::<_, anyhow::Error>(())
                }
                .log_err()
            });

            self.share = Some(ShareState {
                project_id,
                snapshots_tx,
                resume_updates: resume_updates_tx,
                _maintain_remote_snapshot,
            });
        }

        cx.foreground()
            .spawn(async move { share_rx.await.map_err(|_| anyhow!("share ended")) })
    }
1125
1126 pub fn unshare(&mut self) {
1127 self.share.take();
1128 }
1129
1130 pub fn is_shared(&self) -> bool {
1131 self.share.is_some()
1132 }
1133
1134 pub fn load_index_text(
1135 &self,
1136 repo: RepositoryEntry,
1137 repo_path: RepoPath,
1138 cx: &mut ModelContext<Worktree>,
1139 ) -> Task<Option<String>> {
1140 let Some(git_ptr) = self.git_repositories.get(&repo.git_dir_entry_id).map(|git_ptr| git_ptr.to_owned()) else {
1141 return Task::Ready(Some(None))
1142 };
1143 cx.background()
1144 .spawn(async move { git_ptr.lock().load_index_text(&repo_path) })
1145 }
1146}
1147
1148impl RemoteWorktree {
    /// A clone of the latest snapshot synchronized onto the foreground model.
    fn snapshot(&self) -> Snapshot {
        self.snapshot.clone()
    }
1152
1153 pub fn disconnected_from_host(&mut self) {
1154 self.updates_tx.take();
1155 self.snapshot_subscriptions.clear();
1156 self.disconnected = true;
1157 }
1158
    /// Ask the host to save the buffer, then apply the resulting version,
    /// fingerprint, and mtime to the local buffer.
    pub fn save_buffer(
        &self,
        buffer_handle: ModelHandle<Buffer>,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<(clock::Global, RopeFingerprint, SystemTime)>> {
        let buffer = buffer_handle.read(cx);
        let buffer_id = buffer.remote_id();
        let version = buffer.version();
        let rpc = self.client.clone();
        let project_id = self.project_id;
        cx.as_mut().spawn(|mut cx| async move {
            let response = rpc
                .request(proto::SaveBuffer {
                    project_id,
                    buffer_id,
                    version: serialize_version(&version),
                })
                .await?;
            // The host reports the version/fingerprint/mtime it actually saved.
            let version = deserialize_version(&response.version);
            let fingerprint = deserialize_fingerprint(&response.fingerprint)?;
            let mtime = response
                .mtime
                .ok_or_else(|| anyhow!("missing mtime"))?
                .into();

            buffer_handle.update(&mut cx, |buffer, cx| {
                buffer.did_save(version.clone(), fingerprint, mtime, cx);
            });

            Ok((version, fingerprint, mtime))
        })
    }
1191
1192 pub fn update_from_remote(&mut self, update: proto::UpdateWorktree) {
1193 if let Some(updates_tx) = &self.updates_tx {
1194 updates_tx
1195 .unbounded_send(update)
1196 .expect("consumer runs to completion");
1197 }
1198 }
1199
1200 fn observed_snapshot(&self, scan_id: usize) -> bool {
1201 self.completed_scan_id >= scan_id
1202 }
1203
1204 fn wait_for_snapshot(&mut self, scan_id: usize) -> impl Future<Output = Result<()>> {
1205 let (tx, rx) = oneshot::channel();
1206 if self.observed_snapshot(scan_id) {
1207 let _ = tx.send(());
1208 } else if self.disconnected {
1209 drop(tx);
1210 } else {
1211 match self
1212 .snapshot_subscriptions
1213 .binary_search_by_key(&scan_id, |probe| probe.0)
1214 {
1215 Ok(ix) | Err(ix) => self.snapshot_subscriptions.insert(ix, (scan_id, tx)),
1216 }
1217 }
1218
1219 async move {
1220 rx.await?;
1221 Ok(())
1222 }
1223 }
1224
1225 pub fn update_diagnostic_summary(
1226 &mut self,
1227 path: Arc<Path>,
1228 summary: &proto::DiagnosticSummary,
1229 ) {
1230 let server_id = LanguageServerId(summary.language_server_id as usize);
1231 let summary = DiagnosticSummary {
1232 error_count: summary.error_count as usize,
1233 warning_count: summary.warning_count as usize,
1234 };
1235
1236 if summary.is_empty() {
1237 if let Some(summaries) = self.diagnostic_summaries.get_mut(&path) {
1238 summaries.remove(&server_id);
1239 if summaries.is_empty() {
1240 self.diagnostic_summaries.remove(&path);
1241 }
1242 }
1243 } else {
1244 self.diagnostic_summaries
1245 .entry(path)
1246 .or_default()
1247 .insert(server_id, summary);
1248 }
1249 }
1250
1251 pub fn insert_entry(
1252 &mut self,
1253 entry: proto::Entry,
1254 scan_id: usize,
1255 cx: &mut ModelContext<Worktree>,
1256 ) -> Task<Result<Entry>> {
1257 let wait_for_snapshot = self.wait_for_snapshot(scan_id);
1258 cx.spawn(|this, mut cx| async move {
1259 wait_for_snapshot.await?;
1260 this.update(&mut cx, |worktree, _| {
1261 let worktree = worktree.as_remote_mut().unwrap();
1262 let mut snapshot = worktree.background_snapshot.lock();
1263 let entry = snapshot.insert_entry(entry);
1264 worktree.snapshot = snapshot.clone();
1265 entry
1266 })
1267 })
1268 }
1269
1270 pub(crate) fn delete_entry(
1271 &mut self,
1272 id: ProjectEntryId,
1273 scan_id: usize,
1274 cx: &mut ModelContext<Worktree>,
1275 ) -> Task<Result<()>> {
1276 let wait_for_snapshot = self.wait_for_snapshot(scan_id);
1277 cx.spawn(|this, mut cx| async move {
1278 wait_for_snapshot.await?;
1279 this.update(&mut cx, |worktree, _| {
1280 let worktree = worktree.as_remote_mut().unwrap();
1281 let mut snapshot = worktree.background_snapshot.lock();
1282 snapshot.delete_entry(id);
1283 worktree.snapshot = snapshot.clone();
1284 });
1285 Ok(())
1286 })
1287 }
1288}
1289
impl Snapshot {
    /// The id of the worktree this snapshot belongs to.
    pub fn id(&self) -> WorktreeId {
        self.id
    }

    /// Absolute path of the worktree root on disk.
    pub fn abs_path(&self) -> &Arc<Path> {
        &self.abs_path
    }

    /// Whether an entry with the given id exists in this snapshot.
    pub fn contains_entry(&self, entry_id: ProjectEntryId) -> bool {
        self.entries_by_id.get(&entry_id, &()).is_some()
    }

    /// Inserts (or replaces) an entry received over the wire, keeping the
    /// by-path and by-id indices in sync.
    pub(crate) fn insert_entry(&mut self, entry: proto::Entry) -> Result<Entry> {
        let entry = Entry::try_from((&self.root_char_bag, entry))?;
        let old_entry = self.entries_by_id.insert_or_replace(
            PathEntry {
                id: entry.id,
                path: entry.path.clone(),
                is_ignored: entry.is_ignored,
                // Entries applied from protos don't carry a per-entry scan id.
                scan_id: 0,
            },
            &(),
        );
        // If the id was already present under a different path (a rename),
        // remove the stale path record before inserting the new one.
        if let Some(old_entry) = old_entry {
            self.entries_by_path.remove(&PathKey(old_entry.path), &());
        }
        self.entries_by_path.insert_or_replace(entry.clone(), &());
        Ok(entry)
    }

    /// Removes the entry with the given id and all of its descendants
    /// (entries whose paths start with the removed entry's path) from both
    /// indices. Returns the removed entry's path, or `None` if the id was
    /// unknown.
    fn delete_entry(&mut self, entry_id: ProjectEntryId) -> Option<Arc<Path>> {
        let removed_entry = self.entries_by_id.remove(&entry_id, &())?;
        self.entries_by_path = {
            let mut cursor = self.entries_by_path.cursor();
            // Keep everything strictly before the removed path...
            let mut new_entries_by_path =
                cursor.slice(&TraversalTarget::Path(&removed_entry.path), Bias::Left, &());
            // ...skip the removed entry's whole subtree, dropping those ids
            // from the by-id index as we go...
            while let Some(entry) = cursor.item() {
                if entry.path.starts_with(&removed_entry.path) {
                    self.entries_by_id.remove(&entry.id, &());
                    cursor.next(&());
                } else {
                    break;
                }
            }
            // ...and keep the remainder.
            new_entries_by_path.push_tree(cursor.suffix(&()), &());
            new_entries_by_path
        };

        Some(removed_entry.path)
    }

    /// Applies a batched update from the host: removals first, then
    /// insert-or-replace for each updated entry. `completed_scan_id` only
    /// advances when the update is marked as the last chunk of its batch.
    pub(crate) fn apply_remote_update(&mut self, update: proto::UpdateWorktree) -> Result<()> {
        let mut entries_by_path_edits = Vec::new();
        let mut entries_by_id_edits = Vec::new();
        for entry_id in update.removed_entries {
            if let Some(entry) = self.entry_for_id(ProjectEntryId::from_proto(entry_id)) {
                entries_by_path_edits.push(Edit::Remove(PathKey(entry.path.clone())));
                entries_by_id_edits.push(Edit::Remove(entry.id));
            }
        }

        for entry in update.updated_entries {
            let entry = Entry::try_from((&self.root_char_bag, entry))?;
            // An update can move an entry; drop the old path key if the id
            // was previously recorded under a different path.
            if let Some(PathEntry { path, .. }) = self.entries_by_id.get(&entry.id, &()) {
                entries_by_path_edits.push(Edit::Remove(PathKey(path.clone())));
            }
            entries_by_id_edits.push(Edit::Insert(PathEntry {
                id: entry.id,
                path: entry.path.clone(),
                is_ignored: entry.is_ignored,
                scan_id: 0,
            }));
            entries_by_path_edits.push(Edit::Insert(entry));
        }

        self.entries_by_path.edit(entries_by_path_edits, &());
        self.entries_by_id.edit(entries_by_id_edits, &());
        self.scan_id = update.scan_id as usize;
        if update.is_last_update {
            self.completed_scan_id = update.scan_id as usize;
        }

        Ok(())
    }

    /// Number of file entries (ignored included) in this snapshot.
    pub fn file_count(&self) -> usize {
        self.entries_by_path.summary().file_count
    }

    /// Number of non-ignored file entries in this snapshot.
    pub fn visible_file_count(&self) -> usize {
        self.entries_by_path.summary().visible_file_count
    }

    /// Starts a traversal positioned at the `start_offset`-th entry, where
    /// offsets count only the entry kinds selected by the two flags.
    fn traverse_from_offset(
        &self,
        include_dirs: bool,
        include_ignored: bool,
        start_offset: usize,
    ) -> Traversal {
        let mut cursor = self.entries_by_path.cursor();
        cursor.seek(
            &TraversalTarget::Count {
                count: start_offset,
                include_dirs,
                include_ignored,
            },
            Bias::Right,
            &(),
        );
        Traversal {
            cursor,
            include_dirs,
            include_ignored,
        }
    }

    /// Starts a traversal positioned at (or just before) the given path.
    fn traverse_from_path(
        &self,
        include_dirs: bool,
        include_ignored: bool,
        path: &Path,
    ) -> Traversal {
        let mut cursor = self.entries_by_path.cursor();
        cursor.seek(&TraversalTarget::Path(path), Bias::Left, &());
        Traversal {
            cursor,
            include_dirs,
            include_ignored,
        }
    }

    /// Iterates file entries starting at offset `start`.
    pub fn files(&self, include_ignored: bool, start: usize) -> Traversal {
        self.traverse_from_offset(false, include_ignored, start)
    }

    /// Iterates all entries (files and directories) from the beginning.
    pub fn entries(&self, include_ignored: bool) -> Traversal {
        self.traverse_from_offset(true, include_ignored, 0)
    }

    /// Iterates every entry's path, skipping the root entry's empty path.
    pub fn paths(&self) -> impl Iterator<Item = &Arc<Path>> {
        let empty_path = Path::new("");
        self.entries_by_path
            .cursor::<()>()
            .filter(move |entry| entry.path.as_ref() != empty_path)
            .map(|entry| &entry.path)
    }

    /// Iterates the immediate children of `parent_path`.
    fn child_entries<'a>(&'a self, parent_path: &'a Path) -> ChildEntriesIter<'a> {
        let mut cursor = self.entries_by_path.cursor();
        // Bias::Right positions the cursor just past the parent entry itself.
        cursor.seek(&TraversalTarget::Path(parent_path), Bias::Right, &());
        let traversal = Traversal {
            cursor,
            include_dirs: true,
            include_ignored: true,
        };
        ChildEntriesIter {
            traversal,
            parent_path,
        }
    }

    /// The entry for the worktree root (the empty relative path), if any.
    pub fn root_entry(&self) -> Option<&Entry> {
        self.entry_for_path("")
    }

    /// The file name of the worktree root.
    pub fn root_name(&self) -> &str {
        &self.root_name
    }

    /// The repository entry whose work directory is the worktree root, if
    /// one exists.
    pub fn root_git_entry(&self) -> Option<RepositoryEntry> {
        self.repository_entries
            .get(&"".into())
            .map(|entry| entry.to_owned())
    }

    /// Iterates every repository entry known to this snapshot.
    pub fn git_entries(&self) -> impl Iterator<Item = &RepositoryEntry> {
        self.repository_entries.values()
    }

    /// The id of the most recently started scan reflected in this snapshot.
    pub fn scan_id(&self) -> usize {
        self.scan_id
    }

    /// Looks up the entry with exactly the given worktree-relative path.
    pub fn entry_for_path(&self, path: impl AsRef<Path>) -> Option<&Entry> {
        let path = path.as_ref();
        self.traverse_from_path(true, true, path)
            .entry()
            .and_then(|entry| {
                // The traversal may land on a later path; require an exact match.
                if entry.path.as_ref() == path {
                    Some(entry)
                } else {
                    None
                }
            })
    }

    /// Looks up an entry by id, going through the by-id index to find its
    /// path and then resolving it in the by-path index.
    pub fn entry_for_id(&self, id: ProjectEntryId) -> Option<&Entry> {
        let entry = self.entries_by_id.get(&id, &())?;
        self.entry_for_path(&entry.path)
    }

    /// The inode of the entry at `path`, if it exists.
    pub fn inode_for_path(&self, path: impl AsRef<Path>) -> Option<u64> {
        self.entry_for_path(path.as_ref()).map(|e| e.inode)
    }
}
1496
impl LocalSnapshot {
    /// Finds the repository whose work directory contains `path`, preferring
    /// the deepest (longest-path) containing work directory.
    pub(crate) fn repo_for(&self, path: &Path) -> Option<RepositoryEntry> {
        let mut max_len = 0;
        let mut current_candidate = None;
        for (work_directory, repo) in (&self.repository_entries).iter() {
            if work_directory.contains(path) {
                if work_directory.0.as_os_str().len() >= max_len {
                    current_candidate = Some(repo);
                    max_len = work_directory.0.as_os_str().len();
                } else {
                    // NOTE(review): stopping here relies on containing work
                    // directories appearing in increasing depth during
                    // iteration — confirm against `repository_entries` order.
                    break;
                }
            }
        }

        current_candidate.map(|entry| entry.to_owned())
    }

    /// For a path inside some repository's `.git` directory, returns that
    /// repository's work directory together with its open repo handle.
    pub(crate) fn repo_for_metadata(
        &self,
        path: &Path,
    ) -> Option<(RepositoryWorkDirectory, Arc<Mutex<dyn GitRepository>>)> {
        self.repository_entries
            .iter()
            .find(|(_, repo)| repo.in_dot_git(path))
            .map(|(work_directory, entry)| {
                (
                    work_directory.to_owned(),
                    self.git_repositories
                        .get(&entry.git_dir_entry_id)
                        .expect("These two data structures should be in sync")
                        .to_owned(),
                )
            })
    }

    /// Builds an update message carrying this snapshot's full contents, as
    /// the first message a freshly-shared worktree would send.
    #[cfg(test)]
    pub(crate) fn build_initial_update(&self, project_id: u64) -> proto::UpdateWorktree {
        let root_name = self.root_name.clone();
        proto::UpdateWorktree {
            project_id,
            worktree_id: self.id().to_proto(),
            abs_path: self.abs_path().to_string_lossy().into(),
            root_name,
            updated_entries: self.entries_by_path.iter().map(Into::into).collect(),
            removed_entries: Default::default(),
            scan_id: self.scan_id as u64,
            is_last_update: true,
        }
    }

    /// Diffs this snapshot against `other` (an older snapshot) and builds an
    /// update message containing the entries that were added, changed
    /// (detected via differing per-entry scan ids), or removed.
    ///
    /// Both by-id indices are iterated in id order, so the diff is a single
    /// linear merge over the two cursors.
    pub(crate) fn build_update(
        &self,
        other: &Self,
        project_id: u64,
        worktree_id: u64,
        include_ignored: bool,
    ) -> proto::UpdateWorktree {
        let mut updated_entries = Vec::new();
        let mut removed_entries = Vec::new();
        let mut self_entries = self
            .entries_by_id
            .cursor::<()>()
            .filter(|e| include_ignored || !e.is_ignored)
            .peekable();
        let mut other_entries = other
            .entries_by_id
            .cursor::<()>()
            .filter(|e| include_ignored || !e.is_ignored)
            .peekable();
        loop {
            match (self_entries.peek(), other_entries.peek()) {
                (Some(self_entry), Some(other_entry)) => {
                    match Ord::cmp(&self_entry.id, &other_entry.id) {
                        // Present only in the new snapshot: added.
                        Ordering::Less => {
                            let entry = self.entry_for_id(self_entry.id).unwrap().into();
                            updated_entries.push(entry);
                            self_entries.next();
                        }
                        // Present in both: changed only if re-scanned since.
                        Ordering::Equal => {
                            if self_entry.scan_id != other_entry.scan_id {
                                let entry = self.entry_for_id(self_entry.id).unwrap().into();
                                updated_entries.push(entry);
                            }

                            self_entries.next();
                            other_entries.next();
                        }
                        // Present only in the old snapshot: removed.
                        Ordering::Greater => {
                            removed_entries.push(other_entry.id.to_proto());
                            other_entries.next();
                        }
                    }
                }
                (Some(self_entry), None) => {
                    let entry = self.entry_for_id(self_entry.id).unwrap().into();
                    updated_entries.push(entry);
                    self_entries.next();
                }
                (None, Some(other_entry)) => {
                    removed_entries.push(other_entry.id.to_proto());
                    other_entries.next();
                }
                (None, None) => break,
            }
        }

        proto::UpdateWorktree {
            project_id,
            worktree_id,
            abs_path: self.abs_path().to_string_lossy().into(),
            root_name: self.root_name().to_string(),
            updated_entries,
            removed_entries,
            scan_id: self.scan_id as u64,
            is_last_update: self.completed_scan_id == self.scan_id,
        }
    }

    /// Inserts an entry discovered on disk, keeping both indices in sync.
    /// If the entry is a `.gitignore` file, its rules are (re)loaded into
    /// `ignores_by_parent_abs_path`.
    fn insert_entry(&mut self, mut entry: Entry, fs: &dyn Fs) -> Entry {
        if entry.is_file() && entry.path.file_name() == Some(&GITIGNORE) {
            let abs_path = self.abs_path.join(&entry.path);
            match smol::block_on(build_gitignore(&abs_path, fs)) {
                Ok(ignore) => {
                    self.ignores_by_parent_abs_path.insert(
                        abs_path.parent().unwrap().into(),
                        (Arc::new(ignore), self.scan_id),
                    );
                }
                Err(error) => {
                    log::error!(
                        "error loading .gitignore file {:?} - {:?}",
                        &entry.path,
                        error
                    );
                }
            }
        }

        self.reuse_entry_id(&mut entry);

        // Don't downgrade a directory that was already fully scanned back to
        // pending when it gets re-inserted.
        if entry.kind == EntryKind::PendingDir {
            if let Some(existing_entry) =
                self.entries_by_path.get(&PathKey(entry.path.clone()), &())
            {
                entry.kind = existing_entry.kind;
            }
        }

        let scan_id = self.scan_id;
        let removed = self.entries_by_path.insert_or_replace(entry.clone(), &());
        // If this path was previously held by a different id, drop the stale
        // id record so the by-id index doesn't point at a replaced entry.
        if let Some(removed) = removed {
            if removed.id != entry.id {
                self.entries_by_id.remove(&removed.id, &());
            }
        }
        self.entries_by_id.insert_or_replace(
            PathEntry {
                id: entry.id,
                path: entry.path.clone(),
                is_ignored: entry.is_ignored,
                scan_id,
            },
            &(),
        );

        entry
    }

    /// Records the children of a just-scanned directory, upgrading the
    /// parent from `PendingDir` to `Dir`, registering the directory's
    /// gitignore (if any), and — when the directory is a `.git` dir —
    /// opening and registering its repository.
    fn populate_dir(
        &mut self,
        parent_path: Arc<Path>,
        entries: impl IntoIterator<Item = Entry>,
        ignore: Option<Arc<Gitignore>>,
        fs: &dyn Fs,
    ) {
        let mut parent_entry = if let Some(parent_entry) =
            self.entries_by_path.get(&PathKey(parent_path.clone()), &())
        {
            parent_entry.clone()
        } else {
            log::warn!(
                "populating a directory {:?} that has been removed",
                parent_path
            );
            return;
        };

        match parent_entry.kind {
            EntryKind::PendingDir => {
                parent_entry.kind = EntryKind::Dir;
            }
            EntryKind::Dir => {}
            // The path no longer refers to a directory; nothing to populate.
            _ => return,
        }

        if let Some(ignore) = ignore {
            self.ignores_by_parent_abs_path.insert(
                self.abs_path.join(&parent_path).into(),
                (ignore, self.scan_id),
            );
        }

        if parent_path.file_name() == Some(&DOT_GIT) {
            let abs_path = self.abs_path.join(&parent_path);
            // The repository's work directory is the parent of `.git`.
            let content_path: Arc<Path> = parent_path.parent().unwrap().into();

            let key = RepositoryWorkDirectory(content_path.clone());
            if self.repository_entries.get(&key).is_none() {
                if let Some(repo) = fs.open_repo(abs_path.as_path()) {
                    let repo_lock = repo.lock();
                    self.repository_entries.insert(
                        key.clone(),
                        RepositoryEntry {
                            git_dir_path: parent_path.clone(),
                            git_dir_entry_id: parent_entry.id,
                            work_directory: key,
                            scan_id: 0,
                            branch: repo_lock.branch_name().map(Into::into),
                        },
                    );
                    drop(repo_lock);

                    self.git_repositories.insert(parent_entry.id, repo)
                }
            }
        }

        let mut entries_by_path_edits = vec![Edit::Insert(parent_entry)];
        let mut entries_by_id_edits = Vec::new();

        for mut entry in entries {
            self.reuse_entry_id(&mut entry);
            entries_by_id_edits.push(Edit::Insert(PathEntry {
                id: entry.id,
                path: entry.path.clone(),
                is_ignored: entry.is_ignored,
                scan_id: self.scan_id,
            }));
            entries_by_path_edits.push(Edit::Insert(entry));
        }

        self.entries_by_path.edit(entries_by_path_edits, &());
        self.entries_by_id.edit(entries_by_id_edits, &());
    }

    /// Keeps entry ids stable across removals and rescans: reuse the id that
    /// a previously-removed entry with the same inode had, or the id of the
    /// existing entry at the same path.
    fn reuse_entry_id(&mut self, entry: &mut Entry) {
        if let Some(removed_entry_id) = self.removed_entry_ids.remove(&entry.inode) {
            entry.id = removed_entry_id;
        } else if let Some(existing_entry) = self.entry_for_path(&entry.path) {
            entry.id = existing_entry.id;
        }
    }

    /// Removes `path` and everything under it, remembering each removed
    /// entry's id (keyed by inode) so a re-created file can get its old id
    /// back. Also bumps the scan id of an affected `.gitignore` or `.git`
    /// record so dependents know to re-evaluate.
    fn remove_path(&mut self, path: &Path) {
        let mut new_entries;
        let removed_entries;
        {
            let mut cursor = self.entries_by_path.cursor::<TraversalProgress>();
            // Split the tree into: before `path`, the removed subtree, after.
            new_entries = cursor.slice(&TraversalTarget::Path(path), Bias::Left, &());
            removed_entries = cursor.slice(&TraversalTarget::PathSuccessor(path), Bias::Left, &());
            new_entries.push_tree(cursor.suffix(&()), &());
        }
        self.entries_by_path = new_entries;

        let mut entries_by_id_edits = Vec::new();
        for entry in removed_entries.cursor::<()>() {
            // Keep the largest id seen per inode for later reuse.
            let removed_entry_id = self
                .removed_entry_ids
                .entry(entry.inode)
                .or_insert(entry.id);
            *removed_entry_id = cmp::max(*removed_entry_id, entry.id);
            entries_by_id_edits.push(Edit::Remove(entry.id));
        }
        self.entries_by_id.edit(entries_by_id_edits, &());

        if path.file_name() == Some(&GITIGNORE) {
            let abs_parent_path = self.abs_path.join(path.parent().unwrap());
            if let Some((_, scan_id)) = self
                .ignores_by_parent_abs_path
                .get_mut(abs_parent_path.as_path())
            {
                *scan_id = self.snapshot.scan_id;
            }
        } else if path.file_name() == Some(&DOT_GIT) {
            let repo_entry_key = RepositoryWorkDirectory(path.parent().unwrap().into());
            self.snapshot
                .repository_entries
                .update(&repo_entry_key, |repo| repo.scan_id = self.snapshot.scan_id);
        }
    }

    /// Collects the inodes of every ancestor directory of `path` that exists
    /// in this snapshot (used for symlink cycle detection — see
    /// `ancestor_inodes` in scan jobs).
    fn ancestor_inodes_for_path(&self, path: &Path) -> TreeSet<u64> {
        let mut inodes = TreeSet::default();
        for ancestor in path.ancestors().skip(1) {
            if let Some(entry) = self.entry_for_path(ancestor) {
                inodes.insert(entry.inode);
            }
        }
        inodes
    }

    /// Builds the gitignore stack that applies to `abs_path` by walking its
    /// ancestors from the outermost inward, short-circuiting to "ignore
    /// everything" as soon as some ancestor is itself ignored.
    fn ignore_stack_for_abs_path(&self, abs_path: &Path, is_dir: bool) -> Arc<IgnoreStack> {
        let mut new_ignores = Vec::new();
        for ancestor in abs_path.ancestors().skip(1) {
            if let Some((ignore, _)) = self.ignores_by_parent_abs_path.get(ancestor) {
                new_ignores.push((ancestor, Some(ignore.clone())));
            } else {
                new_ignores.push((ancestor, None));
            }
        }

        let mut ignore_stack = IgnoreStack::none();
        // `rev()` flips the ancestor order to outermost-first.
        for (parent_abs_path, ignore) in new_ignores.into_iter().rev() {
            if ignore_stack.is_abs_path_ignored(parent_abs_path, true) {
                ignore_stack = IgnoreStack::all();
                break;
            } else if let Some(ignore) = ignore {
                ignore_stack = ignore_stack.append(parent_abs_path.into(), ignore);
            }
        }

        if ignore_stack.is_abs_path_ignored(abs_path, is_dir) {
            ignore_stack = IgnoreStack::all();
        }

        ignore_stack
    }
}
1826
1827async fn build_gitignore(abs_path: &Path, fs: &dyn Fs) -> Result<Gitignore> {
1828 let contents = fs.load(abs_path).await?;
1829 let parent = abs_path.parent().unwrap_or_else(|| Path::new("/"));
1830 let mut builder = GitignoreBuilder::new(parent);
1831 for line in contents.lines() {
1832 builder.add_line(Some(abs_path.into()), line)?;
1833 }
1834 Ok(builder.build()?)
1835}
1836
1837impl WorktreeId {
1838 pub fn from_usize(handle_id: usize) -> Self {
1839 Self(handle_id)
1840 }
1841
1842 pub(crate) fn from_proto(id: u64) -> Self {
1843 Self(id as usize)
1844 }
1845
1846 pub fn to_proto(&self) -> u64 {
1847 self.0 as u64
1848 }
1849
1850 pub fn to_usize(&self) -> usize {
1851 self.0
1852 }
1853}
1854
1855impl fmt::Display for WorktreeId {
1856 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1857 self.0.fmt(f)
1858 }
1859}
1860
1861impl Deref for Worktree {
1862 type Target = Snapshot;
1863
1864 fn deref(&self) -> &Self::Target {
1865 match self {
1866 Worktree::Local(worktree) => &worktree.snapshot,
1867 Worktree::Remote(worktree) => &worktree.snapshot,
1868 }
1869 }
1870}
1871
1872impl Deref for LocalWorktree {
1873 type Target = LocalSnapshot;
1874
1875 fn deref(&self) -> &Self::Target {
1876 &self.snapshot
1877 }
1878}
1879
1880impl Deref for RemoteWorktree {
1881 type Target = Snapshot;
1882
1883 fn deref(&self) -> &Self::Target {
1884 &self.snapshot
1885 }
1886}
1887
1888impl fmt::Debug for LocalWorktree {
1889 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1890 self.snapshot.fmt(f)
1891 }
1892}
1893
1894impl fmt::Debug for Snapshot {
1895 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1896 struct EntriesById<'a>(&'a SumTree<PathEntry>);
1897 struct EntriesByPath<'a>(&'a SumTree<Entry>);
1898
1899 impl<'a> fmt::Debug for EntriesByPath<'a> {
1900 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1901 f.debug_map()
1902 .entries(self.0.iter().map(|entry| (&entry.path, entry.id)))
1903 .finish()
1904 }
1905 }
1906
1907 impl<'a> fmt::Debug for EntriesById<'a> {
1908 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1909 f.debug_list().entries(self.0.iter()).finish()
1910 }
1911 }
1912
1913 f.debug_struct("Snapshot")
1914 .field("id", &self.id)
1915 .field("root_name", &self.root_name)
1916 .field("entries_by_path", &EntriesByPath(&self.entries_by_path))
1917 .field("entries_by_id", &EntriesById(&self.entries_by_id))
1918 .finish()
1919 }
1920}
1921
#[derive(Clone, PartialEq)]
pub struct File {
    /// The worktree this file belongs to.
    pub worktree: ModelHandle<Worktree>,
    /// Path of the file relative to the worktree root.
    pub path: Arc<Path>,
    /// Last-known modification time of the file on disk.
    pub mtime: SystemTime,
    /// Id of the worktree entry backing this file.
    pub(crate) entry_id: ProjectEntryId,
    /// Whether this file belongs to a local (as opposed to remote) worktree.
    pub(crate) is_local: bool,
    /// Whether the backing entry has been deleted from the worktree.
    pub(crate) is_deleted: bool,
}
1931
impl language::File for File {
    /// Exposes the `LocalFile` interface only for files in local worktrees.
    fn as_local(&self) -> Option<&dyn language::LocalFile> {
        if self.is_local {
            Some(self)
        } else {
            None
        }
    }

    fn mtime(&self) -> SystemTime {
        self.mtime
    }

    fn path(&self) -> &Arc<Path> {
        &self.path
    }

    /// Builds a display path: for visible worktrees this is the worktree's
    /// root name followed by the relative path; for invisible ones it is the
    /// absolute path, with a local worktree's home-directory prefix replaced
    /// by `~`.
    fn full_path(&self, cx: &AppContext) -> PathBuf {
        let mut full_path = PathBuf::new();
        let worktree = self.worktree.read(cx);

        if worktree.is_visible() {
            full_path.push(worktree.root_name());
        } else {
            let path = worktree.abs_path();

            if worktree.is_local() && path.starts_with(HOME.as_path()) {
                full_path.push("~");
                full_path.push(path.strip_prefix(HOME.as_path()).unwrap());
            } else {
                full_path.push(path)
            }
        }

        // The root entry's relative path is empty; skip pushing it.
        if self.path.components().next().is_some() {
            full_path.push(&self.path);
        }

        full_path
    }

    /// Returns the last component of this handle's absolute path. If this handle refers to the root
    /// of its worktree, then this method will return the name of the worktree itself.
    fn file_name<'a>(&'a self, cx: &'a AppContext) -> &'a OsStr {
        self.path
            .file_name()
            .unwrap_or_else(|| OsStr::new(&self.worktree.read(cx).root_name))
    }

    fn is_deleted(&self) -> bool {
        self.is_deleted
    }

    fn as_any(&self) -> &dyn Any {
        self
    }

    /// Serializes this file for transmission over the wire.
    fn to_proto(&self) -> rpc::proto::File {
        rpc::proto::File {
            worktree_id: self.worktree.id() as u64,
            entry_id: self.entry_id.to_proto(),
            path: self.path.to_string_lossy().into(),
            mtime: Some(self.mtime.into()),
            is_deleted: self.is_deleted,
        }
    }
}
1999
impl language::LocalFile for File {
    /// The absolute on-disk path: the worktree's absolute root joined with
    /// this file's relative path. Panics if the worktree is not local.
    fn abs_path(&self, cx: &AppContext) -> PathBuf {
        self.worktree
            .read(cx)
            .as_local()
            .unwrap()
            .abs_path
            .join(&self.path)
    }

    /// Loads this file's contents from disk on a background thread.
    fn load(&self, cx: &AppContext) -> Task<Result<String>> {
        let worktree = self.worktree.read(cx).as_local().unwrap();
        let abs_path = worktree.absolutize(&self.path);
        let fs = worktree.fs.clone();
        cx.background()
            .spawn(async move { fs.load(&abs_path).await })
    }

    /// Notifies collaborators that a buffer backed by this file was reloaded
    /// from disk. A no-op unless the worktree is currently shared.
    fn buffer_reloaded(
        &self,
        buffer_id: u64,
        version: &clock::Global,
        fingerprint: RopeFingerprint,
        line_ending: LineEnding,
        mtime: SystemTime,
        cx: &mut AppContext,
    ) {
        let worktree = self.worktree.read(cx).as_local().unwrap();
        if let Some(project_id) = worktree.share.as_ref().map(|share| share.project_id) {
            worktree
                .client
                .send(proto::BufferReloaded {
                    project_id,
                    buffer_id,
                    version: serialize_version(version),
                    mtime: Some(mtime.into()),
                    fingerprint: serialize_fingerprint(fingerprint),
                    line_ending: serialize_line_ending(line_ending) as i32,
                })
                .log_err();
        }
    }
}
2043
2044impl File {
2045 pub fn from_proto(
2046 proto: rpc::proto::File,
2047 worktree: ModelHandle<Worktree>,
2048 cx: &AppContext,
2049 ) -> Result<Self> {
2050 let worktree_id = worktree
2051 .read(cx)
2052 .as_remote()
2053 .ok_or_else(|| anyhow!("not remote"))?
2054 .id();
2055
2056 if worktree_id.to_proto() != proto.worktree_id {
2057 return Err(anyhow!("worktree id does not match file"));
2058 }
2059
2060 Ok(Self {
2061 worktree,
2062 path: Path::new(&proto.path).into(),
2063 mtime: proto.mtime.ok_or_else(|| anyhow!("no timestamp"))?.into(),
2064 entry_id: ProjectEntryId::from_proto(proto.entry_id),
2065 is_local: false,
2066 is_deleted: proto.is_deleted,
2067 })
2068 }
2069
2070 pub fn from_dyn(file: Option<&Arc<dyn language::File>>) -> Option<&Self> {
2071 file.and_then(|f| f.as_any().downcast_ref())
2072 }
2073
2074 pub fn worktree_id(&self, cx: &AppContext) -> WorktreeId {
2075 self.worktree.read(cx).id()
2076 }
2077
2078 pub fn project_entry_id(&self, _: &AppContext) -> Option<ProjectEntryId> {
2079 if self.is_deleted {
2080 None
2081 } else {
2082 Some(self.entry_id)
2083 }
2084 }
2085}
2086
/// A single file or directory within a worktree snapshot.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct Entry {
    /// Stable id of this entry within the project.
    pub id: ProjectEntryId,
    /// Whether this is a file, a scanned directory, or a pending directory.
    pub kind: EntryKind,
    /// Path relative to the worktree root.
    pub path: Arc<Path>,
    /// Filesystem inode, used to keep ids stable across removals/renames.
    pub inode: u64,
    /// Last-known modification time.
    pub mtime: SystemTime,
    pub is_symlink: bool,
    /// Whether a gitignore rule excludes this entry.
    pub is_ignored: bool,
}
2097
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum EntryKind {
    /// A directory that has been discovered but whose children have not yet
    /// been scanned (see `LocalSnapshot::populate_dir`).
    PendingDir,
    /// A fully scanned directory.
    Dir,
    /// A file, carrying a character bag of its path for fuzzy matching.
    File(CharBag),
}
2104
/// The kind of change observed for a path between two snapshots.
#[derive(Clone, Copy, Debug)]
pub enum PathChange {
    Added,
    Removed,
    Updated,
    /// Used when it can't be determined whether the path was added or merely
    /// updated (e.g. events observed before the initial scan completed).
    AddedOrUpdated,
}
2112
2113impl Entry {
2114 fn new(
2115 path: Arc<Path>,
2116 metadata: &fs::Metadata,
2117 next_entry_id: &AtomicUsize,
2118 root_char_bag: CharBag,
2119 ) -> Self {
2120 Self {
2121 id: ProjectEntryId::new(next_entry_id),
2122 kind: if metadata.is_dir {
2123 EntryKind::PendingDir
2124 } else {
2125 EntryKind::File(char_bag_for_path(root_char_bag, &path))
2126 },
2127 path,
2128 inode: metadata.inode,
2129 mtime: metadata.mtime,
2130 is_symlink: metadata.is_symlink,
2131 is_ignored: false,
2132 }
2133 }
2134
2135 pub fn is_dir(&self) -> bool {
2136 matches!(self.kind, EntryKind::Dir | EntryKind::PendingDir)
2137 }
2138
2139 pub fn is_file(&self) -> bool {
2140 matches!(self.kind, EntryKind::File(_))
2141 }
2142}
2143
2144impl sum_tree::Item for Entry {
2145 type Summary = EntrySummary;
2146
2147 fn summary(&self) -> Self::Summary {
2148 let visible_count = if self.is_ignored { 0 } else { 1 };
2149 let file_count;
2150 let visible_file_count;
2151 if self.is_file() {
2152 file_count = 1;
2153 visible_file_count = visible_count;
2154 } else {
2155 file_count = 0;
2156 visible_file_count = 0;
2157 }
2158
2159 EntrySummary {
2160 max_path: self.path.clone(),
2161 count: 1,
2162 visible_count,
2163 file_count,
2164 visible_file_count,
2165 }
2166 }
2167}
2168
2169impl sum_tree::KeyedItem for Entry {
2170 type Key = PathKey;
2171
2172 fn key(&self) -> Self::Key {
2173 PathKey(self.path.clone())
2174 }
2175}
2176
/// Aggregate metadata over a subtree of `Entry` nodes in `entries_by_path`.
#[derive(Clone, Debug)]
pub struct EntrySummary {
    /// The greatest (rightmost) path in the subtree.
    max_path: Arc<Path>,
    /// Total number of entries.
    count: usize,
    /// Entries that are not gitignored.
    visible_count: usize,
    /// Entries that are files.
    file_count: usize,
    /// Entries that are files and not gitignored.
    visible_file_count: usize,
}
2185
2186impl Default for EntrySummary {
2187 fn default() -> Self {
2188 Self {
2189 max_path: Arc::from(Path::new("")),
2190 count: 0,
2191 visible_count: 0,
2192 file_count: 0,
2193 visible_file_count: 0,
2194 }
2195 }
2196}
2197
2198impl sum_tree::Summary for EntrySummary {
2199 type Context = ();
2200
2201 fn add_summary(&mut self, rhs: &Self, _: &()) {
2202 self.max_path = rhs.max_path.clone();
2203 self.count += rhs.count;
2204 self.visible_count += rhs.visible_count;
2205 self.file_count += rhs.file_count;
2206 self.visible_file_count += rhs.visible_file_count;
2207 }
2208}
2209
/// Compact record of an entry, keyed by id; stored in `entries_by_id` to map
/// stable ids back to paths.
#[derive(Clone, Debug)]
struct PathEntry {
    id: ProjectEntryId,
    path: Arc<Path>,
    is_ignored: bool,
    /// The scan during which this entry was last modified; used by
    /// `build_update` to detect changed entries.
    scan_id: usize,
}
2217
impl sum_tree::Item for PathEntry {
    type Summary = PathEntrySummary;

    /// The summary of a single record is just its id (the max over one item).
    fn summary(&self) -> Self::Summary {
        PathEntrySummary { max_id: self.id }
    }
}
2225
impl sum_tree::KeyedItem for PathEntry {
    type Key = ProjectEntryId;

    /// Records are keyed — and therefore ordered — by entry id.
    fn key(&self) -> Self::Key {
        self.id
    }
}
2233
/// Aggregate over a subtree of `PathEntry` records: the greatest id seen.
#[derive(Clone, Debug, Default)]
struct PathEntrySummary {
    max_id: ProjectEntryId,
}
2238
impl sum_tree::Summary for PathEntrySummary {
    type Context = ();

    /// The right-hand neighbor's max id is always the larger, since records
    /// are ordered by id.
    fn add_summary(&mut self, summary: &Self, _: &Self::Context) {
        self.max_id = summary.max_id;
    }
}
2246
/// Lets cursors over `entries_by_id` seek by entry id.
impl<'a> sum_tree::Dimension<'a, PathEntrySummary> for ProjectEntryId {
    fn add_summary(&mut self, summary: &'a PathEntrySummary, _: &()) {
        *self = summary.max_id;
    }
}
2252
/// Ordering key for `entries_by_path`: an entry's worktree-relative path.
#[derive(Clone, Debug, Eq, PartialEq, Ord, PartialOrd)]
pub struct PathKey(Arc<Path>);
2255
2256impl Default for PathKey {
2257 fn default() -> Self {
2258 Self(Path::new("").into())
2259 }
2260}
2261
/// Lets cursors over `entries_by_path` seek by path.
impl<'a> sum_tree::Dimension<'a, EntrySummary> for PathKey {
    fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
        self.0 = summary.max_path.clone();
    }
}
2267
/// Scans a local worktree's directory tree on a background thread and keeps
/// its snapshot up to date as filesystem events arrive.
struct BackgroundScanner {
    /// The live snapshot, shared with the foreground via this mutex.
    snapshot: Mutex<LocalSnapshot>,
    fs: Arc<dyn Fs>,
    /// Channel used to report scan progress/state to the worktree.
    status_updates_tx: UnboundedSender<ScanState>,
    executor: Arc<executor::Background>,
    /// Requests from the worktree to rescan specific paths.
    refresh_requests_rx: channel::Receiver<(Vec<PathBuf>, barrier::Sender)>,
    // NOTE(review): initialized in `new` as (initial snapshot clone, empty
    // vec); the Vec appears to accumulate changed paths between status
    // updates — confirm against the event-processing code.
    prev_state: Mutex<(Snapshot, Vec<Arc<Path>>)>,
    /// Set once the initial full scan has completed.
    finished_initial_scan: bool,
}
2277
2278impl BackgroundScanner {
2279 fn new(
2280 snapshot: LocalSnapshot,
2281 fs: Arc<dyn Fs>,
2282 status_updates_tx: UnboundedSender<ScanState>,
2283 executor: Arc<executor::Background>,
2284 refresh_requests_rx: channel::Receiver<(Vec<PathBuf>, barrier::Sender)>,
2285 ) -> Self {
2286 Self {
2287 fs,
2288 status_updates_tx,
2289 executor,
2290 refresh_requests_rx,
2291 prev_state: Mutex::new((snapshot.snapshot.clone(), Vec::new())),
2292 snapshot: Mutex::new(snapshot),
2293 finished_initial_scan: false,
2294 }
2295 }
2296
    /// Drives the scanner: performs the initial recursive scan of the root,
    /// then loops forever, interleaving explicit refresh requests with
    /// batches of file system events, until either channel closes.
    async fn run(
        &mut self,
        mut events_rx: Pin<Box<dyn Send + Stream<Item = Vec<fsevent::Event>>>>,
    ) {
        use futures::FutureExt as _;

        let (root_abs_path, root_inode) = {
            let snapshot = self.snapshot.lock();
            (
                snapshot.abs_path.clone(),
                snapshot.root_entry().map(|e| e.inode),
            )
        };

        // Populate ignores above the root.
        let ignore_stack;
        for ancestor in root_abs_path.ancestors().skip(1) {
            if let Ok(ignore) = build_gitignore(&ancestor.join(&*GITIGNORE), self.fs.as_ref()).await
            {
                self.snapshot
                    .lock()
                    .ignores_by_parent_abs_path
                    .insert(ancestor.into(), (ignore.into(), 0));
            }
        }
        {
            let mut snapshot = self.snapshot.lock();
            snapshot.scan_id += 1;
            ignore_stack = snapshot.ignore_stack_for_abs_path(&root_abs_path, true);
            // If an ancestor gitignore matches the root itself, mark the root
            // entry as ignored before scanning begins.
            if ignore_stack.is_all() {
                if let Some(mut root_entry) = snapshot.root_entry().cloned() {
                    root_entry.is_ignored = true;
                    snapshot.insert_entry(root_entry, self.fs.as_ref());
                }
            }
        };

        // Perform an initial scan of the directory.
        let (scan_job_tx, scan_job_rx) = channel::unbounded();
        smol::block_on(scan_job_tx.send(ScanJob {
            abs_path: root_abs_path,
            path: Arc::from(Path::new("")),
            ignore_stack,
            ancestor_inodes: TreeSet::from_ordered_entries(root_inode),
            scan_queue: scan_job_tx.clone(),
        }))
        .unwrap();
        // Dropping the last external sender lets `scan_dirs` terminate once
        // all queued jobs (including recursively enqueued ones) are done.
        drop(scan_job_tx);
        self.scan_dirs(true, scan_job_rx).await;
        {
            let mut snapshot = self.snapshot.lock();
            snapshot.completed_scan_id = snapshot.scan_id;
        }
        self.send_status_update(false, None);

        // Process any FS events that occurred while performing the initial scan.
        // For these events, update events cannot be as precise, because we didn't
        // have the previous state loaded yet.
        if let Poll::Ready(Some(events)) = futures::poll!(events_rx.next()) {
            let mut paths = events.into_iter().map(|e| e.path).collect::<Vec<_>>();
            while let Poll::Ready(Some(more_events)) = futures::poll!(events_rx.next()) {
                paths.extend(more_events.into_iter().map(|e| e.path));
            }
            self.process_events(paths).await;
        }

        self.finished_initial_scan = true;

        // Continue processing events until the worktree is dropped.
        loop {
            select_biased! {
                // Process any path refresh requests from the worktree. Prioritize
                // these before handling changes reported by the filesystem.
                request = self.refresh_requests_rx.recv().fuse() => {
                    let Ok((paths, barrier)) = request else { break };
                    if !self.process_refresh_request(paths, barrier).await {
                        return;
                    }
                }

                events = events_rx.next().fuse() => {
                    let Some(events) = events else { break };
                    // Coalesce any already-pending event batches into a single pass.
                    let mut paths = events.into_iter().map(|e| e.path).collect::<Vec<_>>();
                    while let Poll::Ready(Some(more_events)) = futures::poll!(events_rx.next()) {
                        paths.extend(more_events.into_iter().map(|e| e.path));
                    }
                    self.process_events(paths).await;
                }
            }
        }
    }
2388
    /// Re-scans the given paths and sends a status update carrying `barrier`,
    /// which the requester is awaiting. Returns false if the worktree has
    /// been dropped (the status channel is closed).
    async fn process_refresh_request(&self, paths: Vec<PathBuf>, barrier: barrier::Sender) -> bool {
        self.reload_entries_for_paths(paths, None).await;
        self.send_status_update(false, Some(barrier))
    }
2393
    /// Handles a batch of file-system event paths: reloads the affected
    /// entries, recursively scans directories that need it, refreshes ignore
    /// statuses, prunes git state for removed entries, and emits a status
    /// update.
    async fn process_events(&mut self, paths: Vec<PathBuf>) {
        let (scan_job_tx, scan_job_rx) = channel::unbounded();
        if let Some(mut paths) = self
            .reload_entries_for_paths(paths, Some(scan_job_tx.clone()))
            .await
        {
            // Record which relative paths changed so the next status update
            // can build a precise change set.
            paths.sort_unstable();
            util::extend_sorted(&mut self.prev_state.lock().1, paths, usize::MAX, Ord::cmp);
        }
        drop(scan_job_tx);
        self.scan_dirs(false, scan_job_rx).await;

        self.update_ignore_statuses().await;

        let mut snapshot = self.snapshot.lock();

        // Drop git repository state whose backing entries no longer exist.
        // `mem::take` lets us call `contains_entry` on the snapshot while
        // mutating the map, then put it back.
        let mut git_repositories = mem::take(&mut snapshot.git_repositories);
        git_repositories.retain(|project_entry_id, _| snapshot.contains_entry(*project_entry_id));
        snapshot.git_repositories = git_repositories;

        let mut git_repository_entries = mem::take(&mut snapshot.snapshot.repository_entries);
        git_repository_entries.retain(|_, entry| snapshot.contains_entry(entry.git_dir_entry_id));
        snapshot.snapshot.repository_entries = git_repository_entries;

        snapshot.removed_entry_ids.clear();
        snapshot.completed_scan_id = snapshot.scan_id;

        drop(snapshot);

        self.send_status_update(false, None);
    }
2425
    /// Drains `scan_jobs_rx` using one worker task per CPU, optionally
    /// emitting periodic progress updates. Refresh requests from the worktree
    /// are serviced with priority while scanning.
    async fn scan_dirs(
        &self,
        enable_progress_updates: bool,
        scan_jobs_rx: channel::Receiver<ScanJob>,
    ) {
        use futures::FutureExt as _;

        if self
            .status_updates_tx
            .unbounded_send(ScanState::Started)
            .is_err()
        {
            // The worktree has been dropped; there is nobody to scan for.
            return;
        }

        let progress_update_count = AtomicUsize::new(0);
        self.executor
            .scoped(|scope| {
                for _ in 0..self.executor.num_cpus() {
                    scope.spawn(async {
                        let mut last_progress_update_count = 0;
                        let progress_update_timer = self.progress_timer(enable_progress_updates).fuse();
                        futures::pin_mut!(progress_update_timer);

                        loop {
                            select_biased! {
                                // Process any path refresh requests before moving on to process
                                // the scan queue, so that user operations are prioritized.
                                request = self.refresh_requests_rx.recv().fuse() => {
                                    let Ok((paths, barrier)) = request else { break };
                                    if !self.process_refresh_request(paths, barrier).await {
                                        return;
                                    }
                                }

                                // Send periodic progress updates to the worktree. Use an atomic counter
                                // to ensure that only one of the workers sends a progress update after
                                // the update interval elapses.
                                _ = progress_update_timer => {
                                    match progress_update_count.compare_exchange(
                                        last_progress_update_count,
                                        last_progress_update_count + 1,
                                        SeqCst,
                                        SeqCst
                                    ) {
                                        Ok(_) => {
                                            last_progress_update_count += 1;
                                            self.send_status_update(true, None);
                                        }
                                        Err(count) => {
                                            // Another worker won the race; adopt its counter value.
                                            last_progress_update_count = count;
                                        }
                                    }
                                    progress_update_timer.set(self.progress_timer(enable_progress_updates).fuse());
                                }

                                // Recursively load directories from the file system.
                                job = scan_jobs_rx.recv().fuse() => {
                                    let Ok(job) = job else { break };
                                    if let Err(err) = self.scan_dir(&job).await {
                                        // Errors on the root itself are surfaced elsewhere.
                                        if job.path.as_ref() != Path::new("") {
                                            log::error!("error scanning directory {:?}: {}", job.abs_path, err);
                                        }
                                    }
                                }
                            }
                        }
                    })
                }
            })
            .await;
    }
2498
    /// Computes the change set since the previous update and sends a
    /// `ScanState::Updated` message to the worktree. Returns false if the
    /// receiving end has been dropped.
    fn send_status_update(&self, scanning: bool, barrier: Option<barrier::Sender>) -> bool {
        let mut prev_state = self.prev_state.lock();
        let snapshot = self.snapshot.lock().clone();
        // Swap the stored baseline for the current snapshot: `old_snapshot`
        // becomes the previous state, and the current one is kept as the
        // baseline for the next update.
        let mut old_snapshot = snapshot.snapshot.clone();
        mem::swap(&mut old_snapshot, &mut prev_state.0);
        let changed_paths = mem::take(&mut prev_state.1);
        let changes = self.build_change_set(&old_snapshot, &snapshot.snapshot, changed_paths);
        self.status_updates_tx
            .unbounded_send(ScanState::Updated {
                snapshot,
                changes,
                scanning,
                barrier,
            })
            .is_ok()
    }
2515
    /// Reads the directory described by `job`, inserting its children into
    /// the snapshot and enqueueing a new `ScanJob` for each subdirectory that
    /// does not form a symlink cycle.
    async fn scan_dir(&self, job: &ScanJob) -> Result<()> {
        let mut new_entries: Vec<Entry> = Vec::new();
        // One slot per child directory, in entry order; `None` marks a
        // directory that must not be descended into (recursive symlink).
        let mut new_jobs: Vec<Option<ScanJob>> = Vec::new();
        let mut ignore_stack = job.ignore_stack.clone();
        let mut new_ignore = None;
        let (root_abs_path, root_char_bag, next_entry_id) = {
            let snapshot = self.snapshot.lock();
            (
                snapshot.abs_path().clone(),
                snapshot.root_char_bag,
                snapshot.next_entry_id.clone(),
            )
        };
        let mut child_paths = self.fs.read_dir(&job.abs_path).await?;
        while let Some(child_abs_path) = child_paths.next().await {
            let child_abs_path: Arc<Path> = match child_abs_path {
                Ok(child_abs_path) => child_abs_path.into(),
                Err(error) => {
                    log::error!("error processing entry {:?}", error);
                    continue;
                }
            };

            let child_name = child_abs_path.file_name().unwrap();
            let child_path: Arc<Path> = job.path.join(child_name).into();
            let child_metadata = match self.fs.metadata(&child_abs_path).await {
                Ok(Some(metadata)) => metadata,
                // The file was removed between listing and stat; skip it.
                Ok(None) => continue,
                Err(err) => {
                    log::error!("error processing {:?}: {:?}", child_abs_path, err);
                    continue;
                }
            };

            // If we find a .gitignore, add it to the stack of ignores used to determine which paths are ignored
            if child_name == *GITIGNORE {
                match build_gitignore(&child_abs_path, self.fs.as_ref()).await {
                    Ok(ignore) => {
                        let ignore = Arc::new(ignore);
                        ignore_stack = ignore_stack.append(job.abs_path.clone(), ignore.clone());
                        new_ignore = Some(ignore);
                    }
                    Err(error) => {
                        log::error!(
                            "error loading .gitignore file {:?} - {:?}",
                            child_name,
                            error
                        );
                    }
                }

                // Update ignore status of any child entries we've already processed to reflect the
                // ignore file in the current directory. Because `.gitignore` starts with a `.`, it
                // sorts early in the listing, so such entries should rarely be numerous. Update the
                // ignore stack associated with any new jobs as well.
                let mut new_jobs = new_jobs.iter_mut();
                for entry in &mut new_entries {
                    let entry_abs_path = root_abs_path.join(&entry.path);
                    entry.is_ignored =
                        ignore_stack.is_abs_path_ignored(&entry_abs_path, entry.is_dir());

                    // `new_jobs` has one slot per directory entry, in order,
                    // so the iterators stay in lockstep.
                    if entry.is_dir() {
                        if let Some(job) = new_jobs.next().expect("Missing scan job for entry") {
                            job.ignore_stack = if entry.is_ignored {
                                IgnoreStack::all()
                            } else {
                                ignore_stack.clone()
                            };
                        }
                    }
                }
            }

            let mut child_entry = Entry::new(
                child_path.clone(),
                &child_metadata,
                &next_entry_id,
                root_char_bag,
            );

            if child_entry.is_dir() {
                let is_ignored = ignore_stack.is_abs_path_ignored(&child_abs_path, true);
                child_entry.is_ignored = is_ignored;

                // Avoid recursing until crash in the case of a recursive symlink
                if !job.ancestor_inodes.contains(&child_entry.inode) {
                    let mut ancestor_inodes = job.ancestor_inodes.clone();
                    ancestor_inodes.insert(child_entry.inode);

                    new_jobs.push(Some(ScanJob {
                        abs_path: child_abs_path,
                        path: child_path,
                        ignore_stack: if is_ignored {
                            IgnoreStack::all()
                        } else {
                            ignore_stack.clone()
                        },
                        ancestor_inodes,
                        scan_queue: job.scan_queue.clone(),
                    }));
                } else {
                    new_jobs.push(None);
                }
            } else {
                child_entry.is_ignored = ignore_stack.is_abs_path_ignored(&child_abs_path, false);
            }

            new_entries.push(child_entry);
        }

        self.snapshot.lock().populate_dir(
            job.path.clone(),
            new_entries,
            new_ignore,
            self.fs.as_ref(),
        );

        // Enqueue child-directory jobs only after this directory's entries
        // have been inserted into the snapshot.
        for new_job in new_jobs {
            if let Some(new_job) = new_job {
                job.scan_queue.send(new_job).await.unwrap();
            }
        }

        Ok(())
    }
2641
    /// Refreshes the snapshot entries for the given absolute paths. When
    /// `scan_queue_tx` is provided, affected directories are also enqueued
    /// for recursive re-scanning. Returns the corresponding worktree-relative
    /// paths, or `None` if the root path could not be canonicalized.
    async fn reload_entries_for_paths(
        &self,
        mut abs_paths: Vec<PathBuf>,
        scan_queue_tx: Option<Sender<ScanJob>>,
    ) -> Option<Vec<Arc<Path>>> {
        let doing_recursive_update = scan_queue_tx.is_some();

        // Sorting places ancestors before descendants, so deduping by prefix
        // drops paths already covered by a parent in the same batch.
        abs_paths.sort_unstable();
        abs_paths.dedup_by(|a, b| a.starts_with(&b));

        let root_abs_path = self.snapshot.lock().abs_path.clone();
        let root_canonical_path = self.fs.canonicalize(&root_abs_path).await.log_err()?;
        let metadata = futures::future::join_all(
            abs_paths
                .iter()
                .map(|abs_path| self.fs.metadata(&abs_path))
                .collect::<Vec<_>>(),
        )
        .await;

        let mut snapshot = self.snapshot.lock();
        let is_idle = snapshot.completed_scan_id == snapshot.scan_id;
        snapshot.scan_id += 1;
        // A non-recursive refresh completes synchronously, so the scan can be
        // marked complete immediately if it was idle before.
        if is_idle && !doing_recursive_update {
            snapshot.completed_scan_id = snapshot.scan_id;
        }

        // Remove any entries for paths that no longer exist or are being recursively
        // refreshed. Do this before adding any new entries, so that renames can be
        // detected regardless of the order of the paths.
        let mut event_paths = Vec::<Arc<Path>>::with_capacity(abs_paths.len());
        for (abs_path, metadata) in abs_paths.iter().zip(metadata.iter()) {
            if let Ok(path) = abs_path.strip_prefix(&root_canonical_path) {
                if matches!(metadata, Ok(None)) || doing_recursive_update {
                    snapshot.remove_path(path);
                }
                event_paths.push(path.into());
            } else {
                log::error!(
                    "unexpected event {:?} for root path {:?}",
                    abs_path,
                    root_canonical_path
                );
            }
        }

        for (path, metadata) in event_paths.iter().cloned().zip(metadata.into_iter()) {
            let abs_path: Arc<Path> = root_abs_path.join(&path).into();

            match metadata {
                Ok(Some(metadata)) => {
                    let ignore_stack =
                        snapshot.ignore_stack_for_abs_path(&abs_path, metadata.is_dir);
                    let mut fs_entry = Entry::new(
                        path.clone(),
                        &metadata,
                        snapshot.next_entry_id.as_ref(),
                        snapshot.root_char_bag,
                    );
                    fs_entry.is_ignored = ignore_stack.is_all();
                    snapshot.insert_entry(fs_entry, self.fs.as_ref());

                    let scan_id = snapshot.scan_id;

                    // If the path lies within a repository's `.git` directory,
                    // reload that repository's index and branch information.
                    let repo_with_path_in_dotgit = snapshot.repo_for_metadata(&path);
                    if let Some((key, repo)) = repo_with_path_in_dotgit {
                        let repo = repo.lock();
                        repo.reload_index();
                        let branch = repo.branch_name();

                        snapshot.repository_entries.update(&key, |entry| {
                            entry.scan_id = scan_id;
                            entry.branch = branch.map(Into::into)
                        });
                    }

                    if let Some(scan_queue_tx) = &scan_queue_tx {
                        let mut ancestor_inodes = snapshot.ancestor_inodes_for_path(&path);
                        // Only descend into directories that do not introduce
                        // a symlink cycle.
                        if metadata.is_dir && !ancestor_inodes.contains(&metadata.inode) {
                            ancestor_inodes.insert(metadata.inode);
                            smol::block_on(scan_queue_tx.send(ScanJob {
                                abs_path,
                                path,
                                ignore_stack,
                                ancestor_inodes,
                                scan_queue: scan_queue_tx.clone(),
                            }))
                            .unwrap();
                        }
                    }
                }
                // The path no longer exists; its entry was already removed above.
                Ok(None) => {}
                Err(err) => {
                    // TODO - create a special 'error' entry in the entries tree to mark this
                    log::error!("error reading file on event {:?}", err);
                }
            }
        }

        Some(event_paths)
    }
2743
    /// Recomputes ignore statuses after `.gitignore` changes: drops state for
    /// deleted ignore files and re-evaluates every directory whose governing
    /// gitignore changed since the last completed scan.
    async fn update_ignore_statuses(&self) {
        use futures::FutureExt as _;

        let mut snapshot = self.snapshot.lock().clone();
        let mut ignores_to_update = Vec::new();
        let mut ignores_to_delete = Vec::new();
        for (parent_abs_path, (_, scan_id)) in &snapshot.ignores_by_parent_abs_path {
            if let Ok(parent_path) = parent_abs_path.strip_prefix(&snapshot.abs_path) {
                // A gitignore touched after the last completed scan means its
                // directory's descendants need their statuses recomputed.
                if *scan_id > snapshot.completed_scan_id
                    && snapshot.entry_for_path(parent_path).is_some()
                {
                    ignores_to_update.push(parent_abs_path.clone());
                }

                // The `.gitignore` file itself no longer exists.
                let ignore_path = parent_path.join(&*GITIGNORE);
                if snapshot.entry_for_path(ignore_path).is_none() {
                    ignores_to_delete.push(parent_abs_path.clone());
                }
            }
        }

        for parent_abs_path in ignores_to_delete {
            // Remove from both the local clone and the shared snapshot.
            snapshot.ignores_by_parent_abs_path.remove(&parent_abs_path);
            self.snapshot
                .lock()
                .ignores_by_parent_abs_path
                .remove(&parent_abs_path);
        }

        let (ignore_queue_tx, ignore_queue_rx) = channel::unbounded();
        ignores_to_update.sort_unstable();
        let mut ignores_to_update = ignores_to_update.into_iter().peekable();
        while let Some(parent_abs_path) = ignores_to_update.next() {
            // Skip descendants of an already-queued directory: updating a
            // directory re-processes its whole subtree.
            while ignores_to_update
                .peek()
                .map_or(false, |p| p.starts_with(&parent_abs_path))
            {
                ignores_to_update.next().unwrap();
            }

            let ignore_stack = snapshot.ignore_stack_for_abs_path(&parent_abs_path, true);
            smol::block_on(ignore_queue_tx.send(UpdateIgnoreStatusJob {
                abs_path: parent_abs_path,
                ignore_stack,
                ignore_queue: ignore_queue_tx.clone(),
            }))
            .unwrap();
        }
        drop(ignore_queue_tx);

        // Drain the queue with one worker per CPU, still prioritizing
        // refresh requests from the worktree.
        self.executor
            .scoped(|scope| {
                for _ in 0..self.executor.num_cpus() {
                    scope.spawn(async {
                        loop {
                            select_biased! {
                                // Process any path refresh requests before moving on to process
                                // the queue of ignore statuses.
                                request = self.refresh_requests_rx.recv().fuse() => {
                                    let Ok((paths, barrier)) = request else { break };
                                    if !self.process_refresh_request(paths, barrier).await {
                                        return;
                                    }
                                }

                                // Recursively process directories whose ignores have changed.
                                job = ignore_queue_rx.recv().fuse() => {
                                    let Ok(job) = job else { break };
                                    self.update_ignore_status(job, &snapshot).await;
                                }
                            }
                        }
                    });
                }
            })
            .await;
    }
2821
    /// Recomputes the ignored flag for each child of `job.abs_path`,
    /// enqueueing a follow-up job for every subdirectory, and applies any
    /// flag changes to the shared snapshot.
    async fn update_ignore_status(&self, job: UpdateIgnoreStatusJob, snapshot: &LocalSnapshot) {
        let mut ignore_stack = job.ignore_stack;
        if let Some((ignore, _)) = snapshot.ignores_by_parent_abs_path.get(&job.abs_path) {
            ignore_stack = ignore_stack.append(job.abs_path.clone(), ignore.clone());
        }

        let mut entries_by_id_edits = Vec::new();
        let mut entries_by_path_edits = Vec::new();
        let path = job.abs_path.strip_prefix(&snapshot.abs_path).unwrap();
        for mut entry in snapshot.child_entries(path).cloned() {
            let was_ignored = entry.is_ignored;
            let abs_path = snapshot.abs_path().join(&entry.path);
            entry.is_ignored = ignore_stack.is_abs_path_ignored(&abs_path, entry.is_dir());
            if entry.is_dir() {
                // Everything beneath an ignored directory is ignored.
                let child_ignore_stack = if entry.is_ignored {
                    IgnoreStack::all()
                } else {
                    ignore_stack.clone()
                };
                job.ignore_queue
                    .send(UpdateIgnoreStatusJob {
                        abs_path: abs_path.into(),
                        ignore_stack: child_ignore_stack,
                        ignore_queue: job.ignore_queue.clone(),
                    })
                    .await
                    .unwrap();
            }

            // Only write back entries whose ignored flag actually changed.
            if entry.is_ignored != was_ignored {
                let mut path_entry = snapshot.entries_by_id.get(&entry.id, &()).unwrap().clone();
                path_entry.scan_id = snapshot.scan_id;
                path_entry.is_ignored = entry.is_ignored;
                entries_by_id_edits.push(Edit::Insert(path_entry));
                entries_by_path_edits.push(Edit::Insert(entry));
            }
        }

        let mut snapshot = self.snapshot.lock();
        snapshot.entries_by_path.edit(entries_by_path_edits, &());
        snapshot.entries_by_id.edit(entries_by_id_edits, &());
    }
2864
    /// Diffs the old and new snapshots at the given paths (including each
    /// path's subtree), producing a map of per-path changes to report to the
    /// worktree.
    fn build_change_set(
        &self,
        old_snapshot: &Snapshot,
        new_snapshot: &Snapshot,
        event_paths: Vec<Arc<Path>>,
    ) -> HashMap<Arc<Path>, PathChange> {
        use PathChange::{Added, AddedOrUpdated, Removed, Updated};

        let mut changes = HashMap::default();
        let mut old_paths = old_snapshot.entries_by_path.cursor::<PathKey>();
        let mut new_paths = new_snapshot.entries_by_path.cursor::<PathKey>();
        let received_before_initialized = !self.finished_initial_scan;

        for path in event_paths {
            let path = PathKey(path);
            old_paths.seek(&path, Bias::Left, &());
            new_paths.seek(&path, Bias::Left, &());

            // Walk both cursors in lockstep while either remains within this
            // path's subtree.
            loop {
                match (old_paths.item(), new_paths.item()) {
                    (Some(old_entry), Some(new_entry)) => {
                        // Both cursors have moved past the path and its
                        // descendants; this event path is fully processed.
                        if old_entry.path > path.0
                            && new_entry.path > path.0
                            && !old_entry.path.starts_with(&path.0)
                            && !new_entry.path.starts_with(&path.0)
                        {
                            break;
                        }

                        match Ord::cmp(&old_entry.path, &new_entry.path) {
                            // Present only in the old snapshot: removed.
                            Ordering::Less => {
                                changes.insert(old_entry.path.clone(), Removed);
                                old_paths.next(&());
                            }
                            Ordering::Equal => {
                                if received_before_initialized {
                                    // If the worktree was not fully initialized when this event was generated,
                                    // we can't know whether this entry was added during the scan or whether
                                    // it was merely updated.
                                    changes.insert(new_entry.path.clone(), AddedOrUpdated);
                                } else if old_entry.mtime != new_entry.mtime {
                                    changes.insert(new_entry.path.clone(), Updated);
                                }
                                old_paths.next(&());
                                new_paths.next(&());
                            }
                            // Present only in the new snapshot: added.
                            Ordering::Greater => {
                                changes.insert(new_entry.path.clone(), Added);
                                new_paths.next(&());
                            }
                        }
                    }
                    (Some(old_entry), None) => {
                        changes.insert(old_entry.path.clone(), Removed);
                        old_paths.next(&());
                    }
                    (None, Some(new_entry)) => {
                        changes.insert(new_entry.path.clone(), Added);
                        new_paths.next(&());
                    }
                    (None, None) => break,
                }
            }
        }
        changes
    }
2931
    /// Waits one progress-update interval. When `running` is false, this
    /// never resolves, effectively disabling the progress branch of
    /// `select_biased!` in `scan_dirs`.
    async fn progress_timer(&self, running: bool) {
        if !running {
            return futures::future::pending().await;
        }

        // Under test with a fake filesystem, yield with a simulated delay so
        // the deterministic executor stays in control of timing.
        #[cfg(any(test, feature = "test-support"))]
        if self.fs.is_fake() {
            return self.executor.simulate_random_delay().await;
        }

        smol::Timer::after(Duration::from_millis(100)).await;
    }
2944}
2945
2946fn char_bag_for_path(root_char_bag: CharBag, path: &Path) -> CharBag {
2947 let mut result = root_char_bag;
2948 result.extend(
2949 path.to_string_lossy()
2950 .chars()
2951 .map(|c| c.to_ascii_lowercase()),
2952 );
2953 result
2954}
2955
/// A unit of work for `scan_dir`: one directory to read and insert into the
/// snapshot.
struct ScanJob {
    abs_path: Arc<Path>,
    /// The directory's path relative to the worktree root.
    path: Arc<Path>,
    ignore_stack: Arc<IgnoreStack>,
    /// Queue onto which jobs for subdirectories are pushed.
    scan_queue: Sender<ScanJob>,
    /// Inodes of all ancestor directories, used to detect symlink cycles.
    ancestor_inodes: TreeSet<u64>,
}
2963
/// A unit of work for `update_ignore_status`: one directory whose children
/// must have their ignored flags recomputed.
struct UpdateIgnoreStatusJob {
    abs_path: Arc<Path>,
    ignore_stack: Arc<IgnoreStack>,
    /// Queue onto which follow-up jobs for subdirectories are pushed.
    ignore_queue: Sender<UpdateIgnoreStatusJob>,
}
2969
pub trait WorktreeHandle {
    /// Test-only helper that waits until all pending FS events for this
    /// worktree have been observed and processed.
    #[cfg(any(test, feature = "test-support"))]
    fn flush_fs_events<'a>(
        &self,
        cx: &'a gpui::TestAppContext,
    ) -> futures::future::LocalBoxFuture<'a, ()>;
}
2977
impl WorktreeHandle for ModelHandle<Worktree> {
    // The worktree's FS event stream sometimes delivers "redundant" events for FS changes that
    // occurred before the worktree was constructed. These events can cause the worktree to perform
    // extra directory scans, and emit extra scan-state notifications.
    //
    // This function mutates the worktree's directory and waits for those mutations to be picked up,
    // to ensure that all redundant FS events have already been processed.
    #[cfg(any(test, feature = "test-support"))]
    fn flush_fs_events<'a>(
        &self,
        cx: &'a gpui::TestAppContext,
    ) -> futures::future::LocalBoxFuture<'a, ()> {
        use smol::future::FutureExt;

        let filename = "fs-event-sentinel";
        let tree = self.clone();
        let (fs, root_path) = self.read_with(cx, |tree, _| {
            let tree = tree.as_local().unwrap();
            (tree.fs.clone(), tree.abs_path().clone())
        });

        async move {
            // Create a sentinel file and wait for the worktree to see it...
            fs.create_file(&root_path.join(filename), Default::default())
                .await
                .unwrap();
            tree.condition(cx, |tree, _| tree.entry_for_path(filename).is_some())
                .await;

            // ...then delete it and wait for the deletion to be observed.
            fs.remove_file(&root_path.join(filename), Default::default())
                .await
                .unwrap();
            tree.condition(cx, |tree, _| tree.entry_for_path(filename).is_none())
                .await;

            cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
                .await;
        }
        .boxed_local()
    }
}
3018
/// Running totals accumulated while traversing `entries_by_path`; used as
/// the cursor dimension for `Traversal`, allowing seeks by path or by entry
/// count (with or without directories and ignored entries).
#[derive(Clone, Debug)]
struct TraversalProgress<'a> {
    /// The largest path seen so far in traversal order.
    max_path: &'a Path,
    /// Total entries seen.
    count: usize,
    /// Entries seen, excluding ignored ones (see `count()` below).
    visible_count: usize,
    /// File entries seen.
    file_count: usize,
    /// File entries seen, excluding ignored ones.
    visible_file_count: usize,
}
3027
3028impl<'a> TraversalProgress<'a> {
3029 fn count(&self, include_dirs: bool, include_ignored: bool) -> usize {
3030 match (include_ignored, include_dirs) {
3031 (true, true) => self.count,
3032 (true, false) => self.file_count,
3033 (false, true) => self.visible_count,
3034 (false, false) => self.visible_file_count,
3035 }
3036 }
3037}
3038
/// Accumulates per-subtree entry totals so a cursor can seek by any of the
/// counts or track the maximum path reached.
impl<'a> sum_tree::Dimension<'a, EntrySummary> for TraversalProgress<'a> {
    fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
        self.max_path = summary.max_path.as_ref();
        self.count += summary.count;
        self.visible_count += summary.visible_count;
        self.file_count += summary.file_count;
        self.visible_file_count += summary.visible_file_count;
    }
}
3048
3049impl<'a> Default for TraversalProgress<'a> {
3050 fn default() -> Self {
3051 Self {
3052 max_path: Path::new(""),
3053 count: 0,
3054 visible_count: 0,
3055 file_count: 0,
3056 visible_file_count: 0,
3057 }
3058 }
3059}
3060
/// An iterator-like cursor over worktree entries in path order, optionally
/// filtering out directories and/or ignored entries.
pub struct Traversal<'a> {
    cursor: sum_tree::Cursor<'a, Entry, TraversalProgress<'a>>,
    include_ignored: bool,
    include_dirs: bool,
}
3066
impl<'a> Traversal<'a> {
    /// Advances past the current entry to the next one matching the
    /// traversal's filters. Returns false at the end of the tree.
    pub fn advance(&mut self) -> bool {
        self.advance_to_offset(self.offset() + 1)
    }

    /// Seeks forward until `offset` matching entries have been passed.
    pub fn advance_to_offset(&mut self, offset: usize) -> bool {
        self.cursor.seek_forward(
            &TraversalTarget::Count {
                count: offset,
                include_dirs: self.include_dirs,
                include_ignored: self.include_ignored,
            },
            Bias::Right,
            &(),
        )
    }

    /// Skips over the current entry's entire subtree, stopping at the next
    /// entry outside it that matches the traversal's filters. Returns false
    /// if no such entry exists.
    pub fn advance_to_sibling(&mut self) -> bool {
        while let Some(entry) = self.cursor.item() {
            self.cursor.seek_forward(
                &TraversalTarget::PathSuccessor(&entry.path),
                Bias::Left,
                &(),
            );
            if let Some(entry) = self.cursor.item() {
                // Keep looping if the landed-on entry is filtered out.
                if (self.include_dirs || !entry.is_dir())
                    && (self.include_ignored || !entry.is_ignored)
                {
                    return true;
                }
            }
        }
        false
    }

    /// The entry at the cursor's current position, if any.
    pub fn entry(&self) -> Option<&'a Entry> {
        self.cursor.item()
    }

    /// The number of matching entries preceding the current position.
    pub fn offset(&self) -> usize {
        self.cursor
            .start()
            .count(self.include_dirs, self.include_ignored)
    }
}
3112
3113impl<'a> Iterator for Traversal<'a> {
3114 type Item = &'a Entry;
3115
3116 fn next(&mut self) -> Option<Self::Item> {
3117 if let Some(item) = self.entry() {
3118 self.advance();
3119 Some(item)
3120 } else {
3121 None
3122 }
3123 }
3124}
3125
/// A seek target for traversal cursors: an exact path, the first entry
/// outside a given path's subtree, or an entry count.
#[derive(Debug)]
enum TraversalTarget<'a> {
    /// Seek to this exact path.
    Path(&'a Path),
    /// Seek to the first entry that is neither this path nor one of its
    /// descendants.
    PathSuccessor(&'a Path),
    /// Seek by the number of entries passed, restricted to the given classes.
    Count {
        count: usize,
        include_ignored: bool,
        include_dirs: bool,
    },
}
3136
impl<'a, 'b> SeekTarget<'a, EntrySummary, TraversalProgress<'a>> for TraversalTarget<'b> {
    fn cmp(&self, cursor_location: &TraversalProgress<'a>, _: &()) -> Ordering {
        match self {
            TraversalTarget::Path(path) => path.cmp(&cursor_location.max_path),
            TraversalTarget::PathSuccessor(path) => {
                // Any position still inside `path`'s subtree compares greater,
                // driving the seek forward until the subtree has been passed.
                if !cursor_location.max_path.starts_with(path) {
                    Ordering::Equal
                } else {
                    Ordering::Greater
                }
            }
            TraversalTarget::Count {
                count,
                include_dirs,
                include_ignored,
            } => Ord::cmp(
                count,
                &cursor_location.count(*include_dirs, *include_ignored),
            ),
        }
    }
}
3159
/// Iterates over the immediate children of `parent_path`, skipping each
/// child's own descendants.
struct ChildEntriesIter<'a> {
    parent_path: &'a Path,
    traversal: Traversal<'a>,
}
3164
3165impl<'a> Iterator for ChildEntriesIter<'a> {
3166 type Item = &'a Entry;
3167
3168 fn next(&mut self) -> Option<Self::Item> {
3169 if let Some(item) = self.traversal.entry() {
3170 if item.path.starts_with(&self.parent_path) {
3171 self.traversal.advance_to_sibling();
3172 return Some(item);
3173 }
3174 }
3175 None
3176 }
3177}
3178
3179impl<'a> From<&'a Entry> for proto::Entry {
3180 fn from(entry: &'a Entry) -> Self {
3181 Self {
3182 id: entry.id.to_proto(),
3183 is_dir: entry.is_dir(),
3184 path: entry.path.to_string_lossy().into(),
3185 inode: entry.inode,
3186 mtime: Some(entry.mtime.into()),
3187 is_symlink: entry.is_symlink,
3188 is_ignored: entry.is_ignored,
3189 }
3190 }
3191}
3192
3193impl<'a> TryFrom<(&'a CharBag, proto::Entry)> for Entry {
3194 type Error = anyhow::Error;
3195
3196 fn try_from((root_char_bag, entry): (&'a CharBag, proto::Entry)) -> Result<Self> {
3197 if let Some(mtime) = entry.mtime {
3198 let kind = if entry.is_dir {
3199 EntryKind::Dir
3200 } else {
3201 let mut char_bag = *root_char_bag;
3202 char_bag.extend(entry.path.chars().map(|c| c.to_ascii_lowercase()));
3203 EntryKind::File(char_bag)
3204 };
3205 let path: Arc<Path> = PathBuf::from(entry.path).into();
3206 Ok(Entry {
3207 id: ProjectEntryId::from_proto(entry.id),
3208 kind,
3209 path,
3210 inode: entry.inode,
3211 mtime: mtime.into(),
3212 is_symlink: entry.is_symlink,
3213 is_ignored: entry.is_ignored,
3214 })
3215 } else {
3216 Err(anyhow!(
3217 "missing mtime in remote worktree entry {:?}",
3218 entry.path
3219 ))
3220 }
3221 }
3222}
3223
3224#[cfg(test)]
3225mod tests {
3226 use super::*;
3227 use fs::{FakeFs, RealFs};
3228 use gpui::{executor::Deterministic, TestAppContext};
3229 use pretty_assertions::assert_eq;
3230 use rand::prelude::*;
3231 use serde_json::json;
3232 use std::{env, fmt::Write};
3233 use util::{http::FakeHttpClient, test::temp_tree};
3234
3235 #[gpui::test]
3236 async fn test_traversal(cx: &mut TestAppContext) {
3237 let fs = FakeFs::new(cx.background());
3238 fs.insert_tree(
3239 "/root",
3240 json!({
3241 ".gitignore": "a/b\n",
3242 "a": {
3243 "b": "",
3244 "c": "",
3245 }
3246 }),
3247 )
3248 .await;
3249
3250 let http_client = FakeHttpClient::with_404_response();
3251 let client = cx.read(|cx| Client::new(http_client, cx));
3252
3253 let tree = Worktree::local(
3254 client,
3255 Path::new("/root"),
3256 true,
3257 fs,
3258 Default::default(),
3259 &mut cx.to_async(),
3260 )
3261 .await
3262 .unwrap();
3263 cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
3264 .await;
3265
3266 tree.read_with(cx, |tree, _| {
3267 assert_eq!(
3268 tree.entries(false)
3269 .map(|entry| entry.path.as_ref())
3270 .collect::<Vec<_>>(),
3271 vec![
3272 Path::new(""),
3273 Path::new(".gitignore"),
3274 Path::new("a"),
3275 Path::new("a/c"),
3276 ]
3277 );
3278 assert_eq!(
3279 tree.entries(true)
3280 .map(|entry| entry.path.as_ref())
3281 .collect::<Vec<_>>(),
3282 vec![
3283 Path::new(""),
3284 Path::new(".gitignore"),
3285 Path::new("a"),
3286 Path::new("a/b"),
3287 Path::new("a/c"),
3288 ]
3289 );
3290 })
3291 }
3292
3293 #[gpui::test(iterations = 10)]
3294 async fn test_circular_symlinks(executor: Arc<Deterministic>, cx: &mut TestAppContext) {
3295 let fs = FakeFs::new(cx.background());
3296 fs.insert_tree(
3297 "/root",
3298 json!({
3299 "lib": {
3300 "a": {
3301 "a.txt": ""
3302 },
3303 "b": {
3304 "b.txt": ""
3305 }
3306 }
3307 }),
3308 )
3309 .await;
3310 fs.insert_symlink("/root/lib/a/lib", "..".into()).await;
3311 fs.insert_symlink("/root/lib/b/lib", "..".into()).await;
3312
3313 let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
3314 let tree = Worktree::local(
3315 client,
3316 Path::new("/root"),
3317 true,
3318 fs.clone(),
3319 Default::default(),
3320 &mut cx.to_async(),
3321 )
3322 .await
3323 .unwrap();
3324
3325 cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
3326 .await;
3327
3328 tree.read_with(cx, |tree, _| {
3329 assert_eq!(
3330 tree.entries(false)
3331 .map(|entry| entry.path.as_ref())
3332 .collect::<Vec<_>>(),
3333 vec![
3334 Path::new(""),
3335 Path::new("lib"),
3336 Path::new("lib/a"),
3337 Path::new("lib/a/a.txt"),
3338 Path::new("lib/a/lib"),
3339 Path::new("lib/b"),
3340 Path::new("lib/b/b.txt"),
3341 Path::new("lib/b/lib"),
3342 ]
3343 );
3344 });
3345
3346 fs.rename(
3347 Path::new("/root/lib/a/lib"),
3348 Path::new("/root/lib/a/lib-2"),
3349 Default::default(),
3350 )
3351 .await
3352 .unwrap();
3353 executor.run_until_parked();
3354 tree.read_with(cx, |tree, _| {
3355 assert_eq!(
3356 tree.entries(false)
3357 .map(|entry| entry.path.as_ref())
3358 .collect::<Vec<_>>(),
3359 vec![
3360 Path::new(""),
3361 Path::new("lib"),
3362 Path::new("lib/a"),
3363 Path::new("lib/a/a.txt"),
3364 Path::new("lib/a/lib-2"),
3365 Path::new("lib/b"),
3366 Path::new("lib/b/b.txt"),
3367 Path::new("lib/b/lib"),
3368 ]
3369 );
3370 });
3371 }
3372
3373 #[gpui::test]
3374 async fn test_rescan_with_gitignore(cx: &mut TestAppContext) {
3375 let parent_dir = temp_tree(json!({
3376 ".gitignore": "ancestor-ignored-file1\nancestor-ignored-file2\n",
3377 "tree": {
3378 ".git": {},
3379 ".gitignore": "ignored-dir\n",
3380 "tracked-dir": {
3381 "tracked-file1": "",
3382 "ancestor-ignored-file1": "",
3383 },
3384 "ignored-dir": {
3385 "ignored-file1": ""
3386 }
3387 }
3388 }));
3389 let dir = parent_dir.path().join("tree");
3390
3391 let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
3392
3393 let tree = Worktree::local(
3394 client,
3395 dir.as_path(),
3396 true,
3397 Arc::new(RealFs),
3398 Default::default(),
3399 &mut cx.to_async(),
3400 )
3401 .await
3402 .unwrap();
3403 cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
3404 .await;
3405 tree.flush_fs_events(cx).await;
3406 cx.read(|cx| {
3407 let tree = tree.read(cx);
3408 assert!(
3409 !tree
3410 .entry_for_path("tracked-dir/tracked-file1")
3411 .unwrap()
3412 .is_ignored
3413 );
3414 assert!(
3415 tree.entry_for_path("tracked-dir/ancestor-ignored-file1")
3416 .unwrap()
3417 .is_ignored
3418 );
3419 assert!(
3420 tree.entry_for_path("ignored-dir/ignored-file1")
3421 .unwrap()
3422 .is_ignored
3423 );
3424 });
3425
3426 std::fs::write(dir.join("tracked-dir/tracked-file2"), "").unwrap();
3427 std::fs::write(dir.join("tracked-dir/ancestor-ignored-file2"), "").unwrap();
3428 std::fs::write(dir.join("ignored-dir/ignored-file2"), "").unwrap();
3429 tree.flush_fs_events(cx).await;
3430 cx.read(|cx| {
3431 let tree = tree.read(cx);
3432 assert!(
3433 !tree
3434 .entry_for_path("tracked-dir/tracked-file2")
3435 .unwrap()
3436 .is_ignored
3437 );
3438 assert!(
3439 tree.entry_for_path("tracked-dir/ancestor-ignored-file2")
3440 .unwrap()
3441 .is_ignored
3442 );
3443 assert!(
3444 tree.entry_for_path("ignored-dir/ignored-file2")
3445 .unwrap()
3446 .is_ignored
3447 );
3448 assert!(tree.entry_for_path(".git").unwrap().is_ignored);
3449 });
3450 }
3451
    #[gpui::test]
    async fn test_git_repository_for_path(cx: &mut TestAppContext) {
        // Nested repositories: an outer repo at `dir1` and an inner one at
        // `dir1/deps/dep1`. A path must map to the innermost enclosing repo.
        let root = temp_tree(json!({
            "dir1": {
                ".git": {},
                "deps": {
                    "dep1": {
                        ".git": {},
                        "src": {
                            "a.txt": ""
                        }
                    }
                },
                "src": {
                    "b.txt": ""
                }
            },
            "c.txt": "",
        }));

        let http_client = FakeHttpClient::with_404_response();
        let client = cx.read(|cx| Client::new(http_client, cx));
        let tree = Worktree::local(
            client,
            root.path(),
            true,
            Arc::new(RealFs),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();

        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;
        tree.flush_fs_events(cx).await;

        tree.read_with(cx, |tree, _cx| {
            let tree = tree.as_local().unwrap();

            // `c.txt` is outside any repository's work directory.
            assert!(tree.repo_for("c.txt".as_ref()).is_none());

            // A file under the outer repo resolves to `dir1`.
            let entry = tree.repo_for("dir1/src/b.txt".as_ref()).unwrap();
            assert_eq!(entry.work_directory.0.as_ref(), Path::new("dir1"));
            assert_eq!(entry.git_dir_path.as_ref(), Path::new("dir1/.git"));

            // A file under the nested dependency resolves to the inner repo,
            // not the outer one.
            let entry = tree.repo_for("dir1/deps/dep1/src/a.txt".as_ref()).unwrap();
            assert_eq!(entry.work_directory.deref(), Path::new("dir1/deps/dep1"));
            assert_eq!(
                entry.git_dir_path.as_ref(),
                Path::new("dir1/deps/dep1/.git"),
            );
        });

        let original_scan_id = tree.read_with(cx, |tree, _cx| {
            let tree = tree.as_local().unwrap();
            let entry = tree.repo_for("dir1/src/b.txt".as_ref()).unwrap();
            entry.scan_id
        });

        // Mutating the `.git` directory's contents should bump the repo's scan id.
        std::fs::write(root.path().join("dir1/.git/random_new_file"), "hello").unwrap();
        tree.flush_fs_events(cx).await;

        tree.read_with(cx, |tree, _cx| {
            let tree = tree.as_local().unwrap();
            let new_scan_id = {
                let entry = tree.repo_for("dir1/src/b.txt".as_ref()).unwrap();
                entry.scan_id
            };
            assert_ne!(
                original_scan_id, new_scan_id,
                "original {original_scan_id}, new {new_scan_id}"
            );
        });

        // Deleting `.git` should remove the repository entry entirely.
        std::fs::remove_dir_all(root.path().join("dir1/.git")).unwrap();
        tree.flush_fs_events(cx).await;

        tree.read_with(cx, |tree, _cx| {
            let tree = tree.as_local().unwrap();

            assert!(tree.repo_for("dir1/src/b.txt".as_ref()).is_none());
        });
    }
3536
3537 #[test]
3538 fn test_changed_repos() {
3539 fn fake_entry(git_dir_path: impl AsRef<Path>, scan_id: usize) -> RepositoryEntry {
3540 RepositoryEntry {
3541 scan_id,
3542 git_dir_path: git_dir_path.as_ref().into(),
3543 git_dir_entry_id: ProjectEntryId(0),
3544 work_directory: RepositoryWorkDirectory(
3545 Path::new(&format!("don't-care-{}", scan_id)).into(),
3546 ),
3547 branch: None,
3548 }
3549 }
3550
3551 let mut prev_repos = TreeMap::<RepositoryWorkDirectory, RepositoryEntry>::default();
3552 prev_repos.insert(
3553 RepositoryWorkDirectory(Path::new("don't-care-1").into()),
3554 fake_entry("/.git", 0),
3555 );
3556 prev_repos.insert(
3557 RepositoryWorkDirectory(Path::new("don't-care-2").into()),
3558 fake_entry("/a/.git", 0),
3559 );
3560 prev_repos.insert(
3561 RepositoryWorkDirectory(Path::new("don't-care-3").into()),
3562 fake_entry("/a/b/.git", 0),
3563 );
3564
3565 let mut new_repos = TreeMap::<RepositoryWorkDirectory, RepositoryEntry>::default();
3566 new_repos.insert(
3567 RepositoryWorkDirectory(Path::new("don't-care-4").into()),
3568 fake_entry("/a/.git", 1),
3569 );
3570 new_repos.insert(
3571 RepositoryWorkDirectory(Path::new("don't-care-5").into()),
3572 fake_entry("/a/b/.git", 0),
3573 );
3574 new_repos.insert(
3575 RepositoryWorkDirectory(Path::new("don't-care-6").into()),
3576 fake_entry("/a/c/.git", 0),
3577 );
3578
3579 let res = LocalWorktree::changed_repos(&prev_repos, &new_repos);
3580
3581 // Deletion retained
3582 assert!(res
3583 .iter()
3584 .find(|repo| repo.git_dir_path.as_ref() == Path::new("/.git") && repo.scan_id == 0)
3585 .is_some());
3586
3587 // Update retained
3588 assert!(res
3589 .iter()
3590 .find(|repo| repo.git_dir_path.as_ref() == Path::new("/a/.git") && repo.scan_id == 1)
3591 .is_some());
3592
3593 // Addition retained
3594 assert!(res
3595 .iter()
3596 .find(|repo| repo.git_dir_path.as_ref() == Path::new("/a/c/.git") && repo.scan_id == 0)
3597 .is_some());
3598
3599 // Nochange, not retained
3600 assert!(res
3601 .iter()
3602 .find(|repo| repo.git_dir_path.as_ref() == Path::new("/a/b/.git") && repo.scan_id == 0)
3603 .is_none());
3604 }
3605
3606 #[gpui::test]
3607 async fn test_write_file(cx: &mut TestAppContext) {
3608 let dir = temp_tree(json!({
3609 ".git": {},
3610 ".gitignore": "ignored-dir\n",
3611 "tracked-dir": {},
3612 "ignored-dir": {}
3613 }));
3614
3615 let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
3616
3617 let tree = Worktree::local(
3618 client,
3619 dir.path(),
3620 true,
3621 Arc::new(RealFs),
3622 Default::default(),
3623 &mut cx.to_async(),
3624 )
3625 .await
3626 .unwrap();
3627 cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
3628 .await;
3629 tree.flush_fs_events(cx).await;
3630
3631 tree.update(cx, |tree, cx| {
3632 tree.as_local().unwrap().write_file(
3633 Path::new("tracked-dir/file.txt"),
3634 "hello".into(),
3635 Default::default(),
3636 cx,
3637 )
3638 })
3639 .await
3640 .unwrap();
3641 tree.update(cx, |tree, cx| {
3642 tree.as_local().unwrap().write_file(
3643 Path::new("ignored-dir/file.txt"),
3644 "world".into(),
3645 Default::default(),
3646 cx,
3647 )
3648 })
3649 .await
3650 .unwrap();
3651
3652 tree.read_with(cx, |tree, _| {
3653 let tracked = tree.entry_for_path("tracked-dir/file.txt").unwrap();
3654 let ignored = tree.entry_for_path("ignored-dir/file.txt").unwrap();
3655 assert!(!tracked.is_ignored);
3656 assert!(ignored.is_ignored);
3657 });
3658 }
3659
3660 #[gpui::test(iterations = 30)]
3661 async fn test_create_directory_during_initial_scan(cx: &mut TestAppContext) {
3662 let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
3663
3664 let fs = FakeFs::new(cx.background());
3665 fs.insert_tree(
3666 "/root",
3667 json!({
3668 "b": {},
3669 "c": {},
3670 "d": {},
3671 }),
3672 )
3673 .await;
3674
3675 let tree = Worktree::local(
3676 client,
3677 "/root".as_ref(),
3678 true,
3679 fs,
3680 Default::default(),
3681 &mut cx.to_async(),
3682 )
3683 .await
3684 .unwrap();
3685
3686 let mut snapshot1 = tree.update(cx, |tree, _| tree.as_local().unwrap().snapshot());
3687
3688 let entry = tree
3689 .update(cx, |tree, cx| {
3690 tree.as_local_mut()
3691 .unwrap()
3692 .create_entry("a/e".as_ref(), true, cx)
3693 })
3694 .await
3695 .unwrap();
3696 assert!(entry.is_dir());
3697
3698 cx.foreground().run_until_parked();
3699 tree.read_with(cx, |tree, _| {
3700 assert_eq!(tree.entry_for_path("a/e").unwrap().kind, EntryKind::Dir);
3701 });
3702
3703 let snapshot2 = tree.update(cx, |tree, _| tree.as_local().unwrap().snapshot());
3704 let update = snapshot2.build_update(&snapshot1, 0, 0, true);
3705 snapshot1.apply_remote_update(update).unwrap();
3706 assert_eq!(snapshot1.to_vec(true), snapshot2.to_vec(true),);
3707 }
3708
    #[gpui::test(iterations = 100)]
    async fn test_random_worktree_operations_during_initial_scan(
        cx: &mut TestAppContext,
        mut rng: StdRng,
    ) {
        // Counts are overridable via env vars for longer fuzzing runs.
        let operations = env::var("OPERATIONS")
            .map(|o| o.parse().unwrap())
            .unwrap_or(5);
        let initial_entries = env::var("INITIAL_ENTRIES")
            .map(|o| o.parse().unwrap())
            .unwrap_or(20);

        // Seed a fake filesystem with a random tree (probability 1.0 forces
        // each step to be an insertion).
        let root_dir = Path::new("/test");
        let fs = FakeFs::new(cx.background()) as Arc<dyn Fs>;
        fs.as_fake().insert_tree(root_dir, json!({})).await;
        for _ in 0..initial_entries {
            randomly_mutate_fs(&fs, root_dir, 1.0, &mut rng).await;
        }
        log::info!("generated initial tree");

        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
        let worktree = Worktree::local(
            client.clone(),
            root_dir,
            true,
            fs.clone(),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();

        // Baseline snapshot taken while the initial scan is still running.
        let mut snapshot = worktree.update(cx, |tree, _| tree.as_local().unwrap().snapshot());

        // Interleave worktree mutations with the in-flight initial scan,
        // checking internal invariants after each one. Mutations may
        // legitimately fail (e.g. target entry already deleted), hence log_err.
        for _ in 0..operations {
            worktree
                .update(cx, |worktree, cx| {
                    randomly_mutate_worktree(worktree, &mut rng, cx)
                })
                .await
                .log_err();
            worktree.read_with(cx, |tree, _| {
                tree.as_local().unwrap().snapshot.check_invariants()
            });

            // Occasionally exercise the remote-update protocol mid-scan: the
            // diff from the baseline must replay to an identical snapshot.
            if rng.gen_bool(0.6) {
                let new_snapshot =
                    worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
                let update = new_snapshot.build_update(&snapshot, 0, 0, true);
                snapshot.apply_remote_update(update.clone()).unwrap();
                assert_eq!(
                    snapshot.to_vec(true),
                    new_snapshot.to_vec(true),
                    "incorrect snapshot after update {:?}",
                    update
                );
            }
        }

        // Let the scan finish, then verify invariants and one final replay.
        worktree
            .update(cx, |tree, _| tree.as_local_mut().unwrap().scan_complete())
            .await;
        worktree.read_with(cx, |tree, _| {
            tree.as_local().unwrap().snapshot.check_invariants()
        });

        let new_snapshot = worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
        let update = new_snapshot.build_update(&snapshot, 0, 0, true);
        snapshot.apply_remote_update(update.clone()).unwrap();
        assert_eq!(
            snapshot.to_vec(true),
            new_snapshot.to_vec(true),
            "incorrect snapshot after update {:?}",
            update
        );
    }
3785
    #[gpui::test(iterations = 100)]
    async fn test_random_worktree_changes(cx: &mut TestAppContext, mut rng: StdRng) {
        // Counts are overridable via env vars for longer fuzzing runs.
        let operations = env::var("OPERATIONS")
            .map(|o| o.parse().unwrap())
            .unwrap_or(40);
        let initial_entries = env::var("INITIAL_ENTRIES")
            .map(|o| o.parse().unwrap())
            .unwrap_or(20);

        // Seed a fake filesystem with a random tree (probability 1.0 forces
        // each step to be an insertion).
        let root_dir = Path::new("/test");
        let fs = FakeFs::new(cx.background()) as Arc<dyn Fs>;
        fs.as_fake().insert_tree(root_dir, json!({})).await;
        for _ in 0..initial_entries {
            randomly_mutate_fs(&fs, root_dir, 1.0, &mut rng).await;
        }
        log::info!("generated initial tree");

        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
        let worktree = Worktree::local(
            client.clone(),
            root_dir,
            true,
            fs.clone(),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();

        worktree
            .update(cx, |tree, _| tree.as_local_mut().unwrap().scan_complete())
            .await;

        // After the initial scan is complete, the `UpdatedEntries` event can
        // be used to follow along with all changes to the worktree's snapshot.
        worktree.update(cx, |tree, cx| {
            // Shadow copy of the worktree's sorted path list, maintained
            // purely from the change events and compared against the real
            // paths after every event batch.
            let mut paths = tree
                .as_local()
                .unwrap()
                .paths()
                .cloned()
                .collect::<Vec<_>>();

            cx.subscribe(&worktree, move |tree, _, event, _| {
                if let Event::UpdatedEntries(changes) = event {
                    for (path, change_type) in changes.iter() {
                        let path = path.clone();
                        // Either the found index (path present) or the
                        // insertion point (path absent) — each change type
                        // asserts which case it expects.
                        let ix = match paths.binary_search(&path) {
                            Ok(ix) | Err(ix) => ix,
                        };
                        match change_type {
                            PathChange::Added => {
                                assert_ne!(paths.get(ix), Some(&path));
                                paths.insert(ix, path);
                            }
                            PathChange::Removed => {
                                assert_eq!(paths.get(ix), Some(&path));
                                paths.remove(ix);
                            }
                            PathChange::Updated => {
                                assert_eq!(paths.get(ix), Some(&path));
                            }
                            PathChange::AddedOrUpdated => {
                                // Ambiguous change: insert only if not
                                // already present.
                                if paths[ix] != path {
                                    paths.insert(ix, path);
                                }
                            }
                        }
                    }
                    let new_paths = tree.paths().cloned().collect::<Vec<_>>();
                    assert_eq!(paths, new_paths, "incorrect changes: {:?}", changes);
                }
            })
            .detach();
        });

        // Mutate the filesystem, flushing fs events in random-sized batches
        // so the scanner observes partial, out-of-order change sets.
        let mut snapshots = Vec::new();
        let mut mutations_len = operations;
        while mutations_len > 1 {
            randomly_mutate_fs(&fs, root_dir, 1.0, &mut rng).await;
            let buffered_event_count = fs.as_fake().buffered_event_count().await;
            if buffered_event_count > 0 && rng.gen_bool(0.3) {
                let len = rng.gen_range(0..=buffered_event_count);
                log::info!("flushing {} events", len);
                fs.as_fake().flush_events(len).await;
            } else {
                randomly_mutate_fs(&fs, root_dir, 0.6, &mut rng).await;
                mutations_len -= 1;
            }

            cx.foreground().run_until_parked();
            // Occasionally save an intermediate snapshot to diff against later.
            if rng.gen_bool(0.2) {
                log::info!("storing snapshot {}", snapshots.len());
                let snapshot =
                    worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
                snapshots.push(snapshot);
            }
        }

        // Flush everything and let the scanner settle.
        log::info!("quiescing");
        fs.as_fake().flush_events(usize::MAX).await;
        cx.foreground().run_until_parked();
        let snapshot = worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
        snapshot.check_invariants();

        // A fresh worktree scanning the same fs must converge to the same state.
        {
            let new_worktree = Worktree::local(
                client.clone(),
                root_dir,
                true,
                fs.clone(),
                Default::default(),
                &mut cx.to_async(),
            )
            .await
            .unwrap();
            new_worktree
                .update(cx, |tree, _| tree.as_local_mut().unwrap().scan_complete())
                .await;
            let new_snapshot =
                new_worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
            assert_eq!(snapshot.to_vec(true), new_snapshot.to_vec(true));
        }

        // Each stored snapshot must replay to the final state via build_update,
        // both with and without ignored entries included.
        for (i, mut prev_snapshot) in snapshots.into_iter().enumerate() {
            let include_ignored = rng.gen::<bool>();
            if !include_ignored {
                // Strip ignored entries from the stored snapshot so it matches
                // what a remote peer that excludes them would hold.
                let mut entries_by_path_edits = Vec::new();
                let mut entries_by_id_edits = Vec::new();
                for entry in prev_snapshot
                    .entries_by_id
                    .cursor::<()>()
                    .filter(|e| e.is_ignored)
                {
                    entries_by_path_edits.push(Edit::Remove(PathKey(entry.path.clone())));
                    entries_by_id_edits.push(Edit::Remove(entry.id));
                }

                prev_snapshot
                    .entries_by_path
                    .edit(entries_by_path_edits, &());
                prev_snapshot.entries_by_id.edit(entries_by_id_edits, &());
            }

            let update = snapshot.build_update(&prev_snapshot, 0, 0, include_ignored);
            prev_snapshot.apply_remote_update(update.clone()).unwrap();
            assert_eq!(
                prev_snapshot.to_vec(include_ignored),
                snapshot.to_vec(include_ignored),
                "wrong update for snapshot {i}. update: {:?}",
                update
            );
        }
    }
3940
    // Applies one random mutation (delete, rename, or create/overwrite) to a
    // random entry of the worktree, returning the task that performs it. The
    // returned task may fail, e.g. when the chosen entry was removed concurrently.
    fn randomly_mutate_worktree(
        worktree: &mut Worktree,
        rng: &mut impl Rng,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<()>> {
        let worktree = worktree.as_local_mut().unwrap();
        let snapshot = worktree.snapshot();
        let entry = snapshot.entries(false).choose(rng).unwrap();

        match rng.gen_range(0_u32..100) {
            // ~34%: delete the entry (never the root, whose path is empty).
            0..=33 if entry.path.as_ref() != Path::new("") => {
                log::info!("deleting entry {:?} ({})", entry.path, entry.id.0);
                worktree.delete_entry(entry.id, cx).unwrap()
            }
            // ~33%: rename the entry under some other entry's directory.
            ..=66 if entry.path.as_ref() != Path::new("") => {
                let other_entry = snapshot.entries(false).choose(rng).unwrap();
                let new_parent_path = if other_entry.is_dir() {
                    other_entry.path.clone()
                } else {
                    other_entry.path.parent().unwrap().into()
                };
                let mut new_path = new_parent_path.join(gen_name(rng));
                // Avoid moving an entry into itself; fall back to a
                // root-level destination.
                if new_path.starts_with(&entry.path) {
                    new_path = gen_name(rng).into();
                }

                log::info!(
                    "renaming entry {:?} ({}) to {:?}",
                    entry.path,
                    entry.id.0,
                    new_path
                );
                let task = worktree.rename_entry(entry.id, new_path, cx).unwrap();
                cx.foreground().spawn(async move {
                    task.await?;
                    Ok(())
                })
            }
            // Remaining (and the root-entry fallthrough from the guards above):
            // create a child under a directory, or overwrite a file's contents.
            _ => {
                let task = if entry.is_dir() {
                    let child_path = entry.path.join(gen_name(rng));
                    let is_dir = rng.gen_bool(0.3);
                    log::info!(
                        "creating {} at {:?}",
                        if is_dir { "dir" } else { "file" },
                        child_path,
                    );
                    worktree.create_entry(child_path, is_dir, cx)
                } else {
                    log::info!("overwriting file {:?} ({})", entry.path, entry.id.0);
                    worktree.write_file(entry.path.clone(), "".into(), Default::default(), cx)
                };
                cx.foreground().spawn(async move {
                    task.await?;
                    Ok(())
                })
            }
        }
    }
4000
    // Applies one random mutation to the fake filesystem under `root_path`:
    // create a file/dir (with probability `insertion_probability`), write a
    // random `.gitignore`, or rename/delete an existing path.
    async fn randomly_mutate_fs(
        fs: &Arc<dyn Fs>,
        root_path: &Path,
        insertion_probability: f64,
        rng: &mut impl Rng,
    ) {
        // Partition existing paths under the root into files and directories.
        let mut files = Vec::new();
        let mut dirs = Vec::new();
        for path in fs.as_fake().paths() {
            if path.starts_with(root_path) {
                if fs.is_file(&path).await {
                    files.push(path);
                } else {
                    dirs.push(path);
                }
            }
        }

        // Force an insertion when the tree is effectively empty (only the
        // root dir remains), so deletions can't strand the generator.
        if (files.is_empty() && dirs.len() == 1) || rng.gen_bool(insertion_probability) {
            let path = dirs.choose(rng).unwrap();
            let new_path = path.join(gen_name(rng));

            if rng.gen() {
                log::info!(
                    "creating dir {:?}",
                    new_path.strip_prefix(root_path).unwrap()
                );
                fs.create_dir(&new_path).await.unwrap();
            } else {
                log::info!(
                    "creating file {:?}",
                    new_path.strip_prefix(root_path).unwrap()
                );
                fs.create_file(&new_path, Default::default()).await.unwrap();
            }
        } else if rng.gen_bool(0.05) {
            // 5%: write a `.gitignore` in a random directory, listing a random
            // subset of that directory's files and subdirectories.
            let ignore_dir_path = dirs.choose(rng).unwrap();
            let ignore_path = ignore_dir_path.join(&*GITIGNORE);

            // Both lists include `ignore_dir_path` itself, so `subdirs` is
            // never empty below.
            let subdirs = dirs
                .iter()
                .filter(|d| d.starts_with(&ignore_dir_path))
                .cloned()
                .collect::<Vec<_>>();
            let subfiles = files
                .iter()
                .filter(|d| d.starts_with(&ignore_dir_path))
                .cloned()
                .collect::<Vec<_>>();
            let files_to_ignore = {
                let len = rng.gen_range(0..=subfiles.len());
                subfiles.choose_multiple(rng, len)
            };
            let dirs_to_ignore = {
                let len = rng.gen_range(0..subdirs.len());
                subdirs.choose_multiple(rng, len)
            };

            // One relative path per line, as gitignore syntax expects.
            let mut ignore_contents = String::new();
            for path_to_ignore in files_to_ignore.chain(dirs_to_ignore) {
                writeln!(
                    ignore_contents,
                    "{}",
                    path_to_ignore
                        .strip_prefix(&ignore_dir_path)
                        .unwrap()
                        .to_str()
                        .unwrap()
                )
                .unwrap();
            }
            log::info!(
                "creating gitignore {:?} with contents:\n{}",
                ignore_path.strip_prefix(&root_path).unwrap(),
                ignore_contents
            );
            fs.save(
                &ignore_path,
                &ignore_contents.as_str().into(),
                Default::default(),
            )
            .await
            .unwrap();
        } else {
            // Otherwise pick an existing file or non-root directory
            // (dirs[0] is the root, excluded via `dirs[1..]`) to rename or delete.
            let old_path = {
                let file_path = files.choose(rng);
                let dir_path = dirs[1..].choose(rng);
                file_path.into_iter().chain(dir_path).choose(rng).unwrap()
            };

            let is_rename = rng.gen();
            if is_rename {
                // Destination parent must not be inside the path being moved.
                let new_path_parent = dirs
                    .iter()
                    .filter(|d| !d.starts_with(old_path))
                    .choose(rng)
                    .unwrap();

                // 30% of eligible renames replace an existing directory
                // wholesale (remove it, then rename onto its path).
                let overwrite_existing_dir =
                    !old_path.starts_with(&new_path_parent) && rng.gen_bool(0.3);
                let new_path = if overwrite_existing_dir {
                    fs.remove_dir(
                        &new_path_parent,
                        RemoveOptions {
                            recursive: true,
                            ignore_if_not_exists: true,
                        },
                    )
                    .await
                    .unwrap();
                    new_path_parent.to_path_buf()
                } else {
                    new_path_parent.join(gen_name(rng))
                };

                log::info!(
                    "renaming {:?} to {}{:?}",
                    old_path.strip_prefix(&root_path).unwrap(),
                    if overwrite_existing_dir {
                        "overwrite "
                    } else {
                        ""
                    },
                    new_path.strip_prefix(&root_path).unwrap()
                );
                fs.rename(
                    &old_path,
                    &new_path,
                    fs::RenameOptions {
                        overwrite: true,
                        ignore_if_exists: true,
                    },
                )
                .await
                .unwrap();
            } else if fs.is_file(&old_path).await {
                log::info!(
                    "deleting file {:?}",
                    old_path.strip_prefix(&root_path).unwrap()
                );
                fs.remove_file(old_path, Default::default()).await.unwrap();
            } else {
                log::info!(
                    "deleting dir {:?}",
                    old_path.strip_prefix(&root_path).unwrap()
                );
                fs.remove_dir(
                    &old_path,
                    RemoveOptions {
                        recursive: true,
                        ignore_if_not_exists: true,
                    },
                )
                .await
                .unwrap();
            }
        }
    }
4159
4160 fn gen_name(rng: &mut impl Rng) -> String {
4161 (0..6)
4162 .map(|_| rng.sample(rand::distributions::Alphanumeric))
4163 .map(char::from)
4164 .collect()
4165 }
4166
    impl LocalSnapshot {
        // Test-only consistency checks over the snapshot's redundant indices
        // and traversal orders. Panics on any violation.
        fn check_invariants(&self) {
            // The by-path and by-id trees must describe the same set of
            // (path, id) pairs. The by-id side is collected through a BTreeSet
            // to put it in path order for comparison.
            assert_eq!(
                self.entries_by_path
                    .cursor::<()>()
                    .map(|e| (&e.path, e.id))
                    .collect::<Vec<_>>(),
                self.entries_by_id
                    .cursor::<()>()
                    .map(|e| (&e.path, e.id))
                    .collect::<collections::BTreeSet<_>>()
                    .into_iter()
                    .collect::<Vec<_>>(),
                "entries_by_path and entries_by_id are inconsistent"
            );

            // The `files` traversals (with and without ignored entries) must
            // yield exactly the file entries, in by-path order.
            let mut files = self.files(true, 0);
            let mut visible_files = self.files(false, 0);
            for entry in self.entries_by_path.cursor::<()>() {
                if entry.is_file() {
                    assert_eq!(files.next().unwrap().inode, entry.inode);
                    if !entry.is_ignored {
                        assert_eq!(visible_files.next().unwrap().inode, entry.inode);
                    }
                }
            }

            // Both traversals must be exhausted — no extra file entries.
            assert!(files.next().is_none());
            assert!(visible_files.next().is_none());

            // Walk the tree via `child_entries`, inserting each directory's
            // children at the stack position that yields depth-first,
            // by-path order.
            let mut bfs_paths = Vec::new();
            let mut stack = vec![Path::new("")];
            while let Some(path) = stack.pop() {
                bfs_paths.push(path);
                let ix = stack.len();
                for child_entry in self.child_entries(path) {
                    stack.insert(ix, &child_entry.path);
                }
            }

            // That walk must match a flat cursor over entries_by_path...
            let dfs_paths_via_iter = self
                .entries_by_path
                .cursor::<()>()
                .map(|e| e.path.as_ref())
                .collect::<Vec<_>>();
            assert_eq!(bfs_paths, dfs_paths_via_iter);

            // ...and the public `entries` traversal must agree as well.
            let dfs_paths_via_traversal = self
                .entries(true)
                .map(|e| e.path.as_ref())
                .collect::<Vec<_>>();
            assert_eq!(dfs_paths_via_traversal, dfs_paths_via_iter);

            // Every registered gitignore must correspond to an existing
            // directory entry and an existing `.gitignore` entry within it.
            for ignore_parent_abs_path in self.ignores_by_parent_abs_path.keys() {
                let ignore_parent_path =
                    ignore_parent_abs_path.strip_prefix(&self.abs_path).unwrap();
                assert!(self.entry_for_path(&ignore_parent_path).is_some());
                assert!(self
                    .entry_for_path(ignore_parent_path.join(&*GITIGNORE))
                    .is_some());
            }
        }

        // Flattens the snapshot into sorted (path, inode, is_ignored) tuples
        // for equality comparisons in tests; optionally excludes ignored entries.
        fn to_vec(&self, include_ignored: bool) -> Vec<(&Path, u64, bool)> {
            let mut paths = Vec::new();
            for entry in self.entries_by_path.cursor::<()>() {
                if include_ignored || !entry.is_ignored {
                    paths.push((entry.path.as_ref(), entry.inode, entry.is_ignored));
                }
            }
            paths.sort_by(|a, b| a.0.cmp(b.0));
            paths
        }
    }
4241}