1use crate::{
2 copy_recursive, ignore::IgnoreStack, DiagnosticSummary, ProjectEntryId, RemoveOptions,
3};
4use ::ignore::gitignore::{Gitignore, GitignoreBuilder};
5use anyhow::{anyhow, Context, Result};
6use client::{proto, Client};
7use clock::ReplicaId;
8use collections::{HashMap, VecDeque};
9use fs::{repository::GitRepository, Fs, LineEnding};
10use futures::{
11 channel::{
12 mpsc::{self, UnboundedSender},
13 oneshot,
14 },
15 select_biased,
16 task::Poll,
17 Stream, StreamExt,
18};
19use fuzzy::CharBag;
20use git::{DOT_GIT, GITIGNORE};
21use gpui::{executor, AppContext, AsyncAppContext, Entity, ModelContext, ModelHandle, Task};
22use language::{
23 proto::{
24 deserialize_fingerprint, deserialize_version, serialize_fingerprint, serialize_line_ending,
25 serialize_version,
26 },
27 Buffer, DiagnosticEntry, File as _, PointUtf16, Rope, RopeFingerprint, Unclipped,
28};
29use lsp::LanguageServerId;
30use parking_lot::Mutex;
31use postage::{
32 barrier,
33 prelude::{Sink as _, Stream as _},
34 watch,
35};
36use smol::channel::{self, Sender};
37use std::{
38 any::Any,
39 cmp::{self, Ordering},
40 convert::TryFrom,
41 ffi::OsStr,
42 fmt,
43 future::Future,
44 mem,
45 ops::{Deref, DerefMut},
46 path::{Path, PathBuf},
47 pin::Pin,
48 sync::{
49 atomic::{AtomicUsize, Ordering::SeqCst},
50 Arc,
51 },
52 time::{Duration, SystemTime},
53};
54use sum_tree::{Bias, Edit, SeekTarget, SumTree, TreeMap, TreeSet};
55use util::{paths::HOME, ResultExt, TryFutureExt};
56
/// Identifies a worktree within a project. On the local side it is derived
/// from the gpui model id (see `WorktreeId::from_usize` in `Worktree::local`).
#[derive(Copy, Clone, PartialEq, Eq, Debug, Hash, PartialOrd, Ord)]
pub struct WorktreeId(usize);
59
/// A directory tree tracked by the project: either backed by the local
/// filesystem, or a replica of a worktree living on a remote host.
pub enum Worktree {
    Local(LocalWorktree),
    Remote(RemoteWorktree),
}
64
/// A worktree backed by the local filesystem, kept up to date by a
/// background scanner task.
pub struct LocalWorktree {
    snapshot: LocalSnapshot,
    /// Sends batches of absolute paths to the background scanner for
    /// rescanning; dropping the paired barrier signals the batch was applied.
    path_changes_tx: channel::Sender<(Vec<PathBuf>, barrier::Sender)>,
    /// Broadcasts whether a background scan is currently in progress.
    is_scanning: (watch::Sender<bool>, watch::Receiver<bool>),
    _background_scanner_task: Task<()>,
    /// Present while this worktree is shared with collaborators.
    share: Option<ShareState>,
    /// Full diagnostics per path, as a list sorted by language server id.
    diagnostics: HashMap<
        Arc<Path>,
        Vec<(
            LanguageServerId,
            Vec<DiagnosticEntry<Unclipped<PointUtf16>>>,
        )>,
    >,
    /// Error/warning counts per path, grouped by language server.
    diagnostic_summaries: HashMap<Arc<Path>, HashMap<LanguageServerId, DiagnosticSummary>>,
    client: Arc<Client>,
    fs: Arc<dyn Fs>,
    /// Reported by `Worktree::is_visible`; set from the creation arguments.
    visible: bool,
}
83
/// A worktree mirroring one that lives on a remote host; updated by applying
/// `proto::UpdateWorktree` messages.
pub struct RemoteWorktree {
    /// The latest snapshot applied on the foreground.
    snapshot: Snapshot,
    /// Snapshot mutated by the background task that applies host updates;
    /// copied into `snapshot` on the foreground after each change.
    background_snapshot: Arc<Mutex<Snapshot>>,
    project_id: u64,
    client: Arc<Client>,
    /// Feeds host updates to the background task; `None` once disconnected.
    updates_tx: Option<UnboundedSender<proto::UpdateWorktree>>,
    /// Tasks waiting for a given scan id to be observed, kept sorted by scan id.
    snapshot_subscriptions: VecDeque<(usize, oneshot::Sender<()>)>,
    replica_id: ReplicaId,
    diagnostic_summaries: HashMap<Arc<Path>, HashMap<LanguageServerId, DiagnosticSummary>>,
    visible: bool,
    /// Set once the connection to the host is lost.
    disconnected: bool,
}
96
/// The worktree's view of the file system: entries indexed by path and by id,
/// plus the git repositories discovered beneath the root.
#[derive(Clone)]
pub struct Snapshot {
    id: WorktreeId,
    /// Absolute path of the worktree root on disk.
    abs_path: Arc<Path>,
    /// File name of the root, used for fuzzy matching.
    root_name: String,
    /// Lowercased characters of `root_name`, for fuzzy matching.
    root_char_bag: CharBag,
    entries_by_path: SumTree<Entry>,
    entries_by_id: SumTree<PathEntry>,
    /// Git repositories keyed by their work directory.
    repository_entries: TreeMap<RepositoryWorkDirectory, RepositoryEntry>,

    /// A number that increases every time the worktree begins scanning
    /// a set of paths from the filesystem. This scanning could be caused
    /// by some operation performed on the worktree, such as reading or
    /// writing a file, or by an event reported by the filesystem.
    scan_id: usize,

    /// The latest scan id that has completed, and whose preceding scans
    /// have all completed. The current `scan_id` could be more than one
    /// greater than the `completed_scan_id` if operations are performed
    /// on the worktree while it is processing a file-system event.
    completed_scan_id: usize,
}
119
/// A git repository discovered within the worktree.
#[derive(Clone, Debug)]
pub struct RepositoryEntry {
    // Path to the actual .git folder.
    // Note: if .git is a file, this points to the folder indicated by the .git file
    pub(crate) git_dir_path: Arc<Path>,
    /// Project entry id of the `.git` directory's entry.
    pub(crate) git_dir_entry_id: ProjectEntryId,
    /// The worktree-relative directory containing the `.git` folder.
    pub(crate) work_directory: RepositoryWorkDirectory,
    // NOTE(review): compared in `changed_repos` to detect updates; presumably
    // the scan in which this entry last changed — confirm.
    pub(crate) scan_id: usize,
    /// Current branch name, if any.
    pub(crate) branch: Option<Arc<str>>,
}
130
131impl RepositoryEntry {
132 // Note that this path should be relative to the worktree root.
133 pub(crate) fn in_dot_git(&self, path: &Path) -> bool {
134 path.starts_with(self.git_dir_path.as_ref())
135 }
136
137 pub fn branch(&self) -> Option<Arc<str>> {
138 self.branch.clone()
139 }
140}
141
/// This path corresponds to the 'content path' (the folder that contains the .git)
/// Stored relative to the worktree root.
#[derive(Clone, Debug, Ord, PartialOrd, Eq, PartialEq)]
pub struct RepositoryWorkDirectory(Arc<Path>);
145
146impl RepositoryWorkDirectory {
147 // Note that these paths should be relative to the worktree root.
148 pub(crate) fn contains(&self, path: &Path) -> bool {
149 path.starts_with(self.0.as_ref())
150 }
151
152 pub(crate) fn relativize(&self, path: &Path) -> Option<RepoPath> {
153 path.strip_prefix(self.0.as_ref())
154 .ok()
155 .map(move |path| RepoPath(path.to_owned()))
156 }
157}
158
159impl Deref for RepositoryWorkDirectory {
160 type Target = Path;
161
162 fn deref(&self) -> &Self::Target {
163 self.0.as_ref()
164 }
165}
166
167impl<'a> From<&'a str> for RepositoryWorkDirectory {
168 fn from(value: &'a str) -> Self {
169 RepositoryWorkDirectory(Path::new(value).into())
170 }
171}
172
/// A path relative to a git repository's work directory.
#[derive(Clone, Debug, Ord, PartialOrd, Eq, PartialEq)]
pub struct RepoPath(PathBuf);
175
176impl AsRef<Path> for RepoPath {
177 fn as_ref(&self) -> &Path {
178 self.0.as_ref()
179 }
180}
181
impl Deref for RepoPath {
    type Target = PathBuf;

    /// Lets a repo path be used anywhere a `&PathBuf` (or, via a further
    /// deref, a `&Path`) is expected.
    fn deref(&self) -> &Self::Target {
        &self.0
    }
}
189
190impl AsRef<Path> for RepositoryWorkDirectory {
191 fn as_ref(&self) -> &Path {
192 self.0.as_ref()
193 }
194}
195
196impl Default for RepositoryWorkDirectory {
197 fn default() -> Self {
198 RepositoryWorkDirectory(Arc::from(Path::new("")))
199 }
200}
201
/// A `Snapshot` augmented with the local-only state needed for scanning:
/// gitignore data, open git repositories, and entry-id allocation.
#[derive(Debug, Clone)]
pub struct LocalSnapshot {
    /// Parsed `.gitignore` files keyed by the absolute path of the directory
    /// containing them, each paired with a `usize` — NOTE(review): presumably
    /// the scan id when the ignore file was read; confirm.
    ignores_by_parent_abs_path: HashMap<Arc<Path>, (Arc<Gitignore>, usize)>,
    // The ProjectEntryId corresponds to the entry for the .git dir
    git_repositories: TreeMap<ProjectEntryId, Arc<Mutex<dyn GitRepository>>>,
    removed_entry_ids: HashMap<u64, ProjectEntryId>,
    /// Shared counter used to allocate ids for newly-discovered entries.
    next_entry_id: Arc<AtomicUsize>,
    snapshot: Snapshot,
}
211
impl Deref for LocalSnapshot {
    type Target = Snapshot;

    /// Exposes the inner `Snapshot`'s fields and methods directly on
    /// `LocalSnapshot`.
    fn deref(&self) -> &Self::Target {
        &self.snapshot
    }
}
219
impl DerefMut for LocalSnapshot {
    /// Mutable access to the inner `Snapshot` through the deref chain.
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.snapshot
    }
}
225
/// Messages sent from the background scanner to the worktree model.
enum ScanState {
    /// A scan has begun.
    Started,
    /// The scanner produced a new snapshot.
    Updated {
        snapshot: LocalSnapshot,
        /// What changed, per path.
        changes: HashMap<Arc<Path>, PathChange>,
        /// Dropped after the update is applied, waking any task waiting on
        /// the corresponding path-change request.
        barrier: Option<barrier::Sender>,
        /// Whether the scan is still in progress after this update.
        scanning: bool,
    },
}
235
/// State kept while a local worktree is shared with collaborators.
struct ShareState {
    project_id: u64,
    /// Publishes each new snapshot to the task that syncs it to the server.
    snapshots_tx: watch::Sender<LocalSnapshot>,
    /// Signalled to make the sync task retry after a failed request.
    resume_updates: watch::Sender<()>,
    _maintain_remote_snapshot: Task<Option<()>>,
}
242
/// Events emitted by a worktree model to its observers.
pub enum Event {
    /// Entries changed; maps each affected path to how it changed.
    UpdatedEntries(HashMap<Arc<Path>, PathChange>),
    /// Git repository entries differed from the previous snapshot.
    UpdatedGitRepositories(Vec<RepositoryEntry>),
}
247
/// `Worktree` is a gpui model; observers receive `Event`s.
impl Entity for Worktree {
    type Event = Event;
}
251
252impl Worktree {
    /// Creates a worktree rooted at `path` on the local filesystem.
    ///
    /// Stats the root path, builds an initial snapshot, then spawns two tasks:
    /// one that applies scan states reported by the background scanner to this
    /// model, and the background scanner itself, which watches the filesystem
    /// (with a 100ms debounce) and keeps the snapshot up to date.
    pub async fn local(
        client: Arc<Client>,
        path: impl Into<Arc<Path>>,
        visible: bool,
        fs: Arc<dyn Fs>,
        next_entry_id: Arc<AtomicUsize>,
        cx: &mut AsyncAppContext,
    ) -> Result<ModelHandle<Self>> {
        // After determining whether the root entry is a file or a directory, populate the
        // snapshot's "root name", which will be used for the purpose of fuzzy matching.
        let abs_path = path.into();
        let metadata = fs
            .metadata(&abs_path)
            .await
            .context("failed to stat worktree path")?;

        Ok(cx.add_model(move |cx: &mut ModelContext<Worktree>| {
            let root_name = abs_path
                .file_name()
                .map_or(String::new(), |f| f.to_string_lossy().to_string());

            // The first scan gets id 1; `completed_scan_id` lags behind until
            // that scan finishes.
            let mut snapshot = LocalSnapshot {
                ignores_by_parent_abs_path: Default::default(),
                removed_entry_ids: Default::default(),
                git_repositories: Default::default(),
                next_entry_id,
                snapshot: Snapshot {
                    id: WorktreeId::from_usize(cx.model_id()),
                    abs_path: abs_path.clone(),
                    root_name: root_name.clone(),
                    root_char_bag: root_name.chars().map(|c| c.to_ascii_lowercase()).collect(),
                    entries_by_path: Default::default(),
                    entries_by_id: Default::default(),
                    repository_entries: Default::default(),
                    scan_id: 1,
                    completed_scan_id: 0,
                },
            };

            // Seed the snapshot with the root entry when the path exists.
            if let Some(metadata) = metadata {
                snapshot.insert_entry(
                    Entry::new(
                        Arc::from(Path::new("")),
                        &metadata,
                        &snapshot.next_entry_id,
                        snapshot.root_char_bag,
                    ),
                    fs.as_ref(),
                );
            }

            let (path_changes_tx, path_changes_rx) = channel::unbounded();
            let (scan_states_tx, mut scan_states_rx) = mpsc::unbounded();

            // Forward scan states from the background scanner into this model,
            // updating the snapshot and notifying observers.
            cx.spawn_weak(|this, mut cx| async move {
                while let Some((state, this)) = scan_states_rx.next().await.zip(this.upgrade(&cx)) {
                    this.update(&mut cx, |this, cx| {
                        let this = this.as_local_mut().unwrap();
                        match state {
                            ScanState::Started => {
                                *this.is_scanning.0.borrow_mut() = true;
                            }
                            ScanState::Updated {
                                snapshot,
                                changes,
                                barrier,
                                scanning,
                            } => {
                                *this.is_scanning.0.borrow_mut() = scanning;
                                this.set_snapshot(snapshot, cx);
                                cx.emit(Event::UpdatedEntries(changes));
                                // Dropping the barrier wakes any task waiting
                                // for this batch of path changes to land.
                                drop(barrier);
                            }
                        }
                        cx.notify();
                    });
                }
            })
            .detach();

            let background_scanner_task = cx.background().spawn({
                let fs = fs.clone();
                let snapshot = snapshot.clone();
                let background = cx.background().clone();
                async move {
                    let events = fs.watch(&abs_path, Duration::from_millis(100)).await;
                    BackgroundScanner::new(
                        snapshot,
                        fs,
                        scan_states_tx,
                        background,
                        path_changes_rx,
                    )
                    .run(events)
                    .await;
                }
            });

            Worktree::Local(LocalWorktree {
                snapshot,
                is_scanning: watch::channel_with(true),
                share: None,
                path_changes_tx,
                _background_scanner_task: background_scanner_task,
                diagnostics: Default::default(),
                diagnostic_summaries: Default::default(),
                client,
                fs,
                visible,
            })
        }))
    }
365
    /// Creates a worktree that mirrors one living on a remote host.
    ///
    /// Spawns a background task that applies `UpdateWorktree` messages to a
    /// shared background snapshot, and a foreground task that copies that
    /// snapshot into the model and resolves any subscriptions waiting for a
    /// given scan id.
    pub fn remote(
        project_remote_id: u64,
        replica_id: ReplicaId,
        worktree: proto::WorktreeMetadata,
        client: Arc<Client>,
        cx: &mut AppContext,
    ) -> ModelHandle<Self> {
        cx.add_model(|cx: &mut ModelContext<Self>| {
            let snapshot = Snapshot {
                id: WorktreeId(worktree.id as usize),
                abs_path: Arc::from(PathBuf::from(worktree.abs_path)),
                root_name: worktree.root_name.clone(),
                root_char_bag: worktree
                    .root_name
                    .chars()
                    .map(|c| c.to_ascii_lowercase())
                    .collect(),
                entries_by_path: Default::default(),
                entries_by_id: Default::default(),
                repository_entries: Default::default(),
                scan_id: 1,
                completed_scan_id: 0,
            };

            let (updates_tx, mut updates_rx) = mpsc::unbounded();
            let background_snapshot = Arc::new(Mutex::new(snapshot.clone()));
            let (mut snapshot_updated_tx, mut snapshot_updated_rx) = watch::channel();

            // Apply remote updates off the main thread, then signal that the
            // background snapshot changed.
            cx.background()
                .spawn({
                    let background_snapshot = background_snapshot.clone();
                    async move {
                        while let Some(update) = updates_rx.next().await {
                            if let Err(error) =
                                background_snapshot.lock().apply_remote_update(update)
                            {
                                log::error!("error applying worktree update: {}", error);
                            }
                            snapshot_updated_tx.send(()).await.ok();
                        }
                    }
                })
                .detach();

            // Copy the freshly-updated background snapshot into the model and
            // wake subscribers whose awaited scan id has now been observed.
            cx.spawn_weak(|this, mut cx| async move {
                while (snapshot_updated_rx.recv().await).is_some() {
                    if let Some(this) = this.upgrade(&cx) {
                        this.update(&mut cx, |this, cx| {
                            let this = this.as_remote_mut().unwrap();
                            this.snapshot = this.background_snapshot.lock().clone();
                            cx.emit(Event::UpdatedEntries(Default::default()));
                            cx.notify();
                            while let Some((scan_id, _)) = this.snapshot_subscriptions.front() {
                                if this.observed_snapshot(*scan_id) {
                                    let (_, tx) = this.snapshot_subscriptions.pop_front().unwrap();
                                    let _ = tx.send(());
                                } else {
                                    // Subscriptions are sorted, so stop at the
                                    // first unsatisfied one.
                                    break;
                                }
                            }
                        });
                    } else {
                        // The worktree model was dropped; stop mirroring.
                        break;
                    }
                }
            })
            .detach();

            Worktree::Remote(RemoteWorktree {
                project_id: project_remote_id,
                replica_id,
                snapshot: snapshot.clone(),
                background_snapshot,
                updates_tx: Some(updates_tx),
                snapshot_subscriptions: Default::default(),
                client: client.clone(),
                diagnostic_summaries: Default::default(),
                visible: worktree.visible,
                disconnected: false,
            })
        })
    }
448
449 pub fn as_local(&self) -> Option<&LocalWorktree> {
450 if let Worktree::Local(worktree) = self {
451 Some(worktree)
452 } else {
453 None
454 }
455 }
456
457 pub fn as_remote(&self) -> Option<&RemoteWorktree> {
458 if let Worktree::Remote(worktree) = self {
459 Some(worktree)
460 } else {
461 None
462 }
463 }
464
465 pub fn as_local_mut(&mut self) -> Option<&mut LocalWorktree> {
466 if let Worktree::Local(worktree) = self {
467 Some(worktree)
468 } else {
469 None
470 }
471 }
472
473 pub fn as_remote_mut(&mut self) -> Option<&mut RemoteWorktree> {
474 if let Worktree::Remote(worktree) = self {
475 Some(worktree)
476 } else {
477 None
478 }
479 }
480
481 pub fn is_local(&self) -> bool {
482 matches!(self, Worktree::Local(_))
483 }
484
485 pub fn is_remote(&self) -> bool {
486 !self.is_local()
487 }
488
489 pub fn snapshot(&self) -> Snapshot {
490 match self {
491 Worktree::Local(worktree) => worktree.snapshot().snapshot,
492 Worktree::Remote(worktree) => worktree.snapshot(),
493 }
494 }
495
496 pub fn scan_id(&self) -> usize {
497 match self {
498 Worktree::Local(worktree) => worktree.snapshot.scan_id,
499 Worktree::Remote(worktree) => worktree.snapshot.scan_id,
500 }
501 }
502
503 pub fn completed_scan_id(&self) -> usize {
504 match self {
505 Worktree::Local(worktree) => worktree.snapshot.completed_scan_id,
506 Worktree::Remote(worktree) => worktree.snapshot.completed_scan_id,
507 }
508 }
509
510 pub fn is_visible(&self) -> bool {
511 match self {
512 Worktree::Local(worktree) => worktree.visible,
513 Worktree::Remote(worktree) => worktree.visible,
514 }
515 }
516
517 pub fn replica_id(&self) -> ReplicaId {
518 match self {
519 Worktree::Local(_) => 0,
520 Worktree::Remote(worktree) => worktree.replica_id,
521 }
522 }
523
524 pub fn diagnostic_summaries(
525 &self,
526 ) -> impl Iterator<Item = (Arc<Path>, LanguageServerId, DiagnosticSummary)> + '_ {
527 match self {
528 Worktree::Local(worktree) => &worktree.diagnostic_summaries,
529 Worktree::Remote(worktree) => &worktree.diagnostic_summaries,
530 }
531 .iter()
532 .flat_map(|(path, summaries)| {
533 summaries
534 .iter()
535 .map(move |(&server_id, &summary)| (path.clone(), server_id, summary))
536 })
537 }
538
539 pub fn abs_path(&self) -> Arc<Path> {
540 match self {
541 Worktree::Local(worktree) => worktree.abs_path.clone(),
542 Worktree::Remote(worktree) => worktree.abs_path.clone(),
543 }
544 }
545}
546
547impl LocalWorktree {
548 pub fn contains_abs_path(&self, path: &Path) -> bool {
549 path.starts_with(&self.abs_path)
550 }
551
552 fn absolutize(&self, path: &Path) -> PathBuf {
553 if path.file_name().is_some() {
554 self.abs_path.join(path)
555 } else {
556 self.abs_path.to_path_buf()
557 }
558 }
559
    /// Loads the file at `path` into a new `Buffer` model with the given
    /// buffer `id`.
    ///
    /// Reads the contents (and a git diff base, when available) via `load`,
    /// builds the text buffer on a background thread, and kicks off an
    /// initial git diff computation.
    pub(crate) fn load_buffer(
        &mut self,
        id: u64,
        path: &Path,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<ModelHandle<Buffer>>> {
        let path = Arc::from(path);
        cx.spawn(move |this, mut cx| async move {
            let (file, contents, diff_base) = this
                .update(&mut cx, |t, cx| t.as_local().unwrap().load(&path, cx))
                .await?;
            let text_buffer = cx
                .background()
                .spawn(async move { text::Buffer::new(0, id, contents) })
                .await;
            Ok(cx.add_model(|cx| {
                let mut buffer = Buffer::build(text_buffer, diff_base, Some(Arc::new(file)));
                buffer.git_diff_recalc(cx);
                buffer
            }))
        })
    }
582
583 pub fn diagnostics_for_path(
584 &self,
585 path: &Path,
586 ) -> Vec<(
587 LanguageServerId,
588 Vec<DiagnosticEntry<Unclipped<PointUtf16>>>,
589 )> {
590 self.diagnostics.get(path).cloned().unwrap_or_default()
591 }
592
    /// Records `diagnostics` reported by `server_id` for `worktree_path`,
    /// updating the per-server summary and, while shared, notifying
    /// collaborators of the new summary.
    ///
    /// Returns `Ok(true)` when either the old or new summary is non-empty,
    /// i.e. when something observable changed.
    pub fn update_diagnostics(
        &mut self,
        server_id: LanguageServerId,
        worktree_path: Arc<Path>,
        diagnostics: Vec<DiagnosticEntry<Unclipped<PointUtf16>>>,
        _: &mut ModelContext<Worktree>,
    ) -> Result<bool> {
        let summaries_by_server_id = self
            .diagnostic_summaries
            .entry(worktree_path.clone())
            .or_default();

        let old_summary = summaries_by_server_id
            .remove(&server_id)
            .unwrap_or_default();

        let new_summary = DiagnosticSummary::new(&diagnostics);
        if new_summary.is_empty() {
            // No diagnostics remain for this server: drop its entry, and drop
            // the whole path once no server has diagnostics for it.
            if let Some(diagnostics_by_server_id) = self.diagnostics.get_mut(&worktree_path) {
                if let Ok(ix) = diagnostics_by_server_id.binary_search_by_key(&server_id, |e| e.0) {
                    diagnostics_by_server_id.remove(ix);
                }
                if diagnostics_by_server_id.is_empty() {
                    self.diagnostics.remove(&worktree_path);
                }
            }
        } else {
            summaries_by_server_id.insert(server_id, new_summary);
            // The per-path list is kept sorted by server id so binary search
            // can be used for insertion and replacement.
            let diagnostics_by_server_id =
                self.diagnostics.entry(worktree_path.clone()).or_default();
            match diagnostics_by_server_id.binary_search_by_key(&server_id, |e| e.0) {
                Ok(ix) => {
                    diagnostics_by_server_id[ix] = (server_id, diagnostics);
                }
                Err(ix) => {
                    diagnostics_by_server_id.insert(ix, (server_id, diagnostics));
                }
            }
        }

        if !old_summary.is_empty() || !new_summary.is_empty() {
            if let Some(share) = self.share.as_ref() {
                self.client
                    .send(proto::UpdateDiagnosticSummary {
                        project_id: share.project_id,
                        worktree_id: self.id().to_proto(),
                        summary: Some(proto::DiagnosticSummary {
                            path: worktree_path.to_string_lossy().to_string(),
                            language_server_id: server_id.0 as u64,
                            error_count: new_summary.error_count as u32,
                            warning_count: new_summary.warning_count as u32,
                        }),
                    })
                    .log_err();
            }
        }

        Ok(!old_summary.is_empty() || !new_summary.is_empty())
    }
652
    /// Replaces the current snapshot with `new_snapshot`, forwarding it to the
    /// sharing task when shared and emitting `UpdatedGitRepositories` for any
    /// repositories that differ between the two snapshots.
    fn set_snapshot(&mut self, new_snapshot: LocalSnapshot, cx: &mut ModelContext<Worktree>) {
        // Diff repositories before overwriting the old snapshot.
        let updated_repos = Self::changed_repos(
            &self.snapshot.repository_entries,
            &new_snapshot.repository_entries,
        );
        self.snapshot = new_snapshot;

        if let Some(share) = self.share.as_mut() {
            *share.snapshots_tx.borrow_mut() = self.snapshot.clone();
        }

        if !updated_repos.is_empty() {
            cx.emit(Event::UpdatedGitRepositories(updated_repos));
        }
    }
668
    /// Computes which repository entries differ between two snapshots by
    /// comparing `.git` dir path and scan id in both directions.
    fn changed_repos(
        old_repos: &TreeMap<RepositoryWorkDirectory, RepositoryEntry>,
        new_repos: &TreeMap<RepositoryWorkDirectory, RepositoryEntry>,
    ) -> Vec<RepositoryEntry> {
        // Records every entry of `a` that has no match in `b` with the same
        // git dir path and scan id. NOTE(review): `b.find` consumes the `b`
        // iterator across iterations of the outer loop, so this relies on both
        // maps yielding entries in a consistent order — confirm before
        // restructuring.
        fn diff<'a>(
            a: impl Iterator<Item = &'a RepositoryEntry>,
            mut b: impl Iterator<Item = &'a RepositoryEntry>,
            updated: &mut HashMap<&'a Path, RepositoryEntry>,
        ) {
            for a_repo in a {
                let matched = b.find(|b_repo| {
                    a_repo.git_dir_path == b_repo.git_dir_path && a_repo.scan_id == b_repo.scan_id
                });

                if matched.is_none() {
                    updated.insert(a_repo.git_dir_path.as_ref(), a_repo.clone());
                }
            }
        }

        let mut updated = HashMap::<&Path, RepositoryEntry>::default();

        // Entries removed or changed, then entries added or changed; the map
        // deduplicates repositories reported by both passes.
        diff(old_repos.values(), new_repos.values(), &mut updated);
        diff(new_repos.values(), old_repos.values(), &mut updated);

        updated.into_values().collect()
    }
696
697 pub fn scan_complete(&self) -> impl Future<Output = ()> {
698 let mut is_scanning_rx = self.is_scanning.1.clone();
699 async move {
700 let mut is_scanning = is_scanning_rx.borrow().clone();
701 while is_scanning {
702 if let Some(value) = is_scanning_rx.recv().await {
703 is_scanning = value;
704 } else {
705 break;
706 }
707 }
708 }
709 }
710
    /// A clone of the current local snapshot.
    pub fn snapshot(&self) -> LocalSnapshot {
        self.snapshot.clone()
    }
714
715 pub fn metadata_proto(&self) -> proto::WorktreeMetadata {
716 proto::WorktreeMetadata {
717 id: self.id().to_proto(),
718 root_name: self.root_name().to_string(),
719 visible: self.visible,
720 abs_path: self.abs_path().as_os_str().to_string_lossy().into(),
721 }
722 }
723
    /// Reads the file at the worktree-relative `path`, returning its `File`
    /// entry, its contents, and — when the file is inside a git repository —
    /// the text stored in the git index, to be used as a diff base.
    fn load(
        &self,
        path: &Path,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<(File, String, Option<String>)>> {
        let handle = cx.handle();
        let path = Arc::from(path);
        let abs_path = self.absolutize(&path);
        let fs = self.fs.clone();
        let snapshot = self.snapshot();

        let mut index_task = None;

        // Start loading the git index text concurrently with the file read.
        if let Some(repo) = snapshot.repo_for(&path) {
            let repo_path = repo.work_directory.relativize(&path).unwrap();
            if let Some(repo) = self.git_repositories.get(&repo.git_dir_entry_id) {
                let repo = repo.to_owned();
                index_task = Some(
                    cx.background()
                        .spawn(async move { repo.lock().load_index_text(&repo_path) }),
                );
            }
        }

        cx.spawn(|this, mut cx| async move {
            let text = fs.load(&abs_path).await?;

            let diff_base = if let Some(index_task) = index_task {
                index_task.await
            } else {
                None
            };

            // Eagerly populate the snapshot with an updated entry for the loaded file
            let entry = this
                .update(&mut cx, |this, cx| {
                    this.as_local().unwrap().refresh_entry(path, None, cx)
                })
                .await?;

            Ok((
                File {
                    entry_id: entry.id,
                    worktree: handle,
                    path: entry.path,
                    mtime: entry.mtime,
                    is_local: true,
                    is_deleted: false,
                },
                text,
                diff_base,
            ))
        })
    }
778
779 pub fn save_buffer(
780 &self,
781 buffer_handle: ModelHandle<Buffer>,
782 path: Arc<Path>,
783 has_changed_file: bool,
784 cx: &mut ModelContext<Worktree>,
785 ) -> Task<Result<(clock::Global, RopeFingerprint, SystemTime)>> {
786 let handle = cx.handle();
787 let buffer = buffer_handle.read(cx);
788
789 let rpc = self.client.clone();
790 let buffer_id = buffer.remote_id();
791 let project_id = self.share.as_ref().map(|share| share.project_id);
792
793 let text = buffer.as_rope().clone();
794 let fingerprint = text.fingerprint();
795 let version = buffer.version();
796 let save = self.write_file(path, text, buffer.line_ending(), cx);
797
798 cx.as_mut().spawn(|mut cx| async move {
799 let entry = save.await?;
800
801 if has_changed_file {
802 let new_file = Arc::new(File {
803 entry_id: entry.id,
804 worktree: handle,
805 path: entry.path,
806 mtime: entry.mtime,
807 is_local: true,
808 is_deleted: false,
809 });
810
811 if let Some(project_id) = project_id {
812 rpc.send(proto::UpdateBufferFile {
813 project_id,
814 buffer_id,
815 file: Some(new_file.to_proto()),
816 })
817 .log_err();
818 }
819
820 buffer_handle.update(&mut cx, |buffer, cx| {
821 if has_changed_file {
822 buffer.file_updated(new_file, cx).detach();
823 }
824 });
825 }
826
827 if let Some(project_id) = project_id {
828 rpc.send(proto::BufferSaved {
829 project_id,
830 buffer_id,
831 version: serialize_version(&version),
832 mtime: Some(entry.mtime.into()),
833 fingerprint: serialize_fingerprint(fingerprint),
834 })?;
835 }
836
837 buffer_handle.update(&mut cx, |buffer, cx| {
838 buffer.did_save(version.clone(), fingerprint, entry.mtime, cx);
839 });
840
841 Ok((version, fingerprint, entry.mtime))
842 })
843 }
844
    /// Creates an empty file or (when `is_dir`) a directory at the
    /// worktree-relative `path`, then rescans it and returns the new entry.
    pub fn create_entry(
        &self,
        path: impl Into<Arc<Path>>,
        is_dir: bool,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<Entry>> {
        let path = path.into();
        let abs_path = self.absolutize(&path);
        let fs = self.fs.clone();
        let write = cx.background().spawn(async move {
            if is_dir {
                fs.create_dir(&abs_path).await
            } else {
                // An empty file with default line endings.
                fs.save(&abs_path, &Default::default(), Default::default())
                    .await
            }
        });

        cx.spawn(|this, mut cx| async move {
            write.await?;
            this.update(&mut cx, |this, cx| {
                this.as_local_mut().unwrap().refresh_entry(path, None, cx)
            })
            .await
        })
    }
871
    /// Saves `text` with the given line ending at the worktree-relative
    /// `path`, then rescans it and returns the updated entry.
    pub fn write_file(
        &self,
        path: impl Into<Arc<Path>>,
        text: Rope,
        line_ending: LineEnding,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<Entry>> {
        let path = path.into();
        let abs_path = self.absolutize(&path);
        let fs = self.fs.clone();
        let write = cx
            .background()
            .spawn(async move { fs.save(&abs_path, &text, line_ending).await });

        cx.spawn(|this, mut cx| async move {
            write.await?;
            this.update(&mut cx, |this, cx| {
                this.as_local_mut().unwrap().refresh_entry(path, None, cx)
            })
            .await
        })
    }
894
    /// Deletes the entry with `entry_id` from disk — recursively for
    /// directories — then waits for the background scanner to process the
    /// removal. Returns `None` when no such entry exists.
    pub fn delete_entry(
        &self,
        entry_id: ProjectEntryId,
        cx: &mut ModelContext<Worktree>,
    ) -> Option<Task<Result<()>>> {
        let entry = self.entry_for_id(entry_id)?.clone();
        let abs_path = self.abs_path.clone();
        let fs = self.fs.clone();

        let delete = cx.background().spawn(async move {
            // An entry path with no file name is the worktree root itself.
            let mut abs_path = fs.canonicalize(&abs_path).await?;
            if entry.path.file_name().is_some() {
                abs_path = abs_path.join(&entry.path);
            }
            if entry.is_file() {
                fs.remove_file(&abs_path, Default::default()).await?;
            } else {
                fs.remove_dir(
                    &abs_path,
                    RemoveOptions {
                        recursive: true,
                        ignore_if_not_exists: false,
                    },
                )
                .await?;
            }
            anyhow::Ok(abs_path)
        });

        Some(cx.spawn(|this, mut cx| async move {
            let abs_path = delete.await?;
            let (tx, mut rx) = barrier::channel();
            // Ask the scanner to rescan the deleted path, then wait for the
            // barrier to signal that the change has been applied.
            this.update(&mut cx, |this, _| {
                this.as_local_mut()
                    .unwrap()
                    .path_changes_tx
                    .try_send((vec![abs_path], tx))
            })?;
            rx.recv().await;
            Ok(())
        }))
    }
937
    /// Moves the entry with `entry_id` to `new_path` on disk, then rescans
    /// both locations and returns the refreshed entry. Returns `None` when
    /// no such entry exists.
    pub fn rename_entry(
        &self,
        entry_id: ProjectEntryId,
        new_path: impl Into<Arc<Path>>,
        cx: &mut ModelContext<Worktree>,
    ) -> Option<Task<Result<Entry>>> {
        let old_path = self.entry_for_id(entry_id)?.path.clone();
        let new_path = new_path.into();
        let abs_old_path = self.absolutize(&old_path);
        let abs_new_path = self.absolutize(&new_path);
        let fs = self.fs.clone();
        let rename = cx.background().spawn(async move {
            fs.rename(&abs_old_path, &abs_new_path, Default::default())
                .await
        });

        Some(cx.spawn(|this, mut cx| async move {
            rename.await?;
            this.update(&mut cx, |this, cx| {
                this.as_local_mut()
                    .unwrap()
                    .refresh_entry(new_path.clone(), Some(old_path), cx)
            })
            .await
        }))
    }
964
    /// Recursively copies the entry with `entry_id` to `new_path` on disk,
    /// then rescans the destination and returns its entry. Returns `None`
    /// when no such entry exists.
    pub fn copy_entry(
        &self,
        entry_id: ProjectEntryId,
        new_path: impl Into<Arc<Path>>,
        cx: &mut ModelContext<Worktree>,
    ) -> Option<Task<Result<Entry>>> {
        let old_path = self.entry_for_id(entry_id)?.path.clone();
        let new_path = new_path.into();
        let abs_old_path = self.absolutize(&old_path);
        let abs_new_path = self.absolutize(&new_path);
        let fs = self.fs.clone();
        let copy = cx.background().spawn(async move {
            copy_recursive(
                fs.as_ref(),
                &abs_old_path,
                &abs_new_path,
                Default::default(),
            )
            .await
        });

        Some(cx.spawn(|this, mut cx| async move {
            copy.await?;
            this.update(&mut cx, |this, cx| {
                this.as_local_mut()
                    .unwrap()
                    .refresh_entry(new_path.clone(), None, cx)
            })
            .await
        }))
    }
996
    /// Asks the background scanner to rescan `path` (and `old_path` if the
    /// entry was moved), waits for the scan to be applied, and returns the
    /// refreshed entry for `path`.
    fn refresh_entry(
        &self,
        path: Arc<Path>,
        old_path: Option<Arc<Path>>,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<Entry>> {
        let fs = self.fs.clone();
        let abs_root_path = self.abs_path.clone();
        let path_changes_tx = self.path_changes_tx.clone();
        cx.spawn_weak(move |this, mut cx| async move {
            let abs_path = fs.canonicalize(&abs_root_path).await?;
            let mut paths = Vec::with_capacity(2);
            // A path with no file name denotes the worktree root itself.
            paths.push(if path.file_name().is_some() {
                abs_path.join(&path)
            } else {
                abs_path.clone()
            });
            if let Some(old_path) = old_path {
                paths.push(if old_path.file_name().is_some() {
                    abs_path.join(&old_path)
                } else {
                    abs_path.clone()
                });
            }

            // Wait for the scanner to process the paths (barrier drops).
            let (tx, mut rx) = barrier::channel();
            path_changes_tx.try_send((paths, tx))?;
            rx.recv().await;
            this.upgrade(&cx)
                .ok_or_else(|| anyhow!("worktree was dropped"))?
                .update(&mut cx, |this, _| {
                    this.entry_for_path(path)
                        .cloned()
                        .ok_or_else(|| anyhow!("failed to read path after update"))
                })
        })
    }
1034
    /// Starts (or resumes) sharing this worktree with collaborators under
    /// `project_id`.
    ///
    /// Sends the existing diagnostic summaries up front, then spawns a task
    /// that streams snapshot updates to the server in chunks, retrying after
    /// failures whenever `resume_updates` is signalled. The returned task
    /// resolves once the first full snapshot has been sent.
    pub fn share(&mut self, project_id: u64, cx: &mut ModelContext<Worktree>) -> Task<Result<()>> {
        let (share_tx, share_rx) = oneshot::channel();

        if let Some(share) = self.share.as_mut() {
            // Already shared: resolve immediately and nudge the update loop
            // to resume in case it stalled after a failed request.
            let _ = share_tx.send(());
            *share.resume_updates.borrow_mut() = ();
        } else {
            let (snapshots_tx, mut snapshots_rx) = watch::channel_with(self.snapshot());
            let (resume_updates_tx, mut resume_updates_rx) = watch::channel();
            let worktree_id = cx.model_id() as u64;

            for (path, summaries) in &self.diagnostic_summaries {
                for (&server_id, summary) in summaries {
                    if let Err(e) = self.client.send(proto::UpdateDiagnosticSummary {
                        project_id,
                        worktree_id,
                        summary: Some(summary.to_proto(server_id, &path)),
                    }) {
                        return Task::ready(Err(e));
                    }
                }
            }

            let _maintain_remote_snapshot = cx.background().spawn({
                let client = self.client.clone();
                async move {
                    let mut share_tx = Some(share_tx);
                    // The first diff is computed against this empty snapshot,
                    // so the initial update carries the whole worktree.
                    let mut prev_snapshot = LocalSnapshot {
                        ignores_by_parent_abs_path: Default::default(),
                        removed_entry_ids: Default::default(),
                        next_entry_id: Default::default(),
                        git_repositories: Default::default(),
                        snapshot: Snapshot {
                            id: WorktreeId(worktree_id as usize),
                            abs_path: Path::new("").into(),
                            root_name: Default::default(),
                            root_char_bag: Default::default(),
                            entries_by_path: Default::default(),
                            entries_by_id: Default::default(),
                            repository_entries: Default::default(),
                            scan_id: 0,
                            completed_scan_id: 0,
                        },
                    };
                    while let Some(snapshot) = snapshots_rx.recv().await {
                        // Small chunks in tests exercise the splitting logic.
                        #[cfg(any(test, feature = "test-support"))]
                        const MAX_CHUNK_SIZE: usize = 2;
                        #[cfg(not(any(test, feature = "test-support")))]
                        const MAX_CHUNK_SIZE: usize = 256;

                        let update =
                            snapshot.build_update(&prev_snapshot, project_id, worktree_id, true);
                        for update in proto::split_worktree_update(update, MAX_CHUNK_SIZE) {
                            // Clear any stale resume signal, then retry the
                            // request until it succeeds or resumption stops.
                            let _ = resume_updates_rx.try_recv();
                            while let Err(error) = client.request(update.clone()).await {
                                log::error!("failed to send worktree update: {}", error);
                                log::info!("waiting to resume updates");
                                if resume_updates_rx.next().await.is_none() {
                                    return Ok(());
                                }
                            }
                        }

                        // Resolve the caller's future after the first full
                        // snapshot has been sent.
                        if let Some(share_tx) = share_tx.take() {
                            let _ = share_tx.send(());
                        }

                        prev_snapshot = snapshot;
                    }

                    Ok::<_, anyhow::Error>(())
                }
                .log_err()
            });

            self.share = Some(ShareState {
                project_id,
                snapshots_tx,
                resume_updates: resume_updates_tx,
                _maintain_remote_snapshot,
            });
        }

        cx.foreground()
            .spawn(async move { share_rx.await.map_err(|_| anyhow!("share ended")) })
    }
1121
1122 pub fn unshare(&mut self) {
1123 self.share.take();
1124 }
1125
    /// Whether this worktree is currently shared with collaborators.
    pub fn is_shared(&self) -> bool {
        self.share.is_some()
    }
1129
1130 pub fn load_index_text(
1131 &self,
1132 repo: RepositoryEntry,
1133 repo_path: RepoPath,
1134 cx: &mut ModelContext<Worktree>,
1135 ) -> Task<Option<String>> {
1136 let Some(git_ptr) = self.git_repositories.get(&repo.git_dir_entry_id).map(|git_ptr| git_ptr.to_owned()) else {
1137 return Task::Ready(Some(None))
1138 };
1139 cx.background()
1140 .spawn(async move { git_ptr.lock().load_index_text(&repo_path) })
1141 }
1142}
1143
1144impl RemoteWorktree {
    /// A clone of the latest snapshot applied on the foreground.
    fn snapshot(&self) -> Snapshot {
        self.snapshot.clone()
    }
1148
1149 pub fn disconnected_from_host(&mut self) {
1150 self.updates_tx.take();
1151 self.snapshot_subscriptions.clear();
1152 self.disconnected = true;
1153 }
1154
    /// Asks the host to save the buffer, then applies the version,
    /// fingerprint, and mtime from the response to the local buffer and
    /// returns them.
    pub fn save_buffer(
        &self,
        buffer_handle: ModelHandle<Buffer>,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<(clock::Global, RopeFingerprint, SystemTime)>> {
        let buffer = buffer_handle.read(cx);
        let buffer_id = buffer.remote_id();
        let version = buffer.version();
        let rpc = self.client.clone();
        let project_id = self.project_id;
        cx.as_mut().spawn(|mut cx| async move {
            let response = rpc
                .request(proto::SaveBuffer {
                    project_id,
                    buffer_id,
                    version: serialize_version(&version),
                })
                .await?;
            let version = deserialize_version(&response.version);
            let fingerprint = deserialize_fingerprint(&response.fingerprint)?;
            let mtime = response
                .mtime
                .ok_or_else(|| anyhow!("missing mtime"))?
                .into();

            buffer_handle.update(&mut cx, |buffer, cx| {
                buffer.did_save(version.clone(), fingerprint, mtime, cx);
            });

            Ok((version, fingerprint, mtime))
        })
    }
1187
1188 pub fn update_from_remote(&mut self, update: proto::UpdateWorktree) {
1189 if let Some(updates_tx) = &self.updates_tx {
1190 updates_tx
1191 .unbounded_send(update)
1192 .expect("consumer runs to completion");
1193 }
1194 }
1195
1196 fn observed_snapshot(&self, scan_id: usize) -> bool {
1197 self.completed_scan_id >= scan_id
1198 }
1199
1200 fn wait_for_snapshot(&mut self, scan_id: usize) -> impl Future<Output = Result<()>> {
1201 let (tx, rx) = oneshot::channel();
1202 if self.observed_snapshot(scan_id) {
1203 let _ = tx.send(());
1204 } else if self.disconnected {
1205 drop(tx);
1206 } else {
1207 match self
1208 .snapshot_subscriptions
1209 .binary_search_by_key(&scan_id, |probe| probe.0)
1210 {
1211 Ok(ix) | Err(ix) => self.snapshot_subscriptions.insert(ix, (scan_id, tx)),
1212 }
1213 }
1214
1215 async move {
1216 rx.await?;
1217 Ok(())
1218 }
1219 }
1220
1221 pub fn update_diagnostic_summary(
1222 &mut self,
1223 path: Arc<Path>,
1224 summary: &proto::DiagnosticSummary,
1225 ) {
1226 let server_id = LanguageServerId(summary.language_server_id as usize);
1227 let summary = DiagnosticSummary {
1228 error_count: summary.error_count as usize,
1229 warning_count: summary.warning_count as usize,
1230 };
1231
1232 if summary.is_empty() {
1233 if let Some(summaries) = self.diagnostic_summaries.get_mut(&path) {
1234 summaries.remove(&server_id);
1235 if summaries.is_empty() {
1236 self.diagnostic_summaries.remove(&path);
1237 }
1238 }
1239 } else {
1240 self.diagnostic_summaries
1241 .entry(path)
1242 .or_default()
1243 .insert(server_id, summary);
1244 }
1245 }
1246
1247 pub fn insert_entry(
1248 &mut self,
1249 entry: proto::Entry,
1250 scan_id: usize,
1251 cx: &mut ModelContext<Worktree>,
1252 ) -> Task<Result<Entry>> {
1253 let wait_for_snapshot = self.wait_for_snapshot(scan_id);
1254 cx.spawn(|this, mut cx| async move {
1255 wait_for_snapshot.await?;
1256 this.update(&mut cx, |worktree, _| {
1257 let worktree = worktree.as_remote_mut().unwrap();
1258 let mut snapshot = worktree.background_snapshot.lock();
1259 let entry = snapshot.insert_entry(entry);
1260 worktree.snapshot = snapshot.clone();
1261 entry
1262 })
1263 })
1264 }
1265
1266 pub(crate) fn delete_entry(
1267 &mut self,
1268 id: ProjectEntryId,
1269 scan_id: usize,
1270 cx: &mut ModelContext<Worktree>,
1271 ) -> Task<Result<()>> {
1272 let wait_for_snapshot = self.wait_for_snapshot(scan_id);
1273 cx.spawn(|this, mut cx| async move {
1274 wait_for_snapshot.await?;
1275 this.update(&mut cx, |worktree, _| {
1276 let worktree = worktree.as_remote_mut().unwrap();
1277 let mut snapshot = worktree.background_snapshot.lock();
1278 snapshot.delete_entry(id);
1279 worktree.snapshot = snapshot.clone();
1280 });
1281 Ok(())
1282 })
1283 }
1284}
1285
impl Snapshot {
    pub fn id(&self) -> WorktreeId {
        self.id
    }

    /// The absolute path of the worktree root on the host filesystem.
    pub fn abs_path(&self) -> &Arc<Path> {
        &self.abs_path
    }

    pub fn contains_entry(&self, entry_id: ProjectEntryId) -> bool {
        self.entries_by_id.get(&entry_id, &()).is_some()
    }

    /// Inserts (or replaces) an entry deserialized from the wire, keeping the
    /// path-ordered and id-ordered indices in sync.
    ///
    /// Errors if the protobuf entry cannot be converted into an [`Entry`].
    pub(crate) fn insert_entry(&mut self, entry: proto::Entry) -> Result<Entry> {
        let entry = Entry::try_from((&self.root_char_bag, entry))?;
        let old_entry = self.entries_by_id.insert_or_replace(
            PathEntry {
                id: entry.id,
                path: entry.path.clone(),
                is_ignored: entry.is_ignored,
                scan_id: 0,
            },
            &(),
        );
        // If an entry with this id already existed (possibly at another
        // path), remove its old path from the path index first.
        if let Some(old_entry) = old_entry {
            self.entries_by_path.remove(&PathKey(old_entry.path), &());
        }
        self.entries_by_path.insert_or_replace(entry.clone(), &());
        Ok(entry)
    }

    /// Removes an entry and everything beneath its path from both indices,
    /// returning the removed entry's path if it existed.
    fn delete_entry(&mut self, entry_id: ProjectEntryId) -> Option<Arc<Path>> {
        let removed_entry = self.entries_by_id.remove(&entry_id, &())?;
        self.entries_by_path = {
            let mut cursor = self.entries_by_path.cursor();
            let mut new_entries_by_path =
                cursor.slice(&TraversalTarget::Path(&removed_entry.path), Bias::Left, &());
            // Skip the removed entry and all of its descendants, removing
            // each from the id index as we go.
            while let Some(entry) = cursor.item() {
                if entry.path.starts_with(&removed_entry.path) {
                    self.entries_by_id.remove(&entry.id, &());
                    cursor.next(&());
                } else {
                    break;
                }
            }
            new_entries_by_path.push_tree(cursor.suffix(&()), &());
            new_entries_by_path
        };

        Some(removed_entry.path)
    }

    /// Applies an incremental worktree update received from the host,
    /// batching all removals and insertions into single sum-tree edits.
    pub(crate) fn apply_remote_update(&mut self, update: proto::UpdateWorktree) -> Result<()> {
        let mut entries_by_path_edits = Vec::new();
        let mut entries_by_id_edits = Vec::new();
        for entry_id in update.removed_entries {
            if let Some(entry) = self.entry_for_id(ProjectEntryId::from_proto(entry_id)) {
                entries_by_path_edits.push(Edit::Remove(PathKey(entry.path.clone())));
                entries_by_id_edits.push(Edit::Remove(entry.id));
            }
        }

        for entry in update.updated_entries {
            let entry = Entry::try_from((&self.root_char_bag, entry))?;
            // An updated entry may have moved; drop its old path entry so the
            // two indices stay consistent.
            if let Some(PathEntry { path, .. }) = self.entries_by_id.get(&entry.id, &()) {
                entries_by_path_edits.push(Edit::Remove(PathKey(path.clone())));
            }
            entries_by_id_edits.push(Edit::Insert(PathEntry {
                id: entry.id,
                path: entry.path.clone(),
                is_ignored: entry.is_ignored,
                scan_id: 0,
            }));
            entries_by_path_edits.push(Edit::Insert(entry));
        }

        self.entries_by_path.edit(entries_by_path_edits, &());
        self.entries_by_id.edit(entries_by_id_edits, &());
        self.scan_id = update.scan_id as usize;
        if update.is_last_update {
            self.completed_scan_id = update.scan_id as usize;
        }

        Ok(())
    }

    pub fn file_count(&self) -> usize {
        self.entries_by_path.summary().file_count
    }

    /// Number of files that are not git-ignored.
    pub fn visible_file_count(&self) -> usize {
        self.entries_by_path.summary().visible_file_count
    }

    /// Returns a traversal positioned at the `start_offset`-th entry, where
    /// the offset counts only entries matching the include flags.
    fn traverse_from_offset(
        &self,
        include_dirs: bool,
        include_ignored: bool,
        start_offset: usize,
    ) -> Traversal {
        let mut cursor = self.entries_by_path.cursor();
        cursor.seek(
            &TraversalTarget::Count {
                count: start_offset,
                include_dirs,
                include_ignored,
            },
            Bias::Right,
            &(),
        );
        Traversal {
            cursor,
            include_dirs,
            include_ignored,
        }
    }

    /// Returns a traversal positioned at the entry for `path` (or where it
    /// would be, if absent).
    fn traverse_from_path(
        &self,
        include_dirs: bool,
        include_ignored: bool,
        path: &Path,
    ) -> Traversal {
        let mut cursor = self.entries_by_path.cursor();
        cursor.seek(&TraversalTarget::Path(path), Bias::Left, &());
        Traversal {
            cursor,
            include_dirs,
            include_ignored,
        }
    }

    /// Iterates over file entries, starting at the `start`-th matching file.
    pub fn files(&self, include_ignored: bool, start: usize) -> Traversal {
        self.traverse_from_offset(false, include_ignored, start)
    }

    /// Iterates over all entries (files and directories).
    pub fn entries(&self, include_ignored: bool) -> Traversal {
        self.traverse_from_offset(true, include_ignored, 0)
    }

    /// Iterates over all entry paths, excluding the worktree root itself
    /// (whose relative path is empty).
    pub fn paths(&self) -> impl Iterator<Item = &Arc<Path>> {
        let empty_path = Path::new("");
        self.entries_by_path
            .cursor::<()>()
            .filter(move |entry| entry.path.as_ref() != empty_path)
            .map(|entry| &entry.path)
    }

    /// Iterates over the direct children of `parent_path`.
    fn child_entries<'a>(&'a self, parent_path: &'a Path) -> ChildEntriesIter<'a> {
        let mut cursor = self.entries_by_path.cursor();
        cursor.seek(&TraversalTarget::Path(parent_path), Bias::Right, &());
        let traversal = Traversal {
            cursor,
            include_dirs: true,
            include_ignored: true,
        };
        ChildEntriesIter {
            traversal,
            parent_path,
        }
    }

    pub fn root_entry(&self) -> Option<&Entry> {
        self.entry_for_path("")
    }

    pub fn root_name(&self) -> &str {
        &self.root_name
    }

    /// The repository entry whose work directory is the worktree root, if any.
    pub fn root_git_entry(&self) -> Option<RepositoryEntry> {
        self.repository_entries
            .get(&"".into())
            .map(|entry| entry.to_owned())
    }

    pub fn scan_id(&self) -> usize {
        self.scan_id
    }

    /// Looks up an entry by its worktree-relative path, returning `None`
    /// unless an entry exists at exactly that path.
    pub fn entry_for_path(&self, path: impl AsRef<Path>) -> Option<&Entry> {
        let path = path.as_ref();
        self.traverse_from_path(true, true, path)
            .entry()
            .and_then(|entry| {
                if entry.path.as_ref() == path {
                    Some(entry)
                } else {
                    None
                }
            })
    }

    pub fn entry_for_id(&self, id: ProjectEntryId) -> Option<&Entry> {
        let entry = self.entries_by_id.get(&id, &())?;
        self.entry_for_path(&entry.path)
    }

    pub fn inode_for_path(&self, path: impl AsRef<Path>) -> Option<u64> {
        self.entry_for_path(path.as_ref()).map(|e| e.inode)
    }
}
1488
impl LocalSnapshot {
    /// Finds the repository entry whose work directory contains `path`,
    /// preferring the longest (deepest) containing work directory.
    ///
    /// NOTE(review): the `break` assumes that once a containing work
    /// directory is shorter than the current best match, no later candidate
    /// can be longer — this relies on `repository_entries` iteration order;
    /// confirm against the `TreeMap` ordering.
    pub(crate) fn repo_for(&self, path: &Path) -> Option<RepositoryEntry> {
        let mut max_len = 0;
        let mut current_candidate = None;
        for (work_directory, repo) in (&self.repository_entries).iter() {
            if work_directory.contains(path) {
                if work_directory.0.as_os_str().len() >= max_len {
                    current_candidate = Some(repo);
                    max_len = work_directory.0.as_os_str().len();
                } else {
                    break;
                }
            }
        }

        current_candidate.map(|entry| entry.to_owned())
    }

    /// Finds the repository whose `.git` directory contains `path`, returning
    /// its work directory together with the live repository handle.
    pub(crate) fn repo_for_metadata(
        &self,
        path: &Path,
    ) -> Option<(RepositoryWorkDirectory, Arc<Mutex<dyn GitRepository>>)> {
        self.repository_entries
            .iter()
            .find(|(_, repo)| repo.in_dot_git(path))
            .map(|(work_directory, entry)| {
                (
                    work_directory.to_owned(),
                    self.git_repositories
                        .get(&entry.git_dir_entry_id)
                        .expect("These two data structures should be in sync")
                        .to_owned(),
                )
            })
    }

    /// Builds an update message containing the entire snapshot, as sent when
    /// a worktree is first shared.
    #[cfg(test)]
    pub(crate) fn build_initial_update(&self, project_id: u64) -> proto::UpdateWorktree {
        let root_name = self.root_name.clone();
        proto::UpdateWorktree {
            project_id,
            worktree_id: self.id().to_proto(),
            abs_path: self.abs_path().to_string_lossy().into(),
            root_name,
            updated_entries: self.entries_by_path.iter().map(Into::into).collect(),
            removed_entries: Default::default(),
            scan_id: self.scan_id as u64,
            is_last_update: true,
        }
    }

    /// Diffs this snapshot against `other` (an older snapshot), producing an
    /// update message with the entries that were added/changed and the ids of
    /// entries that were removed.
    ///
    /// Both id-ordered indices are walked in lockstep (merge-join on entry
    /// id); an entry counts as updated when its `scan_id` changed.
    pub(crate) fn build_update(
        &self,
        other: &Self,
        project_id: u64,
        worktree_id: u64,
        include_ignored: bool,
    ) -> proto::UpdateWorktree {
        let mut updated_entries = Vec::new();
        let mut removed_entries = Vec::new();
        let mut self_entries = self
            .entries_by_id
            .cursor::<()>()
            .filter(|e| include_ignored || !e.is_ignored)
            .peekable();
        let mut other_entries = other
            .entries_by_id
            .cursor::<()>()
            .filter(|e| include_ignored || !e.is_ignored)
            .peekable();
        loop {
            match (self_entries.peek(), other_entries.peek()) {
                (Some(self_entry), Some(other_entry)) => {
                    match Ord::cmp(&self_entry.id, &other_entry.id) {
                        Ordering::Less => {
                            // Present only in the new snapshot: added.
                            let entry = self.entry_for_id(self_entry.id).unwrap().into();
                            updated_entries.push(entry);
                            self_entries.next();
                        }
                        Ordering::Equal => {
                            // Present in both: include only if rescanned.
                            if self_entry.scan_id != other_entry.scan_id {
                                let entry = self.entry_for_id(self_entry.id).unwrap().into();
                                updated_entries.push(entry);
                            }

                            self_entries.next();
                            other_entries.next();
                        }
                        Ordering::Greater => {
                            // Present only in the old snapshot: removed.
                            removed_entries.push(other_entry.id.to_proto());
                            other_entries.next();
                        }
                    }
                }
                (Some(self_entry), None) => {
                    let entry = self.entry_for_id(self_entry.id).unwrap().into();
                    updated_entries.push(entry);
                    self_entries.next();
                }
                (None, Some(other_entry)) => {
                    removed_entries.push(other_entry.id.to_proto());
                    other_entries.next();
                }
                (None, None) => break,
            }
        }

        proto::UpdateWorktree {
            project_id,
            worktree_id,
            abs_path: self.abs_path().to_string_lossy().into(),
            root_name: self.root_name().to_string(),
            updated_entries,
            removed_entries,
            scan_id: self.scan_id as u64,
            is_last_update: self.completed_scan_id == self.scan_id,
        }
    }

    /// Inserts an entry discovered by the scanner. When the entry is a
    /// `.gitignore` file, its rules are (re)loaded synchronously.
    fn insert_entry(&mut self, mut entry: Entry, fs: &dyn Fs) -> Entry {
        if entry.is_file() && entry.path.file_name() == Some(&GITIGNORE) {
            let abs_path = self.abs_path.join(&entry.path);
            match smol::block_on(build_gitignore(&abs_path, fs)) {
                Ok(ignore) => {
                    self.ignores_by_parent_abs_path.insert(
                        abs_path.parent().unwrap().into(),
                        (Arc::new(ignore), self.scan_id),
                    );
                }
                Err(error) => {
                    log::error!(
                        "error loading .gitignore file {:?} - {:?}",
                        &entry.path,
                        error
                    );
                }
            }
        }

        self.reuse_entry_id(&mut entry);

        // Don't demote an already-scanned directory back to pending.
        if entry.kind == EntryKind::PendingDir {
            if let Some(existing_entry) =
                self.entries_by_path.get(&PathKey(entry.path.clone()), &())
            {
                entry.kind = existing_entry.kind;
            }
        }

        let scan_id = self.scan_id;
        let removed = self.entries_by_path.insert_or_replace(entry.clone(), &());
        // If a different entry previously occupied this path, drop its stale
        // id-index record.
        if let Some(removed) = removed {
            if removed.id != entry.id {
                self.entries_by_id.remove(&removed.id, &());
            }
        }
        self.entries_by_id.insert_or_replace(
            PathEntry {
                id: entry.id,
                path: entry.path.clone(),
                is_ignored: entry.is_ignored,
                scan_id,
            },
            &(),
        );

        entry
    }

    /// Records the scanned contents of a directory: marks the parent as a
    /// real directory, registers any `.gitignore` found in it, registers a
    /// git repository when the directory is a `.git` dir, and inserts all of
    /// its children.
    fn populate_dir(
        &mut self,
        parent_path: Arc<Path>,
        entries: impl IntoIterator<Item = Entry>,
        ignore: Option<Arc<Gitignore>>,
        fs: &dyn Fs,
    ) {
        let mut parent_entry = if let Some(parent_entry) =
            self.entries_by_path.get(&PathKey(parent_path.clone()), &())
        {
            parent_entry.clone()
        } else {
            log::warn!(
                "populating a directory {:?} that has been removed",
                parent_path
            );
            return;
        };

        match parent_entry.kind {
            EntryKind::PendingDir => {
                parent_entry.kind = EntryKind::Dir;
            }
            EntryKind::Dir => {}
            // The path no longer refers to a directory; nothing to populate.
            _ => return,
        }

        if let Some(ignore) = ignore {
            self.ignores_by_parent_abs_path.insert(
                self.abs_path.join(&parent_path).into(),
                (ignore, self.scan_id),
            );
        }

        if parent_path.file_name() == Some(&DOT_GIT) {
            let abs_path = self.abs_path.join(&parent_path);
            // The repository's work directory is the parent of `.git`.
            let content_path: Arc<Path> = parent_path.parent().unwrap().into();

            let key = RepositoryWorkDirectory(content_path.clone());
            if self.repository_entries.get(&key).is_none() {
                if let Some(repo) = fs.open_repo(abs_path.as_path()) {
                    let repo_lock = repo.lock();
                    self.repository_entries.insert(
                        key.clone(),
                        RepositoryEntry {
                            git_dir_path: parent_path.clone(),
                            git_dir_entry_id: parent_entry.id,
                            work_directory: key,
                            scan_id: 0,
                            branch: repo_lock.branch_name().map(Into::into),
                        },
                    );
                    drop(repo_lock);

                    self.git_repositories.insert(parent_entry.id, repo)
                }
            }
        }

        let mut entries_by_path_edits = vec![Edit::Insert(parent_entry)];
        let mut entries_by_id_edits = Vec::new();

        for mut entry in entries {
            self.reuse_entry_id(&mut entry);
            entries_by_id_edits.push(Edit::Insert(PathEntry {
                id: entry.id,
                path: entry.path.clone(),
                is_ignored: entry.is_ignored,
                scan_id: self.scan_id,
            }));
            entries_by_path_edits.push(Edit::Insert(entry));
        }

        self.entries_by_path.edit(entries_by_path_edits, &());
        self.entries_by_id.edit(entries_by_id_edits, &());
    }

    /// Keeps entry ids stable across rescans: reuses the id of a removed
    /// entry with the same inode (e.g. a rename seen as remove+add), or of an
    /// existing entry at the same path.
    fn reuse_entry_id(&mut self, entry: &mut Entry) {
        if let Some(removed_entry_id) = self.removed_entry_ids.remove(&entry.inode) {
            entry.id = removed_entry_id;
        } else if let Some(existing_entry) = self.entry_for_path(&entry.path) {
            entry.id = existing_entry.id;
        }
    }

    /// Removes `path` and all of its descendants from the snapshot, recording
    /// removed ids by inode so they can be reused if the paths reappear.
    /// Also bumps the scan id of any `.gitignore` or repository affected.
    fn remove_path(&mut self, path: &Path) {
        let mut new_entries;
        let removed_entries;
        {
            let mut cursor = self.entries_by_path.cursor::<TraversalProgress>();
            new_entries = cursor.slice(&TraversalTarget::Path(path), Bias::Left, &());
            removed_entries = cursor.slice(&TraversalTarget::PathSuccessor(path), Bias::Left, &());
            new_entries.push_tree(cursor.suffix(&()), &());
        }
        self.entries_by_path = new_entries;

        let mut entries_by_id_edits = Vec::new();
        for entry in removed_entries.cursor::<()>() {
            // For each inode, remember the highest removed id for later reuse.
            let removed_entry_id = self
                .removed_entry_ids
                .entry(entry.inode)
                .or_insert(entry.id);
            *removed_entry_id = cmp::max(*removed_entry_id, entry.id);
            entries_by_id_edits.push(Edit::Remove(entry.id));
        }
        self.entries_by_id.edit(entries_by_id_edits, &());

        if path.file_name() == Some(&GITIGNORE) {
            let abs_parent_path = self.abs_path.join(path.parent().unwrap());
            if let Some((_, scan_id)) = self
                .ignores_by_parent_abs_path
                .get_mut(abs_parent_path.as_path())
            {
                *scan_id = self.snapshot.scan_id;
            }
        } else if path.file_name() == Some(&DOT_GIT) {
            let repo_entry_key = RepositoryWorkDirectory(path.parent().unwrap().into());
            self.snapshot
                .repository_entries
                .update(&repo_entry_key, |repo| repo.scan_id = self.snapshot.scan_id);
        }
    }

    /// Collects the inodes of all ancestors of `path` that exist in the
    /// snapshot (used to detect symlink cycles during scanning).
    fn ancestor_inodes_for_path(&self, path: &Path) -> TreeSet<u64> {
        let mut inodes = TreeSet::default();
        for ancestor in path.ancestors().skip(1) {
            if let Some(entry) = self.entry_for_path(ancestor) {
                inodes.insert(entry.inode);
            }
        }
        inodes
    }

    /// Builds the stack of `.gitignore` rules applying to `abs_path` by
    /// walking its ancestors from the outermost inward. If any ancestor is
    /// itself ignored (or the path is), collapses to the ignore-everything
    /// stack.
    fn ignore_stack_for_abs_path(&self, abs_path: &Path, is_dir: bool) -> Arc<IgnoreStack> {
        let mut new_ignores = Vec::new();
        for ancestor in abs_path.ancestors().skip(1) {
            if let Some((ignore, _)) = self.ignores_by_parent_abs_path.get(ancestor) {
                new_ignores.push((ancestor, Some(ignore.clone())));
            } else {
                new_ignores.push((ancestor, None));
            }
        }

        let mut ignore_stack = IgnoreStack::none();
        for (parent_abs_path, ignore) in new_ignores.into_iter().rev() {
            if ignore_stack.is_abs_path_ignored(parent_abs_path, true) {
                ignore_stack = IgnoreStack::all();
                break;
            } else if let Some(ignore) = ignore {
                ignore_stack = ignore_stack.append(parent_abs_path.into(), ignore);
            }
        }

        if ignore_stack.is_abs_path_ignored(abs_path, is_dir) {
            ignore_stack = IgnoreStack::all();
        }

        ignore_stack
    }
}
1818
1819async fn build_gitignore(abs_path: &Path, fs: &dyn Fs) -> Result<Gitignore> {
1820 let contents = fs.load(abs_path).await?;
1821 let parent = abs_path.parent().unwrap_or_else(|| Path::new("/"));
1822 let mut builder = GitignoreBuilder::new(parent);
1823 for line in contents.lines() {
1824 builder.add_line(Some(abs_path.into()), line)?;
1825 }
1826 Ok(builder.build()?)
1827}
1828
1829impl WorktreeId {
1830 pub fn from_usize(handle_id: usize) -> Self {
1831 Self(handle_id)
1832 }
1833
1834 pub(crate) fn from_proto(id: u64) -> Self {
1835 Self(id as usize)
1836 }
1837
1838 pub fn to_proto(&self) -> u64 {
1839 self.0 as u64
1840 }
1841
1842 pub fn to_usize(&self) -> usize {
1843 self.0
1844 }
1845}
1846
1847impl fmt::Display for WorktreeId {
1848 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1849 self.0.fmt(f)
1850 }
1851}
1852
1853impl Deref for Worktree {
1854 type Target = Snapshot;
1855
1856 fn deref(&self) -> &Self::Target {
1857 match self {
1858 Worktree::Local(worktree) => &worktree.snapshot,
1859 Worktree::Remote(worktree) => &worktree.snapshot,
1860 }
1861 }
1862}
1863
1864impl Deref for LocalWorktree {
1865 type Target = LocalSnapshot;
1866
1867 fn deref(&self) -> &Self::Target {
1868 &self.snapshot
1869 }
1870}
1871
1872impl Deref for RemoteWorktree {
1873 type Target = Snapshot;
1874
1875 fn deref(&self) -> &Self::Target {
1876 &self.snapshot
1877 }
1878}
1879
impl fmt::Debug for LocalWorktree {
    // Delegates to the snapshot's Debug impl (resolved through deref).
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        self.snapshot.fmt(f)
    }
}
1885
impl fmt::Debug for Snapshot {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Local wrappers that render the sum trees compactly:
        // entries_by_path as a path -> id map, entries_by_id as a list.
        struct EntriesById<'a>(&'a SumTree<PathEntry>);
        struct EntriesByPath<'a>(&'a SumTree<Entry>);

        impl<'a> fmt::Debug for EntriesByPath<'a> {
            fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
                f.debug_map()
                    .entries(self.0.iter().map(|entry| (&entry.path, entry.id)))
                    .finish()
            }
        }

        impl<'a> fmt::Debug for EntriesById<'a> {
            fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
                f.debug_list().entries(self.0.iter()).finish()
            }
        }

        f.debug_struct("Snapshot")
            .field("id", &self.id)
            .field("root_name", &self.root_name)
            .field("entries_by_path", &EntriesByPath(&self.entries_by_path))
            .field("entries_by_id", &EntriesById(&self.entries_by_id))
            .finish()
    }
}
1913
/// A handle to a file within a worktree, implementing [`language::File`].
#[derive(Clone, PartialEq)]
pub struct File {
    /// The worktree containing this file.
    pub worktree: ModelHandle<Worktree>,
    /// Path of the file relative to the worktree root.
    pub path: Arc<Path>,
    /// Last-known modification time.
    pub mtime: SystemTime,
    /// Id of the corresponding worktree entry.
    pub(crate) entry_id: ProjectEntryId,
    /// Whether this file belongs to a local (as opposed to remote) worktree.
    pub(crate) is_local: bool,
    /// Whether the file has been deleted from the worktree.
    pub(crate) is_deleted: bool,
}
1923
impl language::File for File {
    /// Returns `self` as a local file only when it belongs to a local worktree.
    fn as_local(&self) -> Option<&dyn language::LocalFile> {
        if self.is_local {
            Some(self)
        } else {
            None
        }
    }

    fn mtime(&self) -> SystemTime {
        self.mtime
    }

    /// The file's path relative to its worktree root.
    fn path(&self) -> &Arc<Path> {
        &self.path
    }

    /// Builds a path for display: the worktree-relative path prefixed with
    /// the worktree's name (for visible worktrees) or its absolute path, with
    /// the home directory abbreviated to `~` for local worktrees.
    fn full_path(&self, cx: &AppContext) -> PathBuf {
        let mut full_path = PathBuf::new();
        let worktree = self.worktree.read(cx);

        if worktree.is_visible() {
            full_path.push(worktree.root_name());
        } else {
            let path = worktree.abs_path();

            if worktree.is_local() && path.starts_with(HOME.as_path()) {
                full_path.push("~");
                full_path.push(path.strip_prefix(HOME.as_path()).unwrap());
            } else {
                full_path.push(path)
            }
        }

        // An empty relative path means this file *is* the worktree root, so
        // there is nothing further to append.
        if self.path.components().next().is_some() {
            full_path.push(&self.path);
        }

        full_path
    }

    /// Returns the last component of this handle's absolute path. If this handle refers to the root
    /// of its worktree, then this method will return the name of the worktree itself.
    fn file_name<'a>(&'a self, cx: &'a AppContext) -> &'a OsStr {
        self.path
            .file_name()
            .unwrap_or_else(|| OsStr::new(&self.worktree.read(cx).root_name))
    }

    fn is_deleted(&self) -> bool {
        self.is_deleted
    }

    fn as_any(&self) -> &dyn Any {
        self
    }

    /// Serializes this file handle for transmission to collaborators.
    fn to_proto(&self) -> rpc::proto::File {
        rpc::proto::File {
            worktree_id: self.worktree.id() as u64,
            entry_id: self.entry_id.to_proto(),
            path: self.path.to_string_lossy().into(),
            mtime: Some(self.mtime.into()),
            is_deleted: self.is_deleted,
        }
    }
}
1991
impl language::LocalFile for File {
    /// The file's absolute path on the local filesystem.
    ///
    /// Panics if the worktree is not local; callers obtain this handle via
    /// `as_local`, which already checks that.
    fn abs_path(&self, cx: &AppContext) -> PathBuf {
        self.worktree
            .read(cx)
            .as_local()
            .unwrap()
            .abs_path
            .join(&self.path)
    }

    /// Loads the file's contents from disk on a background thread.
    fn load(&self, cx: &AppContext) -> Task<Result<String>> {
        let worktree = self.worktree.read(cx).as_local().unwrap();
        let abs_path = worktree.absolutize(&self.path);
        let fs = worktree.fs.clone();
        cx.background()
            .spawn(async move { fs.load(&abs_path).await })
    }

    /// Notifies remote collaborators that this buffer was reloaded from disk,
    /// but only when the worktree is currently shared.
    fn buffer_reloaded(
        &self,
        buffer_id: u64,
        version: &clock::Global,
        fingerprint: RopeFingerprint,
        line_ending: LineEnding,
        mtime: SystemTime,
        cx: &mut AppContext,
    ) {
        let worktree = self.worktree.read(cx).as_local().unwrap();
        if let Some(project_id) = worktree.share.as_ref().map(|share| share.project_id) {
            worktree
                .client
                .send(proto::BufferReloaded {
                    project_id,
                    buffer_id,
                    version: serialize_version(version),
                    mtime: Some(mtime.into()),
                    fingerprint: serialize_fingerprint(fingerprint),
                    line_ending: serialize_line_ending(line_ending) as i32,
                })
                .log_err();
        }
    }
}
2035
impl File {
    /// Deserializes a file handle received from a collaborator.
    ///
    /// Errors if `worktree` is not remote or if the message's worktree id
    /// does not match the given worktree.
    pub fn from_proto(
        proto: rpc::proto::File,
        worktree: ModelHandle<Worktree>,
        cx: &AppContext,
    ) -> Result<Self> {
        let worktree_id = worktree
            .read(cx)
            .as_remote()
            .ok_or_else(|| anyhow!("not remote"))?
            .id();

        if worktree_id.to_proto() != proto.worktree_id {
            return Err(anyhow!("worktree id does not match file"));
        }

        Ok(Self {
            worktree,
            path: Path::new(&proto.path).into(),
            mtime: proto.mtime.ok_or_else(|| anyhow!("no timestamp"))?.into(),
            entry_id: ProjectEntryId::from_proto(proto.entry_id),
            is_local: false,
            is_deleted: proto.is_deleted,
        })
    }

    /// Downcasts a trait-object file to this concrete type, if it is one.
    pub fn from_dyn(file: Option<&Arc<dyn language::File>>) -> Option<&Self> {
        file.and_then(|f| f.as_any().downcast_ref())
    }

    pub fn worktree_id(&self, cx: &AppContext) -> WorktreeId {
        self.worktree.read(cx).id()
    }

    /// The project entry id of this file, or `None` once it has been deleted.
    pub fn project_entry_id(&self, _: &AppContext) -> Option<ProjectEntryId> {
        if self.is_deleted {
            None
        } else {
            Some(self.entry_id)
        }
    }
}
2078
/// A single filesystem entry (file or directory) within a worktree snapshot.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct Entry {
    /// Stable, project-wide id for this entry.
    pub id: ProjectEntryId,
    pub kind: EntryKind,
    /// Path relative to the worktree root.
    pub path: Arc<Path>,
    pub inode: u64,
    pub mtime: SystemTime,
    pub is_symlink: bool,
    /// Whether the entry is excluded by a `.gitignore` rule.
    pub is_ignored: bool,
}
2089
/// The kind of a worktree [`Entry`].
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum EntryKind {
    /// A directory that has been discovered but whose contents have not yet
    /// been scanned.
    PendingDir,
    /// A fully scanned directory.
    Dir,
    /// A regular file, carrying a character bag of its path for fuzzy matching.
    File(CharBag),
}
2096
/// How a path changed between two snapshots.
#[derive(Clone, Copy, Debug)]
pub enum PathChange {
    Added,
    Removed,
    Updated,
    /// Used when it cannot be determined whether the path was added or updated.
    AddedOrUpdated,
}
2104
2105impl Entry {
2106 fn new(
2107 path: Arc<Path>,
2108 metadata: &fs::Metadata,
2109 next_entry_id: &AtomicUsize,
2110 root_char_bag: CharBag,
2111 ) -> Self {
2112 Self {
2113 id: ProjectEntryId::new(next_entry_id),
2114 kind: if metadata.is_dir {
2115 EntryKind::PendingDir
2116 } else {
2117 EntryKind::File(char_bag_for_path(root_char_bag, &path))
2118 },
2119 path,
2120 inode: metadata.inode,
2121 mtime: metadata.mtime,
2122 is_symlink: metadata.is_symlink,
2123 is_ignored: false,
2124 }
2125 }
2126
2127 pub fn is_dir(&self) -> bool {
2128 matches!(self.kind, EntryKind::Dir | EntryKind::PendingDir)
2129 }
2130
2131 pub fn is_file(&self) -> bool {
2132 matches!(self.kind, EntryKind::File(_))
2133 }
2134}
2135
2136impl sum_tree::Item for Entry {
2137 type Summary = EntrySummary;
2138
2139 fn summary(&self) -> Self::Summary {
2140 let visible_count = if self.is_ignored { 0 } else { 1 };
2141 let file_count;
2142 let visible_file_count;
2143 if self.is_file() {
2144 file_count = 1;
2145 visible_file_count = visible_count;
2146 } else {
2147 file_count = 0;
2148 visible_file_count = 0;
2149 }
2150
2151 EntrySummary {
2152 max_path: self.path.clone(),
2153 count: 1,
2154 visible_count,
2155 file_count,
2156 visible_file_count,
2157 }
2158 }
2159}
2160
2161impl sum_tree::KeyedItem for Entry {
2162 type Key = PathKey;
2163
2164 fn key(&self) -> Self::Key {
2165 PathKey(self.path.clone())
2166 }
2167}
2168
/// Sum-tree summary for [`Entry`]: tracks the maximum (rightmost) path plus
/// entry/file counts, with "visible" variants excluding git-ignored entries.
#[derive(Clone, Debug)]
pub struct EntrySummary {
    max_path: Arc<Path>,
    count: usize,
    visible_count: usize,
    file_count: usize,
    visible_file_count: usize,
}
2177
2178impl Default for EntrySummary {
2179 fn default() -> Self {
2180 Self {
2181 max_path: Arc::from(Path::new("")),
2182 count: 0,
2183 visible_count: 0,
2184 file_count: 0,
2185 visible_file_count: 0,
2186 }
2187 }
2188}
2189
2190impl sum_tree::Summary for EntrySummary {
2191 type Context = ();
2192
2193 fn add_summary(&mut self, rhs: &Self, _: &()) {
2194 self.max_path = rhs.max_path.clone();
2195 self.count += rhs.count;
2196 self.visible_count += rhs.visible_count;
2197 self.file_count += rhs.file_count;
2198 self.visible_file_count += rhs.visible_file_count;
2199 }
2200}
2201
/// Record in the id-ordered secondary index: maps a stable entry id to its
/// current path.
#[derive(Clone, Debug)]
struct PathEntry {
    id: ProjectEntryId,
    path: Arc<Path>,
    is_ignored: bool,
    /// Scan in which this entry was last updated.
    scan_id: usize,
}
2209
impl sum_tree::Item for PathEntry {
    type Summary = PathEntrySummary;

    // The id index only needs to track the maximum id for seeking.
    fn summary(&self) -> Self::Summary {
        PathEntrySummary { max_id: self.id }
    }
}
2217
impl sum_tree::KeyedItem for PathEntry {
    type Key = ProjectEntryId;

    // Path entries are keyed (and ordered) by their entry id.
    fn key(&self) -> Self::Key {
        self.id
    }
}
2225
/// Summary for the id-ordered entry index; tracks the maximum entry id.
#[derive(Clone, Debug, Default)]
struct PathEntrySummary {
    max_id: ProjectEntryId,
}
2230
2231impl sum_tree::Summary for PathEntrySummary {
2232 type Context = ();
2233
2234 fn add_summary(&mut self, summary: &Self, _: &Self::Context) {
2235 self.max_id = summary.max_id;
2236 }
2237}
2238
impl<'a> sum_tree::Dimension<'a, PathEntrySummary> for ProjectEntryId {
    // Advancing this dimension tracks the maximum entry id seen so far.
    fn add_summary(&mut self, summary: &'a PathEntrySummary, _: &()) {
        *self = summary.max_id;
    }
}
2244
/// Ordering key for entries in the path-ordered sum tree.
#[derive(Clone, Debug, Eq, PartialEq, Ord, PartialOrd)]
pub struct PathKey(Arc<Path>);
2247
2248impl Default for PathKey {
2249 fn default() -> Self {
2250 Self(Path::new("").into())
2251 }
2252}
2253
2254impl<'a> sum_tree::Dimension<'a, EntrySummary> for PathKey {
2255 fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
2256 self.0 = summary.max_path.clone();
2257 }
2258}
2259
/// Scans a worktree's directory tree in the background, keeping the shared
/// `LocalSnapshot` up to date with filesystem events and refresh requests.
struct BackgroundScanner {
    snapshot: Mutex<LocalSnapshot>,
    fs: Arc<dyn Fs>,
    /// Reports scan progress/state back to the worktree.
    status_updates_tx: UnboundedSender<ScanState>,
    executor: Arc<executor::Background>,
    /// Requests to rescan specific paths; the barrier signals completion.
    refresh_requests_rx: channel::Receiver<(Vec<PathBuf>, barrier::Sender)>,
    /// The last snapshot reported to observers.
    /// NOTE(review): the `Vec<Arc<Path>>` appears to accumulate paths changed
    /// since the last report — confirm against the reporting code.
    prev_state: Mutex<(Snapshot, Vec<Arc<Path>>)>,
    finished_initial_scan: bool,
}
2269
2270impl BackgroundScanner {
2271 fn new(
2272 snapshot: LocalSnapshot,
2273 fs: Arc<dyn Fs>,
2274 status_updates_tx: UnboundedSender<ScanState>,
2275 executor: Arc<executor::Background>,
2276 refresh_requests_rx: channel::Receiver<(Vec<PathBuf>, barrier::Sender)>,
2277 ) -> Self {
2278 Self {
2279 fs,
2280 status_updates_tx,
2281 executor,
2282 refresh_requests_rx,
2283 prev_state: Mutex::new((snapshot.snapshot.clone(), Vec::new())),
2284 snapshot: Mutex::new(snapshot),
2285 finished_initial_scan: false,
2286 }
2287 }
2288
    /// Main scanner loop: performs the initial recursive scan, then processes
    /// filesystem events and path-refresh requests until the worktree is
    /// dropped (both channels close).
    async fn run(
        &mut self,
        mut events_rx: Pin<Box<dyn Send + Stream<Item = Vec<fsevent::Event>>>>,
    ) {
        use futures::FutureExt as _;

        let (root_abs_path, root_inode) = {
            let snapshot = self.snapshot.lock();
            (
                snapshot.abs_path.clone(),
                snapshot.root_entry().map(|e| e.inode),
            )
        };

        // Populate ignores above the root, so rules from ancestor directories
        // apply to the worktree's contents.
        let ignore_stack;
        for ancestor in root_abs_path.ancestors().skip(1) {
            if let Ok(ignore) = build_gitignore(&ancestor.join(&*GITIGNORE), self.fs.as_ref()).await
            {
                self.snapshot
                    .lock()
                    .ignores_by_parent_abs_path
                    .insert(ancestor.into(), (ignore.into(), 0));
            }
        }
        {
            let mut snapshot = self.snapshot.lock();
            snapshot.scan_id += 1;
            ignore_stack = snapshot.ignore_stack_for_abs_path(&root_abs_path, true);
            // If the root itself is ignored, mark its entry accordingly
            // before scanning begins.
            if ignore_stack.is_all() {
                if let Some(mut root_entry) = snapshot.root_entry().cloned() {
                    root_entry.is_ignored = true;
                    snapshot.insert_entry(root_entry, self.fs.as_ref());
                }
            }
        };

        // Perform an initial scan of the directory.
        let (scan_job_tx, scan_job_rx) = channel::unbounded();
        smol::block_on(scan_job_tx.send(ScanJob {
            abs_path: root_abs_path,
            path: Arc::from(Path::new("")),
            ignore_stack,
            ancestor_inodes: TreeSet::from_ordered_entries(root_inode),
            scan_queue: scan_job_tx.clone(),
        }))
        .unwrap();
        // Drop the original sender so the queue closes once all scan jobs
        // (which hold clones) are finished.
        drop(scan_job_tx);
        self.scan_dirs(true, scan_job_rx).await;
        {
            let mut snapshot = self.snapshot.lock();
            snapshot.completed_scan_id = snapshot.scan_id;
        }
        self.send_status_update(false, None);

        // Process any FS events that occurred while performing the initial scan.
        // For these events, update events cannot be as precise, because we didn't
        // have the previous state loaded yet.
        if let Poll::Ready(Some(events)) = futures::poll!(events_rx.next()) {
            let mut paths = events.into_iter().map(|e| e.path).collect::<Vec<_>>();
            // Drain any further batches that are already ready.
            while let Poll::Ready(Some(more_events)) = futures::poll!(events_rx.next()) {
                paths.extend(more_events.into_iter().map(|e| e.path));
            }
            self.process_events(paths).await;
        }

        self.finished_initial_scan = true;

        // Continue processing events until the worktree is dropped.
        loop {
            select_biased! {
                // Process any path refresh requests from the worktree. Prioritize
                // these before handling changes reported by the filesystem.
                request = self.refresh_requests_rx.recv().fuse() => {
                    let Ok((paths, barrier)) = request else { break };
                    if !self.process_refresh_request(paths, barrier).await {
                        return;
                    }
                }

                events = events_rx.next().fuse() => {
                    let Some(events) = events else { break };
                    let mut paths = events.into_iter().map(|e| e.path).collect::<Vec<_>>();
                    while let Poll::Ready(Some(more_events)) = futures::poll!(events_rx.next()) {
                        paths.extend(more_events.into_iter().map(|e| e.path));
                    }
                    self.process_events(paths).await;
                }
            }
        }
    }
2380
    /// Reloads the given absolute paths (without recursive rescanning, since
    /// no scan queue is passed) and then emits a non-scanning status update
    /// carrying `barrier`, letting the requester await completion. Returns
    /// false if the status channel has been closed.
    async fn process_refresh_request(&self, paths: Vec<PathBuf>, barrier: barrier::Sender) -> bool {
        self.reload_entries_for_paths(paths, None).await;
        self.send_status_update(false, Some(barrier))
    }
2385
    /// Applies a batch of FS event paths: reloads the affected entries,
    /// rescans directories that changed, refreshes ignore statuses, prunes
    /// git repository state for entries that no longer exist, and finally
    /// emits a status update.
    async fn process_events(&mut self, paths: Vec<PathBuf>) {
        let (scan_job_tx, scan_job_rx) = channel::unbounded();
        if let Some(mut paths) = self
            .reload_entries_for_paths(paths, Some(scan_job_tx.clone()))
            .await
        {
            // Accumulate the changed paths so the next status update can
            // restrict its change set computation to them.
            paths.sort_unstable();
            util::extend_sorted(&mut self.prev_state.lock().1, paths, usize::MAX, Ord::cmp);
        }
        drop(scan_job_tx);
        self.scan_dirs(false, scan_job_rx).await;

        self.update_ignore_statuses().await;

        let mut snapshot = self.snapshot.lock();

        // Drop git repositories whose corresponding entries were removed.
        // `mem::take` lets us retain against the snapshot without holding two
        // mutable borrows of it at once.
        let mut git_repositories = mem::take(&mut snapshot.git_repositories);
        git_repositories.retain(|project_entry_id, _| snapshot.contains_entry(*project_entry_id));
        snapshot.git_repositories = git_repositories;

        let mut git_repository_entries = mem::take(&mut snapshot.snapshot.repository_entries);
        git_repository_entries.retain(|_, entry| snapshot.contains_entry(entry.git_dir_entry_id));
        snapshot.snapshot.repository_entries = git_repository_entries;

        snapshot.removed_entry_ids.clear();
        snapshot.completed_scan_id = snapshot.scan_id;

        drop(snapshot);

        self.send_status_update(false, None);
    }
2417
    /// Drains `scan_jobs_rx` using one worker task per CPU. Each worker also
    /// services refresh requests (with priority over scan jobs) and, when
    /// `enable_progress_updates` is set, sends throttled progress updates.
    async fn scan_dirs(
        &self,
        enable_progress_updates: bool,
        scan_jobs_rx: channel::Receiver<ScanJob>,
    ) {
        use futures::FutureExt as _;

        if self
            .status_updates_tx
            .unbounded_send(ScanState::Started)
            .is_err()
        {
            // The worktree has been dropped; nobody is listening for results.
            return;
        }

        let progress_update_count = AtomicUsize::new(0);
        self.executor
            .scoped(|scope| {
                for _ in 0..self.executor.num_cpus() {
                    scope.spawn(async {
                        let mut last_progress_update_count = 0;
                        let progress_update_timer = self.progress_timer(enable_progress_updates).fuse();
                        futures::pin_mut!(progress_update_timer);

                        loop {
                            select_biased! {
                                // Process any path refresh requests before moving on to process
                                // the scan queue, so that user operations are prioritized.
                                request = self.refresh_requests_rx.recv().fuse() => {
                                    let Ok((paths, barrier)) = request else { break };
                                    if !self.process_refresh_request(paths, barrier).await {
                                        return;
                                    }
                                }

                                // Send periodic progress updates to the worktree. Use an atomic counter
                                // to ensure that only one of the workers sends a progress update after
                                // the update interval elapses.
                                _ = progress_update_timer => {
                                    match progress_update_count.compare_exchange(
                                        last_progress_update_count,
                                        last_progress_update_count + 1,
                                        SeqCst,
                                        SeqCst
                                    ) {
                                        Ok(_) => {
                                            last_progress_update_count += 1;
                                            self.send_status_update(true, None);
                                        }
                                        Err(count) => {
                                            // Another worker won the race; adopt its count.
                                            last_progress_update_count = count;
                                        }
                                    }
                                    progress_update_timer.set(self.progress_timer(enable_progress_updates).fuse());
                                }

                                // Recursively load directories from the file system.
                                job = scan_jobs_rx.recv().fuse() => {
                                    let Ok(job) = job else { break };
                                    if let Err(err) = self.scan_dir(&job).await {
                                        // Errors on the root are reported elsewhere; only log subdirectories.
                                        if job.path.as_ref() != Path::new("") {
                                            log::error!("error scanning directory {:?}: {}", job.abs_path, err);
                                        }
                                    }
                                }
                            }
                        }
                    })
                }
            })
            .await;
    }
2490
    /// Sends a `ScanState::Updated` message containing the current snapshot
    /// and a change set computed against the previously-sent snapshot.
    /// Returns false if the receiving side has been dropped.
    fn send_status_update(&self, scanning: bool, barrier: Option<barrier::Sender>) -> bool {
        let mut prev_state = self.prev_state.lock();
        let snapshot = self.snapshot.lock().clone();
        // Swap the current snapshot into `prev_state` (retrieving the old
        // baseline) and take the accumulated changed paths, resetting them.
        let mut old_snapshot = snapshot.snapshot.clone();
        mem::swap(&mut old_snapshot, &mut prev_state.0);
        let changed_paths = mem::take(&mut prev_state.1);
        let changes = self.build_change_set(&old_snapshot, &snapshot.snapshot, changed_paths);
        self.status_updates_tx
            .unbounded_send(ScanState::Updated {
                snapshot,
                changes,
                scanning,
                barrier,
            })
            .is_ok()
    }
2507
    /// Scans a single directory: reads its children, computes their ignore
    /// status, inserts them into the snapshot, and enqueues scan jobs for
    /// any (non-cyclic) child directories.
    async fn scan_dir(&self, job: &ScanJob) -> Result<()> {
        let mut new_entries: Vec<Entry> = Vec::new();
        // One slot per child *directory*, in encounter order; `None` marks a
        // directory that will not be recursed into (a symlink cycle).
        let mut new_jobs: Vec<Option<ScanJob>> = Vec::new();
        let mut ignore_stack = job.ignore_stack.clone();
        let mut new_ignore = None;
        let (root_abs_path, root_char_bag, next_entry_id) = {
            let snapshot = self.snapshot.lock();
            (
                snapshot.abs_path().clone(),
                snapshot.root_char_bag,
                snapshot.next_entry_id.clone(),
            )
        };
        let mut child_paths = self.fs.read_dir(&job.abs_path).await?;
        while let Some(child_abs_path) = child_paths.next().await {
            let child_abs_path: Arc<Path> = match child_abs_path {
                Ok(child_abs_path) => child_abs_path.into(),
                Err(error) => {
                    log::error!("error processing entry {:?}", error);
                    continue;
                }
            };

            let child_name = child_abs_path.file_name().unwrap();
            let child_path: Arc<Path> = job.path.join(child_name).into();
            let child_metadata = match self.fs.metadata(&child_abs_path).await {
                Ok(Some(metadata)) => metadata,
                // The child disappeared between listing and stat-ing; skip it.
                Ok(None) => continue,
                Err(err) => {
                    log::error!("error processing {:?}: {:?}", child_abs_path, err);
                    continue;
                }
            };

            // If we find a .gitignore, add it to the stack of ignores used to determine which paths are ignored
            if child_name == *GITIGNORE {
                match build_gitignore(&child_abs_path, self.fs.as_ref()).await {
                    Ok(ignore) => {
                        let ignore = Arc::new(ignore);
                        ignore_stack = ignore_stack.append(job.abs_path.clone(), ignore.clone());
                        new_ignore = Some(ignore);
                    }
                    Err(error) => {
                        log::error!(
                            "error loading .gitignore file {:?} - {:?}",
                            child_name,
                            error
                        );
                    }
                }

                // Update ignore status of any child entries we've already processed to reflect the
                // ignore file in the current directory. Because `.gitignore` starts with a `.`, it
                // sorts near the beginning of the directory listing, so the entries processed before
                // it should rarely be numerous. Update the ignore stack associated with any new
                // jobs as well.
                let mut new_jobs = new_jobs.iter_mut();
                for entry in &mut new_entries {
                    let entry_abs_path = root_abs_path.join(&entry.path);
                    entry.is_ignored =
                        ignore_stack.is_abs_path_ignored(&entry_abs_path, entry.is_dir());

                    // `new_jobs` parallels the directory entries in `new_entries`,
                    // so advance it only for directories.
                    if entry.is_dir() {
                        if let Some(job) = new_jobs.next().expect("Missing scan job for entry") {
                            job.ignore_stack = if entry.is_ignored {
                                IgnoreStack::all()
                            } else {
                                ignore_stack.clone()
                            };
                        }
                    }
                }
            }

            let mut child_entry = Entry::new(
                child_path.clone(),
                &child_metadata,
                &next_entry_id,
                root_char_bag,
            );

            if child_entry.is_dir() {
                let is_ignored = ignore_stack.is_abs_path_ignored(&child_abs_path, true);
                child_entry.is_ignored = is_ignored;

                // Avoid recursing until crash in the case of a recursive symlink
                if !job.ancestor_inodes.contains(&child_entry.inode) {
                    let mut ancestor_inodes = job.ancestor_inodes.clone();
                    ancestor_inodes.insert(child_entry.inode);

                    new_jobs.push(Some(ScanJob {
                        abs_path: child_abs_path,
                        path: child_path,
                        ignore_stack: if is_ignored {
                            IgnoreStack::all()
                        } else {
                            ignore_stack.clone()
                        },
                        ancestor_inodes,
                        scan_queue: job.scan_queue.clone(),
                    }));
                } else {
                    new_jobs.push(None);
                }
            } else {
                child_entry.is_ignored = ignore_stack.is_abs_path_ignored(&child_abs_path, false);
            }

            new_entries.push(child_entry);
        }

        self.snapshot.lock().populate_dir(
            job.path.clone(),
            new_entries,
            new_ignore,
            self.fs.as_ref(),
        );

        // Enqueue the child directory jobs only after the snapshot has been
        // populated with this directory's entries.
        for new_job in new_jobs {
            if let Some(new_job) = new_job {
                job.scan_queue.send(new_job).await.unwrap();
            }
        }

        Ok(())
    }
2633
    /// Refreshes the snapshot entries for the given absolute paths: removes
    /// entries for deleted paths, re-inserts entries for existing ones,
    /// refreshes any git repository whose `.git` contents changed, and (when
    /// `scan_queue_tx` is provided) enqueues recursive rescans of changed
    /// directories. Returns the worktree-relative paths that were processed,
    /// or `None` if the root path could not be canonicalized.
    async fn reload_entries_for_paths(
        &self,
        mut abs_paths: Vec<PathBuf>,
        scan_queue_tx: Option<Sender<ScanJob>>,
    ) -> Option<Vec<Arc<Path>>> {
        let doing_recursive_update = scan_queue_tx.is_some();

        // Sorting puts ancestors before descendants, so this dedup drops any
        // path that is contained within the preceding one.
        abs_paths.sort_unstable();
        abs_paths.dedup_by(|a, b| a.starts_with(&b));

        let root_abs_path = self.snapshot.lock().abs_path.clone();
        let root_canonical_path = self.fs.canonicalize(&root_abs_path).await.log_err()?;
        // Stat all paths concurrently before taking the snapshot lock.
        let metadata = futures::future::join_all(
            abs_paths
                .iter()
                .map(|abs_path| self.fs.metadata(&abs_path))
                .collect::<Vec<_>>(),
        )
        .await;

        let mut snapshot = self.snapshot.lock();
        let is_idle = snapshot.completed_scan_id == snapshot.scan_id;
        snapshot.scan_id += 1;
        // A non-recursive refresh completes synchronously within this call.
        if is_idle && !doing_recursive_update {
            snapshot.completed_scan_id = snapshot.scan_id;
        }

        // Remove any entries for paths that no longer exist or are being recursively
        // refreshed. Do this before adding any new entries, so that renames can be
        // detected regardless of the order of the paths.
        let mut event_paths = Vec::<Arc<Path>>::with_capacity(abs_paths.len());
        for (abs_path, metadata) in abs_paths.iter().zip(metadata.iter()) {
            if let Ok(path) = abs_path.strip_prefix(&root_canonical_path) {
                if matches!(metadata, Ok(None)) || doing_recursive_update {
                    snapshot.remove_path(path);
                }
                event_paths.push(path.into());
            } else {
                log::error!(
                    "unexpected event {:?} for root path {:?}",
                    abs_path,
                    root_canonical_path
                );
            }
        }

        for (path, metadata) in event_paths.iter().cloned().zip(metadata.into_iter()) {
            let abs_path: Arc<Path> = root_abs_path.join(&path).into();

            match metadata {
                Ok(Some(metadata)) => {
                    let ignore_stack =
                        snapshot.ignore_stack_for_abs_path(&abs_path, metadata.is_dir);
                    let mut fs_entry = Entry::new(
                        path.clone(),
                        &metadata,
                        snapshot.next_entry_id.as_ref(),
                        snapshot.root_char_bag,
                    );
                    fs_entry.is_ignored = ignore_stack.is_all();
                    snapshot.insert_entry(fs_entry, self.fs.as_ref());

                    let scan_id = snapshot.scan_id;

                    // If this path lies inside a repository's `.git` directory,
                    // reload that repository's index and branch name.
                    let repo_with_path_in_dotgit = snapshot.repo_for_metadata(&path);
                    if let Some((key, repo)) = repo_with_path_in_dotgit {
                        let repo = repo.lock();
                        repo.reload_index();
                        let branch = repo.branch_name();

                        snapshot.repository_entries.update(&key, |entry| {
                            entry.scan_id = scan_id;
                            entry.branch = branch.map(Into::into)
                        });
                    }

                    if let Some(scan_queue_tx) = &scan_queue_tx {
                        let mut ancestor_inodes = snapshot.ancestor_inodes_for_path(&path);
                        // Only recurse into directories that don't form a symlink cycle.
                        if metadata.is_dir && !ancestor_inodes.contains(&metadata.inode) {
                            ancestor_inodes.insert(metadata.inode);
                            smol::block_on(scan_queue_tx.send(ScanJob {
                                abs_path,
                                path,
                                ignore_stack,
                                ancestor_inodes,
                                scan_queue: scan_queue_tx.clone(),
                            }))
                            .unwrap();
                        }
                    }
                }
                // Path was deleted; its entry was already removed above.
                Ok(None) => {}
                Err(err) => {
                    // TODO - create a special 'error' entry in the entries tree to mark this
                    log::error!("error reading file on event {:?}", err);
                }
            }
        }

        Some(event_paths)
    }
2735
    /// Re-evaluates `is_ignored` for entries affected by `.gitignore` files
    /// that changed since the last completed scan, and drops ignore state for
    /// `.gitignore` files that were deleted. The work is fanned out across
    /// one worker per CPU.
    async fn update_ignore_statuses(&self) {
        use futures::FutureExt as _;

        // Work on a clone of the snapshot; entry edits are applied back to the
        // shared snapshot by `update_ignore_status`.
        let mut snapshot = self.snapshot.lock().clone();
        let mut ignores_to_update = Vec::new();
        let mut ignores_to_delete = Vec::new();
        for (parent_abs_path, (_, scan_id)) in &snapshot.ignores_by_parent_abs_path {
            if let Ok(parent_path) = parent_abs_path.strip_prefix(&snapshot.abs_path) {
                // A gitignore touched after the last completed scan needs its
                // subtree's ignore statuses recomputed.
                if *scan_id > snapshot.completed_scan_id
                    && snapshot.entry_for_path(parent_path).is_some()
                {
                    ignores_to_update.push(parent_abs_path.clone());
                }

                let ignore_path = parent_path.join(&*GITIGNORE);
                if snapshot.entry_for_path(ignore_path).is_none() {
                    ignores_to_delete.push(parent_abs_path.clone());
                }
            }
        }

        for parent_abs_path in ignores_to_delete {
            // Remove from both the local working copy and the shared snapshot.
            snapshot.ignores_by_parent_abs_path.remove(&parent_abs_path);
            self.snapshot
                .lock()
                .ignores_by_parent_abs_path
                .remove(&parent_abs_path);
        }

        let (ignore_queue_tx, ignore_queue_rx) = channel::unbounded();
        ignores_to_update.sort_unstable();
        let mut ignores_to_update = ignores_to_update.into_iter().peekable();
        while let Some(parent_abs_path) = ignores_to_update.next() {
            // Skip descendants of a directory that's already being updated;
            // the recursive job for the ancestor will cover them.
            while ignores_to_update
                .peek()
                .map_or(false, |p| p.starts_with(&parent_abs_path))
            {
                ignores_to_update.next().unwrap();
            }

            let ignore_stack = snapshot.ignore_stack_for_abs_path(&parent_abs_path, true);
            smol::block_on(ignore_queue_tx.send(UpdateIgnoreStatusJob {
                abs_path: parent_abs_path,
                ignore_stack,
                ignore_queue: ignore_queue_tx.clone(),
            }))
            .unwrap();
        }
        drop(ignore_queue_tx);

        self.executor
            .scoped(|scope| {
                for _ in 0..self.executor.num_cpus() {
                    scope.spawn(async {
                        loop {
                            select_biased! {
                                // Process any path refresh requests before moving on to process
                                // the queue of ignore statuses.
                                request = self.refresh_requests_rx.recv().fuse() => {
                                    let Ok((paths, barrier)) = request else { break };
                                    if !self.process_refresh_request(paths, barrier).await {
                                        return;
                                    }
                                }

                                // Recursively process directories whose ignores have changed.
                                job = ignore_queue_rx.recv().fuse() => {
                                    let Ok(job) = job else { break };
                                    self.update_ignore_status(job, &snapshot).await;
                                }
                            }
                        }
                    });
                }
            })
            .await;
    }
2813
    /// Recomputes `is_ignored` for the direct children of `job.abs_path`,
    /// applying any changes to the shared snapshot and enqueueing recursive
    /// jobs for child directories.
    async fn update_ignore_status(&self, job: UpdateIgnoreStatusJob, snapshot: &LocalSnapshot) {
        let mut ignore_stack = job.ignore_stack;
        // Include this directory's own `.gitignore`, if it has one.
        if let Some((ignore, _)) = snapshot.ignores_by_parent_abs_path.get(&job.abs_path) {
            ignore_stack = ignore_stack.append(job.abs_path.clone(), ignore.clone());
        }

        let mut entries_by_id_edits = Vec::new();
        let mut entries_by_path_edits = Vec::new();
        let path = job.abs_path.strip_prefix(&snapshot.abs_path).unwrap();
        for mut entry in snapshot.child_entries(path).cloned() {
            let was_ignored = entry.is_ignored;
            let abs_path = snapshot.abs_path().join(&entry.path);
            entry.is_ignored = ignore_stack.is_abs_path_ignored(&abs_path, entry.is_dir());
            if entry.is_dir() {
                let child_ignore_stack = if entry.is_ignored {
                    IgnoreStack::all()
                } else {
                    ignore_stack.clone()
                };
                job.ignore_queue
                    .send(UpdateIgnoreStatusJob {
                        abs_path: abs_path.into(),
                        ignore_stack: child_ignore_stack,
                        ignore_queue: job.ignore_queue.clone(),
                    })
                    .await
                    .unwrap();
            }

            // Only write back entries whose ignore status actually changed.
            if entry.is_ignored != was_ignored {
                let mut path_entry = snapshot.entries_by_id.get(&entry.id, &()).unwrap().clone();
                path_entry.scan_id = snapshot.scan_id;
                path_entry.is_ignored = entry.is_ignored;
                entries_by_id_edits.push(Edit::Insert(path_entry));
                entries_by_path_edits.push(Edit::Insert(entry));
            }
        }

        // Apply the accumulated edits to the shared snapshot in one batch.
        let mut snapshot = self.snapshot.lock();
        snapshot.entries_by_path.edit(entries_by_path_edits, &());
        snapshot.entries_by_id.edit(entries_by_id_edits, &());
    }
2856
    /// Diffs the entries of two snapshots, restricted to the subtrees rooted
    /// at `event_paths`, producing a map of changed path -> change kind.
    /// Before the initial scan completes, equal paths are reported as
    /// `AddedOrUpdated` since the prior state was not yet known.
    fn build_change_set(
        &self,
        old_snapshot: &Snapshot,
        new_snapshot: &Snapshot,
        event_paths: Vec<Arc<Path>>,
    ) -> HashMap<Arc<Path>, PathChange> {
        use PathChange::{Added, AddedOrUpdated, Removed, Updated};

        let mut changes = HashMap::default();
        let mut old_paths = old_snapshot.entries_by_path.cursor::<PathKey>();
        let mut new_paths = new_snapshot.entries_by_path.cursor::<PathKey>();
        let received_before_initialized = !self.finished_initial_scan;

        for path in event_paths {
            let path = PathKey(path);
            // Position both cursors at the event path and walk them in lockstep.
            old_paths.seek(&path, Bias::Left, &());
            new_paths.seek(&path, Bias::Left, &());

            loop {
                match (old_paths.item(), new_paths.item()) {
                    (Some(old_entry), Some(new_entry)) => {
                        // Stop once both cursors have moved past the event path's subtree.
                        if old_entry.path > path.0
                            && new_entry.path > path.0
                            && !old_entry.path.starts_with(&path.0)
                            && !new_entry.path.starts_with(&path.0)
                        {
                            break;
                        }

                        match Ord::cmp(&old_entry.path, &new_entry.path) {
                            // Present only in the old snapshot: removed.
                            Ordering::Less => {
                                changes.insert(old_entry.path.clone(), Removed);
                                old_paths.next(&());
                            }
                            Ordering::Equal => {
                                if received_before_initialized {
                                    // If the worktree was not fully initialized when this event was generated,
                                    // we can't know whether this entry was added during the scan or whether
                                    // it was merely updated.
                                    changes.insert(new_entry.path.clone(), AddedOrUpdated);
                                } else if old_entry.mtime != new_entry.mtime {
                                    changes.insert(new_entry.path.clone(), Updated);
                                }
                                old_paths.next(&());
                                new_paths.next(&());
                            }
                            // Present only in the new snapshot: added.
                            Ordering::Greater => {
                                changes.insert(new_entry.path.clone(), Added);
                                new_paths.next(&());
                            }
                        }
                    }
                    (Some(old_entry), None) => {
                        changes.insert(old_entry.path.clone(), Removed);
                        old_paths.next(&());
                    }
                    (None, Some(new_entry)) => {
                        changes.insert(new_entry.path.clone(), Added);
                        new_paths.next(&());
                    }
                    (None, None) => break,
                }
            }
        }
        changes
    }
2923
2924 async fn progress_timer(&self, running: bool) {
2925 if !running {
2926 return futures::future::pending().await;
2927 }
2928
2929 #[cfg(any(test, feature = "test-support"))]
2930 if self.fs.is_fake() {
2931 return self.executor.simulate_random_delay().await;
2932 }
2933
2934 smol::Timer::after(Duration::from_millis(100)).await;
2935 }
2936}
2937
2938fn char_bag_for_path(root_char_bag: CharBag, path: &Path) -> CharBag {
2939 let mut result = root_char_bag;
2940 result.extend(
2941 path.to_string_lossy()
2942 .chars()
2943 .map(|c| c.to_ascii_lowercase()),
2944 );
2945 result
2946}
2947
/// A unit of work for the recursive directory scan: one directory to read.
struct ScanJob {
    // Absolute path of the directory to scan.
    abs_path: Arc<Path>,
    // Path of the directory relative to the worktree root.
    path: Arc<Path>,
    // Gitignore state in effect for this directory's children.
    ignore_stack: Arc<IgnoreStack>,
    // Sender used to enqueue jobs for child directories.
    scan_queue: Sender<ScanJob>,
    // Inodes of all ancestor directories, used to detect symlink cycles.
    ancestor_inodes: TreeSet<u64>,
}
2955
/// A unit of work for recursively recomputing ignore statuses: one directory
/// whose children need their `is_ignored` flags re-evaluated.
struct UpdateIgnoreStatusJob {
    // Absolute path of the directory to process.
    abs_path: Arc<Path>,
    // Gitignore state in effect above this directory.
    ignore_stack: Arc<IgnoreStack>,
    // Sender used to enqueue jobs for child directories.
    ignore_queue: Sender<UpdateIgnoreStatusJob>,
}
2961
/// Test-support extensions for worktree handles.
pub trait WorktreeHandle {
    /// Forces any pending FS events to be delivered and processed, so tests
    /// can observe a settled worktree state.
    #[cfg(any(test, feature = "test-support"))]
    fn flush_fs_events<'a>(
        &self,
        cx: &'a gpui::TestAppContext,
    ) -> futures::future::LocalBoxFuture<'a, ()>;
}
2969
impl WorktreeHandle for ModelHandle<Worktree> {
    // The worktree's FS event stream sometimes delivers "redundant" events for FS changes that
    // occurred before the worktree was constructed. These events can cause the worktree to perform
    // extra directory scans, and emit extra scan-state notifications.
    //
    // This function mutates the worktree's directory and waits for those mutations to be picked up,
    // to ensure that all redundant FS events have already been processed.
    #[cfg(any(test, feature = "test-support"))]
    fn flush_fs_events<'a>(
        &self,
        cx: &'a gpui::TestAppContext,
    ) -> futures::future::LocalBoxFuture<'a, ()> {
        use smol::future::FutureExt;

        let filename = "fs-event-sentinel";
        let tree = self.clone();
        let (fs, root_path) = self.read_with(cx, |tree, _| {
            let tree = tree.as_local().unwrap();
            (tree.fs.clone(), tree.abs_path().clone())
        });

        async move {
            // Create a sentinel file and wait for the worktree to observe it.
            fs.create_file(&root_path.join(filename), Default::default())
                .await
                .unwrap();
            tree.condition(cx, |tree, _| tree.entry_for_path(filename).is_some())
                .await;

            // Remove the sentinel and wait for that to be observed as well.
            fs.remove_file(&root_path.join(filename), Default::default())
                .await
                .unwrap();
            tree.condition(cx, |tree, _| tree.entry_for_path(filename).is_none())
                .await;

            // Finally, wait for any in-progress scan to complete.
            cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
                .await;
        }
        .boxed_local()
    }
}
3010
/// A `sum_tree` dimension tracking how many entries (total, visible, files,
/// visible files) precede the cursor's position, plus the maximum path seen.
#[derive(Clone, Debug)]
struct TraversalProgress<'a> {
    // Greatest entry path accumulated so far.
    max_path: &'a Path,
    // Total number of entries.
    count: usize,
    // Entries that are not ignored.
    visible_count: usize,
    // Entries that are files.
    file_count: usize,
    // Entries that are non-ignored files.
    visible_file_count: usize,
}
3019
3020impl<'a> TraversalProgress<'a> {
3021 fn count(&self, include_dirs: bool, include_ignored: bool) -> usize {
3022 match (include_ignored, include_dirs) {
3023 (true, true) => self.count,
3024 (true, false) => self.file_count,
3025 (false, true) => self.visible_count,
3026 (false, false) => self.visible_file_count,
3027 }
3028 }
3029}
3030
impl<'a> sum_tree::Dimension<'a, EntrySummary> for TraversalProgress<'a> {
    /// Accumulates an entry summary: counts add up, and `max_path` tracks
    /// the greatest path seen so far.
    fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
        self.max_path = summary.max_path.as_ref();
        self.count += summary.count;
        self.visible_count += summary.visible_count;
        self.file_count += summary.file_count;
        self.visible_file_count += summary.visible_file_count;
    }
}
3040
3041impl<'a> Default for TraversalProgress<'a> {
3042 fn default() -> Self {
3043 Self {
3044 max_path: Path::new(""),
3045 count: 0,
3046 visible_count: 0,
3047 file_count: 0,
3048 visible_file_count: 0,
3049 }
3050 }
3051}
3052
/// An iterator-like cursor over a snapshot's entries, optionally filtering
/// out directories and/or ignored entries.
pub struct Traversal<'a> {
    // Cursor into the snapshot's `entries_by_path` tree.
    cursor: sum_tree::Cursor<'a, Entry, TraversalProgress<'a>>,
    // Whether ignored entries are yielded.
    include_ignored: bool,
    // Whether directory entries are yielded.
    include_dirs: bool,
}
3058
impl<'a> Traversal<'a> {
    /// Advances to the next entry matching this traversal's filters.
    /// Returns whether the cursor moved.
    pub fn advance(&mut self) -> bool {
        self.advance_to_offset(self.offset() + 1)
    }

    /// Advances to the entry at the given filtered offset. Returns whether
    /// the cursor moved.
    pub fn advance_to_offset(&mut self, offset: usize) -> bool {
        self.cursor.seek_forward(
            &TraversalTarget::Count {
                count: offset,
                include_dirs: self.include_dirs,
                include_ignored: self.include_ignored,
            },
            Bias::Right,
            &(),
        )
    }

    /// Skips past all descendants of the current entry, stopping at the next
    /// entry that matches the traversal's filters. Returns whether such an
    /// entry was found.
    pub fn advance_to_sibling(&mut self) -> bool {
        while let Some(entry) = self.cursor.item() {
            self.cursor.seek_forward(
                &TraversalTarget::PathSuccessor(&entry.path),
                Bias::Left,
                &(),
            );
            if let Some(entry) = self.cursor.item() {
                if (self.include_dirs || !entry.is_dir())
                    && (self.include_ignored || !entry.is_ignored)
                {
                    return true;
                }
            }
        }
        false
    }

    /// Returns the entry at the cursor's current position, if any.
    pub fn entry(&self) -> Option<&'a Entry> {
        self.cursor.item()
    }

    /// Returns the current position as a filtered entry count.
    pub fn offset(&self) -> usize {
        self.cursor
            .start()
            .count(self.include_dirs, self.include_ignored)
    }
}
3104
3105impl<'a> Iterator for Traversal<'a> {
3106 type Item = &'a Entry;
3107
3108 fn next(&mut self) -> Option<Self::Item> {
3109 if let Some(item) = self.entry() {
3110 self.advance();
3111 Some(item)
3112 } else {
3113 None
3114 }
3115 }
3116}
3117
/// A seek target within a snapshot's entry tree.
#[derive(Debug)]
enum TraversalTarget<'a> {
    // Seek to the given path.
    Path(&'a Path),
    // Seek to the first entry that is not a descendant of the given path.
    PathSuccessor(&'a Path),
    // Seek to the entry at the given filtered count.
    Count {
        count: usize,
        include_ignored: bool,
        include_dirs: bool,
    },
}
3128
3129impl<'a, 'b> SeekTarget<'a, EntrySummary, TraversalProgress<'a>> for TraversalTarget<'b> {
3130 fn cmp(&self, cursor_location: &TraversalProgress<'a>, _: &()) -> Ordering {
3131 match self {
3132 TraversalTarget::Path(path) => path.cmp(&cursor_location.max_path),
3133 TraversalTarget::PathSuccessor(path) => {
3134 if !cursor_location.max_path.starts_with(path) {
3135 Ordering::Equal
3136 } else {
3137 Ordering::Greater
3138 }
3139 }
3140 TraversalTarget::Count {
3141 count,
3142 include_dirs,
3143 include_ignored,
3144 } => Ord::cmp(
3145 count,
3146 &cursor_location.count(*include_dirs, *include_ignored),
3147 ),
3148 }
3149 }
3150}
3151
/// Iterates over the direct children of a directory, skipping each child's
/// own descendants.
struct ChildEntriesIter<'a> {
    // Path whose children are being enumerated.
    parent_path: &'a Path,
    // Underlying traversal positioned within `parent_path`'s subtree.
    traversal: Traversal<'a>,
}
3156
3157impl<'a> Iterator for ChildEntriesIter<'a> {
3158 type Item = &'a Entry;
3159
3160 fn next(&mut self) -> Option<Self::Item> {
3161 if let Some(item) = self.traversal.entry() {
3162 if item.path.starts_with(&self.parent_path) {
3163 self.traversal.advance_to_sibling();
3164 return Some(item);
3165 }
3166 }
3167 None
3168 }
3169}
3170
3171impl<'a> From<&'a Entry> for proto::Entry {
3172 fn from(entry: &'a Entry) -> Self {
3173 Self {
3174 id: entry.id.to_proto(),
3175 is_dir: entry.is_dir(),
3176 path: entry.path.to_string_lossy().into(),
3177 inode: entry.inode,
3178 mtime: Some(entry.mtime.into()),
3179 is_symlink: entry.is_symlink,
3180 is_ignored: entry.is_ignored,
3181 }
3182 }
3183}
3184
3185impl<'a> TryFrom<(&'a CharBag, proto::Entry)> for Entry {
3186 type Error = anyhow::Error;
3187
3188 fn try_from((root_char_bag, entry): (&'a CharBag, proto::Entry)) -> Result<Self> {
3189 if let Some(mtime) = entry.mtime {
3190 let kind = if entry.is_dir {
3191 EntryKind::Dir
3192 } else {
3193 let mut char_bag = *root_char_bag;
3194 char_bag.extend(entry.path.chars().map(|c| c.to_ascii_lowercase()));
3195 EntryKind::File(char_bag)
3196 };
3197 let path: Arc<Path> = PathBuf::from(entry.path).into();
3198 Ok(Entry {
3199 id: ProjectEntryId::from_proto(entry.id),
3200 kind,
3201 path,
3202 inode: entry.inode,
3203 mtime: mtime.into(),
3204 is_symlink: entry.is_symlink,
3205 is_ignored: entry.is_ignored,
3206 })
3207 } else {
3208 Err(anyhow!(
3209 "missing mtime in remote worktree entry {:?}",
3210 entry.path
3211 ))
3212 }
3213 }
3214}
3215
3216#[cfg(test)]
3217mod tests {
3218 use super::*;
3219 use fs::{FakeFs, RealFs};
3220 use gpui::{executor::Deterministic, TestAppContext};
3221 use pretty_assertions::assert_eq;
3222 use rand::prelude::*;
3223 use serde_json::json;
3224 use std::{env, fmt::Write};
3225 use util::{http::FakeHttpClient, test::temp_tree};
3226
    /// Verifies that `Worktree::entries` honors the `include_ignored` flag:
    /// `a/b` is gitignored, so it appears only when ignored entries are
    /// requested.
    #[gpui::test]
    async fn test_traversal(cx: &mut TestAppContext) {
        let fs = FakeFs::new(cx.background());
        fs.insert_tree(
            "/root",
            json!({
                ".gitignore": "a/b\n",
                "a": {
                    "b": "",
                    "c": "",
                }
            }),
        )
        .await;

        let http_client = FakeHttpClient::with_404_response();
        let client = cx.read(|cx| Client::new(http_client, cx));

        let tree = Worktree::local(
            client,
            Path::new("/root"),
            true,
            fs,
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();
        // Wait for the initial scan before inspecting entries.
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;

        tree.read_with(cx, |tree, _| {
            // Without ignored entries, "a/b" is filtered out.
            assert_eq!(
                tree.entries(false)
                    .map(|entry| entry.path.as_ref())
                    .collect::<Vec<_>>(),
                vec![
                    Path::new(""),
                    Path::new(".gitignore"),
                    Path::new("a"),
                    Path::new("a/c"),
                ]
            );
            // With ignored entries, "a/b" is included.
            assert_eq!(
                tree.entries(true)
                    .map(|entry| entry.path.as_ref())
                    .collect::<Vec<_>>(),
                vec![
                    Path::new(""),
                    Path::new(".gitignore"),
                    Path::new("a"),
                    Path::new("a/b"),
                    Path::new("a/c"),
                ]
            );
        })
    }
3284
    /// Verifies that circular symlinks are listed as entries but not recursed
    /// into, and that renaming one of them is picked up by a rescan.
    #[gpui::test(iterations = 10)]
    async fn test_circular_symlinks(executor: Arc<Deterministic>, cx: &mut TestAppContext) {
        let fs = FakeFs::new(cx.background());
        fs.insert_tree(
            "/root",
            json!({
                "lib": {
                    "a": {
                        "a.txt": ""
                    },
                    "b": {
                        "b.txt": ""
                    }
                }
            }),
        )
        .await;
        // Each symlink points back at its parent, forming a cycle.
        fs.insert_symlink("/root/lib/a/lib", "..".into()).await;
        fs.insert_symlink("/root/lib/b/lib", "..".into()).await;

        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
        let tree = Worktree::local(
            client,
            Path::new("/root"),
            true,
            fs.clone(),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();

        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;

        // The symlinked dirs appear, but their contents are not re-scanned.
        tree.read_with(cx, |tree, _| {
            assert_eq!(
                tree.entries(false)
                    .map(|entry| entry.path.as_ref())
                    .collect::<Vec<_>>(),
                vec![
                    Path::new(""),
                    Path::new("lib"),
                    Path::new("lib/a"),
                    Path::new("lib/a/a.txt"),
                    Path::new("lib/a/lib"),
                    Path::new("lib/b"),
                    Path::new("lib/b/b.txt"),
                    Path::new("lib/b/lib"),
                ]
            );
        });

        // Renaming a symlink should be reflected after the event is processed.
        fs.rename(
            Path::new("/root/lib/a/lib"),
            Path::new("/root/lib/a/lib-2"),
            Default::default(),
        )
        .await
        .unwrap();
        executor.run_until_parked();
        tree.read_with(cx, |tree, _| {
            assert_eq!(
                tree.entries(false)
                    .map(|entry| entry.path.as_ref())
                    .collect::<Vec<_>>(),
                vec![
                    Path::new(""),
                    Path::new("lib"),
                    Path::new("lib/a"),
                    Path::new("lib/a/a.txt"),
                    Path::new("lib/a/lib-2"),
                    Path::new("lib/b"),
                    Path::new("lib/b/b.txt"),
                    Path::new("lib/b/lib"),
                ]
            );
        });
    }
3364
    /// Verifies that ignore statuses (from both the worktree's own
    /// `.gitignore` and an ancestor's) are applied during the initial scan
    /// and re-applied to files created afterwards.
    #[gpui::test]
    async fn test_rescan_with_gitignore(cx: &mut TestAppContext) {
        let parent_dir = temp_tree(json!({
            ".gitignore": "ancestor-ignored-file1\nancestor-ignored-file2\n",
            "tree": {
                ".git": {},
                ".gitignore": "ignored-dir\n",
                "tracked-dir": {
                    "tracked-file1": "",
                    "ancestor-ignored-file1": "",
                },
                "ignored-dir": {
                    "ignored-file1": ""
                }
            }
        }));
        let dir = parent_dir.path().join("tree");

        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));

        let tree = Worktree::local(
            client,
            dir.as_path(),
            true,
            Arc::new(RealFs),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;
        tree.flush_fs_events(cx).await;
        // Ignore statuses after the initial scan.
        cx.read(|cx| {
            let tree = tree.read(cx);
            assert!(
                !tree
                    .entry_for_path("tracked-dir/tracked-file1")
                    .unwrap()
                    .is_ignored
            );
            assert!(
                tree.entry_for_path("tracked-dir/ancestor-ignored-file1")
                    .unwrap()
                    .is_ignored
            );
            assert!(
                tree.entry_for_path("ignored-dir/ignored-file1")
                    .unwrap()
                    .is_ignored
            );
        });

        // Create new files and verify their ignore statuses after a rescan.
        std::fs::write(dir.join("tracked-dir/tracked-file2"), "").unwrap();
        std::fs::write(dir.join("tracked-dir/ancestor-ignored-file2"), "").unwrap();
        std::fs::write(dir.join("ignored-dir/ignored-file2"), "").unwrap();
        tree.flush_fs_events(cx).await;
        cx.read(|cx| {
            let tree = tree.read(cx);
            assert!(
                !tree
                    .entry_for_path("tracked-dir/tracked-file2")
                    .unwrap()
                    .is_ignored
            );
            assert!(
                tree.entry_for_path("tracked-dir/ancestor-ignored-file2")
                    .unwrap()
                    .is_ignored
            );
            assert!(
                tree.entry_for_path("ignored-dir/ignored-file2")
                    .unwrap()
                    .is_ignored
            );
            assert!(tree.entry_for_path(".git").unwrap().is_ignored);
        });
    }
3443
    #[gpui::test]
    async fn test_git_repository_for_path(cx: &mut TestAppContext) {
        // `dir1` is a git repository containing a nested dependency
        // repository at `dir1/deps/dep1`; `c.txt` lives outside any repo.
        let root = temp_tree(json!({
            "dir1": {
                ".git": {},
                "deps": {
                    "dep1": {
                        ".git": {},
                        "src": {
                            "a.txt": ""
                        }
                    }
                },
                "src": {
                    "b.txt": ""
                }
            },
            "c.txt": "",
        }));

        let http_client = FakeHttpClient::with_404_response();
        let client = cx.read(|cx| Client::new(http_client, cx));
        let tree = Worktree::local(
            client,
            root.path(),
            true,
            Arc::new(RealFs),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();

        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;
        tree.flush_fs_events(cx).await;

        // Each file maps to its innermost enclosing repository; files
        // outside every repository map to none.
        tree.read_with(cx, |tree, _cx| {
            let tree = tree.as_local().unwrap();

            assert!(tree.repo_for("c.txt".as_ref()).is_none());

            let entry = tree.repo_for("dir1/src/b.txt".as_ref()).unwrap();
            assert_eq!(entry.work_directory.0.as_ref(), Path::new("dir1"));
            assert_eq!(entry.git_dir_path.as_ref(), Path::new("dir1/.git"));

            let entry = tree.repo_for("dir1/deps/dep1/src/a.txt".as_ref()).unwrap();
            assert_eq!(entry.work_directory.deref(), Path::new("dir1/deps/dep1"));
            assert_eq!(
                entry.git_dir_path.as_ref(),
                Path::new("dir1/deps/dep1/.git"),
            );
        });

        let original_scan_id = tree.read_with(cx, |tree, _cx| {
            let tree = tree.as_local().unwrap();
            let entry = tree.repo_for("dir1/src/b.txt".as_ref()).unwrap();
            entry.scan_id
        });

        // Modifying something inside `.git` must bump the repository entry's
        // scan id.
        std::fs::write(root.path().join("dir1/.git/random_new_file"), "hello").unwrap();
        tree.flush_fs_events(cx).await;

        tree.read_with(cx, |tree, _cx| {
            let tree = tree.as_local().unwrap();
            let new_scan_id = {
                let entry = tree.repo_for("dir1/src/b.txt".as_ref()).unwrap();
                entry.scan_id
            };
            assert_ne!(
                original_scan_id, new_scan_id,
                "original {original_scan_id}, new {new_scan_id}"
            );
        });

        // Deleting `.git` removes the repository mapping entirely.
        std::fs::remove_dir_all(root.path().join("dir1/.git")).unwrap();
        tree.flush_fs_events(cx).await;

        tree.read_with(cx, |tree, _cx| {
            let tree = tree.as_local().unwrap();

            assert!(tree.repo_for("dir1/src/b.txt".as_ref()).is_none());
        });
    }
3528
3529 #[test]
3530 fn test_changed_repos() {
3531 fn fake_entry(git_dir_path: impl AsRef<Path>, scan_id: usize) -> RepositoryEntry {
3532 RepositoryEntry {
3533 scan_id,
3534 git_dir_path: git_dir_path.as_ref().into(),
3535 git_dir_entry_id: ProjectEntryId(0),
3536 work_directory: RepositoryWorkDirectory(
3537 Path::new(&format!("don't-care-{}", scan_id)).into(),
3538 ),
3539 branch: None,
3540 }
3541 }
3542
3543 let mut prev_repos = TreeMap::<RepositoryWorkDirectory, RepositoryEntry>::default();
3544 prev_repos.insert(
3545 RepositoryWorkDirectory(Path::new("don't-care-1").into()),
3546 fake_entry("/.git", 0),
3547 );
3548 prev_repos.insert(
3549 RepositoryWorkDirectory(Path::new("don't-care-2").into()),
3550 fake_entry("/a/.git", 0),
3551 );
3552 prev_repos.insert(
3553 RepositoryWorkDirectory(Path::new("don't-care-3").into()),
3554 fake_entry("/a/b/.git", 0),
3555 );
3556
3557 let mut new_repos = TreeMap::<RepositoryWorkDirectory, RepositoryEntry>::default();
3558 new_repos.insert(
3559 RepositoryWorkDirectory(Path::new("don't-care-4").into()),
3560 fake_entry("/a/.git", 1),
3561 );
3562 new_repos.insert(
3563 RepositoryWorkDirectory(Path::new("don't-care-5").into()),
3564 fake_entry("/a/b/.git", 0),
3565 );
3566 new_repos.insert(
3567 RepositoryWorkDirectory(Path::new("don't-care-6").into()),
3568 fake_entry("/a/c/.git", 0),
3569 );
3570
3571 let res = LocalWorktree::changed_repos(&prev_repos, &new_repos);
3572
3573 // Deletion retained
3574 assert!(res
3575 .iter()
3576 .find(|repo| repo.git_dir_path.as_ref() == Path::new("/.git") && repo.scan_id == 0)
3577 .is_some());
3578
3579 // Update retained
3580 assert!(res
3581 .iter()
3582 .find(|repo| repo.git_dir_path.as_ref() == Path::new("/a/.git") && repo.scan_id == 1)
3583 .is_some());
3584
3585 // Addition retained
3586 assert!(res
3587 .iter()
3588 .find(|repo| repo.git_dir_path.as_ref() == Path::new("/a/c/.git") && repo.scan_id == 0)
3589 .is_some());
3590
3591 // Nochange, not retained
3592 assert!(res
3593 .iter()
3594 .find(|repo| repo.git_dir_path.as_ref() == Path::new("/a/b/.git") && repo.scan_id == 0)
3595 .is_none());
3596 }
3597
3598 #[gpui::test]
3599 async fn test_write_file(cx: &mut TestAppContext) {
3600 let dir = temp_tree(json!({
3601 ".git": {},
3602 ".gitignore": "ignored-dir\n",
3603 "tracked-dir": {},
3604 "ignored-dir": {}
3605 }));
3606
3607 let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
3608
3609 let tree = Worktree::local(
3610 client,
3611 dir.path(),
3612 true,
3613 Arc::new(RealFs),
3614 Default::default(),
3615 &mut cx.to_async(),
3616 )
3617 .await
3618 .unwrap();
3619 cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
3620 .await;
3621 tree.flush_fs_events(cx).await;
3622
3623 tree.update(cx, |tree, cx| {
3624 tree.as_local().unwrap().write_file(
3625 Path::new("tracked-dir/file.txt"),
3626 "hello".into(),
3627 Default::default(),
3628 cx,
3629 )
3630 })
3631 .await
3632 .unwrap();
3633 tree.update(cx, |tree, cx| {
3634 tree.as_local().unwrap().write_file(
3635 Path::new("ignored-dir/file.txt"),
3636 "world".into(),
3637 Default::default(),
3638 cx,
3639 )
3640 })
3641 .await
3642 .unwrap();
3643
3644 tree.read_with(cx, |tree, _| {
3645 let tracked = tree.entry_for_path("tracked-dir/file.txt").unwrap();
3646 let ignored = tree.entry_for_path("ignored-dir/file.txt").unwrap();
3647 assert!(!tracked.is_ignored);
3648 assert!(ignored.is_ignored);
3649 });
3650 }
3651
3652 #[gpui::test(iterations = 30)]
3653 async fn test_create_directory_during_initial_scan(cx: &mut TestAppContext) {
3654 let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
3655
3656 let fs = FakeFs::new(cx.background());
3657 fs.insert_tree(
3658 "/root",
3659 json!({
3660 "b": {},
3661 "c": {},
3662 "d": {},
3663 }),
3664 )
3665 .await;
3666
3667 let tree = Worktree::local(
3668 client,
3669 "/root".as_ref(),
3670 true,
3671 fs,
3672 Default::default(),
3673 &mut cx.to_async(),
3674 )
3675 .await
3676 .unwrap();
3677
3678 let mut snapshot1 = tree.update(cx, |tree, _| tree.as_local().unwrap().snapshot());
3679
3680 let entry = tree
3681 .update(cx, |tree, cx| {
3682 tree.as_local_mut()
3683 .unwrap()
3684 .create_entry("a/e".as_ref(), true, cx)
3685 })
3686 .await
3687 .unwrap();
3688 assert!(entry.is_dir());
3689
3690 cx.foreground().run_until_parked();
3691 tree.read_with(cx, |tree, _| {
3692 assert_eq!(tree.entry_for_path("a/e").unwrap().kind, EntryKind::Dir);
3693 });
3694
3695 let snapshot2 = tree.update(cx, |tree, _| tree.as_local().unwrap().snapshot());
3696 let update = snapshot2.build_update(&snapshot1, 0, 0, true);
3697 snapshot1.apply_remote_update(update).unwrap();
3698 assert_eq!(snapshot1.to_vec(true), snapshot2.to_vec(true),);
3699 }
3700
    #[gpui::test(iterations = 100)]
    async fn test_random_worktree_operations_during_initial_scan(
        cx: &mut TestAppContext,
        mut rng: StdRng,
    ) {
        // Workload sizes; overridable via env vars so failures can be
        // reproduced at different scales.
        let operations = env::var("OPERATIONS")
            .map(|o| o.parse().unwrap())
            .unwrap_or(5);
        let initial_entries = env::var("INITIAL_ENTRIES")
            .map(|o| o.parse().unwrap())
            .unwrap_or(20);

        // Seed a fake filesystem with random entries.
        let root_dir = Path::new("/test");
        let fs = FakeFs::new(cx.background()) as Arc<dyn Fs>;
        fs.as_fake().insert_tree(root_dir, json!({})).await;
        for _ in 0..initial_entries {
            randomly_mutate_fs(&fs, root_dir, 1.0, &mut rng).await;
        }
        log::info!("generated initial tree");

        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
        let worktree = Worktree::local(
            client.clone(),
            root_dir,
            true,
            fs.clone(),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();

        // Baseline snapshot taken while the initial scan is still running.
        let mut snapshot = worktree.update(cx, |tree, _| tree.as_local().unwrap().snapshot());

        // Mutate the worktree concurrently with the initial scan, checking
        // invariants after each operation and (sometimes) that incremental
        // updates keep a replica snapshot in sync.
        for _ in 0..operations {
            worktree
                .update(cx, |worktree, cx| {
                    randomly_mutate_worktree(worktree, &mut rng, cx)
                })
                .await
                .log_err();
            worktree.read_with(cx, |tree, _| {
                tree.as_local().unwrap().snapshot.check_invariants()
            });

            if rng.gen_bool(0.6) {
                let new_snapshot =
                    worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
                let update = new_snapshot.build_update(&snapshot, 0, 0, true);
                snapshot.apply_remote_update(update.clone()).unwrap();
                assert_eq!(
                    snapshot.to_vec(true),
                    new_snapshot.to_vec(true),
                    "incorrect snapshot after update {:?}",
                    update
                );
            }
        }

        // Once the scan completes, invariants must still hold and the
        // replica must converge after one final update.
        worktree
            .update(cx, |tree, _| tree.as_local_mut().unwrap().scan_complete())
            .await;
        worktree.read_with(cx, |tree, _| {
            tree.as_local().unwrap().snapshot.check_invariants()
        });

        let new_snapshot = worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
        let update = new_snapshot.build_update(&snapshot, 0, 0, true);
        snapshot.apply_remote_update(update.clone()).unwrap();
        assert_eq!(
            snapshot.to_vec(true),
            new_snapshot.to_vec(true),
            "incorrect snapshot after update {:?}",
            update
        );
    }
3777
3778 #[gpui::test(iterations = 100)]
3779 async fn test_random_worktree_changes(cx: &mut TestAppContext, mut rng: StdRng) {
3780 let operations = env::var("OPERATIONS")
3781 .map(|o| o.parse().unwrap())
3782 .unwrap_or(40);
3783 let initial_entries = env::var("INITIAL_ENTRIES")
3784 .map(|o| o.parse().unwrap())
3785 .unwrap_or(20);
3786
3787 let root_dir = Path::new("/test");
3788 let fs = FakeFs::new(cx.background()) as Arc<dyn Fs>;
3789 fs.as_fake().insert_tree(root_dir, json!({})).await;
3790 for _ in 0..initial_entries {
3791 randomly_mutate_fs(&fs, root_dir, 1.0, &mut rng).await;
3792 }
3793 log::info!("generated initial tree");
3794
3795 let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
3796 let worktree = Worktree::local(
3797 client.clone(),
3798 root_dir,
3799 true,
3800 fs.clone(),
3801 Default::default(),
3802 &mut cx.to_async(),
3803 )
3804 .await
3805 .unwrap();
3806
3807 worktree
3808 .update(cx, |tree, _| tree.as_local_mut().unwrap().scan_complete())
3809 .await;
3810
3811 // After the initial scan is complete, the `UpdatedEntries` event can
3812 // be used to follow along with all changes to the worktree's snapshot.
3813 worktree.update(cx, |tree, cx| {
3814 let mut paths = tree
3815 .as_local()
3816 .unwrap()
3817 .paths()
3818 .cloned()
3819 .collect::<Vec<_>>();
3820
3821 cx.subscribe(&worktree, move |tree, _, event, _| {
3822 if let Event::UpdatedEntries(changes) = event {
3823 for (path, change_type) in changes.iter() {
3824 let path = path.clone();
3825 let ix = match paths.binary_search(&path) {
3826 Ok(ix) | Err(ix) => ix,
3827 };
3828 match change_type {
3829 PathChange::Added => {
3830 assert_ne!(paths.get(ix), Some(&path));
3831 paths.insert(ix, path);
3832 }
3833 PathChange::Removed => {
3834 assert_eq!(paths.get(ix), Some(&path));
3835 paths.remove(ix);
3836 }
3837 PathChange::Updated => {
3838 assert_eq!(paths.get(ix), Some(&path));
3839 }
3840 PathChange::AddedOrUpdated => {
3841 if paths[ix] != path {
3842 paths.insert(ix, path);
3843 }
3844 }
3845 }
3846 }
3847 let new_paths = tree.paths().cloned().collect::<Vec<_>>();
3848 assert_eq!(paths, new_paths, "incorrect changes: {:?}", changes);
3849 }
3850 })
3851 .detach();
3852 });
3853
3854 let mut snapshots = Vec::new();
3855 let mut mutations_len = operations;
3856 while mutations_len > 1 {
3857 randomly_mutate_fs(&fs, root_dir, 1.0, &mut rng).await;
3858 let buffered_event_count = fs.as_fake().buffered_event_count().await;
3859 if buffered_event_count > 0 && rng.gen_bool(0.3) {
3860 let len = rng.gen_range(0..=buffered_event_count);
3861 log::info!("flushing {} events", len);
3862 fs.as_fake().flush_events(len).await;
3863 } else {
3864 randomly_mutate_fs(&fs, root_dir, 0.6, &mut rng).await;
3865 mutations_len -= 1;
3866 }
3867
3868 cx.foreground().run_until_parked();
3869 if rng.gen_bool(0.2) {
3870 log::info!("storing snapshot {}", snapshots.len());
3871 let snapshot =
3872 worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
3873 snapshots.push(snapshot);
3874 }
3875 }
3876
3877 log::info!("quiescing");
3878 fs.as_fake().flush_events(usize::MAX).await;
3879 cx.foreground().run_until_parked();
3880 let snapshot = worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
3881 snapshot.check_invariants();
3882
3883 {
3884 let new_worktree = Worktree::local(
3885 client.clone(),
3886 root_dir,
3887 true,
3888 fs.clone(),
3889 Default::default(),
3890 &mut cx.to_async(),
3891 )
3892 .await
3893 .unwrap();
3894 new_worktree
3895 .update(cx, |tree, _| tree.as_local_mut().unwrap().scan_complete())
3896 .await;
3897 let new_snapshot =
3898 new_worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
3899 assert_eq!(snapshot.to_vec(true), new_snapshot.to_vec(true));
3900 }
3901
3902 for (i, mut prev_snapshot) in snapshots.into_iter().enumerate() {
3903 let include_ignored = rng.gen::<bool>();
3904 if !include_ignored {
3905 let mut entries_by_path_edits = Vec::new();
3906 let mut entries_by_id_edits = Vec::new();
3907 for entry in prev_snapshot
3908 .entries_by_id
3909 .cursor::<()>()
3910 .filter(|e| e.is_ignored)
3911 {
3912 entries_by_path_edits.push(Edit::Remove(PathKey(entry.path.clone())));
3913 entries_by_id_edits.push(Edit::Remove(entry.id));
3914 }
3915
3916 prev_snapshot
3917 .entries_by_path
3918 .edit(entries_by_path_edits, &());
3919 prev_snapshot.entries_by_id.edit(entries_by_id_edits, &());
3920 }
3921
3922 let update = snapshot.build_update(&prev_snapshot, 0, 0, include_ignored);
3923 prev_snapshot.apply_remote_update(update.clone()).unwrap();
3924 assert_eq!(
3925 prev_snapshot.to_vec(include_ignored),
3926 snapshot.to_vec(include_ignored),
3927 "wrong update for snapshot {i}. update: {:?}",
3928 update
3929 );
3930 }
3931 }
3932
    // Applies one random mutation (delete, rename, or create/overwrite an
    // entry) to the worktree, returning a task that resolves when the
    // operation has completed.
    fn randomly_mutate_worktree(
        worktree: &mut Worktree,
        rng: &mut impl Rng,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<()>> {
        let worktree = worktree.as_local_mut().unwrap();
        let snapshot = worktree.snapshot();
        // Pick a random existing entry (ignored entries excluded).
        let entry = snapshot.entries(false).choose(rng).unwrap();

        match rng.gen_range(0_u32..100) {
            // ~1/3 of the time: delete the chosen entry (never the root).
            0..=33 if entry.path.as_ref() != Path::new("") => {
                log::info!("deleting entry {:?} ({})", entry.path, entry.id.0);
                worktree.delete_entry(entry.id, cx).unwrap()
            }
            // ~1/3 of the time: rename the entry into the directory of some
            // other random entry (never the root).
            ..=66 if entry.path.as_ref() != Path::new("") => {
                let other_entry = snapshot.entries(false).choose(rng).unwrap();
                let new_parent_path = if other_entry.is_dir() {
                    other_entry.path.clone()
                } else {
                    other_entry.path.parent().unwrap().into()
                };
                let mut new_path = new_parent_path.join(gen_name(rng));
                // Avoid moving an entry into one of its own descendants;
                // fall back to a fresh name at the root.
                if new_path.starts_with(&entry.path) {
                    new_path = gen_name(rng).into();
                }

                log::info!(
                    "renaming entry {:?} ({}) to {:?}",
                    entry.path,
                    entry.id.0,
                    new_path
                );
                let task = worktree.rename_entry(entry.id, new_path, cx).unwrap();
                cx.foreground().spawn(async move {
                    task.await?;
                    Ok(())
                })
            }
            // Otherwise: create a child under a directory entry, or
            // overwrite a file entry's contents with the empty string.
            _ => {
                let task = if entry.is_dir() {
                    let child_path = entry.path.join(gen_name(rng));
                    let is_dir = rng.gen_bool(0.3);
                    log::info!(
                        "creating {} at {:?}",
                        if is_dir { "dir" } else { "file" },
                        child_path,
                    );
                    worktree.create_entry(child_path, is_dir, cx)
                } else {
                    log::info!("overwriting file {:?} ({})", entry.path, entry.id.0);
                    worktree.write_file(entry.path.clone(), "".into(), Default::default(), cx)
                };
                cx.foreground().spawn(async move {
                    task.await?;
                    Ok(())
                })
            }
        }
    }
3992
    // Applies one random mutation directly to the fake filesystem under
    // `root_path`: create a file/dir, write a `.gitignore`, rename, or
    // delete. `insertion_probability` biases the choice toward creation.
    async fn randomly_mutate_fs(
        fs: &Arc<dyn Fs>,
        root_path: &Path,
        insertion_probability: f64,
        rng: &mut impl Rng,
    ) {
        // Partition the existing paths under the root into files and dirs.
        let mut files = Vec::new();
        let mut dirs = Vec::new();
        for path in fs.as_fake().paths() {
            if path.starts_with(root_path) {
                if fs.is_file(&path).await {
                    files.push(path);
                } else {
                    dirs.push(path);
                }
            }
        }

        if (files.is_empty() && dirs.len() == 1) || rng.gen_bool(insertion_probability) {
            // Creation branch: forced when the tree is empty apart from a
            // single directory, otherwise taken with `insertion_probability`.
            let path = dirs.choose(rng).unwrap();
            let new_path = path.join(gen_name(rng));

            if rng.gen() {
                log::info!(
                    "creating dir {:?}",
                    new_path.strip_prefix(root_path).unwrap()
                );
                fs.create_dir(&new_path).await.unwrap();
            } else {
                log::info!(
                    "creating file {:?}",
                    new_path.strip_prefix(root_path).unwrap()
                );
                fs.create_file(&new_path, Default::default()).await.unwrap();
            }
        } else if rng.gen_bool(0.05) {
            // Gitignore branch: write a `.gitignore` in a random directory,
            // listing a random subset of that directory's descendants.
            let ignore_dir_path = dirs.choose(rng).unwrap();
            let ignore_path = ignore_dir_path.join(&*GITIGNORE);

            // Both lists include `ignore_dir_path` itself (a path
            // starts_with itself), so `subdirs` is never empty.
            let subdirs = dirs
                .iter()
                .filter(|d| d.starts_with(&ignore_dir_path))
                .cloned()
                .collect::<Vec<_>>();
            let subfiles = files
                .iter()
                .filter(|d| d.starts_with(&ignore_dir_path))
                .cloned()
                .collect::<Vec<_>>();
            let files_to_ignore = {
                let len = rng.gen_range(0..=subfiles.len());
                subfiles.choose_multiple(rng, len)
            };
            let dirs_to_ignore = {
                // NOTE(review): exclusive range here, unlike the inclusive
                // one for files above — presumably deliberate; confirm.
                let len = rng.gen_range(0..subdirs.len());
                subdirs.choose_multiple(rng, len)
            };

            // Each ignored path is written relative to the `.gitignore`'s
            // own directory, one per line.
            let mut ignore_contents = String::new();
            for path_to_ignore in files_to_ignore.chain(dirs_to_ignore) {
                writeln!(
                    ignore_contents,
                    "{}",
                    path_to_ignore
                        .strip_prefix(&ignore_dir_path)
                        .unwrap()
                        .to_str()
                        .unwrap()
                )
                .unwrap();
            }
            log::info!(
                "creating gitignore {:?} with contents:\n{}",
                ignore_path.strip_prefix(&root_path).unwrap(),
                ignore_contents
            );
            fs.save(
                &ignore_path,
                &ignore_contents.as_str().into(),
                Default::default(),
            )
            .await
            .unwrap();
        } else {
            // Rename-or-delete branch.
            // NOTE(review): `dirs[1..]` skips the first directory —
            // presumably the root itself, which must never be renamed or
            // deleted; confirm the ordering guarantee of `paths()`.
            let old_path = {
                let file_path = files.choose(rng);
                let dir_path = dirs[1..].choose(rng);
                file_path.into_iter().chain(dir_path).choose(rng).unwrap()
            };

            let is_rename = rng.gen();
            if is_rename {
                // Move to a directory that isn't inside the entry being
                // moved.
                let new_path_parent = dirs
                    .iter()
                    .filter(|d| !d.starts_with(old_path))
                    .choose(rng)
                    .unwrap();

                // Sometimes replace an existing directory wholesale instead
                // of creating a new name inside it.
                let overwrite_existing_dir =
                    !old_path.starts_with(&new_path_parent) && rng.gen_bool(0.3);
                let new_path = if overwrite_existing_dir {
                    fs.remove_dir(
                        &new_path_parent,
                        RemoveOptions {
                            recursive: true,
                            ignore_if_not_exists: true,
                        },
                    )
                    .await
                    .unwrap();
                    new_path_parent.to_path_buf()
                } else {
                    new_path_parent.join(gen_name(rng))
                };

                log::info!(
                    "renaming {:?} to {}{:?}",
                    old_path.strip_prefix(&root_path).unwrap(),
                    if overwrite_existing_dir {
                        "overwrite "
                    } else {
                        ""
                    },
                    new_path.strip_prefix(&root_path).unwrap()
                );
                fs.rename(
                    &old_path,
                    &new_path,
                    fs::RenameOptions {
                        overwrite: true,
                        ignore_if_exists: true,
                    },
                )
                .await
                .unwrap();
            } else if fs.is_file(&old_path).await {
                log::info!(
                    "deleting file {:?}",
                    old_path.strip_prefix(&root_path).unwrap()
                );
                fs.remove_file(old_path, Default::default()).await.unwrap();
            } else {
                log::info!(
                    "deleting dir {:?}",
                    old_path.strip_prefix(&root_path).unwrap()
                );
                fs.remove_dir(
                    &old_path,
                    RemoveOptions {
                        recursive: true,
                        ignore_if_not_exists: true,
                    },
                )
                .await
                .unwrap();
            }
        }
    }
4151
4152 fn gen_name(rng: &mut impl Rng) -> String {
4153 (0..6)
4154 .map(|_| rng.sample(rand::distributions::Alphanumeric))
4155 .map(char::from)
4156 .collect()
4157 }
4158
    impl LocalSnapshot {
        // Panics if any of the snapshot's internal invariants are violated.
        // Called by the randomized tests above after each batch of changes.
        fn check_invariants(&self) {
            // 1. The two redundant indices must agree: `entries_by_path`
            //    (sorted by path) and `entries_by_id` must contain exactly
            //    the same (path, id) pairs.
            assert_eq!(
                self.entries_by_path
                    .cursor::<()>()
                    .map(|e| (&e.path, e.id))
                    .collect::<Vec<_>>(),
                self.entries_by_id
                    .cursor::<()>()
                    .map(|e| (&e.path, e.id))
                    .collect::<collections::BTreeSet<_>>()
                    .into_iter()
                    .collect::<Vec<_>>(),
                "entries_by_path and entries_by_id are inconsistent"
            );

            // 2. The file traversals (with and without ignored entries) must
            //    visit exactly the file entries, in order, and nothing more.
            let mut files = self.files(true, 0);
            let mut visible_files = self.files(false, 0);
            for entry in self.entries_by_path.cursor::<()>() {
                if entry.is_file() {
                    assert_eq!(files.next().unwrap().inode, entry.inode);
                    if !entry.is_ignored {
                        assert_eq!(visible_files.next().unwrap().inode, entry.inode);
                    }
                }
            }

            assert!(files.next().is_none());
            assert!(visible_files.next().is_none());

            // 3. Walking the tree from the root via `child_entries` must
            //    yield the same order as iterating `entries_by_path`
            //    directly. (Inserting each child at the pre-loop stack
            //    length makes the stack pop children in sorted order, so
            //    this is a depth-first, path-ordered walk despite the
            //    `bfs_paths` name.)
            let mut bfs_paths = Vec::new();
            let mut stack = vec![Path::new("")];
            while let Some(path) = stack.pop() {
                bfs_paths.push(path);
                let ix = stack.len();
                for child_entry in self.child_entries(path) {
                    stack.insert(ix, &child_entry.path);
                }
            }

            let dfs_paths_via_iter = self
                .entries_by_path
                .cursor::<()>()
                .map(|e| e.path.as_ref())
                .collect::<Vec<_>>();
            assert_eq!(bfs_paths, dfs_paths_via_iter);

            // 4. The public `entries` traversal agrees with the raw cursor.
            let dfs_paths_via_traversal = self
                .entries(true)
                .map(|e| e.path.as_ref())
                .collect::<Vec<_>>();
            assert_eq!(dfs_paths_via_traversal, dfs_paths_via_iter);

            // 5. Every tracked `.gitignore` parent corresponds to an
            //    existing directory entry containing an existing
            //    `.gitignore` entry.
            for ignore_parent_abs_path in self.ignores_by_parent_abs_path.keys() {
                let ignore_parent_path =
                    ignore_parent_abs_path.strip_prefix(&self.abs_path).unwrap();
                assert!(self.entry_for_path(&ignore_parent_path).is_some());
                assert!(self
                    .entry_for_path(ignore_parent_path.join(&*GITIGNORE))
                    .is_some());
            }
        }

        // Flattens the snapshot into a path-sorted list of
        // (path, inode, is_ignored) tuples for easy comparison in tests,
        // optionally excluding ignored entries.
        fn to_vec(&self, include_ignored: bool) -> Vec<(&Path, u64, bool)> {
            let mut paths = Vec::new();
            for entry in self.entries_by_path.cursor::<()>() {
                if include_ignored || !entry.is_ignored {
                    paths.push((entry.path.as_ref(), entry.inode, entry.is_ignored));
                }
            }
            paths.sort_by(|a, b| a.0.cmp(b.0));
            paths
        }
    }
4233}