1use crate::{
2 copy_recursive, ignore::IgnoreStack, DiagnosticSummary, ProjectEntryId, RemoveOptions,
3};
4use ::ignore::gitignore::{Gitignore, GitignoreBuilder};
5use anyhow::{anyhow, Context, Result};
6use client::{proto, Client};
7use clock::ReplicaId;
8use collections::{HashMap, VecDeque};
9use fs::{repository::GitRepository, Fs, LineEnding};
10use futures::{
11 channel::{
12 mpsc::{self, UnboundedSender},
13 oneshot,
14 },
15 select_biased,
16 task::Poll,
17 Stream, StreamExt,
18};
19use fuzzy::CharBag;
20use git::{DOT_GIT, GITIGNORE};
21use gpui::{executor, AppContext, AsyncAppContext, Entity, ModelContext, ModelHandle, Task};
22use language::{
23 proto::{
24 deserialize_fingerprint, deserialize_version, serialize_fingerprint, serialize_line_ending,
25 serialize_version,
26 },
27 Buffer, DiagnosticEntry, File as _, PointUtf16, Rope, RopeFingerprint, Unclipped,
28};
29use lsp::LanguageServerId;
30use parking_lot::Mutex;
31use postage::{
32 barrier,
33 prelude::{Sink as _, Stream as _},
34 watch,
35};
36use smol::channel::{self, Sender};
37use std::{
38 any::Any,
39 cmp::{self, Ordering},
40 convert::TryFrom,
41 ffi::OsStr,
42 fmt,
43 future::Future,
44 mem,
45 ops::{Deref, DerefMut},
46 path::{Path, PathBuf},
47 pin::Pin,
48 sync::{
49 atomic::{AtomicUsize, Ordering::SeqCst},
50 Arc,
51 },
52 time::{Duration, SystemTime},
53};
54use sum_tree::{Bias, Edit, SeekTarget, SumTree, TreeMap, TreeSet};
55use util::{paths::HOME, ResultExt, TryFutureExt};
56
/// Identifies a worktree within a project. For local worktrees this is
/// derived from the gpui model id (see `WorktreeId::from_usize` usage below);
/// remote worktrees receive the host's id over the wire.
#[derive(Copy, Clone, PartialEq, Eq, Debug, Hash, PartialOrd, Ord)]
pub struct WorktreeId(usize);
59
/// A directory tree the project operates on: either backed directly by the
/// local filesystem (`Local`) or mirrored from a collaborator over RPC
/// (`Remote`).
pub enum Worktree {
    Local(LocalWorktree),
    Remote(RemoteWorktree),
}
64
/// A worktree backed by the local filesystem and kept up to date by a
/// background scanner task.
pub struct LocalWorktree {
    snapshot: LocalSnapshot,
    // Sends batches of absolute paths for the background scanner to rescan;
    // the paired barrier resolves once the scanner has processed the batch.
    path_changes_tx: channel::Sender<(Vec<PathBuf>, barrier::Sender)>,
    // Broadcasts whether a scan is currently in progress (see `scan_complete`).
    is_scanning: (watch::Sender<bool>, watch::Receiver<bool>),
    _background_scanner_task: Task<()>,
    // Present while this worktree is shared with collaborators.
    share: Option<ShareState>,
    // Diagnostics per worktree-relative path; each vector is kept sorted by
    // language server id (see `update_diagnostics`).
    diagnostics: HashMap<
        Arc<Path>,
        Vec<(
            LanguageServerId,
            Vec<DiagnosticEntry<Unclipped<PointUtf16>>>,
        )>,
    >,
    diagnostic_summaries: HashMap<Arc<Path>, HashMap<LanguageServerId, DiagnosticSummary>>,
    client: Arc<Client>,
    fs: Arc<dyn Fs>,
    // Whether this worktree should be shown in the UI (see `is_visible`).
    visible: bool,
}
83
/// A worktree mirrored from a remote collaborator over RPC.
pub struct RemoteWorktree {
    // The snapshot most recently published to the foreground (see `remote`).
    snapshot: Snapshot,
    // The snapshot that the background task applies incoming updates to.
    background_snapshot: Arc<Mutex<Snapshot>>,
    project_id: u64,
    client: Arc<Client>,
    // `None` once disconnected from the host (see `disconnected_from_host`).
    updates_tx: Option<UnboundedSender<proto::UpdateWorktree>>,
    // Waiters for a given scan id, kept sorted by scan id
    // (see `wait_for_snapshot`).
    snapshot_subscriptions: VecDeque<(usize, oneshot::Sender<()>)>,
    replica_id: ReplicaId,
    diagnostic_summaries: HashMap<Arc<Path>, HashMap<LanguageServerId, DiagnosticSummary>>,
    visible: bool,
    disconnected: bool,
}
96
/// A point-in-time view of a worktree's entries and git repositories,
/// shared by local and remote worktrees.
#[derive(Clone)]
pub struct Snapshot {
    id: WorktreeId,
    abs_path: Arc<Path>,
    // Name of the root entry, used for fuzzy matching (see `Worktree::local`).
    root_name: String,
    root_char_bag: CharBag,
    // Entries indexed by path and by id, respectively.
    entries_by_path: SumTree<Entry>,
    entries_by_id: SumTree<PathEntry>,
    repository_entries: TreeMap<RepositoryWorkDirectory, RepositoryEntry>,

    /// A number that increases every time the worktree begins scanning
    /// a set of paths from the filesystem. This scanning could be caused
    /// by some operation performed on the worktree, such as reading or
    /// writing a file, or by an event reported by the filesystem.
    scan_id: usize,

    /// The latest scan id that has completed, and whose preceding scans
    /// have all completed. The current `scan_id` could be more than one
    /// greater than the `completed_scan_id` if operations are performed
    /// on the worktree while it is processing a file-system event.
    completed_scan_id: usize,
}
119
/// State of a git repository discovered within the worktree.
#[derive(Clone, Debug)]
pub struct RepositoryEntry {
    // Path to the actual .git folder.
    // Note: if .git is a file, this points to the folder indicated by the .git file
    pub(crate) git_dir_path: Arc<Path>,
    // The worktree entry id of the .git directory.
    pub(crate) git_dir_entry_id: ProjectEntryId,
    // The folder that contains the .git directory.
    pub(crate) work_directory: RepositoryWorkDirectory,
    // Scan id at which this repository entry was last updated.
    pub(crate) scan_id: usize,
    // TODO: pub(crate) head_ref: Arc<str>,
}
130
131impl RepositoryEntry {
132 // Note that this path should be relative to the worktree root.
133 pub(crate) fn in_dot_git(&self, path: &Path) -> bool {
134 path.starts_with(self.git_dir_path.as_ref())
135 }
136}
137
/// This path corresponds to the 'content path' (the folder that contains the
/// .git), relative to the worktree root.
#[derive(Clone, Debug, Ord, PartialOrd, Eq, PartialEq)]
pub struct RepositoryWorkDirectory(Arc<Path>);

impl RepositoryWorkDirectory {
    /// True when the worktree-relative `path` lies within this repository's
    /// working directory.
    pub(crate) fn contains(&self, path: &Path) -> bool {
        let work_dir: &Path = self.0.as_ref();
        path.starts_with(work_dir)
    }

    /// Converts a worktree-relative `path` into a path relative to this
    /// repository's working directory, or `None` when `path` lies outside it.
    pub(crate) fn relativize(&self, path: &Path) -> Option<RepoPath> {
        match path.strip_prefix(self.0.as_ref()) {
            Ok(suffix) => Some(RepoPath(suffix.to_owned())),
            Err(_) => None,
        }
    }
}

impl Deref for RepositoryWorkDirectory {
    type Target = Path;

    fn deref(&self) -> &Self::Target {
        // &Arc<Path> coerces to &Path.
        &self.0
    }
}

/// A path relative to a repository's working directory.
#[derive(Clone, Debug, Ord, PartialOrd, Eq, PartialEq)]
pub struct RepoPath(PathBuf);

impl AsRef<Path> for RepoPath {
    fn as_ref(&self) -> &Path {
        &self.0
    }
}

impl Deref for RepoPath {
    type Target = PathBuf;

    fn deref(&self) -> &Self::Target {
        &self.0
    }
}

impl AsRef<Path> for RepositoryWorkDirectory {
    fn as_ref(&self) -> &Path {
        &self.0
    }
}

impl Default for RepositoryWorkDirectory {
    /// The empty relative path, i.e. the worktree root itself.
    fn default() -> Self {
        Self(Arc::from(Path::new("")))
    }
}
191
/// Extends `Snapshot` with the extra state the local background scanner
/// maintains (ignore files, git repositories, entry-id bookkeeping).
#[derive(Debug, Clone)]
pub struct LocalSnapshot {
    // Parsed .gitignore per parent directory, paired with a usize that is
    // presumably the scan id it was loaded at — TODO confirm at the scanner.
    ignores_by_parent_abs_path: HashMap<Arc<Path>, (Arc<Gitignore>, usize)>,
    // The ProjectEntryId corresponds to the entry for the .git dir
    git_repositories: TreeMap<ProjectEntryId, Arc<Mutex<dyn GitRepository>>>,
    removed_entry_ids: HashMap<u64, ProjectEntryId>,
    next_entry_id: Arc<AtomicUsize>,
    snapshot: Snapshot,
}
201
202impl Deref for LocalSnapshot {
203 type Target = Snapshot;
204
205 fn deref(&self) -> &Self::Target {
206 &self.snapshot
207 }
208}
209
210impl DerefMut for LocalSnapshot {
211 fn deref_mut(&mut self) -> &mut Self::Target {
212 &mut self.snapshot
213 }
214}
215
/// Progress messages sent from the background scanner to the foreground task
/// spawned in `Worktree::local`.
enum ScanState {
    Started,
    Updated {
        snapshot: LocalSnapshot,
        // The paths that changed, with the kind of change for each.
        changes: HashMap<Arc<Path>, PathChange>,
        // When present, dropped by the foreground once the update has been
        // applied, unblocking whoever requested the rescan (see `refresh_entry`).
        barrier: Option<barrier::Sender>,
        // Whether scanning is still in progress after this update.
        scanning: bool,
    },
}
225
/// State kept while a local worktree is shared with collaborators.
struct ShareState {
    project_id: u64,
    // Publishes each new snapshot to the `_maintain_remote_snapshot` task.
    snapshots_tx: watch::Sender<LocalSnapshot>,
    // Signaled to make the update loop retry after a failed send (see `share`).
    resume_updates: watch::Sender<()>,
    _maintain_remote_snapshot: Task<Option<()>>,
}
232
/// Events emitted by a `Worktree` model.
pub enum Event {
    UpdatedEntries(HashMap<Arc<Path>, PathChange>),
    UpdatedGitRepositories(Vec<RepositoryEntry>),
}
237
// Registers `Worktree` as a gpui model whose emitted event type is `Event`.
impl Entity for Worktree {
    type Event = Event;
}
241
242impl Worktree {
    /// Create a worktree backed by the local filesystem, rooted at `path`.
    ///
    /// Stats the root up front (to learn whether it is a file or directory),
    /// builds an initial single-entry snapshot, then spawns:
    /// - a background task that watches the filesystem and runs the scanner, and
    /// - a foreground task that applies each scan result to the model and
    ///   emits `Event::UpdatedEntries`.
    pub async fn local(
        client: Arc<Client>,
        path: impl Into<Arc<Path>>,
        visible: bool,
        fs: Arc<dyn Fs>,
        next_entry_id: Arc<AtomicUsize>,
        cx: &mut AsyncAppContext,
    ) -> Result<ModelHandle<Self>> {
        // After determining whether the root entry is a file or a directory, populate the
        // snapshot's "root name", which will be used for the purpose of fuzzy matching.
        let abs_path = path.into();
        let metadata = fs
            .metadata(&abs_path)
            .await
            .context("failed to stat worktree path")?;

        Ok(cx.add_model(move |cx: &mut ModelContext<Worktree>| {
            let root_name = abs_path
                .file_name()
                .map_or(String::new(), |f| f.to_string_lossy().to_string());

            // scan_id starts at 1 with completed_scan_id 0, so the initial
            // scan is observably "in progress".
            let mut snapshot = LocalSnapshot {
                ignores_by_parent_abs_path: Default::default(),
                removed_entry_ids: Default::default(),
                git_repositories: Default::default(),
                next_entry_id,
                snapshot: Snapshot {
                    id: WorktreeId::from_usize(cx.model_id()),
                    abs_path: abs_path.clone(),
                    root_name: root_name.clone(),
                    root_char_bag: root_name.chars().map(|c| c.to_ascii_lowercase()).collect(),
                    entries_by_path: Default::default(),
                    entries_by_id: Default::default(),
                    repository_entries: Default::default(),
                    scan_id: 1,
                    completed_scan_id: 0,
                },
            };

            // Seed the snapshot with the root entry (the empty relative path)
            // when the root actually exists on disk.
            if let Some(metadata) = metadata {
                snapshot.insert_entry(
                    Entry::new(
                        Arc::from(Path::new("")),
                        &metadata,
                        &snapshot.next_entry_id,
                        snapshot.root_char_bag,
                    ),
                    fs.as_ref(),
                );
            }

            let (path_changes_tx, path_changes_rx) = channel::unbounded();
            let (scan_states_tx, mut scan_states_rx) = mpsc::unbounded();

            // Foreground: apply each state reported by the scanner to this
            // model. Ends when either the scanner or the model is dropped.
            cx.spawn_weak(|this, mut cx| async move {
                while let Some((state, this)) = scan_states_rx.next().await.zip(this.upgrade(&cx)) {
                    this.update(&mut cx, |this, cx| {
                        let this = this.as_local_mut().unwrap();
                        match state {
                            ScanState::Started => {
                                *this.is_scanning.0.borrow_mut() = true;
                            }
                            ScanState::Updated {
                                snapshot,
                                changes,
                                barrier,
                                scanning,
                            } => {
                                *this.is_scanning.0.borrow_mut() = scanning;
                                this.set_snapshot(snapshot, cx);
                                cx.emit(Event::UpdatedEntries(changes));
                                // Dropping the barrier unblocks whoever
                                // requested this rescan (see `refresh_entry`).
                                drop(barrier);
                            }
                        }
                        cx.notify();
                    });
                }
            })
            .detach();

            // Background: watch the filesystem (debounced at 100ms) and run
            // the scanner for the lifetime of the worktree.
            let background_scanner_task = cx.background().spawn({
                let fs = fs.clone();
                let snapshot = snapshot.clone();
                let background = cx.background().clone();
                async move {
                    let events = fs.watch(&abs_path, Duration::from_millis(100)).await;
                    BackgroundScanner::new(
                        snapshot,
                        fs,
                        scan_states_tx,
                        background,
                        path_changes_rx,
                    )
                    .run(events)
                    .await;
                }
            });

            Worktree::Local(LocalWorktree {
                snapshot,
                is_scanning: watch::channel_with(true),
                share: None,
                path_changes_tx,
                _background_scanner_task: background_scanner_task,
                diagnostics: Default::default(),
                diagnostic_summaries: Default::default(),
                client,
                fs,
                visible,
            })
        }))
    }
355
    /// Create a worktree that mirrors a collaborator's worktree.
    ///
    /// Spawns two tasks:
    /// - a background task that applies incoming `UpdateWorktree` messages to
    ///   a shared snapshot, and
    /// - a foreground task that copies that snapshot into the model, emits
    ///   events, and completes any satisfied snapshot subscriptions.
    pub fn remote(
        project_remote_id: u64,
        replica_id: ReplicaId,
        worktree: proto::WorktreeMetadata,
        client: Arc<Client>,
        cx: &mut AppContext,
    ) -> ModelHandle<Self> {
        cx.add_model(|cx: &mut ModelContext<Self>| {
            let snapshot = Snapshot {
                id: WorktreeId(worktree.id as usize),
                abs_path: Arc::from(PathBuf::from(worktree.abs_path)),
                root_name: worktree.root_name.clone(),
                root_char_bag: worktree
                    .root_name
                    .chars()
                    .map(|c| c.to_ascii_lowercase())
                    .collect(),
                entries_by_path: Default::default(),
                entries_by_id: Default::default(),
                repository_entries: Default::default(),
                scan_id: 1,
                completed_scan_id: 0,
            };

            let (updates_tx, mut updates_rx) = mpsc::unbounded();
            let background_snapshot = Arc::new(Mutex::new(snapshot.clone()));
            let (mut snapshot_updated_tx, mut snapshot_updated_rx) = watch::channel();

            // Background: apply each update to the shared snapshot, then
            // nudge the foreground task below.
            cx.background()
                .spawn({
                    let background_snapshot = background_snapshot.clone();
                    async move {
                        while let Some(update) = updates_rx.next().await {
                            if let Err(error) =
                                background_snapshot.lock().apply_remote_update(update)
                            {
                                log::error!("error applying worktree update: {}", error);
                            }
                            snapshot_updated_tx.send(()).await.ok();
                        }
                    }
                })
                .detach();

            // Foreground: publish the updated snapshot to the model and wake
            // `wait_for_snapshot` subscribers whose scan id is now observed.
            cx.spawn_weak(|this, mut cx| async move {
                while (snapshot_updated_rx.recv().await).is_some() {
                    if let Some(this) = this.upgrade(&cx) {
                        this.update(&mut cx, |this, cx| {
                            let this = this.as_remote_mut().unwrap();
                            this.snapshot = this.background_snapshot.lock().clone();
                            cx.emit(Event::UpdatedEntries(Default::default()));
                            cx.notify();
                            // Subscriptions are sorted by scan id, so pop from
                            // the front until one is still unsatisfied.
                            while let Some((scan_id, _)) = this.snapshot_subscriptions.front() {
                                if this.observed_snapshot(*scan_id) {
                                    let (_, tx) = this.snapshot_subscriptions.pop_front().unwrap();
                                    let _ = tx.send(());
                                } else {
                                    break;
                                }
                            }
                        });
                    } else {
                        break;
                    }
                }
            })
            .detach();

            Worktree::Remote(RemoteWorktree {
                project_id: project_remote_id,
                replica_id,
                snapshot: snapshot.clone(),
                background_snapshot,
                updates_tx: Some(updates_tx),
                snapshot_subscriptions: Default::default(),
                client: client.clone(),
                diagnostic_summaries: Default::default(),
                visible: worktree.visible,
                disconnected: false,
            })
        })
    }
438
439 pub fn as_local(&self) -> Option<&LocalWorktree> {
440 if let Worktree::Local(worktree) = self {
441 Some(worktree)
442 } else {
443 None
444 }
445 }
446
447 pub fn as_remote(&self) -> Option<&RemoteWorktree> {
448 if let Worktree::Remote(worktree) = self {
449 Some(worktree)
450 } else {
451 None
452 }
453 }
454
455 pub fn as_local_mut(&mut self) -> Option<&mut LocalWorktree> {
456 if let Worktree::Local(worktree) = self {
457 Some(worktree)
458 } else {
459 None
460 }
461 }
462
463 pub fn as_remote_mut(&mut self) -> Option<&mut RemoteWorktree> {
464 if let Worktree::Remote(worktree) = self {
465 Some(worktree)
466 } else {
467 None
468 }
469 }
470
471 pub fn is_local(&self) -> bool {
472 matches!(self, Worktree::Local(_))
473 }
474
475 pub fn is_remote(&self) -> bool {
476 !self.is_local()
477 }
478
479 pub fn snapshot(&self) -> Snapshot {
480 match self {
481 Worktree::Local(worktree) => worktree.snapshot().snapshot,
482 Worktree::Remote(worktree) => worktree.snapshot(),
483 }
484 }
485
486 pub fn scan_id(&self) -> usize {
487 match self {
488 Worktree::Local(worktree) => worktree.snapshot.scan_id,
489 Worktree::Remote(worktree) => worktree.snapshot.scan_id,
490 }
491 }
492
493 pub fn completed_scan_id(&self) -> usize {
494 match self {
495 Worktree::Local(worktree) => worktree.snapshot.completed_scan_id,
496 Worktree::Remote(worktree) => worktree.snapshot.completed_scan_id,
497 }
498 }
499
500 pub fn is_visible(&self) -> bool {
501 match self {
502 Worktree::Local(worktree) => worktree.visible,
503 Worktree::Remote(worktree) => worktree.visible,
504 }
505 }
506
507 pub fn replica_id(&self) -> ReplicaId {
508 match self {
509 Worktree::Local(_) => 0,
510 Worktree::Remote(worktree) => worktree.replica_id,
511 }
512 }
513
514 pub fn diagnostic_summaries(
515 &self,
516 ) -> impl Iterator<Item = (Arc<Path>, LanguageServerId, DiagnosticSummary)> + '_ {
517 match self {
518 Worktree::Local(worktree) => &worktree.diagnostic_summaries,
519 Worktree::Remote(worktree) => &worktree.diagnostic_summaries,
520 }
521 .iter()
522 .flat_map(|(path, summaries)| {
523 summaries
524 .iter()
525 .map(move |(&server_id, &summary)| (path.clone(), server_id, summary))
526 })
527 }
528
529 pub fn abs_path(&self) -> Arc<Path> {
530 match self {
531 Worktree::Local(worktree) => worktree.abs_path.clone(),
532 Worktree::Remote(worktree) => worktree.abs_path.clone(),
533 }
534 }
535}
536
537impl LocalWorktree {
538 pub fn contains_abs_path(&self, path: &Path) -> bool {
539 path.starts_with(&self.abs_path)
540 }
541
542 fn absolutize(&self, path: &Path) -> PathBuf {
543 if path.file_name().is_some() {
544 self.abs_path.join(path)
545 } else {
546 self.abs_path.to_path_buf()
547 }
548 }
549
    /// Open the file at the worktree-relative `path` into a new `Buffer`
    /// model with remote id `id`, loading its contents and — when the file
    /// is inside a git repository — its index text as the diff base.
    pub(crate) fn load_buffer(
        &mut self,
        id: u64,
        path: &Path,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<ModelHandle<Buffer>>> {
        let path = Arc::from(path);
        cx.spawn(move |this, mut cx| async move {
            let (file, contents, diff_base) = this
                .update(&mut cx, |t, cx| t.as_local().unwrap().load(&path, cx))
                .await?;
            // Build the text buffer off the main thread; replica 0 is the
            // local replica (see `Worktree::replica_id`).
            let text_buffer = cx
                .background()
                .spawn(async move { text::Buffer::new(0, id, contents) })
                .await;
            Ok(cx.add_model(|cx| {
                let mut buffer = Buffer::build(text_buffer, diff_base, Some(Arc::new(file)));
                // Compute the initial git diff against the loaded diff base.
                buffer.git_diff_recalc(cx);
                buffer
            }))
        })
    }
572
573 pub fn diagnostics_for_path(
574 &self,
575 path: &Path,
576 ) -> Vec<(
577 LanguageServerId,
578 Vec<DiagnosticEntry<Unclipped<PointUtf16>>>,
579 )> {
580 self.diagnostics.get(path).cloned().unwrap_or_default()
581 }
582
    /// Replace the diagnostics produced by `server_id` for `worktree_path`.
    ///
    /// Updates both the raw diagnostics map and the per-server summary, and,
    /// when the worktree is shared, pushes the new summary to collaborators.
    /// Returns whether anything observable changed (i.e. the old or new
    /// summary is non-empty).
    pub fn update_diagnostics(
        &mut self,
        server_id: LanguageServerId,
        worktree_path: Arc<Path>,
        diagnostics: Vec<DiagnosticEntry<Unclipped<PointUtf16>>>,
        _: &mut ModelContext<Worktree>,
    ) -> Result<bool> {
        let summaries_by_server_id = self
            .diagnostic_summaries
            .entry(worktree_path.clone())
            .or_default();

        let old_summary = summaries_by_server_id
            .remove(&server_id)
            .unwrap_or_default();

        let new_summary = DiagnosticSummary::new(&diagnostics);
        if new_summary.is_empty() {
            // Nothing left from this server: drop its entry, and drop the
            // whole path once no server reports anything for it.
            if let Some(diagnostics_by_server_id) = self.diagnostics.get_mut(&worktree_path) {
                if let Ok(ix) = diagnostics_by_server_id.binary_search_by_key(&server_id, |e| e.0) {
                    diagnostics_by_server_id.remove(ix);
                }
                if diagnostics_by_server_id.is_empty() {
                    self.diagnostics.remove(&worktree_path);
                }
            }
        } else {
            summaries_by_server_id.insert(server_id, new_summary);
            // The per-path vector is kept sorted by server id, hence the
            // binary search for insert-or-replace.
            let diagnostics_by_server_id =
                self.diagnostics.entry(worktree_path.clone()).or_default();
            match diagnostics_by_server_id.binary_search_by_key(&server_id, |e| e.0) {
                Ok(ix) => {
                    diagnostics_by_server_id[ix] = (server_id, diagnostics);
                }
                Err(ix) => {
                    diagnostics_by_server_id.insert(ix, (server_id, diagnostics));
                }
            }
        }

        if !old_summary.is_empty() || !new_summary.is_empty() {
            if let Some(share) = self.share.as_ref() {
                // Best-effort: a failed send is logged, not propagated.
                self.client
                    .send(proto::UpdateDiagnosticSummary {
                        project_id: share.project_id,
                        worktree_id: self.id().to_proto(),
                        summary: Some(proto::DiagnosticSummary {
                            path: worktree_path.to_string_lossy().to_string(),
                            language_server_id: server_id.0 as u64,
                            error_count: new_summary.error_count as u32,
                            warning_count: new_summary.warning_count as u32,
                        }),
                    })
                    .log_err();
            }
        }

        Ok(!old_summary.is_empty() || !new_summary.is_empty())
    }
642
    /// Install a snapshot produced by the background scanner, forwarding it
    /// to collaborators when shared, and emitting `UpdatedGitRepositories`
    /// for any repositories that differ between the old and new snapshots.
    fn set_snapshot(&mut self, new_snapshot: LocalSnapshot, cx: &mut ModelContext<Worktree>) {
        let updated_repos = Self::changed_repos(
            &self.snapshot.repository_entries,
            &new_snapshot.repository_entries,
        );
        self.snapshot = new_snapshot;

        if let Some(share) = self.share.as_mut() {
            // The `_maintain_remote_snapshot` task watches this channel and
            // streams the update to the server (see `share`).
            *share.snapshots_tx.borrow_mut() = self.snapshot.clone();
        }

        if !updated_repos.is_empty() {
            cx.emit(Event::UpdatedGitRepositories(updated_repos));
        }
    }
658
    /// Compute which repository entries differ between two snapshots.
    ///
    /// An entry is reported when no entry with the same `git_dir_path` *and*
    /// `scan_id` exists on the other side, so additions, removals, and
    /// rescans are all captured. The diff runs in both directions; a repo
    /// present on both sides with a changed scan id is inserted twice under
    /// the same path key and collapses to a single map entry.
    fn changed_repos(
        old_repos: &TreeMap<RepositoryWorkDirectory, RepositoryEntry>,
        new_repos: &TreeMap<RepositoryWorkDirectory, RepositoryEntry>,
    ) -> Vec<RepositoryEntry> {
        fn diff<'a>(
            a: impl Iterator<Item = &'a RepositoryEntry>,
            mut b: impl Iterator<Item = &'a RepositoryEntry>,
            updated: &mut HashMap<&'a Path, RepositoryEntry>,
        ) {
            // NOTE(review): `b.find(..)` consumes `b` as it scans, so each
            // subsequent `a_repo` resumes searching where the previous one
            // stopped. This appears to rely on both sides being iterated in a
            // consistent order (`TreeMap::values`) — confirm that unmatched
            // prefixes cannot cause later genuine matches to be skipped.
            for a_repo in a {
                let matched = b.find(|b_repo| {
                    a_repo.git_dir_path == b_repo.git_dir_path && a_repo.scan_id == b_repo.scan_id
                });

                if matched.is_none() {
                    updated.insert(a_repo.git_dir_path.as_ref(), a_repo.clone());
                }
            }
        }

        let mut updated = HashMap::<&Path, RepositoryEntry>::default();

        diff(old_repos.values(), new_repos.values(), &mut updated);
        diff(new_repos.values(), old_repos.values(), &mut updated);

        updated.into_values().collect()
    }
686
687 pub fn scan_complete(&self) -> impl Future<Output = ()> {
688 let mut is_scanning_rx = self.is_scanning.1.clone();
689 async move {
690 let mut is_scanning = is_scanning_rx.borrow().clone();
691 while is_scanning {
692 if let Some(value) = is_scanning_rx.recv().await {
693 is_scanning = value;
694 } else {
695 break;
696 }
697 }
698 }
699 }
700
701 pub fn snapshot(&self) -> LocalSnapshot {
702 self.snapshot.clone()
703 }
704
705 pub fn metadata_proto(&self) -> proto::WorktreeMetadata {
706 proto::WorktreeMetadata {
707 id: self.id().to_proto(),
708 root_name: self.root_name().to_string(),
709 visible: self.visible,
710 abs_path: self.abs_path().as_os_str().to_string_lossy().into(),
711 }
712 }
713
    /// Read the file at the worktree-relative `path`, returning its `File`
    /// metadata, contents, and — when it lives inside a git repository — the
    /// index text to diff against.
    fn load(
        &self,
        path: &Path,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<(File, String, Option<String>)>> {
        let handle = cx.handle();
        let path = Arc::from(path);
        let abs_path = self.absolutize(&path);
        let fs = self.fs.clone();
        let snapshot = self.snapshot();

        let mut index_task = None;

        // Kick off loading the git index text concurrently with the file read.
        if let Some(repo) = snapshot.repo_for(&path) {
            let repo_path = repo.work_directory.relativize(&path).unwrap();
            if let Some(repo) = self.git_repositories.get(&repo.git_dir_entry_id) {
                let repo = repo.to_owned();
                index_task = Some(
                    cx.background()
                        .spawn(async move { repo.lock().load_index_text(&repo_path) }),
                );
            }
        }

        cx.spawn(|this, mut cx| async move {
            let text = fs.load(&abs_path).await?;

            let diff_base = if let Some(index_task) = index_task {
                index_task.await
            } else {
                None
            };

            // Eagerly populate the snapshot with an updated entry for the loaded file
            let entry = this
                .update(&mut cx, |this, cx| {
                    this.as_local().unwrap().refresh_entry(path, None, cx)
                })
                .await?;

            Ok((
                File {
                    entry_id: entry.id,
                    worktree: handle,
                    path: entry.path,
                    mtime: entry.mtime,
                    is_local: true,
                    is_deleted: false,
                },
                text,
                diff_base,
            ))
        })
    }
768
769 pub fn save_buffer(
770 &self,
771 buffer_handle: ModelHandle<Buffer>,
772 path: Arc<Path>,
773 has_changed_file: bool,
774 cx: &mut ModelContext<Worktree>,
775 ) -> Task<Result<(clock::Global, RopeFingerprint, SystemTime)>> {
776 let handle = cx.handle();
777 let buffer = buffer_handle.read(cx);
778
779 let rpc = self.client.clone();
780 let buffer_id = buffer.remote_id();
781 let project_id = self.share.as_ref().map(|share| share.project_id);
782
783 let text = buffer.as_rope().clone();
784 let fingerprint = text.fingerprint();
785 let version = buffer.version();
786 let save = self.write_file(path, text, buffer.line_ending(), cx);
787
788 cx.as_mut().spawn(|mut cx| async move {
789 let entry = save.await?;
790
791 if has_changed_file {
792 let new_file = Arc::new(File {
793 entry_id: entry.id,
794 worktree: handle,
795 path: entry.path,
796 mtime: entry.mtime,
797 is_local: true,
798 is_deleted: false,
799 });
800
801 if let Some(project_id) = project_id {
802 rpc.send(proto::UpdateBufferFile {
803 project_id,
804 buffer_id,
805 file: Some(new_file.to_proto()),
806 })
807 .log_err();
808 }
809
810 buffer_handle.update(&mut cx, |buffer, cx| {
811 if has_changed_file {
812 buffer.file_updated(new_file, cx).detach();
813 }
814 });
815 }
816
817 if let Some(project_id) = project_id {
818 rpc.send(proto::BufferSaved {
819 project_id,
820 buffer_id,
821 version: serialize_version(&version),
822 mtime: Some(entry.mtime.into()),
823 fingerprint: serialize_fingerprint(fingerprint),
824 })?;
825 }
826
827 buffer_handle.update(&mut cx, |buffer, cx| {
828 buffer.did_save(version.clone(), fingerprint, entry.mtime, cx);
829 });
830
831 Ok((version, fingerprint, entry.mtime))
832 })
833 }
834
835 pub fn create_entry(
836 &self,
837 path: impl Into<Arc<Path>>,
838 is_dir: bool,
839 cx: &mut ModelContext<Worktree>,
840 ) -> Task<Result<Entry>> {
841 let path = path.into();
842 let abs_path = self.absolutize(&path);
843 let fs = self.fs.clone();
844 let write = cx.background().spawn(async move {
845 if is_dir {
846 fs.create_dir(&abs_path).await
847 } else {
848 fs.save(&abs_path, &Default::default(), Default::default())
849 .await
850 }
851 });
852
853 cx.spawn(|this, mut cx| async move {
854 write.await?;
855 this.update(&mut cx, |this, cx| {
856 this.as_local_mut().unwrap().refresh_entry(path, None, cx)
857 })
858 .await
859 })
860 }
861
862 pub fn write_file(
863 &self,
864 path: impl Into<Arc<Path>>,
865 text: Rope,
866 line_ending: LineEnding,
867 cx: &mut ModelContext<Worktree>,
868 ) -> Task<Result<Entry>> {
869 let path = path.into();
870 let abs_path = self.absolutize(&path);
871 let fs = self.fs.clone();
872 let write = cx
873 .background()
874 .spawn(async move { fs.save(&abs_path, &text, line_ending).await });
875
876 cx.spawn(|this, mut cx| async move {
877 write.await?;
878 this.update(&mut cx, |this, cx| {
879 this.as_local_mut().unwrap().refresh_entry(path, None, cx)
880 })
881 .await
882 })
883 }
884
    /// Delete the file or directory for `entry_id` from disk, then wait for
    /// the background scanner to process the removal. Returns `None` if the
    /// entry no longer exists in the snapshot.
    pub fn delete_entry(
        &self,
        entry_id: ProjectEntryId,
        cx: &mut ModelContext<Worktree>,
    ) -> Option<Task<Result<()>>> {
        let entry = self.entry_for_id(entry_id)?.clone();
        let abs_path = self.abs_path.clone();
        let fs = self.fs.clone();

        let delete = cx.background().spawn(async move {
            // Canonicalize the root so the path handed back to the scanner
            // matches the paths it observes from filesystem events.
            let mut abs_path = fs.canonicalize(&abs_path).await?;
            // An entry path with no file name is the root entry itself.
            if entry.path.file_name().is_some() {
                abs_path = abs_path.join(&entry.path);
            }
            if entry.is_file() {
                fs.remove_file(&abs_path, Default::default()).await?;
            } else {
                fs.remove_dir(
                    &abs_path,
                    RemoveOptions {
                        recursive: true,
                        ignore_if_not_exists: false,
                    },
                )
                .await?;
            }
            anyhow::Ok(abs_path)
        });

        Some(cx.spawn(|this, mut cx| async move {
            let abs_path = delete.await?;
            // Ask the scanner to rescan the deleted path, and block on the
            // barrier until it has done so.
            let (tx, mut rx) = barrier::channel();
            this.update(&mut cx, |this, _| {
                this.as_local_mut()
                    .unwrap()
                    .path_changes_tx
                    .try_send((vec![abs_path], tx))
            })?;
            rx.recv().await;
            Ok(())
        }))
    }
927
928 pub fn rename_entry(
929 &self,
930 entry_id: ProjectEntryId,
931 new_path: impl Into<Arc<Path>>,
932 cx: &mut ModelContext<Worktree>,
933 ) -> Option<Task<Result<Entry>>> {
934 let old_path = self.entry_for_id(entry_id)?.path.clone();
935 let new_path = new_path.into();
936 let abs_old_path = self.absolutize(&old_path);
937 let abs_new_path = self.absolutize(&new_path);
938 let fs = self.fs.clone();
939 let rename = cx.background().spawn(async move {
940 fs.rename(&abs_old_path, &abs_new_path, Default::default())
941 .await
942 });
943
944 Some(cx.spawn(|this, mut cx| async move {
945 rename.await?;
946 this.update(&mut cx, |this, cx| {
947 this.as_local_mut()
948 .unwrap()
949 .refresh_entry(new_path.clone(), Some(old_path), cx)
950 })
951 .await
952 }))
953 }
954
955 pub fn copy_entry(
956 &self,
957 entry_id: ProjectEntryId,
958 new_path: impl Into<Arc<Path>>,
959 cx: &mut ModelContext<Worktree>,
960 ) -> Option<Task<Result<Entry>>> {
961 let old_path = self.entry_for_id(entry_id)?.path.clone();
962 let new_path = new_path.into();
963 let abs_old_path = self.absolutize(&old_path);
964 let abs_new_path = self.absolutize(&new_path);
965 let fs = self.fs.clone();
966 let copy = cx.background().spawn(async move {
967 copy_recursive(
968 fs.as_ref(),
969 &abs_old_path,
970 &abs_new_path,
971 Default::default(),
972 )
973 .await
974 });
975
976 Some(cx.spawn(|this, mut cx| async move {
977 copy.await?;
978 this.update(&mut cx, |this, cx| {
979 this.as_local_mut()
980 .unwrap()
981 .refresh_entry(new_path.clone(), None, cx)
982 })
983 .await
984 }))
985 }
986
    /// Ask the background scanner to rescan `path` (and `old_path`, when the
    /// entry was just moved from there), wait for the scan to complete, and
    /// return the refreshed entry from the updated snapshot.
    fn refresh_entry(
        &self,
        path: Arc<Path>,
        old_path: Option<Arc<Path>>,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<Entry>> {
        let fs = self.fs.clone();
        let abs_root_path = self.abs_path.clone();
        let path_changes_tx = self.path_changes_tx.clone();
        cx.spawn_weak(move |this, mut cx| async move {
            // The scanner works with canonicalized absolute paths.
            let abs_path = fs.canonicalize(&abs_root_path).await?;
            let mut paths = Vec::with_capacity(2);
            // A relative path with no file name denotes the root itself.
            paths.push(if path.file_name().is_some() {
                abs_path.join(&path)
            } else {
                abs_path.clone()
            });
            if let Some(old_path) = old_path {
                paths.push(if old_path.file_name().is_some() {
                    abs_path.join(&old_path)
                } else {
                    abs_path.clone()
                });
            }

            // The barrier resolves once the scanner has processed the batch
            // (see the foreground task in `Worktree::local`).
            let (tx, mut rx) = barrier::channel();
            path_changes_tx.try_send((paths, tx))?;
            rx.recv().await;
            this.upgrade(&cx)
                .ok_or_else(|| anyhow!("worktree was dropped"))?
                .update(&mut cx, |this, _| {
                    this.entry_for_path(path)
                        .cloned()
                        .ok_or_else(|| anyhow!("failed to read path after update"))
                })
        })
    }
1024
    /// Start (or resume) streaming this worktree's snapshots to the server
    /// for `project_id`.
    ///
    /// If already shared, just nudges the update loop to resume. Otherwise,
    /// sends all current diagnostic summaries, then spawns a task that diffs
    /// each new snapshot against the previous one and sends the resulting
    /// `UpdateWorktree` messages in bounded chunks, retrying failed sends
    /// once `resume_updates` is signaled. The returned task resolves after
    /// the first full snapshot has been sent.
    pub fn share(&mut self, project_id: u64, cx: &mut ModelContext<Worktree>) -> Task<Result<()>> {
        let (share_tx, share_rx) = oneshot::channel();

        if let Some(share) = self.share.as_mut() {
            // Already shared: resolve immediately and wake the update loop.
            let _ = share_tx.send(());
            *share.resume_updates.borrow_mut() = ();
        } else {
            let (snapshots_tx, mut snapshots_rx) = watch::channel_with(self.snapshot());
            let (resume_updates_tx, mut resume_updates_rx) = watch::channel();
            let worktree_id = cx.model_id() as u64;

            for (path, summaries) in &self.diagnostic_summaries {
                for (&server_id, summary) in summaries {
                    if let Err(e) = self.client.send(proto::UpdateDiagnosticSummary {
                        project_id,
                        worktree_id,
                        summary: Some(summary.to_proto(server_id, &path)),
                    }) {
                        return Task::ready(Err(e));
                    }
                }
            }

            let _maintain_remote_snapshot = cx.background().spawn({
                let client = self.client.clone();
                async move {
                    let mut share_tx = Some(share_tx);
                    // Start from an empty snapshot so the first update sent
                    // is the complete worktree.
                    let mut prev_snapshot = LocalSnapshot {
                        ignores_by_parent_abs_path: Default::default(),
                        removed_entry_ids: Default::default(),
                        next_entry_id: Default::default(),
                        git_repositories: Default::default(),
                        snapshot: Snapshot {
                            id: WorktreeId(worktree_id as usize),
                            abs_path: Path::new("").into(),
                            root_name: Default::default(),
                            root_char_bag: Default::default(),
                            entries_by_path: Default::default(),
                            entries_by_id: Default::default(),
                            repository_entries: Default::default(),
                            scan_id: 0,
                            completed_scan_id: 0,
                        },
                    };
                    while let Some(snapshot) = snapshots_rx.recv().await {
                        // Tiny chunks under test exercise the chunking logic.
                        #[cfg(any(test, feature = "test-support"))]
                        const MAX_CHUNK_SIZE: usize = 2;
                        #[cfg(not(any(test, feature = "test-support")))]
                        const MAX_CHUNK_SIZE: usize = 256;

                        let update =
                            snapshot.build_update(&prev_snapshot, project_id, worktree_id, true);
                        for update in proto::split_worktree_update(update, MAX_CHUNK_SIZE) {
                            // Clear any stale resume signal before sending,
                            // so a retry below waits for a fresh one.
                            let _ = resume_updates_rx.try_recv();
                            while let Err(error) = client.request(update.clone()).await {
                                log::error!("failed to send worktree update: {}", error);
                                log::info!("waiting to resume updates");
                                if resume_updates_rx.next().await.is_none() {
                                    return Ok(());
                                }
                            }
                        }

                        // First full snapshot sent: resolve the share future.
                        if let Some(share_tx) = share_tx.take() {
                            let _ = share_tx.send(());
                        }

                        prev_snapshot = snapshot;
                    }

                    Ok::<_, anyhow::Error>(())
                }
                .log_err()
            });

            self.share = Some(ShareState {
                project_id,
                snapshots_tx,
                resume_updates: resume_updates_tx,
                _maintain_remote_snapshot,
            });
        }

        cx.foreground()
            .spawn(async move { share_rx.await.map_err(|_| anyhow!("share ended")) })
    }
1111
1112 pub fn unshare(&mut self) {
1113 self.share.take();
1114 }
1115
1116 pub fn is_shared(&self) -> bool {
1117 self.share.is_some()
1118 }
1119
1120 pub fn load_index_text(
1121 &self,
1122 repo: RepositoryEntry,
1123 repo_path: RepoPath,
1124 cx: &mut ModelContext<Worktree>,
1125 ) -> Task<Option<String>> {
1126 let Some(git_ptr) = self.git_repositories.get(&repo.git_dir_entry_id).map(|git_ptr| git_ptr.to_owned()) else {
1127 return Task::Ready(Some(None))
1128 };
1129 cx.background()
1130 .spawn(async move { git_ptr.lock().load_index_text(&repo_path) })
1131 }
1132}
1133
1134impl RemoteWorktree {
1135 fn snapshot(&self) -> Snapshot {
1136 self.snapshot.clone()
1137 }
1138
1139 pub fn disconnected_from_host(&mut self) {
1140 self.updates_tx.take();
1141 self.snapshot_subscriptions.clear();
1142 self.disconnected = true;
1143 }
1144
    /// Ask the host to save the buffer, then mark the local replica saved
    /// with the version, fingerprint, and mtime the host reports.
    pub fn save_buffer(
        &self,
        buffer_handle: ModelHandle<Buffer>,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<(clock::Global, RopeFingerprint, SystemTime)>> {
        let buffer = buffer_handle.read(cx);
        let buffer_id = buffer.remote_id();
        let version = buffer.version();
        let rpc = self.client.clone();
        let project_id = self.project_id;
        cx.as_mut().spawn(|mut cx| async move {
            let response = rpc
                .request(proto::SaveBuffer {
                    project_id,
                    buffer_id,
                    version: serialize_version(&version),
                })
                .await?;
            // The response carries the state the host actually wrote to disk.
            let version = deserialize_version(&response.version);
            let fingerprint = deserialize_fingerprint(&response.fingerprint)?;
            let mtime = response
                .mtime
                .ok_or_else(|| anyhow!("missing mtime"))?
                .into();

            buffer_handle.update(&mut cx, |buffer, cx| {
                buffer.did_save(version.clone(), fingerprint, mtime, cx);
            });

            Ok((version, fingerprint, mtime))
        })
    }
1177
1178 pub fn update_from_remote(&mut self, update: proto::UpdateWorktree) {
1179 if let Some(updates_tx) = &self.updates_tx {
1180 updates_tx
1181 .unbounded_send(update)
1182 .expect("consumer runs to completion");
1183 }
1184 }
1185
1186 fn observed_snapshot(&self, scan_id: usize) -> bool {
1187 self.completed_scan_id >= scan_id
1188 }
1189
1190 fn wait_for_snapshot(&mut self, scan_id: usize) -> impl Future<Output = Result<()>> {
1191 let (tx, rx) = oneshot::channel();
1192 if self.observed_snapshot(scan_id) {
1193 let _ = tx.send(());
1194 } else if self.disconnected {
1195 drop(tx);
1196 } else {
1197 match self
1198 .snapshot_subscriptions
1199 .binary_search_by_key(&scan_id, |probe| probe.0)
1200 {
1201 Ok(ix) | Err(ix) => self.snapshot_subscriptions.insert(ix, (scan_id, tx)),
1202 }
1203 }
1204
1205 async move {
1206 rx.await?;
1207 Ok(())
1208 }
1209 }
1210
1211 pub fn update_diagnostic_summary(
1212 &mut self,
1213 path: Arc<Path>,
1214 summary: &proto::DiagnosticSummary,
1215 ) {
1216 let server_id = LanguageServerId(summary.language_server_id as usize);
1217 let summary = DiagnosticSummary {
1218 error_count: summary.error_count as usize,
1219 warning_count: summary.warning_count as usize,
1220 };
1221
1222 if summary.is_empty() {
1223 if let Some(summaries) = self.diagnostic_summaries.get_mut(&path) {
1224 summaries.remove(&server_id);
1225 if summaries.is_empty() {
1226 self.diagnostic_summaries.remove(&path);
1227 }
1228 }
1229 } else {
1230 self.diagnostic_summaries
1231 .entry(path)
1232 .or_default()
1233 .insert(server_id, summary);
1234 }
1235 }
1236
1237 pub fn insert_entry(
1238 &mut self,
1239 entry: proto::Entry,
1240 scan_id: usize,
1241 cx: &mut ModelContext<Worktree>,
1242 ) -> Task<Result<Entry>> {
1243 let wait_for_snapshot = self.wait_for_snapshot(scan_id);
1244 cx.spawn(|this, mut cx| async move {
1245 wait_for_snapshot.await?;
1246 this.update(&mut cx, |worktree, _| {
1247 let worktree = worktree.as_remote_mut().unwrap();
1248 let mut snapshot = worktree.background_snapshot.lock();
1249 let entry = snapshot.insert_entry(entry);
1250 worktree.snapshot = snapshot.clone();
1251 entry
1252 })
1253 })
1254 }
1255
1256 pub(crate) fn delete_entry(
1257 &mut self,
1258 id: ProjectEntryId,
1259 scan_id: usize,
1260 cx: &mut ModelContext<Worktree>,
1261 ) -> Task<Result<()>> {
1262 let wait_for_snapshot = self.wait_for_snapshot(scan_id);
1263 cx.spawn(|this, mut cx| async move {
1264 wait_for_snapshot.await?;
1265 this.update(&mut cx, |worktree, _| {
1266 let worktree = worktree.as_remote_mut().unwrap();
1267 let mut snapshot = worktree.background_snapshot.lock();
1268 snapshot.delete_entry(id);
1269 worktree.snapshot = snapshot.clone();
1270 });
1271 Ok(())
1272 })
1273 }
1274}
1275
impl Snapshot {
    pub fn id(&self) -> WorktreeId {
        self.id
    }

    /// The absolute path of the worktree root on the host's filesystem.
    pub fn abs_path(&self) -> &Arc<Path> {
        &self.abs_path
    }

    /// Whether an entry with this id currently exists in the snapshot.
    pub fn contains_entry(&self, entry_id: ProjectEntryId) -> bool {
        self.entries_by_id.get(&entry_id, &()).is_some()
    }

    /// Inserts (or replaces) a single entry received over the wire, keeping
    /// `entries_by_id` and `entries_by_path` in sync.
    pub(crate) fn insert_entry(&mut self, entry: proto::Entry) -> Result<Entry> {
        let entry = Entry::try_from((&self.root_char_bag, entry))?;
        let old_entry = self.entries_by_id.insert_or_replace(
            PathEntry {
                id: entry.id,
                path: entry.path.clone(),
                is_ignored: entry.is_ignored,
                // Per-entry scan ids are not tracked on this path (see also
                // `apply_remote_update`).
                scan_id: 0,
            },
            &(),
        );
        // If the entry moved, remove its record at the old path before
        // inserting it at the new one.
        if let Some(old_entry) = old_entry {
            self.entries_by_path.remove(&PathKey(old_entry.path), &());
        }
        self.entries_by_path.insert_or_replace(entry.clone(), &());
        Ok(entry)
    }

    /// Removes an entry and all of its descendants, returning the removed
    /// entry's path if it existed.
    fn delete_entry(&mut self, entry_id: ProjectEntryId) -> Option<Arc<Path>> {
        let removed_entry = self.entries_by_id.remove(&entry_id, &())?;
        self.entries_by_path = {
            let mut cursor = self.entries_by_path.cursor();
            // Keep everything strictly before the removed path...
            let mut new_entries_by_path =
                cursor.slice(&TraversalTarget::Path(&removed_entry.path), Bias::Left, &());
            // ...skip the removed entry and everything beneath it...
            while let Some(entry) = cursor.item() {
                if entry.path.starts_with(&removed_entry.path) {
                    self.entries_by_id.remove(&entry.id, &());
                    cursor.next(&());
                } else {
                    break;
                }
            }
            // ...and keep the remainder of the tree.
            new_entries_by_path.push_tree(cursor.suffix(&()), &());
            new_entries_by_path
        };

        Some(removed_entry.path)
    }

    /// Applies a batch of entry changes received from the host, then
    /// advances `scan_id` (and `completed_scan_id` on the final update of a
    /// scan).
    pub(crate) fn apply_remote_update(&mut self, update: proto::UpdateWorktree) -> Result<()> {
        let mut entries_by_path_edits = Vec::new();
        let mut entries_by_id_edits = Vec::new();
        for entry_id in update.removed_entries {
            if let Some(entry) = self.entry_for_id(ProjectEntryId::from_proto(entry_id)) {
                entries_by_path_edits.push(Edit::Remove(PathKey(entry.path.clone())));
                entries_by_id_edits.push(Edit::Remove(entry.id));
            }
        }

        for entry in update.updated_entries {
            let entry = Entry::try_from((&self.root_char_bag, entry))?;
            // If the entry moved, remove its old path before inserting the
            // new one.
            if let Some(PathEntry { path, .. }) = self.entries_by_id.get(&entry.id, &()) {
                entries_by_path_edits.push(Edit::Remove(PathKey(path.clone())));
            }
            entries_by_id_edits.push(Edit::Insert(PathEntry {
                id: entry.id,
                path: entry.path.clone(),
                is_ignored: entry.is_ignored,
                scan_id: 0,
            }));
            entries_by_path_edits.push(Edit::Insert(entry));
        }

        self.entries_by_path.edit(entries_by_path_edits, &());
        self.entries_by_id.edit(entries_by_id_edits, &());
        self.scan_id = update.scan_id as usize;
        if update.is_last_update {
            self.completed_scan_id = update.scan_id as usize;
        }

        Ok(())
    }

    pub fn file_count(&self) -> usize {
        self.entries_by_path.summary().file_count
    }

    /// Number of non-ignored files in the worktree.
    pub fn visible_file_count(&self) -> usize {
        self.entries_by_path.summary().visible_file_count
    }

    /// Returns a traversal positioned at the `start_offset`-th entry,
    /// counting only the kinds selected by the `include_*` flags.
    fn traverse_from_offset(
        &self,
        include_dirs: bool,
        include_ignored: bool,
        start_offset: usize,
    ) -> Traversal {
        let mut cursor = self.entries_by_path.cursor();
        cursor.seek(
            &TraversalTarget::Count {
                count: start_offset,
                include_dirs,
                include_ignored,
            },
            Bias::Right,
            &(),
        );
        Traversal {
            cursor,
            include_dirs,
            include_ignored,
        }
    }

    /// Returns a traversal positioned at the first entry whose path is >=
    /// `path`.
    fn traverse_from_path(
        &self,
        include_dirs: bool,
        include_ignored: bool,
        path: &Path,
    ) -> Traversal {
        let mut cursor = self.entries_by_path.cursor();
        cursor.seek(&TraversalTarget::Path(path), Bias::Left, &());
        Traversal {
            cursor,
            include_dirs,
            include_ignored,
        }
    }

    /// Iterates over file entries, starting at the `start`-th file.
    pub fn files(&self, include_ignored: bool, start: usize) -> Traversal {
        self.traverse_from_offset(false, include_ignored, start)
    }

    /// Iterates over all entries, files and directories alike.
    pub fn entries(&self, include_ignored: bool) -> Traversal {
        self.traverse_from_offset(true, include_ignored, 0)
    }

    /// Iterates over the paths of all entries, excluding the root entry
    /// (whose relative path is empty).
    pub fn paths(&self) -> impl Iterator<Item = &Arc<Path>> {
        let empty_path = Path::new("");
        self.entries_by_path
            .cursor::<()>()
            .filter(move |entry| entry.path.as_ref() != empty_path)
            .map(|entry| &entry.path)
    }

    /// Iterates over the direct children of `parent_path`. Seeking with a
    /// right bias positions the traversal just past the parent itself.
    fn child_entries<'a>(&'a self, parent_path: &'a Path) -> ChildEntriesIter<'a> {
        let mut cursor = self.entries_by_path.cursor();
        cursor.seek(&TraversalTarget::Path(parent_path), Bias::Right, &());
        let traversal = Traversal {
            cursor,
            include_dirs: true,
            include_ignored: true,
        };
        ChildEntriesIter {
            traversal,
            parent_path,
        }
    }

    /// The worktree's root entry (the entry with the empty relative path),
    /// if present.
    pub fn root_entry(&self) -> Option<&Entry> {
        self.entry_for_path("")
    }

    pub fn root_name(&self) -> &str {
        &self.root_name
    }

    /// The id of the most recently started scan.
    pub fn scan_id(&self) -> usize {
        self.scan_id
    }

    /// Finds the entry at exactly `path`, if it exists.
    pub fn entry_for_path(&self, path: impl AsRef<Path>) -> Option<&Entry> {
        let path = path.as_ref();
        // The traversal lands on the first entry >= `path`; accept it only
        // on an exact match.
        self.traverse_from_path(true, true, path)
            .entry()
            .and_then(|entry| {
                if entry.path.as_ref() == path {
                    Some(entry)
                } else {
                    None
                }
            })
    }

    /// Finds the entry with the given id, resolving it through its path.
    pub fn entry_for_id(&self, id: ProjectEntryId) -> Option<&Entry> {
        let entry = self.entries_by_id.get(&id, &())?;
        self.entry_for_path(&entry.path)
    }

    /// The filesystem inode of the entry at `path`, if it exists.
    pub fn inode_for_path(&self, path: impl AsRef<Path>) -> Option<u64> {
        self.entry_for_path(path.as_ref()).map(|e| e.inode)
    }
}
1472
1473impl LocalSnapshot {
1474 pub(crate) fn repo_for(&self, path: &Path) -> Option<RepositoryEntry> {
1475 dbg!(&self.repository_entries)
1476 .closest(&RepositoryWorkDirectory(path.into()))
1477 .map(|(_, entry)| entry.to_owned())
1478 }
1479
1480 pub(crate) fn repo_for_metadata(
1481 &self,
1482 path: &Path,
1483 ) -> Option<(RepositoryWorkDirectory, Arc<Mutex<dyn GitRepository>>)> {
1484 self.repository_entries
1485 .iter()
1486 .find(|(_, repo)| repo.in_dot_git(path))
1487 .map(|(work_directory, entry)| {
1488 (
1489 work_directory.to_owned(),
1490 self.git_repositories
1491 .get(&entry.git_dir_entry_id)
1492 .expect("These two data structures should be in sync")
1493 .to_owned(),
1494 )
1495 })
1496 }
1497
1498 #[cfg(test)]
1499 pub(crate) fn build_initial_update(&self, project_id: u64) -> proto::UpdateWorktree {
1500 let root_name = self.root_name.clone();
1501 proto::UpdateWorktree {
1502 project_id,
1503 worktree_id: self.id().to_proto(),
1504 abs_path: self.abs_path().to_string_lossy().into(),
1505 root_name,
1506 updated_entries: self.entries_by_path.iter().map(Into::into).collect(),
1507 removed_entries: Default::default(),
1508 scan_id: self.scan_id as u64,
1509 is_last_update: true,
1510 }
1511 }
1512
1513 pub(crate) fn build_update(
1514 &self,
1515 other: &Self,
1516 project_id: u64,
1517 worktree_id: u64,
1518 include_ignored: bool,
1519 ) -> proto::UpdateWorktree {
1520 let mut updated_entries = Vec::new();
1521 let mut removed_entries = Vec::new();
1522 let mut self_entries = self
1523 .entries_by_id
1524 .cursor::<()>()
1525 .filter(|e| include_ignored || !e.is_ignored)
1526 .peekable();
1527 let mut other_entries = other
1528 .entries_by_id
1529 .cursor::<()>()
1530 .filter(|e| include_ignored || !e.is_ignored)
1531 .peekable();
1532 loop {
1533 match (self_entries.peek(), other_entries.peek()) {
1534 (Some(self_entry), Some(other_entry)) => {
1535 match Ord::cmp(&self_entry.id, &other_entry.id) {
1536 Ordering::Less => {
1537 let entry = self.entry_for_id(self_entry.id).unwrap().into();
1538 updated_entries.push(entry);
1539 self_entries.next();
1540 }
1541 Ordering::Equal => {
1542 if self_entry.scan_id != other_entry.scan_id {
1543 let entry = self.entry_for_id(self_entry.id).unwrap().into();
1544 updated_entries.push(entry);
1545 }
1546
1547 self_entries.next();
1548 other_entries.next();
1549 }
1550 Ordering::Greater => {
1551 removed_entries.push(other_entry.id.to_proto());
1552 other_entries.next();
1553 }
1554 }
1555 }
1556 (Some(self_entry), None) => {
1557 let entry = self.entry_for_id(self_entry.id).unwrap().into();
1558 updated_entries.push(entry);
1559 self_entries.next();
1560 }
1561 (None, Some(other_entry)) => {
1562 removed_entries.push(other_entry.id.to_proto());
1563 other_entries.next();
1564 }
1565 (None, None) => break,
1566 }
1567 }
1568
1569 proto::UpdateWorktree {
1570 project_id,
1571 worktree_id,
1572 abs_path: self.abs_path().to_string_lossy().into(),
1573 root_name: self.root_name().to_string(),
1574 updated_entries,
1575 removed_entries,
1576 scan_id: self.scan_id as u64,
1577 is_last_update: self.completed_scan_id == self.scan_id,
1578 }
1579 }
1580
1581 fn insert_entry(&mut self, mut entry: Entry, fs: &dyn Fs) -> Entry {
1582 if entry.is_file() && entry.path.file_name() == Some(&GITIGNORE) {
1583 let abs_path = self.abs_path.join(&entry.path);
1584 match smol::block_on(build_gitignore(&abs_path, fs)) {
1585 Ok(ignore) => {
1586 self.ignores_by_parent_abs_path.insert(
1587 abs_path.parent().unwrap().into(),
1588 (Arc::new(ignore), self.scan_id),
1589 );
1590 }
1591 Err(error) => {
1592 log::error!(
1593 "error loading .gitignore file {:?} - {:?}",
1594 &entry.path,
1595 error
1596 );
1597 }
1598 }
1599 }
1600
1601 self.reuse_entry_id(&mut entry);
1602
1603 if entry.kind == EntryKind::PendingDir {
1604 if let Some(existing_entry) =
1605 self.entries_by_path.get(&PathKey(entry.path.clone()), &())
1606 {
1607 entry.kind = existing_entry.kind;
1608 }
1609 }
1610
1611 let scan_id = self.scan_id;
1612 let removed = self.entries_by_path.insert_or_replace(entry.clone(), &());
1613 if let Some(removed) = removed {
1614 if removed.id != entry.id {
1615 self.entries_by_id.remove(&removed.id, &());
1616 }
1617 }
1618 self.entries_by_id.insert_or_replace(
1619 PathEntry {
1620 id: entry.id,
1621 path: entry.path.clone(),
1622 is_ignored: entry.is_ignored,
1623 scan_id,
1624 },
1625 &(),
1626 );
1627
1628 entry
1629 }
1630
1631 fn populate_dir(
1632 &mut self,
1633 parent_path: Arc<Path>,
1634 entries: impl IntoIterator<Item = Entry>,
1635 ignore: Option<Arc<Gitignore>>,
1636 fs: &dyn Fs,
1637 ) {
1638 let mut parent_entry = if let Some(parent_entry) =
1639 self.entries_by_path.get(&PathKey(parent_path.clone()), &())
1640 {
1641 parent_entry.clone()
1642 } else {
1643 log::warn!(
1644 "populating a directory {:?} that has been removed",
1645 parent_path
1646 );
1647 return;
1648 };
1649
1650 match parent_entry.kind {
1651 EntryKind::PendingDir => {
1652 parent_entry.kind = EntryKind::Dir;
1653 }
1654 EntryKind::Dir => {}
1655 _ => return,
1656 }
1657
1658 if let Some(ignore) = ignore {
1659 self.ignores_by_parent_abs_path.insert(
1660 self.abs_path.join(&parent_path).into(),
1661 (ignore, self.scan_id),
1662 );
1663 }
1664
1665 if parent_path.file_name() == Some(&DOT_GIT) {
1666 let abs_path = self.abs_path.join(&parent_path);
1667 let content_path: Arc<Path> = parent_path.parent().unwrap().into();
1668
1669 let key = RepositoryWorkDirectory(content_path.clone());
1670 if self.repository_entries.get(&key).is_none() {
1671 if let Some(repo) = fs.open_repo(abs_path.as_path()) {
1672 self.repository_entries.insert(
1673 key.clone(),
1674 RepositoryEntry {
1675 git_dir_path: parent_path.clone(),
1676 git_dir_entry_id: parent_entry.id,
1677 work_directory: key,
1678 scan_id: 0,
1679 },
1680 );
1681
1682 self.git_repositories.insert(parent_entry.id, repo)
1683 }
1684 }
1685 }
1686
1687 let mut entries_by_path_edits = vec![Edit::Insert(parent_entry)];
1688 let mut entries_by_id_edits = Vec::new();
1689
1690 for mut entry in entries {
1691 self.reuse_entry_id(&mut entry);
1692 entries_by_id_edits.push(Edit::Insert(PathEntry {
1693 id: entry.id,
1694 path: entry.path.clone(),
1695 is_ignored: entry.is_ignored,
1696 scan_id: self.scan_id,
1697 }));
1698 entries_by_path_edits.push(Edit::Insert(entry));
1699 }
1700
1701 self.entries_by_path.edit(entries_by_path_edits, &());
1702 self.entries_by_id.edit(entries_by_id_edits, &());
1703 }
1704
1705 fn reuse_entry_id(&mut self, entry: &mut Entry) {
1706 if let Some(removed_entry_id) = self.removed_entry_ids.remove(&entry.inode) {
1707 entry.id = removed_entry_id;
1708 } else if let Some(existing_entry) = self.entry_for_path(&entry.path) {
1709 entry.id = existing_entry.id;
1710 }
1711 }
1712
1713 fn remove_path(&mut self, path: &Path) {
1714 let mut new_entries;
1715 let removed_entries;
1716 {
1717 let mut cursor = self.entries_by_path.cursor::<TraversalProgress>();
1718 new_entries = cursor.slice(&TraversalTarget::Path(path), Bias::Left, &());
1719 removed_entries = cursor.slice(&TraversalTarget::PathSuccessor(path), Bias::Left, &());
1720 new_entries.push_tree(cursor.suffix(&()), &());
1721 }
1722 self.entries_by_path = new_entries;
1723
1724 let mut entries_by_id_edits = Vec::new();
1725 for entry in removed_entries.cursor::<()>() {
1726 let removed_entry_id = self
1727 .removed_entry_ids
1728 .entry(entry.inode)
1729 .or_insert(entry.id);
1730 *removed_entry_id = cmp::max(*removed_entry_id, entry.id);
1731 entries_by_id_edits.push(Edit::Remove(entry.id));
1732 }
1733 self.entries_by_id.edit(entries_by_id_edits, &());
1734
1735 if path.file_name() == Some(&GITIGNORE) {
1736 let abs_parent_path = self.abs_path.join(path.parent().unwrap());
1737 if let Some((_, scan_id)) = self
1738 .ignores_by_parent_abs_path
1739 .get_mut(abs_parent_path.as_path())
1740 {
1741 *scan_id = self.snapshot.scan_id;
1742 }
1743 } else if path.file_name() == Some(&DOT_GIT) {
1744 let repo_entry_key = RepositoryWorkDirectory(path.parent().unwrap().into());
1745 self.snapshot
1746 .repository_entries
1747 .update(&repo_entry_key, |repo| repo.scan_id = self.snapshot.scan_id);
1748 }
1749 }
1750
1751 fn ancestor_inodes_for_path(&self, path: &Path) -> TreeSet<u64> {
1752 let mut inodes = TreeSet::default();
1753 for ancestor in path.ancestors().skip(1) {
1754 if let Some(entry) = self.entry_for_path(ancestor) {
1755 inodes.insert(entry.inode);
1756 }
1757 }
1758 inodes
1759 }
1760
1761 fn ignore_stack_for_abs_path(&self, abs_path: &Path, is_dir: bool) -> Arc<IgnoreStack> {
1762 let mut new_ignores = Vec::new();
1763 for ancestor in abs_path.ancestors().skip(1) {
1764 if let Some((ignore, _)) = self.ignores_by_parent_abs_path.get(ancestor) {
1765 new_ignores.push((ancestor, Some(ignore.clone())));
1766 } else {
1767 new_ignores.push((ancestor, None));
1768 }
1769 }
1770
1771 let mut ignore_stack = IgnoreStack::none();
1772 for (parent_abs_path, ignore) in new_ignores.into_iter().rev() {
1773 if ignore_stack.is_abs_path_ignored(parent_abs_path, true) {
1774 ignore_stack = IgnoreStack::all();
1775 break;
1776 } else if let Some(ignore) = ignore {
1777 ignore_stack = ignore_stack.append(parent_abs_path.into(), ignore);
1778 }
1779 }
1780
1781 if ignore_stack.is_abs_path_ignored(abs_path, is_dir) {
1782 ignore_stack = IgnoreStack::all();
1783 }
1784
1785 ignore_stack
1786 }
1787}
1788
1789async fn build_gitignore(abs_path: &Path, fs: &dyn Fs) -> Result<Gitignore> {
1790 let contents = fs.load(abs_path).await?;
1791 let parent = abs_path.parent().unwrap_or_else(|| Path::new("/"));
1792 let mut builder = GitignoreBuilder::new(parent);
1793 for line in contents.lines() {
1794 builder.add_line(Some(abs_path.into()), line)?;
1795 }
1796 Ok(builder.build()?)
1797}
1798
1799impl WorktreeId {
1800 pub fn from_usize(handle_id: usize) -> Self {
1801 Self(handle_id)
1802 }
1803
1804 pub(crate) fn from_proto(id: u64) -> Self {
1805 Self(id as usize)
1806 }
1807
1808 pub fn to_proto(&self) -> u64 {
1809 self.0 as u64
1810 }
1811
1812 pub fn to_usize(&self) -> usize {
1813 self.0
1814 }
1815}
1816
1817impl fmt::Display for WorktreeId {
1818 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1819 self.0.fmt(f)
1820 }
1821}
1822
1823impl Deref for Worktree {
1824 type Target = Snapshot;
1825
1826 fn deref(&self) -> &Self::Target {
1827 match self {
1828 Worktree::Local(worktree) => &worktree.snapshot,
1829 Worktree::Remote(worktree) => &worktree.snapshot,
1830 }
1831 }
1832}
1833
impl Deref for LocalWorktree {
    type Target = LocalSnapshot;

    /// A local worktree transparently exposes its snapshot's methods.
    fn deref(&self) -> &Self::Target {
        &self.snapshot
    }
}
1841
impl Deref for RemoteWorktree {
    type Target = Snapshot;

    /// A remote worktree transparently exposes its snapshot's methods.
    fn deref(&self) -> &Self::Target {
        &self.snapshot
    }
}
1849
1850impl fmt::Debug for LocalWorktree {
1851 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1852 self.snapshot.fmt(f)
1853 }
1854}
1855
1856impl fmt::Debug for Snapshot {
1857 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1858 struct EntriesById<'a>(&'a SumTree<PathEntry>);
1859 struct EntriesByPath<'a>(&'a SumTree<Entry>);
1860
1861 impl<'a> fmt::Debug for EntriesByPath<'a> {
1862 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1863 f.debug_map()
1864 .entries(self.0.iter().map(|entry| (&entry.path, entry.id)))
1865 .finish()
1866 }
1867 }
1868
1869 impl<'a> fmt::Debug for EntriesById<'a> {
1870 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1871 f.debug_list().entries(self.0.iter()).finish()
1872 }
1873 }
1874
1875 f.debug_struct("Snapshot")
1876 .field("id", &self.id)
1877 .field("root_name", &self.root_name)
1878 .field("entries_by_path", &EntriesByPath(&self.entries_by_path))
1879 .field("entries_by_id", &EntriesById(&self.entries_by_id))
1880 .finish()
1881 }
1882}
1883
/// A handle to a file within a worktree, implementing `language::File`.
#[derive(Clone, PartialEq)]
pub struct File {
    /// The worktree that contains (or contained) this file.
    pub worktree: ModelHandle<Worktree>,
    /// Path of the file, relative to the worktree root.
    pub path: Arc<Path>,
    /// Last-known modification time of the file.
    pub mtime: SystemTime,
    /// Id of the worktree entry backing this file.
    pub(crate) entry_id: ProjectEntryId,
    /// Whether this file belongs to a local (vs. remote) worktree.
    pub(crate) is_local: bool,
    /// Whether the underlying entry has been deleted.
    pub(crate) is_deleted: bool,
}
1893
impl language::File for File {
    /// Returns `self` as a `LocalFile` only when the file lives in a local
    /// worktree.
    fn as_local(&self) -> Option<&dyn language::LocalFile> {
        if self.is_local {
            Some(self)
        } else {
            None
        }
    }

    fn mtime(&self) -> SystemTime {
        self.mtime
    }

    fn path(&self) -> &Arc<Path> {
        &self.path
    }

    /// Builds a display path: the worktree root (its name for visible
    /// worktrees, otherwise its absolute path with `$HOME` abbreviated as
    /// `~`) joined with the file's relative path.
    fn full_path(&self, cx: &AppContext) -> PathBuf {
        let mut full_path = PathBuf::new();
        let worktree = self.worktree.read(cx);

        if worktree.is_visible() {
            full_path.push(worktree.root_name());
        } else {
            let path = worktree.abs_path();

            if worktree.is_local() && path.starts_with(HOME.as_path()) {
                full_path.push("~");
                full_path.push(path.strip_prefix(HOME.as_path()).unwrap());
            } else {
                full_path.push(path)
            }
        }

        // An empty relative path means the file *is* the worktree root.
        if self.path.components().next().is_some() {
            full_path.push(&self.path);
        }

        full_path
    }

    /// Returns the last component of this handle's absolute path. If this handle refers to the root
    /// of its worktree, then this method will return the name of the worktree itself.
    fn file_name<'a>(&'a self, cx: &'a AppContext) -> &'a OsStr {
        self.path
            .file_name()
            .unwrap_or_else(|| OsStr::new(&self.worktree.read(cx).root_name))
    }

    fn is_deleted(&self) -> bool {
        self.is_deleted
    }

    fn as_any(&self) -> &dyn Any {
        self
    }

    /// Serializes this file for the wire protocol. The worktree id is the
    /// model handle's id (see `WorktreeId::from_usize`).
    fn to_proto(&self) -> rpc::proto::File {
        rpc::proto::File {
            worktree_id: self.worktree.id() as u64,
            entry_id: self.entry_id.to_proto(),
            path: self.path.to_string_lossy().into(),
            mtime: Some(self.mtime.into()),
            is_deleted: self.is_deleted,
        }
    }
}
1961
impl language::LocalFile for File {
    /// The file's absolute path on the local filesystem.
    fn abs_path(&self, cx: &AppContext) -> PathBuf {
        self.worktree
            .read(cx)
            .as_local()
            .unwrap()
            .abs_path
            .join(&self.path)
    }

    /// Loads the file's contents from disk on a background thread.
    fn load(&self, cx: &AppContext) -> Task<Result<String>> {
        let worktree = self.worktree.read(cx).as_local().unwrap();
        let abs_path = worktree.absolutize(&self.path);
        let fs = worktree.fs.clone();
        cx.background()
            .spawn(async move { fs.load(&abs_path).await })
    }

    /// Notifies collaborators that this buffer was reloaded from disk.
    /// Only sends the message when the worktree is currently shared.
    fn buffer_reloaded(
        &self,
        buffer_id: u64,
        version: &clock::Global,
        fingerprint: RopeFingerprint,
        line_ending: LineEnding,
        mtime: SystemTime,
        cx: &mut AppContext,
    ) {
        let worktree = self.worktree.read(cx).as_local().unwrap();
        if let Some(project_id) = worktree.share.as_ref().map(|share| share.project_id) {
            worktree
                .client
                .send(proto::BufferReloaded {
                    project_id,
                    buffer_id,
                    version: serialize_version(version),
                    mtime: Some(mtime.into()),
                    fingerprint: serialize_fingerprint(fingerprint),
                    line_ending: serialize_line_ending(line_ending) as i32,
                })
                // Best effort: a failed send is logged, not propagated.
                .log_err();
        }
    }
}
2005
2006impl File {
2007 pub fn from_proto(
2008 proto: rpc::proto::File,
2009 worktree: ModelHandle<Worktree>,
2010 cx: &AppContext,
2011 ) -> Result<Self> {
2012 let worktree_id = worktree
2013 .read(cx)
2014 .as_remote()
2015 .ok_or_else(|| anyhow!("not remote"))?
2016 .id();
2017
2018 if worktree_id.to_proto() != proto.worktree_id {
2019 return Err(anyhow!("worktree id does not match file"));
2020 }
2021
2022 Ok(Self {
2023 worktree,
2024 path: Path::new(&proto.path).into(),
2025 mtime: proto.mtime.ok_or_else(|| anyhow!("no timestamp"))?.into(),
2026 entry_id: ProjectEntryId::from_proto(proto.entry_id),
2027 is_local: false,
2028 is_deleted: proto.is_deleted,
2029 })
2030 }
2031
2032 pub fn from_dyn(file: Option<&Arc<dyn language::File>>) -> Option<&Self> {
2033 file.and_then(|f| f.as_any().downcast_ref())
2034 }
2035
2036 pub fn worktree_id(&self, cx: &AppContext) -> WorktreeId {
2037 self.worktree.read(cx).id()
2038 }
2039
2040 pub fn project_entry_id(&self, _: &AppContext) -> Option<ProjectEntryId> {
2041 if self.is_deleted {
2042 None
2043 } else {
2044 Some(self.entry_id)
2045 }
2046 }
2047}
2048
/// A single file or directory within a worktree snapshot.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct Entry {
    pub id: ProjectEntryId,
    pub kind: EntryKind,
    /// Path relative to the worktree root.
    pub path: Arc<Path>,
    /// Filesystem inode, taken from `fs::Metadata`.
    pub inode: u64,
    pub mtime: SystemTime,
    pub is_symlink: bool,
    /// Whether the entry is matched by a gitignore rule.
    pub is_ignored: bool,
}
2059
/// The kind of a worktree entry.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum EntryKind {
    /// A directory that has been discovered but whose children have not yet
    /// been scanned (see `LocalSnapshot::populate_dir`).
    PendingDir,
    /// A fully-scanned directory.
    Dir,
    /// A file, carrying a precomputed character bag for fuzzy matching.
    File(CharBag),
}
2066
/// How a path changed between two snapshots.
#[derive(Clone, Copy, Debug)]
pub enum PathChange {
    Added,
    Removed,
    Updated,
    /// Either added or updated — used when the previous state isn't known
    /// precisely (e.g. for events that raced the initial scan).
    AddedOrUpdated,
}
2074
2075impl Entry {
2076 fn new(
2077 path: Arc<Path>,
2078 metadata: &fs::Metadata,
2079 next_entry_id: &AtomicUsize,
2080 root_char_bag: CharBag,
2081 ) -> Self {
2082 Self {
2083 id: ProjectEntryId::new(next_entry_id),
2084 kind: if metadata.is_dir {
2085 EntryKind::PendingDir
2086 } else {
2087 EntryKind::File(char_bag_for_path(root_char_bag, &path))
2088 },
2089 path,
2090 inode: metadata.inode,
2091 mtime: metadata.mtime,
2092 is_symlink: metadata.is_symlink,
2093 is_ignored: false,
2094 }
2095 }
2096
2097 pub fn is_dir(&self) -> bool {
2098 matches!(self.kind, EntryKind::Dir | EntryKind::PendingDir)
2099 }
2100
2101 pub fn is_file(&self) -> bool {
2102 matches!(self.kind, EntryKind::File(_))
2103 }
2104}
2105
2106impl sum_tree::Item for Entry {
2107 type Summary = EntrySummary;
2108
2109 fn summary(&self) -> Self::Summary {
2110 let visible_count = if self.is_ignored { 0 } else { 1 };
2111 let file_count;
2112 let visible_file_count;
2113 if self.is_file() {
2114 file_count = 1;
2115 visible_file_count = visible_count;
2116 } else {
2117 file_count = 0;
2118 visible_file_count = 0;
2119 }
2120
2121 EntrySummary {
2122 max_path: self.path.clone(),
2123 count: 1,
2124 visible_count,
2125 file_count,
2126 visible_file_count,
2127 }
2128 }
2129}
2130
impl sum_tree::KeyedItem for Entry {
    type Key = PathKey;

    /// Entries are keyed (and therefore ordered) by worktree-relative path.
    fn key(&self) -> Self::Key {
        PathKey(self.path.clone())
    }
}
2138
/// Aggregate data maintained by the sum tree over `Entry` items.
#[derive(Clone, Debug)]
pub struct EntrySummary {
    /// Greatest path in the summarized range (paths are the sort key).
    max_path: Arc<Path>,
    /// Total number of entries.
    count: usize,
    /// Number of non-ignored entries.
    visible_count: usize,
    /// Number of file entries.
    file_count: usize,
    /// Number of non-ignored file entries.
    visible_file_count: usize,
}
2147
2148impl Default for EntrySummary {
2149 fn default() -> Self {
2150 Self {
2151 max_path: Arc::from(Path::new("")),
2152 count: 0,
2153 visible_count: 0,
2154 file_count: 0,
2155 visible_file_count: 0,
2156 }
2157 }
2158}
2159
impl sum_tree::Summary for EntrySummary {
    type Context = ();

    /// Combines two adjacent summaries: counts add, and `max_path` comes
    /// from the right-hand (greater) side.
    fn add_summary(&mut self, rhs: &Self, _: &()) {
        self.max_path = rhs.max_path.clone();
        self.count += rhs.count;
        self.visible_count += rhs.visible_count;
        self.file_count += rhs.file_count;
        self.visible_file_count += rhs.visible_file_count;
    }
}
2171
/// An item in the id-keyed sum tree, mapping a `ProjectEntryId` to its path.
#[derive(Clone, Debug)]
struct PathEntry {
    id: ProjectEntryId,
    path: Arc<Path>,
    is_ignored: bool,
    /// The scan in which this entry was last updated (0 in remote snapshots).
    scan_id: usize,
}
2179
impl sum_tree::Item for PathEntry {
    type Summary = PathEntrySummary;

    /// A single item's summary is just its own id.
    fn summary(&self) -> Self::Summary {
        PathEntrySummary { max_id: self.id }
    }
}
2187
impl sum_tree::KeyedItem for PathEntry {
    type Key = ProjectEntryId;

    /// Items are keyed (and therefore ordered) by entry id.
    fn key(&self) -> Self::Key {
        self.id
    }
}
2195
/// Summary for the id-keyed tree: the maximum entry id in a subtree.
#[derive(Clone, Debug, Default)]
struct PathEntrySummary {
    max_id: ProjectEntryId,
}
2200
impl sum_tree::Summary for PathEntrySummary {
    type Context = ();

    /// The right-hand summary always holds the greater id, since the tree
    /// is ordered by id (see `KeyedItem` for `PathEntry`).
    fn add_summary(&mut self, summary: &Self, _: &Self::Context) {
        self.max_id = summary.max_id;
    }
}
2208
impl<'a> sum_tree::Dimension<'a, PathEntrySummary> for ProjectEntryId {
    /// Seeking by id tracks the summarized maximum id.
    fn add_summary(&mut self, summary: &'a PathEntrySummary, _: &()) {
        *self = summary.max_id;
    }
}
2214
/// A worktree-relative path used as the ordering key for `Entry` items.
#[derive(Clone, Debug, Eq, PartialEq, Ord, PartialOrd)]
pub struct PathKey(Arc<Path>);
2217
2218impl Default for PathKey {
2219 fn default() -> Self {
2220 Self(Path::new("").into())
2221 }
2222}
2223
impl<'a> sum_tree::Dimension<'a, EntrySummary> for PathKey {
    /// Seeking by path tracks the summarized maximum path.
    fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
        self.0 = summary.max_path.clone();
    }
}
2229
/// Scans a local worktree's directory tree on background threads, keeping
/// `snapshot` up to date and reporting progress via `status_updates_tx`.
struct BackgroundScanner {
    /// The snapshot being built and maintained by this scanner.
    snapshot: Mutex<LocalSnapshot>,
    fs: Arc<dyn Fs>,
    /// Channel used to notify the foreground about scan progress.
    status_updates_tx: UnboundedSender<ScanState>,
    executor: Arc<executor::Background>,
    /// Requests from the worktree to re-scan specific paths.
    refresh_requests_rx: channel::Receiver<(Vec<PathBuf>, barrier::Sender)>,
    /// The last snapshot reported to the foreground, plus the paths that
    /// have changed since then.
    prev_state: Mutex<(Snapshot, Vec<Arc<Path>>)>,
    finished_initial_scan: bool,
}
2239
2240impl BackgroundScanner {
    /// Creates a scanner over the given snapshot. The scan does not start
    /// until `run` is called.
    fn new(
        snapshot: LocalSnapshot,
        fs: Arc<dyn Fs>,
        status_updates_tx: UnboundedSender<ScanState>,
        executor: Arc<executor::Background>,
        refresh_requests_rx: channel::Receiver<(Vec<PathBuf>, barrier::Sender)>,
    ) -> Self {
        Self {
            fs,
            status_updates_tx,
            executor,
            refresh_requests_rx,
            // Note: `prev_state` must be initialized before the `snapshot`
            // field below, because it clones from `snapshot` before the
            // value is moved into the mutex.
            prev_state: Mutex::new((snapshot.snapshot.clone(), Vec::new())),
            snapshot: Mutex::new(snapshot),
            finished_initial_scan: false,
        }
    }
2258
    /// The scanner's main loop: performs the initial recursive scan of the
    /// worktree, then keeps the snapshot up to date by processing filesystem
    /// events and explicit refresh requests until the worktree is dropped.
    async fn run(
        &mut self,
        mut events_rx: Pin<Box<dyn Send + Stream<Item = Vec<fsevent::Event>>>>,
    ) {
        use futures::FutureExt as _;

        let (root_abs_path, root_inode) = {
            let snapshot = self.snapshot.lock();
            (
                snapshot.abs_path.clone(),
                snapshot.root_entry().map(|e| e.inode),
            )
        };

        // Populate ignores above the root.
        let ignore_stack;
        for ancestor in root_abs_path.ancestors().skip(1) {
            if let Ok(ignore) = build_gitignore(&ancestor.join(&*GITIGNORE), self.fs.as_ref()).await
            {
                self.snapshot
                    .lock()
                    .ignores_by_parent_abs_path
                    .insert(ancestor.into(), (ignore.into(), 0));
            }
        }
        {
            let mut snapshot = self.snapshot.lock();
            snapshot.scan_id += 1;
            ignore_stack = snapshot.ignore_stack_for_abs_path(&root_abs_path, true);
            // If an ancestor gitignore covers the root itself, mark the root
            // entry as ignored before scanning.
            if ignore_stack.is_all() {
                if let Some(mut root_entry) = snapshot.root_entry().cloned() {
                    root_entry.is_ignored = true;
                    snapshot.insert_entry(root_entry, self.fs.as_ref());
                }
            }
        };

        // Perform an initial scan of the directory.
        let (scan_job_tx, scan_job_rx) = channel::unbounded();
        smol::block_on(scan_job_tx.send(ScanJob {
            abs_path: root_abs_path,
            path: Arc::from(Path::new("")),
            ignore_stack,
            ancestor_inodes: TreeSet::from_ordered_entries(root_inode),
            scan_queue: scan_job_tx.clone(),
        }))
        .unwrap();
        // Drop our copy of the sender so the scan can terminate once all
        // spawned sub-jobs have finished.
        drop(scan_job_tx);
        self.scan_dirs(true, scan_job_rx).await;
        {
            let mut snapshot = self.snapshot.lock();
            snapshot.completed_scan_id = snapshot.scan_id;
        }
        self.send_status_update(false, None);

        // Process any FS events that occurred while performing the initial scan.
        // For these events, update events cannot be as precise, because we didn't
        // have the previous state loaded yet.
        if let Poll::Ready(Some(events)) = futures::poll!(events_rx.next()) {
            let mut paths = events.into_iter().map(|e| e.path).collect::<Vec<_>>();
            // Drain any further batches that are already queued.
            while let Poll::Ready(Some(more_events)) = futures::poll!(events_rx.next()) {
                paths.extend(more_events.into_iter().map(|e| e.path));
            }
            self.process_events(paths).await;
        }

        self.finished_initial_scan = true;

        // Continue processing events until the worktree is dropped.
        loop {
            select_biased! {
                // Process any path refresh requests from the worktree. Prioritize
                // these before handling changes reported by the filesystem.
                request = self.refresh_requests_rx.recv().fuse() => {
                    let Ok((paths, barrier)) = request else { break };
                    if !self.process_refresh_request(paths, barrier).await {
                        return;
                    }
                }

                events = events_rx.next().fuse() => {
                    let Some(events) = events else { break };
                    let mut paths = events.into_iter().map(|e| e.path).collect::<Vec<_>>();
                    // Coalesce all already-queued batches into one pass.
                    while let Poll::Ready(Some(more_events)) = futures::poll!(events_rx.next()) {
                        paths.extend(more_events.into_iter().map(|e| e.path));
                    }
                    self.process_events(paths).await;
                }
            }
        }
    }
2350
    /// Reloads the entries for the requested paths, then sends a status
    /// update holding `barrier` so the requester can wait for the update to
    /// be observed. Returns false if the status channel has been dropped.
    async fn process_refresh_request(&self, paths: Vec<PathBuf>, barrier: barrier::Sender) -> bool {
        self.reload_entries_for_paths(paths, None).await;
        self.send_status_update(false, Some(barrier))
    }
2355
    /// Handles a batch of FS event paths: reloads the affected entries,
    /// recursively rescans any directories queued by that reload, refreshes
    /// ignore statuses, prunes state for removed entries, and finally
    /// broadcasts a status update.
    async fn process_events(&mut self, paths: Vec<PathBuf>) {
        let (scan_job_tx, scan_job_rx) = channel::unbounded();
        if let Some(mut paths) = self
            .reload_entries_for_paths(paths, Some(scan_job_tx.clone()))
            .await
        {
            // Record which relative paths changed, so the next status update
            // can build a change set restricted to them.
            paths.sort_unstable();
            util::extend_sorted(&mut self.prev_state.lock().1, paths, usize::MAX, Ord::cmp);
        }
        // Dropping our sender lets `scan_dirs` finish once the queued jobs
        // (and any jobs they spawn) have been drained.
        drop(scan_job_tx);
        self.scan_dirs(false, scan_job_rx).await;

        self.update_ignore_statuses().await;

        let mut snapshot = self.snapshot.lock();

        // Discard git repositories whose entries no longer exist in the snapshot.
        let mut git_repositories = mem::take(&mut snapshot.git_repositories);
        git_repositories.retain(|project_entry_id, _| snapshot.contains_entry(*project_entry_id));
        snapshot.git_repositories = git_repositories;

        snapshot.removed_entry_ids.clear();
        snapshot.completed_scan_id = snapshot.scan_id;
        drop(snapshot);

        self.send_status_update(false, None);
    }
2382
    /// Drains `scan_jobs_rx` with one worker task per CPU. Workers also
    /// service path refresh requests (prioritized over scan jobs) and, when
    /// `enable_progress_updates` is set, periodically send progress updates
    /// to the worktree while scanning.
    async fn scan_dirs(
        &self,
        enable_progress_updates: bool,
        scan_jobs_rx: channel::Receiver<ScanJob>,
    ) {
        use futures::FutureExt as _;

        // If the receiving end is gone, there is nobody to report to; bail.
        if self
            .status_updates_tx
            .unbounded_send(ScanState::Started)
            .is_err()
        {
            return;
        }

        let progress_update_count = AtomicUsize::new(0);
        self.executor
            .scoped(|scope| {
                for _ in 0..self.executor.num_cpus() {
                    scope.spawn(async {
                        let mut last_progress_update_count = 0;
                        let progress_update_timer = self.progress_timer(enable_progress_updates).fuse();
                        futures::pin_mut!(progress_update_timer);

                        loop {
                            select_biased! {
                                // Process any path refresh requests before moving on to process
                                // the scan queue, so that user operations are prioritized.
                                request = self.refresh_requests_rx.recv().fuse() => {
                                    let Ok((paths, barrier)) = request else { break };
                                    if !self.process_refresh_request(paths, barrier).await {
                                        return;
                                    }
                                }

                                // Send periodic progress updates to the worktree. Use an atomic counter
                                // to ensure that only one of the workers sends a progress update after
                                // the update interval elapses.
                                _ = progress_update_timer => {
                                    match progress_update_count.compare_exchange(
                                        last_progress_update_count,
                                        last_progress_update_count + 1,
                                        SeqCst,
                                        SeqCst
                                    ) {
                                        Ok(_) => {
                                            last_progress_update_count += 1;
                                            self.send_status_update(true, None);
                                        }
                                        Err(count) => {
                                            // Another worker already sent this update; adopt its count.
                                            last_progress_update_count = count;
                                        }
                                    }
                                    progress_update_timer.set(self.progress_timer(enable_progress_updates).fuse());
                                }

                                // Recursively load directories from the file system.
                                job = scan_jobs_rx.recv().fuse() => {
                                    let Ok(job) = job else { break };
                                    if let Err(err) = self.scan_dir(&job).await {
                                        if job.path.as_ref() != Path::new("") {
                                            log::error!("error scanning directory {:?}: {}", job.abs_path, err);
                                        }
                                    }
                                }
                            }
                        }
                    })
                }
            })
            .await;
    }
2455
    /// Sends a `ScanState::Updated` message containing a clone of the current
    /// snapshot plus the change set relative to the previously-reported
    /// snapshot. Returns false if the receiving side has been dropped.
    /// Note the lock order: `prev_state` is acquired before `snapshot`.
    fn send_status_update(&self, scanning: bool, barrier: Option<barrier::Sender>) -> bool {
        let mut prev_state = self.prev_state.lock();
        let snapshot = self.snapshot.lock().clone();
        // Swap the stored previous snapshot with the current one, and take the
        // accumulated changed paths so they are reported exactly once.
        let mut old_snapshot = snapshot.snapshot.clone();
        mem::swap(&mut old_snapshot, &mut prev_state.0);
        let changed_paths = mem::take(&mut prev_state.1);
        let changes = self.build_change_set(&old_snapshot, &snapshot.snapshot, changed_paths);
        self.status_updates_tx
            .unbounded_send(ScanState::Updated {
                snapshot,
                changes,
                scanning,
                barrier,
            })
            .is_ok()
    }
2472
    /// Scans a single directory: builds an entry for each child, applies
    /// `.gitignore` rules, and queues scan jobs for subdirectories that
    /// should be descended into.
    async fn scan_dir(&self, job: &ScanJob) -> Result<()> {
        let mut new_entries: Vec<Entry> = Vec::new();
        // One element per directory entry that is a dir; `None` marks dirs
        // that will not be scanned (recursive symlinks).
        let mut new_jobs: Vec<Option<ScanJob>> = Vec::new();
        let mut ignore_stack = job.ignore_stack.clone();
        let mut new_ignore = None;
        let (root_abs_path, root_char_bag, next_entry_id) = {
            let snapshot = self.snapshot.lock();
            (
                snapshot.abs_path().clone(),
                snapshot.root_char_bag,
                snapshot.next_entry_id.clone(),
            )
        };
        let mut child_paths = self.fs.read_dir(&job.abs_path).await?;
        while let Some(child_abs_path) = child_paths.next().await {
            let child_abs_path: Arc<Path> = match child_abs_path {
                Ok(child_abs_path) => child_abs_path.into(),
                Err(error) => {
                    log::error!("error processing entry {:?}", error);
                    continue;
                }
            };

            let child_name = child_abs_path.file_name().unwrap();
            let child_path: Arc<Path> = job.path.join(child_name).into();
            let child_metadata = match self.fs.metadata(&child_abs_path).await {
                Ok(Some(metadata)) => metadata,
                // The child disappeared between listing and stat; skip it.
                Ok(None) => continue,
                Err(err) => {
                    log::error!("error processing {:?}: {:?}", child_abs_path, err);
                    continue;
                }
            };

            // If we find a .gitignore, add it to the stack of ignores used to determine which paths are ignored
            if child_name == *GITIGNORE {
                match build_gitignore(&child_abs_path, self.fs.as_ref()).await {
                    Ok(ignore) => {
                        let ignore = Arc::new(ignore);
                        ignore_stack = ignore_stack.append(job.abs_path.clone(), ignore.clone());
                        new_ignore = Some(ignore);
                    }
                    Err(error) => {
                        log::error!(
                            "error loading .gitignore file {:?} - {:?}",
                            child_name,
                            error
                        );
                    }
                }

                // Update ignore status of any child entries we've already processed to reflect
                // the ignore file in the current directory. Because `.gitignore` starts with a
                // `.`, it sorts near the beginning of the listing, so there should rarely be
                // many already-processed entries. Update the ignore stack associated with any
                // new jobs as well.
                let mut new_jobs = new_jobs.iter_mut();
                for entry in &mut new_entries {
                    let entry_abs_path = root_abs_path.join(&entry.path);
                    entry.is_ignored =
                        ignore_stack.is_abs_path_ignored(&entry_abs_path, entry.is_dir());

                    if entry.is_dir() {
                        if let Some(job) = new_jobs.next().expect("Missing scan job for entry") {
                            job.ignore_stack = if entry.is_ignored {
                                IgnoreStack::all()
                            } else {
                                ignore_stack.clone()
                            };
                        }
                    }
                }
            }

            let mut child_entry = Entry::new(
                child_path.clone(),
                &child_metadata,
                &next_entry_id,
                root_char_bag,
            );

            if child_entry.is_dir() {
                let is_ignored = ignore_stack.is_abs_path_ignored(&child_abs_path, true);
                child_entry.is_ignored = is_ignored;

                // Avoid recursing until crash in the case of a recursive symlink
                if !job.ancestor_inodes.contains(&child_entry.inode) {
                    let mut ancestor_inodes = job.ancestor_inodes.clone();
                    ancestor_inodes.insert(child_entry.inode);

                    new_jobs.push(Some(ScanJob {
                        abs_path: child_abs_path,
                        path: child_path,
                        ignore_stack: if is_ignored {
                            IgnoreStack::all()
                        } else {
                            ignore_stack.clone()
                        },
                        ancestor_inodes,
                        scan_queue: job.scan_queue.clone(),
                    }));
                } else {
                    new_jobs.push(None);
                }
            } else {
                child_entry.is_ignored = ignore_stack.is_abs_path_ignored(&child_abs_path, false);
            }

            new_entries.push(child_entry);
        }

        self.snapshot.lock().populate_dir(
            job.path.clone(),
            new_entries,
            new_ignore,
            self.fs.as_ref(),
        );

        // Enqueue the subdirectory jobs only after the parent directory has
        // been populated in the snapshot.
        for new_job in new_jobs {
            if let Some(new_job) = new_job {
                job.scan_queue.send(new_job).await.unwrap();
            }
        }

        Ok(())
    }
2598
    /// Refreshes the snapshot entries for the given absolute paths. Returns
    /// the worktree-relative paths that were processed, or `None` if the
    /// worktree root could not be canonicalized. When `scan_queue_tx` is
    /// provided, refreshed directories are queued for a recursive rescan.
    async fn reload_entries_for_paths(
        &self,
        mut abs_paths: Vec<PathBuf>,
        scan_queue_tx: Option<Sender<ScanJob>>,
    ) -> Option<Vec<Arc<Path>>> {
        let doing_recursive_update = scan_queue_tx.is_some();

        // After sorting, a path's descendants immediately follow it, so this
        // drops any path whose ancestor is also present in the list.
        abs_paths.sort_unstable();
        abs_paths.dedup_by(|a, b| a.starts_with(&b));

        let root_abs_path = self.snapshot.lock().abs_path.clone();
        let root_canonical_path = self.fs.canonicalize(&root_abs_path).await.log_err()?;
        let metadata = futures::future::join_all(
            abs_paths
                .iter()
                .map(|abs_path| self.fs.metadata(&abs_path))
                .collect::<Vec<_>>(),
        )
        .await;

        let mut snapshot = self.snapshot.lock();
        let is_idle = snapshot.completed_scan_id == snapshot.scan_id;
        snapshot.scan_id += 1;
        // If no recursive scan will follow, this refresh completes on its own.
        if is_idle && !doing_recursive_update {
            snapshot.completed_scan_id = snapshot.scan_id;
        }

        // Remove any entries for paths that no longer exist or are being recursively
        // refreshed. Do this before adding any new entries, so that renames can be
        // detected regardless of the order of the paths.
        let mut event_paths = Vec::<Arc<Path>>::with_capacity(abs_paths.len());
        for (abs_path, metadata) in abs_paths.iter().zip(metadata.iter()) {
            if let Ok(path) = abs_path.strip_prefix(&root_canonical_path) {
                if matches!(metadata, Ok(None)) || doing_recursive_update {
                    snapshot.remove_path(path);
                }
                event_paths.push(path.into());
            } else {
                log::error!(
                    "unexpected event {:?} for root path {:?}",
                    abs_path,
                    root_canonical_path
                );
            }
        }

        for (path, metadata) in event_paths.iter().cloned().zip(metadata.into_iter()) {
            let abs_path: Arc<Path> = root_abs_path.join(&path).into();

            match metadata {
                Ok(Some(metadata)) => {
                    let ignore_stack =
                        snapshot.ignore_stack_for_abs_path(&abs_path, metadata.is_dir);
                    let mut fs_entry = Entry::new(
                        path.clone(),
                        &metadata,
                        snapshot.next_entry_id.as_ref(),
                        snapshot.root_char_bag,
                    );
                    fs_entry.is_ignored = ignore_stack.is_all();
                    snapshot.insert_entry(fs_entry, self.fs.as_ref());

                    let scan_id = snapshot.scan_id;

                    // If this path lives inside a repository's .git directory,
                    // reload that repository's index and bump its scan id.
                    let repo_with_path_in_dotgit = snapshot.repo_for_metadata(&path);
                    if let Some((key, repo)) = repo_with_path_in_dotgit {
                        repo.lock().reload_index();

                        snapshot
                            .repository_entries
                            .update(&key, |entry| entry.scan_id = scan_id);
                    }

                    if let Some(scan_queue_tx) = &scan_queue_tx {
                        let mut ancestor_inodes = snapshot.ancestor_inodes_for_path(&path);
                        // Guard against symlink cycles before queueing a rescan.
                        if metadata.is_dir && !ancestor_inodes.contains(&metadata.inode) {
                            ancestor_inodes.insert(metadata.inode);
                            smol::block_on(scan_queue_tx.send(ScanJob {
                                abs_path,
                                path,
                                ignore_stack,
                                ancestor_inodes,
                                scan_queue: scan_queue_tx.clone(),
                            }))
                            .unwrap();
                        }
                    }
                }
                Ok(None) => {}
                Err(err) => {
                    // TODO - create a special 'error' entry in the entries tree to mark this
                    log::error!("error reading file on event {:?}", err);
                }
            }
        }

        Some(event_paths)
    }
2697
    /// Reconciles ignore state after a scan: removes ignores whose
    /// `.gitignore` files no longer exist, and recomputes ignore statuses
    /// under directories whose `.gitignore` changed since the last completed
    /// scan, processing them in parallel with one worker per CPU.
    async fn update_ignore_statuses(&self) {
        use futures::FutureExt as _;

        let mut snapshot = self.snapshot.lock().clone();
        let mut ignores_to_update = Vec::new();
        let mut ignores_to_delete = Vec::new();
        for (parent_abs_path, (_, scan_id)) in &snapshot.ignores_by_parent_abs_path {
            if let Ok(parent_path) = parent_abs_path.strip_prefix(&snapshot.abs_path) {
                // This ignore was touched after the last completed scan and its
                // directory still exists, so its subtree needs re-evaluation.
                if *scan_id > snapshot.completed_scan_id
                    && snapshot.entry_for_path(parent_path).is_some()
                {
                    ignores_to_update.push(parent_abs_path.clone());
                }

                let ignore_path = parent_path.join(&*GITIGNORE);
                if snapshot.entry_for_path(ignore_path).is_none() {
                    ignores_to_delete.push(parent_abs_path.clone());
                }
            }
        }

        for parent_abs_path in ignores_to_delete {
            // Remove from both our local clone and the shared snapshot.
            snapshot.ignores_by_parent_abs_path.remove(&parent_abs_path);
            self.snapshot
                .lock()
                .ignores_by_parent_abs_path
                .remove(&parent_abs_path);
        }

        let (ignore_queue_tx, ignore_queue_rx) = channel::unbounded();
        ignores_to_update.sort_unstable();
        let mut ignores_to_update = ignores_to_update.into_iter().peekable();
        while let Some(parent_abs_path) = ignores_to_update.next() {
            // Skip descendants of this path; the recursive job below covers them.
            while ignores_to_update
                .peek()
                .map_or(false, |p| p.starts_with(&parent_abs_path))
            {
                ignores_to_update.next().unwrap();
            }

            let ignore_stack = snapshot.ignore_stack_for_abs_path(&parent_abs_path, true);
            smol::block_on(ignore_queue_tx.send(UpdateIgnoreStatusJob {
                abs_path: parent_abs_path,
                ignore_stack,
                ignore_queue: ignore_queue_tx.clone(),
            }))
            .unwrap();
        }
        // Drop our sender so the workers finish when the queue is drained.
        drop(ignore_queue_tx);

        self.executor
            .scoped(|scope| {
                for _ in 0..self.executor.num_cpus() {
                    scope.spawn(async {
                        loop {
                            select_biased! {
                                // Process any path refresh requests before moving on to process
                                // the queue of ignore statuses.
                                request = self.refresh_requests_rx.recv().fuse() => {
                                    let Ok((paths, barrier)) = request else { break };
                                    if !self.process_refresh_request(paths, barrier).await {
                                        return;
                                    }
                                }

                                // Recursively process directories whose ignores have changed.
                                job = ignore_queue_rx.recv().fuse() => {
                                    let Ok(job) = job else { break };
                                    self.update_ignore_status(job, &snapshot).await;
                                }
                            }
                        }
                    });
                }
            })
            .await;
    }
2775
    /// Recomputes the ignore status of the direct children of `job.abs_path`,
    /// queueing a follow-up job for each child directory, and writes any
    /// changed entries back into the shared snapshot.
    async fn update_ignore_status(&self, job: UpdateIgnoreStatusJob, snapshot: &LocalSnapshot) {
        let mut ignore_stack = job.ignore_stack;
        // Include this directory's own .gitignore, if it has one.
        if let Some((ignore, _)) = snapshot.ignores_by_parent_abs_path.get(&job.abs_path) {
            ignore_stack = ignore_stack.append(job.abs_path.clone(), ignore.clone());
        }

        let mut entries_by_id_edits = Vec::new();
        let mut entries_by_path_edits = Vec::new();
        let path = job.abs_path.strip_prefix(&snapshot.abs_path).unwrap();
        for mut entry in snapshot.child_entries(path).cloned() {
            let was_ignored = entry.is_ignored;
            let abs_path = snapshot.abs_path().join(&entry.path);
            entry.is_ignored = ignore_stack.is_abs_path_ignored(&abs_path, entry.is_dir());
            if entry.is_dir() {
                let child_ignore_stack = if entry.is_ignored {
                    IgnoreStack::all()
                } else {
                    ignore_stack.clone()
                };
                job.ignore_queue
                    .send(UpdateIgnoreStatusJob {
                        abs_path: abs_path.into(),
                        ignore_stack: child_ignore_stack,
                        ignore_queue: job.ignore_queue.clone(),
                    })
                    .await
                    .unwrap();
            }

            // Only emit edits for entries whose ignore status actually changed.
            if entry.is_ignored != was_ignored {
                let mut path_entry = snapshot.entries_by_id.get(&entry.id, &()).unwrap().clone();
                path_entry.scan_id = snapshot.scan_id;
                path_entry.is_ignored = entry.is_ignored;
                entries_by_id_edits.push(Edit::Insert(path_entry));
                entries_by_path_edits.push(Edit::Insert(entry));
            }
        }

        let mut snapshot = self.snapshot.lock();
        snapshot.entries_by_path.edit(entries_by_path_edits, &());
        snapshot.entries_by_id.edit(entries_by_id_edits, &());
    }
2818
    /// Diffs `old_snapshot` against `new_snapshot`, restricted to the given
    /// `event_paths` and everything beneath them, returning the kind of
    /// change detected for each affected path.
    fn build_change_set(
        &self,
        old_snapshot: &Snapshot,
        new_snapshot: &Snapshot,
        event_paths: Vec<Arc<Path>>,
    ) -> HashMap<Arc<Path>, PathChange> {
        use PathChange::{Added, AddedOrUpdated, Removed, Updated};

        let mut changes = HashMap::default();
        let mut old_paths = old_snapshot.entries_by_path.cursor::<PathKey>();
        let mut new_paths = new_snapshot.entries_by_path.cursor::<PathKey>();
        let received_before_initialized = !self.finished_initial_scan;

        for path in event_paths {
            let path = PathKey(path);
            // Position both cursors at this event path and walk them in
            // lockstep through its subtree.
            old_paths.seek(&path, Bias::Left, &());
            new_paths.seek(&path, Bias::Left, &());

            loop {
                match (old_paths.item(), new_paths.item()) {
                    (Some(old_entry), Some(new_entry)) => {
                        // Both cursors have moved past the event path and its
                        // subtree; move on to the next event path.
                        if old_entry.path > path.0
                            && new_entry.path > path.0
                            && !old_entry.path.starts_with(&path.0)
                            && !new_entry.path.starts_with(&path.0)
                        {
                            break;
                        }

                        match Ord::cmp(&old_entry.path, &new_entry.path) {
                            // Present only in the old snapshot: removed.
                            Ordering::Less => {
                                changes.insert(old_entry.path.clone(), Removed);
                                old_paths.next(&());
                            }
                            Ordering::Equal => {
                                if received_before_initialized {
                                    // If the worktree was not fully initialized when this event was generated,
                                    // we can't know whether this entry was added during the scan or whether
                                    // it was merely updated.
                                    changes.insert(new_entry.path.clone(), AddedOrUpdated);
                                } else if old_entry.mtime != new_entry.mtime {
                                    changes.insert(new_entry.path.clone(), Updated);
                                }
                                old_paths.next(&());
                                new_paths.next(&());
                            }
                            // Present only in the new snapshot: added.
                            Ordering::Greater => {
                                changes.insert(new_entry.path.clone(), Added);
                                new_paths.next(&());
                            }
                        }
                    }
                    (Some(old_entry), None) => {
                        changes.insert(old_entry.path.clone(), Removed);
                        old_paths.next(&());
                    }
                    (None, Some(new_entry)) => {
                        changes.insert(new_entry.path.clone(), Added);
                        new_paths.next(&());
                    }
                    (None, None) => break,
                }
            }
        }
        changes
    }
2885
    /// Waits for the progress-update interval to elapse. When `running` is
    /// false, this never resolves, effectively disabling the corresponding
    /// `select_biased!` arm in `scan_dirs`.
    async fn progress_timer(&self, running: bool) {
        if !running {
            return futures::future::pending().await;
        }

        // Under a fake filesystem in tests, use the deterministic executor's
        // simulated delay instead of real wall-clock time.
        #[cfg(any(test, feature = "test-support"))]
        if self.fs.is_fake() {
            return self.executor.simulate_random_delay().await;
        }

        smol::Timer::after(Duration::from_millis(100)).await;
    }
2898}
2899
2900fn char_bag_for_path(root_char_bag: CharBag, path: &Path) -> CharBag {
2901 let mut result = root_char_bag;
2902 result.extend(
2903 path.to_string_lossy()
2904 .chars()
2905 .map(|c| c.to_ascii_lowercase()),
2906 );
2907 result
2908}
2909
/// A unit of work for the directory scanner: scan the directory at
/// `abs_path`, enqueueing jobs for its subdirectories onto `scan_queue`.
struct ScanJob {
    // Absolute path of the directory to scan.
    abs_path: Arc<Path>,
    // Worktree-relative path of the same directory.
    path: Arc<Path>,
    // Ignore rules accumulated from ancestor directories.
    ignore_stack: Arc<IgnoreStack>,
    // Sender used to enqueue jobs for subdirectories.
    scan_queue: Sender<ScanJob>,
    // Inodes of ancestor directories, used to detect recursive symlinks.
    ancestor_inodes: TreeSet<u64>,
}
2917
/// A unit of work for re-evaluating ignore statuses: recompute the children
/// of `abs_path`, enqueueing follow-up jobs for child directories.
struct UpdateIgnoreStatusJob {
    // Absolute path of the directory whose children should be re-evaluated.
    abs_path: Arc<Path>,
    // Ignore rules in effect above this directory.
    ignore_stack: Arc<IgnoreStack>,
    // Sender used to enqueue jobs for child directories.
    ignore_queue: Sender<UpdateIgnoreStatusJob>,
}
2923
/// Test-support extension methods for worktree handles.
pub trait WorktreeHandle {
    /// Mutates the worktree's directory and waits for the resulting FS events
    /// to be processed, ensuring any pending redundant events are flushed.
    #[cfg(any(test, feature = "test-support"))]
    fn flush_fs_events<'a>(
        &self,
        cx: &'a gpui::TestAppContext,
    ) -> futures::future::LocalBoxFuture<'a, ()>;
}
2931
impl WorktreeHandle for ModelHandle<Worktree> {
    // The worktree's FS event stream sometimes delivers "redundant" events for FS changes that
    // occurred before the worktree was constructed. These events can cause the worktree to perform
    // extra directory scans, and emit extra scan-state notifications.
    //
    // This function mutates the worktree's directory and waits for those mutations to be picked up,
    // to ensure that all redundant FS events have already been processed.
    #[cfg(any(test, feature = "test-support"))]
    fn flush_fs_events<'a>(
        &self,
        cx: &'a gpui::TestAppContext,
    ) -> futures::future::LocalBoxFuture<'a, ()> {
        use smol::future::FutureExt;

        let filename = "fs-event-sentinel";
        let tree = self.clone();
        let (fs, root_path) = self.read_with(cx, |tree, _| {
            let tree = tree.as_local().unwrap();
            (tree.fs.clone(), tree.abs_path().clone())
        });

        async move {
            // Create and then delete a sentinel file, waiting for the worktree
            // to observe each mutation.
            fs.create_file(&root_path.join(filename), Default::default())
                .await
                .unwrap();
            tree.condition(cx, |tree, _| tree.entry_for_path(filename).is_some())
                .await;

            fs.remove_file(&root_path.join(filename), Default::default())
                .await
                .unwrap();
            tree.condition(cx, |tree, _| tree.entry_for_path(filename).is_none())
                .await;

            // Finally, wait for any in-progress scan to complete.
            cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
                .await;
        }
        .boxed_local()
    }
}
2972
/// Cursor dimension tracking how many entries (total/visible, dirs/files)
/// have been passed while seeking through the entry tree.
#[derive(Clone, Debug)]
struct TraversalProgress<'a> {
    // The maximum path seen so far.
    max_path: &'a Path,
    // Total number of entries passed.
    count: usize,
    // Number of non-ignored entries passed.
    visible_count: usize,
    // Number of file entries passed.
    file_count: usize,
    // Number of non-ignored file entries passed.
    visible_file_count: usize,
}
2981
2982impl<'a> TraversalProgress<'a> {
2983 fn count(&self, include_dirs: bool, include_ignored: bool) -> usize {
2984 match (include_ignored, include_dirs) {
2985 (true, true) => self.count,
2986 (true, false) => self.file_count,
2987 (false, true) => self.visible_count,
2988 (false, false) => self.visible_file_count,
2989 }
2990 }
2991}
2992
impl<'a> sum_tree::Dimension<'a, EntrySummary> for TraversalProgress<'a> {
    /// Accumulates an entry summary: takes the summary's max path and adds
    /// its counts to the running totals.
    fn add_summary(&mut self, summary: &'a EntrySummary, _: &()) {
        self.max_path = summary.max_path.as_ref();
        self.count += summary.count;
        self.visible_count += summary.visible_count;
        self.file_count += summary.file_count;
        self.visible_file_count += summary.visible_file_count;
    }
}
3002
impl<'a> Default for TraversalProgress<'a> {
    /// An empty progress: no entries passed, empty max path.
    fn default() -> Self {
        Self {
            max_path: Path::new(""),
            count: 0,
            visible_count: 0,
            file_count: 0,
            visible_file_count: 0,
        }
    }
}
3014
/// An iterator-like cursor over the worktree's entries, optionally filtering
/// out directories and/or ignored entries.
pub struct Traversal<'a> {
    cursor: sum_tree::Cursor<'a, Entry, TraversalProgress<'a>>,
    // Whether ignored entries are yielded.
    include_ignored: bool,
    // Whether directory entries are yielded.
    include_dirs: bool,
}
3020
impl<'a> Traversal<'a> {
    /// Advances to the next entry matching the traversal's filters.
    /// Returns whether the cursor moved.
    pub fn advance(&mut self) -> bool {
        self.advance_to_offset(self.offset() + 1)
    }

    /// Advances until `offset` filtered entries have been passed.
    pub fn advance_to_offset(&mut self, offset: usize) -> bool {
        self.cursor.seek_forward(
            &TraversalTarget::Count {
                count: offset,
                include_dirs: self.include_dirs,
                include_ignored: self.include_ignored,
            },
            Bias::Right,
            &(),
        )
    }

    /// Advances past the current entry's entire subtree, stopping at the
    /// first subsequent entry that matches the traversal's filters.
    pub fn advance_to_sibling(&mut self) -> bool {
        while let Some(entry) = self.cursor.item() {
            self.cursor.seek_forward(
                &TraversalTarget::PathSuccessor(&entry.path),
                Bias::Left,
                &(),
            );
            if let Some(entry) = self.cursor.item() {
                // Keep seeking until the entry passes the dir/ignored filters.
                if (self.include_dirs || !entry.is_dir())
                    && (self.include_ignored || !entry.is_ignored)
                {
                    return true;
                }
            }
        }
        false
    }

    /// The entry at the cursor's current position, if any.
    pub fn entry(&self) -> Option<&'a Entry> {
        self.cursor.item()
    }

    /// The number of filtered entries preceding the cursor's position.
    pub fn offset(&self) -> usize {
        self.cursor
            .start()
            .count(self.include_dirs, self.include_ignored)
    }
}
3066
3067impl<'a> Iterator for Traversal<'a> {
3068 type Item = &'a Entry;
3069
3070 fn next(&mut self) -> Option<Self::Item> {
3071 if let Some(item) = self.entry() {
3072 self.advance();
3073 Some(item)
3074 } else {
3075 None
3076 }
3077 }
3078}
3079
/// Seek targets for positioning a `Traversal`'s cursor.
#[derive(Debug)]
enum TraversalTarget<'a> {
    // Seek to the given path itself.
    Path(&'a Path),
    // Seek to the first position beyond the given path's subtree.
    PathSuccessor(&'a Path),
    // Seek until `count` entries matching the filters have been passed.
    Count {
        count: usize,
        include_ignored: bool,
        include_dirs: bool,
    },
}
3090
impl<'a, 'b> SeekTarget<'a, EntrySummary, TraversalProgress<'a>> for TraversalTarget<'b> {
    /// Compares a seek target against the cursor's accumulated progress,
    /// driving the cursor's binary search through the tree.
    fn cmp(&self, cursor_location: &TraversalProgress<'a>, _: &()) -> Ordering {
        match self {
            TraversalTarget::Path(path) => path.cmp(&cursor_location.max_path),
            TraversalTarget::PathSuccessor(path) => {
                // Equal as soon as the cursor has left the path's subtree,
                // i.e. at the first position not prefixed by `path`.
                if !cursor_location.max_path.starts_with(path) {
                    Ordering::Equal
                } else {
                    Ordering::Greater
                }
            }
            TraversalTarget::Count {
                count,
                include_dirs,
                include_ignored,
            } => Ord::cmp(
                count,
                &cursor_location.count(*include_dirs, *include_ignored),
            ),
        }
    }
}
3113
/// Iterator over the immediate children of `parent_path`, skipping the
/// contents of child directories.
struct ChildEntriesIter<'a> {
    parent_path: &'a Path,
    traversal: Traversal<'a>,
}
3118
3119impl<'a> Iterator for ChildEntriesIter<'a> {
3120 type Item = &'a Entry;
3121
3122 fn next(&mut self) -> Option<Self::Item> {
3123 if let Some(item) = self.traversal.entry() {
3124 if item.path.starts_with(&self.parent_path) {
3125 self.traversal.advance_to_sibling();
3126 return Some(item);
3127 }
3128 }
3129 None
3130 }
3131}
3132
impl<'a> From<&'a Entry> for proto::Entry {
    /// Serializes an entry for transmission over the wire. The path is
    /// converted lossily to a string.
    fn from(entry: &'a Entry) -> Self {
        Self {
            id: entry.id.to_proto(),
            is_dir: entry.is_dir(),
            path: entry.path.to_string_lossy().into(),
            inode: entry.inode,
            mtime: Some(entry.mtime.into()),
            is_symlink: entry.is_symlink,
            is_ignored: entry.is_ignored,
        }
    }
}
3146
3147impl<'a> TryFrom<(&'a CharBag, proto::Entry)> for Entry {
3148 type Error = anyhow::Error;
3149
3150 fn try_from((root_char_bag, entry): (&'a CharBag, proto::Entry)) -> Result<Self> {
3151 if let Some(mtime) = entry.mtime {
3152 let kind = if entry.is_dir {
3153 EntryKind::Dir
3154 } else {
3155 let mut char_bag = *root_char_bag;
3156 char_bag.extend(entry.path.chars().map(|c| c.to_ascii_lowercase()));
3157 EntryKind::File(char_bag)
3158 };
3159 let path: Arc<Path> = PathBuf::from(entry.path).into();
3160 Ok(Entry {
3161 id: ProjectEntryId::from_proto(entry.id),
3162 kind,
3163 path,
3164 inode: entry.inode,
3165 mtime: mtime.into(),
3166 is_symlink: entry.is_symlink,
3167 is_ignored: entry.is_ignored,
3168 })
3169 } else {
3170 Err(anyhow!(
3171 "missing mtime in remote worktree entry {:?}",
3172 entry.path
3173 ))
3174 }
3175 }
3176}
3177
3178#[cfg(test)]
3179mod tests {
3180 use super::*;
3181 use fs::{FakeFs, RealFs};
3182 use gpui::{executor::Deterministic, TestAppContext};
3183 use pretty_assertions::assert_eq;
3184 use rand::prelude::*;
3185 use serde_json::json;
3186 use std::{env, fmt::Write};
3187 use util::{http::FakeHttpClient, test::temp_tree};
3188
    // Verifies that traversal respects .gitignore: `entries(false)` omits the
    // ignored path "a/b" while `entries(true)` includes it.
    #[gpui::test]
    async fn test_traversal(cx: &mut TestAppContext) {
        let fs = FakeFs::new(cx.background());
        fs.insert_tree(
            "/root",
            json!({
                ".gitignore": "a/b\n",
                "a": {
                    "b": "",
                    "c": "",
                }
            }),
        )
        .await;

        let http_client = FakeHttpClient::with_404_response();
        let client = cx.read(|cx| Client::new(http_client, cx));

        let tree = Worktree::local(
            client,
            Path::new("/root"),
            true,
            fs,
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();
        // Wait for the initial scan to finish before asserting on entries.
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;

        tree.read_with(cx, |tree, _| {
            assert_eq!(
                tree.entries(false)
                    .map(|entry| entry.path.as_ref())
                    .collect::<Vec<_>>(),
                vec![
                    Path::new(""),
                    Path::new(".gitignore"),
                    Path::new("a"),
                    Path::new("a/c"),
                ]
            );
            assert_eq!(
                tree.entries(true)
                    .map(|entry| entry.path.as_ref())
                    .collect::<Vec<_>>(),
                vec![
                    Path::new(""),
                    Path::new(".gitignore"),
                    Path::new("a"),
                    Path::new("a/b"),
                    Path::new("a/c"),
                ]
            );
        })
    }
3246
    // Verifies that scanning does not recurse forever through circular
    // symlinks: the symlinked directories appear as entries, but their
    // contents are not descended into, and renaming a symlink is observed.
    #[gpui::test(iterations = 10)]
    async fn test_circular_symlinks(executor: Arc<Deterministic>, cx: &mut TestAppContext) {
        let fs = FakeFs::new(cx.background());
        fs.insert_tree(
            "/root",
            json!({
                "lib": {
                    "a": {
                        "a.txt": ""
                    },
                    "b": {
                        "b.txt": ""
                    }
                }
            }),
        )
        .await;
        // Each symlink points back at its parent's parent, forming a cycle.
        fs.insert_symlink("/root/lib/a/lib", "..".into()).await;
        fs.insert_symlink("/root/lib/b/lib", "..".into()).await;

        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
        let tree = Worktree::local(
            client,
            Path::new("/root"),
            true,
            fs.clone(),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();

        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;

        tree.read_with(cx, |tree, _| {
            assert_eq!(
                tree.entries(false)
                    .map(|entry| entry.path.as_ref())
                    .collect::<Vec<_>>(),
                vec![
                    Path::new(""),
                    Path::new("lib"),
                    Path::new("lib/a"),
                    Path::new("lib/a/a.txt"),
                    Path::new("lib/a/lib"),
                    Path::new("lib/b"),
                    Path::new("lib/b/b.txt"),
                    Path::new("lib/b/lib"),
                ]
            );
        });

        // Renaming one of the symlinks should be reflected after the rescan.
        fs.rename(
            Path::new("/root/lib/a/lib"),
            Path::new("/root/lib/a/lib-2"),
            Default::default(),
        )
        .await
        .unwrap();
        executor.run_until_parked();
        tree.read_with(cx, |tree, _| {
            assert_eq!(
                tree.entries(false)
                    .map(|entry| entry.path.as_ref())
                    .collect::<Vec<_>>(),
                vec![
                    Path::new(""),
                    Path::new("lib"),
                    Path::new("lib/a"),
                    Path::new("lib/a/a.txt"),
                    Path::new("lib/a/lib-2"),
                    Path::new("lib/b"),
                    Path::new("lib/b/b.txt"),
                    Path::new("lib/b/lib"),
                ]
            );
        });
    }
3326
    // Verifies ignore statuses on a real filesystem, both for the initial
    // scan and for files created afterwards: entries matched by the tree's
    // own .gitignore or by an ancestor's .gitignore are marked ignored.
    #[gpui::test]
    async fn test_rescan_with_gitignore(cx: &mut TestAppContext) {
        let parent_dir = temp_tree(json!({
            ".gitignore": "ancestor-ignored-file1\nancestor-ignored-file2\n",
            "tree": {
                ".git": {},
                ".gitignore": "ignored-dir\n",
                "tracked-dir": {
                    "tracked-file1": "",
                    "ancestor-ignored-file1": "",
                },
                "ignored-dir": {
                    "ignored-file1": ""
                }
            }
        }));
        let dir = parent_dir.path().join("tree");

        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));

        let tree = Worktree::local(
            client,
            dir.as_path(),
            true,
            Arc::new(RealFs),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;
        tree.flush_fs_events(cx).await;
        cx.read(|cx| {
            let tree = tree.read(cx);
            assert!(
                !tree
                    .entry_for_path("tracked-dir/tracked-file1")
                    .unwrap()
                    .is_ignored
            );
            assert!(
                tree.entry_for_path("tracked-dir/ancestor-ignored-file1")
                    .unwrap()
                    .is_ignored
            );
            assert!(
                tree.entry_for_path("ignored-dir/ignored-file1")
                    .unwrap()
                    .is_ignored
            );
        });

        // Create new files in tracked and ignored locations, then verify
        // their ignore statuses after the rescan.
        std::fs::write(dir.join("tracked-dir/tracked-file2"), "").unwrap();
        std::fs::write(dir.join("tracked-dir/ancestor-ignored-file2"), "").unwrap();
        std::fs::write(dir.join("ignored-dir/ignored-file2"), "").unwrap();
        tree.flush_fs_events(cx).await;
        cx.read(|cx| {
            let tree = tree.read(cx);
            assert!(
                !tree
                    .entry_for_path("tracked-dir/tracked-file2")
                    .unwrap()
                    .is_ignored
            );
            assert!(
                tree.entry_for_path("tracked-dir/ancestor-ignored-file2")
                    .unwrap()
                    .is_ignored
            );
            assert!(
                tree.entry_for_path("ignored-dir/ignored-file2")
                    .unwrap()
                    .is_ignored
            );
            assert!(tree.entry_for_path(".git").unwrap().is_ignored);
        });
    }
3405
    // Verifies git repository discovery: paths resolve to the innermost
    // enclosing repository, writing inside .git bumps the repo's scan id,
    // and deleting .git removes the repository.
    #[gpui::test]
    async fn test_git_repository_for_path(cx: &mut TestAppContext) {
        let root = temp_tree(json!({
            "dir1": {
                ".git": {},
                "deps": {
                    "dep1": {
                        ".git": {},
                        "src": {
                            "a.txt": ""
                        }
                    }
                },
                "src": {
                    "b.txt": ""
                }
            },
            "c.txt": "",
        }));

        let http_client = FakeHttpClient::with_404_response();
        let client = cx.read(|cx| Client::new(http_client, cx));
        let tree = Worktree::local(
            client,
            root.path(),
            true,
            Arc::new(RealFs),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();

        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;
        tree.flush_fs_events(cx).await;

        tree.read_with(cx, |tree, _cx| {
            let tree = tree.as_local().unwrap();

            // A path outside any repository resolves to no repo.
            assert!(tree.repo_for("c.txt".as_ref()).is_none());

            let entry = tree.repo_for("dir1/src/b.txt".as_ref()).unwrap();
            assert_eq!(entry.work_directory.0.as_ref(), Path::new("dir1"));
            assert_eq!(entry.git_dir_path.as_ref(), Path::new("dir1/.git"));

            // A nested repository takes precedence over its enclosing one.
            let entry = tree.repo_for("dir1/deps/dep1/src/a.txt".as_ref()).unwrap();
            assert_eq!(entry.work_directory.deref(), Path::new("dir1/deps/dep1"));
            assert_eq!(
                entry.git_dir_path.as_ref(),
                Path::new("dir1/deps/dep1/.git"),
            );
        });

        let original_scan_id = tree.read_with(cx, |tree, _cx| {
            let tree = tree.as_local().unwrap();
            let entry = tree.repo_for("dir1/src/b.txt".as_ref()).unwrap();
            entry.scan_id
        });

        // Writing a file inside .git should bump the repository's scan id.
        std::fs::write(root.path().join("dir1/.git/random_new_file"), "hello").unwrap();
        tree.flush_fs_events(cx).await;

        tree.read_with(cx, |tree, _cx| {
            let tree = tree.as_local().unwrap();
            let new_scan_id = {
                let entry = tree.repo_for("dir1/src/b.txt".as_ref()).unwrap();
                entry.scan_id
            };
            assert_ne!(
                original_scan_id, new_scan_id,
                "original {original_scan_id}, new {new_scan_id}"
            );
        });

        // Deleting .git should remove the repository entirely.
        std::fs::remove_dir_all(root.path().join("dir1/.git")).unwrap();
        tree.flush_fs_events(cx).await;

        tree.read_with(cx, |tree, _cx| {
            let tree = tree.as_local().unwrap();

            assert!(tree.repo_for("dir1/src/b.txt".as_ref()).is_none());
        });
    }
3490
3491 #[test]
3492 fn test_changed_repos() {
3493 fn fake_entry(git_dir_path: impl AsRef<Path>, scan_id: usize) -> RepositoryEntry {
3494 RepositoryEntry {
3495 scan_id,
3496 git_dir_path: git_dir_path.as_ref().into(),
3497 git_dir_entry_id: ProjectEntryId(0),
3498 work_directory: RepositoryWorkDirectory(
3499 Path::new(&format!("don't-care-{}", scan_id)).into(),
3500 ),
3501 }
3502 }
3503
3504 let mut prev_repos = TreeMap::<RepositoryWorkDirectory, RepositoryEntry>::default();
3505 prev_repos.insert(
3506 RepositoryWorkDirectory(Path::new("don't-care-1").into()),
3507 fake_entry("/.git", 0),
3508 );
3509 prev_repos.insert(
3510 RepositoryWorkDirectory(Path::new("don't-care-2").into()),
3511 fake_entry("/a/.git", 0),
3512 );
3513 prev_repos.insert(
3514 RepositoryWorkDirectory(Path::new("don't-care-3").into()),
3515 fake_entry("/a/b/.git", 0),
3516 );
3517
3518 let mut new_repos = TreeMap::<RepositoryWorkDirectory, RepositoryEntry>::default();
3519 new_repos.insert(
3520 RepositoryWorkDirectory(Path::new("don't-care-4").into()),
3521 fake_entry("/a/.git", 1),
3522 );
3523 new_repos.insert(
3524 RepositoryWorkDirectory(Path::new("don't-care-5").into()),
3525 fake_entry("/a/b/.git", 0),
3526 );
3527 new_repos.insert(
3528 RepositoryWorkDirectory(Path::new("don't-care-6").into()),
3529 fake_entry("/a/c/.git", 0),
3530 );
3531
3532 let res = LocalWorktree::changed_repos(&prev_repos, &new_repos);
3533
3534 // Deletion retained
3535 assert!(res
3536 .iter()
3537 .find(|repo| repo.git_dir_path.as_ref() == Path::new("/.git") && repo.scan_id == 0)
3538 .is_some());
3539
3540 // Update retained
3541 assert!(res
3542 .iter()
3543 .find(|repo| repo.git_dir_path.as_ref() == Path::new("/a/.git") && repo.scan_id == 1)
3544 .is_some());
3545
3546 // Addition retained
3547 assert!(res
3548 .iter()
3549 .find(|repo| repo.git_dir_path.as_ref() == Path::new("/a/c/.git") && repo.scan_id == 0)
3550 .is_some());
3551
3552 // Nochange, not retained
3553 assert!(res
3554 .iter()
3555 .find(|repo| repo.git_dir_path.as_ref() == Path::new("/a/b/.git") && repo.scan_id == 0)
3556 .is_none());
3557 }
3558
    #[gpui::test]
    async fn test_write_file(cx: &mut TestAppContext) {
        // A worktree with a `.gitignore` that ignores `ignored-dir`, plus one
        // tracked and one ignored directory for the writes below.
        let dir = temp_tree(json!({
            ".git": {},
            ".gitignore": "ignored-dir\n",
            "tracked-dir": {},
            "ignored-dir": {}
        }));

        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));

        let tree = Worktree::local(
            client,
            dir.path(),
            true,
            Arc::new(RealFs),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();
        // Wait for the initial scan and pending fs events so the assertions
        // below observe a settled snapshot.
        cx.read(|cx| tree.read(cx).as_local().unwrap().scan_complete())
            .await;
        tree.flush_fs_events(cx).await;

        // Write one file into the tracked directory...
        tree.update(cx, |tree, cx| {
            tree.as_local().unwrap().write_file(
                Path::new("tracked-dir/file.txt"),
                "hello".into(),
                Default::default(),
                cx,
            )
        })
        .await
        .unwrap();
        // ...and one into the ignored directory.
        tree.update(cx, |tree, cx| {
            tree.as_local().unwrap().write_file(
                Path::new("ignored-dir/file.txt"),
                "world".into(),
                Default::default(),
                cx,
            )
        })
        .await
        .unwrap();

        // Both files appear in the worktree, with ignore status inherited
        // from their parent directories.
        tree.read_with(cx, |tree, _| {
            let tracked = tree.entry_for_path("tracked-dir/file.txt").unwrap();
            let ignored = tree.entry_for_path("ignored-dir/file.txt").unwrap();
            assert!(!tracked.is_ignored);
            assert!(ignored.is_ignored);
        });
    }
3612
3613 #[gpui::test(iterations = 30)]
3614 async fn test_create_directory_during_initial_scan(cx: &mut TestAppContext) {
3615 let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
3616
3617 let fs = FakeFs::new(cx.background());
3618 fs.insert_tree(
3619 "/root",
3620 json!({
3621 "b": {},
3622 "c": {},
3623 "d": {},
3624 }),
3625 )
3626 .await;
3627
3628 let tree = Worktree::local(
3629 client,
3630 "/root".as_ref(),
3631 true,
3632 fs,
3633 Default::default(),
3634 &mut cx.to_async(),
3635 )
3636 .await
3637 .unwrap();
3638
3639 let mut snapshot1 = tree.update(cx, |tree, _| tree.as_local().unwrap().snapshot());
3640
3641 let entry = tree
3642 .update(cx, |tree, cx| {
3643 tree.as_local_mut()
3644 .unwrap()
3645 .create_entry("a/e".as_ref(), true, cx)
3646 })
3647 .await
3648 .unwrap();
3649 assert!(entry.is_dir());
3650
3651 cx.foreground().run_until_parked();
3652 tree.read_with(cx, |tree, _| {
3653 assert_eq!(tree.entry_for_path("a/e").unwrap().kind, EntryKind::Dir);
3654 });
3655
3656 let snapshot2 = tree.update(cx, |tree, _| tree.as_local().unwrap().snapshot());
3657 let update = snapshot2.build_update(&snapshot1, 0, 0, true);
3658 snapshot1.apply_remote_update(update).unwrap();
3659 assert_eq!(snapshot1.to_vec(true), snapshot2.to_vec(true),);
3660 }
3661
    #[gpui::test(iterations = 100)]
    async fn test_random_worktree_operations_during_initial_scan(
        cx: &mut TestAppContext,
        mut rng: StdRng,
    ) {
        // Number of worktree operations to perform and number of seeded fs
        // entries; both overridable via environment variables.
        let operations = env::var("OPERATIONS")
            .map(|o| o.parse().unwrap())
            .unwrap_or(5);
        let initial_entries = env::var("INITIAL_ENTRIES")
            .map(|o| o.parse().unwrap())
            .unwrap_or(20);

        let root_dir = Path::new("/test");
        let fs = FakeFs::new(cx.background()) as Arc<dyn Fs>;
        fs.as_fake().insert_tree(root_dir, json!({})).await;
        // Insertion probability 1.0: every call creates a file or directory.
        for _ in 0..initial_entries {
            randomly_mutate_fs(&fs, root_dir, 1.0, &mut rng).await;
        }
        log::info!("generated initial tree");

        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
        let worktree = Worktree::local(
            client.clone(),
            root_dir,
            true,
            fs.clone(),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();

        // Baseline snapshot, kept in sync below via remote-style updates.
        let mut snapshot = worktree.update(cx, |tree, _| tree.as_local().unwrap().snapshot());

        // Mutate the worktree while its initial background scan is still in
        // progress; individual mutations may legitimately fail (`log_err`).
        for _ in 0..operations {
            worktree
                .update(cx, |worktree, cx| {
                    randomly_mutate_worktree(worktree, &mut rng, cx)
                })
                .await
                .log_err();
            worktree.read_with(cx, |tree, _| {
                tree.as_local().unwrap().snapshot.check_invariants()
            });

            // Occasionally sync the baseline mid-scan and verify the update
            // reproduces the current state exactly.
            if rng.gen_bool(0.6) {
                let new_snapshot =
                    worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
                let update = new_snapshot.build_update(&snapshot, 0, 0, true);
                snapshot.apply_remote_update(update.clone()).unwrap();
                assert_eq!(
                    snapshot.to_vec(true),
                    new_snapshot.to_vec(true),
                    "incorrect snapshot after update {:?}",
                    update
                );
            }
        }

        // After the scan completes, the snapshot must still be internally
        // consistent and reachable from the baseline via one final update.
        worktree
            .update(cx, |tree, _| tree.as_local_mut().unwrap().scan_complete())
            .await;
        worktree.read_with(cx, |tree, _| {
            tree.as_local().unwrap().snapshot.check_invariants()
        });

        let new_snapshot = worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
        let update = new_snapshot.build_update(&snapshot, 0, 0, true);
        snapshot.apply_remote_update(update.clone()).unwrap();
        assert_eq!(
            snapshot.to_vec(true),
            new_snapshot.to_vec(true),
            "incorrect snapshot after update {:?}",
            update
        );
    }
3738
    #[gpui::test(iterations = 100)]
    async fn test_random_worktree_changes(cx: &mut TestAppContext, mut rng: StdRng) {
        // Number of fs mutations and seeded entries; overridable via env.
        let operations = env::var("OPERATIONS")
            .map(|o| o.parse().unwrap())
            .unwrap_or(40);
        let initial_entries = env::var("INITIAL_ENTRIES")
            .map(|o| o.parse().unwrap())
            .unwrap_or(20);

        let root_dir = Path::new("/test");
        let fs = FakeFs::new(cx.background()) as Arc<dyn Fs>;
        fs.as_fake().insert_tree(root_dir, json!({})).await;
        // Insertion probability 1.0: every call creates a file or directory.
        for _ in 0..initial_entries {
            randomly_mutate_fs(&fs, root_dir, 1.0, &mut rng).await;
        }
        log::info!("generated initial tree");

        let client = cx.read(|cx| Client::new(FakeHttpClient::with_404_response(), cx));
        let worktree = Worktree::local(
            client.clone(),
            root_dir,
            true,
            fs.clone(),
            Default::default(),
            &mut cx.to_async(),
        )
        .await
        .unwrap();

        worktree
            .update(cx, |tree, _| tree.as_local_mut().unwrap().scan_complete())
            .await;

        // After the initial scan is complete, the `UpdatedEntries` event can
        // be used to follow along with all changes to the worktree's snapshot.
        worktree.update(cx, |tree, cx| {
            // `paths` mirrors the worktree's sorted path list; it is patched
            // incrementally from each change event and then compared against
            // the real list, proving the events describe every change.
            let mut paths = tree
                .as_local()
                .unwrap()
                .paths()
                .cloned()
                .collect::<Vec<_>>();

            cx.subscribe(&worktree, move |tree, _, event, _| {
                if let Event::UpdatedEntries(changes) = event {
                    for (path, change_type) in changes.iter() {
                        let path = path.clone();
                        // Index where `path` is (or would be inserted) in the
                        // sorted mirror.
                        let ix = match paths.binary_search(&path) {
                            Ok(ix) | Err(ix) => ix,
                        };
                        match change_type {
                            PathChange::Added => {
                                assert_ne!(paths.get(ix), Some(&path));
                                paths.insert(ix, path);
                            }
                            PathChange::Removed => {
                                assert_eq!(paths.get(ix), Some(&path));
                                paths.remove(ix);
                            }
                            PathChange::Updated => {
                                assert_eq!(paths.get(ix), Some(&path));
                            }
                            PathChange::AddedOrUpdated => {
                                if paths[ix] != path {
                                    paths.insert(ix, path);
                                }
                            }
                        }
                    }
                    let new_paths = tree.paths().cloned().collect::<Vec<_>>();
                    assert_eq!(paths, new_paths, "incorrect changes: {:?}", changes);
                }
            })
            .detach();
        });

        // Interleave fs mutations with partial flushes of buffered fs events
        // so the scanner observes changes in arbitrarily-sized batches.
        let mut snapshots = Vec::new();
        let mut mutations_len = operations;
        while mutations_len > 1 {
            randomly_mutate_fs(&fs, root_dir, 1.0, &mut rng).await;
            let buffered_event_count = fs.as_fake().buffered_event_count().await;
            if buffered_event_count > 0 && rng.gen_bool(0.3) {
                let len = rng.gen_range(0..=buffered_event_count);
                log::info!("flushing {} events", len);
                fs.as_fake().flush_events(len).await;
            } else {
                randomly_mutate_fs(&fs, root_dir, 0.6, &mut rng).await;
                mutations_len -= 1;
            }

            cx.foreground().run_until_parked();
            // Occasionally record an intermediate snapshot; each one is later
            // verified to converge on the final state via a remote update.
            if rng.gen_bool(0.2) {
                log::info!("storing snapshot {}", snapshots.len());
                let snapshot =
                    worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
                snapshots.push(snapshot);
            }
        }

        // Flush all remaining events and let the scanner settle.
        log::info!("quiescing");
        fs.as_fake().flush_events(usize::MAX).await;
        cx.foreground().run_until_parked();
        let snapshot = worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
        snapshot.check_invariants();

        // A freshly-scanned worktree over the same fs must agree with the
        // incrementally-maintained one.
        {
            let new_worktree = Worktree::local(
                client.clone(),
                root_dir,
                true,
                fs.clone(),
                Default::default(),
                &mut cx.to_async(),
            )
            .await
            .unwrap();
            new_worktree
                .update(cx, |tree, _| tree.as_local_mut().unwrap().scan_complete())
                .await;
            let new_snapshot =
                new_worktree.read_with(cx, |tree, _| tree.as_local().unwrap().snapshot());
            assert_eq!(snapshot.to_vec(true), new_snapshot.to_vec(true));
        }

        // Every stored snapshot — optionally stripped of ignored entries to
        // exercise updates that exclude them — must reach the final state
        // once the corresponding update is applied.
        for (i, mut prev_snapshot) in snapshots.into_iter().enumerate() {
            let include_ignored = rng.gen::<bool>();
            if !include_ignored {
                let mut entries_by_path_edits = Vec::new();
                let mut entries_by_id_edits = Vec::new();
                for entry in prev_snapshot
                    .entries_by_id
                    .cursor::<()>()
                    .filter(|e| e.is_ignored)
                {
                    entries_by_path_edits.push(Edit::Remove(PathKey(entry.path.clone())));
                    entries_by_id_edits.push(Edit::Remove(entry.id));
                }

                prev_snapshot
                    .entries_by_path
                    .edit(entries_by_path_edits, &());
                prev_snapshot.entries_by_id.edit(entries_by_id_edits, &());
            }

            let update = snapshot.build_update(&prev_snapshot, 0, 0, include_ignored);
            prev_snapshot.apply_remote_update(update.clone()).unwrap();
            assert_eq!(
                prev_snapshot.to_vec(include_ignored),
                snapshot.to_vec(include_ignored),
                "wrong update for snapshot {i}. update: {:?}",
                update
            );
        }
    }
3893
    // Applies one random operation (delete, rename, or create/overwrite) to a
    // randomly-chosen entry of the worktree, returning the resulting task.
    fn randomly_mutate_worktree(
        worktree: &mut Worktree,
        rng: &mut impl Rng,
        cx: &mut ModelContext<Worktree>,
    ) -> Task<Result<()>> {
        let worktree = worktree.as_local_mut().unwrap();
        let snapshot = worktree.snapshot();
        let entry = snapshot.entries(false).choose(rng).unwrap();

        match rng.gen_range(0_u32..100) {
            // ~1/3: delete the chosen entry (never the worktree root, whose
            // path is empty).
            0..=33 if entry.path.as_ref() != Path::new("") => {
                log::info!("deleting entry {:?} ({})", entry.path, entry.id.0);
                worktree.delete_entry(entry.id, cx).unwrap()
            }
            // ~1/3: rename the entry to a fresh name under a random parent.
            ..=66 if entry.path.as_ref() != Path::new("") => {
                let other_entry = snapshot.entries(false).choose(rng).unwrap();
                let new_parent_path = if other_entry.is_dir() {
                    other_entry.path.clone()
                } else {
                    other_entry.path.parent().unwrap().into()
                };
                let mut new_path = new_parent_path.join(gen_name(rng));
                // Avoid moving an entry inside itself; fall back to a
                // root-level name instead.
                if new_path.starts_with(&entry.path) {
                    new_path = gen_name(rng).into();
                }

                log::info!(
                    "renaming entry {:?} ({}) to {:?}",
                    entry.path,
                    entry.id.0,
                    new_path
                );
                let task = worktree.rename_entry(entry.id, new_path, cx).unwrap();
                cx.foreground().spawn(async move {
                    task.await?;
                    Ok(())
                })
            }
            // Remainder (and any root-entry draw): create a child under a
            // directory, or overwrite a file with empty contents.
            _ => {
                let task = if entry.is_dir() {
                    let child_path = entry.path.join(gen_name(rng));
                    let is_dir = rng.gen_bool(0.3);
                    log::info!(
                        "creating {} at {:?}",
                        if is_dir { "dir" } else { "file" },
                        child_path,
                    );
                    worktree.create_entry(child_path, is_dir, cx)
                } else {
                    log::info!("overwriting file {:?} ({})", entry.path, entry.id.0);
                    worktree.write_file(entry.path.clone(), "".into(), Default::default(), cx)
                };
                cx.foreground().spawn(async move {
                    task.await?;
                    Ok(())
                })
            }
        }
    }
3953
    // Performs one random mutation of the fake filesystem under `root_path`:
    // with `insertion_probability`, creates a new file or directory;
    // otherwise (rarely) writes a `.gitignore`, or else renames or deletes an
    // existing entry. Driven by the randomized worktree tests above.
    async fn randomly_mutate_fs(
        fs: &Arc<dyn Fs>,
        root_path: &Path,
        insertion_probability: f64,
        rng: &mut impl Rng,
    ) {
        // Partition the existing paths into files and directories. Note that
        // `root_path` itself always lands in `dirs`.
        let mut files = Vec::new();
        let mut dirs = Vec::new();
        for path in fs.as_fake().paths() {
            if path.starts_with(root_path) {
                if fs.is_file(&path).await {
                    files.push(path);
                } else {
                    dirs.push(path);
                }
            }
        }

        // Force an insertion when the tree is empty (only the root remains)
        // so the other branches always have something to operate on.
        if (files.is_empty() && dirs.len() == 1) || rng.gen_bool(insertion_probability) {
            let path = dirs.choose(rng).unwrap();
            let new_path = path.join(gen_name(rng));

            if rng.gen() {
                log::info!(
                    "creating dir {:?}",
                    new_path.strip_prefix(root_path).unwrap()
                );
                fs.create_dir(&new_path).await.unwrap();
            } else {
                log::info!(
                    "creating file {:?}",
                    new_path.strip_prefix(root_path).unwrap()
                );
                fs.create_file(&new_path, Default::default()).await.unwrap();
            }
        } else if rng.gen_bool(0.05) {
            // Write a `.gitignore` into a random directory, ignoring a random
            // subset of the files and directories beneath it.
            let ignore_dir_path = dirs.choose(rng).unwrap();
            let ignore_path = ignore_dir_path.join(&*GITIGNORE);

            // `subdirs` contains `ignore_dir_path` itself (a path starts with
            // itself), so it is never empty.
            let subdirs = dirs
                .iter()
                .filter(|d| d.starts_with(&ignore_dir_path))
                .cloned()
                .collect::<Vec<_>>();
            let subfiles = files
                .iter()
                .filter(|d| d.starts_with(&ignore_dir_path))
                .cloned()
                .collect::<Vec<_>>();
            let files_to_ignore = {
                let len = rng.gen_range(0..=subfiles.len());
                subfiles.choose_multiple(rng, len)
            };
            // NOTE(review): exclusive upper bound here vs. the inclusive one
            // for files above — so never *all* subdirs are ignored. Looks
            // deliberate (the gitignore's own dir shouldn't ignore itself);
            // confirm intent.
            let dirs_to_ignore = {
                let len = rng.gen_range(0..subdirs.len());
                subdirs.choose_multiple(rng, len)
            };

            // Each ignored path is written relative to the gitignore's dir.
            let mut ignore_contents = String::new();
            for path_to_ignore in files_to_ignore.chain(dirs_to_ignore) {
                writeln!(
                    ignore_contents,
                    "{}",
                    path_to_ignore
                        .strip_prefix(&ignore_dir_path)
                        .unwrap()
                        .to_str()
                        .unwrap()
                )
                .unwrap();
            }
            log::info!(
                "creating gitignore {:?} with contents:\n{}",
                ignore_path.strip_prefix(&root_path).unwrap(),
                ignore_contents
            );
            fs.save(
                &ignore_path,
                &ignore_contents.as_str().into(),
                Default::default(),
            )
            .await
            .unwrap();
        } else {
            // Pick an existing file or non-root directory (`dirs[1..]` skips
            // the root, which must never be renamed or deleted).
            let old_path = {
                let file_path = files.choose(rng);
                let dir_path = dirs[1..].choose(rng);
                file_path.into_iter().chain(dir_path).choose(rng).unwrap()
            };

            let is_rename = rng.gen();
            if is_rename {
                // Rename under a directory that isn't inside `old_path`.
                let new_path_parent = dirs
                    .iter()
                    .filter(|d| !d.starts_with(old_path))
                    .choose(rng)
                    .unwrap();

                // Occasionally replace an existing directory wholesale.
                let overwrite_existing_dir =
                    !old_path.starts_with(&new_path_parent) && rng.gen_bool(0.3);
                let new_path = if overwrite_existing_dir {
                    fs.remove_dir(
                        &new_path_parent,
                        RemoveOptions {
                            recursive: true,
                            ignore_if_not_exists: true,
                        },
                    )
                    .await
                    .unwrap();
                    new_path_parent.to_path_buf()
                } else {
                    new_path_parent.join(gen_name(rng))
                };

                log::info!(
                    "renaming {:?} to {}{:?}",
                    old_path.strip_prefix(&root_path).unwrap(),
                    if overwrite_existing_dir {
                        "overwrite "
                    } else {
                        ""
                    },
                    new_path.strip_prefix(&root_path).unwrap()
                );
                fs.rename(
                    &old_path,
                    &new_path,
                    fs::RenameOptions {
                        overwrite: true,
                        ignore_if_exists: true,
                    },
                )
                .await
                .unwrap();
            } else if fs.is_file(&old_path).await {
                log::info!(
                    "deleting file {:?}",
                    old_path.strip_prefix(&root_path).unwrap()
                );
                fs.remove_file(old_path, Default::default()).await.unwrap();
            } else {
                log::info!(
                    "deleting dir {:?}",
                    old_path.strip_prefix(&root_path).unwrap()
                );
                fs.remove_dir(
                    &old_path,
                    RemoveOptions {
                        recursive: true,
                        ignore_if_not_exists: true,
                    },
                )
                .await
                .unwrap();
            }
        }
    }
4112
4113 fn gen_name(rng: &mut impl Rng) -> String {
4114 (0..6)
4115 .map(|_| rng.sample(rand::distributions::Alphanumeric))
4116 .map(char::from)
4117 .collect()
4118 }
4119
    impl LocalSnapshot {
        // Asserts the internal consistency of this snapshot's data
        // structures. Called by the randomized tests after every mutation.
        fn check_invariants(&self) {
            // `entries_by_path` (sorted by path) and `entries_by_id` must
            // contain exactly the same (path, id) pairs.
            assert_eq!(
                self.entries_by_path
                    .cursor::<()>()
                    .map(|e| (&e.path, e.id))
                    .collect::<Vec<_>>(),
                self.entries_by_id
                    .cursor::<()>()
                    .map(|e| (&e.path, e.id))
                    .collect::<collections::BTreeSet<_>>()
                    .into_iter()
                    .collect::<Vec<_>>(),
                "entries_by_path and entries_by_id are inconsistent"
            );

            // The file traversals must visit exactly the file entries, in
            // order; `files(false, _)` must additionally skip ignored files.
            let mut files = self.files(true, 0);
            let mut visible_files = self.files(false, 0);
            for entry in self.entries_by_path.cursor::<()>() {
                if entry.is_file() {
                    assert_eq!(files.next().unwrap().inode, entry.inode);
                    if !entry.is_ignored {
                        assert_eq!(visible_files.next().unwrap().inode, entry.inode);
                    }
                }
            }

            assert!(files.next().is_none());
            assert!(visible_files.next().is_none());

            // Walk the tree via `child_entries`. Despite the variable name,
            // inserting each batch of children at a fixed index reverses
            // them on the stack, so `pop` yields a depth-first preorder in
            // sorted path order — verified against the raw cursor below.
            let mut bfs_paths = Vec::new();
            let mut stack = vec![Path::new("")];
            while let Some(path) = stack.pop() {
                bfs_paths.push(path);
                let ix = stack.len();
                for child_entry in self.child_entries(path) {
                    stack.insert(ix, &child_entry.path);
                }
            }

            let dfs_paths_via_iter = self
                .entries_by_path
                .cursor::<()>()
                .map(|e| e.path.as_ref())
                .collect::<Vec<_>>();
            assert_eq!(bfs_paths, dfs_paths_via_iter);

            // The public traversal API must agree with the raw cursor.
            let dfs_paths_via_traversal = self
                .entries(true)
                .map(|e| e.path.as_ref())
                .collect::<Vec<_>>();
            assert_eq!(dfs_paths_via_traversal, dfs_paths_via_iter);

            // Every tracked gitignore must correspond to an existing
            // directory entry that still contains a `.gitignore` file.
            for ignore_parent_abs_path in self.ignores_by_parent_abs_path.keys() {
                let ignore_parent_path =
                    ignore_parent_abs_path.strip_prefix(&self.abs_path).unwrap();
                assert!(self.entry_for_path(&ignore_parent_path).is_some());
                assert!(self
                    .entry_for_path(ignore_parent_path.join(&*GITIGNORE))
                    .is_some());
            }
        }

        // Flattens the snapshot into sorted (path, inode, is_ignored) tuples
        // for order-insensitive comparison between snapshots in tests.
        fn to_vec(&self, include_ignored: bool) -> Vec<(&Path, u64, bool)> {
            let mut paths = Vec::new();
            for entry in self.entries_by_path.cursor::<()>() {
                if include_ignored || !entry.is_ignored {
                    paths.push((entry.path.as_ref(), entry.inode, entry.is_ignored));
                }
            }
            paths.sort_by(|a, b| a.0.cmp(b.0));
            paths
        }
    }
4194}